xref: /OK3568_Linux_fs/kernel/drivers/bluetooth/btqca.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Bluetooth support for Qualcomm Atheros chips
 *
 *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/firmware.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btqca.h"

#define VERSION "0.1"

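/* qca_read_soc_version() - query the controller version over the EDL
 * vendor channel. On success the product ID, SoC ID, ROM and patch
 * versions are logged, and *soc_version is filled with the SoC ID in
 * the upper 16 bits and the ROM version in the lower 16 bits.
 */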
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
			 enum qca_btsoc_type soc_type)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	struct qca_btsoc_version *ver;
	char cmd;
	int err = 0;
	u8 event_type = HCI_EV_VENDOR;
	u8 rlen = sizeof(*edl) + sizeof(*ver);
	u8 rtype = EDL_APP_VER_RES_EVT;

	bt_dev_dbg(hdev, "QCA Version Request");

	/* Unlike other SoCs, which send the version command response as the
	 * payload of a VSE event, WCN3991 and later send it as the payload
	 * of a command complete event.
	 */
	if (soc_type >= QCA_WCN3991) {
		event_type = 0;
		rlen += 1;
		rtype = EDL_PATCH_VER_REQ_CMD;
	}

	cmd = EDL_PATCH_VER_REQ_CMD;
	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
				&cmd, event_type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Reading QCA version information failed (%d)",
			   err);
		return err;
	}

	if (skb->len != rlen) {
		bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
		err = -EILSEQ;
		goto out;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl) {
		bt_dev_err(hdev, "QCA TLV with no header");
		err = -EILSEQ;
		goto out;
	}

	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
	    edl->rtype != rtype) {
		bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
			   edl->rtype);
		err = -EIO;
		goto out;
	}

	if (soc_type >= QCA_WCN3991)
		memmove(&edl->data, &edl->data[1], sizeof(*ver));

	ver = (struct qca_btsoc_version *)(edl->data);

	bt_dev_info(hdev, "QCA Product ID   :0x%08x",
		    le32_to_cpu(ver->product_id));
	bt_dev_info(hdev, "QCA SOC Version  :0x%08x",
		    le32_to_cpu(ver->soc_id));
	bt_dev_info(hdev, "QCA ROM Version  :0x%08x",
		    le16_to_cpu(ver->rom_ver));
	bt_dev_info(hdev, "QCA Patch Version:0x%08x",
		    le16_to_cpu(ver->patch_ver));

	/* The QCA chipset version combines the SoC ID in the upper
	 * two bytes with the ROM version in the lower two bytes.
	 */
	*soc_version = (le32_to_cpu(ver->soc_id) << 16) |
		       (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
	if (*soc_version == 0)
		err = -EILSEQ;

out:
	kfree_skb(skb);
	if (err)
		bt_dev_err(hdev, "QCA Failed to get version (%d)", err);

	return err;
}
EXPORT_SYMBOL_GPL(qca_read_soc_version);

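/* Issue a standard HCI_Reset and wait for its command complete. */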
static int qca_send_reset(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	bt_dev_dbg(hdev, "QCA HCI_RESET");

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Reset failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}

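/* Send the QCA vendor-specific pre-shutdown command and wait for its
 * command complete event.
 */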
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	bt_dev_dbg(hdev, "QCA pre shutdown cmd");

	skb = __hci_cmd_sync_ev(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
				NULL, HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);

	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);

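/* Parse the TLV header of a firmware image. For patch files this records
 * the download mode advertised by the file; for NVM files it patches
 * selected tags in place (software in-band sleep, UART baud rate and
 * deep sleep enable).
 */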
static void qca_tlv_check_data(struct qca_fw_config *config,
		u8 *fw_data, enum qca_btsoc_type soc_type)
{
	const u8 *data;
	u32 type_len;
	u16 tag_id, tag_len;
	int idx, length;
	struct tlv_type_hdr *tlv;
	struct tlv_type_patch *tlv_patch;
	struct tlv_type_nvm *tlv_nvm;
	uint8_t nvm_baud_rate = config->user_baud_rate;

	tlv = (struct tlv_type_hdr *)fw_data;

	type_len = le32_to_cpu(tlv->type_len);
	length = (type_len >> 8) & 0x00ffffff;

	BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
	BT_DBG("Length\t\t : %d bytes", length);

	config->dnld_mode = QCA_SKIP_EVT_NONE;
	config->dnld_type = QCA_SKIP_EVT_NONE;

	switch (config->type) {
	case TLV_TYPE_PATCH:
		tlv_patch = (struct tlv_type_patch *)tlv->data;

		/* For Rome version 1.1 to 3.1, all segment commands
		 * are acked by a vendor specific event (VSE).
		 * For Rome >= 3.2, the download mode field indicates
		 * if VSE is skipped by the controller.
		 * In case VSE is skipped, only the last segment is acked.
		 */
		config->dnld_mode = tlv_patch->download_mode;
		config->dnld_type = config->dnld_mode;

		BT_DBG("Total Length           : %d bytes",
		       le32_to_cpu(tlv_patch->total_size));
		BT_DBG("Patch Data Length      : %d bytes",
		       le32_to_cpu(tlv_patch->data_length));
		BT_DBG("Signing Format Version : 0x%x",
		       tlv_patch->format_version);
		BT_DBG("Signature Algorithm    : 0x%x",
		       tlv_patch->signature);
		BT_DBG("Download mode          : 0x%x",
		       tlv_patch->download_mode);
		BT_DBG("Reserved               : 0x%x",
		       tlv_patch->reserved1);
		BT_DBG("Product ID             : 0x%04x",
		       le16_to_cpu(tlv_patch->product_id));
		BT_DBG("Rom Build Version      : 0x%04x",
		       le16_to_cpu(tlv_patch->rom_build));
		BT_DBG("Patch Version          : 0x%04x",
		       le16_to_cpu(tlv_patch->patch_version));
		BT_DBG("Reserved               : 0x%x",
		       le16_to_cpu(tlv_patch->reserved2));
		BT_DBG("Patch Entry Address    : 0x%x",
		       le32_to_cpu(tlv_patch->entry));
		break;

	case TLV_TYPE_NVM:
		idx = 0;
		data = tlv->data;
		while (idx < length) {
			tlv_nvm = (struct tlv_type_nvm *)(data + idx);

			tag_id = le16_to_cpu(tlv_nvm->tag_id);
			tag_len = le16_to_cpu(tlv_nvm->tag_len);

			/* Update NVM tags as needed */
			switch (tag_id) {
			case EDL_TAG_ID_HCI:
				/* HCI transport layer parameters:
				 * enable software in-band sleep on the
				 * controller side.
				 */
				tlv_nvm->data[0] |= 0x80;

				/* UART Baud Rate */
				if (soc_type >= QCA_WCN3991)
					tlv_nvm->data[1] = nvm_baud_rate;
				else
					tlv_nvm->data[2] = nvm_baud_rate;

				break;

			case EDL_TAG_ID_DEEP_SLEEP:
				/* Sleep enable mask:
				 * enable the deep sleep feature on the
				 * controller.
				 */
				tlv_nvm->data[0] |= 0x01;

				break;
			}

			idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
		}
		break;

	default:
		BT_ERR("Unknown TLV type %d", config->type);
		break;
	}
}

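/* Send one TLV segment to the controller. When the download mode skips
 * events, the segment is sent without waiting for a reply; otherwise the
 * EDL response is validated before returning.
 */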
static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
				const u8 *data, enum qca_tlv_dnld_mode mode,
				enum qca_btsoc_type soc_type)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	struct tlv_seg_resp *tlv_resp;
	u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
	int err = 0;
	u8 event_type = HCI_EV_VENDOR;
	u8 rlen = (sizeof(*edl) + sizeof(*tlv_resp));
	u8 rtype = EDL_TVL_DNLD_RES_EVT;

	cmd[0] = EDL_PATCH_TLV_REQ_CMD;
	cmd[1] = seg_size;
	memcpy(cmd + 2, data, seg_size);

	if (mode == QCA_SKIP_EVT_VSE_CC || mode == QCA_SKIP_EVT_VSE)
		return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2,
				      cmd);

	/* Unlike other SoCs, which send the command response as the payload
	 * of a VSE event, WCN3991 and later send it as the payload of a
	 * command complete event.
	 */
	if (soc_type >= QCA_WCN3991) {
		event_type = 0;
		rlen = sizeof(*edl);
		rtype = EDL_PATCH_TLV_REQ_CMD;
	}

	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
				event_type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
		return err;
	}

	if (skb->len != rlen) {
		bt_dev_err(hdev, "QCA TLV response size mismatch");
		err = -EILSEQ;
		goto out;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl) {
		bt_dev_err(hdev, "TLV with no header");
		err = -EILSEQ;
		goto out;
	}

	if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) {
		bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x",
			   edl->cresp, edl->rtype);
		err = -EIO;
	}

	if (soc_type >= QCA_WCN3991)
		goto out;

	tlv_resp = (struct tlv_seg_resp *)(edl->data);
	if (tlv_resp->result) {
		bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
			   edl->cresp, edl->rtype, tlv_resp->result);
	}

out:
	kfree_skb(skb);

	return err;
}

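/* Fabricate a successful command complete event and feed it back into
 * the HCI core. Used when the controller skips the event for the last
 * firmware segment.
 */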
static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;
	struct sk_buff *skb;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = skb_put(skb, sizeof(*evt));
	evt->ncmd = 1;
	evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);

	skb_put_u8(skb, QCA_HCI_CC_SUCCESS);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}

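/* Request a firmware file, parse its TLV header, and push it to the
 * controller in MAX_SIZE_PER_TLV_SEGMENT sized chunks.
 */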
static int qca_download_firmware(struct hci_dev *hdev,
				 struct qca_fw_config *config,
				 enum qca_btsoc_type soc_type)
{
	const struct firmware *fw;
	u8 *data;
	const u8 *segment;
	int ret, size, remain, i = 0;

	bt_dev_info(hdev, "QCA Downloading %s", config->fwname);

	ret = request_firmware(&fw, config->fwname, &hdev->dev);
	if (ret) {
		bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
			   config->fwname, ret);
		return ret;
	}

	size = fw->size;
	data = vmalloc(fw->size);
	if (!data) {
		bt_dev_err(hdev, "QCA Failed to allocate memory for file: %s",
			   config->fwname);
		release_firmware(fw);
		return -ENOMEM;
	}

	memcpy(data, fw->data, size);
	release_firmware(fw);

	qca_tlv_check_data(config, data, soc_type);

	segment = data;
	remain = size;
	while (remain > 0) {
		int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain);

		bt_dev_dbg(hdev, "Send segment %d, size %d", i++, segsize);

		remain -= segsize;
		/* The last segment is always acked regardless of the
		 * download mode.
		 */
		if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
			config->dnld_mode = QCA_SKIP_EVT_NONE;

		ret = qca_tlv_send_segment(hdev, segsize, segment,
					   config->dnld_mode, soc_type);
		if (ret)
			goto out;

		segment += segsize;
	}

	/* The latest Qualcomm chipsets do not send a command complete event
	 * for every firmware packet; they only respond with a vendor
	 * specific event for the last packet. This optimization in the chip
	 * reduces BT initialization time. Inject a command complete event
	 * here to avoid a command timeout error message.
	 */
	if (config->dnld_type == QCA_SKIP_EVT_VSE_CC ||
	    config->dnld_type == QCA_SKIP_EVT_VSE)
		ret = qca_inject_cmd_complete_event(hdev);

out:
	vfree(data);

	return ret;
}

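/* Disable controller-side (SoC) debug logging via vendor command. */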
static int qca_disable_soc_logging(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	u8 cmd[2];
	int err;

	cmd[0] = QCA_DISABLE_LOGGING_SUB_OP;
	cmd[1] = 0x00;
	skb = __hci_cmd_sync_ev(hdev, QCA_DISABLE_LOGGING, sizeof(cmd), cmd,
				HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Failed to disable soc logging(%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}

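/* qca_set_bdaddr_rome() - program the Bluetooth device address on Rome
 * controllers through an EDL NVM access set request (tag 0x02).
 */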
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	u8 cmd[9];
	int err;

	cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
	cmd[1] = 0x02;			/* TAG ID */
	cmd[2] = sizeof(bdaddr_t);	/* size */
	memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
	skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Change address command failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);

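/* qca_uart_setup() - bring up a QCA controller attached over UART:
 * download the rampatch and NVM files selected by SoC type and version,
 * optionally disable SoC logging, and finish with an HCI reset.
 */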
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
		   enum qca_btsoc_type soc_type, u32 soc_ver,
		   const char *firmware_name)
{
	struct qca_fw_config config;
	int err;
	u8 rom_ver = 0;

	bt_dev_dbg(hdev, "QCA setup on UART");

	config.user_baud_rate = baudrate;

	/* Download rampatch file */
	config.type = TLV_TYPE_PATCH;
	if (qca_is_wcn399x(soc_type)) {
		/* Firmware files to download are based on ROM version.
		 * ROM version is derived from last two bytes of soc_ver.
		 */
		rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
			    (soc_ver & 0x0000000f);
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/crbtfw%02x.tlv", rom_ver);
	} else if (soc_type == QCA_QCA6390) {
		rom_ver = ((soc_ver & 0x00000f00) >> 0x04) |
			    (soc_ver & 0x0000000f);
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/htbtfw%02x.tlv", rom_ver);
	} else {
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/rampatch_%08x.bin", soc_ver);
	}

	err = qca_download_firmware(hdev, &config, soc_type);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
		return err;
	}

	/* Give the controller some time to get ready to receive the NVM */
	msleep(10);

	/* Download NVM configuration */
	config.type = TLV_TYPE_NVM;
	if (firmware_name)
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/%s", firmware_name);
	else if (qca_is_wcn399x(soc_type))
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/crnv%02x.bin", rom_ver);
	else if (soc_type == QCA_QCA6390)
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/htnv%02x.bin", rom_ver);
	else
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/nvm_%08x.bin", soc_ver);

	err = qca_download_firmware(hdev, &config, soc_type);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
		return err;
	}

	if (soc_type >= QCA_WCN3991) {
		err = qca_disable_soc_logging(hdev);
		if (err < 0)
			return err;
	}

	/* Perform HCI reset */
	err = qca_send_reset(hdev);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err);
		return err;
	}

	bt_dev_info(hdev, "QCA setup on UART is completed");

	return 0;
}
EXPORT_SYMBOL_GPL(qca_uart_setup);

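/* qca_set_bdaddr() - program the Bluetooth device address using the
 * vendor-specific write-BD_ADDR command.
 */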
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	int err;

	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr);


MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");