xref: /OK3568_Linux_fs/kernel/drivers/bluetooth/hci_qca.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Bluetooth Software UART Qualcomm protocol
 *
 *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
 *  protocol extension to H4.
 *
 *  Copyright (C) 2007 Texas Instruments, Inc.
 *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
 *
 *  Acknowledgements:
 *  This file is based on hci_ll.c, which was...
 *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
 *  which was in turn based on hci_h4.c, which was written
 *  by Maxim Krasnyansky and Marcel Holtmann.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btqca.h"

/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10

#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS	200
#define IBS_HOST_TX_IDLE_TIMEOUT_MS	2000
#define CMD_TRANS_TIMEOUT_MS		100
#define MEMDUMP_TIMEOUT_MS		8000

/* susclk rate */
#define SUSCLK_RATE_32KHZ	32768

/* Controller debug log header */
#define QCA_DEBUG_HANDLE	0x2EDC

/* max retry count when init fails */
#define MAX_INIT_RETRIES 3

/* Controller dump header */
#define QCA_SSR_DUMP_HANDLE		0x0108
#define QCA_DUMP_PACKET_SIZE		255
#define QCA_LAST_SEQUENCE_NUM		0xFFFF
#define QCA_CRASHBYTE_PACKET_LEN	1096
#define QCA_MEMDUMP_BYTE		0xFB

enum qca_flags {
	QCA_IBS_ENABLED,
	QCA_DROP_VENDOR_EVENT,
	QCA_SUSPENDING,
	QCA_MEMDUMP_COLLECTION,
	QCA_HW_ERROR_EVENT,
	QCA_SSR_TRIGGERED
};

enum qca_capabilities {
	QCA_CAP_WIDEBAND_SPEECH = BIT(0),
	QCA_CAP_VALID_LE_STATES = BIT(1),
};

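/* HCI_IBS handshake as implemented below: before transmitting, the host
 * sends HCI_IBS_WAKE_IND and waits for HCI_IBS_WAKE_ACK before draining
 * its queue; either side sends HCI_IBS_SLEEP_IND when it goes idle, and
 * a peer's WAKE_IND is always answered with a WAKE_ACK.
 */
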
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,
	HCI_IBS_TX_WAKING,
	HCI_IBS_TX_AWAKE,
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};

/* Controller memory dump states */
enum qca_memdump_states {
	QCA_MEMDUMP_IDLE,
	QCA_MEMDUMP_COLLECTING,
	QCA_MEMDUMP_COLLECTED,
	QCA_MEMDUMP_TIMEOUT,
};

struct qca_memdump_data {
	char *memdump_buf_head;
	char *memdump_buf_tail;
	u32 current_seq_no;
	u32 received_dump;
	u32 ram_dump_size;
};

struct qca_memdump_event_hdr {
	__u8    evt;
	__u8    plen;
	__u16   opcode;
	__u16   seq_no;
	__u8    reserved;
} __packed;

struct qca_dump_size {
	u32 dump_size;
} __packed;

struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue	*/
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;
	u32 wake_retrans;
	struct workqueue_struct *workqueue;
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	struct work_struct ctrl_memdump_evt;
	struct delayed_work ctrl_memdump_timeout;
	struct qca_memdump_data *qca_memdump;
	unsigned long flags;
	struct completion drop_ev_comp;
	wait_queue_head_t suspend_wait_q;
	enum qca_memdump_states memdump_state;
	struct mutex hci_memdump_lock;

	/* For debugging purpose */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};

enum qca_speed_type {
	QCA_INIT_SPEED = 1,
	QCA_OPER_SPEED
};

/*
 * Voltage regulator information required for configuring the
 * QCA Bluetooth chipset
 */
struct qca_vreg {
	const char *name;
	unsigned int load_uA;
};

struct qca_device_data {
	enum qca_btsoc_type soc_type;
	struct qca_vreg *vregs;
	size_t num_vregs;
	uint32_t capabilities;
};

/*
 * Platform data for the QCA Bluetooth power driver.
 */
struct qca_power {
	struct device *dev;
	struct regulator_bulk_data *vreg_bulk;
	int num_vregs;
	bool vregs_on;
};

struct qca_serdev {
	struct hci_uart	 serdev_hu;
	struct gpio_desc *bt_en;
	struct clk	 *susclk;
	enum qca_btsoc_type btsoc_type;
	struct qca_power *bt_power;
	u32 init_speed;
	u32 oper_speed;
	const char *firmware_name;
};

static int qca_regulator_enable(struct qca_serdev *qcadev);
static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
static void qca_controller_memdump(struct work_struct *work);

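/* Return the SoC type from the serdev driver data. A plain tty (ldisc)
 * instance has no serdev, so default to QCA_ROME there.
 */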
static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
{
	enum qca_btsoc_type soc_type;

	if (hu->serdev) {
		struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);

		soc_type = qsd->btsoc_type;
	} else {
		soc_type = QCA_ROME;
	}

	return soc_type;
}

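/* Optional firmware name from the serdev driver data; NULL when running
 * on a plain tty without a serdev device.
 */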
static const char *qca_get_firmware_name(struct hci_uart *hu)
{
	if (hu->serdev) {
		struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);

		return qsd->firmware_name;
	} else {
		return NULL;
	}
}

static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be enabled on the
	 * client side to save power, or need other manual work.
	 * Add code to control the UART clock here if needed.
	 */
}

static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be disabled on the
	 * client side to save power, or need other manual work.
	 * Add code to turn the UART clock off here if needed.
	 */
}

/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	new_vote = qca->rx_vote | qca->tx_vote;

	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
		       vote ? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}

/* Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* Assign HCI_IBS type */
	skb_put_u8(skb, cmd);

	skb_queue_tail(&qca->txq, skb);

	return err;
}

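/* Workqueue handler: vote the serial clock on, send HCI_IBS_WAKE_IND to
 * the controller and arm the wake retransmit timer in case no WAKE_ACK
 * arrives in time.
 */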
static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;
	unsigned long flags;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

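/* Workqueue handler: vote the rx clock on, mark the rx side awake and
 * acknowledge the controller's HCI_IBS_WAKE_IND with a WAKE_ACK.
 */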
static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}

static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that the message is queued to the tty driver, vote for the
	 * tty clocks to go off. It is up to the tty driver to defer the
	 * clock-off until the tx is done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}

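/* tx idle timer: fires tx_idle_delay ms after the last tx activity, sends
 * HCI_IBS_SLEEP_IND, moves the tx side to ASLEEP and votes the tx clock
 * off.
 */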
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

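/* Wake retransmit timer: if no WAKE_ACK has arrived while in
 * HCI_IBS_TX_WAKING, resend the WAKE_IND and re-arm the timer. Skipped
 * while suspending.
 */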
static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags, retrans_delay;
	bool retransmit = false;

	BT_DBG("hu %p wake retransmit timeout in %d state",
		hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	/* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* No WAKE_ACK, retransmit WAKE */
		retransmit = true;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wakes++;
		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (retransmit)
		hci_uart_tx_wakeup(hu);
}

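/* Delayed work armed when memdump collection starts: if the dump does not
 * complete within MEMDUMP_TIMEOUT_MS, mark it timed out and, unless a
 * hardware error event is already being handled, reset the device.
 */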
static void qca_controller_memdump_timeout(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					ctrl_memdump_timeout.work);
	struct hci_uart *hu = qca->hu;

	mutex_lock(&qca->hci_memdump_lock);
	if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
			/* Inject hw error event to reset the device
			 * and driver.
			 */
			hci_reset_dev(hu->hdev);
		}
	}

	mutex_unlock(&qca->hci_memdump_lock);
}

/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

	if (!hci_uart_has_flow_control(hu))
		return -EOPNOTSUPP;

	qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	skb_queue_head_init(&qca->rx_memdump_q);
	spin_lock_init(&qca->hci_ibs_lock);
	mutex_init(&qca->hci_memdump_lock);
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
	INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
	INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
			  qca_controller_memdump_timeout);
	init_waitqueue_head(&qca->suspend_wait_q);

	qca->hu = hu;
	init_completion(&qca->drop_ev_comp);

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	qca->vote_last_jif = jiffies;

	hu->priv = qca;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);

		if (qca_is_wcn399x(qcadev->btsoc_type))
			hu->init_speed = qcadev->init_speed;

		if (qcadev->oper_speed)
			hu->oper_speed = qcadev->oper_speed;
	}

	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}

static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}

/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}

/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	skb_queue_purge(&qca->rx_memdump_q);
	destroy_workqueue(qca->workqueue);
	del_timer_sync(&qca->tx_idle_timer);
	del_timer_sync(&qca->wake_retrans_timer);
	qca->hu = NULL;

	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}

/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	/* Don't wake the rx up when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure the clock is on - we may have turned it off since
		 * receiving the wake-up indicator. The awake-rx work votes the
		 * rx clock back on and sends the acknowledgement.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	wake_up_interruptible(&qca->suspend_wait_q);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	/* Don't react to the wake-up-acknowledgment when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Enqueue frame for transmission (padding, crc, etc); may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
		/* As SSR is in progress, ignore the packets */
		bt_dev_dbg(hu->hdev, "SSR is in progress");
		kfree_skb(skb);
		return 0;
	}

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Don't go to sleep in the middle of a patch download, or when
	 * Out-Of-Band (GPIO-controlled) sleep is selected.
	 * Don't wake the device up when suspending.
	 */
	if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
	    test_bit(QCA_SUSPENDING, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return 0;
	}

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}

static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);

	device_want_to_sleep(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);

	device_want_to_wakeup(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);

	device_woke_up(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* We receive debug logs from the chip as ACL packets. Instead of
	 * routing them through the ACL layer to be decoded, push them up
	 * to the layers above as diagnostic packets.
	 */
	if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
		return hci_recv_diag(hdev, skb);

	return hci_recv_frame(hdev, skb);
}

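/* Workqueue handler: drain rx_memdump_q and reassemble the controller RAM
 * dump from the sequenced vendor events into one buffer, zero-filling any
 * missed sequence numbers, then hand the finished dump to devcoredump.
 */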
static void qca_controller_memdump(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ctrl_memdump_evt);
	struct hci_uart *hu = qca->hu;
	struct sk_buff *skb;
	struct qca_memdump_event_hdr *cmd_hdr;
	struct qca_memdump_data *qca_memdump = qca->qca_memdump;
	struct qca_dump_size *dump;
	char *memdump_buf;
	char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
	u16 seq_no;
	u32 dump_size;
	u32 rx_size;
	enum qca_btsoc_type soc_type = qca_soc_type(hu);

	while ((skb = skb_dequeue(&qca->rx_memdump_q))) {

		mutex_lock(&qca->hci_memdump_lock);
		/* Skip processing the received packets if timeout detected
		 * or memdump collection completed.
		 */
		if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
		    qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
			mutex_unlock(&qca->hci_memdump_lock);
			return;
		}

		if (!qca_memdump) {
			qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
					      GFP_ATOMIC);
			if (!qca_memdump) {
				mutex_unlock(&qca->hci_memdump_lock);
				return;
			}

			qca->qca_memdump = qca_memdump;
		}

		qca->memdump_state = QCA_MEMDUMP_COLLECTING;
		cmd_hdr = (void *) skb->data;
		seq_no = __le16_to_cpu(cmd_hdr->seq_no);
		skb_pull(skb, sizeof(struct qca_memdump_event_hdr));

		if (!seq_no) {

			/* This is the first frame of the memdump packet from
			 * the controller. Disable IBS so the dump is received
			 * without interruption; the controller ideally needs
			 * 8 seconds to send the dump. Start a timer to handle
			 * this asynchronous activity.
			 */
			clear_bit(QCA_IBS_ENABLED, &qca->flags);
			set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
			dump = (void *) skb->data;
			dump_size = __le32_to_cpu(dump->dump_size);
			if (!(dump_size)) {
				bt_dev_err(hu->hdev, "Rx invalid memdump size");
				kfree(qca_memdump);
				kfree_skb(skb);
				qca->qca_memdump = NULL;
				mutex_unlock(&qca->hci_memdump_lock);
				return;
			}

			bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
				    dump_size);
			queue_delayed_work(qca->workqueue,
					   &qca->ctrl_memdump_timeout,
					   msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)
					  );

			skb_pull(skb, sizeof(dump_size));
			memdump_buf = vmalloc(dump_size);
			qca_memdump->ram_dump_size = dump_size;
			qca_memdump->memdump_buf_head = memdump_buf;
			qca_memdump->memdump_buf_tail = memdump_buf;
		}

		memdump_buf = qca_memdump->memdump_buf_tail;

		/* If sequence no 0 is missed then there is no point in
		 * accepting the other sequences.
		 */
		if (!memdump_buf) {
			bt_dev_err(hu->hdev, "QCA: Discarding other packets");
			kfree(qca_memdump);
			kfree_skb(skb);
			qca->qca_memdump = NULL;
			mutex_unlock(&qca->hci_memdump_lock);
			return;
		}

		/* There is a chance of missing some packets from
		 * the controller. In such cases store dummy
		 * packets in the buffer.
		 */
		/* For QCA6390 the controller does not lose packets, but the
		 * sequence number field of the packet sometimes has error
		 * bits set, so skip this check for missing packets.
		 */
		while ((seq_no > qca_memdump->current_seq_no + 1) &&
		       (soc_type != QCA_QCA6390) &&
		       seq_no != QCA_LAST_SEQUENCE_NUM) {
			bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
				   qca_memdump->current_seq_no);
			rx_size = qca_memdump->received_dump;
			rx_size += QCA_DUMP_PACKET_SIZE;
			if (rx_size > qca_memdump->ram_dump_size) {
				bt_dev_err(hu->hdev,
					   "QCA memdump received %d, no space for missed packet",
					   qca_memdump->received_dump);
				break;
			}
			memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
			memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
			qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
			qca_memdump->current_seq_no++;
		}

		rx_size = qca_memdump->received_dump + skb->len;
		if (rx_size <= qca_memdump->ram_dump_size) {
			if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
			    (seq_no != qca_memdump->current_seq_no))
				bt_dev_err(hu->hdev,
					   "QCA memdump unexpected packet %d",
					   seq_no);
			bt_dev_dbg(hu->hdev,
				   "QCA memdump packet %d with length %d",
				   seq_no, skb->len);
			memcpy(memdump_buf, (unsigned char *)skb->data,
			       skb->len);
			memdump_buf = memdump_buf + skb->len;
			qca_memdump->memdump_buf_tail = memdump_buf;
			qca_memdump->current_seq_no = seq_no + 1;
			qca_memdump->received_dump += skb->len;
		} else {
			bt_dev_err(hu->hdev,
				   "QCA memdump received %d, no space for packet %d",
				   qca_memdump->received_dump, seq_no);
		}
		qca->qca_memdump = qca_memdump;
		kfree_skb(skb);
		if (seq_no == QCA_LAST_SEQUENCE_NUM) {
			bt_dev_info(hu->hdev,
				    "QCA memdump Done, received %d, total %d",
				    qca_memdump->received_dump,
				    qca_memdump->ram_dump_size);
			memdump_buf = qca_memdump->memdump_buf_head;
			dev_coredumpv(&hu->serdev->dev, memdump_buf,
				      qca_memdump->received_dump, GFP_KERNEL);
			cancel_delayed_work(&qca->ctrl_memdump_timeout);
			kfree(qca->qca_memdump);
			qca->qca_memdump = NULL;
			qca->memdump_state = QCA_MEMDUMP_COLLECTED;
			clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
		}

		mutex_unlock(&qca->hci_memdump_lock);
	}

}

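/* Event-path hook: flag an SSR, queue the memdump fragment and kick the
 * collection worker.
 */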
static int qca_controller_memdump_event(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	set_bit(QCA_SSR_TRIGGERED, &qca->flags);
	skb_queue_tail(&qca->rx_memdump_q, skb);
	queue_work(qca->workqueue, &qca->ctrl_memdump_evt);

	return 0;
}

static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
		struct hci_event_hdr *hdr = (void *)skb->data;

		/* For the WCN3990 the vendor command for a baudrate change
		 * isn't sent as synchronous HCI command, because the
		 * controller sends the corresponding vendor event with the
		 * new baudrate. The event is received and properly decoded
		 * after changing the baudrate of the host port. It needs to
		 * be dropped, otherwise it can be misinterpreted as
		 * response to a later firmware download command (also a
		 * vendor command).
		 */

		if (hdr->evt == HCI_EV_VENDOR)
			complete(&qca->drop_ev_comp);

		kfree_skb(skb);

		return 0;
	}
1167*4882a593Smuzhiyun 	/* We receive the chip memory dump as event packets, with a
1168*4882a593Smuzhiyun 	 * dedicated handler, followed by a hardware error event. When
1169*4882a593Smuzhiyun 	 * that event is received we store the dump before closing hci.
1170*4882a593Smuzhiyun 	 * This dump will help in triaging the issue.
1171*4882a593Smuzhiyun 	 */
1172*4882a593Smuzhiyun 	if ((skb->data[0] == HCI_VENDOR_PKT) &&
1173*4882a593Smuzhiyun 	    (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
1174*4882a593Smuzhiyun 		return qca_controller_memdump_event(hdev, skb);
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	return hci_recv_frame(hdev, skb);
1177*4882a593Smuzhiyun }
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun #define QCA_IBS_SLEEP_IND_EVENT \
1180*4882a593Smuzhiyun 	.type = HCI_IBS_SLEEP_IND, \
1181*4882a593Smuzhiyun 	.hlen = 0, \
1182*4882a593Smuzhiyun 	.loff = 0, \
1183*4882a593Smuzhiyun 	.lsize = 0, \
1184*4882a593Smuzhiyun 	.maxlen = HCI_MAX_IBS_SIZE
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun #define QCA_IBS_WAKE_IND_EVENT \
1187*4882a593Smuzhiyun 	.type = HCI_IBS_WAKE_IND, \
1188*4882a593Smuzhiyun 	.hlen = 0, \
1189*4882a593Smuzhiyun 	.loff = 0, \
1190*4882a593Smuzhiyun 	.lsize = 0, \
1191*4882a593Smuzhiyun 	.maxlen = HCI_MAX_IBS_SIZE
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun #define QCA_IBS_WAKE_ACK_EVENT \
1194*4882a593Smuzhiyun 	.type = HCI_IBS_WAKE_ACK, \
1195*4882a593Smuzhiyun 	.hlen = 0, \
1196*4882a593Smuzhiyun 	.loff = 0, \
1197*4882a593Smuzhiyun 	.lsize = 0, \
1198*4882a593Smuzhiyun 	.maxlen = HCI_MAX_IBS_SIZE
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun static const struct h4_recv_pkt qca_recv_pkts[] = {
1201*4882a593Smuzhiyun 	{ H4_RECV_ACL,             .recv = qca_recv_acl_data },
1202*4882a593Smuzhiyun 	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
1203*4882a593Smuzhiyun 	{ H4_RECV_EVENT,           .recv = qca_recv_event    },
1204*4882a593Smuzhiyun 	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
1205*4882a593Smuzhiyun 	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
1206*4882a593Smuzhiyun 	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
1207*4882a593Smuzhiyun };
1208*4882a593Smuzhiyun 
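/* Illustrative sketch, not part of the driver: h4_recv_buf() matches the
 * first byte of each received frame against .type in the table above, so
 * the three IBS messages are dispatched on their single-byte values alone
 * (hlen, loff and lsize are 0 because they carry no length field). A
 * hypothetical additional single-byte message would be wired up the same
 * way:
 *
 *	#define QCA_EXAMPLE_EVENT \
 *		.type = 0xFA, .hlen = 0, .loff = 0, .lsize = 0, \
 *		.maxlen = HCI_MAX_IBS_SIZE
 *
 *	{ QCA_EXAMPLE_EVENT, .recv = qca_example_handler },
 */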
1209*4882a593Smuzhiyun static int qca_recv(struct hci_uart *hu, const void *data, int count)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
1214*4882a593Smuzhiyun 		return -EUNATCH;
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
1217*4882a593Smuzhiyun 				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
1218*4882a593Smuzhiyun 	if (IS_ERR(qca->rx_skb)) {
1219*4882a593Smuzhiyun 		int err = PTR_ERR(qca->rx_skb);
1220*4882a593Smuzhiyun 		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
1221*4882a593Smuzhiyun 		qca->rx_skb = NULL;
1222*4882a593Smuzhiyun 		return err;
1223*4882a593Smuzhiyun 	}
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	return count;
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun static struct sk_buff *qca_dequeue(struct hci_uart *hu)
1229*4882a593Smuzhiyun {
1230*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	return skb_dequeue(&qca->txq);
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun static uint8_t qca_get_baudrate_value(int speed)
1236*4882a593Smuzhiyun {
1237*4882a593Smuzhiyun 	switch (speed) {
1238*4882a593Smuzhiyun 	case 9600:
1239*4882a593Smuzhiyun 		return QCA_BAUDRATE_9600;
1240*4882a593Smuzhiyun 	case 19200:
1241*4882a593Smuzhiyun 		return QCA_BAUDRATE_19200;
1242*4882a593Smuzhiyun 	case 38400:
1243*4882a593Smuzhiyun 		return QCA_BAUDRATE_38400;
1244*4882a593Smuzhiyun 	case 57600:
1245*4882a593Smuzhiyun 		return QCA_BAUDRATE_57600;
1246*4882a593Smuzhiyun 	case 115200:
1247*4882a593Smuzhiyun 		return QCA_BAUDRATE_115200;
1248*4882a593Smuzhiyun 	case 230400:
1249*4882a593Smuzhiyun 		return QCA_BAUDRATE_230400;
1250*4882a593Smuzhiyun 	case 460800:
1251*4882a593Smuzhiyun 		return QCA_BAUDRATE_460800;
1252*4882a593Smuzhiyun 	case 500000:
1253*4882a593Smuzhiyun 		return QCA_BAUDRATE_500000;
1254*4882a593Smuzhiyun 	case 921600:
1255*4882a593Smuzhiyun 		return QCA_BAUDRATE_921600;
1256*4882a593Smuzhiyun 	case 1000000:
1257*4882a593Smuzhiyun 		return QCA_BAUDRATE_1000000;
1258*4882a593Smuzhiyun 	case 2000000:
1259*4882a593Smuzhiyun 		return QCA_BAUDRATE_2000000;
1260*4882a593Smuzhiyun 	case 3000000:
1261*4882a593Smuzhiyun 		return QCA_BAUDRATE_3000000;
1262*4882a593Smuzhiyun 	case 3200000:
1263*4882a593Smuzhiyun 		return QCA_BAUDRATE_3200000;
1264*4882a593Smuzhiyun 	case 3500000:
1265*4882a593Smuzhiyun 		return QCA_BAUDRATE_3500000;
1266*4882a593Smuzhiyun 	default:
1267*4882a593Smuzhiyun 		return QCA_BAUDRATE_115200;
1268*4882a593Smuzhiyun 	}
1269*4882a593Smuzhiyun }
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
1272*4882a593Smuzhiyun {
1273*4882a593Smuzhiyun 	struct hci_uart *hu = hci_get_drvdata(hdev);
1274*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1275*4882a593Smuzhiyun 	struct sk_buff *skb;
1276*4882a593Smuzhiyun 	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	if (baudrate > QCA_BAUDRATE_3200000)
1279*4882a593Smuzhiyun 		return -EINVAL;
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	cmd[4] = baudrate;
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
1284*4882a593Smuzhiyun 	if (!skb) {
1285*4882a593Smuzhiyun 		bt_dev_err(hdev, "Failed to allocate baudrate packet");
1286*4882a593Smuzhiyun 		return -ENOMEM;
1287*4882a593Smuzhiyun 	}
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	/* Copy in the baudrate change command and set the packet type. */
1290*4882a593Smuzhiyun 	skb_put_data(skb, cmd, sizeof(cmd));
1291*4882a593Smuzhiyun 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	skb_queue_tail(&qca->txq, skb);
1294*4882a593Smuzhiyun 	hci_uart_tx_wakeup(hu);
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	/* Wait for the baudrate change request to be sent */
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	while (!skb_queue_empty(&qca->txq))
1299*4882a593Smuzhiyun 		usleep_range(100, 200);
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	if (hu->serdev)
1302*4882a593Smuzhiyun 		serdev_device_wait_until_sent(hu->serdev,
1303*4882a593Smuzhiyun 		      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	/* Give the controller time to process the request */
1306*4882a593Smuzhiyun 	if (qca_is_wcn399x(qca_soc_type(hu)))
1307*4882a593Smuzhiyun 		msleep(10);
1308*4882a593Smuzhiyun 	else
1309*4882a593Smuzhiyun 		msleep(300);
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	return 0;
1312*4882a593Smuzhiyun }
1313*4882a593Smuzhiyun 
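/* Illustrative note, not authoritative: the request built above is queued
 * on qca->txq as a pre-framed H4 command, so the type byte is part of the
 * buffer itself. Byte 0 is the H4 packet type (0x01, command), bytes 1-2
 * are the little-endian vendor opcode 0xfc48, byte 3 is the parameter
 * length and byte 4 is the QCA baudrate code, e.g. for 3 Mbps:
 *
 *	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, QCA_BAUDRATE_3000000 };
 */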
1314*4882a593Smuzhiyun static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
1315*4882a593Smuzhiyun {
1316*4882a593Smuzhiyun 	if (hu->serdev)
1317*4882a593Smuzhiyun 		serdev_device_set_baudrate(hu->serdev, speed);
1318*4882a593Smuzhiyun 	else
1319*4882a593Smuzhiyun 		hci_uart_set_baudrate(hu, speed);
1320*4882a593Smuzhiyun }
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun static int qca_send_power_pulse(struct hci_uart *hu, bool on)
1323*4882a593Smuzhiyun {
1324*4882a593Smuzhiyun 	int ret;
1325*4882a593Smuzhiyun 	int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
1326*4882a593Smuzhiyun 	u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE;
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	/* These power pulses are single-byte commands sent to the wcn3990
1329*4882a593Smuzhiyun 	 * at the required baudrate. The wcn3990 has an external circuit at
1330*4882a593Smuzhiyun 	 * its Tx pin which decodes a pulse sent at a specific baudrate.
1331*4882a593Smuzhiyun 	 * For example, the wcn3990 supports an RF COEX antenna shared by
1332*4882a593Smuzhiyun 	 * Wi-Fi/BT, and the same power inputs are used to turn both on
1333*4882a593Smuzhiyun 	 * and off. Powering up the power sources will not enable BT until
1334*4882a593Smuzhiyun 	 * we send a power-on pulse at 115200 bps. This scheme helps to
1335*4882a593Smuzhiyun 	 * save power. Disabling hardware flow control is mandatory while
1336*4882a593Smuzhiyun 	 * sending power pulses to the SoC.
1337*4882a593Smuzhiyun 	 */
1338*4882a593Smuzhiyun 	bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd);
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	serdev_device_write_flush(hu->serdev);
1341*4882a593Smuzhiyun 	hci_uart_set_flow_control(hu, true);
1342*4882a593Smuzhiyun 	ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
1343*4882a593Smuzhiyun 	if (ret < 0) {
1344*4882a593Smuzhiyun 		bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd);
1345*4882a593Smuzhiyun 		return ret;
1346*4882a593Smuzhiyun 	}
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	serdev_device_wait_until_sent(hu->serdev, timeout);
1349*4882a593Smuzhiyun 	hci_uart_set_flow_control(hu, false);
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 	/* Give the controller time to boot/shutdown */
1352*4882a593Smuzhiyun 	if (on)
1353*4882a593Smuzhiyun 		msleep(100);
1354*4882a593Smuzhiyun 	else
1355*4882a593Smuzhiyun 		msleep(10);
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	return 0;
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun 
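/* Illustrative call sequence, mirroring qca_wcn3990_init() below: a
 * power-off pulse is sent at 2400 bps before the power-on pulse, so the
 * controller always boots from a known-off state:
 *
 *	host_set_baudrate(hu, 2400);
 *	qca_send_power_pulse(hu, false);
 *	qca_set_speed(hu, QCA_INIT_SPEED);
 *	qca_send_power_pulse(hu, true);
 */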
1360*4882a593Smuzhiyun static unsigned int qca_get_speed(struct hci_uart *hu,
1361*4882a593Smuzhiyun 				  enum qca_speed_type speed_type)
1362*4882a593Smuzhiyun {
1363*4882a593Smuzhiyun 	unsigned int speed = 0;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	if (speed_type == QCA_INIT_SPEED) {
1366*4882a593Smuzhiyun 		if (hu->init_speed)
1367*4882a593Smuzhiyun 			speed = hu->init_speed;
1368*4882a593Smuzhiyun 		else if (hu->proto->init_speed)
1369*4882a593Smuzhiyun 			speed = hu->proto->init_speed;
1370*4882a593Smuzhiyun 	} else {
1371*4882a593Smuzhiyun 		if (hu->oper_speed)
1372*4882a593Smuzhiyun 			speed = hu->oper_speed;
1373*4882a593Smuzhiyun 		else if (hu->proto->oper_speed)
1374*4882a593Smuzhiyun 			speed = hu->proto->oper_speed;
1375*4882a593Smuzhiyun 	}
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	return speed;
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun static int qca_check_speeds(struct hci_uart *hu)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun 	if (qca_is_wcn399x(qca_soc_type(hu))) {
1383*4882a593Smuzhiyun 		if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
1384*4882a593Smuzhiyun 		    !qca_get_speed(hu, QCA_OPER_SPEED))
1385*4882a593Smuzhiyun 			return -EINVAL;
1386*4882a593Smuzhiyun 	} else {
1387*4882a593Smuzhiyun 		if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
1388*4882a593Smuzhiyun 		    !qca_get_speed(hu, QCA_OPER_SPEED))
1389*4882a593Smuzhiyun 			return -EINVAL;
1390*4882a593Smuzhiyun 	}
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 	return 0;
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun 	unsigned int speed, qca_baudrate;
1398*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1399*4882a593Smuzhiyun 	int ret = 0;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	if (speed_type == QCA_INIT_SPEED) {
1402*4882a593Smuzhiyun 		speed = qca_get_speed(hu, QCA_INIT_SPEED);
1403*4882a593Smuzhiyun 		if (speed)
1404*4882a593Smuzhiyun 			host_set_baudrate(hu, speed);
1405*4882a593Smuzhiyun 	} else {
1406*4882a593Smuzhiyun 		enum qca_btsoc_type soc_type = qca_soc_type(hu);
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 		speed = qca_get_speed(hu, QCA_OPER_SPEED);
1409*4882a593Smuzhiyun 		if (!speed)
1410*4882a593Smuzhiyun 			return 0;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 		/* Disable flow control for the wcn3990 to deassert RTS
1413*4882a593Smuzhiyun 		 * while changing the baudrate of the chip and the host.
1414*4882a593Smuzhiyun 		 */
1415*4882a593Smuzhiyun 		if (qca_is_wcn399x(soc_type))
1416*4882a593Smuzhiyun 			hci_uart_set_flow_control(hu, true);
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 		if (soc_type == QCA_WCN3990) {
1419*4882a593Smuzhiyun 			reinit_completion(&qca->drop_ev_comp);
1420*4882a593Smuzhiyun 			set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1421*4882a593Smuzhiyun 		}
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 		qca_baudrate = qca_get_baudrate_value(speed);
1424*4882a593Smuzhiyun 		bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
1425*4882a593Smuzhiyun 		ret = qca_set_baudrate(hu->hdev, qca_baudrate);
1426*4882a593Smuzhiyun 		if (ret)
1427*4882a593Smuzhiyun 			goto error;
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 		host_set_baudrate(hu, speed);
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun error:
1432*4882a593Smuzhiyun 		if (qca_is_wcn399x(soc_type))
1433*4882a593Smuzhiyun 			hci_uart_set_flow_control(hu, false);
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 		if (soc_type == QCA_WCN3990) {
1436*4882a593Smuzhiyun 			/* Wait for the controller to send the vendor event
1437*4882a593Smuzhiyun 			 * for the baudrate change command.
1438*4882a593Smuzhiyun 			 */
1439*4882a593Smuzhiyun 			if (!wait_for_completion_timeout(&qca->drop_ev_comp,
1440*4882a593Smuzhiyun 						 msecs_to_jiffies(100))) {
1441*4882a593Smuzhiyun 				bt_dev_err(hu->hdev,
1442*4882a593Smuzhiyun 					   "Failed to change controller baudrate");
1443*4882a593Smuzhiyun 				ret = -ETIMEDOUT;
1444*4882a593Smuzhiyun 			}
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 			clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1447*4882a593Smuzhiyun 		}
1448*4882a593Smuzhiyun 	}
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	return ret;
1451*4882a593Smuzhiyun }
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun static int qca_send_crashbuffer(struct hci_uart *hu)
1454*4882a593Smuzhiyun {
1455*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1456*4882a593Smuzhiyun 	struct sk_buff *skb;
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
1459*4882a593Smuzhiyun 	if (!skb) {
1460*4882a593Smuzhiyun 		bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
1461*4882a593Smuzhiyun 		return -ENOMEM;
1462*4882a593Smuzhiyun 	}
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	/* We forcefully crash the controller by sending the 0xfb byte
1465*4882a593Smuzhiyun 	 * 1024 times. Since some of these bytes might be lost, to be on
1466*4882a593Smuzhiyun 	 * the safe side we send 1096 bytes to the SoC.
1467*4882a593Smuzhiyun 	 */
1468*4882a593Smuzhiyun 	memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
1469*4882a593Smuzhiyun 	       QCA_CRASHBYTE_PACKET_LEN);
1470*4882a593Smuzhiyun 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
1471*4882a593Smuzhiyun 	bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
1472*4882a593Smuzhiyun 	skb_queue_tail(&qca->txq, skb);
1473*4882a593Smuzhiyun 	hci_uart_tx_wakeup(hu);
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	return 0;
1476*4882a593Smuzhiyun }
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun static void qca_wait_for_dump_collection(struct hci_dev *hdev)
1479*4882a593Smuzhiyun {
1480*4882a593Smuzhiyun 	struct hci_uart *hu = hci_get_drvdata(hdev);
1481*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 	wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
1484*4882a593Smuzhiyun 			    TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1487*4882a593Smuzhiyun }
1488*4882a593Smuzhiyun 
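/* Hypothetical sketch, not code from this driver: a waiter like the one
 * above is normally paired on the completing side with a clear and a wake
 * on the same bit word, along the lines of:
 *
 *	clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&qca->flags, QCA_MEMDUMP_COLLECTION);
 *
 * Without such a wake, wait_on_bit_timeout() typically only returns once
 * the timeout elapses.
 */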
1489*4882a593Smuzhiyun static void qca_hw_error(struct hci_dev *hdev, u8 code)
1490*4882a593Smuzhiyun {
1491*4882a593Smuzhiyun 	struct hci_uart *hu = hci_get_drvdata(hdev);
1492*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1495*4882a593Smuzhiyun 	set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
1496*4882a593Smuzhiyun 	bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
1499*4882a593Smuzhiyun 		/* If a hardware error event is received for anything other
1500*4882a593Smuzhiyun 		 * than a QCA SoC memory dump event, we need to crash the
1501*4882a593Smuzhiyun 		 * SoC and wait here for 8 seconds for the dump packets.
1502*4882a593Smuzhiyun 		 * This blocks the main thread until the dump has been
1503*4882a593Smuzhiyun 		 * collected.
1504*4882a593Smuzhiyun 		 */
1505*4882a593Smuzhiyun 		set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1506*4882a593Smuzhiyun 		qca_send_crashbuffer(hu);
1507*4882a593Smuzhiyun 		qca_wait_for_dump_collection(hdev);
1508*4882a593Smuzhiyun 	} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
1509*4882a593Smuzhiyun 		/* Wait here until the memory dump is collected or the
1510*4882a593Smuzhiyun 		 * memory dump timer expires.
1511*4882a593Smuzhiyun 		 */
1512*4882a593Smuzhiyun 		bt_dev_info(hdev, "waiting for dump to complete");
1513*4882a593Smuzhiyun 		qca_wait_for_dump_collection(hdev);
1514*4882a593Smuzhiyun 	}
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	mutex_lock(&qca->hci_memdump_lock);
1517*4882a593Smuzhiyun 	if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
1518*4882a593Smuzhiyun 		bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
1519*4882a593Smuzhiyun 		if (qca->qca_memdump) {
1520*4882a593Smuzhiyun 			vfree(qca->qca_memdump->memdump_buf_head);
1521*4882a593Smuzhiyun 			kfree(qca->qca_memdump);
1522*4882a593Smuzhiyun 			qca->qca_memdump = NULL;
1523*4882a593Smuzhiyun 		}
1524*4882a593Smuzhiyun 		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
1525*4882a593Smuzhiyun 		cancel_delayed_work(&qca->ctrl_memdump_timeout);
1526*4882a593Smuzhiyun 	}
1527*4882a593Smuzhiyun 	mutex_unlock(&qca->hci_memdump_lock);
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
1530*4882a593Smuzhiyun 	    qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
1531*4882a593Smuzhiyun 		cancel_work_sync(&qca->ctrl_memdump_evt);
1532*4882a593Smuzhiyun 		skb_queue_purge(&qca->rx_memdump_q);
1533*4882a593Smuzhiyun 	}
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
1536*4882a593Smuzhiyun }
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun static void qca_cmd_timeout(struct hci_dev *hdev)
1539*4882a593Smuzhiyun {
1540*4882a593Smuzhiyun 	struct hci_uart *hu = hci_get_drvdata(hdev);
1541*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1544*4882a593Smuzhiyun 	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
1545*4882a593Smuzhiyun 		set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1546*4882a593Smuzhiyun 		qca_send_crashbuffer(hu);
1547*4882a593Smuzhiyun 		qca_wait_for_dump_collection(hdev);
1548*4882a593Smuzhiyun 	} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
1549*4882a593Smuzhiyun 		/* Wait here until the memory dump is collected or the
1550*4882a593Smuzhiyun 		 * memory dump timer expires.
1551*4882a593Smuzhiyun 		 */
1552*4882a593Smuzhiyun 		bt_dev_info(hdev, "waiting for dump to complete");
1553*4882a593Smuzhiyun 		qca_wait_for_dump_collection(hdev);
1554*4882a593Smuzhiyun 	}
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	mutex_lock(&qca->hci_memdump_lock);
1557*4882a593Smuzhiyun 	if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
1558*4882a593Smuzhiyun 		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
1559*4882a593Smuzhiyun 		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
1560*4882a593Smuzhiyun 			/* Inject hw error event to reset the device
1561*4882a593Smuzhiyun 			 * and driver.
1562*4882a593Smuzhiyun 			 */
1563*4882a593Smuzhiyun 			hci_reset_dev(hu->hdev);
1564*4882a593Smuzhiyun 		}
1565*4882a593Smuzhiyun 	}
1566*4882a593Smuzhiyun 	mutex_unlock(&qca->hci_memdump_lock);
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun static int qca_wcn3990_init(struct hci_uart *hu)
1570*4882a593Smuzhiyun {
1571*4882a593Smuzhiyun 	struct qca_serdev *qcadev;
1572*4882a593Smuzhiyun 	int ret;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	/* Check the vregs status; hci down may have turned off the
1575*4882a593Smuzhiyun 	 * voltage regulators.
1576*4882a593Smuzhiyun 	 */
1577*4882a593Smuzhiyun 	qcadev = serdev_device_get_drvdata(hu->serdev);
1578*4882a593Smuzhiyun 	if (!qcadev->bt_power->vregs_on) {
1579*4882a593Smuzhiyun 		serdev_device_close(hu->serdev);
1580*4882a593Smuzhiyun 		ret = qca_regulator_enable(qcadev);
1581*4882a593Smuzhiyun 		if (ret)
1582*4882a593Smuzhiyun 			return ret;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 		ret = serdev_device_open(hu->serdev);
1585*4882a593Smuzhiyun 		if (ret) {
1586*4882a593Smuzhiyun 			bt_dev_err(hu->hdev, "failed to open port");
1587*4882a593Smuzhiyun 			return ret;
1588*4882a593Smuzhiyun 		}
1589*4882a593Smuzhiyun 	}
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	/* Forcefully make the wcn3990 enter boot mode. */
1592*4882a593Smuzhiyun 	host_set_baudrate(hu, 2400);
1593*4882a593Smuzhiyun 	ret = qca_send_power_pulse(hu, false);
1594*4882a593Smuzhiyun 	if (ret)
1595*4882a593Smuzhiyun 		return ret;
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	qca_set_speed(hu, QCA_INIT_SPEED);
1598*4882a593Smuzhiyun 	ret = qca_send_power_pulse(hu, true);
1599*4882a593Smuzhiyun 	if (ret)
1600*4882a593Smuzhiyun 		return ret;
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	/* Now the device is ready to communicate with the host. To sync
1603*4882a593Smuzhiyun 	 * the host with the device we need to reopen the port. Without
1604*4882a593Smuzhiyun 	 * this, we will have RTS and CTS synchronization issues.
1605*4882a593Smuzhiyun 	 */
1607*4882a593Smuzhiyun 	serdev_device_close(hu->serdev);
1608*4882a593Smuzhiyun 	ret = serdev_device_open(hu->serdev);
1609*4882a593Smuzhiyun 	if (ret) {
1610*4882a593Smuzhiyun 		bt_dev_err(hu->hdev, "failed to open port");
1611*4882a593Smuzhiyun 		return ret;
1612*4882a593Smuzhiyun 	}
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	hci_uart_set_flow_control(hu, false);
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	return 0;
1617*4882a593Smuzhiyun }
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun static int qca_power_on(struct hci_dev *hdev)
1620*4882a593Smuzhiyun {
1621*4882a593Smuzhiyun 	struct hci_uart *hu = hci_get_drvdata(hdev);
1622*4882a593Smuzhiyun 	enum qca_btsoc_type soc_type = qca_soc_type(hu);
1623*4882a593Smuzhiyun 	struct qca_serdev *qcadev;
1624*4882a593Smuzhiyun 	int ret = 0;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	/* A non-serdev device is usually powered by an external supply
1627*4882a593Smuzhiyun 	 * and needs no additional driver action for power on.
1628*4882a593Smuzhiyun 	 */
1629*4882a593Smuzhiyun 	if (!hu->serdev)
1630*4882a593Smuzhiyun 		return 0;
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 	if (qca_is_wcn399x(soc_type)) {
1633*4882a593Smuzhiyun 		ret = qca_wcn3990_init(hu);
1634*4882a593Smuzhiyun 	} else {
1635*4882a593Smuzhiyun 		qcadev = serdev_device_get_drvdata(hu->serdev);
1636*4882a593Smuzhiyun 		if (qcadev->bt_en) {
1637*4882a593Smuzhiyun 			gpiod_set_value_cansleep(qcadev->bt_en, 1);
1638*4882a593Smuzhiyun 			/* The controller needs time to boot up. */
1639*4882a593Smuzhiyun 			msleep(150);
1640*4882a593Smuzhiyun 		}
1641*4882a593Smuzhiyun 	}
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	return ret;
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun static int qca_setup(struct hci_uart *hu)
1647*4882a593Smuzhiyun {
1648*4882a593Smuzhiyun 	struct hci_dev *hdev = hu->hdev;
1649*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1650*4882a593Smuzhiyun 	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
1651*4882a593Smuzhiyun 	unsigned int retries = 0;
1652*4882a593Smuzhiyun 	enum qca_btsoc_type soc_type = qca_soc_type(hu);
1653*4882a593Smuzhiyun 	const char *firmware_name = qca_get_firmware_name(hu);
1654*4882a593Smuzhiyun 	int ret;
1655*4882a593Smuzhiyun 	int soc_ver = 0;
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	ret = qca_check_speeds(hu);
1658*4882a593Smuzhiyun 	if (ret)
1659*4882a593Smuzhiyun 		return ret;
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	/* Patch downloading has to be done without IBS mode */
1662*4882a593Smuzhiyun 	clear_bit(QCA_IBS_ENABLED, &qca->flags);
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	/* Enable controller to do both LE scan and BR/EDR inquiry
1665*4882a593Smuzhiyun 	 * simultaneously.
1666*4882a593Smuzhiyun 	 */
1667*4882a593Smuzhiyun 	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 	bt_dev_info(hdev, "setting up %s",
1670*4882a593Smuzhiyun 		qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
1671*4882a593Smuzhiyun 
1672*4882a593Smuzhiyun 	qca->memdump_state = QCA_MEMDUMP_IDLE;
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun retry:
1675*4882a593Smuzhiyun 	ret = qca_power_on(hdev);
1676*4882a593Smuzhiyun 	if (ret)
1677*4882a593Smuzhiyun 		return ret;
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	if (qca_is_wcn399x(soc_type)) {
1682*4882a593Smuzhiyun 		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
1683*4882a593Smuzhiyun 
1684*4882a593Smuzhiyun 		ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
1685*4882a593Smuzhiyun 		if (ret)
1686*4882a593Smuzhiyun 			return ret;
1687*4882a593Smuzhiyun 	} else {
1688*4882a593Smuzhiyun 		qca_set_speed(hu, QCA_INIT_SPEED);
1689*4882a593Smuzhiyun 	}
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	/* Setup user speed if needed */
1692*4882a593Smuzhiyun 	speed = qca_get_speed(hu, QCA_OPER_SPEED);
1693*4882a593Smuzhiyun 	if (speed) {
1694*4882a593Smuzhiyun 		ret = qca_set_speed(hu, QCA_OPER_SPEED);
1695*4882a593Smuzhiyun 		if (ret)
1696*4882a593Smuzhiyun 			return ret;
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 		qca_baudrate = qca_get_baudrate_value(speed);
1699*4882a593Smuzhiyun 	}
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 	if (!qca_is_wcn399x(soc_type)) {
1702*4882a593Smuzhiyun 		/* Get QCA version information */
1703*4882a593Smuzhiyun 		ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
1704*4882a593Smuzhiyun 		if (ret)
1705*4882a593Smuzhiyun 			return ret;
1706*4882a593Smuzhiyun 	}
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
1709*4882a593Smuzhiyun 	/* Setup patch / NVM configurations */
1710*4882a593Smuzhiyun 	ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver,
1711*4882a593Smuzhiyun 			firmware_name);
1712*4882a593Smuzhiyun 	if (!ret) {
1713*4882a593Smuzhiyun 		set_bit(QCA_IBS_ENABLED, &qca->flags);
1714*4882a593Smuzhiyun 		qca_debugfs_init(hdev);
1715*4882a593Smuzhiyun 		hu->hdev->hw_error = qca_hw_error;
1716*4882a593Smuzhiyun 		hu->hdev->cmd_timeout = qca_cmd_timeout;
1717*4882a593Smuzhiyun 	} else if (ret == -ENOENT) {
1718*4882a593Smuzhiyun 		/* No patch/nvm-config found, run with original fw/config */
1719*4882a593Smuzhiyun 		ret = 0;
1720*4882a593Smuzhiyun 	} else if (ret == -EAGAIN) {
1721*4882a593Smuzhiyun 		/*
1722*4882a593Smuzhiyun 		 * Userspace firmware loader will return -EAGAIN in case no
1723*4882a593Smuzhiyun 		 * patch/nvm-config is found, so run with original fw/config.
1724*4882a593Smuzhiyun 		 */
1725*4882a593Smuzhiyun 		ret = 0;
1726*4882a593Smuzhiyun 	} else {
1727*4882a593Smuzhiyun 		if (retries < MAX_INIT_RETRIES) {
1728*4882a593Smuzhiyun 			qca_power_shutdown(hu);
1729*4882a593Smuzhiyun 			if (hu->serdev) {
1730*4882a593Smuzhiyun 				serdev_device_close(hu->serdev);
1731*4882a593Smuzhiyun 				ret = serdev_device_open(hu->serdev);
1732*4882a593Smuzhiyun 				if (ret) {
1733*4882a593Smuzhiyun 					bt_dev_err(hdev, "failed to open port");
1734*4882a593Smuzhiyun 					return ret;
1735*4882a593Smuzhiyun 				}
1736*4882a593Smuzhiyun 			}
1737*4882a593Smuzhiyun 			retries++;
1738*4882a593Smuzhiyun 			goto retry;
1739*4882a593Smuzhiyun 		}
1740*4882a593Smuzhiyun 	}
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	/* Setup bdaddr */
1743*4882a593Smuzhiyun 	if (soc_type == QCA_ROME)
1744*4882a593Smuzhiyun 		hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
1745*4882a593Smuzhiyun 	else
1746*4882a593Smuzhiyun 		hu->hdev->set_bdaddr = qca_set_bdaddr;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	return ret;
1749*4882a593Smuzhiyun }
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun static const struct hci_uart_proto qca_proto = {
1752*4882a593Smuzhiyun 	.id		= HCI_UART_QCA,
1753*4882a593Smuzhiyun 	.name		= "QCA",
1754*4882a593Smuzhiyun 	.manufacturer	= 29,
1755*4882a593Smuzhiyun 	.init_speed	= 115200,
1756*4882a593Smuzhiyun 	.oper_speed	= 3000000,
1757*4882a593Smuzhiyun 	.open		= qca_open,
1758*4882a593Smuzhiyun 	.close		= qca_close,
1759*4882a593Smuzhiyun 	.flush		= qca_flush,
1760*4882a593Smuzhiyun 	.setup		= qca_setup,
1761*4882a593Smuzhiyun 	.recv		= qca_recv,
1762*4882a593Smuzhiyun 	.enqueue	= qca_enqueue,
1763*4882a593Smuzhiyun 	.dequeue	= qca_dequeue,
1764*4882a593Smuzhiyun };
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun static const struct qca_device_data qca_soc_data_wcn3990 = {
1767*4882a593Smuzhiyun 	.soc_type = QCA_WCN3990,
1768*4882a593Smuzhiyun 	.vregs = (struct qca_vreg []) {
1769*4882a593Smuzhiyun 		{ "vddio", 15000  },
1770*4882a593Smuzhiyun 		{ "vddxo", 80000  },
1771*4882a593Smuzhiyun 		{ "vddrf", 300000 },
1772*4882a593Smuzhiyun 		{ "vddch0", 450000 },
1773*4882a593Smuzhiyun 	},
1774*4882a593Smuzhiyun 	.num_vregs = 4,
1775*4882a593Smuzhiyun };
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun static const struct qca_device_data qca_soc_data_wcn3991 = {
1778*4882a593Smuzhiyun 	.soc_type = QCA_WCN3991,
1779*4882a593Smuzhiyun 	.vregs = (struct qca_vreg []) {
1780*4882a593Smuzhiyun 		{ "vddio", 15000  },
1781*4882a593Smuzhiyun 		{ "vddxo", 80000  },
1782*4882a593Smuzhiyun 		{ "vddrf", 300000 },
1783*4882a593Smuzhiyun 		{ "vddch0", 450000 },
1784*4882a593Smuzhiyun 	},
1785*4882a593Smuzhiyun 	.num_vregs = 4,
1786*4882a593Smuzhiyun 	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
1787*4882a593Smuzhiyun };
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun static const struct qca_device_data qca_soc_data_wcn3998 = {
1790*4882a593Smuzhiyun 	.soc_type = QCA_WCN3998,
1791*4882a593Smuzhiyun 	.vregs = (struct qca_vreg []) {
1792*4882a593Smuzhiyun 		{ "vddio", 10000  },
1793*4882a593Smuzhiyun 		{ "vddxo", 80000  },
1794*4882a593Smuzhiyun 		{ "vddrf", 300000 },
1795*4882a593Smuzhiyun 		{ "vddch0", 450000 },
1796*4882a593Smuzhiyun 	},
1797*4882a593Smuzhiyun 	.num_vregs = 4,
1798*4882a593Smuzhiyun };
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun static const struct qca_device_data qca_soc_data_qca6390 = {
1801*4882a593Smuzhiyun 	.soc_type = QCA_QCA6390,
1802*4882a593Smuzhiyun 	.num_vregs = 0,
1803*4882a593Smuzhiyun };
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun static void qca_power_shutdown(struct hci_uart *hu)
1806*4882a593Smuzhiyun {
1807*4882a593Smuzhiyun 	struct qca_serdev *qcadev;
1808*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1809*4882a593Smuzhiyun 	unsigned long flags;
1810*4882a593Smuzhiyun 	enum qca_btsoc_type soc_type = qca_soc_type(hu);
1811*4882a593Smuzhiyun 
1812*4882a593Smuzhiyun 	/* From this point on we go into the power-off state. But the
1813*4882a593Smuzhiyun 	 * serial port is still open, so stop queueing IBS data and flush
1814*4882a593Smuzhiyun 	 * all the buffered data in the skbs.
1815*4882a593Smuzhiyun 	 */
1816*4882a593Smuzhiyun 	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
1817*4882a593Smuzhiyun 	clear_bit(QCA_IBS_ENABLED, &qca->flags);
1818*4882a593Smuzhiyun 	qca_flush(hu);
1819*4882a593Smuzhiyun 	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	/* A non-serdev device is usually powered by an external supply
1822*4882a593Smuzhiyun 	 * and needs no additional driver action for power down.
1823*4882a593Smuzhiyun 	 */
1824*4882a593Smuzhiyun 	if (!hu->serdev)
1825*4882a593Smuzhiyun 		return;
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun 	qcadev = serdev_device_get_drvdata(hu->serdev);
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	if (qca_is_wcn399x(soc_type)) {
1830*4882a593Smuzhiyun 		host_set_baudrate(hu, 2400);
1831*4882a593Smuzhiyun 		qca_send_power_pulse(hu, false);
1832*4882a593Smuzhiyun 		qca_regulator_disable(qcadev);
1833*4882a593Smuzhiyun 	} else if (qcadev->bt_en) {
1834*4882a593Smuzhiyun 		gpiod_set_value_cansleep(qcadev->bt_en, 0);
1835*4882a593Smuzhiyun 	}
1836*4882a593Smuzhiyun }
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun static int qca_power_off(struct hci_dev *hdev)
1839*4882a593Smuzhiyun {
1840*4882a593Smuzhiyun 	struct hci_uart *hu = hci_get_drvdata(hdev);
1841*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
1842*4882a593Smuzhiyun 	enum qca_btsoc_type soc_type = qca_soc_type(hu);
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	hu->hdev->hw_error = NULL;
1845*4882a593Smuzhiyun 	hu->hdev->cmd_timeout = NULL;
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 	del_timer_sync(&qca->wake_retrans_timer);
1848*4882a593Smuzhiyun 	del_timer_sync(&qca->tx_idle_timer);
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	/* Don't send the pre-shutdown command if the SoC has crashed. */
1851*4882a593Smuzhiyun 	if (soc_type != QCA_ROME &&
1852*4882a593Smuzhiyun 	    qca->memdump_state == QCA_MEMDUMP_IDLE) {
1853*4882a593Smuzhiyun 		qca_send_pre_shutdown_cmd(hdev);
1854*4882a593Smuzhiyun 		usleep_range(8000, 10000);
1855*4882a593Smuzhiyun 	}
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 	qca_power_shutdown(hu);
1858*4882a593Smuzhiyun 	return 0;
1859*4882a593Smuzhiyun }
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun static int qca_regulator_enable(struct qca_serdev *qcadev)
1862*4882a593Smuzhiyun {
1863*4882a593Smuzhiyun 	struct qca_power *power = qcadev->bt_power;
1864*4882a593Smuzhiyun 	int ret;
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	/* Already enabled */
1867*4882a593Smuzhiyun 	if (power->vregs_on)
1868*4882a593Smuzhiyun 		return 0;
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 	BT_DBG("enabling %d regulators", power->num_vregs);
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
1873*4882a593Smuzhiyun 	if (ret)
1874*4882a593Smuzhiyun 		return ret;
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	power->vregs_on = true;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	ret = clk_prepare_enable(qcadev->susclk);
1879*4882a593Smuzhiyun 	if (ret)
1880*4882a593Smuzhiyun 		qca_regulator_disable(qcadev);
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	return ret;
1883*4882a593Smuzhiyun }
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun static void qca_regulator_disable(struct qca_serdev *qcadev)
1886*4882a593Smuzhiyun {
1887*4882a593Smuzhiyun 	struct qca_power *power;
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun 	if (!qcadev)
1890*4882a593Smuzhiyun 		return;
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun 	power = qcadev->bt_power;
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	/* Already disabled? */
1895*4882a593Smuzhiyun 	if (!power->vregs_on)
1896*4882a593Smuzhiyun 		return;
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 	regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
1899*4882a593Smuzhiyun 	power->vregs_on = false;
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 	clk_disable_unprepare(qcadev->susclk);
1902*4882a593Smuzhiyun }
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun static int qca_init_regulators(struct qca_power *qca,
1905*4882a593Smuzhiyun 				const struct qca_vreg *vregs, size_t num_vregs)
1906*4882a593Smuzhiyun {
1907*4882a593Smuzhiyun 	struct regulator_bulk_data *bulk;
1908*4882a593Smuzhiyun 	int ret;
1909*4882a593Smuzhiyun 	int i;
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
1912*4882a593Smuzhiyun 	if (!bulk)
1913*4882a593Smuzhiyun 		return -ENOMEM;
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	for (i = 0; i < num_vregs; i++)
1916*4882a593Smuzhiyun 		bulk[i].supply = vregs[i].name;
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun 	ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
1919*4882a593Smuzhiyun 	if (ret < 0)
1920*4882a593Smuzhiyun 		return ret;
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	for (i = 0; i < num_vregs; i++) {
1923*4882a593Smuzhiyun 		ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
1924*4882a593Smuzhiyun 		if (ret)
1925*4882a593Smuzhiyun 			return ret;
1926*4882a593Smuzhiyun 	}
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	qca->vreg_bulk = bulk;
1929*4882a593Smuzhiyun 	qca->num_vregs = num_vregs;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	return 0;
1932*4882a593Smuzhiyun }
1933*4882a593Smuzhiyun 
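/* Illustrative only: with the wcn3990 table above, this call resolves the
 * "vddio"/"vddxo"/"vddrf"/"vddch0" supplies for the device and programs
 * the listed load (in uA) on each consumer:
 *
 *	err = qca_init_regulators(qcadev->bt_power,
 *				  qca_soc_data_wcn3990.vregs,
 *				  qca_soc_data_wcn3990.num_vregs);
 */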
1934*4882a593Smuzhiyun static int qca_serdev_probe(struct serdev_device *serdev)
1935*4882a593Smuzhiyun {
1936*4882a593Smuzhiyun 	struct qca_serdev *qcadev;
1937*4882a593Smuzhiyun 	struct hci_dev *hdev;
1938*4882a593Smuzhiyun 	const struct qca_device_data *data;
1939*4882a593Smuzhiyun 	int err;
1940*4882a593Smuzhiyun 	bool power_ctrl_enabled = true;
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun 	qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
1943*4882a593Smuzhiyun 	if (!qcadev)
1944*4882a593Smuzhiyun 		return -ENOMEM;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 	qcadev->serdev_hu.serdev = serdev;
1947*4882a593Smuzhiyun 	data = device_get_match_data(&serdev->dev);
1948*4882a593Smuzhiyun 	serdev_device_set_drvdata(serdev, qcadev);
1949*4882a593Smuzhiyun 	device_property_read_string(&serdev->dev, "firmware-name",
1950*4882a593Smuzhiyun 					 &qcadev->firmware_name);
1951*4882a593Smuzhiyun 	device_property_read_u32(&serdev->dev, "max-speed",
1952*4882a593Smuzhiyun 				 &qcadev->oper_speed);
1953*4882a593Smuzhiyun 	if (!qcadev->oper_speed)
1954*4882a593Smuzhiyun 		BT_DBG("UART will pick default operating speed");
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	if (data && qca_is_wcn399x(data->soc_type)) {
1957*4882a593Smuzhiyun 		qcadev->btsoc_type = data->soc_type;
1958*4882a593Smuzhiyun 		qcadev->bt_power = devm_kzalloc(&serdev->dev,
1959*4882a593Smuzhiyun 						sizeof(struct qca_power),
1960*4882a593Smuzhiyun 						GFP_KERNEL);
1961*4882a593Smuzhiyun 		if (!qcadev->bt_power)
1962*4882a593Smuzhiyun 			return -ENOMEM;
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun 		qcadev->bt_power->dev = &serdev->dev;
1965*4882a593Smuzhiyun 		err = qca_init_regulators(qcadev->bt_power, data->vregs,
1966*4882a593Smuzhiyun 					  data->num_vregs);
1967*4882a593Smuzhiyun 		if (err) {
1968*4882a593Smuzhiyun 			BT_ERR("Failed to init regulators:%d", err);
1969*4882a593Smuzhiyun 			return err;
1970*4882a593Smuzhiyun 		}
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun 		qcadev->bt_power->vregs_on = false;
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun 		qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
1975*4882a593Smuzhiyun 		if (IS_ERR(qcadev->susclk)) {
1976*4882a593Smuzhiyun 			dev_err(&serdev->dev, "failed to acquire clk\n");
1977*4882a593Smuzhiyun 			return PTR_ERR(qcadev->susclk);
1978*4882a593Smuzhiyun 		}
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 		err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
1981*4882a593Smuzhiyun 		if (err) {
1982*4882a593Smuzhiyun 			BT_ERR("wcn3990 serdev registration failed");
1983*4882a593Smuzhiyun 			return err;
1984*4882a593Smuzhiyun 		}
1985*4882a593Smuzhiyun 	} else {
1986*4882a593Smuzhiyun 		if (data)
1987*4882a593Smuzhiyun 			qcadev->btsoc_type = data->soc_type;
1988*4882a593Smuzhiyun 		else
1989*4882a593Smuzhiyun 			qcadev->btsoc_type = QCA_ROME;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
1992*4882a593Smuzhiyun 					       GPIOD_OUT_LOW);
1993*4882a593Smuzhiyun 		if (IS_ERR_OR_NULL(qcadev->bt_en)) {
1994*4882a593Smuzhiyun 			dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
1995*4882a593Smuzhiyun 			power_ctrl_enabled = false;
1996*4882a593Smuzhiyun 		}
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 		qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
1999*4882a593Smuzhiyun 		if (IS_ERR(qcadev->susclk)) {
2000*4882a593Smuzhiyun 			dev_warn(&serdev->dev, "failed to acquire clk\n");
2001*4882a593Smuzhiyun 			return PTR_ERR(qcadev->susclk);
2002*4882a593Smuzhiyun 		}
2003*4882a593Smuzhiyun 		err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
2004*4882a593Smuzhiyun 		if (err)
2005*4882a593Smuzhiyun 			return err;
2006*4882a593Smuzhiyun 
2007*4882a593Smuzhiyun 		err = clk_prepare_enable(qcadev->susclk);
2008*4882a593Smuzhiyun 		if (err)
2009*4882a593Smuzhiyun 			return err;
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 		err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
2012*4882a593Smuzhiyun 		if (err) {
2013*4882a593Smuzhiyun 			BT_ERR("Rome serdev registration failed");
2014*4882a593Smuzhiyun 			clk_disable_unprepare(qcadev->susclk);
2015*4882a593Smuzhiyun 			return err;
2016*4882a593Smuzhiyun 		}
2017*4882a593Smuzhiyun 	}
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun 	hdev = qcadev->serdev_hu.hdev;
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	if (power_ctrl_enabled) {
2022*4882a593Smuzhiyun 		set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
2023*4882a593Smuzhiyun 		hdev->shutdown = qca_power_off;
2024*4882a593Smuzhiyun 	}
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 	if (data) {
2027*4882a593Smuzhiyun 		/* Wideband speech support must be set per driver since it
2028*4882a593Smuzhiyun 		 * can't be queried via HCI. Same with the valid LE states
2029*4882a593Smuzhiyun 		 * quirk.
2030*4882a593Smuzhiyun 		 */
2030*4882a593Smuzhiyun 		if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
2031*4882a593Smuzhiyun 			set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
2032*4882a593Smuzhiyun 				&hdev->quirks);
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 		if (data->capabilities & QCA_CAP_VALID_LE_STATES)
2035*4882a593Smuzhiyun 			set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
2036*4882a593Smuzhiyun 	}
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	return 0;
2039*4882a593Smuzhiyun }
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun static void qca_serdev_remove(struct serdev_device *serdev)
2042*4882a593Smuzhiyun {
2043*4882a593Smuzhiyun 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2044*4882a593Smuzhiyun 	struct qca_power *power = qcadev->bt_power;
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	if (qca_is_wcn399x(qcadev->btsoc_type) && power->vregs_on)
2047*4882a593Smuzhiyun 		qca_power_shutdown(&qcadev->serdev_hu);
2048*4882a593Smuzhiyun 	else if (qcadev->susclk)
2049*4882a593Smuzhiyun 		clk_disable_unprepare(qcadev->susclk);
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 	hci_uart_unregister_device(&qcadev->serdev_hu);
2052*4882a593Smuzhiyun }
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun static void qca_serdev_shutdown(struct device *dev)
2055*4882a593Smuzhiyun {
2056*4882a593Smuzhiyun 	int ret;
2057*4882a593Smuzhiyun 	int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
2058*4882a593Smuzhiyun 	struct serdev_device *serdev = to_serdev_device(dev);
2059*4882a593Smuzhiyun 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2060*4882a593Smuzhiyun 	const u8 ibs_wake_cmd[] = { 0xFD };
2061*4882a593Smuzhiyun 	const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun 	if (qcadev->btsoc_type == QCA_QCA6390) {
2064*4882a593Smuzhiyun 		serdev_device_write_flush(serdev);
2065*4882a593Smuzhiyun 		ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
2066*4882a593Smuzhiyun 					      sizeof(ibs_wake_cmd));
2067*4882a593Smuzhiyun 		if (ret < 0) {
2068*4882a593Smuzhiyun 			BT_ERR("QCA send IBS_WAKE_IND error: %d", ret);
2069*4882a593Smuzhiyun 			return;
2070*4882a593Smuzhiyun 		}
2071*4882a593Smuzhiyun 		serdev_device_wait_until_sent(serdev, timeout);
2072*4882a593Smuzhiyun 		usleep_range(8000, 10000);
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun 		serdev_device_write_flush(serdev);
2075*4882a593Smuzhiyun 		ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd,
2076*4882a593Smuzhiyun 					      sizeof(edl_reset_soc_cmd));
2077*4882a593Smuzhiyun 		if (ret < 0) {
2078*4882a593Smuzhiyun 			BT_ERR("QCA send EDL_RESET_REQ error: %d", ret);
2079*4882a593Smuzhiyun 			return;
2080*4882a593Smuzhiyun 		}
2081*4882a593Smuzhiyun 		serdev_device_wait_until_sent(serdev, timeout);
2082*4882a593Smuzhiyun 		usleep_range(8000, 10000);
2083*4882a593Smuzhiyun 	}
2084*4882a593Smuzhiyun }
2085*4882a593Smuzhiyun 
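/* Illustrative breakdown, not authoritative: edl_reset_soc_cmd above is a
 * pre-framed H4 command. Byte 0 is the H4 packet type (0x01, command),
 * bytes 1-2 are the little-endian vendor opcode 0xfc00 (QCA EDL), byte 3
 * is the parameter length and byte 4 appears to be the EDL reset request
 * code. The preceding single-byte HCI_IBS_WAKE_IND (0xFD) makes sure the
 * controller is awake before the reset is sent.
 */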
2086*4882a593Smuzhiyun static int __maybe_unused qca_suspend(struct device *dev)
2087*4882a593Smuzhiyun {
2088*4882a593Smuzhiyun 	struct serdev_device *serdev = to_serdev_device(dev);
2089*4882a593Smuzhiyun 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2090*4882a593Smuzhiyun 	struct hci_uart *hu = &qcadev->serdev_hu;
2091*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
2092*4882a593Smuzhiyun 	unsigned long flags;
2093*4882a593Smuzhiyun 	bool tx_pending = false;
2094*4882a593Smuzhiyun 	int ret = 0;
2095*4882a593Smuzhiyun 	u8 cmd;
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	set_bit(QCA_SUSPENDING, &qca->flags);
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun 	/* Device is downloading patch or doesn't support in-band sleep. */
2100*4882a593Smuzhiyun 	if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
2101*4882a593Smuzhiyun 		return 0;
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 	cancel_work_sync(&qca->ws_awake_device);
2104*4882a593Smuzhiyun 	cancel_work_sync(&qca->ws_awake_rx);
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
2107*4882a593Smuzhiyun 				 flags, SINGLE_DEPTH_NESTING);
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	switch (qca->tx_ibs_state) {
2110*4882a593Smuzhiyun 	case HCI_IBS_TX_WAKING:
2111*4882a593Smuzhiyun 		del_timer(&qca->wake_retrans_timer);
2112*4882a593Smuzhiyun 		fallthrough;
2113*4882a593Smuzhiyun 	case HCI_IBS_TX_AWAKE:
2114*4882a593Smuzhiyun 		del_timer(&qca->tx_idle_timer);
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 		serdev_device_write_flush(hu->serdev);
2117*4882a593Smuzhiyun 		cmd = HCI_IBS_SLEEP_IND;
2118*4882a593Smuzhiyun 		ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 		if (ret < 0) {
2121*4882a593Smuzhiyun 			BT_ERR("Failed to send SLEEP to device");
2122*4882a593Smuzhiyun 			break;
2123*4882a593Smuzhiyun 		}
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
2126*4882a593Smuzhiyun 		qca->ibs_sent_slps++;
2127*4882a593Smuzhiyun 		tx_pending = true;
2128*4882a593Smuzhiyun 		break;
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	case HCI_IBS_TX_ASLEEP:
2131*4882a593Smuzhiyun 		break;
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	default:
2134*4882a593Smuzhiyun 		BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
2135*4882a593Smuzhiyun 		ret = -EINVAL;
2136*4882a593Smuzhiyun 		break;
2137*4882a593Smuzhiyun 	}
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun 	if (ret < 0)
2142*4882a593Smuzhiyun 		goto error;
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	if (tx_pending) {
2145*4882a593Smuzhiyun 		serdev_device_wait_until_sent(hu->serdev,
2146*4882a593Smuzhiyun 					      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
2147*4882a593Smuzhiyun 		serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
2148*4882a593Smuzhiyun 	}
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	/* Wait for the HCI_IBS_SLEEP_IND sent by the device to indicate
2151*4882a593Smuzhiyun 	 * that its Tx is going to sleep, so that a stray packet does not
2152*4882a593Smuzhiyun 	 * wake the system later.
2153*4882a593Smuzhiyun 	 */
2153*4882a593Smuzhiyun 	ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
2154*4882a593Smuzhiyun 			qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
2155*4882a593Smuzhiyun 			msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
2156*4882a593Smuzhiyun 	if (ret == 0) {
2157*4882a593Smuzhiyun 		ret = -ETIMEDOUT;
2158*4882a593Smuzhiyun 		goto error;
2159*4882a593Smuzhiyun 	}
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun 	return 0;
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun error:
2164*4882a593Smuzhiyun 	clear_bit(QCA_SUSPENDING, &qca->flags);
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun 	return ret;
2167*4882a593Smuzhiyun }
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun static int __maybe_unused qca_resume(struct device *dev)
2170*4882a593Smuzhiyun {
2171*4882a593Smuzhiyun 	struct serdev_device *serdev = to_serdev_device(dev);
2172*4882a593Smuzhiyun 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2173*4882a593Smuzhiyun 	struct hci_uart *hu = &qcadev->serdev_hu;
2174*4882a593Smuzhiyun 	struct qca_data *qca = hu->priv;
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun 	clear_bit(QCA_SUSPENDING, &qca->flags);
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 	return 0;
2179*4882a593Smuzhiyun }
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun #ifdef CONFIG_OF
2184*4882a593Smuzhiyun static const struct of_device_id qca_bluetooth_of_match[] = {
2185*4882a593Smuzhiyun 	{ .compatible = "qcom,qca6174-bt" },
2186*4882a593Smuzhiyun 	{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
2187*4882a593Smuzhiyun 	{ .compatible = "qcom,qca9377-bt" },
2188*4882a593Smuzhiyun 	{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
2189*4882a593Smuzhiyun 	{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
2190*4882a593Smuzhiyun 	{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
2191*4882a593Smuzhiyun 	{ /* sentinel */ }
2192*4882a593Smuzhiyun };
2193*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
2194*4882a593Smuzhiyun #endif
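/* Example device tree node (illustrative; the regulator phandles are
 * hypothetical). The "-supply" names follow the vregs tables above, and
 * "max-speed"/"firmware-name" are the properties read in
 * qca_serdev_probe():
 *
 *	bluetooth {
 *		compatible = "qcom,wcn3990-bt";
 *		vddio-supply = <&vreg_example_io>;
 *		vddxo-supply = <&vreg_example_xo>;
 *		vddrf-supply = <&vreg_example_rf>;
 *		vddch0-supply = <&vreg_example_ch0>;
 *		max-speed = <3200000>;
 *	};
 */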
2195*4882a593Smuzhiyun 
2196*4882a593Smuzhiyun #ifdef CONFIG_ACPI
2197*4882a593Smuzhiyun static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
2198*4882a593Smuzhiyun 	{ "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2199*4882a593Smuzhiyun 	{ "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2200*4882a593Smuzhiyun 	{ "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2201*4882a593Smuzhiyun 	{ "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2202*4882a593Smuzhiyun 	{ },
2203*4882a593Smuzhiyun };
2204*4882a593Smuzhiyun MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
2205*4882a593Smuzhiyun #endif
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun static struct serdev_device_driver qca_serdev_driver = {
2209*4882a593Smuzhiyun 	.probe = qca_serdev_probe,
2210*4882a593Smuzhiyun 	.remove = qca_serdev_remove,
2211*4882a593Smuzhiyun 	.driver = {
2212*4882a593Smuzhiyun 		.name = "hci_uart_qca",
2213*4882a593Smuzhiyun 		.of_match_table = of_match_ptr(qca_bluetooth_of_match),
2214*4882a593Smuzhiyun 		.acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
2215*4882a593Smuzhiyun 		.shutdown = qca_serdev_shutdown,
2216*4882a593Smuzhiyun 		.pm = &qca_pm_ops,
2217*4882a593Smuzhiyun 	},
2218*4882a593Smuzhiyun };
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun int __init qca_init(void)
2221*4882a593Smuzhiyun {
2222*4882a593Smuzhiyun 	serdev_device_driver_register(&qca_serdev_driver);
2223*4882a593Smuzhiyun 
2224*4882a593Smuzhiyun 	return hci_uart_register_proto(&qca_proto);
2225*4882a593Smuzhiyun }
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun int __exit qca_deinit(void)
2228*4882a593Smuzhiyun {
2229*4882a593Smuzhiyun 	serdev_device_driver_unregister(&qca_serdev_driver);
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun 	return hci_uart_unregister_proto(&qca_proto);
2232*4882a593Smuzhiyun }
2233