1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Bluetooth HCI UART driver for Intel/AG6xx devices
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Copyright (C) 2016 Intel Corporation
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/kernel.h>
10*4882a593Smuzhiyun #include <linux/errno.h>
11*4882a593Smuzhiyun #include <linux/skbuff.h>
12*4882a593Smuzhiyun #include <linux/firmware.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/tty.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #include <net/bluetooth/bluetooth.h>
17*4882a593Smuzhiyun #include <net/bluetooth/hci_core.h>
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include "hci_uart.h"
20*4882a593Smuzhiyun #include "btintel.h"
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun struct ag6xx_data {
23*4882a593Smuzhiyun struct sk_buff *rx_skb;
24*4882a593Smuzhiyun struct sk_buff_head txq;
25*4882a593Smuzhiyun };
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun struct pbn_entry {
28*4882a593Smuzhiyun __le32 addr;
29*4882a593Smuzhiyun __le32 plen;
30*4882a593Smuzhiyun __u8 data[];
31*4882a593Smuzhiyun } __packed;
32*4882a593Smuzhiyun
ag6xx_open(struct hci_uart * hu)33*4882a593Smuzhiyun static int ag6xx_open(struct hci_uart *hu)
34*4882a593Smuzhiyun {
35*4882a593Smuzhiyun struct ag6xx_data *ag6xx;
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun BT_DBG("hu %p", hu);
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun ag6xx = kzalloc(sizeof(*ag6xx), GFP_KERNEL);
40*4882a593Smuzhiyun if (!ag6xx)
41*4882a593Smuzhiyun return -ENOMEM;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun skb_queue_head_init(&ag6xx->txq);
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun hu->priv = ag6xx;
46*4882a593Smuzhiyun return 0;
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
ag6xx_close(struct hci_uart * hu)49*4882a593Smuzhiyun static int ag6xx_close(struct hci_uart *hu)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun struct ag6xx_data *ag6xx = hu->priv;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun BT_DBG("hu %p", hu);
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun skb_queue_purge(&ag6xx->txq);
56*4882a593Smuzhiyun kfree_skb(ag6xx->rx_skb);
57*4882a593Smuzhiyun kfree(ag6xx);
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun hu->priv = NULL;
60*4882a593Smuzhiyun return 0;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
ag6xx_flush(struct hci_uart * hu)63*4882a593Smuzhiyun static int ag6xx_flush(struct hci_uart *hu)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun struct ag6xx_data *ag6xx = hu->priv;
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun BT_DBG("hu %p", hu);
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun skb_queue_purge(&ag6xx->txq);
70*4882a593Smuzhiyun return 0;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun
ag6xx_dequeue(struct hci_uart * hu)73*4882a593Smuzhiyun static struct sk_buff *ag6xx_dequeue(struct hci_uart *hu)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun struct ag6xx_data *ag6xx = hu->priv;
76*4882a593Smuzhiyun struct sk_buff *skb;
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun skb = skb_dequeue(&ag6xx->txq);
79*4882a593Smuzhiyun if (!skb)
80*4882a593Smuzhiyun return skb;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun /* Prepend skb with frame type */
83*4882a593Smuzhiyun memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
84*4882a593Smuzhiyun return skb;
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
ag6xx_enqueue(struct hci_uart * hu,struct sk_buff * skb)87*4882a593Smuzhiyun static int ag6xx_enqueue(struct hci_uart *hu, struct sk_buff *skb)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun struct ag6xx_data *ag6xx = hu->priv;
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun skb_queue_tail(&ag6xx->txq, skb);
92*4882a593Smuzhiyun return 0;
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun static const struct h4_recv_pkt ag6xx_recv_pkts[] = {
96*4882a593Smuzhiyun { H4_RECV_ACL, .recv = hci_recv_frame },
97*4882a593Smuzhiyun { H4_RECV_SCO, .recv = hci_recv_frame },
98*4882a593Smuzhiyun { H4_RECV_EVENT, .recv = hci_recv_frame },
99*4882a593Smuzhiyun };
100*4882a593Smuzhiyun
ag6xx_recv(struct hci_uart * hu,const void * data,int count)101*4882a593Smuzhiyun static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun struct ag6xx_data *ag6xx = hu->priv;
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
106*4882a593Smuzhiyun return -EUNATCH;
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
109*4882a593Smuzhiyun ag6xx_recv_pkts,
110*4882a593Smuzhiyun ARRAY_SIZE(ag6xx_recv_pkts));
111*4882a593Smuzhiyun if (IS_ERR(ag6xx->rx_skb)) {
112*4882a593Smuzhiyun int err = PTR_ERR(ag6xx->rx_skb);
113*4882a593Smuzhiyun bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
114*4882a593Smuzhiyun ag6xx->rx_skb = NULL;
115*4882a593Smuzhiyun return err;
116*4882a593Smuzhiyun }
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun return count;
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun
/* Write plen bytes of data into device memory at addr using the Intel
 * vendor memory-write command (0xfc8e), fragmenting the payload to fit
 * the HCI command size limit. Returns 0 on success or a negative error.
 */
static int intel_mem_write(struct hci_dev *hdev, u32 addr, u32 plen,
			   const void *data)
{
	/* At most 247 payload bytes fit in one command:
	 * HCI cmd header (3) + Intel mem write header (6) + data (247).
	 */
	while (plen > 0) {
		struct sk_buff *skb;
		u8 param[253];
		u8 frag = plen > 247 ? 247 : plen;
		__le32 leaddr = cpu_to_le32(addr);

		/* Intel mem write header: addr (4), mode (1), length (1) */
		memcpy(param, &leaddr, 4);
		param[4] = 0;
		param[5] = frag;
		memcpy(&param[6], data, frag);

		skb = __hci_cmd_sync(hdev, 0xfc8e, frag + 6, param,
				     HCI_INIT_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);
		kfree_skb(skb);

		addr += frag;
		data += frag;
		plen -= frag;
	}

	return 0;
}
150*4882a593Smuzhiyun
/* Vendor setup for AG6xx controllers: enter manufacturing mode, read the
 * controller version, optionally apply Bluetooth device configuration data
 * (bddata), apply a PBN firmware patch via memory writes, then exit
 * manufacturing mode with a reset that activates the patch.
 *
 * Missing firmware files are not fatal — the controller then runs its
 * embedded (unpatched) firmware. Returns 0 on success or a negative error.
 */
static int ag6xx_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct sk_buff *skb;
	struct intel_version ver;
	const struct firmware *fw;
	const u8 *fw_ptr;
	char fwname[64];
	bool patched = false;
	int err;

	hu->hdev->set_diag = btintel_set_diag;
	hu->hdev->set_bdaddr = btintel_set_bdaddr;

	/* Vendor commands below are only accepted in manufacturing mode */
	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	err = btintel_read_version(hdev, &ver);
	if (err)
		return err;

	btintel_version_info(hdev, &ver);

	/* The hardware platform number has a fixed value of 0x37 and
	 * for now only accept this single value.
	 */
	if (ver.hw_platform != 0x37) {
		bt_dev_err(hdev, "Unsupported Intel hardware platform: 0x%X",
			   ver.hw_platform);
		return -EINVAL;
	}

	/* Only the hardware variant iBT 2.1 (AG6XX) is supported by this
	 * firmware setup method.
	 */
	if (ver.hw_variant != 0x0a) {
		bt_dev_err(hdev, "Unsupported Intel hardware variant: 0x%x",
			   ver.hw_variant);
		return -EINVAL;
	}

	/* bddata file name depends only on platform and variant */
	snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bddata",
		 ver.hw_platform, ver.hw_variant);

	/* bddata is optional: continue with patching if it is missing */
	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to open Intel bddata file: %s (%d)",
			   fwname, err);
		goto patch;
	}
	fw_ptr = fw->data;

	bt_dev_info(hdev, "Applying bddata (%s)", fwname);

	/* Vendor command 0xfc2f loads the bddata blob in one shot; it
	 * completes with a Command Status event rather than Command Complete.
	 */
	skb = __hci_cmd_sync_ev(hdev, 0xfc2f, fw->size, fw->data,
				HCI_EV_CMD_STATUS, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Applying bddata failed (%ld)", PTR_ERR(skb));
		release_firmware(fw);
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	release_firmware(fw);

patch:
	/* If there is no applied patch, fw_patch_num is always 0x00. In other
	 * cases, current firmware is already patched. No need to patch it.
	 */
	if (ver.fw_patch_num) {
		bt_dev_info(hdev, "Device is already patched. patch num: %02x",
			    ver.fw_patch_num);
		patched = true;
		goto complete;
	}

	/* Patch file name encodes the full hardware/firmware version */
	snprintf(fwname, sizeof(fwname),
		 "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.pbn",
		 ver.hw_platform, ver.hw_variant, ver.hw_revision,
		 ver.fw_variant, ver.fw_revision, ver.fw_build_num,
		 ver.fw_build_ww, ver.fw_build_yy);

	/* Patch file is optional too: finish setup unpatched if missing */
	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to open Intel patch file: %s(%d)",
			   fwname, err);
		goto complete;
	}
	fw_ptr = fw->data;

	bt_dev_info(hdev, "Patching firmware file (%s)", fwname);

	/* PBN patch file contains a list of binary patches to be applied on top
	 * of the embedded firmware. Each patch entry header contains the target
	 * address and patch size.
	 *
	 * Patch entry:
	 * | addr(le) | patch_len(le) | patch_data |
	 * | 4 Bytes  | 4 Bytes       | n Bytes    |
	 *
	 * PBN file is terminated by a patch entry whose address is 0xffffffff.
	 */
	while (fw->size > fw_ptr - fw->data) {
		struct pbn_entry *pbn = (void *)fw_ptr;
		u32 addr, plen;

		if (pbn->addr == 0xffffffff) {
			bt_dev_info(hdev, "Patching complete");
			patched = true;
			break;
		}

		addr = le32_to_cpu(pbn->addr);
		plen = le32_to_cpu(pbn->plen);

		/* Reject entries whose payload would run past the file end */
		if (fw->data + fw->size <= pbn->data + plen) {
			bt_dev_info(hdev, "Invalid patch len (%d)", plen);
			break;
		}

		bt_dev_info(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
			    fw->size);

		err = intel_mem_write(hdev, addr, plen, pbn->data);
		if (err) {
			bt_dev_err(hdev, "Patching failed");
			break;
		}

		/* Advance to the next entry header */
		fw_ptr = pbn->data + plen;
	}

	release_firmware(fw);

complete:
	/* Exit manufacturing mode and reset */
	err = btintel_exit_mfg(hdev, true, patched);
	if (err)
		return err;

	/* Set the event mask for Intel specific vendor events. This enables
	 * a few extra events that are useful during general operation.
	 */
	btintel_set_event_mask_mfg(hdev, false);

	btintel_check_bdaddr(hdev);
	return 0;
}
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun static const struct hci_uart_proto ag6xx_proto = {
302*4882a593Smuzhiyun .id = HCI_UART_AG6XX,
303*4882a593Smuzhiyun .name = "AG6XX",
304*4882a593Smuzhiyun .manufacturer = 2,
305*4882a593Smuzhiyun .open = ag6xx_open,
306*4882a593Smuzhiyun .close = ag6xx_close,
307*4882a593Smuzhiyun .flush = ag6xx_flush,
308*4882a593Smuzhiyun .setup = ag6xx_setup,
309*4882a593Smuzhiyun .recv = ag6xx_recv,
310*4882a593Smuzhiyun .enqueue = ag6xx_enqueue,
311*4882a593Smuzhiyun .dequeue = ag6xx_dequeue,
312*4882a593Smuzhiyun };
313*4882a593Smuzhiyun
/* Register the AG6xx protocol with the HCI UART core at module init. */
int __init ag6xx_init(void)
{
	return hci_uart_register_proto(&ag6xx_proto);
}
318*4882a593Smuzhiyun
/* Unregister the AG6xx protocol on module exit. */
int __exit ag6xx_deinit(void)
{
	return hci_uart_unregister_proto(&ag6xx_proto);
}
323