/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

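/* Command Complete handlers (hci_cc_*): each handler maps the return
 * parameters in skb->data onto the matching struct hci_rp_* (or onto a
 * single status byte for status-only replies).  Most handlers return
 * early on a non-zero status, so only successful replies update the
 * cached state in struct hci_dev or struct hci_conn.
 */
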
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that the Inquiry Complete event arrives right
	 * before the Inquiry Cancel Command Complete event, in which
	 * case the latter event carries a status of Command Disallowed
	 * (0x0c). This should not be treated as an error, since we
	 * actually achieve what Inquiry Cancel wants to achieve, which
	 * is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

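/* Write-type commands do not echo the value that was written, so the
 * handlers below recover it from the command still pending on hdev via
 * hci_sent_cmd_data() and only then update the matching hci_conn or
 * hci_dev field.  The offset passed to get_unaligned_le16() (e.g.
 * sent + 2 below) skips the leading connection handle of the original
 * command parameters.
 */
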
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

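/* When the management interface (HCI_MGMT) is in use, the name, class
 * and pairing related handlers forward the result to the mgmt layer so
 * that userspace (e.g. bluetoothd) sees a completion for the request it
 * issued; otherwise the result is only cached in hdev.
 */
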
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

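/* The local feature pages read below are what the rest of the stack
 * keys off: hdev->features[page][octet] is tested through the
 * lmp_*_capable() macros, and the ACL/eSCO packet type masks derived
 * here are later offered when connections are created.
 */
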
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to the features
	 * supported by the device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

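/* Buffer size replies seed the flow-control counters: acl_cnt/sco_cnt
 * start out equal to the number of packets the controller can queue and
 * are consumed/replenished by the TX path as Number Of Completed Packets
 * events come in.  The HCI_QUIRK_FIXUP_BUFFER_SIZE override below
 * replaces the reported SCO values for controllers known to get them
 * wrong.
 */
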
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

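/* HCI_Read_Clock can be issued for the local clock or for the piconet
 * clock of a specific connection; the "which" parameter of the sent
 * command (0x00 = local clock) decides whether the result is stored in
 * hdev->clock or in the hci_conn matching the returned handle, together
 * with its accuracy.
 */
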
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

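/* PIN code and user confirm/passkey replies are forwarded to the mgmt
 * layer before (or regardless of) the status check, so userspace gets a
 * completion for its pairing request even when the controller rejected
 * the reply.
 */
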
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

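/* LE buffer sizes are tracked separately from BR/EDR ACL buffers.  If a
 * controller reports no dedicated LE buffers here (le_mtu/le_pkts of 0),
 * the TX scheduler elsewhere in the core falls back to the shared ACL
 * buffer pool for LE traffic.
 */
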
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

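/* With extended advertising each advertising set can use its own random
 * address.  The handler below mirrors that split: the address for set 0
 * (used for legacy and directed advertising) lives in hdev->random_addr,
 * while per-instance addresses are kept in the corresponding adv_info.
 */
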
static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv_instance;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 (Set adv and Directed advs) */
		bacpy(&hdev->random_addr, &cp->bdaddr);
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&adv_instance->random_addr, &cp->bdaddr);
	}

	hci_dev_unlock(hdev);
}

1205*4882a593Smuzhiyun static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1206*4882a593Smuzhiyun {
1207*4882a593Smuzhiyun __u8 *sent, status = *((__u8 *) skb->data);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun if (status)
1212*4882a593Smuzhiyun return;
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1215*4882a593Smuzhiyun if (!sent)
1216*4882a593Smuzhiyun return;
1217*4882a593Smuzhiyun
1218*4882a593Smuzhiyun hci_dev_lock(hdev);
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun /* If we're doing connection initiation as peripheral, set a
1221*4882a593Smuzhiyun * timeout in case something goes wrong.
1222*4882a593Smuzhiyun */
1223*4882a593Smuzhiyun if (*sent) {
1224*4882a593Smuzhiyun struct hci_conn *conn;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun hci_dev_set_flag(hdev, HCI_LE_ADV);
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun conn = hci_lookup_le_connect(hdev);
1229*4882a593Smuzhiyun if (conn)
1230*4882a593Smuzhiyun queue_delayed_work(hdev->workqueue,
1231*4882a593Smuzhiyun &conn->le_conn_timeout,
1232*4882a593Smuzhiyun conn->conn_timeout);
1233*4882a593Smuzhiyun } else {
1234*4882a593Smuzhiyun hci_dev_clear_flag(hdev, HCI_LE_ADV);
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun hci_dev_unlock(hdev);
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1241*4882a593Smuzhiyun struct sk_buff *skb)
1242*4882a593Smuzhiyun {
1243*4882a593Smuzhiyun struct hci_cp_le_set_ext_adv_enable *cp;
1244*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1245*4882a593Smuzhiyun
1246*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1247*4882a593Smuzhiyun
1248*4882a593Smuzhiyun if (status)
1249*4882a593Smuzhiyun return;
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1252*4882a593Smuzhiyun if (!cp)
1253*4882a593Smuzhiyun return;
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun hci_dev_lock(hdev);
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun if (cp->enable) {
1258*4882a593Smuzhiyun struct hci_conn *conn;
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun hci_dev_set_flag(hdev, HCI_LE_ADV);
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun conn = hci_lookup_le_connect(hdev);
1263*4882a593Smuzhiyun if (conn)
1264*4882a593Smuzhiyun queue_delayed_work(hdev->workqueue,
1265*4882a593Smuzhiyun &conn->le_conn_timeout,
1266*4882a593Smuzhiyun conn->conn_timeout);
1267*4882a593Smuzhiyun } else {
1268*4882a593Smuzhiyun hci_dev_clear_flag(hdev, HCI_LE_ADV);
1269*4882a593Smuzhiyun }
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun hci_dev_unlock(hdev);
1272*4882a593Smuzhiyun }
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1275*4882a593Smuzhiyun {
1276*4882a593Smuzhiyun struct hci_cp_le_set_scan_param *cp;
1277*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun if (status)
1282*4882a593Smuzhiyun return;
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1285*4882a593Smuzhiyun if (!cp)
1286*4882a593Smuzhiyun return;
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun hci_dev_lock(hdev);
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun hdev->le_scan_type = cp->type;
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun hci_dev_unlock(hdev);
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1296*4882a593Smuzhiyun struct sk_buff *skb)
1297*4882a593Smuzhiyun {
1298*4882a593Smuzhiyun struct hci_cp_le_set_ext_scan_params *cp;
1299*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1300*4882a593Smuzhiyun struct hci_cp_le_scan_phy_params *phy_param;
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun if (status)
1305*4882a593Smuzhiyun return;
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1308*4882a593Smuzhiyun if (!cp)
1309*4882a593Smuzhiyun return;
1310*4882a593Smuzhiyun
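/* The extended scan parameters carry one hci_cp_le_scan_phy_params block
 * per scanning PHY; only the first block is inspected here to record the
 * scan type (active vs. passive).
 */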
1311*4882a593Smuzhiyun phy_param = (void *)cp->data;
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun hci_dev_lock(hdev);
1314*4882a593Smuzhiyun
1315*4882a593Smuzhiyun hdev->le_scan_type = phy_param->type;
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun hci_dev_unlock(hdev);
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun
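/* During active scanning an advertising report may be cached in the
 * discovery state so it can be combined with the matching scan response
 * before being passed to mgmt_device_found(). A last_adv_addr equal to
 * BDADDR_ANY means no report is pending; any leftover report is flushed
 * when scanning is disabled (see le_set_scan_enable_complete()).
 */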
1320*4882a593Smuzhiyun static bool has_pending_adv_report(struct hci_dev *hdev)
1321*4882a593Smuzhiyun {
1322*4882a593Smuzhiyun struct discovery_state *d = &hdev->discovery;
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun return bacmp(&d->last_adv_addr, BDADDR_ANY);
1325*4882a593Smuzhiyun }
1326*4882a593Smuzhiyun
1327*4882a593Smuzhiyun static void clear_pending_adv_report(struct hci_dev *hdev)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun struct discovery_state *d = &hdev->discovery;
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun bacpy(&d->last_adv_addr, BDADDR_ANY);
1332*4882a593Smuzhiyun d->last_adv_data_len = 0;
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1336*4882a593Smuzhiyun u8 bdaddr_type, s8 rssi, u32 flags,
1337*4882a593Smuzhiyun u8 *data, u8 len)
1338*4882a593Smuzhiyun {
1339*4882a593Smuzhiyun struct discovery_state *d = &hdev->discovery;
1340*4882a593Smuzhiyun
1341*4882a593Smuzhiyun if (len > HCI_MAX_AD_LENGTH)
1342*4882a593Smuzhiyun return;
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun bacpy(&d->last_adv_addr, bdaddr);
1345*4882a593Smuzhiyun d->last_adv_addr_type = bdaddr_type;
1346*4882a593Smuzhiyun d->last_adv_rssi = rssi;
1347*4882a593Smuzhiyun d->last_adv_flags = flags;
1348*4882a593Smuzhiyun memcpy(d->last_adv_data, data, len);
1349*4882a593Smuzhiyun d->last_adv_data_len = len;
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1353*4882a593Smuzhiyun {
1354*4882a593Smuzhiyun hci_dev_lock(hdev);
1355*4882a593Smuzhiyun
1356*4882a593Smuzhiyun switch (enable) {
1357*4882a593Smuzhiyun case LE_SCAN_ENABLE:
1358*4882a593Smuzhiyun hci_dev_set_flag(hdev, HCI_LE_SCAN);
1359*4882a593Smuzhiyun if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1360*4882a593Smuzhiyun clear_pending_adv_report(hdev);
1361*4882a593Smuzhiyun break;
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun case LE_SCAN_DISABLE:
1364*4882a593Smuzhiyun /* We do this here instead of when setting DISCOVERY_STOPPED
1365*4882a593Smuzhiyun * since the latter would potentially require waiting for
1366*4882a593Smuzhiyun * inquiry to stop too.
1367*4882a593Smuzhiyun */
1368*4882a593Smuzhiyun if (has_pending_adv_report(hdev)) {
1369*4882a593Smuzhiyun struct discovery_state *d = &hdev->discovery;
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1372*4882a593Smuzhiyun d->last_adv_addr_type, NULL,
1373*4882a593Smuzhiyun d->last_adv_rssi, d->last_adv_flags,
1374*4882a593Smuzhiyun d->last_adv_data,
1375*4882a593Smuzhiyun d->last_adv_data_len, NULL, 0);
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun /* Cancel this timer so that we don't try to disable scanning
1379*4882a593Smuzhiyun * when it's already disabled.
1380*4882a593Smuzhiyun */
1381*4882a593Smuzhiyun cancel_delayed_work(&hdev->le_scan_disable);
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1386*4882a593Smuzhiyun * interrupted scanning due to a connect request. Mark
1387*4882a593Smuzhiyun * therefore discovery as stopped. If this was not
1388*4882a593Smuzhiyun * because of a connect request advertising might have
1389*4882a593Smuzhiyun * been disabled because of active scanning, so
1390*4882a593Smuzhiyun * re-enable it again if necessary.
1391*4882a593Smuzhiyun */
1392*4882a593Smuzhiyun if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1393*4882a593Smuzhiyun hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1394*4882a593Smuzhiyun else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1395*4882a593Smuzhiyun hdev->discovery.state == DISCOVERY_FINDING)
1396*4882a593Smuzhiyun hci_req_reenable_advertising(hdev);
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun break;
1399*4882a593Smuzhiyun
1400*4882a593Smuzhiyun default:
1401*4882a593Smuzhiyun bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1402*4882a593Smuzhiyun enable);
1403*4882a593Smuzhiyun break;
1404*4882a593Smuzhiyun }
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun hci_dev_unlock(hdev);
1407*4882a593Smuzhiyun }
1408*4882a593Smuzhiyun
1409*4882a593Smuzhiyun static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1410*4882a593Smuzhiyun struct sk_buff *skb)
1411*4882a593Smuzhiyun {
1412*4882a593Smuzhiyun struct hci_cp_le_set_scan_enable *cp;
1413*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun if (status)
1418*4882a593Smuzhiyun return;
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1421*4882a593Smuzhiyun if (!cp)
1422*4882a593Smuzhiyun return;
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun le_set_scan_enable_complete(hdev, cp->enable);
1425*4882a593Smuzhiyun }
1426*4882a593Smuzhiyun
1427*4882a593Smuzhiyun static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1428*4882a593Smuzhiyun struct sk_buff *skb)
1429*4882a593Smuzhiyun {
1430*4882a593Smuzhiyun struct hci_cp_le_set_ext_scan_enable *cp;
1431*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1434*4882a593Smuzhiyun
1435*4882a593Smuzhiyun if (status)
1436*4882a593Smuzhiyun return;
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1439*4882a593Smuzhiyun if (!cp)
1440*4882a593Smuzhiyun return;
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun le_set_scan_enable_complete(hdev, cp->enable);
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1446*4882a593Smuzhiyun struct sk_buff *skb)
1447*4882a593Smuzhiyun {
1448*4882a593Smuzhiyun struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1451*4882a593Smuzhiyun rp->num_of_sets);
1452*4882a593Smuzhiyun
1453*4882a593Smuzhiyun if (rp->status)
1454*4882a593Smuzhiyun return;
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun hdev->le_num_of_adv_sets = rp->num_of_sets;
1457*4882a593Smuzhiyun }
1458*4882a593Smuzhiyun
1459*4882a593Smuzhiyun static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1460*4882a593Smuzhiyun struct sk_buff *skb)
1461*4882a593Smuzhiyun {
1462*4882a593Smuzhiyun struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1463*4882a593Smuzhiyun
1464*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1465*4882a593Smuzhiyun
1466*4882a593Smuzhiyun if (rp->status)
1467*4882a593Smuzhiyun return;
1468*4882a593Smuzhiyun
1469*4882a593Smuzhiyun hdev->le_white_list_size = rp->size;
1470*4882a593Smuzhiyun }
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1473*4882a593Smuzhiyun struct sk_buff *skb)
1474*4882a593Smuzhiyun {
1475*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun if (status)
1480*4882a593Smuzhiyun return;
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun hci_bdaddr_list_clear(&hdev->le_white_list);
1483*4882a593Smuzhiyun }
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1486*4882a593Smuzhiyun struct sk_buff *skb)
1487*4882a593Smuzhiyun {
1488*4882a593Smuzhiyun struct hci_cp_le_add_to_white_list *sent;
1489*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1490*4882a593Smuzhiyun
1491*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun if (status)
1494*4882a593Smuzhiyun return;
1495*4882a593Smuzhiyun
1496*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1497*4882a593Smuzhiyun if (!sent)
1498*4882a593Smuzhiyun return;
1499*4882a593Smuzhiyun
1500*4882a593Smuzhiyun hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1501*4882a593Smuzhiyun sent->bdaddr_type);
1502*4882a593Smuzhiyun }
1503*4882a593Smuzhiyun
1504*4882a593Smuzhiyun static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1505*4882a593Smuzhiyun struct sk_buff *skb)
1506*4882a593Smuzhiyun {
1507*4882a593Smuzhiyun struct hci_cp_le_del_from_white_list *sent;
1508*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun if (status)
1513*4882a593Smuzhiyun return;
1514*4882a593Smuzhiyun
1515*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1516*4882a593Smuzhiyun if (!sent)
1517*4882a593Smuzhiyun return;
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1520*4882a593Smuzhiyun sent->bdaddr_type);
1521*4882a593Smuzhiyun }
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1524*4882a593Smuzhiyun struct sk_buff *skb)
1525*4882a593Smuzhiyun {
1526*4882a593Smuzhiyun struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun if (rp->status)
1531*4882a593Smuzhiyun return;
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun memcpy(hdev->le_states, rp->le_states, 8);
1534*4882a593Smuzhiyun }
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1537*4882a593Smuzhiyun struct sk_buff *skb)
1538*4882a593Smuzhiyun {
1539*4882a593Smuzhiyun struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun if (rp->status)
1544*4882a593Smuzhiyun return;
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1547*4882a593Smuzhiyun hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1548*4882a593Smuzhiyun }
1549*4882a593Smuzhiyun
1550*4882a593Smuzhiyun static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1551*4882a593Smuzhiyun struct sk_buff *skb)
1552*4882a593Smuzhiyun {
1553*4882a593Smuzhiyun struct hci_cp_le_write_def_data_len *sent;
1554*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1555*4882a593Smuzhiyun
1556*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun if (status)
1559*4882a593Smuzhiyun return;
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1562*4882a593Smuzhiyun if (!sent)
1563*4882a593Smuzhiyun return;
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1566*4882a593Smuzhiyun hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1570*4882a593Smuzhiyun struct sk_buff *skb)
1571*4882a593Smuzhiyun {
1572*4882a593Smuzhiyun struct hci_cp_le_add_to_resolv_list *sent;
1573*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1574*4882a593Smuzhiyun
1575*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1576*4882a593Smuzhiyun
1577*4882a593Smuzhiyun if (status)
1578*4882a593Smuzhiyun return;
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1581*4882a593Smuzhiyun if (!sent)
1582*4882a593Smuzhiyun return;
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1585*4882a593Smuzhiyun sent->bdaddr_type, sent->peer_irk,
1586*4882a593Smuzhiyun sent->local_irk);
1587*4882a593Smuzhiyun }
1588*4882a593Smuzhiyun
1589*4882a593Smuzhiyun static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1590*4882a593Smuzhiyun struct sk_buff *skb)
1591*4882a593Smuzhiyun {
1592*4882a593Smuzhiyun struct hci_cp_le_del_from_resolv_list *sent;
1593*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1594*4882a593Smuzhiyun
1595*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun if (status)
1598*4882a593Smuzhiyun return;
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1601*4882a593Smuzhiyun if (!sent)
1602*4882a593Smuzhiyun return;
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1605*4882a593Smuzhiyun sent->bdaddr_type);
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun
1608*4882a593Smuzhiyun static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1609*4882a593Smuzhiyun struct sk_buff *skb)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1614*4882a593Smuzhiyun
1615*4882a593Smuzhiyun if (status)
1616*4882a593Smuzhiyun return;
1617*4882a593Smuzhiyun
1618*4882a593Smuzhiyun hci_bdaddr_list_clear(&hdev->le_resolv_list);
1619*4882a593Smuzhiyun }
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1622*4882a593Smuzhiyun struct sk_buff *skb)
1623*4882a593Smuzhiyun {
1624*4882a593Smuzhiyun struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1625*4882a593Smuzhiyun
1626*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun if (rp->status)
1629*4882a593Smuzhiyun return;
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun hdev->le_resolv_list_size = rp->size;
1632*4882a593Smuzhiyun }
1633*4882a593Smuzhiyun
1634*4882a593Smuzhiyun static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1635*4882a593Smuzhiyun struct sk_buff *skb)
1636*4882a593Smuzhiyun {
1637*4882a593Smuzhiyun __u8 *sent, status = *((__u8 *) skb->data);
1638*4882a593Smuzhiyun
1639*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1640*4882a593Smuzhiyun
1641*4882a593Smuzhiyun if (status)
1642*4882a593Smuzhiyun return;
1643*4882a593Smuzhiyun
1644*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1645*4882a593Smuzhiyun if (!sent)
1646*4882a593Smuzhiyun return;
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun hci_dev_lock(hdev);
1649*4882a593Smuzhiyun
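/* Mirror the controller's address resolution state so that later code
 * (e.g. cs_le_create_conn()) knows whether resolved address types
 * 0x02/0x03 may appear and need converting back.
 */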
1650*4882a593Smuzhiyun if (*sent)
1651*4882a593Smuzhiyun hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1652*4882a593Smuzhiyun else
1653*4882a593Smuzhiyun hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun hci_dev_unlock(hdev);
1656*4882a593Smuzhiyun }
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1659*4882a593Smuzhiyun struct sk_buff *skb)
1660*4882a593Smuzhiyun {
1661*4882a593Smuzhiyun struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1662*4882a593Smuzhiyun
1663*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1664*4882a593Smuzhiyun
1665*4882a593Smuzhiyun if (rp->status)
1666*4882a593Smuzhiyun return;
1667*4882a593Smuzhiyun
1668*4882a593Smuzhiyun hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1669*4882a593Smuzhiyun hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1670*4882a593Smuzhiyun hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1671*4882a593Smuzhiyun hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1672*4882a593Smuzhiyun }
1673*4882a593Smuzhiyun
1674*4882a593Smuzhiyun static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1675*4882a593Smuzhiyun struct sk_buff *skb)
1676*4882a593Smuzhiyun {
1677*4882a593Smuzhiyun struct hci_cp_write_le_host_supported *sent;
1678*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1681*4882a593Smuzhiyun
1682*4882a593Smuzhiyun if (status)
1683*4882a593Smuzhiyun return;
1684*4882a593Smuzhiyun
1685*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1686*4882a593Smuzhiyun if (!sent)
1687*4882a593Smuzhiyun return;
1688*4882a593Smuzhiyun
1689*4882a593Smuzhiyun hci_dev_lock(hdev);
1690*4882a593Smuzhiyun
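/* features[1][0] is byte 0 of the extended (host) features page; keep the
 * LE and Simultaneous LE/BR-EDR host bits in sync with what was just
 * written to the controller.
 */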
1691*4882a593Smuzhiyun if (sent->le) {
1692*4882a593Smuzhiyun hdev->features[1][0] |= LMP_HOST_LE;
1693*4882a593Smuzhiyun hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1694*4882a593Smuzhiyun } else {
1695*4882a593Smuzhiyun hdev->features[1][0] &= ~LMP_HOST_LE;
1696*4882a593Smuzhiyun hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1697*4882a593Smuzhiyun hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1698*4882a593Smuzhiyun }
1699*4882a593Smuzhiyun
1700*4882a593Smuzhiyun if (sent->simul)
1701*4882a593Smuzhiyun hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1702*4882a593Smuzhiyun else
1703*4882a593Smuzhiyun hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1704*4882a593Smuzhiyun
1705*4882a593Smuzhiyun hci_dev_unlock(hdev);
1706*4882a593Smuzhiyun }
1707*4882a593Smuzhiyun
1708*4882a593Smuzhiyun static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1709*4882a593Smuzhiyun {
1710*4882a593Smuzhiyun struct hci_cp_le_set_adv_param *cp;
1711*4882a593Smuzhiyun u8 status = *((u8 *) skb->data);
1712*4882a593Smuzhiyun
1713*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1714*4882a593Smuzhiyun
1715*4882a593Smuzhiyun if (status)
1716*4882a593Smuzhiyun return;
1717*4882a593Smuzhiyun
1718*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1719*4882a593Smuzhiyun if (!cp)
1720*4882a593Smuzhiyun return;
1721*4882a593Smuzhiyun
1722*4882a593Smuzhiyun hci_dev_lock(hdev);
1723*4882a593Smuzhiyun hdev->adv_addr_type = cp->own_address_type;
1724*4882a593Smuzhiyun hci_dev_unlock(hdev);
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun
1727*4882a593Smuzhiyun static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1728*4882a593Smuzhiyun {
1729*4882a593Smuzhiyun struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1730*4882a593Smuzhiyun struct hci_cp_le_set_ext_adv_params *cp;
1731*4882a593Smuzhiyun struct adv_info *adv_instance;
1732*4882a593Smuzhiyun
1733*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun if (rp->status)
1736*4882a593Smuzhiyun return;
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1739*4882a593Smuzhiyun if (!cp)
1740*4882a593Smuzhiyun return;
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun hci_dev_lock(hdev);
1743*4882a593Smuzhiyun hdev->adv_addr_type = cp->own_addr_type;
1744*4882a593Smuzhiyun if (!hdev->cur_adv_instance) {
1745*4882a593Smuzhiyun /* Store in hdev for instance 0 */
1746*4882a593Smuzhiyun hdev->adv_tx_power = rp->tx_power;
1747*4882a593Smuzhiyun } else {
1748*4882a593Smuzhiyun adv_instance = hci_find_adv_instance(hdev,
1749*4882a593Smuzhiyun hdev->cur_adv_instance);
1750*4882a593Smuzhiyun if (adv_instance)
1751*4882a593Smuzhiyun adv_instance->tx_power = rp->tx_power;
1752*4882a593Smuzhiyun }
1753*4882a593Smuzhiyun /* Update adv data as tx power is known now */
1754*4882a593Smuzhiyun hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1755*4882a593Smuzhiyun hci_dev_unlock(hdev);
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1759*4882a593Smuzhiyun {
1760*4882a593Smuzhiyun struct hci_rp_read_rssi *rp = (void *) skb->data;
1761*4882a593Smuzhiyun struct hci_conn *conn;
1762*4882a593Smuzhiyun
1763*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1764*4882a593Smuzhiyun
1765*4882a593Smuzhiyun if (rp->status)
1766*4882a593Smuzhiyun return;
1767*4882a593Smuzhiyun
1768*4882a593Smuzhiyun hci_dev_lock(hdev);
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1771*4882a593Smuzhiyun if (conn)
1772*4882a593Smuzhiyun conn->rssi = rp->rssi;
1773*4882a593Smuzhiyun
1774*4882a593Smuzhiyun hci_dev_unlock(hdev);
1775*4882a593Smuzhiyun }
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1778*4882a593Smuzhiyun {
1779*4882a593Smuzhiyun struct hci_cp_read_tx_power *sent;
1780*4882a593Smuzhiyun struct hci_rp_read_tx_power *rp = (void *) skb->data;
1781*4882a593Smuzhiyun struct hci_conn *conn;
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1784*4882a593Smuzhiyun
1785*4882a593Smuzhiyun if (rp->status)
1786*4882a593Smuzhiyun return;
1787*4882a593Smuzhiyun
1788*4882a593Smuzhiyun sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1789*4882a593Smuzhiyun if (!sent)
1790*4882a593Smuzhiyun return;
1791*4882a593Smuzhiyun
1792*4882a593Smuzhiyun hci_dev_lock(hdev);
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1795*4882a593Smuzhiyun if (!conn)
1796*4882a593Smuzhiyun goto unlock;
1797*4882a593Smuzhiyun
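/* Per the Read_Transmit_Power_Level command, type 0x00 requests the
 * current transmit power level and 0x01 the maximum level.
 */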
1798*4882a593Smuzhiyun switch (sent->type) {
1799*4882a593Smuzhiyun case 0x00:
1800*4882a593Smuzhiyun conn->tx_power = rp->tx_power;
1801*4882a593Smuzhiyun break;
1802*4882a593Smuzhiyun case 0x01:
1803*4882a593Smuzhiyun conn->max_tx_power = rp->tx_power;
1804*4882a593Smuzhiyun break;
1805*4882a593Smuzhiyun }
1806*4882a593Smuzhiyun
1807*4882a593Smuzhiyun unlock:
1808*4882a593Smuzhiyun hci_dev_unlock(hdev);
1809*4882a593Smuzhiyun }
1810*4882a593Smuzhiyun
1811*4882a593Smuzhiyun static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1812*4882a593Smuzhiyun {
1813*4882a593Smuzhiyun u8 status = *((u8 *) skb->data);
1814*4882a593Smuzhiyun u8 *mode;
1815*4882a593Smuzhiyun
1816*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1817*4882a593Smuzhiyun
1818*4882a593Smuzhiyun if (status)
1819*4882a593Smuzhiyun return;
1820*4882a593Smuzhiyun
1821*4882a593Smuzhiyun mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1822*4882a593Smuzhiyun if (mode)
1823*4882a593Smuzhiyun hdev->ssp_debug_mode = *mode;
1824*4882a593Smuzhiyun }
1825*4882a593Smuzhiyun
1826*4882a593Smuzhiyun static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1827*4882a593Smuzhiyun {
1828*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun if (status) {
1831*4882a593Smuzhiyun hci_conn_check_pending(hdev);
1832*4882a593Smuzhiyun return;
1833*4882a593Smuzhiyun }
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun set_bit(HCI_INQUIRY, &hdev->flags);
1836*4882a593Smuzhiyun }
1837*4882a593Smuzhiyun
1838*4882a593Smuzhiyun static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1839*4882a593Smuzhiyun {
1840*4882a593Smuzhiyun struct hci_cp_create_conn *cp;
1841*4882a593Smuzhiyun struct hci_conn *conn;
1842*4882a593Smuzhiyun
1843*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1844*4882a593Smuzhiyun
1845*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1846*4882a593Smuzhiyun if (!cp)
1847*4882a593Smuzhiyun return;
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyun hci_dev_lock(hdev);
1850*4882a593Smuzhiyun
1851*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1854*4882a593Smuzhiyun
1855*4882a593Smuzhiyun if (status) {
1856*4882a593Smuzhiyun if (conn && conn->state == BT_CONNECT) {
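/* 0x0c (Command Disallowed) while conn->attempt is still at most 2
 * leaves the connection in BT_CONNECT2 so it can be retried; any
 * other failure tears the connection down.
 */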
1857*4882a593Smuzhiyun if (status != 0x0c || conn->attempt > 2) {
1858*4882a593Smuzhiyun conn->state = BT_CLOSED;
1859*4882a593Smuzhiyun hci_connect_cfm(conn, status);
1860*4882a593Smuzhiyun hci_conn_del(conn);
1861*4882a593Smuzhiyun } else
1862*4882a593Smuzhiyun conn->state = BT_CONNECT2;
1863*4882a593Smuzhiyun }
1864*4882a593Smuzhiyun } else {
1865*4882a593Smuzhiyun if (!conn) {
1866*4882a593Smuzhiyun conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1867*4882a593Smuzhiyun HCI_ROLE_MASTER);
1868*4882a593Smuzhiyun if (!conn)
1869*4882a593Smuzhiyun bt_dev_err(hdev, "no memory for new connection");
1870*4882a593Smuzhiyun }
1871*4882a593Smuzhiyun }
1872*4882a593Smuzhiyun
1873*4882a593Smuzhiyun hci_dev_unlock(hdev);
1874*4882a593Smuzhiyun }
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun struct hci_cp_add_sco *cp;
1879*4882a593Smuzhiyun struct hci_conn *acl, *sco;
1880*4882a593Smuzhiyun __u16 handle;
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1883*4882a593Smuzhiyun
1884*4882a593Smuzhiyun if (!status)
1885*4882a593Smuzhiyun return;
1886*4882a593Smuzhiyun
1887*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1888*4882a593Smuzhiyun if (!cp)
1889*4882a593Smuzhiyun return;
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun handle = __le16_to_cpu(cp->handle);
1892*4882a593Smuzhiyun
1893*4882a593Smuzhiyun BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1894*4882a593Smuzhiyun
1895*4882a593Smuzhiyun hci_dev_lock(hdev);
1896*4882a593Smuzhiyun
1897*4882a593Smuzhiyun acl = hci_conn_hash_lookup_handle(hdev, handle);
1898*4882a593Smuzhiyun if (acl) {
1899*4882a593Smuzhiyun sco = acl->link;
1900*4882a593Smuzhiyun if (sco) {
1901*4882a593Smuzhiyun sco->state = BT_CLOSED;
1902*4882a593Smuzhiyun
1903*4882a593Smuzhiyun hci_connect_cfm(sco, status);
1904*4882a593Smuzhiyun hci_conn_del(sco);
1905*4882a593Smuzhiyun }
1906*4882a593Smuzhiyun }
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun hci_dev_unlock(hdev);
1909*4882a593Smuzhiyun }
1910*4882a593Smuzhiyun
1911*4882a593Smuzhiyun static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1912*4882a593Smuzhiyun {
1913*4882a593Smuzhiyun struct hci_cp_auth_requested *cp;
1914*4882a593Smuzhiyun struct hci_conn *conn;
1915*4882a593Smuzhiyun
1916*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1917*4882a593Smuzhiyun
1918*4882a593Smuzhiyun if (!status)
1919*4882a593Smuzhiyun return;
1920*4882a593Smuzhiyun
1921*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1922*4882a593Smuzhiyun if (!cp)
1923*4882a593Smuzhiyun return;
1924*4882a593Smuzhiyun
1925*4882a593Smuzhiyun hci_dev_lock(hdev);
1926*4882a593Smuzhiyun
1927*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1928*4882a593Smuzhiyun if (conn) {
1929*4882a593Smuzhiyun if (conn->state == BT_CONFIG) {
1930*4882a593Smuzhiyun hci_connect_cfm(conn, status);
1931*4882a593Smuzhiyun hci_conn_drop(conn);
1932*4882a593Smuzhiyun }
1933*4882a593Smuzhiyun }
1934*4882a593Smuzhiyun
1935*4882a593Smuzhiyun hci_dev_unlock(hdev);
1936*4882a593Smuzhiyun }
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1939*4882a593Smuzhiyun {
1940*4882a593Smuzhiyun struct hci_cp_set_conn_encrypt *cp;
1941*4882a593Smuzhiyun struct hci_conn *conn;
1942*4882a593Smuzhiyun
1943*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun if (!status)
1946*4882a593Smuzhiyun return;
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1949*4882a593Smuzhiyun if (!cp)
1950*4882a593Smuzhiyun return;
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun hci_dev_lock(hdev);
1953*4882a593Smuzhiyun
1954*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1955*4882a593Smuzhiyun if (conn) {
1956*4882a593Smuzhiyun if (conn->state == BT_CONFIG) {
1957*4882a593Smuzhiyun hci_connect_cfm(conn, status);
1958*4882a593Smuzhiyun hci_conn_drop(conn);
1959*4882a593Smuzhiyun }
1960*4882a593Smuzhiyun }
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyun hci_dev_unlock(hdev);
1963*4882a593Smuzhiyun }
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1966*4882a593Smuzhiyun struct hci_conn *conn)
1967*4882a593Smuzhiyun {
1968*4882a593Smuzhiyun if (conn->state != BT_CONFIG || !conn->out)
1969*4882a593Smuzhiyun return 0;
1970*4882a593Smuzhiyun
1971*4882a593Smuzhiyun if (conn->pending_sec_level == BT_SECURITY_SDP)
1972*4882a593Smuzhiyun return 0;
1973*4882a593Smuzhiyun
1974*4882a593Smuzhiyun /* Only request authentication for SSP connections or non-SSP
1975*4882a593Smuzhiyun * devices with sec_level MEDIUM or HIGH or if MITM protection
1976*4882a593Smuzhiyun * is requested.
1977*4882a593Smuzhiyun */
1978*4882a593Smuzhiyun if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1979*4882a593Smuzhiyun conn->pending_sec_level != BT_SECURITY_FIPS &&
1980*4882a593Smuzhiyun conn->pending_sec_level != BT_SECURITY_HIGH &&
1981*4882a593Smuzhiyun conn->pending_sec_level != BT_SECURITY_MEDIUM)
1982*4882a593Smuzhiyun return 0;
1983*4882a593Smuzhiyun
1984*4882a593Smuzhiyun return 1;
1985*4882a593Smuzhiyun }
1986*4882a593Smuzhiyun
1987*4882a593Smuzhiyun static int hci_resolve_name(struct hci_dev *hdev,
1988*4882a593Smuzhiyun struct inquiry_entry *e)
1989*4882a593Smuzhiyun {
1990*4882a593Smuzhiyun struct hci_cp_remote_name_req cp;
1991*4882a593Smuzhiyun
1992*4882a593Smuzhiyun memset(&cp, 0, sizeof(cp));
1993*4882a593Smuzhiyun
1994*4882a593Smuzhiyun bacpy(&cp.bdaddr, &e->data.bdaddr);
1995*4882a593Smuzhiyun cp.pscan_rep_mode = e->data.pscan_rep_mode;
1996*4882a593Smuzhiyun cp.pscan_mode = e->data.pscan_mode;
1997*4882a593Smuzhiyun cp.clock_offset = e->data.clock_offset;
1998*4882a593Smuzhiyun
1999*4882a593Smuzhiyun return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2000*4882a593Smuzhiyun }
2001*4882a593Smuzhiyun
2002*4882a593Smuzhiyun static bool hci_resolve_next_name(struct hci_dev *hdev)
2003*4882a593Smuzhiyun {
2004*4882a593Smuzhiyun struct discovery_state *discov = &hdev->discovery;
2005*4882a593Smuzhiyun struct inquiry_entry *e;
2006*4882a593Smuzhiyun
2007*4882a593Smuzhiyun if (list_empty(&discov->resolve))
2008*4882a593Smuzhiyun return false;
2009*4882a593Smuzhiyun
2010*4882a593Smuzhiyun e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2011*4882a593Smuzhiyun if (!e)
2012*4882a593Smuzhiyun return false;
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun if (hci_resolve_name(hdev, e) == 0) {
2015*4882a593Smuzhiyun e->name_state = NAME_PENDING;
2016*4882a593Smuzhiyun return true;
2017*4882a593Smuzhiyun }
2018*4882a593Smuzhiyun
2019*4882a593Smuzhiyun return false;
2020*4882a593Smuzhiyun }
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2023*4882a593Smuzhiyun bdaddr_t *bdaddr, u8 *name, u8 name_len)
2024*4882a593Smuzhiyun {
2025*4882a593Smuzhiyun struct discovery_state *discov = &hdev->discovery;
2026*4882a593Smuzhiyun struct inquiry_entry *e;
2027*4882a593Smuzhiyun
2028*4882a593Smuzhiyun /* Update the mgmt connected state if necessary. Be careful with
2029*4882a593Smuzhiyun * conn objects that exist but are not (yet) connected however.
2030*4882a593Smuzhiyun * Only those in BT_CONFIG or BT_CONNECTED states can be
2031*4882a593Smuzhiyun * considered connected.
2032*4882a593Smuzhiyun */
2033*4882a593Smuzhiyun if (conn &&
2034*4882a593Smuzhiyun (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2035*4882a593Smuzhiyun !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2036*4882a593Smuzhiyun mgmt_device_connected(hdev, conn, 0, name, name_len);
2037*4882a593Smuzhiyun
2038*4882a593Smuzhiyun if (discov->state == DISCOVERY_STOPPED)
2039*4882a593Smuzhiyun return;
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun if (discov->state == DISCOVERY_STOPPING)
2042*4882a593Smuzhiyun goto discov_complete;
2043*4882a593Smuzhiyun
2044*4882a593Smuzhiyun if (discov->state != DISCOVERY_RESOLVING)
2045*4882a593Smuzhiyun return;
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2048*4882a593Smuzhiyun /* If the device was not found in the list of devices whose names
2049*4882a593Smuzhiyun * are pending resolution, there is no need to continue resolving
2050*4882a593Smuzhiyun * the next name; it will be done upon receiving another Remote
2051*4882a593Smuzhiyun * Name Request Complete event. */
2052*4882a593Smuzhiyun if (!e)
2053*4882a593Smuzhiyun return;
2054*4882a593Smuzhiyun
2055*4882a593Smuzhiyun list_del(&e->list);
2056*4882a593Smuzhiyun if (name) {
2057*4882a593Smuzhiyun e->name_state = NAME_KNOWN;
2058*4882a593Smuzhiyun mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2059*4882a593Smuzhiyun e->data.rssi, name, name_len);
2060*4882a593Smuzhiyun } else {
2061*4882a593Smuzhiyun e->name_state = NAME_NOT_KNOWN;
2062*4882a593Smuzhiyun }
2063*4882a593Smuzhiyun
2064*4882a593Smuzhiyun if (hci_resolve_next_name(hdev))
2065*4882a593Smuzhiyun return;
2066*4882a593Smuzhiyun
2067*4882a593Smuzhiyun discov_complete:
2068*4882a593Smuzhiyun hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2072*4882a593Smuzhiyun {
2073*4882a593Smuzhiyun struct hci_cp_remote_name_req *cp;
2074*4882a593Smuzhiyun struct hci_conn *conn;
2075*4882a593Smuzhiyun
2076*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2077*4882a593Smuzhiyun
2078*4882a593Smuzhiyun /* If successful, wait for the remote name request complete event
2079*4882a593Smuzhiyun * before checking whether authentication is needed. */
2080*4882a593Smuzhiyun if (!status)
2081*4882a593Smuzhiyun return;
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2084*4882a593Smuzhiyun if (!cp)
2085*4882a593Smuzhiyun return;
2086*4882a593Smuzhiyun
2087*4882a593Smuzhiyun hci_dev_lock(hdev);
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2090*4882a593Smuzhiyun
2091*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_MGMT))
2092*4882a593Smuzhiyun hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2093*4882a593Smuzhiyun
2094*4882a593Smuzhiyun if (!conn)
2095*4882a593Smuzhiyun goto unlock;
2096*4882a593Smuzhiyun
2097*4882a593Smuzhiyun if (!hci_outgoing_auth_needed(hdev, conn))
2098*4882a593Smuzhiyun goto unlock;
2099*4882a593Smuzhiyun
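/* The remote name request failed but authentication is still required
 * for this outgoing connection, so issue HCI_Authentication_Requested
 * here unless one is already pending.
 */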
2100*4882a593Smuzhiyun if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2101*4882a593Smuzhiyun struct hci_cp_auth_requested auth_cp;
2102*4882a593Smuzhiyun
2103*4882a593Smuzhiyun set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun auth_cp.handle = __cpu_to_le16(conn->handle);
2106*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2107*4882a593Smuzhiyun sizeof(auth_cp), &auth_cp);
2108*4882a593Smuzhiyun }
2109*4882a593Smuzhiyun
2110*4882a593Smuzhiyun unlock:
2111*4882a593Smuzhiyun hci_dev_unlock(hdev);
2112*4882a593Smuzhiyun }
2113*4882a593Smuzhiyun
2114*4882a593Smuzhiyun static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2115*4882a593Smuzhiyun {
2116*4882a593Smuzhiyun struct hci_cp_read_remote_features *cp;
2117*4882a593Smuzhiyun struct hci_conn *conn;
2118*4882a593Smuzhiyun
2119*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2120*4882a593Smuzhiyun
2121*4882a593Smuzhiyun if (!status)
2122*4882a593Smuzhiyun return;
2123*4882a593Smuzhiyun
2124*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2125*4882a593Smuzhiyun if (!cp)
2126*4882a593Smuzhiyun return;
2127*4882a593Smuzhiyun
2128*4882a593Smuzhiyun hci_dev_lock(hdev);
2129*4882a593Smuzhiyun
2130*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2131*4882a593Smuzhiyun if (conn) {
2132*4882a593Smuzhiyun if (conn->state == BT_CONFIG) {
2133*4882a593Smuzhiyun hci_connect_cfm(conn, status);
2134*4882a593Smuzhiyun hci_conn_drop(conn);
2135*4882a593Smuzhiyun }
2136*4882a593Smuzhiyun }
2137*4882a593Smuzhiyun
2138*4882a593Smuzhiyun hci_dev_unlock(hdev);
2139*4882a593Smuzhiyun }
2140*4882a593Smuzhiyun
2141*4882a593Smuzhiyun static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2142*4882a593Smuzhiyun {
2143*4882a593Smuzhiyun struct hci_cp_read_remote_ext_features *cp;
2144*4882a593Smuzhiyun struct hci_conn *conn;
2145*4882a593Smuzhiyun
2146*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2147*4882a593Smuzhiyun
2148*4882a593Smuzhiyun if (!status)
2149*4882a593Smuzhiyun return;
2150*4882a593Smuzhiyun
2151*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2152*4882a593Smuzhiyun if (!cp)
2153*4882a593Smuzhiyun return;
2154*4882a593Smuzhiyun
2155*4882a593Smuzhiyun hci_dev_lock(hdev);
2156*4882a593Smuzhiyun
2157*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2158*4882a593Smuzhiyun if (conn) {
2159*4882a593Smuzhiyun if (conn->state == BT_CONFIG) {
2160*4882a593Smuzhiyun hci_connect_cfm(conn, status);
2161*4882a593Smuzhiyun hci_conn_drop(conn);
2162*4882a593Smuzhiyun }
2163*4882a593Smuzhiyun }
2164*4882a593Smuzhiyun
2165*4882a593Smuzhiyun hci_dev_unlock(hdev);
2166*4882a593Smuzhiyun }
2167*4882a593Smuzhiyun
2168*4882a593Smuzhiyun static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2169*4882a593Smuzhiyun {
2170*4882a593Smuzhiyun struct hci_cp_setup_sync_conn *cp;
2171*4882a593Smuzhiyun struct hci_conn *acl, *sco;
2172*4882a593Smuzhiyun __u16 handle;
2173*4882a593Smuzhiyun
2174*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2175*4882a593Smuzhiyun
2176*4882a593Smuzhiyun if (!status)
2177*4882a593Smuzhiyun return;
2178*4882a593Smuzhiyun
2179*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2180*4882a593Smuzhiyun if (!cp)
2181*4882a593Smuzhiyun return;
2182*4882a593Smuzhiyun
2183*4882a593Smuzhiyun handle = __le16_to_cpu(cp->handle);
2184*4882a593Smuzhiyun
2185*4882a593Smuzhiyun BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2186*4882a593Smuzhiyun
2187*4882a593Smuzhiyun hci_dev_lock(hdev);
2188*4882a593Smuzhiyun
2189*4882a593Smuzhiyun acl = hci_conn_hash_lookup_handle(hdev, handle);
2190*4882a593Smuzhiyun if (acl) {
2191*4882a593Smuzhiyun sco = acl->link;
2192*4882a593Smuzhiyun if (sco) {
2193*4882a593Smuzhiyun sco->state = BT_CLOSED;
2194*4882a593Smuzhiyun
2195*4882a593Smuzhiyun hci_connect_cfm(sco, status);
2196*4882a593Smuzhiyun hci_conn_del(sco);
2197*4882a593Smuzhiyun }
2198*4882a593Smuzhiyun }
2199*4882a593Smuzhiyun
2200*4882a593Smuzhiyun hci_dev_unlock(hdev);
2201*4882a593Smuzhiyun }
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2204*4882a593Smuzhiyun {
2205*4882a593Smuzhiyun struct hci_cp_sniff_mode *cp;
2206*4882a593Smuzhiyun struct hci_conn *conn;
2207*4882a593Smuzhiyun
2208*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2209*4882a593Smuzhiyun
2210*4882a593Smuzhiyun if (!status)
2211*4882a593Smuzhiyun return;
2212*4882a593Smuzhiyun
2213*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2214*4882a593Smuzhiyun if (!cp)
2215*4882a593Smuzhiyun return;
2216*4882a593Smuzhiyun
2217*4882a593Smuzhiyun hci_dev_lock(hdev);
2218*4882a593Smuzhiyun
2219*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2220*4882a593Smuzhiyun if (conn) {
2221*4882a593Smuzhiyun clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2222*4882a593Smuzhiyun
2223*4882a593Smuzhiyun if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2224*4882a593Smuzhiyun hci_sco_setup(conn, status);
2225*4882a593Smuzhiyun }
2226*4882a593Smuzhiyun
2227*4882a593Smuzhiyun hci_dev_unlock(hdev);
2228*4882a593Smuzhiyun }
2229*4882a593Smuzhiyun
2230*4882a593Smuzhiyun static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2231*4882a593Smuzhiyun {
2232*4882a593Smuzhiyun struct hci_cp_exit_sniff_mode *cp;
2233*4882a593Smuzhiyun struct hci_conn *conn;
2234*4882a593Smuzhiyun
2235*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun if (!status)
2238*4882a593Smuzhiyun return;
2239*4882a593Smuzhiyun
2240*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2241*4882a593Smuzhiyun if (!cp)
2242*4882a593Smuzhiyun return;
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun hci_dev_lock(hdev);
2245*4882a593Smuzhiyun
2246*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2247*4882a593Smuzhiyun if (conn) {
2248*4882a593Smuzhiyun clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2249*4882a593Smuzhiyun
2250*4882a593Smuzhiyun if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2251*4882a593Smuzhiyun hci_sco_setup(conn, status);
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun
2254*4882a593Smuzhiyun hci_dev_unlock(hdev);
2255*4882a593Smuzhiyun }
2256*4882a593Smuzhiyun
2257*4882a593Smuzhiyun static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2258*4882a593Smuzhiyun {
2259*4882a593Smuzhiyun struct hci_cp_disconnect *cp;
2260*4882a593Smuzhiyun struct hci_conn *conn;
2261*4882a593Smuzhiyun
2262*4882a593Smuzhiyun if (!status)
2263*4882a593Smuzhiyun return;
2264*4882a593Smuzhiyun
2265*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2266*4882a593Smuzhiyun if (!cp)
2267*4882a593Smuzhiyun return;
2268*4882a593Smuzhiyun
2269*4882a593Smuzhiyun hci_dev_lock(hdev);
2270*4882a593Smuzhiyun
2271*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2272*4882a593Smuzhiyun if (conn) {
2273*4882a593Smuzhiyun u8 type = conn->type;
2274*4882a593Smuzhiyun
2275*4882a593Smuzhiyun mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2276*4882a593Smuzhiyun conn->dst_type, status);
2277*4882a593Smuzhiyun
2278*4882a593Smuzhiyun /* If the disconnection failed for any reason, the upper layer
2279*4882a593Smuzhiyun * does not retry to disconnect in current implementation.
2280*4882a593Smuzhiyun * Hence, we need to do some basic cleanup here and re-enable
2281*4882a593Smuzhiyun * advertising if necessary.
2282*4882a593Smuzhiyun */
2283*4882a593Smuzhiyun hci_conn_del(conn);
2284*4882a593Smuzhiyun if (type == LE_LINK)
2285*4882a593Smuzhiyun hci_req_reenable_advertising(hdev);
2286*4882a593Smuzhiyun }
2287*4882a593Smuzhiyun
2288*4882a593Smuzhiyun hci_dev_unlock(hdev);
2289*4882a593Smuzhiyun }
2290*4882a593Smuzhiyun
2291*4882a593Smuzhiyun static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2292*4882a593Smuzhiyun u8 peer_addr_type, u8 own_address_type,
2293*4882a593Smuzhiyun u8 filter_policy)
2294*4882a593Smuzhiyun {
2295*4882a593Smuzhiyun struct hci_conn *conn;
2296*4882a593Smuzhiyun
2297*4882a593Smuzhiyun conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2298*4882a593Smuzhiyun peer_addr_type);
2299*4882a593Smuzhiyun if (!conn)
2300*4882a593Smuzhiyun return;
2301*4882a593Smuzhiyun
2302*4882a593Smuzhiyun /* When controller based address resolution is in use, the new
2303*4882a593Smuzhiyun * address types 0x02 and 0x03 are used. These types need to be
2304*4882a593Smuzhiyun * converted back into either public or random address type.
2305*4882a593Smuzhiyun */
2306*4882a593Smuzhiyun if (use_ll_privacy(hdev) &&
2307*4882a593Smuzhiyun hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2308*4882a593Smuzhiyun switch (own_address_type) {
2309*4882a593Smuzhiyun case ADDR_LE_DEV_PUBLIC_RESOLVED:
2310*4882a593Smuzhiyun own_address_type = ADDR_LE_DEV_PUBLIC;
2311*4882a593Smuzhiyun break;
2312*4882a593Smuzhiyun case ADDR_LE_DEV_RANDOM_RESOLVED:
2313*4882a593Smuzhiyun own_address_type = ADDR_LE_DEV_RANDOM;
2314*4882a593Smuzhiyun break;
2315*4882a593Smuzhiyun }
2316*4882a593Smuzhiyun }
2317*4882a593Smuzhiyun
2318*4882a593Smuzhiyun /* Store the initiator and responder address information which
2319*4882a593Smuzhiyun * is needed for SMP. These values will not change during the
2320*4882a593Smuzhiyun * lifetime of the connection.
2321*4882a593Smuzhiyun */
2322*4882a593Smuzhiyun conn->init_addr_type = own_address_type;
2323*4882a593Smuzhiyun if (own_address_type == ADDR_LE_DEV_RANDOM)
2324*4882a593Smuzhiyun bacpy(&conn->init_addr, &hdev->random_addr);
2325*4882a593Smuzhiyun else
2326*4882a593Smuzhiyun bacpy(&conn->init_addr, &hdev->bdaddr);
2327*4882a593Smuzhiyun
2328*4882a593Smuzhiyun conn->resp_addr_type = peer_addr_type;
2329*4882a593Smuzhiyun bacpy(&conn->resp_addr, peer_addr);
2330*4882a593Smuzhiyun
2331*4882a593Smuzhiyun /* We don't want the connection attempt to stick around
2332*4882a593Smuzhiyun * indefinitely since LE doesn't have a page timeout concept
2333*4882a593Smuzhiyun * like BR/EDR. Set a timer for any connection that doesn't use
2334*4882a593Smuzhiyun * the white list for connecting.
2335*4882a593Smuzhiyun */
2336*4882a593Smuzhiyun if (filter_policy == HCI_LE_USE_PEER_ADDR)
2337*4882a593Smuzhiyun queue_delayed_work(conn->hdev->workqueue,
2338*4882a593Smuzhiyun &conn->le_conn_timeout,
2339*4882a593Smuzhiyun conn->conn_timeout);
2340*4882a593Smuzhiyun }
2341*4882a593Smuzhiyun
2342*4882a593Smuzhiyun static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2343*4882a593Smuzhiyun {
2344*4882a593Smuzhiyun struct hci_cp_le_create_conn *cp;
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun /* All connection failure handling is taken care of by the
2349*4882a593Smuzhiyun * hci_le_conn_failed function which is triggered by the HCI
2350*4882a593Smuzhiyun * request completion callbacks used for connecting.
2351*4882a593Smuzhiyun */
2352*4882a593Smuzhiyun if (status)
2353*4882a593Smuzhiyun return;
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2356*4882a593Smuzhiyun if (!cp)
2357*4882a593Smuzhiyun return;
2358*4882a593Smuzhiyun
2359*4882a593Smuzhiyun hci_dev_lock(hdev);
2360*4882a593Smuzhiyun
2361*4882a593Smuzhiyun cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2362*4882a593Smuzhiyun cp->own_address_type, cp->filter_policy);
2363*4882a593Smuzhiyun
2364*4882a593Smuzhiyun hci_dev_unlock(hdev);
2365*4882a593Smuzhiyun }
2366*4882a593Smuzhiyun
2367*4882a593Smuzhiyun static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2368*4882a593Smuzhiyun {
2369*4882a593Smuzhiyun struct hci_cp_le_ext_create_conn *cp;
2370*4882a593Smuzhiyun
2371*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2372*4882a593Smuzhiyun
2373*4882a593Smuzhiyun /* All connection failure handling is taken care of by the
2374*4882a593Smuzhiyun * hci_le_conn_failed function which is triggered by the HCI
2375*4882a593Smuzhiyun * request completion callbacks used for connecting.
2376*4882a593Smuzhiyun */
2377*4882a593Smuzhiyun if (status)
2378*4882a593Smuzhiyun return;
2379*4882a593Smuzhiyun
2380*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2381*4882a593Smuzhiyun if (!cp)
2382*4882a593Smuzhiyun return;
2383*4882a593Smuzhiyun
2384*4882a593Smuzhiyun hci_dev_lock(hdev);
2385*4882a593Smuzhiyun
2386*4882a593Smuzhiyun cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2387*4882a593Smuzhiyun cp->own_addr_type, cp->filter_policy);
2388*4882a593Smuzhiyun
2389*4882a593Smuzhiyun hci_dev_unlock(hdev);
2390*4882a593Smuzhiyun }
2391*4882a593Smuzhiyun
2392*4882a593Smuzhiyun static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2393*4882a593Smuzhiyun {
2394*4882a593Smuzhiyun struct hci_cp_le_read_remote_features *cp;
2395*4882a593Smuzhiyun struct hci_conn *conn;
2396*4882a593Smuzhiyun
2397*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2398*4882a593Smuzhiyun
2399*4882a593Smuzhiyun if (!status)
2400*4882a593Smuzhiyun return;
2401*4882a593Smuzhiyun
2402*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2403*4882a593Smuzhiyun if (!cp)
2404*4882a593Smuzhiyun return;
2405*4882a593Smuzhiyun
2406*4882a593Smuzhiyun hci_dev_lock(hdev);
2407*4882a593Smuzhiyun
2408*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2409*4882a593Smuzhiyun if (conn) {
2410*4882a593Smuzhiyun if (conn->state == BT_CONFIG) {
2411*4882a593Smuzhiyun hci_connect_cfm(conn, status);
2412*4882a593Smuzhiyun hci_conn_drop(conn);
2413*4882a593Smuzhiyun }
2414*4882a593Smuzhiyun }
2415*4882a593Smuzhiyun
2416*4882a593Smuzhiyun hci_dev_unlock(hdev);
2417*4882a593Smuzhiyun }
2418*4882a593Smuzhiyun
2419*4882a593Smuzhiyun static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2420*4882a593Smuzhiyun {
2421*4882a593Smuzhiyun struct hci_cp_le_start_enc *cp;
2422*4882a593Smuzhiyun struct hci_conn *conn;
2423*4882a593Smuzhiyun
2424*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2425*4882a593Smuzhiyun
2426*4882a593Smuzhiyun if (!status)
2427*4882a593Smuzhiyun return;
2428*4882a593Smuzhiyun
2429*4882a593Smuzhiyun hci_dev_lock(hdev);
2430*4882a593Smuzhiyun
2431*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2432*4882a593Smuzhiyun if (!cp)
2433*4882a593Smuzhiyun goto unlock;
2434*4882a593Smuzhiyun
2435*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2436*4882a593Smuzhiyun if (!conn)
2437*4882a593Smuzhiyun goto unlock;
2438*4882a593Smuzhiyun
2439*4882a593Smuzhiyun if (conn->state != BT_CONNECTED)
2440*4882a593Smuzhiyun goto unlock;
2441*4882a593Smuzhiyun
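/* Encryption could not be (re)started on an established connection,
 * so tear the link down and signal an authentication failure.
 */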
2442*4882a593Smuzhiyun hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2443*4882a593Smuzhiyun hci_conn_drop(conn);
2444*4882a593Smuzhiyun
2445*4882a593Smuzhiyun unlock:
2446*4882a593Smuzhiyun hci_dev_unlock(hdev);
2447*4882a593Smuzhiyun }
2448*4882a593Smuzhiyun
2449*4882a593Smuzhiyun static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2450*4882a593Smuzhiyun {
2451*4882a593Smuzhiyun struct hci_cp_switch_role *cp;
2452*4882a593Smuzhiyun struct hci_conn *conn;
2453*4882a593Smuzhiyun
2454*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2455*4882a593Smuzhiyun
2456*4882a593Smuzhiyun if (!status)
2457*4882a593Smuzhiyun return;
2458*4882a593Smuzhiyun
2459*4882a593Smuzhiyun cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2460*4882a593Smuzhiyun if (!cp)
2461*4882a593Smuzhiyun return;
2462*4882a593Smuzhiyun
2463*4882a593Smuzhiyun hci_dev_lock(hdev);
2464*4882a593Smuzhiyun
2465*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2466*4882a593Smuzhiyun if (conn)
2467*4882a593Smuzhiyun clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2468*4882a593Smuzhiyun
2469*4882a593Smuzhiyun hci_dev_unlock(hdev);
2470*4882a593Smuzhiyun }
2471*4882a593Smuzhiyun
2472*4882a593Smuzhiyun static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2473*4882a593Smuzhiyun {
2474*4882a593Smuzhiyun __u8 status = *((__u8 *) skb->data);
2475*4882a593Smuzhiyun struct discovery_state *discov = &hdev->discovery;
2476*4882a593Smuzhiyun struct inquiry_entry *e;
2477*4882a593Smuzhiyun
2478*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, status);
2479*4882a593Smuzhiyun
2480*4882a593Smuzhiyun hci_conn_check_pending(hdev);
2481*4882a593Smuzhiyun
2482*4882a593Smuzhiyun if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2483*4882a593Smuzhiyun return;
2484*4882a593Smuzhiyun
2485*4882a593Smuzhiyun smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2486*4882a593Smuzhiyun wake_up_bit(&hdev->flags, HCI_INQUIRY);
2487*4882a593Smuzhiyun
2488*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_MGMT))
2489*4882a593Smuzhiyun return;
2490*4882a593Smuzhiyun
2491*4882a593Smuzhiyun hci_dev_lock(hdev);
2492*4882a593Smuzhiyun
2493*4882a593Smuzhiyun if (discov->state != DISCOVERY_FINDING)
2494*4882a593Smuzhiyun goto unlock;
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun if (list_empty(&discov->resolve)) {
2497*4882a593Smuzhiyun /* When BR/EDR inquiry is active and no LE scanning is in
2498*4882a593Smuzhiyun * progress, then change discovery state to indicate completion.
2499*4882a593Smuzhiyun *
2500*4882a593Smuzhiyun * When running LE scanning and BR/EDR inquiry simultaneously
2501*4882a593Smuzhiyun * and the LE scan already finished, then change the discovery
2502*4882a593Smuzhiyun * state to indicate completion.
2503*4882a593Smuzhiyun */
2504*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2505*4882a593Smuzhiyun !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2506*4882a593Smuzhiyun hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2507*4882a593Smuzhiyun goto unlock;
2508*4882a593Smuzhiyun }
2509*4882a593Smuzhiyun
2510*4882a593Smuzhiyun e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2511*4882a593Smuzhiyun if (e && hci_resolve_name(hdev, e) == 0) {
2512*4882a593Smuzhiyun e->name_state = NAME_PENDING;
2513*4882a593Smuzhiyun hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2514*4882a593Smuzhiyun } else {
2515*4882a593Smuzhiyun /* When BR/EDR inquiry is active and no LE scanning is in
2516*4882a593Smuzhiyun * progress, then change discovery state to indicate completion.
2517*4882a593Smuzhiyun *
2518*4882a593Smuzhiyun * When running LE scanning and BR/EDR inquiry simultaneously
2519*4882a593Smuzhiyun * and the LE scan already finished, then change the discovery
2520*4882a593Smuzhiyun * state to indicate completion.
2521*4882a593Smuzhiyun */
2522*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2523*4882a593Smuzhiyun !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2524*4882a593Smuzhiyun hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2525*4882a593Smuzhiyun }
2526*4882a593Smuzhiyun
2527*4882a593Smuzhiyun unlock:
2528*4882a593Smuzhiyun hci_dev_unlock(hdev);
2529*4882a593Smuzhiyun }
2530*4882a593Smuzhiyun
2531*4882a593Smuzhiyun static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2532*4882a593Smuzhiyun {
2533*4882a593Smuzhiyun struct inquiry_data data;
2534*4882a593Smuzhiyun struct inquiry_info *info = (void *) (skb->data + 1);
2535*4882a593Smuzhiyun int num_rsp = *((__u8 *) skb->data);
2536*4882a593Smuzhiyun
2537*4882a593Smuzhiyun BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2538*4882a593Smuzhiyun
2539*4882a593Smuzhiyun if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2540*4882a593Smuzhiyun return;
2541*4882a593Smuzhiyun
2542*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2543*4882a593Smuzhiyun return;
2544*4882a593Smuzhiyun
2545*4882a593Smuzhiyun hci_dev_lock(hdev);
2546*4882a593Smuzhiyun
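/* Walk each inquiry_info entry, update the inquiry cache and report the
 * device to the management interface. Legacy inquiry results carry no
 * RSSI, so HCI_RSSI_INVALID is reported.
 */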
2547*4882a593Smuzhiyun for (; num_rsp; num_rsp--, info++) {
2548*4882a593Smuzhiyun u32 flags;
2549*4882a593Smuzhiyun
2550*4882a593Smuzhiyun bacpy(&data.bdaddr, &info->bdaddr);
2551*4882a593Smuzhiyun data.pscan_rep_mode = info->pscan_rep_mode;
2552*4882a593Smuzhiyun data.pscan_period_mode = info->pscan_period_mode;
2553*4882a593Smuzhiyun data.pscan_mode = info->pscan_mode;
2554*4882a593Smuzhiyun memcpy(data.dev_class, info->dev_class, 3);
2555*4882a593Smuzhiyun data.clock_offset = info->clock_offset;
2556*4882a593Smuzhiyun data.rssi = HCI_RSSI_INVALID;
2557*4882a593Smuzhiyun data.ssp_mode = 0x00;
2558*4882a593Smuzhiyun
2559*4882a593Smuzhiyun flags = hci_inquiry_cache_update(hdev, &data, false);
2560*4882a593Smuzhiyun
2561*4882a593Smuzhiyun mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2562*4882a593Smuzhiyun info->dev_class, HCI_RSSI_INVALID,
2563*4882a593Smuzhiyun flags, NULL, 0, NULL, 0);
2564*4882a593Smuzhiyun }
2565*4882a593Smuzhiyun
2566*4882a593Smuzhiyun hci_dev_unlock(hdev);
2567*4882a593Smuzhiyun }
2568*4882a593Smuzhiyun
2569*4882a593Smuzhiyun static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2570*4882a593Smuzhiyun {
2571*4882a593Smuzhiyun struct hci_ev_conn_complete *ev = (void *) skb->data;
2572*4882a593Smuzhiyun struct hci_conn *conn;
2573*4882a593Smuzhiyun
2574*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
2575*4882a593Smuzhiyun
2576*4882a593Smuzhiyun hci_dev_lock(hdev);
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2579*4882a593Smuzhiyun if (!conn) {
2580*4882a593Smuzhiyun /* Connection may not exist if auto-connected. Check the bredr
2581*4882a593Smuzhiyun * allowlist to see if this device is allowed to auto connect.
2582*4882a593Smuzhiyun * If link is an ACL type, create a connection class
2583*4882a593Smuzhiyun * automatically.
2584*4882a593Smuzhiyun *
2585*4882a593Smuzhiyun * Auto-connect will only occur if the event filter is
2586*4882a593Smuzhiyun * programmed with a given address. Right now, event filter is
2587*4882a593Smuzhiyun * only used during suspend.
2588*4882a593Smuzhiyun */
2589*4882a593Smuzhiyun if (ev->link_type == ACL_LINK &&
2590*4882a593Smuzhiyun hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
2591*4882a593Smuzhiyun &ev->bdaddr,
2592*4882a593Smuzhiyun BDADDR_BREDR)) {
2593*4882a593Smuzhiyun conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2594*4882a593Smuzhiyun HCI_ROLE_SLAVE);
2595*4882a593Smuzhiyun if (!conn) {
2596*4882a593Smuzhiyun bt_dev_err(hdev, "no memory for new conn");
2597*4882a593Smuzhiyun goto unlock;
2598*4882a593Smuzhiyun }
2599*4882a593Smuzhiyun } else {
2600*4882a593Smuzhiyun if (ev->link_type != SCO_LINK)
2601*4882a593Smuzhiyun goto unlock;
2602*4882a593Smuzhiyun
2603*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2604*4882a593Smuzhiyun &ev->bdaddr);
2605*4882a593Smuzhiyun if (!conn)
2606*4882a593Smuzhiyun goto unlock;
2607*4882a593Smuzhiyun
2608*4882a593Smuzhiyun conn->type = SCO_LINK;
2609*4882a593Smuzhiyun }
2610*4882a593Smuzhiyun }
2611*4882a593Smuzhiyun
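/* A zero status means the link is established: record the handle and
 * move the connection into the appropriate state.
 */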
2612*4882a593Smuzhiyun if (!ev->status) {
2613*4882a593Smuzhiyun conn->handle = __le16_to_cpu(ev->handle);
2614*4882a593Smuzhiyun
2615*4882a593Smuzhiyun if (conn->type == ACL_LINK) {
2616*4882a593Smuzhiyun conn->state = BT_CONFIG;
2617*4882a593Smuzhiyun hci_conn_hold(conn);
2618*4882a593Smuzhiyun
2619*4882a593Smuzhiyun if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2620*4882a593Smuzhiyun !hci_find_link_key(hdev, &ev->bdaddr))
2621*4882a593Smuzhiyun conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2622*4882a593Smuzhiyun else
2623*4882a593Smuzhiyun conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2624*4882a593Smuzhiyun } else
2625*4882a593Smuzhiyun conn->state = BT_CONNECTED;
2626*4882a593Smuzhiyun
2627*4882a593Smuzhiyun hci_debugfs_create_conn(conn);
2628*4882a593Smuzhiyun hci_conn_add_sysfs(conn);
2629*4882a593Smuzhiyun
2630*4882a593Smuzhiyun if (test_bit(HCI_AUTH, &hdev->flags))
2631*4882a593Smuzhiyun set_bit(HCI_CONN_AUTH, &conn->flags);
2632*4882a593Smuzhiyun
2633*4882a593Smuzhiyun if (test_bit(HCI_ENCRYPT, &hdev->flags))
2634*4882a593Smuzhiyun set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2635*4882a593Smuzhiyun
2636*4882a593Smuzhiyun /* Get remote features */
2637*4882a593Smuzhiyun if (conn->type == ACL_LINK) {
2638*4882a593Smuzhiyun struct hci_cp_read_remote_features cp;
2639*4882a593Smuzhiyun cp.handle = ev->handle;
2640*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2641*4882a593Smuzhiyun sizeof(cp), &cp);
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun hci_req_update_scan(hdev);
2644*4882a593Smuzhiyun }
2645*4882a593Smuzhiyun
2646*4882a593Smuzhiyun /* Set packet type for incoming connection */
2647*4882a593Smuzhiyun if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2648*4882a593Smuzhiyun struct hci_cp_change_conn_ptype cp;
2649*4882a593Smuzhiyun cp.handle = ev->handle;
2650*4882a593Smuzhiyun cp.pkt_type = cpu_to_le16(conn->pkt_type);
2651*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2652*4882a593Smuzhiyun &cp);
2653*4882a593Smuzhiyun }
2654*4882a593Smuzhiyun } else {
2655*4882a593Smuzhiyun conn->state = BT_CLOSED;
2656*4882a593Smuzhiyun if (conn->type == ACL_LINK)
2657*4882a593Smuzhiyun mgmt_connect_failed(hdev, &conn->dst, conn->type,
2658*4882a593Smuzhiyun conn->dst_type, ev->status);
2659*4882a593Smuzhiyun }
2660*4882a593Smuzhiyun
2661*4882a593Smuzhiyun if (conn->type == ACL_LINK)
2662*4882a593Smuzhiyun hci_sco_setup(conn, ev->status);
2663*4882a593Smuzhiyun
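/* On failure notify the upper layers and delete the connection object.
 * On success for SCO links, tell the driver when CVSD audio is in use
 * before confirming the connection.
 */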
2664*4882a593Smuzhiyun if (ev->status) {
2665*4882a593Smuzhiyun hci_connect_cfm(conn, ev->status);
2666*4882a593Smuzhiyun hci_conn_del(conn);
2667*4882a593Smuzhiyun } else if (ev->link_type == SCO_LINK) {
2668*4882a593Smuzhiyun switch (conn->setting & SCO_AIRMODE_MASK) {
2669*4882a593Smuzhiyun case SCO_AIRMODE_CVSD:
2670*4882a593Smuzhiyun if (hdev->notify)
2671*4882a593Smuzhiyun hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2672*4882a593Smuzhiyun break;
2673*4882a593Smuzhiyun }
2674*4882a593Smuzhiyun
2675*4882a593Smuzhiyun hci_connect_cfm(conn, ev->status);
2676*4882a593Smuzhiyun }
2677*4882a593Smuzhiyun
2678*4882a593Smuzhiyun unlock:
2679*4882a593Smuzhiyun hci_dev_unlock(hdev);
2680*4882a593Smuzhiyun
2681*4882a593Smuzhiyun hci_conn_check_pending(hdev);
2682*4882a593Smuzhiyun }
2683*4882a593Smuzhiyun
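/* Reject an incoming connection request from the given address. */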
2684*4882a593Smuzhiyun static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2685*4882a593Smuzhiyun {
2686*4882a593Smuzhiyun struct hci_cp_reject_conn_req cp;
2687*4882a593Smuzhiyun
2688*4882a593Smuzhiyun bacpy(&cp.bdaddr, bdaddr);
2689*4882a593Smuzhiyun cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2690*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2691*4882a593Smuzhiyun }
2692*4882a593Smuzhiyun
2693*4882a593Smuzhiyun static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2694*4882a593Smuzhiyun {
2695*4882a593Smuzhiyun struct hci_ev_conn_request *ev = (void *) skb->data;
2696*4882a593Smuzhiyun int mask = hdev->link_mode;
2697*4882a593Smuzhiyun struct inquiry_entry *ie;
2698*4882a593Smuzhiyun struct hci_conn *conn;
2699*4882a593Smuzhiyun __u8 flags = 0;
2700*4882a593Smuzhiyun
2701*4882a593Smuzhiyun BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2702*4882a593Smuzhiyun ev->link_type);
2703*4882a593Smuzhiyun
2704*4882a593Smuzhiyun mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2705*4882a593Smuzhiyun &flags);
2706*4882a593Smuzhiyun
2707*4882a593Smuzhiyun if (!(mask & HCI_LM_ACCEPT)) {
2708*4882a593Smuzhiyun hci_reject_conn(hdev, &ev->bdaddr);
2709*4882a593Smuzhiyun return;
2710*4882a593Smuzhiyun }
2711*4882a593Smuzhiyun
2712*4882a593Smuzhiyun if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2713*4882a593Smuzhiyun BDADDR_BREDR)) {
2714*4882a593Smuzhiyun hci_reject_conn(hdev, &ev->bdaddr);
2715*4882a593Smuzhiyun return;
2716*4882a593Smuzhiyun }
2717*4882a593Smuzhiyun
2718*4882a593Smuzhiyun /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2719*4882a593Smuzhiyun * connection. These features are only touched through mgmt so
2720*4882a593Smuzhiyun * only do the checks if HCI_MGMT is set.
2721*4882a593Smuzhiyun */
2722*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2723*4882a593Smuzhiyun !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2724*4882a593Smuzhiyun !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr,
2725*4882a593Smuzhiyun BDADDR_BREDR)) {
2726*4882a593Smuzhiyun hci_reject_conn(hdev, &ev->bdaddr);
2727*4882a593Smuzhiyun return;
2728*4882a593Smuzhiyun }
2729*4882a593Smuzhiyun
2730*4882a593Smuzhiyun /* Connection accepted */
2731*4882a593Smuzhiyun
2732*4882a593Smuzhiyun hci_dev_lock(hdev);
2733*4882a593Smuzhiyun
2734*4882a593Smuzhiyun ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2735*4882a593Smuzhiyun if (ie)
2736*4882a593Smuzhiyun memcpy(ie->data.dev_class, ev->dev_class, 3);
2737*4882a593Smuzhiyun
2738*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2739*4882a593Smuzhiyun &ev->bdaddr);
2740*4882a593Smuzhiyun if (!conn) {
2741*4882a593Smuzhiyun conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2742*4882a593Smuzhiyun HCI_ROLE_SLAVE);
2743*4882a593Smuzhiyun if (!conn) {
2744*4882a593Smuzhiyun bt_dev_err(hdev, "no memory for new connection");
2745*4882a593Smuzhiyun hci_dev_unlock(hdev);
2746*4882a593Smuzhiyun return;
2747*4882a593Smuzhiyun }
2748*4882a593Smuzhiyun }
2749*4882a593Smuzhiyun
2750*4882a593Smuzhiyun memcpy(conn->dev_class, ev->dev_class, 3);
2751*4882a593Smuzhiyun
2752*4882a593Smuzhiyun hci_dev_unlock(hdev);
2753*4882a593Smuzhiyun
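/* Accept the request: plain ACL links (and SCO on controllers without
 * eSCO support) use Accept Connection Request, eSCO capable controllers
 * use Accept Synchronous Connection Request, and deferred requests are
 * confirmed later by the upper layer.
 */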
2754*4882a593Smuzhiyun if (ev->link_type == ACL_LINK ||
2755*4882a593Smuzhiyun (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2756*4882a593Smuzhiyun struct hci_cp_accept_conn_req cp;
2757*4882a593Smuzhiyun conn->state = BT_CONNECT;
2758*4882a593Smuzhiyun
2759*4882a593Smuzhiyun bacpy(&cp.bdaddr, &ev->bdaddr);
2760*4882a593Smuzhiyun
2761*4882a593Smuzhiyun if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2762*4882a593Smuzhiyun cp.role = 0x00; /* Become central */
2763*4882a593Smuzhiyun else
2764*4882a593Smuzhiyun cp.role = 0x01; /* Remain peripheral */
2765*4882a593Smuzhiyun
2766*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2767*4882a593Smuzhiyun } else if (!(flags & HCI_PROTO_DEFER)) {
2768*4882a593Smuzhiyun struct hci_cp_accept_sync_conn_req cp;
2769*4882a593Smuzhiyun conn->state = BT_CONNECT;
2770*4882a593Smuzhiyun
2771*4882a593Smuzhiyun bacpy(&cp.bdaddr, &ev->bdaddr);
2772*4882a593Smuzhiyun cp.pkt_type = cpu_to_le16(conn->pkt_type);
2773*4882a593Smuzhiyun
2774*4882a593Smuzhiyun cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2775*4882a593Smuzhiyun cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2776*4882a593Smuzhiyun cp.max_latency = cpu_to_le16(0xffff);
2777*4882a593Smuzhiyun cp.content_format = cpu_to_le16(hdev->voice_setting);
2778*4882a593Smuzhiyun cp.retrans_effort = 0xff;
2779*4882a593Smuzhiyun
2780*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2781*4882a593Smuzhiyun &cp);
2782*4882a593Smuzhiyun } else {
2783*4882a593Smuzhiyun conn->state = BT_CONNECT2;
2784*4882a593Smuzhiyun hci_connect_cfm(conn, 0);
2785*4882a593Smuzhiyun }
2786*4882a593Smuzhiyun }
2787*4882a593Smuzhiyun
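/* Translate an HCI disconnect reason into the mgmt interface reason. */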
2788*4882a593Smuzhiyun static u8 hci_to_mgmt_reason(u8 err)
2789*4882a593Smuzhiyun {
2790*4882a593Smuzhiyun switch (err) {
2791*4882a593Smuzhiyun case HCI_ERROR_CONNECTION_TIMEOUT:
2792*4882a593Smuzhiyun return MGMT_DEV_DISCONN_TIMEOUT;
2793*4882a593Smuzhiyun case HCI_ERROR_REMOTE_USER_TERM:
2794*4882a593Smuzhiyun case HCI_ERROR_REMOTE_LOW_RESOURCES:
2795*4882a593Smuzhiyun case HCI_ERROR_REMOTE_POWER_OFF:
2796*4882a593Smuzhiyun return MGMT_DEV_DISCONN_REMOTE;
2797*4882a593Smuzhiyun case HCI_ERROR_LOCAL_HOST_TERM:
2798*4882a593Smuzhiyun return MGMT_DEV_DISCONN_LOCAL_HOST;
2799*4882a593Smuzhiyun default:
2800*4882a593Smuzhiyun return MGMT_DEV_DISCONN_UNKNOWN;
2801*4882a593Smuzhiyun }
2802*4882a593Smuzhiyun }
2803*4882a593Smuzhiyun
2804*4882a593Smuzhiyun static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2805*4882a593Smuzhiyun {
2806*4882a593Smuzhiyun struct hci_ev_disconn_complete *ev = (void *) skb->data;
2807*4882a593Smuzhiyun u8 reason;
2808*4882a593Smuzhiyun struct hci_conn_params *params;
2809*4882a593Smuzhiyun struct hci_conn *conn;
2810*4882a593Smuzhiyun bool mgmt_connected;
2811*4882a593Smuzhiyun u8 type;
2812*4882a593Smuzhiyun
2813*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2814*4882a593Smuzhiyun
2815*4882a593Smuzhiyun hci_dev_lock(hdev);
2816*4882a593Smuzhiyun
2817*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2818*4882a593Smuzhiyun if (!conn)
2819*4882a593Smuzhiyun goto unlock;
2820*4882a593Smuzhiyun
2821*4882a593Smuzhiyun if (ev->status) {
2822*4882a593Smuzhiyun mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2823*4882a593Smuzhiyun conn->dst_type, ev->status);
2824*4882a593Smuzhiyun goto unlock;
2825*4882a593Smuzhiyun }
2826*4882a593Smuzhiyun
2827*4882a593Smuzhiyun conn->state = BT_CLOSED;
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2830*4882a593Smuzhiyun
2831*4882a593Smuzhiyun if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2832*4882a593Smuzhiyun reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2833*4882a593Smuzhiyun else
2834*4882a593Smuzhiyun reason = hci_to_mgmt_reason(ev->reason);
2835*4882a593Smuzhiyun
2836*4882a593Smuzhiyun mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2837*4882a593Smuzhiyun reason, mgmt_connected);
2838*4882a593Smuzhiyun
2839*4882a593Smuzhiyun if (conn->type == ACL_LINK) {
2840*4882a593Smuzhiyun if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2841*4882a593Smuzhiyun hci_remove_link_key(hdev, &conn->dst);
2842*4882a593Smuzhiyun
2843*4882a593Smuzhiyun hci_req_update_scan(hdev);
2844*4882a593Smuzhiyun }
2845*4882a593Smuzhiyun
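/* If auto-connect parameters exist for this device, re-queue it for a
 * background (re)connection where the policy allows it.
 */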
2846*4882a593Smuzhiyun params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2847*4882a593Smuzhiyun if (params) {
2848*4882a593Smuzhiyun switch (params->auto_connect) {
2849*4882a593Smuzhiyun case HCI_AUTO_CONN_LINK_LOSS:
2850*4882a593Smuzhiyun if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2851*4882a593Smuzhiyun break;
2852*4882a593Smuzhiyun fallthrough;
2853*4882a593Smuzhiyun
2854*4882a593Smuzhiyun case HCI_AUTO_CONN_DIRECT:
2855*4882a593Smuzhiyun case HCI_AUTO_CONN_ALWAYS:
2856*4882a593Smuzhiyun list_del_init(&params->action);
2857*4882a593Smuzhiyun list_add(&params->action, &hdev->pend_le_conns);
2858*4882a593Smuzhiyun hci_update_background_scan(hdev);
2859*4882a593Smuzhiyun break;
2860*4882a593Smuzhiyun
2861*4882a593Smuzhiyun default:
2862*4882a593Smuzhiyun break;
2863*4882a593Smuzhiyun }
2864*4882a593Smuzhiyun }
2865*4882a593Smuzhiyun
2866*4882a593Smuzhiyun type = conn->type;
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun hci_disconn_cfm(conn, ev->reason);
2869*4882a593Smuzhiyun hci_conn_del(conn);
2870*4882a593Smuzhiyun
2871*4882a593Smuzhiyun /* The suspend notifier is waiting for all devices to disconnect so
2872*4882a593Smuzhiyun * clear the bit from pending tasks and inform the wait queue.
2873*4882a593Smuzhiyun */
2874*4882a593Smuzhiyun if (list_empty(&hdev->conn_hash.list) &&
2875*4882a593Smuzhiyun test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2876*4882a593Smuzhiyun wake_up(&hdev->suspend_wait_q);
2877*4882a593Smuzhiyun }
2878*4882a593Smuzhiyun
2879*4882a593Smuzhiyun /* Re-enable advertising if necessary, since it might
2880*4882a593Smuzhiyun * have been disabled by the connection. From the
2881*4882a593Smuzhiyun * HCI_LE_Set_Advertise_Enable command description in
2882*4882a593Smuzhiyun * the core specification (v4.0):
2883*4882a593Smuzhiyun * "The Controller shall continue advertising until the Host
2884*4882a593Smuzhiyun * issues an LE_Set_Advertise_Enable command with
2885*4882a593Smuzhiyun * Advertising_Enable set to 0x00 (Advertising is disabled)
2886*4882a593Smuzhiyun * or until a connection is created or until the Advertising
2887*4882a593Smuzhiyun * is timed out due to Directed Advertising."
2888*4882a593Smuzhiyun */
2889*4882a593Smuzhiyun if (type == LE_LINK)
2890*4882a593Smuzhiyun hci_req_reenable_advertising(hdev);
2891*4882a593Smuzhiyun
2892*4882a593Smuzhiyun unlock:
2893*4882a593Smuzhiyun hci_dev_unlock(hdev);
2894*4882a593Smuzhiyun }
2895*4882a593Smuzhiyun
2896*4882a593Smuzhiyun static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2897*4882a593Smuzhiyun {
2898*4882a593Smuzhiyun struct hci_ev_auth_complete *ev = (void *) skb->data;
2899*4882a593Smuzhiyun struct hci_conn *conn;
2900*4882a593Smuzhiyun
2901*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2902*4882a593Smuzhiyun
2903*4882a593Smuzhiyun hci_dev_lock(hdev);
2904*4882a593Smuzhiyun
2905*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2906*4882a593Smuzhiyun if (!conn)
2907*4882a593Smuzhiyun goto unlock;
2908*4882a593Smuzhiyun
2909*4882a593Smuzhiyun if (!ev->status) {
2910*4882a593Smuzhiyun clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2911*4882a593Smuzhiyun
2912*4882a593Smuzhiyun if (!hci_conn_ssp_enabled(conn) &&
2913*4882a593Smuzhiyun test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2914*4882a593Smuzhiyun bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2915*4882a593Smuzhiyun } else {
2916*4882a593Smuzhiyun set_bit(HCI_CONN_AUTH, &conn->flags);
2917*4882a593Smuzhiyun conn->sec_level = conn->pending_sec_level;
2918*4882a593Smuzhiyun }
2919*4882a593Smuzhiyun } else {
2920*4882a593Smuzhiyun if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2921*4882a593Smuzhiyun set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2922*4882a593Smuzhiyun
2923*4882a593Smuzhiyun mgmt_auth_failed(conn, ev->status);
2924*4882a593Smuzhiyun }
2925*4882a593Smuzhiyun
2926*4882a593Smuzhiyun clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2927*4882a593Smuzhiyun clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2928*4882a593Smuzhiyun
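/* During connection setup, a successful authentication on an SSP link
 * is followed by enabling encryption; otherwise the connection is
 * considered established. For already established links just report
 * the result through the auth confirmation callbacks.
 */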
2929*4882a593Smuzhiyun if (conn->state == BT_CONFIG) {
2930*4882a593Smuzhiyun if (!ev->status && hci_conn_ssp_enabled(conn)) {
2931*4882a593Smuzhiyun struct hci_cp_set_conn_encrypt cp;
2932*4882a593Smuzhiyun cp.handle = ev->handle;
2933*4882a593Smuzhiyun cp.encrypt = 0x01;
2934*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2935*4882a593Smuzhiyun &cp);
2936*4882a593Smuzhiyun } else {
2937*4882a593Smuzhiyun conn->state = BT_CONNECTED;
2938*4882a593Smuzhiyun hci_connect_cfm(conn, ev->status);
2939*4882a593Smuzhiyun hci_conn_drop(conn);
2940*4882a593Smuzhiyun }
2941*4882a593Smuzhiyun } else {
2942*4882a593Smuzhiyun hci_auth_cfm(conn, ev->status);
2943*4882a593Smuzhiyun
2944*4882a593Smuzhiyun hci_conn_hold(conn);
2945*4882a593Smuzhiyun conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2946*4882a593Smuzhiyun hci_conn_drop(conn);
2947*4882a593Smuzhiyun }
2948*4882a593Smuzhiyun
2949*4882a593Smuzhiyun if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2950*4882a593Smuzhiyun if (!ev->status) {
2951*4882a593Smuzhiyun struct hci_cp_set_conn_encrypt cp;
2952*4882a593Smuzhiyun cp.handle = ev->handle;
2953*4882a593Smuzhiyun cp.encrypt = 0x01;
2954*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2955*4882a593Smuzhiyun &cp);
2956*4882a593Smuzhiyun } else {
2957*4882a593Smuzhiyun clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2958*4882a593Smuzhiyun hci_encrypt_cfm(conn, ev->status);
2959*4882a593Smuzhiyun }
2960*4882a593Smuzhiyun }
2961*4882a593Smuzhiyun
2962*4882a593Smuzhiyun unlock:
2963*4882a593Smuzhiyun hci_dev_unlock(hdev);
2964*4882a593Smuzhiyun }
2965*4882a593Smuzhiyun
2966*4882a593Smuzhiyun static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2967*4882a593Smuzhiyun {
2968*4882a593Smuzhiyun struct hci_ev_remote_name *ev = (void *) skb->data;
2969*4882a593Smuzhiyun struct hci_conn *conn;
2970*4882a593Smuzhiyun
2971*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
2972*4882a593Smuzhiyun
2973*4882a593Smuzhiyun hci_conn_check_pending(hdev);
2974*4882a593Smuzhiyun
2975*4882a593Smuzhiyun hci_dev_lock(hdev);
2976*4882a593Smuzhiyun
2977*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2978*4882a593Smuzhiyun
2979*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_MGMT))
2980*4882a593Smuzhiyun goto check_auth;
2981*4882a593Smuzhiyun
2982*4882a593Smuzhiyun if (ev->status == 0)
2983*4882a593Smuzhiyun hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2984*4882a593Smuzhiyun strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2985*4882a593Smuzhiyun else
2986*4882a593Smuzhiyun hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2987*4882a593Smuzhiyun
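/* If this was an outgoing connection that still requires
 * authentication, request it now that the name lookup has finished.
 */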
2988*4882a593Smuzhiyun check_auth:
2989*4882a593Smuzhiyun if (!conn)
2990*4882a593Smuzhiyun goto unlock;
2991*4882a593Smuzhiyun
2992*4882a593Smuzhiyun if (!hci_outgoing_auth_needed(hdev, conn))
2993*4882a593Smuzhiyun goto unlock;
2994*4882a593Smuzhiyun
2995*4882a593Smuzhiyun if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2996*4882a593Smuzhiyun struct hci_cp_auth_requested cp;
2997*4882a593Smuzhiyun
2998*4882a593Smuzhiyun set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2999*4882a593Smuzhiyun
3000*4882a593Smuzhiyun cp.handle = __cpu_to_le16(conn->handle);
3001*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3002*4882a593Smuzhiyun }
3003*4882a593Smuzhiyun
3004*4882a593Smuzhiyun unlock:
3005*4882a593Smuzhiyun hci_dev_unlock(hdev);
3006*4882a593Smuzhiyun }
3007*4882a593Smuzhiyun
3008*4882a593Smuzhiyun static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3009*4882a593Smuzhiyun u16 opcode, struct sk_buff *skb)
3010*4882a593Smuzhiyun {
3011*4882a593Smuzhiyun const struct hci_rp_read_enc_key_size *rp;
3012*4882a593Smuzhiyun struct hci_conn *conn;
3013*4882a593Smuzhiyun u16 handle;
3014*4882a593Smuzhiyun
3015*4882a593Smuzhiyun BT_DBG("%s status 0x%02x", hdev->name, status);
3016*4882a593Smuzhiyun
3017*4882a593Smuzhiyun if (!skb || skb->len < sizeof(*rp)) {
3018*4882a593Smuzhiyun bt_dev_err(hdev, "invalid read key size response");
3019*4882a593Smuzhiyun return;
3020*4882a593Smuzhiyun }
3021*4882a593Smuzhiyun
3022*4882a593Smuzhiyun rp = (void *)skb->data;
3023*4882a593Smuzhiyun handle = le16_to_cpu(rp->handle);
3024*4882a593Smuzhiyun
3025*4882a593Smuzhiyun hci_dev_lock(hdev);
3026*4882a593Smuzhiyun
3027*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, handle);
3028*4882a593Smuzhiyun if (!conn)
3029*4882a593Smuzhiyun goto unlock;
3030*4882a593Smuzhiyun
3031*4882a593Smuzhiyun /* While unexpected, the read_enc_key_size command may fail. The most
3032*4882a593Smuzhiyun * secure approach is to then assume the key size is 0 to force a
3033*4882a593Smuzhiyun * disconnection.
3034*4882a593Smuzhiyun */
3035*4882a593Smuzhiyun if (rp->status) {
3036*4882a593Smuzhiyun bt_dev_err(hdev, "failed to read key size for handle %u",
3037*4882a593Smuzhiyun handle);
3038*4882a593Smuzhiyun conn->enc_key_size = 0;
3039*4882a593Smuzhiyun } else {
3040*4882a593Smuzhiyun conn->enc_key_size = rp->key_size;
3041*4882a593Smuzhiyun }
3042*4882a593Smuzhiyun
3043*4882a593Smuzhiyun hci_encrypt_cfm(conn, 0);
3044*4882a593Smuzhiyun
3045*4882a593Smuzhiyun unlock:
3046*4882a593Smuzhiyun hci_dev_unlock(hdev);
3047*4882a593Smuzhiyun }
3048*4882a593Smuzhiyun
3049*4882a593Smuzhiyun static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3050*4882a593Smuzhiyun {
3051*4882a593Smuzhiyun struct hci_ev_encrypt_change *ev = (void *) skb->data;
3052*4882a593Smuzhiyun struct hci_conn *conn;
3053*4882a593Smuzhiyun
3054*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3055*4882a593Smuzhiyun
3056*4882a593Smuzhiyun hci_dev_lock(hdev);
3057*4882a593Smuzhiyun
3058*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3059*4882a593Smuzhiyun if (!conn)
3060*4882a593Smuzhiyun goto unlock;
3061*4882a593Smuzhiyun
3062*4882a593Smuzhiyun if (!ev->status) {
3063*4882a593Smuzhiyun if (ev->encrypt) {
3064*4882a593Smuzhiyun /* Encryption implies authentication */
3065*4882a593Smuzhiyun set_bit(HCI_CONN_AUTH, &conn->flags);
3066*4882a593Smuzhiyun set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3067*4882a593Smuzhiyun conn->sec_level = conn->pending_sec_level;
3068*4882a593Smuzhiyun
3069*4882a593Smuzhiyun /* P-256 authentication key implies FIPS */
3070*4882a593Smuzhiyun if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3071*4882a593Smuzhiyun set_bit(HCI_CONN_FIPS, &conn->flags);
3072*4882a593Smuzhiyun
3073*4882a593Smuzhiyun if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3074*4882a593Smuzhiyun conn->type == LE_LINK)
3075*4882a593Smuzhiyun set_bit(HCI_CONN_AES_CCM, &conn->flags);
3076*4882a593Smuzhiyun } else {
3077*4882a593Smuzhiyun clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3078*4882a593Smuzhiyun clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3079*4882a593Smuzhiyun }
3080*4882a593Smuzhiyun }
3081*4882a593Smuzhiyun
3082*4882a593Smuzhiyun /* We should disregard the current RPA and generate a new one
3083*4882a593Smuzhiyun * whenever the encryption procedure fails.
3084*4882a593Smuzhiyun */
3085*4882a593Smuzhiyun if (ev->status && conn->type == LE_LINK) {
3086*4882a593Smuzhiyun hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3087*4882a593Smuzhiyun hci_adv_instances_set_rpa_expired(hdev, true);
3088*4882a593Smuzhiyun }
3089*4882a593Smuzhiyun
3090*4882a593Smuzhiyun clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3091*4882a593Smuzhiyun
3092*4882a593Smuzhiyun /* Check link security requirements are met */
3093*4882a593Smuzhiyun if (!hci_conn_check_link_mode(conn))
3094*4882a593Smuzhiyun ev->status = HCI_ERROR_AUTH_FAILURE;
3095*4882a593Smuzhiyun
3096*4882a593Smuzhiyun if (ev->status && conn->state == BT_CONNECTED) {
3097*4882a593Smuzhiyun if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3098*4882a593Smuzhiyun set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3099*4882a593Smuzhiyun
3100*4882a593Smuzhiyun /* Notify upper layers so they can cleanup before
3101*4882a593Smuzhiyun * disconnecting.
3102*4882a593Smuzhiyun */
3103*4882a593Smuzhiyun hci_encrypt_cfm(conn, ev->status);
3104*4882a593Smuzhiyun hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3105*4882a593Smuzhiyun hci_conn_drop(conn);
3106*4882a593Smuzhiyun goto unlock;
3107*4882a593Smuzhiyun }
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun /* Try reading the encryption key size for encrypted ACL links */
3110*4882a593Smuzhiyun if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3111*4882a593Smuzhiyun struct hci_cp_read_enc_key_size cp;
3112*4882a593Smuzhiyun struct hci_request req;
3113*4882a593Smuzhiyun
3114*4882a593Smuzhiyun /* Only send HCI_Read_Encryption_Key_Size if the
3115*4882a593Smuzhiyun * controller really supports it. If it doesn't, assume
3116*4882a593Smuzhiyun * the default size (16).
3117*4882a593Smuzhiyun */
3118*4882a593Smuzhiyun if (!(hdev->commands[20] & 0x10)) {
3119*4882a593Smuzhiyun conn->enc_key_size = HCI_LINK_KEY_SIZE;
3120*4882a593Smuzhiyun goto notify;
3121*4882a593Smuzhiyun }
3122*4882a593Smuzhiyun
3123*4882a593Smuzhiyun hci_req_init(&req, hdev);
3124*4882a593Smuzhiyun
3125*4882a593Smuzhiyun cp.handle = cpu_to_le16(conn->handle);
3126*4882a593Smuzhiyun hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3127*4882a593Smuzhiyun
3128*4882a593Smuzhiyun if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3129*4882a593Smuzhiyun bt_dev_err(hdev, "sending read key size failed");
3130*4882a593Smuzhiyun conn->enc_key_size = HCI_LINK_KEY_SIZE;
3131*4882a593Smuzhiyun goto notify;
3132*4882a593Smuzhiyun }
3133*4882a593Smuzhiyun
3134*4882a593Smuzhiyun goto unlock;
3135*4882a593Smuzhiyun }
3136*4882a593Smuzhiyun
3137*4882a593Smuzhiyun /* Set the default Authenticated Payload Timeout after
3138*4882a593Smuzhiyun * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3139*4882a593Smuzhiyun * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3140*4882a593Smuzhiyun * sent when the link is active and Encryption is enabled, the conn
3141*4882a593Smuzhiyun * type can be either LE or ACL and controller must support LMP Ping.
3142*4882a593Smuzhiyun * Ensure for AES-CCM encryption as well.
3143*4882a593Smuzhiyun */
3144*4882a593Smuzhiyun if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3145*4882a593Smuzhiyun test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3146*4882a593Smuzhiyun ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3147*4882a593Smuzhiyun (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3148*4882a593Smuzhiyun struct hci_cp_write_auth_payload_to cp;
3149*4882a593Smuzhiyun
3150*4882a593Smuzhiyun cp.handle = cpu_to_le16(conn->handle);
3151*4882a593Smuzhiyun cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3152*4882a593Smuzhiyun hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3153*4882a593Smuzhiyun sizeof(cp), &cp);
3154*4882a593Smuzhiyun }
3155*4882a593Smuzhiyun
3156*4882a593Smuzhiyun notify:
3157*4882a593Smuzhiyun hci_encrypt_cfm(conn, ev->status);
3158*4882a593Smuzhiyun
3159*4882a593Smuzhiyun unlock:
3160*4882a593Smuzhiyun hci_dev_unlock(hdev);
3161*4882a593Smuzhiyun }
3162*4882a593Smuzhiyun
3163*4882a593Smuzhiyun static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3164*4882a593Smuzhiyun struct sk_buff *skb)
3165*4882a593Smuzhiyun {
3166*4882a593Smuzhiyun struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3167*4882a593Smuzhiyun struct hci_conn *conn;
3168*4882a593Smuzhiyun
3169*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3170*4882a593Smuzhiyun
3171*4882a593Smuzhiyun hci_dev_lock(hdev);
3172*4882a593Smuzhiyun
3173*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3174*4882a593Smuzhiyun if (conn) {
3175*4882a593Smuzhiyun if (!ev->status)
3176*4882a593Smuzhiyun set_bit(HCI_CONN_SECURE, &conn->flags);
3177*4882a593Smuzhiyun
3178*4882a593Smuzhiyun clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3179*4882a593Smuzhiyun
3180*4882a593Smuzhiyun hci_key_change_cfm(conn, ev->status);
3181*4882a593Smuzhiyun }
3182*4882a593Smuzhiyun
3183*4882a593Smuzhiyun hci_dev_unlock(hdev);
3184*4882a593Smuzhiyun }
3185*4882a593Smuzhiyun
3186*4882a593Smuzhiyun static void hci_remote_features_evt(struct hci_dev *hdev,
3187*4882a593Smuzhiyun struct sk_buff *skb)
3188*4882a593Smuzhiyun {
3189*4882a593Smuzhiyun struct hci_ev_remote_features *ev = (void *) skb->data;
3190*4882a593Smuzhiyun struct hci_conn *conn;
3191*4882a593Smuzhiyun
3192*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3193*4882a593Smuzhiyun
3194*4882a593Smuzhiyun hci_dev_lock(hdev);
3195*4882a593Smuzhiyun
3196*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3197*4882a593Smuzhiyun if (!conn)
3198*4882a593Smuzhiyun goto unlock;
3199*4882a593Smuzhiyun
3200*4882a593Smuzhiyun if (!ev->status)
3201*4882a593Smuzhiyun memcpy(conn->features[0], ev->features, 8);
3202*4882a593Smuzhiyun
3203*4882a593Smuzhiyun if (conn->state != BT_CONFIG)
3204*4882a593Smuzhiyun goto unlock;
3205*4882a593Smuzhiyun
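/* When both sides support extended features, fetch feature page 1
 * before finishing connection setup.
 */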
3206*4882a593Smuzhiyun if (!ev->status && lmp_ext_feat_capable(hdev) &&
3207*4882a593Smuzhiyun lmp_ext_feat_capable(conn)) {
3208*4882a593Smuzhiyun struct hci_cp_read_remote_ext_features cp;
3209*4882a593Smuzhiyun cp.handle = ev->handle;
3210*4882a593Smuzhiyun cp.page = 0x01;
3211*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3212*4882a593Smuzhiyun sizeof(cp), &cp);
3213*4882a593Smuzhiyun goto unlock;
3214*4882a593Smuzhiyun }
3215*4882a593Smuzhiyun
3216*4882a593Smuzhiyun if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3217*4882a593Smuzhiyun struct hci_cp_remote_name_req cp;
3218*4882a593Smuzhiyun memset(&cp, 0, sizeof(cp));
3219*4882a593Smuzhiyun bacpy(&cp.bdaddr, &conn->dst);
3220*4882a593Smuzhiyun cp.pscan_rep_mode = 0x02;
3221*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3222*4882a593Smuzhiyun } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3223*4882a593Smuzhiyun mgmt_device_connected(hdev, conn, 0, NULL, 0);
3224*4882a593Smuzhiyun
3225*4882a593Smuzhiyun if (!hci_outgoing_auth_needed(hdev, conn)) {
3226*4882a593Smuzhiyun conn->state = BT_CONNECTED;
3227*4882a593Smuzhiyun hci_connect_cfm(conn, ev->status);
3228*4882a593Smuzhiyun hci_conn_drop(conn);
3229*4882a593Smuzhiyun }
3230*4882a593Smuzhiyun
3231*4882a593Smuzhiyun unlock:
3232*4882a593Smuzhiyun hci_dev_unlock(hdev);
3233*4882a593Smuzhiyun }
3234*4882a593Smuzhiyun
3235*4882a593Smuzhiyun static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3236*4882a593Smuzhiyun u16 *opcode, u8 *status,
3237*4882a593Smuzhiyun hci_req_complete_t *req_complete,
3238*4882a593Smuzhiyun hci_req_complete_skb_t *req_complete_skb)
3239*4882a593Smuzhiyun {
3240*4882a593Smuzhiyun struct hci_ev_cmd_complete *ev = (void *) skb->data;
3241*4882a593Smuzhiyun
3242*4882a593Smuzhiyun *opcode = __le16_to_cpu(ev->opcode);
3243*4882a593Smuzhiyun *status = skb->data[sizeof(*ev)];
3244*4882a593Smuzhiyun
3245*4882a593Smuzhiyun skb_pull(skb, sizeof(*ev));
3246*4882a593Smuzhiyun
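/* Dispatch to the per-opcode Command Complete handler; unknown opcodes
 * are only logged at debug level.
 */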
3247*4882a593Smuzhiyun switch (*opcode) {
3248*4882a593Smuzhiyun case HCI_OP_INQUIRY_CANCEL:
3249*4882a593Smuzhiyun hci_cc_inquiry_cancel(hdev, skb, status);
3250*4882a593Smuzhiyun break;
3251*4882a593Smuzhiyun
3252*4882a593Smuzhiyun case HCI_OP_PERIODIC_INQ:
3253*4882a593Smuzhiyun hci_cc_periodic_inq(hdev, skb);
3254*4882a593Smuzhiyun break;
3255*4882a593Smuzhiyun
3256*4882a593Smuzhiyun case HCI_OP_EXIT_PERIODIC_INQ:
3257*4882a593Smuzhiyun hci_cc_exit_periodic_inq(hdev, skb);
3258*4882a593Smuzhiyun break;
3259*4882a593Smuzhiyun
3260*4882a593Smuzhiyun case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3261*4882a593Smuzhiyun hci_cc_remote_name_req_cancel(hdev, skb);
3262*4882a593Smuzhiyun break;
3263*4882a593Smuzhiyun
3264*4882a593Smuzhiyun case HCI_OP_ROLE_DISCOVERY:
3265*4882a593Smuzhiyun hci_cc_role_discovery(hdev, skb);
3266*4882a593Smuzhiyun break;
3267*4882a593Smuzhiyun
3268*4882a593Smuzhiyun case HCI_OP_READ_LINK_POLICY:
3269*4882a593Smuzhiyun hci_cc_read_link_policy(hdev, skb);
3270*4882a593Smuzhiyun break;
3271*4882a593Smuzhiyun
3272*4882a593Smuzhiyun case HCI_OP_WRITE_LINK_POLICY:
3273*4882a593Smuzhiyun hci_cc_write_link_policy(hdev, skb);
3274*4882a593Smuzhiyun break;
3275*4882a593Smuzhiyun
3276*4882a593Smuzhiyun case HCI_OP_READ_DEF_LINK_POLICY:
3277*4882a593Smuzhiyun hci_cc_read_def_link_policy(hdev, skb);
3278*4882a593Smuzhiyun break;
3279*4882a593Smuzhiyun
3280*4882a593Smuzhiyun case HCI_OP_WRITE_DEF_LINK_POLICY:
3281*4882a593Smuzhiyun hci_cc_write_def_link_policy(hdev, skb);
3282*4882a593Smuzhiyun break;
3283*4882a593Smuzhiyun
3284*4882a593Smuzhiyun case HCI_OP_RESET:
3285*4882a593Smuzhiyun hci_cc_reset(hdev, skb);
3286*4882a593Smuzhiyun break;
3287*4882a593Smuzhiyun
3288*4882a593Smuzhiyun case HCI_OP_READ_STORED_LINK_KEY:
3289*4882a593Smuzhiyun hci_cc_read_stored_link_key(hdev, skb);
3290*4882a593Smuzhiyun break;
3291*4882a593Smuzhiyun
3292*4882a593Smuzhiyun case HCI_OP_DELETE_STORED_LINK_KEY:
3293*4882a593Smuzhiyun hci_cc_delete_stored_link_key(hdev, skb);
3294*4882a593Smuzhiyun break;
3295*4882a593Smuzhiyun
3296*4882a593Smuzhiyun case HCI_OP_WRITE_LOCAL_NAME:
3297*4882a593Smuzhiyun hci_cc_write_local_name(hdev, skb);
3298*4882a593Smuzhiyun break;
3299*4882a593Smuzhiyun
3300*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_NAME:
3301*4882a593Smuzhiyun hci_cc_read_local_name(hdev, skb);
3302*4882a593Smuzhiyun break;
3303*4882a593Smuzhiyun
3304*4882a593Smuzhiyun case HCI_OP_WRITE_AUTH_ENABLE:
3305*4882a593Smuzhiyun hci_cc_write_auth_enable(hdev, skb);
3306*4882a593Smuzhiyun break;
3307*4882a593Smuzhiyun
3308*4882a593Smuzhiyun case HCI_OP_WRITE_ENCRYPT_MODE:
3309*4882a593Smuzhiyun hci_cc_write_encrypt_mode(hdev, skb);
3310*4882a593Smuzhiyun break;
3311*4882a593Smuzhiyun
3312*4882a593Smuzhiyun case HCI_OP_WRITE_SCAN_ENABLE:
3313*4882a593Smuzhiyun hci_cc_write_scan_enable(hdev, skb);
3314*4882a593Smuzhiyun break;
3315*4882a593Smuzhiyun
3316*4882a593Smuzhiyun case HCI_OP_READ_CLASS_OF_DEV:
3317*4882a593Smuzhiyun hci_cc_read_class_of_dev(hdev, skb);
3318*4882a593Smuzhiyun break;
3319*4882a593Smuzhiyun
3320*4882a593Smuzhiyun case HCI_OP_WRITE_CLASS_OF_DEV:
3321*4882a593Smuzhiyun hci_cc_write_class_of_dev(hdev, skb);
3322*4882a593Smuzhiyun break;
3323*4882a593Smuzhiyun
3324*4882a593Smuzhiyun case HCI_OP_READ_VOICE_SETTING:
3325*4882a593Smuzhiyun hci_cc_read_voice_setting(hdev, skb);
3326*4882a593Smuzhiyun break;
3327*4882a593Smuzhiyun
3328*4882a593Smuzhiyun case HCI_OP_WRITE_VOICE_SETTING:
3329*4882a593Smuzhiyun hci_cc_write_voice_setting(hdev, skb);
3330*4882a593Smuzhiyun break;
3331*4882a593Smuzhiyun
3332*4882a593Smuzhiyun case HCI_OP_READ_NUM_SUPPORTED_IAC:
3333*4882a593Smuzhiyun hci_cc_read_num_supported_iac(hdev, skb);
3334*4882a593Smuzhiyun break;
3335*4882a593Smuzhiyun
3336*4882a593Smuzhiyun case HCI_OP_WRITE_SSP_MODE:
3337*4882a593Smuzhiyun hci_cc_write_ssp_mode(hdev, skb);
3338*4882a593Smuzhiyun break;
3339*4882a593Smuzhiyun
3340*4882a593Smuzhiyun case HCI_OP_WRITE_SC_SUPPORT:
3341*4882a593Smuzhiyun hci_cc_write_sc_support(hdev, skb);
3342*4882a593Smuzhiyun break;
3343*4882a593Smuzhiyun
3344*4882a593Smuzhiyun case HCI_OP_READ_AUTH_PAYLOAD_TO:
3345*4882a593Smuzhiyun hci_cc_read_auth_payload_timeout(hdev, skb);
3346*4882a593Smuzhiyun break;
3347*4882a593Smuzhiyun
3348*4882a593Smuzhiyun case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3349*4882a593Smuzhiyun hci_cc_write_auth_payload_timeout(hdev, skb);
3350*4882a593Smuzhiyun break;
3351*4882a593Smuzhiyun
3352*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_VERSION:
3353*4882a593Smuzhiyun hci_cc_read_local_version(hdev, skb);
3354*4882a593Smuzhiyun break;
3355*4882a593Smuzhiyun
3356*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_COMMANDS:
3357*4882a593Smuzhiyun hci_cc_read_local_commands(hdev, skb);
3358*4882a593Smuzhiyun break;
3359*4882a593Smuzhiyun
3360*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_FEATURES:
3361*4882a593Smuzhiyun hci_cc_read_local_features(hdev, skb);
3362*4882a593Smuzhiyun break;
3363*4882a593Smuzhiyun
3364*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_EXT_FEATURES:
3365*4882a593Smuzhiyun hci_cc_read_local_ext_features(hdev, skb);
3366*4882a593Smuzhiyun break;
3367*4882a593Smuzhiyun
3368*4882a593Smuzhiyun case HCI_OP_READ_BUFFER_SIZE:
3369*4882a593Smuzhiyun hci_cc_read_buffer_size(hdev, skb);
3370*4882a593Smuzhiyun break;
3371*4882a593Smuzhiyun
3372*4882a593Smuzhiyun case HCI_OP_READ_BD_ADDR:
3373*4882a593Smuzhiyun hci_cc_read_bd_addr(hdev, skb);
3374*4882a593Smuzhiyun break;
3375*4882a593Smuzhiyun
3376*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3377*4882a593Smuzhiyun hci_cc_read_local_pairing_opts(hdev, skb);
3378*4882a593Smuzhiyun break;
3379*4882a593Smuzhiyun
3380*4882a593Smuzhiyun case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3381*4882a593Smuzhiyun hci_cc_read_page_scan_activity(hdev, skb);
3382*4882a593Smuzhiyun break;
3383*4882a593Smuzhiyun
3384*4882a593Smuzhiyun case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3385*4882a593Smuzhiyun hci_cc_write_page_scan_activity(hdev, skb);
3386*4882a593Smuzhiyun break;
3387*4882a593Smuzhiyun
3388*4882a593Smuzhiyun case HCI_OP_READ_PAGE_SCAN_TYPE:
3389*4882a593Smuzhiyun hci_cc_read_page_scan_type(hdev, skb);
3390*4882a593Smuzhiyun break;
3391*4882a593Smuzhiyun
3392*4882a593Smuzhiyun case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3393*4882a593Smuzhiyun hci_cc_write_page_scan_type(hdev, skb);
3394*4882a593Smuzhiyun break;
3395*4882a593Smuzhiyun
3396*4882a593Smuzhiyun case HCI_OP_READ_DATA_BLOCK_SIZE:
3397*4882a593Smuzhiyun hci_cc_read_data_block_size(hdev, skb);
3398*4882a593Smuzhiyun break;
3399*4882a593Smuzhiyun
3400*4882a593Smuzhiyun case HCI_OP_READ_FLOW_CONTROL_MODE:
3401*4882a593Smuzhiyun hci_cc_read_flow_control_mode(hdev, skb);
3402*4882a593Smuzhiyun break;
3403*4882a593Smuzhiyun
3404*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_AMP_INFO:
3405*4882a593Smuzhiyun hci_cc_read_local_amp_info(hdev, skb);
3406*4882a593Smuzhiyun break;
3407*4882a593Smuzhiyun
3408*4882a593Smuzhiyun case HCI_OP_READ_CLOCK:
3409*4882a593Smuzhiyun hci_cc_read_clock(hdev, skb);
3410*4882a593Smuzhiyun break;
3411*4882a593Smuzhiyun
3412*4882a593Smuzhiyun case HCI_OP_READ_INQ_RSP_TX_POWER:
3413*4882a593Smuzhiyun hci_cc_read_inq_rsp_tx_power(hdev, skb);
3414*4882a593Smuzhiyun break;
3415*4882a593Smuzhiyun
3416*4882a593Smuzhiyun case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3417*4882a593Smuzhiyun hci_cc_read_def_err_data_reporting(hdev, skb);
3418*4882a593Smuzhiyun break;
3419*4882a593Smuzhiyun
3420*4882a593Smuzhiyun case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3421*4882a593Smuzhiyun hci_cc_write_def_err_data_reporting(hdev, skb);
3422*4882a593Smuzhiyun break;
3423*4882a593Smuzhiyun
3424*4882a593Smuzhiyun case HCI_OP_PIN_CODE_REPLY:
3425*4882a593Smuzhiyun hci_cc_pin_code_reply(hdev, skb);
3426*4882a593Smuzhiyun break;
3427*4882a593Smuzhiyun
3428*4882a593Smuzhiyun case HCI_OP_PIN_CODE_NEG_REPLY:
3429*4882a593Smuzhiyun hci_cc_pin_code_neg_reply(hdev, skb);
3430*4882a593Smuzhiyun break;
3431*4882a593Smuzhiyun
3432*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_OOB_DATA:
3433*4882a593Smuzhiyun hci_cc_read_local_oob_data(hdev, skb);
3434*4882a593Smuzhiyun break;
3435*4882a593Smuzhiyun
3436*4882a593Smuzhiyun case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3437*4882a593Smuzhiyun hci_cc_read_local_oob_ext_data(hdev, skb);
3438*4882a593Smuzhiyun break;
3439*4882a593Smuzhiyun
3440*4882a593Smuzhiyun case HCI_OP_LE_READ_BUFFER_SIZE:
3441*4882a593Smuzhiyun hci_cc_le_read_buffer_size(hdev, skb);
3442*4882a593Smuzhiyun break;
3443*4882a593Smuzhiyun
3444*4882a593Smuzhiyun case HCI_OP_LE_READ_LOCAL_FEATURES:
3445*4882a593Smuzhiyun hci_cc_le_read_local_features(hdev, skb);
3446*4882a593Smuzhiyun break;
3447*4882a593Smuzhiyun
3448*4882a593Smuzhiyun case HCI_OP_LE_READ_ADV_TX_POWER:
3449*4882a593Smuzhiyun hci_cc_le_read_adv_tx_power(hdev, skb);
3450*4882a593Smuzhiyun break;
3451*4882a593Smuzhiyun
3452*4882a593Smuzhiyun case HCI_OP_USER_CONFIRM_REPLY:
3453*4882a593Smuzhiyun hci_cc_user_confirm_reply(hdev, skb);
3454*4882a593Smuzhiyun break;
3455*4882a593Smuzhiyun
3456*4882a593Smuzhiyun case HCI_OP_USER_CONFIRM_NEG_REPLY:
3457*4882a593Smuzhiyun hci_cc_user_confirm_neg_reply(hdev, skb);
3458*4882a593Smuzhiyun break;
3459*4882a593Smuzhiyun
3460*4882a593Smuzhiyun case HCI_OP_USER_PASSKEY_REPLY:
3461*4882a593Smuzhiyun hci_cc_user_passkey_reply(hdev, skb);
3462*4882a593Smuzhiyun break;
3463*4882a593Smuzhiyun
3464*4882a593Smuzhiyun case HCI_OP_USER_PASSKEY_NEG_REPLY:
3465*4882a593Smuzhiyun hci_cc_user_passkey_neg_reply(hdev, skb);
3466*4882a593Smuzhiyun break;
3467*4882a593Smuzhiyun
3468*4882a593Smuzhiyun case HCI_OP_LE_SET_RANDOM_ADDR:
3469*4882a593Smuzhiyun hci_cc_le_set_random_addr(hdev, skb);
3470*4882a593Smuzhiyun break;
3471*4882a593Smuzhiyun
3472*4882a593Smuzhiyun case HCI_OP_LE_SET_ADV_ENABLE:
3473*4882a593Smuzhiyun hci_cc_le_set_adv_enable(hdev, skb);
3474*4882a593Smuzhiyun break;
3475*4882a593Smuzhiyun
3476*4882a593Smuzhiyun case HCI_OP_LE_SET_SCAN_PARAM:
3477*4882a593Smuzhiyun hci_cc_le_set_scan_param(hdev, skb);
3478*4882a593Smuzhiyun break;
3479*4882a593Smuzhiyun
3480*4882a593Smuzhiyun case HCI_OP_LE_SET_SCAN_ENABLE:
3481*4882a593Smuzhiyun hci_cc_le_set_scan_enable(hdev, skb);
3482*4882a593Smuzhiyun break;
3483*4882a593Smuzhiyun
3484*4882a593Smuzhiyun case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3485*4882a593Smuzhiyun hci_cc_le_read_white_list_size(hdev, skb);
3486*4882a593Smuzhiyun break;
3487*4882a593Smuzhiyun
3488*4882a593Smuzhiyun case HCI_OP_LE_CLEAR_WHITE_LIST:
3489*4882a593Smuzhiyun hci_cc_le_clear_white_list(hdev, skb);
3490*4882a593Smuzhiyun break;
3491*4882a593Smuzhiyun
3492*4882a593Smuzhiyun case HCI_OP_LE_ADD_TO_WHITE_LIST:
3493*4882a593Smuzhiyun hci_cc_le_add_to_white_list(hdev, skb);
3494*4882a593Smuzhiyun break;
3495*4882a593Smuzhiyun
3496*4882a593Smuzhiyun case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3497*4882a593Smuzhiyun hci_cc_le_del_from_white_list(hdev, skb);
3498*4882a593Smuzhiyun break;
3499*4882a593Smuzhiyun
3500*4882a593Smuzhiyun case HCI_OP_LE_READ_SUPPORTED_STATES:
3501*4882a593Smuzhiyun hci_cc_le_read_supported_states(hdev, skb);
3502*4882a593Smuzhiyun break;
3503*4882a593Smuzhiyun
3504*4882a593Smuzhiyun case HCI_OP_LE_READ_DEF_DATA_LEN:
3505*4882a593Smuzhiyun hci_cc_le_read_def_data_len(hdev, skb);
3506*4882a593Smuzhiyun break;
3507*4882a593Smuzhiyun
3508*4882a593Smuzhiyun case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3509*4882a593Smuzhiyun hci_cc_le_write_def_data_len(hdev, skb);
3510*4882a593Smuzhiyun break;
3511*4882a593Smuzhiyun
3512*4882a593Smuzhiyun case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3513*4882a593Smuzhiyun hci_cc_le_add_to_resolv_list(hdev, skb);
3514*4882a593Smuzhiyun break;
3515*4882a593Smuzhiyun
3516*4882a593Smuzhiyun case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3517*4882a593Smuzhiyun hci_cc_le_del_from_resolv_list(hdev, skb);
3518*4882a593Smuzhiyun break;
3519*4882a593Smuzhiyun
3520*4882a593Smuzhiyun case HCI_OP_LE_CLEAR_RESOLV_LIST:
3521*4882a593Smuzhiyun hci_cc_le_clear_resolv_list(hdev, skb);
3522*4882a593Smuzhiyun break;
3523*4882a593Smuzhiyun
3524*4882a593Smuzhiyun case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3525*4882a593Smuzhiyun hci_cc_le_read_resolv_list_size(hdev, skb);
3526*4882a593Smuzhiyun break;
3527*4882a593Smuzhiyun
3528*4882a593Smuzhiyun case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3529*4882a593Smuzhiyun hci_cc_le_set_addr_resolution_enable(hdev, skb);
3530*4882a593Smuzhiyun break;
3531*4882a593Smuzhiyun
3532*4882a593Smuzhiyun case HCI_OP_LE_READ_MAX_DATA_LEN:
3533*4882a593Smuzhiyun hci_cc_le_read_max_data_len(hdev, skb);
3534*4882a593Smuzhiyun break;
3535*4882a593Smuzhiyun
3536*4882a593Smuzhiyun case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3537*4882a593Smuzhiyun hci_cc_write_le_host_supported(hdev, skb);
3538*4882a593Smuzhiyun break;
3539*4882a593Smuzhiyun
3540*4882a593Smuzhiyun case HCI_OP_LE_SET_ADV_PARAM:
3541*4882a593Smuzhiyun hci_cc_set_adv_param(hdev, skb);
3542*4882a593Smuzhiyun break;
3543*4882a593Smuzhiyun
3544*4882a593Smuzhiyun case HCI_OP_READ_RSSI:
3545*4882a593Smuzhiyun hci_cc_read_rssi(hdev, skb);
3546*4882a593Smuzhiyun break;
3547*4882a593Smuzhiyun
3548*4882a593Smuzhiyun case HCI_OP_READ_TX_POWER:
3549*4882a593Smuzhiyun hci_cc_read_tx_power(hdev, skb);
3550*4882a593Smuzhiyun break;
3551*4882a593Smuzhiyun
3552*4882a593Smuzhiyun case HCI_OP_WRITE_SSP_DEBUG_MODE:
3553*4882a593Smuzhiyun hci_cc_write_ssp_debug_mode(hdev, skb);
3554*4882a593Smuzhiyun break;
3555*4882a593Smuzhiyun
3556*4882a593Smuzhiyun case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3557*4882a593Smuzhiyun hci_cc_le_set_ext_scan_param(hdev, skb);
3558*4882a593Smuzhiyun break;
3559*4882a593Smuzhiyun
3560*4882a593Smuzhiyun case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3561*4882a593Smuzhiyun hci_cc_le_set_ext_scan_enable(hdev, skb);
3562*4882a593Smuzhiyun break;
3563*4882a593Smuzhiyun
3564*4882a593Smuzhiyun case HCI_OP_LE_SET_DEFAULT_PHY:
3565*4882a593Smuzhiyun hci_cc_le_set_default_phy(hdev, skb);
3566*4882a593Smuzhiyun break;
3567*4882a593Smuzhiyun
3568*4882a593Smuzhiyun case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3569*4882a593Smuzhiyun hci_cc_le_read_num_adv_sets(hdev, skb);
3570*4882a593Smuzhiyun break;
3571*4882a593Smuzhiyun
3572*4882a593Smuzhiyun case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3573*4882a593Smuzhiyun hci_cc_set_ext_adv_param(hdev, skb);
3574*4882a593Smuzhiyun break;
3575*4882a593Smuzhiyun
3576*4882a593Smuzhiyun case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3577*4882a593Smuzhiyun hci_cc_le_set_ext_adv_enable(hdev, skb);
3578*4882a593Smuzhiyun break;
3579*4882a593Smuzhiyun
3580*4882a593Smuzhiyun case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3581*4882a593Smuzhiyun hci_cc_le_set_adv_set_random_addr(hdev, skb);
3582*4882a593Smuzhiyun break;
3583*4882a593Smuzhiyun
3584*4882a593Smuzhiyun default:
3585*4882a593Smuzhiyun BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3586*4882a593Smuzhiyun break;
3587*4882a593Smuzhiyun }
3588*4882a593Smuzhiyun
3589*4882a593Smuzhiyun if (*opcode != HCI_OP_NOP)
3590*4882a593Smuzhiyun cancel_delayed_work(&hdev->cmd_timer);
3591*4882a593Smuzhiyun
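/* A non-zero Num_HCI_Command_Packets value means the controller can
 * accept another command, so restore the flow-control credit unless a
 * reset is still in progress.
 */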
3592*4882a593Smuzhiyun if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3593*4882a593Smuzhiyun atomic_set(&hdev->cmd_cnt, 1);
3594*4882a593Smuzhiyun
3595*4882a593Smuzhiyun hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3596*4882a593Smuzhiyun req_complete_skb);
3597*4882a593Smuzhiyun
3598*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3599*4882a593Smuzhiyun bt_dev_err(hdev,
3600*4882a593Smuzhiyun "unexpected event for opcode 0x%4.4x", *opcode);
3601*4882a593Smuzhiyun return;
3602*4882a593Smuzhiyun }
3603*4882a593Smuzhiyun
3604*4882a593Smuzhiyun if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3605*4882a593Smuzhiyun queue_work(hdev->workqueue, &hdev->cmd_work);
3606*4882a593Smuzhiyun }
3607*4882a593Smuzhiyun
3608*4882a593Smuzhiyun static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3609*4882a593Smuzhiyun u16 *opcode, u8 *status,
3610*4882a593Smuzhiyun hci_req_complete_t *req_complete,
3611*4882a593Smuzhiyun hci_req_complete_skb_t *req_complete_skb)
3612*4882a593Smuzhiyun {
3613*4882a593Smuzhiyun struct hci_ev_cmd_status *ev = (void *) skb->data;
3614*4882a593Smuzhiyun
3615*4882a593Smuzhiyun skb_pull(skb, sizeof(*ev));
3616*4882a593Smuzhiyun
3617*4882a593Smuzhiyun *opcode = __le16_to_cpu(ev->opcode);
3618*4882a593Smuzhiyun *status = ev->status;
3619*4882a593Smuzhiyun
3620*4882a593Smuzhiyun switch (*opcode) {
3621*4882a593Smuzhiyun case HCI_OP_INQUIRY:
3622*4882a593Smuzhiyun hci_cs_inquiry(hdev, ev->status);
3623*4882a593Smuzhiyun break;
3624*4882a593Smuzhiyun
3625*4882a593Smuzhiyun case HCI_OP_CREATE_CONN:
3626*4882a593Smuzhiyun hci_cs_create_conn(hdev, ev->status);
3627*4882a593Smuzhiyun break;
3628*4882a593Smuzhiyun
3629*4882a593Smuzhiyun case HCI_OP_DISCONNECT:
3630*4882a593Smuzhiyun hci_cs_disconnect(hdev, ev->status);
3631*4882a593Smuzhiyun break;
3632*4882a593Smuzhiyun
3633*4882a593Smuzhiyun case HCI_OP_ADD_SCO:
3634*4882a593Smuzhiyun hci_cs_add_sco(hdev, ev->status);
3635*4882a593Smuzhiyun break;
3636*4882a593Smuzhiyun
3637*4882a593Smuzhiyun case HCI_OP_AUTH_REQUESTED:
3638*4882a593Smuzhiyun hci_cs_auth_requested(hdev, ev->status);
3639*4882a593Smuzhiyun break;
3640*4882a593Smuzhiyun
3641*4882a593Smuzhiyun case HCI_OP_SET_CONN_ENCRYPT:
3642*4882a593Smuzhiyun hci_cs_set_conn_encrypt(hdev, ev->status);
3643*4882a593Smuzhiyun break;
3644*4882a593Smuzhiyun
3645*4882a593Smuzhiyun case HCI_OP_REMOTE_NAME_REQ:
3646*4882a593Smuzhiyun hci_cs_remote_name_req(hdev, ev->status);
3647*4882a593Smuzhiyun break;
3648*4882a593Smuzhiyun
3649*4882a593Smuzhiyun case HCI_OP_READ_REMOTE_FEATURES:
3650*4882a593Smuzhiyun hci_cs_read_remote_features(hdev, ev->status);
3651*4882a593Smuzhiyun break;
3652*4882a593Smuzhiyun
3653*4882a593Smuzhiyun case HCI_OP_READ_REMOTE_EXT_FEATURES:
3654*4882a593Smuzhiyun hci_cs_read_remote_ext_features(hdev, ev->status);
3655*4882a593Smuzhiyun break;
3656*4882a593Smuzhiyun
3657*4882a593Smuzhiyun case HCI_OP_SETUP_SYNC_CONN:
3658*4882a593Smuzhiyun hci_cs_setup_sync_conn(hdev, ev->status);
3659*4882a593Smuzhiyun break;
3660*4882a593Smuzhiyun
3661*4882a593Smuzhiyun case HCI_OP_SNIFF_MODE:
3662*4882a593Smuzhiyun hci_cs_sniff_mode(hdev, ev->status);
3663*4882a593Smuzhiyun break;
3664*4882a593Smuzhiyun
3665*4882a593Smuzhiyun case HCI_OP_EXIT_SNIFF_MODE:
3666*4882a593Smuzhiyun hci_cs_exit_sniff_mode(hdev, ev->status);
3667*4882a593Smuzhiyun break;
3668*4882a593Smuzhiyun
3669*4882a593Smuzhiyun case HCI_OP_SWITCH_ROLE:
3670*4882a593Smuzhiyun hci_cs_switch_role(hdev, ev->status);
3671*4882a593Smuzhiyun break;
3672*4882a593Smuzhiyun
3673*4882a593Smuzhiyun case HCI_OP_LE_CREATE_CONN:
3674*4882a593Smuzhiyun hci_cs_le_create_conn(hdev, ev->status);
3675*4882a593Smuzhiyun break;
3676*4882a593Smuzhiyun
3677*4882a593Smuzhiyun case HCI_OP_LE_READ_REMOTE_FEATURES:
3678*4882a593Smuzhiyun hci_cs_le_read_remote_features(hdev, ev->status);
3679*4882a593Smuzhiyun break;
3680*4882a593Smuzhiyun
3681*4882a593Smuzhiyun case HCI_OP_LE_START_ENC:
3682*4882a593Smuzhiyun hci_cs_le_start_enc(hdev, ev->status);
3683*4882a593Smuzhiyun break;
3684*4882a593Smuzhiyun
3685*4882a593Smuzhiyun case HCI_OP_LE_EXT_CREATE_CONN:
3686*4882a593Smuzhiyun hci_cs_le_ext_create_conn(hdev, ev->status);
3687*4882a593Smuzhiyun break;
3688*4882a593Smuzhiyun
3689*4882a593Smuzhiyun default:
3690*4882a593Smuzhiyun BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3691*4882a593Smuzhiyun break;
3692*4882a593Smuzhiyun }
3693*4882a593Smuzhiyun
3694*4882a593Smuzhiyun if (*opcode != HCI_OP_NOP)
3695*4882a593Smuzhiyun cancel_delayed_work(&hdev->cmd_timer);
3696*4882a593Smuzhiyun
3697*4882a593Smuzhiyun if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3698*4882a593Smuzhiyun atomic_set(&hdev->cmd_cnt, 1);
3699*4882a593Smuzhiyun
3700*4882a593Smuzhiyun /* Indicate request completion if the command failed. Also, if
3701*4882a593Smuzhiyun * we're not waiting for a special event and we get a successful
3702*4882a593Smuzhiyun * command status we should try to flag the request as completed
3703*4882a593Smuzhiyun * (since for this kind of command there will not be a command
3704*4882a593Smuzhiyun * complete event).
3705*4882a593Smuzhiyun */
3706*4882a593Smuzhiyun if (ev->status ||
3707*4882a593Smuzhiyun (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3708*4882a593Smuzhiyun hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3709*4882a593Smuzhiyun req_complete_skb);
3710*4882a593Smuzhiyun
3711*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3712*4882a593Smuzhiyun bt_dev_err(hdev,
3713*4882a593Smuzhiyun "unexpected event for opcode 0x%4.4x", *opcode);
3714*4882a593Smuzhiyun return;
3715*4882a593Smuzhiyun }
3716*4882a593Smuzhiyun
3717*4882a593Smuzhiyun if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3718*4882a593Smuzhiyun queue_work(hdev->workqueue, &hdev->cmd_work);
3719*4882a593Smuzhiyun }
3720*4882a593Smuzhiyun
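/* Hardware Error event: remember the controller's error code and
 * schedule the error_reset work so recovery can run from process
 * context.
 */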
3721*4882a593Smuzhiyun static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3722*4882a593Smuzhiyun {
3723*4882a593Smuzhiyun struct hci_ev_hardware_error *ev = (void *) skb->data;
3724*4882a593Smuzhiyun
3725*4882a593Smuzhiyun hdev->hw_error_code = ev->code;
3726*4882a593Smuzhiyun
3727*4882a593Smuzhiyun queue_work(hdev->req_workqueue, &hdev->error_reset);
3728*4882a593Smuzhiyun }
3729*4882a593Smuzhiyun
3730*4882a593Smuzhiyun static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3731*4882a593Smuzhiyun {
3732*4882a593Smuzhiyun struct hci_ev_role_change *ev = (void *) skb->data;
3733*4882a593Smuzhiyun struct hci_conn *conn;
3734*4882a593Smuzhiyun
3735*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3736*4882a593Smuzhiyun
3737*4882a593Smuzhiyun hci_dev_lock(hdev);
3738*4882a593Smuzhiyun
3739*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3740*4882a593Smuzhiyun if (conn) {
3741*4882a593Smuzhiyun if (!ev->status)
3742*4882a593Smuzhiyun conn->role = ev->role;
3743*4882a593Smuzhiyun
3744*4882a593Smuzhiyun clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3745*4882a593Smuzhiyun
3746*4882a593Smuzhiyun hci_role_switch_cfm(conn, ev->status, ev->role);
3747*4882a593Smuzhiyun }
3748*4882a593Smuzhiyun
3749*4882a593Smuzhiyun hci_dev_unlock(hdev);
3750*4882a593Smuzhiyun }
3751*4882a593Smuzhiyun
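/* Number of Completed Packets: the controller returns per-connection
 * transmit credits. For each reported handle we decrement conn->sent
 * and give the credits back to the matching pool (ACL, LE or SCO),
 * clamping at the totals the controller advertised, roughly:
 *
 *	hdev->acl_cnt = min(hdev->acl_cnt + count, hdev->acl_pkts);
 *
 * (illustrative only; the switch below does the same per link type)
 * and then kick tx_work so queued frames can be transmitted.
 */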
3752*4882a593Smuzhiyun static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3753*4882a593Smuzhiyun {
3754*4882a593Smuzhiyun struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3755*4882a593Smuzhiyun int i;
3756*4882a593Smuzhiyun
3757*4882a593Smuzhiyun if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3758*4882a593Smuzhiyun bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3759*4882a593Smuzhiyun return;
3760*4882a593Smuzhiyun }
3761*4882a593Smuzhiyun
3762*4882a593Smuzhiyun if (skb->len < sizeof(*ev) ||
3763*4882a593Smuzhiyun skb->len < struct_size(ev, handles, ev->num_hndl)) {
3764*4882a593Smuzhiyun BT_DBG("%s bad parameters", hdev->name);
3765*4882a593Smuzhiyun return;
3766*4882a593Smuzhiyun }
3767*4882a593Smuzhiyun
3768*4882a593Smuzhiyun BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3769*4882a593Smuzhiyun
3770*4882a593Smuzhiyun for (i = 0; i < ev->num_hndl; i++) {
3771*4882a593Smuzhiyun struct hci_comp_pkts_info *info = &ev->handles[i];
3772*4882a593Smuzhiyun struct hci_conn *conn;
3773*4882a593Smuzhiyun __u16 handle, count;
3774*4882a593Smuzhiyun
3775*4882a593Smuzhiyun handle = __le16_to_cpu(info->handle);
3776*4882a593Smuzhiyun count = __le16_to_cpu(info->count);
3777*4882a593Smuzhiyun
3778*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, handle);
3779*4882a593Smuzhiyun if (!conn)
3780*4882a593Smuzhiyun continue;
3781*4882a593Smuzhiyun
3782*4882a593Smuzhiyun conn->sent -= count;
3783*4882a593Smuzhiyun
3784*4882a593Smuzhiyun switch (conn->type) {
3785*4882a593Smuzhiyun case ACL_LINK:
3786*4882a593Smuzhiyun hdev->acl_cnt += count;
3787*4882a593Smuzhiyun if (hdev->acl_cnt > hdev->acl_pkts)
3788*4882a593Smuzhiyun hdev->acl_cnt = hdev->acl_pkts;
3789*4882a593Smuzhiyun break;
3790*4882a593Smuzhiyun
3791*4882a593Smuzhiyun case LE_LINK:
3792*4882a593Smuzhiyun if (hdev->le_pkts) {
3793*4882a593Smuzhiyun hdev->le_cnt += count;
3794*4882a593Smuzhiyun if (hdev->le_cnt > hdev->le_pkts)
3795*4882a593Smuzhiyun hdev->le_cnt = hdev->le_pkts;
3796*4882a593Smuzhiyun } else {
3797*4882a593Smuzhiyun hdev->acl_cnt += count;
3798*4882a593Smuzhiyun if (hdev->acl_cnt > hdev->acl_pkts)
3799*4882a593Smuzhiyun hdev->acl_cnt = hdev->acl_pkts;
3800*4882a593Smuzhiyun }
3801*4882a593Smuzhiyun break;
3802*4882a593Smuzhiyun
3803*4882a593Smuzhiyun case SCO_LINK:
3804*4882a593Smuzhiyun hdev->sco_cnt += count;
3805*4882a593Smuzhiyun if (hdev->sco_cnt > hdev->sco_pkts)
3806*4882a593Smuzhiyun hdev->sco_cnt = hdev->sco_pkts;
3807*4882a593Smuzhiyun break;
3808*4882a593Smuzhiyun
3809*4882a593Smuzhiyun default:
3810*4882a593Smuzhiyun bt_dev_err(hdev, "unknown type %d conn %p",
3811*4882a593Smuzhiyun conn->type, conn);
3812*4882a593Smuzhiyun break;
3813*4882a593Smuzhiyun }
3814*4882a593Smuzhiyun }
3815*4882a593Smuzhiyun
3816*4882a593Smuzhiyun queue_work(hdev->workqueue, &hdev->tx_work);
3817*4882a593Smuzhiyun }
3818*4882a593Smuzhiyun
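/* Resolve a handle from a block-based flow control event to a
 * connection: on a primary controller the handle identifies the
 * connection directly, while on an AMP controller it identifies a
 * logical channel whose parent connection is returned instead.
 */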
3819*4882a593Smuzhiyun static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3820*4882a593Smuzhiyun __u16 handle)
3821*4882a593Smuzhiyun {
3822*4882a593Smuzhiyun struct hci_chan *chan;
3823*4882a593Smuzhiyun
3824*4882a593Smuzhiyun switch (hdev->dev_type) {
3825*4882a593Smuzhiyun case HCI_PRIMARY:
3826*4882a593Smuzhiyun return hci_conn_hash_lookup_handle(hdev, handle);
3827*4882a593Smuzhiyun case HCI_AMP:
3828*4882a593Smuzhiyun chan = hci_chan_lookup_handle(hdev, handle);
3829*4882a593Smuzhiyun if (chan)
3830*4882a593Smuzhiyun return chan->conn;
3831*4882a593Smuzhiyun break;
3832*4882a593Smuzhiyun default:
3833*4882a593Smuzhiyun bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3834*4882a593Smuzhiyun break;
3835*4882a593Smuzhiyun }
3836*4882a593Smuzhiyun
3837*4882a593Smuzhiyun return NULL;
3838*4882a593Smuzhiyun }
3839*4882a593Smuzhiyun
3840*4882a593Smuzhiyun static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3841*4882a593Smuzhiyun {
3842*4882a593Smuzhiyun struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3843*4882a593Smuzhiyun int i;
3844*4882a593Smuzhiyun
3845*4882a593Smuzhiyun if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3846*4882a593Smuzhiyun bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3847*4882a593Smuzhiyun return;
3848*4882a593Smuzhiyun }
3849*4882a593Smuzhiyun
3850*4882a593Smuzhiyun if (skb->len < sizeof(*ev) ||
3851*4882a593Smuzhiyun skb->len < struct_size(ev, handles, ev->num_hndl)) {
3852*4882a593Smuzhiyun BT_DBG("%s bad parameters", hdev->name);
3853*4882a593Smuzhiyun return;
3854*4882a593Smuzhiyun }
3855*4882a593Smuzhiyun
3856*4882a593Smuzhiyun BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3857*4882a593Smuzhiyun ev->num_hndl);
3858*4882a593Smuzhiyun
3859*4882a593Smuzhiyun for (i = 0; i < ev->num_hndl; i++) {
3860*4882a593Smuzhiyun struct hci_comp_blocks_info *info = &ev->handles[i];
3861*4882a593Smuzhiyun struct hci_conn *conn = NULL;
3862*4882a593Smuzhiyun __u16 handle, block_count;
3863*4882a593Smuzhiyun
3864*4882a593Smuzhiyun handle = __le16_to_cpu(info->handle);
3865*4882a593Smuzhiyun block_count = __le16_to_cpu(info->blocks);
3866*4882a593Smuzhiyun
3867*4882a593Smuzhiyun conn = __hci_conn_lookup_handle(hdev, handle);
3868*4882a593Smuzhiyun if (!conn)
3869*4882a593Smuzhiyun continue;
3870*4882a593Smuzhiyun
3871*4882a593Smuzhiyun conn->sent -= block_count;
3872*4882a593Smuzhiyun
3873*4882a593Smuzhiyun switch (conn->type) {
3874*4882a593Smuzhiyun case ACL_LINK:
3875*4882a593Smuzhiyun case AMP_LINK:
3876*4882a593Smuzhiyun hdev->block_cnt += block_count;
3877*4882a593Smuzhiyun if (hdev->block_cnt > hdev->num_blocks)
3878*4882a593Smuzhiyun hdev->block_cnt = hdev->num_blocks;
3879*4882a593Smuzhiyun break;
3880*4882a593Smuzhiyun
3881*4882a593Smuzhiyun default:
3882*4882a593Smuzhiyun bt_dev_err(hdev, "unknown type %d conn %p",
3883*4882a593Smuzhiyun conn->type, conn);
3884*4882a593Smuzhiyun break;
3885*4882a593Smuzhiyun }
3886*4882a593Smuzhiyun }
3887*4882a593Smuzhiyun
3888*4882a593Smuzhiyun queue_work(hdev->workqueue, &hdev->tx_work);
3889*4882a593Smuzhiyun }
3890*4882a593Smuzhiyun
3891*4882a593Smuzhiyun static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3892*4882a593Smuzhiyun {
3893*4882a593Smuzhiyun struct hci_ev_mode_change *ev = (void *) skb->data;
3894*4882a593Smuzhiyun struct hci_conn *conn;
3895*4882a593Smuzhiyun
3896*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3897*4882a593Smuzhiyun
3898*4882a593Smuzhiyun hci_dev_lock(hdev);
3899*4882a593Smuzhiyun
3900*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3901*4882a593Smuzhiyun if (conn) {
3902*4882a593Smuzhiyun conn->mode = ev->mode;
3903*4882a593Smuzhiyun
3904*4882a593Smuzhiyun if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3905*4882a593Smuzhiyun &conn->flags)) {
3906*4882a593Smuzhiyun if (conn->mode == HCI_CM_ACTIVE)
3907*4882a593Smuzhiyun set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3908*4882a593Smuzhiyun else
3909*4882a593Smuzhiyun clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3910*4882a593Smuzhiyun }
3911*4882a593Smuzhiyun
3912*4882a593Smuzhiyun if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3913*4882a593Smuzhiyun hci_sco_setup(conn, ev->status);
3914*4882a593Smuzhiyun }
3915*4882a593Smuzhiyun
3916*4882a593Smuzhiyun hci_dev_unlock(hdev);
3917*4882a593Smuzhiyun }
3918*4882a593Smuzhiyun
3919*4882a593Smuzhiyun static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3920*4882a593Smuzhiyun {
3921*4882a593Smuzhiyun struct hci_ev_pin_code_req *ev = (void *) skb->data;
3922*4882a593Smuzhiyun struct hci_conn *conn;
3923*4882a593Smuzhiyun
3924*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
3925*4882a593Smuzhiyun
3926*4882a593Smuzhiyun hci_dev_lock(hdev);
3927*4882a593Smuzhiyun
3928*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3929*4882a593Smuzhiyun if (!conn)
3930*4882a593Smuzhiyun goto unlock;
3931*4882a593Smuzhiyun
3932*4882a593Smuzhiyun if (conn->state == BT_CONNECTED) {
3933*4882a593Smuzhiyun hci_conn_hold(conn);
3934*4882a593Smuzhiyun conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3935*4882a593Smuzhiyun hci_conn_drop(conn);
3936*4882a593Smuzhiyun }
3937*4882a593Smuzhiyun
3938*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3939*4882a593Smuzhiyun !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3940*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3941*4882a593Smuzhiyun sizeof(ev->bdaddr), &ev->bdaddr);
3942*4882a593Smuzhiyun } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3943*4882a593Smuzhiyun u8 secure;
3944*4882a593Smuzhiyun
3945*4882a593Smuzhiyun if (conn->pending_sec_level == BT_SECURITY_HIGH)
3946*4882a593Smuzhiyun secure = 1;
3947*4882a593Smuzhiyun else
3948*4882a593Smuzhiyun secure = 0;
3949*4882a593Smuzhiyun
3950*4882a593Smuzhiyun mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3951*4882a593Smuzhiyun }
3952*4882a593Smuzhiyun
3953*4882a593Smuzhiyun unlock:
3954*4882a593Smuzhiyun hci_dev_unlock(hdev);
3955*4882a593Smuzhiyun }
3956*4882a593Smuzhiyun
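/* Record the key type and PIN length on the connection and derive the
 * pending security level from the key type: combination keys created
 * from a 16 digit PIN and authenticated (MITM) keys give HIGH/FIPS,
 * unauthenticated keys give MEDIUM, and unit/debug keys leave the
 * level untouched. A changed-combination key is skipped so that the
 * information from the original key is preserved.
 */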
3957*4882a593Smuzhiyun static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3958*4882a593Smuzhiyun {
3959*4882a593Smuzhiyun if (key_type == HCI_LK_CHANGED_COMBINATION)
3960*4882a593Smuzhiyun return;
3961*4882a593Smuzhiyun
3962*4882a593Smuzhiyun conn->pin_length = pin_len;
3963*4882a593Smuzhiyun conn->key_type = key_type;
3964*4882a593Smuzhiyun
3965*4882a593Smuzhiyun switch (key_type) {
3966*4882a593Smuzhiyun case HCI_LK_LOCAL_UNIT:
3967*4882a593Smuzhiyun case HCI_LK_REMOTE_UNIT:
3968*4882a593Smuzhiyun case HCI_LK_DEBUG_COMBINATION:
3969*4882a593Smuzhiyun return;
3970*4882a593Smuzhiyun case HCI_LK_COMBINATION:
3971*4882a593Smuzhiyun if (pin_len == 16)
3972*4882a593Smuzhiyun conn->pending_sec_level = BT_SECURITY_HIGH;
3973*4882a593Smuzhiyun else
3974*4882a593Smuzhiyun conn->pending_sec_level = BT_SECURITY_MEDIUM;
3975*4882a593Smuzhiyun break;
3976*4882a593Smuzhiyun case HCI_LK_UNAUTH_COMBINATION_P192:
3977*4882a593Smuzhiyun case HCI_LK_UNAUTH_COMBINATION_P256:
3978*4882a593Smuzhiyun conn->pending_sec_level = BT_SECURITY_MEDIUM;
3979*4882a593Smuzhiyun break;
3980*4882a593Smuzhiyun case HCI_LK_AUTH_COMBINATION_P192:
3981*4882a593Smuzhiyun conn->pending_sec_level = BT_SECURITY_HIGH;
3982*4882a593Smuzhiyun break;
3983*4882a593Smuzhiyun case HCI_LK_AUTH_COMBINATION_P256:
3984*4882a593Smuzhiyun conn->pending_sec_level = BT_SECURITY_FIPS;
3985*4882a593Smuzhiyun break;
3986*4882a593Smuzhiyun }
3987*4882a593Smuzhiyun }
3988*4882a593Smuzhiyun
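/* Link Key Request: look up a stored key for the peer and answer with
 * Link Key Request Reply, or with a negative reply when no suitable
 * key exists. Unauthenticated keys are refused when the connection
 * asked for MITM protection, and combination keys created from a PIN
 * shorter than 16 digits are refused for HIGH/FIPS security.
 */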
3989*4882a593Smuzhiyun static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3990*4882a593Smuzhiyun {
3991*4882a593Smuzhiyun struct hci_ev_link_key_req *ev = (void *) skb->data;
3992*4882a593Smuzhiyun struct hci_cp_link_key_reply cp;
3993*4882a593Smuzhiyun struct hci_conn *conn;
3994*4882a593Smuzhiyun struct link_key *key;
3995*4882a593Smuzhiyun
3996*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
3997*4882a593Smuzhiyun
3998*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_MGMT))
3999*4882a593Smuzhiyun return;
4000*4882a593Smuzhiyun
4001*4882a593Smuzhiyun hci_dev_lock(hdev);
4002*4882a593Smuzhiyun
4003*4882a593Smuzhiyun key = hci_find_link_key(hdev, &ev->bdaddr);
4004*4882a593Smuzhiyun if (!key) {
4005*4882a593Smuzhiyun BT_DBG("%s link key not found for %pMR", hdev->name,
4006*4882a593Smuzhiyun &ev->bdaddr);
4007*4882a593Smuzhiyun goto not_found;
4008*4882a593Smuzhiyun }
4009*4882a593Smuzhiyun
4010*4882a593Smuzhiyun BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4011*4882a593Smuzhiyun &ev->bdaddr);
4012*4882a593Smuzhiyun
4013*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4014*4882a593Smuzhiyun if (conn) {
4015*4882a593Smuzhiyun clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4016*4882a593Smuzhiyun
4017*4882a593Smuzhiyun if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4018*4882a593Smuzhiyun key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4019*4882a593Smuzhiyun conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4020*4882a593Smuzhiyun BT_DBG("%s ignoring unauthenticated key", hdev->name);
4021*4882a593Smuzhiyun goto not_found;
4022*4882a593Smuzhiyun }
4023*4882a593Smuzhiyun
4024*4882a593Smuzhiyun if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4025*4882a593Smuzhiyun (conn->pending_sec_level == BT_SECURITY_HIGH ||
4026*4882a593Smuzhiyun conn->pending_sec_level == BT_SECURITY_FIPS)) {
4027*4882a593Smuzhiyun BT_DBG("%s ignoring key unauthenticated for high security",
4028*4882a593Smuzhiyun hdev->name);
4029*4882a593Smuzhiyun goto not_found;
4030*4882a593Smuzhiyun }
4031*4882a593Smuzhiyun
4032*4882a593Smuzhiyun conn_set_key(conn, key->type, key->pin_len);
4033*4882a593Smuzhiyun }
4034*4882a593Smuzhiyun
4035*4882a593Smuzhiyun bacpy(&cp.bdaddr, &ev->bdaddr);
4036*4882a593Smuzhiyun memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4037*4882a593Smuzhiyun
4038*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4039*4882a593Smuzhiyun
4040*4882a593Smuzhiyun hci_dev_unlock(hdev);
4041*4882a593Smuzhiyun
4042*4882a593Smuzhiyun return;
4043*4882a593Smuzhiyun
4044*4882a593Smuzhiyun not_found:
4045*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4046*4882a593Smuzhiyun hci_dev_unlock(hdev);
4047*4882a593Smuzhiyun }
4048*4882a593Smuzhiyun
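/* Link Key Notification: the controller reports a new or changed link
 * key. Store it and notify mgmt, update the connection's security
 * state, drop debug keys unless HCI_KEEP_DEBUG_KEYS is set, and mark
 * whether the key should be flushed when the connection goes down.
 */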
4049*4882a593Smuzhiyun static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4050*4882a593Smuzhiyun {
4051*4882a593Smuzhiyun struct hci_ev_link_key_notify *ev = (void *) skb->data;
4052*4882a593Smuzhiyun struct hci_conn *conn;
4053*4882a593Smuzhiyun struct link_key *key;
4054*4882a593Smuzhiyun bool persistent;
4055*4882a593Smuzhiyun u8 pin_len = 0;
4056*4882a593Smuzhiyun
4057*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4058*4882a593Smuzhiyun
4059*4882a593Smuzhiyun hci_dev_lock(hdev);
4060*4882a593Smuzhiyun
4061*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4062*4882a593Smuzhiyun if (!conn)
4063*4882a593Smuzhiyun goto unlock;
4064*4882a593Smuzhiyun
4065*4882a593Smuzhiyun hci_conn_hold(conn);
4066*4882a593Smuzhiyun conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4067*4882a593Smuzhiyun hci_conn_drop(conn);
4068*4882a593Smuzhiyun
4069*4882a593Smuzhiyun set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4070*4882a593Smuzhiyun conn_set_key(conn, ev->key_type, conn->pin_length);
4071*4882a593Smuzhiyun
4072*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_MGMT))
4073*4882a593Smuzhiyun goto unlock;
4074*4882a593Smuzhiyun
4075*4882a593Smuzhiyun key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4076*4882a593Smuzhiyun ev->key_type, pin_len, &persistent);
4077*4882a593Smuzhiyun if (!key)
4078*4882a593Smuzhiyun goto unlock;
4079*4882a593Smuzhiyun
4080*4882a593Smuzhiyun /* Update connection information since adding the key will have
4081*4882a593Smuzhiyun * fixed up the type in the case of changed combination keys.
4082*4882a593Smuzhiyun */
4083*4882a593Smuzhiyun if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4084*4882a593Smuzhiyun conn_set_key(conn, key->type, key->pin_len);
4085*4882a593Smuzhiyun
4086*4882a593Smuzhiyun mgmt_new_link_key(hdev, key, persistent);
4087*4882a593Smuzhiyun
4088*4882a593Smuzhiyun /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4089*4882a593Smuzhiyun * is set. If it's not set simply remove the key from the kernel
4090*4882a593Smuzhiyun * list (we've still notified user space about it but with
4091*4882a593Smuzhiyun * store_hint being 0).
4092*4882a593Smuzhiyun */
4093*4882a593Smuzhiyun if (key->type == HCI_LK_DEBUG_COMBINATION &&
4094*4882a593Smuzhiyun !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4095*4882a593Smuzhiyun list_del_rcu(&key->list);
4096*4882a593Smuzhiyun kfree_rcu(key, rcu);
4097*4882a593Smuzhiyun goto unlock;
4098*4882a593Smuzhiyun }
4099*4882a593Smuzhiyun
4100*4882a593Smuzhiyun if (persistent)
4101*4882a593Smuzhiyun clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4102*4882a593Smuzhiyun else
4103*4882a593Smuzhiyun set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4104*4882a593Smuzhiyun
4105*4882a593Smuzhiyun unlock:
4106*4882a593Smuzhiyun hci_dev_unlock(hdev);
4107*4882a593Smuzhiyun }
4108*4882a593Smuzhiyun
4109*4882a593Smuzhiyun static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4110*4882a593Smuzhiyun {
4111*4882a593Smuzhiyun struct hci_ev_clock_offset *ev = (void *) skb->data;
4112*4882a593Smuzhiyun struct hci_conn *conn;
4113*4882a593Smuzhiyun
4114*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4115*4882a593Smuzhiyun
4116*4882a593Smuzhiyun hci_dev_lock(hdev);
4117*4882a593Smuzhiyun
4118*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4119*4882a593Smuzhiyun if (conn && !ev->status) {
4120*4882a593Smuzhiyun struct inquiry_entry *ie;
4121*4882a593Smuzhiyun
4122*4882a593Smuzhiyun ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4123*4882a593Smuzhiyun if (ie) {
4124*4882a593Smuzhiyun ie->data.clock_offset = ev->clock_offset;
4125*4882a593Smuzhiyun ie->timestamp = jiffies;
4126*4882a593Smuzhiyun }
4127*4882a593Smuzhiyun }
4128*4882a593Smuzhiyun
4129*4882a593Smuzhiyun hci_dev_unlock(hdev);
4130*4882a593Smuzhiyun }
4131*4882a593Smuzhiyun
4132*4882a593Smuzhiyun static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4133*4882a593Smuzhiyun {
4134*4882a593Smuzhiyun struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4135*4882a593Smuzhiyun struct hci_conn *conn;
4136*4882a593Smuzhiyun
4137*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4138*4882a593Smuzhiyun
4139*4882a593Smuzhiyun hci_dev_lock(hdev);
4140*4882a593Smuzhiyun
4141*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4142*4882a593Smuzhiyun if (conn && !ev->status)
4143*4882a593Smuzhiyun conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4144*4882a593Smuzhiyun
4145*4882a593Smuzhiyun hci_dev_unlock(hdev);
4146*4882a593Smuzhiyun }
4147*4882a593Smuzhiyun
4148*4882a593Smuzhiyun static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4149*4882a593Smuzhiyun {
4150*4882a593Smuzhiyun struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4151*4882a593Smuzhiyun struct inquiry_entry *ie;
4152*4882a593Smuzhiyun
4153*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4154*4882a593Smuzhiyun
4155*4882a593Smuzhiyun hci_dev_lock(hdev);
4156*4882a593Smuzhiyun
4157*4882a593Smuzhiyun ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4158*4882a593Smuzhiyun if (ie) {
4159*4882a593Smuzhiyun ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4160*4882a593Smuzhiyun ie->timestamp = jiffies;
4161*4882a593Smuzhiyun }
4162*4882a593Smuzhiyun
4163*4882a593Smuzhiyun hci_dev_unlock(hdev);
4164*4882a593Smuzhiyun }
4165*4882a593Smuzhiyun
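/* Inquiry Result with RSSI comes in two layouts that differ only in
 * whether a pscan_mode byte is included; they are told apart by the
 * per-response size. Each response updates the inquiry cache and is
 * forwarded to mgmt as a found device.
 */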
4166*4882a593Smuzhiyun static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4167*4882a593Smuzhiyun struct sk_buff *skb)
4168*4882a593Smuzhiyun {
4169*4882a593Smuzhiyun struct inquiry_data data;
4170*4882a593Smuzhiyun int num_rsp = *((__u8 *) skb->data);
4171*4882a593Smuzhiyun
4172*4882a593Smuzhiyun BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4173*4882a593Smuzhiyun
4174*4882a593Smuzhiyun if (!num_rsp)
4175*4882a593Smuzhiyun return;
4176*4882a593Smuzhiyun
4177*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4178*4882a593Smuzhiyun return;
4179*4882a593Smuzhiyun
4180*4882a593Smuzhiyun hci_dev_lock(hdev);
4181*4882a593Smuzhiyun
4182*4882a593Smuzhiyun if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4183*4882a593Smuzhiyun struct inquiry_info_with_rssi_and_pscan_mode *info;
4184*4882a593Smuzhiyun info = (void *) (skb->data + 1);
4185*4882a593Smuzhiyun
4186*4882a593Smuzhiyun if (skb->len < num_rsp * sizeof(*info) + 1)
4187*4882a593Smuzhiyun goto unlock;
4188*4882a593Smuzhiyun
4189*4882a593Smuzhiyun for (; num_rsp; num_rsp--, info++) {
4190*4882a593Smuzhiyun u32 flags;
4191*4882a593Smuzhiyun
4192*4882a593Smuzhiyun bacpy(&data.bdaddr, &info->bdaddr);
4193*4882a593Smuzhiyun data.pscan_rep_mode = info->pscan_rep_mode;
4194*4882a593Smuzhiyun data.pscan_period_mode = info->pscan_period_mode;
4195*4882a593Smuzhiyun data.pscan_mode = info->pscan_mode;
4196*4882a593Smuzhiyun memcpy(data.dev_class, info->dev_class, 3);
4197*4882a593Smuzhiyun data.clock_offset = info->clock_offset;
4198*4882a593Smuzhiyun data.rssi = info->rssi;
4199*4882a593Smuzhiyun data.ssp_mode = 0x00;
4200*4882a593Smuzhiyun
4201*4882a593Smuzhiyun flags = hci_inquiry_cache_update(hdev, &data, false);
4202*4882a593Smuzhiyun
4203*4882a593Smuzhiyun mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4204*4882a593Smuzhiyun info->dev_class, info->rssi,
4205*4882a593Smuzhiyun flags, NULL, 0, NULL, 0);
4206*4882a593Smuzhiyun }
4207*4882a593Smuzhiyun } else {
4208*4882a593Smuzhiyun struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4209*4882a593Smuzhiyun
4210*4882a593Smuzhiyun if (skb->len < num_rsp * sizeof(*info) + 1)
4211*4882a593Smuzhiyun goto unlock;
4212*4882a593Smuzhiyun
4213*4882a593Smuzhiyun for (; num_rsp; num_rsp--, info++) {
4214*4882a593Smuzhiyun u32 flags;
4215*4882a593Smuzhiyun
4216*4882a593Smuzhiyun bacpy(&data.bdaddr, &info->bdaddr);
4217*4882a593Smuzhiyun data.pscan_rep_mode = info->pscan_rep_mode;
4218*4882a593Smuzhiyun data.pscan_period_mode = info->pscan_period_mode;
4219*4882a593Smuzhiyun data.pscan_mode = 0x00;
4220*4882a593Smuzhiyun memcpy(data.dev_class, info->dev_class, 3);
4221*4882a593Smuzhiyun data.clock_offset = info->clock_offset;
4222*4882a593Smuzhiyun data.rssi = info->rssi;
4223*4882a593Smuzhiyun data.ssp_mode = 0x00;
4224*4882a593Smuzhiyun
4225*4882a593Smuzhiyun flags = hci_inquiry_cache_update(hdev, &data, false);
4226*4882a593Smuzhiyun
4227*4882a593Smuzhiyun mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4228*4882a593Smuzhiyun info->dev_class, info->rssi,
4229*4882a593Smuzhiyun flags, NULL, 0, NULL, 0);
4230*4882a593Smuzhiyun }
4231*4882a593Smuzhiyun }
4232*4882a593Smuzhiyun
4233*4882a593Smuzhiyun unlock:
4234*4882a593Smuzhiyun hci_dev_unlock(hdev);
4235*4882a593Smuzhiyun }
4236*4882a593Smuzhiyun
4237*4882a593Smuzhiyun static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4238*4882a593Smuzhiyun struct sk_buff *skb)
4239*4882a593Smuzhiyun {
4240*4882a593Smuzhiyun struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4241*4882a593Smuzhiyun struct hci_conn *conn;
4242*4882a593Smuzhiyun
4243*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4244*4882a593Smuzhiyun
4245*4882a593Smuzhiyun hci_dev_lock(hdev);
4246*4882a593Smuzhiyun
4247*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4248*4882a593Smuzhiyun if (!conn)
4249*4882a593Smuzhiyun goto unlock;
4250*4882a593Smuzhiyun
4251*4882a593Smuzhiyun if (ev->page < HCI_MAX_PAGES)
4252*4882a593Smuzhiyun memcpy(conn->features[ev->page], ev->features, 8);
4253*4882a593Smuzhiyun
4254*4882a593Smuzhiyun if (!ev->status && ev->page == 0x01) {
4255*4882a593Smuzhiyun struct inquiry_entry *ie;
4256*4882a593Smuzhiyun
4257*4882a593Smuzhiyun ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4258*4882a593Smuzhiyun if (ie)
4259*4882a593Smuzhiyun ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4260*4882a593Smuzhiyun
4261*4882a593Smuzhiyun if (ev->features[0] & LMP_HOST_SSP) {
4262*4882a593Smuzhiyun set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4263*4882a593Smuzhiyun } else {
4264*4882a593Smuzhiyun /* It is mandatory by the Bluetooth specification that
4265*4882a593Smuzhiyun * Extended Inquiry Results are only used when Secure
4266*4882a593Smuzhiyun * Simple Pairing is enabled, but some devices violate
4267*4882a593Smuzhiyun * this.
4268*4882a593Smuzhiyun *
4269*4882a593Smuzhiyun * To make these devices work, the internal SSP
4270*4882a593Smuzhiyun * enabled flag needs to be cleared if the remote host
4271*4882a593Smuzhiyun * features do not indicate SSP support */
4272*4882a593Smuzhiyun clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4273*4882a593Smuzhiyun }
4274*4882a593Smuzhiyun
4275*4882a593Smuzhiyun if (ev->features[0] & LMP_HOST_SC)
4276*4882a593Smuzhiyun set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4277*4882a593Smuzhiyun }
4278*4882a593Smuzhiyun
4279*4882a593Smuzhiyun if (conn->state != BT_CONFIG)
4280*4882a593Smuzhiyun goto unlock;
4281*4882a593Smuzhiyun
4282*4882a593Smuzhiyun if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4283*4882a593Smuzhiyun struct hci_cp_remote_name_req cp;
4284*4882a593Smuzhiyun memset(&cp, 0, sizeof(cp));
4285*4882a593Smuzhiyun bacpy(&cp.bdaddr, &conn->dst);
4286*4882a593Smuzhiyun cp.pscan_rep_mode = 0x02;
4287*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4288*4882a593Smuzhiyun } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4289*4882a593Smuzhiyun mgmt_device_connected(hdev, conn, 0, NULL, 0);
4290*4882a593Smuzhiyun
4291*4882a593Smuzhiyun if (!hci_outgoing_auth_needed(hdev, conn)) {
4292*4882a593Smuzhiyun conn->state = BT_CONNECTED;
4293*4882a593Smuzhiyun hci_connect_cfm(conn, ev->status);
4294*4882a593Smuzhiyun hci_conn_drop(conn);
4295*4882a593Smuzhiyun }
4296*4882a593Smuzhiyun
4297*4882a593Smuzhiyun unlock:
4298*4882a593Smuzhiyun hci_dev_unlock(hdev);
4299*4882a593Smuzhiyun }
4300*4882a593Smuzhiyun
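/* Synchronous Connection Complete: finalize a SCO/eSCO connection.
 * On success the handle is recorded and the connection moves to
 * BT_CONNECTED; on a set of negotiation failures an outgoing eSCO
 * attempt is retried with a reduced packet type mask; any other
 * failure closes the connection. The air mode tells the driver
 * whether CVSD or transparent SCO routing is in use.
 */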
4301*4882a593Smuzhiyun static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4302*4882a593Smuzhiyun struct sk_buff *skb)
4303*4882a593Smuzhiyun {
4304*4882a593Smuzhiyun struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4305*4882a593Smuzhiyun struct hci_conn *conn;
4306*4882a593Smuzhiyun
4307*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4308*4882a593Smuzhiyun
4309*4882a593Smuzhiyun hci_dev_lock(hdev);
4310*4882a593Smuzhiyun
4311*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4312*4882a593Smuzhiyun if (!conn) {
4313*4882a593Smuzhiyun if (ev->link_type == ESCO_LINK)
4314*4882a593Smuzhiyun goto unlock;
4315*4882a593Smuzhiyun
4316*4882a593Smuzhiyun /* When the link type in the event indicates SCO connection
4317*4882a593Smuzhiyun * and lookup of the connection object fails, then check
4318*4882a593Smuzhiyun * if an eSCO connection object exists.
4319*4882a593Smuzhiyun *
4320*4882a593Smuzhiyun * The core limits synchronous connections to either
4321*4882a593Smuzhiyun * SCO or eSCO. The eSCO connection is preferred and is
4322*4882a593Smuzhiyun * attempted first; until it is successfully established,
4323*4882a593Smuzhiyun * the link type will be hinted as eSCO.
4324*4882a593Smuzhiyun */
4325*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4326*4882a593Smuzhiyun if (!conn)
4327*4882a593Smuzhiyun goto unlock;
4328*4882a593Smuzhiyun }
4329*4882a593Smuzhiyun
4330*4882a593Smuzhiyun switch (ev->status) {
4331*4882a593Smuzhiyun case 0x00:
4332*4882a593Smuzhiyun /* The synchronous connection complete event should only be
4333*4882a593Smuzhiyun * sent once per new connection. Receiving a successful
4334*4882a593Smuzhiyun * complete event when the connection status is already
4335*4882a593Smuzhiyun * BT_CONNECTED means that the device is misbehaving and sent
4336*4882a593Smuzhiyun * multiple complete event packets for the same new connection.
4337*4882a593Smuzhiyun *
4338*4882a593Smuzhiyun * Registering the device more than once can corrupt kernel
4339*4882a593Smuzhiyun * memory, hence upon detecting this invalid event, we report
4340*4882a593Smuzhiyun * an error and ignore the packet.
4341*4882a593Smuzhiyun */
4342*4882a593Smuzhiyun if (conn->state == BT_CONNECTED) {
4343*4882a593Smuzhiyun bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4344*4882a593Smuzhiyun goto unlock;
4345*4882a593Smuzhiyun }
4346*4882a593Smuzhiyun
4347*4882a593Smuzhiyun conn->handle = __le16_to_cpu(ev->handle);
4348*4882a593Smuzhiyun conn->state = BT_CONNECTED;
4349*4882a593Smuzhiyun conn->type = ev->link_type;
4350*4882a593Smuzhiyun
4351*4882a593Smuzhiyun hci_debugfs_create_conn(conn);
4352*4882a593Smuzhiyun hci_conn_add_sysfs(conn);
4353*4882a593Smuzhiyun break;
4354*4882a593Smuzhiyun
4355*4882a593Smuzhiyun case 0x10: /* Connection Accept Timeout */
4356*4882a593Smuzhiyun case 0x0d: /* Connection Rejected due to Limited Resources */
4357*4882a593Smuzhiyun case 0x11: /* Unsupported Feature or Parameter Value */
4358*4882a593Smuzhiyun case 0x1c: /* SCO interval rejected */
4359*4882a593Smuzhiyun case 0x1a: /* Unsupported Remote Feature */
4360*4882a593Smuzhiyun case 0x1e: /* Invalid LMP Parameters */
4361*4882a593Smuzhiyun case 0x1f: /* Unspecified error */
4362*4882a593Smuzhiyun case 0x20: /* Unsupported LMP Parameter value */
4363*4882a593Smuzhiyun if (conn->out) {
4364*4882a593Smuzhiyun conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4365*4882a593Smuzhiyun (hdev->esco_type & EDR_ESCO_MASK);
4366*4882a593Smuzhiyun if (hci_setup_sync(conn, conn->link->handle))
4367*4882a593Smuzhiyun goto unlock;
4368*4882a593Smuzhiyun }
4369*4882a593Smuzhiyun fallthrough;
4370*4882a593Smuzhiyun
4371*4882a593Smuzhiyun default:
4372*4882a593Smuzhiyun conn->state = BT_CLOSED;
4373*4882a593Smuzhiyun break;
4374*4882a593Smuzhiyun }
4375*4882a593Smuzhiyun
4376*4882a593Smuzhiyun bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4377*4882a593Smuzhiyun
4378*4882a593Smuzhiyun switch (ev->air_mode) {
4379*4882a593Smuzhiyun case 0x02:
4380*4882a593Smuzhiyun if (hdev->notify)
4381*4882a593Smuzhiyun hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4382*4882a593Smuzhiyun break;
4383*4882a593Smuzhiyun case 0x03:
4384*4882a593Smuzhiyun if (hdev->notify)
4385*4882a593Smuzhiyun hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4386*4882a593Smuzhiyun break;
4387*4882a593Smuzhiyun }
4388*4882a593Smuzhiyun
4389*4882a593Smuzhiyun hci_connect_cfm(conn, ev->status);
4390*4882a593Smuzhiyun if (ev->status)
4391*4882a593Smuzhiyun hci_conn_del(conn);
4392*4882a593Smuzhiyun
4393*4882a593Smuzhiyun unlock:
4394*4882a593Smuzhiyun hci_dev_unlock(hdev);
4395*4882a593Smuzhiyun }
4396*4882a593Smuzhiyun
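/* EIR data is a sequence of length-prefixed structures of the form
 * [len][type][data...], where len counts the type byte plus the data.
 * Hypothetical example: 02 01 06  06 09 'B' 'l' 'u' 'e' 'Z' encodes a
 * Flags field followed by a complete local name. Parsing stops at the
 * first zero-length field or when eir_len is reached.
 */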
4397*4882a593Smuzhiyun static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4398*4882a593Smuzhiyun {
4399*4882a593Smuzhiyun size_t parsed = 0;
4400*4882a593Smuzhiyun
4401*4882a593Smuzhiyun while (parsed < eir_len) {
4402*4882a593Smuzhiyun u8 field_len = eir[0];
4403*4882a593Smuzhiyun
4404*4882a593Smuzhiyun if (field_len == 0)
4405*4882a593Smuzhiyun return parsed;
4406*4882a593Smuzhiyun
4407*4882a593Smuzhiyun parsed += field_len + 1;
4408*4882a593Smuzhiyun eir += field_len + 1;
4409*4882a593Smuzhiyun }
4410*4882a593Smuzhiyun
4411*4882a593Smuzhiyun return eir_len;
4412*4882a593Smuzhiyun }
4413*4882a593Smuzhiyun
4414*4882a593Smuzhiyun static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4415*4882a593Smuzhiyun struct sk_buff *skb)
4416*4882a593Smuzhiyun {
4417*4882a593Smuzhiyun struct inquiry_data data;
4418*4882a593Smuzhiyun struct extended_inquiry_info *info = (void *) (skb->data + 1);
4419*4882a593Smuzhiyun int num_rsp = *((__u8 *) skb->data);
4420*4882a593Smuzhiyun size_t eir_len;
4421*4882a593Smuzhiyun
4422*4882a593Smuzhiyun BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4423*4882a593Smuzhiyun
4424*4882a593Smuzhiyun if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4425*4882a593Smuzhiyun return;
4426*4882a593Smuzhiyun
4427*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4428*4882a593Smuzhiyun return;
4429*4882a593Smuzhiyun
4430*4882a593Smuzhiyun hci_dev_lock(hdev);
4431*4882a593Smuzhiyun
4432*4882a593Smuzhiyun for (; num_rsp; num_rsp--, info++) {
4433*4882a593Smuzhiyun u32 flags;
4434*4882a593Smuzhiyun bool name_known;
4435*4882a593Smuzhiyun
4436*4882a593Smuzhiyun bacpy(&data.bdaddr, &info->bdaddr);
4437*4882a593Smuzhiyun data.pscan_rep_mode = info->pscan_rep_mode;
4438*4882a593Smuzhiyun data.pscan_period_mode = info->pscan_period_mode;
4439*4882a593Smuzhiyun data.pscan_mode = 0x00;
4440*4882a593Smuzhiyun memcpy(data.dev_class, info->dev_class, 3);
4441*4882a593Smuzhiyun data.clock_offset = info->clock_offset;
4442*4882a593Smuzhiyun data.rssi = info->rssi;
4443*4882a593Smuzhiyun data.ssp_mode = 0x01;
4444*4882a593Smuzhiyun
4445*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_MGMT))
4446*4882a593Smuzhiyun name_known = eir_get_data(info->data,
4447*4882a593Smuzhiyun sizeof(info->data),
4448*4882a593Smuzhiyun EIR_NAME_COMPLETE, NULL);
4449*4882a593Smuzhiyun else
4450*4882a593Smuzhiyun name_known = true;
4451*4882a593Smuzhiyun
4452*4882a593Smuzhiyun flags = hci_inquiry_cache_update(hdev, &data, name_known);
4453*4882a593Smuzhiyun
4454*4882a593Smuzhiyun eir_len = eir_get_length(info->data, sizeof(info->data));
4455*4882a593Smuzhiyun
4456*4882a593Smuzhiyun mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4457*4882a593Smuzhiyun info->dev_class, info->rssi,
4458*4882a593Smuzhiyun flags, info->data, eir_len, NULL, 0);
4459*4882a593Smuzhiyun }
4460*4882a593Smuzhiyun
4461*4882a593Smuzhiyun hci_dev_unlock(hdev);
4462*4882a593Smuzhiyun }
4463*4882a593Smuzhiyun
4464*4882a593Smuzhiyun static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4465*4882a593Smuzhiyun struct sk_buff *skb)
4466*4882a593Smuzhiyun {
4467*4882a593Smuzhiyun struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4468*4882a593Smuzhiyun struct hci_conn *conn;
4469*4882a593Smuzhiyun
4470*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4471*4882a593Smuzhiyun __le16_to_cpu(ev->handle));
4472*4882a593Smuzhiyun
4473*4882a593Smuzhiyun hci_dev_lock(hdev);
4474*4882a593Smuzhiyun
4475*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4476*4882a593Smuzhiyun if (!conn)
4477*4882a593Smuzhiyun goto unlock;
4478*4882a593Smuzhiyun
4479*4882a593Smuzhiyun /* For BR/EDR the necessary steps are taken through the
4480*4882a593Smuzhiyun * auth_complete event.
4481*4882a593Smuzhiyun */
4482*4882a593Smuzhiyun if (conn->type != LE_LINK)
4483*4882a593Smuzhiyun goto unlock;
4484*4882a593Smuzhiyun
4485*4882a593Smuzhiyun if (!ev->status)
4486*4882a593Smuzhiyun conn->sec_level = conn->pending_sec_level;
4487*4882a593Smuzhiyun
4488*4882a593Smuzhiyun clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4489*4882a593Smuzhiyun
4490*4882a593Smuzhiyun if (ev->status && conn->state == BT_CONNECTED) {
4491*4882a593Smuzhiyun hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4492*4882a593Smuzhiyun hci_conn_drop(conn);
4493*4882a593Smuzhiyun goto unlock;
4494*4882a593Smuzhiyun }
4495*4882a593Smuzhiyun
4496*4882a593Smuzhiyun if (conn->state == BT_CONFIG) {
4497*4882a593Smuzhiyun if (!ev->status)
4498*4882a593Smuzhiyun conn->state = BT_CONNECTED;
4499*4882a593Smuzhiyun
4500*4882a593Smuzhiyun hci_connect_cfm(conn, ev->status);
4501*4882a593Smuzhiyun hci_conn_drop(conn);
4502*4882a593Smuzhiyun } else {
4503*4882a593Smuzhiyun hci_auth_cfm(conn, ev->status);
4504*4882a593Smuzhiyun
4505*4882a593Smuzhiyun hci_conn_hold(conn);
4506*4882a593Smuzhiyun conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4507*4882a593Smuzhiyun hci_conn_drop(conn);
4508*4882a593Smuzhiyun }
4509*4882a593Smuzhiyun
4510*4882a593Smuzhiyun unlock:
4511*4882a593Smuzhiyun hci_dev_unlock(hdev);
4512*4882a593Smuzhiyun }
4513*4882a593Smuzhiyun
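/* Compute the authentication requirements to send in our IO
 * Capability Reply, based on what the remote requested and what the
 * local and remote IO capabilities can support: MITM protection is
 * only requested when both sides have usable input/output capability.
 */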
4514*4882a593Smuzhiyun static u8 hci_get_auth_req(struct hci_conn *conn)
4515*4882a593Smuzhiyun {
4516*4882a593Smuzhiyun /* If remote requests no-bonding follow that lead */
4517*4882a593Smuzhiyun if (conn->remote_auth == HCI_AT_NO_BONDING ||
4518*4882a593Smuzhiyun conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4519*4882a593Smuzhiyun return conn->remote_auth | (conn->auth_type & 0x01);
4520*4882a593Smuzhiyun
4521*4882a593Smuzhiyun /* If both remote and local have enough IO capabilities, require
4522*4882a593Smuzhiyun * MITM protection
4523*4882a593Smuzhiyun */
4524*4882a593Smuzhiyun if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4525*4882a593Smuzhiyun conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4526*4882a593Smuzhiyun return conn->remote_auth | 0x01;
4527*4882a593Smuzhiyun
4528*4882a593Smuzhiyun /* No MITM protection possible so ignore remote requirement */
4529*4882a593Smuzhiyun return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4530*4882a593Smuzhiyun }
4531*4882a593Smuzhiyun
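/* Return the "OOB data present" value used in the IO Capability
 * Reply: 0x00 when no usable OOB data is stored, 0x01 when only the
 * P-192 values are valid, 0x02 when P-256 values are required and
 * valid (Secure Connections Only mode), or the stored 'present' value
 * when Secure Connections is enabled and not in SC Only mode.
 */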
4532*4882a593Smuzhiyun static u8 bredr_oob_data_present(struct hci_conn *conn)
4533*4882a593Smuzhiyun {
4534*4882a593Smuzhiyun struct hci_dev *hdev = conn->hdev;
4535*4882a593Smuzhiyun struct oob_data *data;
4536*4882a593Smuzhiyun
4537*4882a593Smuzhiyun data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4538*4882a593Smuzhiyun if (!data)
4539*4882a593Smuzhiyun return 0x00;
4540*4882a593Smuzhiyun
4541*4882a593Smuzhiyun if (bredr_sc_enabled(hdev)) {
4542*4882a593Smuzhiyun /* When Secure Connections is enabled, then just
4543*4882a593Smuzhiyun * return the present value stored with the OOB
4544*4882a593Smuzhiyun * data. The stored value contains the right present
4545*4882a593Smuzhiyun * information. However it can only be trusted when
4546*4882a593Smuzhiyun * not in Secure Connection Only mode.
4547*4882a593Smuzhiyun */
4548*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4549*4882a593Smuzhiyun return data->present;
4550*4882a593Smuzhiyun
4551*4882a593Smuzhiyun /* When Secure Connections Only mode is enabled, then
4552*4882a593Smuzhiyun * the P-256 values are required. If they are not
4553*4882a593Smuzhiyun * available, then do not declare that OOB data is
4554*4882a593Smuzhiyun * present.
4555*4882a593Smuzhiyun */
4556*4882a593Smuzhiyun if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4557*4882a593Smuzhiyun !memcmp(data->hash256, ZERO_KEY, 16))
4558*4882a593Smuzhiyun return 0x00;
4559*4882a593Smuzhiyun
4560*4882a593Smuzhiyun return 0x02;
4561*4882a593Smuzhiyun }
4562*4882a593Smuzhiyun
4563*4882a593Smuzhiyun /* When Secure Connections is not enabled or not actually
4564*4882a593Smuzhiyun * supported by the hardware, check whether the P-192 data
4565*4882a593Smuzhiyun * values are present.
4566*4882a593Smuzhiyun */
4567*4882a593Smuzhiyun if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4568*4882a593Smuzhiyun !memcmp(data->hash192, ZERO_KEY, 16))
4569*4882a593Smuzhiyun return 0x00;
4570*4882a593Smuzhiyun
4571*4882a593Smuzhiyun return 0x01;
4572*4882a593Smuzhiyun }
4573*4882a593Smuzhiyun
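/* IO Capability Request: if pairing is acceptable (we are bondable,
 * we initiated it, or the remote is not bonding) reply with our IO
 * capability, authentication requirements and OOB data availability;
 * otherwise send a negative reply with "pairing not allowed".
 */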
4574*4882a593Smuzhiyun static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4575*4882a593Smuzhiyun {
4576*4882a593Smuzhiyun struct hci_ev_io_capa_request *ev = (void *) skb->data;
4577*4882a593Smuzhiyun struct hci_conn *conn;
4578*4882a593Smuzhiyun
4579*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4580*4882a593Smuzhiyun
4581*4882a593Smuzhiyun hci_dev_lock(hdev);
4582*4882a593Smuzhiyun
4583*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4584*4882a593Smuzhiyun if (!conn)
4585*4882a593Smuzhiyun goto unlock;
4586*4882a593Smuzhiyun
4587*4882a593Smuzhiyun hci_conn_hold(conn);
4588*4882a593Smuzhiyun
4589*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_MGMT))
4590*4882a593Smuzhiyun goto unlock;
4591*4882a593Smuzhiyun
4592*4882a593Smuzhiyun /* Allow pairing if we're bondable, if we initiated the
4593*4882a593Smuzhiyun * pairing, or if the remote is not requesting bonding.
4594*4882a593Smuzhiyun */
4595*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4596*4882a593Smuzhiyun test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4597*4882a593Smuzhiyun (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4598*4882a593Smuzhiyun struct hci_cp_io_capability_reply cp;
4599*4882a593Smuzhiyun
4600*4882a593Smuzhiyun bacpy(&cp.bdaddr, &ev->bdaddr);
4601*4882a593Smuzhiyun /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
4602*4882a593Smuzhiyun * since KeyboardDisplay is not a valid value for this reply per the BT spec. */
4603*4882a593Smuzhiyun cp.capability = (conn->io_capability == 0x04) ?
4604*4882a593Smuzhiyun HCI_IO_DISPLAY_YESNO : conn->io_capability;
4605*4882a593Smuzhiyun
4606*4882a593Smuzhiyun /* If we are initiators, there is no remote information yet */
4607*4882a593Smuzhiyun if (conn->remote_auth == 0xff) {
4608*4882a593Smuzhiyun /* Request MITM protection if our IO caps allow it
4609*4882a593Smuzhiyun * except for the no-bonding case.
4610*4882a593Smuzhiyun */
4611*4882a593Smuzhiyun if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4612*4882a593Smuzhiyun conn->auth_type != HCI_AT_NO_BONDING)
4613*4882a593Smuzhiyun conn->auth_type |= 0x01;
4614*4882a593Smuzhiyun } else {
4615*4882a593Smuzhiyun conn->auth_type = hci_get_auth_req(conn);
4616*4882a593Smuzhiyun }
4617*4882a593Smuzhiyun
4618*4882a593Smuzhiyun /* If we're not bondable, force one of the non-bondable
4619*4882a593Smuzhiyun * authentication requirement values.
4620*4882a593Smuzhiyun */
4621*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4622*4882a593Smuzhiyun conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4623*4882a593Smuzhiyun
4624*4882a593Smuzhiyun cp.authentication = conn->auth_type;
4625*4882a593Smuzhiyun cp.oob_data = bredr_oob_data_present(conn);
4626*4882a593Smuzhiyun
4627*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4628*4882a593Smuzhiyun sizeof(cp), &cp);
4629*4882a593Smuzhiyun } else {
4630*4882a593Smuzhiyun struct hci_cp_io_capability_neg_reply cp;
4631*4882a593Smuzhiyun
4632*4882a593Smuzhiyun bacpy(&cp.bdaddr, &ev->bdaddr);
4633*4882a593Smuzhiyun cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4634*4882a593Smuzhiyun
4635*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4636*4882a593Smuzhiyun sizeof(cp), &cp);
4637*4882a593Smuzhiyun }
4638*4882a593Smuzhiyun
4639*4882a593Smuzhiyun unlock:
4640*4882a593Smuzhiyun hci_dev_unlock(hdev);
4641*4882a593Smuzhiyun }
4642*4882a593Smuzhiyun
4643*4882a593Smuzhiyun static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4644*4882a593Smuzhiyun {
4645*4882a593Smuzhiyun struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4646*4882a593Smuzhiyun struct hci_conn *conn;
4647*4882a593Smuzhiyun
4648*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4649*4882a593Smuzhiyun
4650*4882a593Smuzhiyun hci_dev_lock(hdev);
4651*4882a593Smuzhiyun
4652*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4653*4882a593Smuzhiyun if (!conn)
4654*4882a593Smuzhiyun goto unlock;
4655*4882a593Smuzhiyun
4656*4882a593Smuzhiyun conn->remote_cap = ev->capability;
4657*4882a593Smuzhiyun conn->remote_auth = ev->authentication;
4658*4882a593Smuzhiyun
4659*4882a593Smuzhiyun unlock:
4660*4882a593Smuzhiyun hci_dev_unlock(hdev);
4661*4882a593Smuzhiyun }
4662*4882a593Smuzhiyun
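/* User Confirmation Request (numeric comparison). The request is
 * rejected when we require MITM but the remote cannot provide it,
 * auto-accepted (possibly after auto_accept_delay) when neither side
 * requires MITM, and otherwise handed to user space via mgmt together
 * with the passkey and a hint about whether confirmation is needed.
 */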
4663*4882a593Smuzhiyun static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4664*4882a593Smuzhiyun struct sk_buff *skb)
4665*4882a593Smuzhiyun {
4666*4882a593Smuzhiyun struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4667*4882a593Smuzhiyun int loc_mitm, rem_mitm, confirm_hint = 0;
4668*4882a593Smuzhiyun struct hci_conn *conn;
4669*4882a593Smuzhiyun
4670*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4671*4882a593Smuzhiyun
4672*4882a593Smuzhiyun hci_dev_lock(hdev);
4673*4882a593Smuzhiyun
4674*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_MGMT))
4675*4882a593Smuzhiyun goto unlock;
4676*4882a593Smuzhiyun
4677*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4678*4882a593Smuzhiyun if (!conn)
4679*4882a593Smuzhiyun goto unlock;
4680*4882a593Smuzhiyun
4681*4882a593Smuzhiyun loc_mitm = (conn->auth_type & 0x01);
4682*4882a593Smuzhiyun rem_mitm = (conn->remote_auth & 0x01);
4683*4882a593Smuzhiyun
4684*4882a593Smuzhiyun /* If we require MITM but the remote device can't provide that
4685*4882a593Smuzhiyun * (it has NoInputNoOutput) then reject the confirmation
4686*4882a593Smuzhiyun * request. We check the security level here since it doesn't
4687*4882a593Smuzhiyun * necessarily match conn->auth_type.
4688*4882a593Smuzhiyun */
4689*4882a593Smuzhiyun if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4690*4882a593Smuzhiyun conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4691*4882a593Smuzhiyun BT_DBG("Rejecting request: remote device can't provide MITM");
4692*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4693*4882a593Smuzhiyun sizeof(ev->bdaddr), &ev->bdaddr);
4694*4882a593Smuzhiyun goto unlock;
4695*4882a593Smuzhiyun }
4696*4882a593Smuzhiyun
4697*4882a593Smuzhiyun /* If no side requires MITM protection; auto-accept */
4698*4882a593Smuzhiyun if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4699*4882a593Smuzhiyun (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4700*4882a593Smuzhiyun
4701*4882a593Smuzhiyun /* If we're not the initiators, request authorization to
4702*4882a593Smuzhiyun * proceed from user space (mgmt_user_confirm with
4703*4882a593Smuzhiyun * confirm_hint set to 1). The exception is if neither
4704*4882a593Smuzhiyun * side requires MITM or if the local IO capability is
4705*4882a593Smuzhiyun * NoInputNoOutput, in which case we auto-accept.
4706*4882a593Smuzhiyun */
4707*4882a593Smuzhiyun if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4708*4882a593Smuzhiyun conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4709*4882a593Smuzhiyun (loc_mitm || rem_mitm)) {
4710*4882a593Smuzhiyun BT_DBG("Confirming auto-accept as acceptor");
4711*4882a593Smuzhiyun confirm_hint = 1;
4712*4882a593Smuzhiyun goto confirm;
4713*4882a593Smuzhiyun }
4714*4882a593Smuzhiyun
4715*4882a593Smuzhiyun /* If there already exists link key in local host, leave the
4716*4882a593Smuzhiyun * decision to user space since the remote device could be
4717*4882a593Smuzhiyun * legitimate or malicious.
4718*4882a593Smuzhiyun */
4719*4882a593Smuzhiyun if (hci_find_link_key(hdev, &ev->bdaddr)) {
4720*4882a593Smuzhiyun bt_dev_dbg(hdev, "Local host already has link key");
4721*4882a593Smuzhiyun confirm_hint = 1;
4722*4882a593Smuzhiyun goto confirm;
4723*4882a593Smuzhiyun }
4724*4882a593Smuzhiyun
4725*4882a593Smuzhiyun BT_DBG("Auto-accept of user confirmation with %ums delay",
4726*4882a593Smuzhiyun hdev->auto_accept_delay);
4727*4882a593Smuzhiyun
4728*4882a593Smuzhiyun if (hdev->auto_accept_delay > 0) {
4729*4882a593Smuzhiyun int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4730*4882a593Smuzhiyun queue_delayed_work(conn->hdev->workqueue,
4731*4882a593Smuzhiyun &conn->auto_accept_work, delay);
4732*4882a593Smuzhiyun goto unlock;
4733*4882a593Smuzhiyun }
4734*4882a593Smuzhiyun
4735*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4736*4882a593Smuzhiyun sizeof(ev->bdaddr), &ev->bdaddr);
4737*4882a593Smuzhiyun goto unlock;
4738*4882a593Smuzhiyun }
4739*4882a593Smuzhiyun
4740*4882a593Smuzhiyun confirm:
4741*4882a593Smuzhiyun mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4742*4882a593Smuzhiyun le32_to_cpu(ev->passkey), confirm_hint);
4743*4882a593Smuzhiyun
4744*4882a593Smuzhiyun unlock:
4745*4882a593Smuzhiyun hci_dev_unlock(hdev);
4746*4882a593Smuzhiyun }
4747*4882a593Smuzhiyun
4748*4882a593Smuzhiyun static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4749*4882a593Smuzhiyun struct sk_buff *skb)
4750*4882a593Smuzhiyun {
4751*4882a593Smuzhiyun struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4752*4882a593Smuzhiyun
4753*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4754*4882a593Smuzhiyun
4755*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_MGMT))
4756*4882a593Smuzhiyun mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4757*4882a593Smuzhiyun }
4758*4882a593Smuzhiyun
4759*4882a593Smuzhiyun static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4760*4882a593Smuzhiyun struct sk_buff *skb)
4761*4882a593Smuzhiyun {
4762*4882a593Smuzhiyun struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4763*4882a593Smuzhiyun struct hci_conn *conn;
4764*4882a593Smuzhiyun
4765*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4766*4882a593Smuzhiyun
4767*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4768*4882a593Smuzhiyun if (!conn)
4769*4882a593Smuzhiyun return;
4770*4882a593Smuzhiyun
4771*4882a593Smuzhiyun conn->passkey_notify = __le32_to_cpu(ev->passkey);
4772*4882a593Smuzhiyun conn->passkey_entered = 0;
4773*4882a593Smuzhiyun
4774*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_MGMT))
4775*4882a593Smuzhiyun mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4776*4882a593Smuzhiyun conn->dst_type, conn->passkey_notify,
4777*4882a593Smuzhiyun conn->passkey_entered);
4778*4882a593Smuzhiyun }
4779*4882a593Smuzhiyun
4780*4882a593Smuzhiyun static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4781*4882a593Smuzhiyun {
4782*4882a593Smuzhiyun struct hci_ev_keypress_notify *ev = (void *) skb->data;
4783*4882a593Smuzhiyun struct hci_conn *conn;
4784*4882a593Smuzhiyun
4785*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4786*4882a593Smuzhiyun
4787*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4788*4882a593Smuzhiyun if (!conn)
4789*4882a593Smuzhiyun return;
4790*4882a593Smuzhiyun
4791*4882a593Smuzhiyun switch (ev->type) {
4792*4882a593Smuzhiyun case HCI_KEYPRESS_STARTED:
4793*4882a593Smuzhiyun conn->passkey_entered = 0;
4794*4882a593Smuzhiyun return;
4795*4882a593Smuzhiyun
4796*4882a593Smuzhiyun case HCI_KEYPRESS_ENTERED:
4797*4882a593Smuzhiyun conn->passkey_entered++;
4798*4882a593Smuzhiyun break;
4799*4882a593Smuzhiyun
4800*4882a593Smuzhiyun case HCI_KEYPRESS_ERASED:
4801*4882a593Smuzhiyun conn->passkey_entered--;
4802*4882a593Smuzhiyun break;
4803*4882a593Smuzhiyun
4804*4882a593Smuzhiyun case HCI_KEYPRESS_CLEARED:
4805*4882a593Smuzhiyun conn->passkey_entered = 0;
4806*4882a593Smuzhiyun break;
4807*4882a593Smuzhiyun
4808*4882a593Smuzhiyun case HCI_KEYPRESS_COMPLETED:
4809*4882a593Smuzhiyun return;
4810*4882a593Smuzhiyun }
4811*4882a593Smuzhiyun
4812*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_MGMT))
4813*4882a593Smuzhiyun mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4814*4882a593Smuzhiyun conn->dst_type, conn->passkey_notify,
4815*4882a593Smuzhiyun conn->passkey_entered);
4816*4882a593Smuzhiyun }
4817*4882a593Smuzhiyun
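/* Simple Pairing Complete event: reset the cached remote authentication
 * requirement and report a failure to user space only when we did not
 * initiate the authentication, to avoid duplicate auth_failed events.
 */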
4818*4882a593Smuzhiyun static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4819*4882a593Smuzhiyun struct sk_buff *skb)
4820*4882a593Smuzhiyun {
4821*4882a593Smuzhiyun struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4822*4882a593Smuzhiyun struct hci_conn *conn;
4823*4882a593Smuzhiyun
4824*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4825*4882a593Smuzhiyun
4826*4882a593Smuzhiyun hci_dev_lock(hdev);
4827*4882a593Smuzhiyun
4828*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4829*4882a593Smuzhiyun if (!conn)
4830*4882a593Smuzhiyun goto unlock;
4831*4882a593Smuzhiyun
4832*4882a593Smuzhiyun /* Reset the authentication requirement to unknown */
4833*4882a593Smuzhiyun conn->remote_auth = 0xff;
4834*4882a593Smuzhiyun
4835*4882a593Smuzhiyun /* To avoid duplicate auth_failed events to user space we check
4836*4882a593Smuzhiyun * the HCI_CONN_AUTH_PEND flag which will be set if we
4837*4882a593Smuzhiyun * initiated the authentication. A traditional auth_complete
4838*4882a593Smuzhiyun * event is always produced when we are the initiator and is
4839*4882a593Smuzhiyun * also mapped to the mgmt_auth_failed event */
4840*4882a593Smuzhiyun if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4841*4882a593Smuzhiyun mgmt_auth_failed(conn, ev->status);
4842*4882a593Smuzhiyun
4843*4882a593Smuzhiyun hci_conn_drop(conn);
4844*4882a593Smuzhiyun
4845*4882a593Smuzhiyun unlock:
4846*4882a593Smuzhiyun hci_dev_unlock(hdev);
4847*4882a593Smuzhiyun }
4848*4882a593Smuzhiyun
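/* Remote Host Supported Features Notification event: store the extended
 * (page 1) features for an existing connection and update the SSP mode
 * recorded in the inquiry cache.
 */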
4849*4882a593Smuzhiyun static void hci_remote_host_features_evt(struct hci_dev *hdev,
4850*4882a593Smuzhiyun struct sk_buff *skb)
4851*4882a593Smuzhiyun {
4852*4882a593Smuzhiyun struct hci_ev_remote_host_features *ev = (void *) skb->data;
4853*4882a593Smuzhiyun struct inquiry_entry *ie;
4854*4882a593Smuzhiyun struct hci_conn *conn;
4855*4882a593Smuzhiyun
4856*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4857*4882a593Smuzhiyun
4858*4882a593Smuzhiyun hci_dev_lock(hdev);
4859*4882a593Smuzhiyun
4860*4882a593Smuzhiyun conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4861*4882a593Smuzhiyun if (conn)
4862*4882a593Smuzhiyun memcpy(conn->features[1], ev->features, 8);
4863*4882a593Smuzhiyun
4864*4882a593Smuzhiyun ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4865*4882a593Smuzhiyun if (ie)
4866*4882a593Smuzhiyun ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4867*4882a593Smuzhiyun
4868*4882a593Smuzhiyun hci_dev_unlock(hdev);
4869*4882a593Smuzhiyun }
4870*4882a593Smuzhiyun
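/* Remote OOB Data Request event: reply with the stored out-of-band
 * hash/randomizer values (the P-256 values are included when BR/EDR
 * Secure Connections is enabled), or send a negative reply when no OOB
 * data is stored for the peer.
 */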
4871*4882a593Smuzhiyun static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4872*4882a593Smuzhiyun struct sk_buff *skb)
4873*4882a593Smuzhiyun {
4874*4882a593Smuzhiyun struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4875*4882a593Smuzhiyun struct oob_data *data;
4876*4882a593Smuzhiyun
4877*4882a593Smuzhiyun BT_DBG("%s", hdev->name);
4878*4882a593Smuzhiyun
4879*4882a593Smuzhiyun hci_dev_lock(hdev);
4880*4882a593Smuzhiyun
4881*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_MGMT))
4882*4882a593Smuzhiyun goto unlock;
4883*4882a593Smuzhiyun
4884*4882a593Smuzhiyun data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4885*4882a593Smuzhiyun if (!data) {
4886*4882a593Smuzhiyun struct hci_cp_remote_oob_data_neg_reply cp;
4887*4882a593Smuzhiyun
4888*4882a593Smuzhiyun bacpy(&cp.bdaddr, &ev->bdaddr);
4889*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4890*4882a593Smuzhiyun sizeof(cp), &cp);
4891*4882a593Smuzhiyun goto unlock;
4892*4882a593Smuzhiyun }
4893*4882a593Smuzhiyun
4894*4882a593Smuzhiyun if (bredr_sc_enabled(hdev)) {
4895*4882a593Smuzhiyun struct hci_cp_remote_oob_ext_data_reply cp;
4896*4882a593Smuzhiyun
4897*4882a593Smuzhiyun bacpy(&cp.bdaddr, &ev->bdaddr);
4898*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4899*4882a593Smuzhiyun memset(cp.hash192, 0, sizeof(cp.hash192));
4900*4882a593Smuzhiyun memset(cp.rand192, 0, sizeof(cp.rand192));
4901*4882a593Smuzhiyun } else {
4902*4882a593Smuzhiyun memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4903*4882a593Smuzhiyun memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4904*4882a593Smuzhiyun }
4905*4882a593Smuzhiyun memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4906*4882a593Smuzhiyun memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4907*4882a593Smuzhiyun
4908*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4909*4882a593Smuzhiyun sizeof(cp), &cp);
4910*4882a593Smuzhiyun } else {
4911*4882a593Smuzhiyun struct hci_cp_remote_oob_data_reply cp;
4912*4882a593Smuzhiyun
4913*4882a593Smuzhiyun bacpy(&cp.bdaddr, &ev->bdaddr);
4914*4882a593Smuzhiyun memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4915*4882a593Smuzhiyun memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4916*4882a593Smuzhiyun
4917*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4918*4882a593Smuzhiyun sizeof(cp), &cp);
4919*4882a593Smuzhiyun }
4920*4882a593Smuzhiyun
4921*4882a593Smuzhiyun unlock:
4922*4882a593Smuzhiyun hci_dev_unlock(hdev);
4923*4882a593Smuzhiyun }
4924*4882a593Smuzhiyun
4925*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_BT_HS)
4926*4882a593Smuzhiyun static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4927*4882a593Smuzhiyun {
4928*4882a593Smuzhiyun struct hci_ev_channel_selected *ev = (void *)skb->data;
4929*4882a593Smuzhiyun struct hci_conn *hcon;
4930*4882a593Smuzhiyun
4931*4882a593Smuzhiyun BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4932*4882a593Smuzhiyun
4933*4882a593Smuzhiyun skb_pull(skb, sizeof(*ev));
4934*4882a593Smuzhiyun
4935*4882a593Smuzhiyun hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4936*4882a593Smuzhiyun if (!hcon)
4937*4882a593Smuzhiyun return;
4938*4882a593Smuzhiyun
4939*4882a593Smuzhiyun amp_read_loc_assoc_final_data(hdev, hcon);
4940*4882a593Smuzhiyun }
4941*4882a593Smuzhiyun
4942*4882a593Smuzhiyun static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4943*4882a593Smuzhiyun struct sk_buff *skb)
4944*4882a593Smuzhiyun {
4945*4882a593Smuzhiyun struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4946*4882a593Smuzhiyun struct hci_conn *hcon, *bredr_hcon;
4947*4882a593Smuzhiyun
4948*4882a593Smuzhiyun BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4949*4882a593Smuzhiyun ev->status);
4950*4882a593Smuzhiyun
4951*4882a593Smuzhiyun hci_dev_lock(hdev);
4952*4882a593Smuzhiyun
4953*4882a593Smuzhiyun hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4954*4882a593Smuzhiyun if (!hcon) {
4955*4882a593Smuzhiyun hci_dev_unlock(hdev);
4956*4882a593Smuzhiyun return;
4957*4882a593Smuzhiyun }
4958*4882a593Smuzhiyun
4959*4882a593Smuzhiyun if (!hcon->amp_mgr) {
4960*4882a593Smuzhiyun hci_dev_unlock(hdev);
4961*4882a593Smuzhiyun return;
4962*4882a593Smuzhiyun }
4963*4882a593Smuzhiyun
4964*4882a593Smuzhiyun if (ev->status) {
4965*4882a593Smuzhiyun hci_conn_del(hcon);
4966*4882a593Smuzhiyun hci_dev_unlock(hdev);
4967*4882a593Smuzhiyun return;
4968*4882a593Smuzhiyun }
4969*4882a593Smuzhiyun
4970*4882a593Smuzhiyun bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4971*4882a593Smuzhiyun
4972*4882a593Smuzhiyun hcon->state = BT_CONNECTED;
4973*4882a593Smuzhiyun bacpy(&hcon->dst, &bredr_hcon->dst);
4974*4882a593Smuzhiyun
4975*4882a593Smuzhiyun hci_conn_hold(hcon);
4976*4882a593Smuzhiyun hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4977*4882a593Smuzhiyun hci_conn_drop(hcon);
4978*4882a593Smuzhiyun
4979*4882a593Smuzhiyun hci_debugfs_create_conn(hcon);
4980*4882a593Smuzhiyun hci_conn_add_sysfs(hcon);
4981*4882a593Smuzhiyun
4982*4882a593Smuzhiyun amp_physical_cfm(bredr_hcon, hcon);
4983*4882a593Smuzhiyun
4984*4882a593Smuzhiyun hci_dev_unlock(hdev);
4985*4882a593Smuzhiyun }
4986*4882a593Smuzhiyun
4987*4882a593Smuzhiyun static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4988*4882a593Smuzhiyun {
4989*4882a593Smuzhiyun struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4990*4882a593Smuzhiyun struct hci_conn *hcon;
4991*4882a593Smuzhiyun struct hci_chan *hchan;
4992*4882a593Smuzhiyun struct amp_mgr *mgr;
4993*4882a593Smuzhiyun
4994*4882a593Smuzhiyun BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4995*4882a593Smuzhiyun hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4996*4882a593Smuzhiyun ev->status);
4997*4882a593Smuzhiyun
4998*4882a593Smuzhiyun hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4999*4882a593Smuzhiyun if (!hcon)
5000*4882a593Smuzhiyun return;
5001*4882a593Smuzhiyun
5002*4882a593Smuzhiyun /* Create AMP hchan */
5003*4882a593Smuzhiyun hchan = hci_chan_create(hcon);
5004*4882a593Smuzhiyun if (!hchan)
5005*4882a593Smuzhiyun return;
5006*4882a593Smuzhiyun
5007*4882a593Smuzhiyun hchan->handle = le16_to_cpu(ev->handle);
5008*4882a593Smuzhiyun hchan->amp = true;
5009*4882a593Smuzhiyun
5010*4882a593Smuzhiyun BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5011*4882a593Smuzhiyun
5012*4882a593Smuzhiyun mgr = hcon->amp_mgr;
5013*4882a593Smuzhiyun if (mgr && mgr->bredr_chan) {
5014*4882a593Smuzhiyun struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5015*4882a593Smuzhiyun
5016*4882a593Smuzhiyun l2cap_chan_lock(bredr_chan);
5017*4882a593Smuzhiyun
5018*4882a593Smuzhiyun bredr_chan->conn->mtu = hdev->block_mtu;
5019*4882a593Smuzhiyun l2cap_logical_cfm(bredr_chan, hchan, 0);
5020*4882a593Smuzhiyun hci_conn_hold(hcon);
5021*4882a593Smuzhiyun
5022*4882a593Smuzhiyun l2cap_chan_unlock(bredr_chan);
5023*4882a593Smuzhiyun }
5024*4882a593Smuzhiyun }
5025*4882a593Smuzhiyun
5026*4882a593Smuzhiyun static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5027*4882a593Smuzhiyun struct sk_buff *skb)
5028*4882a593Smuzhiyun {
5029*4882a593Smuzhiyun struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5030*4882a593Smuzhiyun struct hci_chan *hchan;
5031*4882a593Smuzhiyun
5032*4882a593Smuzhiyun BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5033*4882a593Smuzhiyun le16_to_cpu(ev->handle), ev->status);
5034*4882a593Smuzhiyun
5035*4882a593Smuzhiyun if (ev->status)
5036*4882a593Smuzhiyun return;
5037*4882a593Smuzhiyun
5038*4882a593Smuzhiyun hci_dev_lock(hdev);
5039*4882a593Smuzhiyun
5040*4882a593Smuzhiyun hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5041*4882a593Smuzhiyun if (!hchan || !hchan->amp)
5042*4882a593Smuzhiyun goto unlock;
5043*4882a593Smuzhiyun
5044*4882a593Smuzhiyun amp_destroy_logical_link(hchan, ev->reason);
5045*4882a593Smuzhiyun
5046*4882a593Smuzhiyun unlock:
5047*4882a593Smuzhiyun hci_dev_unlock(hdev);
5048*4882a593Smuzhiyun }
5049*4882a593Smuzhiyun
5050*4882a593Smuzhiyun static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5051*4882a593Smuzhiyun struct sk_buff *skb)
5052*4882a593Smuzhiyun {
5053*4882a593Smuzhiyun struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5054*4882a593Smuzhiyun struct hci_conn *hcon;
5055*4882a593Smuzhiyun
5056*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5057*4882a593Smuzhiyun
5058*4882a593Smuzhiyun if (ev->status)
5059*4882a593Smuzhiyun return;
5060*4882a593Smuzhiyun
5061*4882a593Smuzhiyun hci_dev_lock(hdev);
5062*4882a593Smuzhiyun
5063*4882a593Smuzhiyun hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5064*4882a593Smuzhiyun if (hcon && hcon->type == AMP_LINK) {
5065*4882a593Smuzhiyun hcon->state = BT_CLOSED;
5066*4882a593Smuzhiyun hci_disconn_cfm(hcon, ev->reason);
5067*4882a593Smuzhiyun hci_conn_del(hcon);
5068*4882a593Smuzhiyun }
5069*4882a593Smuzhiyun
5070*4882a593Smuzhiyun hci_dev_unlock(hdev);
5071*4882a593Smuzhiyun }
5072*4882a593Smuzhiyun #endif
5073*4882a593Smuzhiyun
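/* Fill in the initiator and responder addresses of an LE connection
 * based on the connection role, any controller-provided Local RPA,
 * the privacy setting and the advertising address type.
 */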
5074*4882a593Smuzhiyun static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5075*4882a593Smuzhiyun u8 bdaddr_type, bdaddr_t *local_rpa)
5076*4882a593Smuzhiyun {
5077*4882a593Smuzhiyun if (conn->out) {
5078*4882a593Smuzhiyun conn->dst_type = bdaddr_type;
5079*4882a593Smuzhiyun conn->resp_addr_type = bdaddr_type;
5080*4882a593Smuzhiyun bacpy(&conn->resp_addr, bdaddr);
5081*4882a593Smuzhiyun
5082*4882a593Smuzhiyun /* If the controller has set a Local RPA, then it must be
5083*4882a593Smuzhiyun * used instead of hdev->rpa.
5084*4882a593Smuzhiyun */
5085*4882a593Smuzhiyun if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5086*4882a593Smuzhiyun conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5087*4882a593Smuzhiyun bacpy(&conn->init_addr, local_rpa);
5088*4882a593Smuzhiyun } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5089*4882a593Smuzhiyun conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5090*4882a593Smuzhiyun bacpy(&conn->init_addr, &conn->hdev->rpa);
5091*4882a593Smuzhiyun } else {
5092*4882a593Smuzhiyun hci_copy_identity_address(conn->hdev, &conn->init_addr,
5093*4882a593Smuzhiyun &conn->init_addr_type);
5094*4882a593Smuzhiyun }
5095*4882a593Smuzhiyun } else {
5096*4882a593Smuzhiyun conn->resp_addr_type = conn->hdev->adv_addr_type;
5097*4882a593Smuzhiyun /* If the controller has set a Local RPA, then it must be
5098*4882a593Smuzhiyun * used instead of hdev->rpa.
5099*4882a593Smuzhiyun */
5100*4882a593Smuzhiyun if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5101*4882a593Smuzhiyun conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5102*4882a593Smuzhiyun bacpy(&conn->resp_addr, local_rpa);
5103*4882a593Smuzhiyun } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5104*4882a593Smuzhiyun /* In case of ext adv, resp_addr will be updated in
5105*4882a593Smuzhiyun * Adv Terminated event.
5106*4882a593Smuzhiyun */
5107*4882a593Smuzhiyun if (!ext_adv_capable(conn->hdev))
5108*4882a593Smuzhiyun bacpy(&conn->resp_addr,
5109*4882a593Smuzhiyun &conn->hdev->random_addr);
5110*4882a593Smuzhiyun } else {
5111*4882a593Smuzhiyun bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5112*4882a593Smuzhiyun }
5113*4882a593Smuzhiyun
5114*4882a593Smuzhiyun conn->init_addr_type = bdaddr_type;
5115*4882a593Smuzhiyun bacpy(&conn->init_addr, bdaddr);
5116*4882a593Smuzhiyun
5117*4882a593Smuzhiyun /* For incoming connections, set the default minimum
5118*4882a593Smuzhiyun * and maximum connection interval. They will be used
5119*4882a593Smuzhiyun * to check if the parameters are in range and if not
5120*4882a593Smuzhiyun * trigger the connection update procedure.
5121*4882a593Smuzhiyun */
5122*4882a593Smuzhiyun conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5123*4882a593Smuzhiyun conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5124*4882a593Smuzhiyun }
5125*4882a593Smuzhiyun }
5126*4882a593Smuzhiyun
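/* Common handler for the legacy and enhanced LE Connection Complete
 * events: set up (or finish setting up) the hci_conn, resolve RPAs to
 * identity addresses, notify mgmt and, where allowed by role and
 * features, start the remote features exchange.
 */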
5127*4882a593Smuzhiyun static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5128*4882a593Smuzhiyun bdaddr_t *bdaddr, u8 bdaddr_type,
5129*4882a593Smuzhiyun bdaddr_t *local_rpa, u8 role, u16 handle,
5130*4882a593Smuzhiyun u16 interval, u16 latency,
5131*4882a593Smuzhiyun u16 supervision_timeout)
5132*4882a593Smuzhiyun {
5133*4882a593Smuzhiyun struct hci_conn_params *params;
5134*4882a593Smuzhiyun struct hci_conn *conn;
5135*4882a593Smuzhiyun struct smp_irk *irk;
5136*4882a593Smuzhiyun u8 addr_type;
5137*4882a593Smuzhiyun
5138*4882a593Smuzhiyun hci_dev_lock(hdev);
5139*4882a593Smuzhiyun
5140*4882a593Smuzhiyun /* All controllers implicitly stop advertising in the event of a
5141*4882a593Smuzhiyun * connection, so ensure that the state bit is cleared.
5142*4882a593Smuzhiyun */
5143*4882a593Smuzhiyun hci_dev_clear_flag(hdev, HCI_LE_ADV);
5144*4882a593Smuzhiyun
5145*4882a593Smuzhiyun conn = hci_lookup_le_connect(hdev);
5146*4882a593Smuzhiyun if (!conn) {
5147*4882a593Smuzhiyun conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5148*4882a593Smuzhiyun if (!conn) {
5149*4882a593Smuzhiyun bt_dev_err(hdev, "no memory for new connection");
5150*4882a593Smuzhiyun goto unlock;
5151*4882a593Smuzhiyun }
5152*4882a593Smuzhiyun
5153*4882a593Smuzhiyun conn->dst_type = bdaddr_type;
5154*4882a593Smuzhiyun
5155*4882a593Smuzhiyun /* If we didn't have a hci_conn object previously
5156*4882a593Smuzhiyun * but we're in the central role, this must be something
5157*4882a593Smuzhiyun * initiated using a white list. Since white list based
5158*4882a593Smuzhiyun * connections are not "first class citizens" we don't
5159*4882a593Smuzhiyun * have full tracking of them. Therefore, we go ahead
5160*4882a593Smuzhiyun * with a "best effort" approach of determining the
5161*4882a593Smuzhiyun * initiator address based on the HCI_PRIVACY flag.
5162*4882a593Smuzhiyun */
5163*4882a593Smuzhiyun if (conn->out) {
5164*4882a593Smuzhiyun conn->resp_addr_type = bdaddr_type;
5165*4882a593Smuzhiyun bacpy(&conn->resp_addr, bdaddr);
5166*4882a593Smuzhiyun if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5167*4882a593Smuzhiyun conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5168*4882a593Smuzhiyun bacpy(&conn->init_addr, &hdev->rpa);
5169*4882a593Smuzhiyun } else {
5170*4882a593Smuzhiyun hci_copy_identity_address(hdev,
5171*4882a593Smuzhiyun &conn->init_addr,
5172*4882a593Smuzhiyun &conn->init_addr_type);
5173*4882a593Smuzhiyun }
5174*4882a593Smuzhiyun }
5175*4882a593Smuzhiyun } else {
5176*4882a593Smuzhiyun cancel_delayed_work(&conn->le_conn_timeout);
5177*4882a593Smuzhiyun }
5178*4882a593Smuzhiyun
5179*4882a593Smuzhiyun le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5180*4882a593Smuzhiyun
5181*4882a593Smuzhiyun /* Lookup the identity address from the stored connection
5182*4882a593Smuzhiyun * address and address type.
5183*4882a593Smuzhiyun *
5184*4882a593Smuzhiyun * When establishing connections to an identity address, the
5185*4882a593Smuzhiyun * connection procedure will store the resolvable random
5186*4882a593Smuzhiyun * address first. Now if it can be converted back into the
5187*4882a593Smuzhiyun * identity address, start using the identity address from
5188*4882a593Smuzhiyun * now on.
5189*4882a593Smuzhiyun */
5190*4882a593Smuzhiyun irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5191*4882a593Smuzhiyun if (irk) {
5192*4882a593Smuzhiyun bacpy(&conn->dst, &irk->bdaddr);
5193*4882a593Smuzhiyun conn->dst_type = irk->addr_type;
5194*4882a593Smuzhiyun }
5195*4882a593Smuzhiyun
5196*4882a593Smuzhiyun if (status) {
5197*4882a593Smuzhiyun hci_le_conn_failed(conn, status);
5198*4882a593Smuzhiyun goto unlock;
5199*4882a593Smuzhiyun }
5200*4882a593Smuzhiyun
5201*4882a593Smuzhiyun if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5202*4882a593Smuzhiyun addr_type = BDADDR_LE_PUBLIC;
5203*4882a593Smuzhiyun else
5204*4882a593Smuzhiyun addr_type = BDADDR_LE_RANDOM;
5205*4882a593Smuzhiyun
5206*4882a593Smuzhiyun /* Drop the connection if the device is blocked */
5207*4882a593Smuzhiyun if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
5208*4882a593Smuzhiyun hci_conn_drop(conn);
5209*4882a593Smuzhiyun goto unlock;
5210*4882a593Smuzhiyun }
5211*4882a593Smuzhiyun
5212*4882a593Smuzhiyun if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5213*4882a593Smuzhiyun mgmt_device_connected(hdev, conn, 0, NULL, 0);
5214*4882a593Smuzhiyun
5215*4882a593Smuzhiyun conn->sec_level = BT_SECURITY_LOW;
5216*4882a593Smuzhiyun conn->handle = handle;
5217*4882a593Smuzhiyun conn->state = BT_CONFIG;
5218*4882a593Smuzhiyun
5219*4882a593Smuzhiyun conn->le_conn_interval = interval;
5220*4882a593Smuzhiyun conn->le_conn_latency = latency;
5221*4882a593Smuzhiyun conn->le_supv_timeout = supervision_timeout;
5222*4882a593Smuzhiyun
5223*4882a593Smuzhiyun hci_debugfs_create_conn(conn);
5224*4882a593Smuzhiyun hci_conn_add_sysfs(conn);
5225*4882a593Smuzhiyun
5226*4882a593Smuzhiyun /* The remote features procedure is defined for the master
5227*4882a593Smuzhiyun * role only, so request the remote features only for an
5228*4882a593Smuzhiyun * initiated (outgoing) connection.
5229*4882a593Smuzhiyun *
5230*4882a593Smuzhiyun * If the local controller supports slave-initiated features
5231*4882a593Smuzhiyun * exchange, then requesting the remote features in slave
5232*4882a593Smuzhiyun * role is possible. Otherwise just transition into the
5233*4882a593Smuzhiyun * connected state without requesting the remote features.
5234*4882a593Smuzhiyun */
5235*4882a593Smuzhiyun if (conn->out ||
5236*4882a593Smuzhiyun (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5237*4882a593Smuzhiyun struct hci_cp_le_read_remote_features cp;
5238*4882a593Smuzhiyun
5239*4882a593Smuzhiyun cp.handle = __cpu_to_le16(conn->handle);
5240*4882a593Smuzhiyun
5241*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5242*4882a593Smuzhiyun sizeof(cp), &cp);
5243*4882a593Smuzhiyun
5244*4882a593Smuzhiyun hci_conn_hold(conn);
5245*4882a593Smuzhiyun } else {
5246*4882a593Smuzhiyun conn->state = BT_CONNECTED;
5247*4882a593Smuzhiyun hci_connect_cfm(conn, status);
5248*4882a593Smuzhiyun }
5249*4882a593Smuzhiyun
5250*4882a593Smuzhiyun params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5251*4882a593Smuzhiyun conn->dst_type);
5252*4882a593Smuzhiyun if (params) {
5253*4882a593Smuzhiyun list_del_init(&params->action);
5254*4882a593Smuzhiyun if (params->conn) {
5255*4882a593Smuzhiyun hci_conn_drop(params->conn);
5256*4882a593Smuzhiyun hci_conn_put(params->conn);
5257*4882a593Smuzhiyun params->conn = NULL;
5258*4882a593Smuzhiyun }
5259*4882a593Smuzhiyun }
5260*4882a593Smuzhiyun
5261*4882a593Smuzhiyun unlock:
5262*4882a593Smuzhiyun hci_update_background_scan(hdev);
5263*4882a593Smuzhiyun hci_dev_unlock(hdev);
5264*4882a593Smuzhiyun }
5265*4882a593Smuzhiyun
5266*4882a593Smuzhiyun static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5267*4882a593Smuzhiyun {
5268*4882a593Smuzhiyun struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5269*4882a593Smuzhiyun
5270*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5271*4882a593Smuzhiyun
5272*4882a593Smuzhiyun le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5273*4882a593Smuzhiyun NULL, ev->role, le16_to_cpu(ev->handle),
5274*4882a593Smuzhiyun le16_to_cpu(ev->interval),
5275*4882a593Smuzhiyun le16_to_cpu(ev->latency),
5276*4882a593Smuzhiyun le16_to_cpu(ev->supervision_timeout));
5277*4882a593Smuzhiyun }
5278*4882a593Smuzhiyun
5279*4882a593Smuzhiyun static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5280*4882a593Smuzhiyun struct sk_buff *skb)
5281*4882a593Smuzhiyun {
5282*4882a593Smuzhiyun struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5283*4882a593Smuzhiyun
5284*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5285*4882a593Smuzhiyun
5286*4882a593Smuzhiyun le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5287*4882a593Smuzhiyun &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5288*4882a593Smuzhiyun le16_to_cpu(ev->interval),
5289*4882a593Smuzhiyun le16_to_cpu(ev->latency),
5290*4882a593Smuzhiyun le16_to_cpu(ev->supervision_timeout));
5291*4882a593Smuzhiyun
5292*4882a593Smuzhiyun if (use_ll_privacy(hdev) &&
5293*4882a593Smuzhiyun hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5294*4882a593Smuzhiyun hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5295*4882a593Smuzhiyun hci_req_disable_address_resolution(hdev);
5296*4882a593Smuzhiyun }
5297*4882a593Smuzhiyun
5298*4882a593Smuzhiyun static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5299*4882a593Smuzhiyun {
5300*4882a593Smuzhiyun struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5301*4882a593Smuzhiyun struct hci_conn *conn;
5302*4882a593Smuzhiyun
5303*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5304*4882a593Smuzhiyun
5305*4882a593Smuzhiyun if (ev->status) {
5306*4882a593Smuzhiyun struct adv_info *adv;
5307*4882a593Smuzhiyun
5308*4882a593Smuzhiyun adv = hci_find_adv_instance(hdev, ev->handle);
5309*4882a593Smuzhiyun if (!adv)
5310*4882a593Smuzhiyun return;
5311*4882a593Smuzhiyun
5312*4882a593Smuzhiyun /* Remove advertising as it has been terminated */
5313*4882a593Smuzhiyun hci_remove_adv_instance(hdev, ev->handle);
5314*4882a593Smuzhiyun mgmt_advertising_removed(NULL, hdev, ev->handle);
5315*4882a593Smuzhiyun
5316*4882a593Smuzhiyun return;
5317*4882a593Smuzhiyun }
5318*4882a593Smuzhiyun
5319*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5320*4882a593Smuzhiyun if (conn) {
5321*4882a593Smuzhiyun struct adv_info *adv_instance;
5322*4882a593Smuzhiyun
5323*4882a593Smuzhiyun if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5324*4882a593Smuzhiyun bacmp(&conn->resp_addr, BDADDR_ANY))
5325*4882a593Smuzhiyun return;
5326*4882a593Smuzhiyun
5327*4882a593Smuzhiyun if (!hdev->cur_adv_instance) {
5328*4882a593Smuzhiyun bacpy(&conn->resp_addr, &hdev->random_addr);
5329*4882a593Smuzhiyun return;
5330*4882a593Smuzhiyun }
5331*4882a593Smuzhiyun
5332*4882a593Smuzhiyun adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5333*4882a593Smuzhiyun if (adv_instance)
5334*4882a593Smuzhiyun bacpy(&conn->resp_addr, &adv_instance->random_addr);
5335*4882a593Smuzhiyun }
5336*4882a593Smuzhiyun }
5337*4882a593Smuzhiyun
5338*4882a593Smuzhiyun static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5339*4882a593Smuzhiyun struct sk_buff *skb)
5340*4882a593Smuzhiyun {
5341*4882a593Smuzhiyun struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5342*4882a593Smuzhiyun struct hci_conn *conn;
5343*4882a593Smuzhiyun
5344*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5345*4882a593Smuzhiyun
5346*4882a593Smuzhiyun if (ev->status)
5347*4882a593Smuzhiyun return;
5348*4882a593Smuzhiyun
5349*4882a593Smuzhiyun hci_dev_lock(hdev);
5350*4882a593Smuzhiyun
5351*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5352*4882a593Smuzhiyun if (conn) {
5353*4882a593Smuzhiyun conn->le_conn_interval = le16_to_cpu(ev->interval);
5354*4882a593Smuzhiyun conn->le_conn_latency = le16_to_cpu(ev->latency);
5355*4882a593Smuzhiyun conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5356*4882a593Smuzhiyun }
5357*4882a593Smuzhiyun
5358*4882a593Smuzhiyun hci_dev_unlock(hdev);
5359*4882a593Smuzhiyun }
5360*4882a593Smuzhiyun
5361*4882a593Smuzhiyun /* This function requires the caller holds hdev->lock */
5362*4882a593Smuzhiyun static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5363*4882a593Smuzhiyun bdaddr_t *addr,
5364*4882a593Smuzhiyun u8 addr_type, u8 adv_type,
5365*4882a593Smuzhiyun bdaddr_t *direct_rpa)
5366*4882a593Smuzhiyun {
5367*4882a593Smuzhiyun struct hci_conn *conn;
5368*4882a593Smuzhiyun struct hci_conn_params *params;
5369*4882a593Smuzhiyun
5370*4882a593Smuzhiyun /* If the event is not connectable don't proceed further */
5371*4882a593Smuzhiyun if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5372*4882a593Smuzhiyun return NULL;
5373*4882a593Smuzhiyun
5374*4882a593Smuzhiyun /* Ignore if the device is blocked */
5375*4882a593Smuzhiyun if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5376*4882a593Smuzhiyun return NULL;
5377*4882a593Smuzhiyun
5378*4882a593Smuzhiyun /* Most controllers will fail if we try to create new connections
5379*4882a593Smuzhiyun * while we have an existing one in slave role.
5380*4882a593Smuzhiyun */
5381*4882a593Smuzhiyun if (hdev->conn_hash.le_num_slave > 0 &&
5382*4882a593Smuzhiyun (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5383*4882a593Smuzhiyun !(hdev->le_states[3] & 0x10)))
5384*4882a593Smuzhiyun return NULL;
5385*4882a593Smuzhiyun
5386*4882a593Smuzhiyun /* If we're not connectable only connect devices that we have in
5387*4882a593Smuzhiyun * our pend_le_conns list.
5388*4882a593Smuzhiyun */
5389*4882a593Smuzhiyun params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5390*4882a593Smuzhiyun addr_type);
5391*4882a593Smuzhiyun if (!params)
5392*4882a593Smuzhiyun return NULL;
5393*4882a593Smuzhiyun
5394*4882a593Smuzhiyun if (!params->explicit_connect) {
5395*4882a593Smuzhiyun switch (params->auto_connect) {
5396*4882a593Smuzhiyun case HCI_AUTO_CONN_DIRECT:
5397*4882a593Smuzhiyun /* Only devices advertising with ADV_DIRECT_IND are
5398*4882a593Smuzhiyun * triggering a connection attempt. This is allowing
5399*4882a593Smuzhiyun * incoming connections from slave devices.
5400*4882a593Smuzhiyun */
5401*4882a593Smuzhiyun if (adv_type != LE_ADV_DIRECT_IND)
5402*4882a593Smuzhiyun return NULL;
5403*4882a593Smuzhiyun break;
5404*4882a593Smuzhiyun case HCI_AUTO_CONN_ALWAYS:
5405*4882a593Smuzhiyun /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5406*4882a593Smuzhiyun * are triggering a connection attempt. This means
5407*4882a593Smuzhiyun * that incoming connections from slave devices are
5408*4882a593Smuzhiyun * accepted and also outgoing connections to slave
5409*4882a593Smuzhiyun * devices are established when found.
5410*4882a593Smuzhiyun */
5411*4882a593Smuzhiyun break;
5412*4882a593Smuzhiyun default:
5413*4882a593Smuzhiyun return NULL;
5414*4882a593Smuzhiyun }
5415*4882a593Smuzhiyun }
5416*4882a593Smuzhiyun
5417*4882a593Smuzhiyun conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5418*4882a593Smuzhiyun hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5419*4882a593Smuzhiyun direct_rpa);
5420*4882a593Smuzhiyun if (!IS_ERR(conn)) {
5421*4882a593Smuzhiyun /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5422*4882a593Smuzhiyun * by the higher layer that tried to connect. If not, then
5423*4882a593Smuzhiyun * store the pointer since we don't really have any
5424*4882a593Smuzhiyun * other owner of the object besides the params that
5425*4882a593Smuzhiyun * triggered it. This way we can abort the connection if
5426*4882a593Smuzhiyun * the parameters get removed and keep the reference
5427*4882a593Smuzhiyun * count consistent once the connection is established.
5428*4882a593Smuzhiyun */
5429*4882a593Smuzhiyun
5430*4882a593Smuzhiyun if (!params->explicit_connect)
5431*4882a593Smuzhiyun params->conn = hci_conn_get(conn);
5432*4882a593Smuzhiyun
5433*4882a593Smuzhiyun return conn;
5434*4882a593Smuzhiyun }
5435*4882a593Smuzhiyun
5436*4882a593Smuzhiyun switch (PTR_ERR(conn)) {
5437*4882a593Smuzhiyun case -EBUSY:
5438*4882a593Smuzhiyun /* If hci_connect() returns -EBUSY it means there is already
5439*4882a593Smuzhiyun * an LE connection attempt going on. Since controllers don't
5440*4882a593Smuzhiyun * support more than one connection attempt at a time, we
5441*4882a593Smuzhiyun * don't consider this an error case.
5442*4882a593Smuzhiyun */
5443*4882a593Smuzhiyun break;
5444*4882a593Smuzhiyun default:
5445*4882a593Smuzhiyun BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5446*4882a593Smuzhiyun return NULL;
5447*4882a593Smuzhiyun }
5448*4882a593Smuzhiyun
5449*4882a593Smuzhiyun return NULL;
5450*4882a593Smuzhiyun }
5451*4882a593Smuzhiyun
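/* Common handler for legacy, extended and direct advertising reports:
 * validate the report, resolve the advertiser address, trigger any
 * pending connection and generate mgmt device-found events, merging
 * ADV_IND/ADV_SCAN_IND reports with their scan responses.
 */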
5452*4882a593Smuzhiyun static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5453*4882a593Smuzhiyun u8 bdaddr_type, bdaddr_t *direct_addr,
5454*4882a593Smuzhiyun u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5455*4882a593Smuzhiyun bool ext_adv)
5456*4882a593Smuzhiyun {
5457*4882a593Smuzhiyun struct discovery_state *d = &hdev->discovery;
5458*4882a593Smuzhiyun struct smp_irk *irk;
5459*4882a593Smuzhiyun struct hci_conn *conn;
5460*4882a593Smuzhiyun bool match;
5461*4882a593Smuzhiyun u32 flags;
5462*4882a593Smuzhiyun u8 *ptr;
5463*4882a593Smuzhiyun
5464*4882a593Smuzhiyun switch (type) {
5465*4882a593Smuzhiyun case LE_ADV_IND:
5466*4882a593Smuzhiyun case LE_ADV_DIRECT_IND:
5467*4882a593Smuzhiyun case LE_ADV_SCAN_IND:
5468*4882a593Smuzhiyun case LE_ADV_NONCONN_IND:
5469*4882a593Smuzhiyun case LE_ADV_SCAN_RSP:
5470*4882a593Smuzhiyun break;
5471*4882a593Smuzhiyun default:
5472*4882a593Smuzhiyun bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5473*4882a593Smuzhiyun "type: 0x%02x", type);
5474*4882a593Smuzhiyun return;
5475*4882a593Smuzhiyun }
5476*4882a593Smuzhiyun
5477*4882a593Smuzhiyun if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5478*4882a593Smuzhiyun bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5479*4882a593Smuzhiyun return;
5480*4882a593Smuzhiyun }
5481*4882a593Smuzhiyun
5482*4882a593Smuzhiyun /* Find the end of the data in case the report contains padded zero
5483*4882a593Smuzhiyun * bytes at the end causing an invalid length value.
5484*4882a593Smuzhiyun *
5485*4882a593Smuzhiyun * When data is NULL, len is 0 so there is no need for extra ptr
5486*4882a593Smuzhiyun * check as 'ptr < data + 0' is already false in such case.
5487*4882a593Smuzhiyun */
5488*4882a593Smuzhiyun for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5489*4882a593Smuzhiyun if (ptr + 1 + *ptr > data + len)
5490*4882a593Smuzhiyun break;
5491*4882a593Smuzhiyun }
5492*4882a593Smuzhiyun
5493*4882a593Smuzhiyun /* Adjust for actual length. This handles the case when remote
5494*4882a593Smuzhiyun * device is advertising with incorrect data length.
5495*4882a593Smuzhiyun */
5496*4882a593Smuzhiyun len = ptr - data;
5497*4882a593Smuzhiyun
5498*4882a593Smuzhiyun /* If the direct address is present, then this report is from
5499*4882a593Smuzhiyun * a LE Direct Advertising Report event. In that case it is
5500*4882a593Smuzhiyun * important to see if the address is matching the local
5501*4882a593Smuzhiyun * controller address.
5502*4882a593Smuzhiyun */
5503*4882a593Smuzhiyun if (direct_addr) {
5504*4882a593Smuzhiyun /* Only resolvable random addresses are valid for this
5505*4882a593Smuzhiyun * kind of report and others can be ignored.
5506*4882a593Smuzhiyun */
5507*4882a593Smuzhiyun if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5508*4882a593Smuzhiyun return;
5509*4882a593Smuzhiyun
5510*4882a593Smuzhiyun /* If the controller is not using resolvable random
5511*4882a593Smuzhiyun * addresses, then this report can be ignored.
5512*4882a593Smuzhiyun */
5513*4882a593Smuzhiyun if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5514*4882a593Smuzhiyun return;
5515*4882a593Smuzhiyun
5516*4882a593Smuzhiyun /* If the local IRK of the controller does not match
5517*4882a593Smuzhiyun * with the resolvable random address provided, then
5518*4882a593Smuzhiyun * this report can be ignored.
5519*4882a593Smuzhiyun */
5520*4882a593Smuzhiyun if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5521*4882a593Smuzhiyun return;
5522*4882a593Smuzhiyun }
5523*4882a593Smuzhiyun
5524*4882a593Smuzhiyun /* Check if we need to convert to identity address */
5525*4882a593Smuzhiyun irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5526*4882a593Smuzhiyun if (irk) {
5527*4882a593Smuzhiyun bdaddr = &irk->bdaddr;
5528*4882a593Smuzhiyun bdaddr_type = irk->addr_type;
5529*4882a593Smuzhiyun }
5530*4882a593Smuzhiyun
5531*4882a593Smuzhiyun /* Check if we have been requested to connect to this device.
5532*4882a593Smuzhiyun *
5533*4882a593Smuzhiyun * direct_addr is set only for directed advertising reports (it is NULL
5534*4882a593Smuzhiyun * for advertising reports) and is already verified to be RPA above.
5535*4882a593Smuzhiyun */
5536*4882a593Smuzhiyun conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5537*4882a593Smuzhiyun direct_addr);
5538*4882a593Smuzhiyun if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5539*4882a593Smuzhiyun /* Store report for later inclusion by
5540*4882a593Smuzhiyun * mgmt_device_connected
5541*4882a593Smuzhiyun */
5542*4882a593Smuzhiyun memcpy(conn->le_adv_data, data, len);
5543*4882a593Smuzhiyun conn->le_adv_data_len = len;
5544*4882a593Smuzhiyun }
5545*4882a593Smuzhiyun
5546*4882a593Smuzhiyun /* Passive scanning shouldn't trigger any device found events,
5547*4882a593Smuzhiyun * except for devices marked as CONN_REPORT for which we do send
5548*4882a593Smuzhiyun * device found events, or when advertisement monitoring was requested.
5549*4882a593Smuzhiyun */
5550*4882a593Smuzhiyun if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5551*4882a593Smuzhiyun if (type == LE_ADV_DIRECT_IND)
5552*4882a593Smuzhiyun return;
5553*4882a593Smuzhiyun
5554*4882a593Smuzhiyun if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5555*4882a593Smuzhiyun bdaddr, bdaddr_type) &&
5556*4882a593Smuzhiyun idr_is_empty(&hdev->adv_monitors_idr))
5557*4882a593Smuzhiyun return;
5558*4882a593Smuzhiyun
5559*4882a593Smuzhiyun if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5560*4882a593Smuzhiyun flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5561*4882a593Smuzhiyun else
5562*4882a593Smuzhiyun flags = 0;
5563*4882a593Smuzhiyun mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5564*4882a593Smuzhiyun rssi, flags, data, len, NULL, 0);
5565*4882a593Smuzhiyun return;
5566*4882a593Smuzhiyun }
5567*4882a593Smuzhiyun
5568*4882a593Smuzhiyun /* When receiving non-connectable or scannable undirected
5569*4882a593Smuzhiyun * advertising reports, the remote device is known to be
5570*4882a593Smuzhiyun * not connectable, so clearly indicate this in the
5571*4882a593Smuzhiyun * device found event.
5572*4882a593Smuzhiyun *
5573*4882a593Smuzhiyun * When receiving a scan response, then there is no way to
5574*4882a593Smuzhiyun * know if the remote device is connectable or not. However
5575*4882a593Smuzhiyun * since scan responses are merged with a previously seen
5576*4882a593Smuzhiyun * advertising report, the flags field from that report
5577*4882a593Smuzhiyun * will be used.
5578*4882a593Smuzhiyun *
5579*4882a593Smuzhiyun * In the really unlikely case that a controller gets confused
5580*4882a593Smuzhiyun * and just sends a scan response event, then it is marked as
5581*4882a593Smuzhiyun * not connectable as well.
5582*4882a593Smuzhiyun */
5583*4882a593Smuzhiyun if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5584*4882a593Smuzhiyun type == LE_ADV_SCAN_RSP)
5585*4882a593Smuzhiyun flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5586*4882a593Smuzhiyun else
5587*4882a593Smuzhiyun flags = 0;
5588*4882a593Smuzhiyun
5589*4882a593Smuzhiyun /* If there's nothing pending either store the data from this
5590*4882a593Smuzhiyun * event or send an immediate device found event if the data
5591*4882a593Smuzhiyun * should not be stored for later.
5592*4882a593Smuzhiyun */
5593*4882a593Smuzhiyun if (!ext_adv && !has_pending_adv_report(hdev)) {
5594*4882a593Smuzhiyun /* If the report will trigger a SCAN_REQ store it for
5595*4882a593Smuzhiyun * later merging.
5596*4882a593Smuzhiyun */
5597*4882a593Smuzhiyun if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5598*4882a593Smuzhiyun store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5599*4882a593Smuzhiyun rssi, flags, data, len);
5600*4882a593Smuzhiyun return;
5601*4882a593Smuzhiyun }
5602*4882a593Smuzhiyun
5603*4882a593Smuzhiyun mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5604*4882a593Smuzhiyun rssi, flags, data, len, NULL, 0);
5605*4882a593Smuzhiyun return;
5606*4882a593Smuzhiyun }
5607*4882a593Smuzhiyun
5608*4882a593Smuzhiyun /* Check if the pending report is for the same device as the new one */
5609*4882a593Smuzhiyun match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5610*4882a593Smuzhiyun bdaddr_type == d->last_adv_addr_type);
5611*4882a593Smuzhiyun
5612*4882a593Smuzhiyun /* If the pending data doesn't match this report or this isn't a
5613*4882a593Smuzhiyun * scan response (e.g. we got a duplicate ADV_IND) then force
5614*4882a593Smuzhiyun * sending of the pending data.
5615*4882a593Smuzhiyun */
5616*4882a593Smuzhiyun if (type != LE_ADV_SCAN_RSP || !match) {
5617*4882a593Smuzhiyun /* Send out whatever is in the cache, but skip duplicates */
5618*4882a593Smuzhiyun if (!match)
5619*4882a593Smuzhiyun mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5620*4882a593Smuzhiyun d->last_adv_addr_type, NULL,
5621*4882a593Smuzhiyun d->last_adv_rssi, d->last_adv_flags,
5622*4882a593Smuzhiyun d->last_adv_data,
5623*4882a593Smuzhiyun d->last_adv_data_len, NULL, 0);
5624*4882a593Smuzhiyun
5625*4882a593Smuzhiyun /* If the new report will trigger a SCAN_REQ store it for
5626*4882a593Smuzhiyun * later merging.
5627*4882a593Smuzhiyun */
5628*4882a593Smuzhiyun if (!ext_adv && (type == LE_ADV_IND ||
5629*4882a593Smuzhiyun type == LE_ADV_SCAN_IND)) {
5630*4882a593Smuzhiyun store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5631*4882a593Smuzhiyun rssi, flags, data, len);
5632*4882a593Smuzhiyun return;
5633*4882a593Smuzhiyun }
5634*4882a593Smuzhiyun
5635*4882a593Smuzhiyun /* The advertising reports cannot be merged, so clear
5636*4882a593Smuzhiyun * the pending report and send out a device found event.
5637*4882a593Smuzhiyun */
5638*4882a593Smuzhiyun clear_pending_adv_report(hdev);
5639*4882a593Smuzhiyun mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5640*4882a593Smuzhiyun rssi, flags, data, len, NULL, 0);
5641*4882a593Smuzhiyun return;
5642*4882a593Smuzhiyun }
5643*4882a593Smuzhiyun
5644*4882a593Smuzhiyun /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5645*4882a593Smuzhiyun * the new event is a SCAN_RSP. We can therefore proceed with
5646*4882a593Smuzhiyun * sending a merged device found event.
5647*4882a593Smuzhiyun */
5648*4882a593Smuzhiyun mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5649*4882a593Smuzhiyun d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5650*4882a593Smuzhiyun d->last_adv_data, d->last_adv_data_len, data, len);
5651*4882a593Smuzhiyun clear_pending_adv_report(hdev);
5652*4882a593Smuzhiyun }
5653*4882a593Smuzhiyun
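/* LE Advertising Report event: walk the individual reports, checking
 * that each one fits within the received buffer before passing it to
 * process_adv_report().
 */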
5654*4882a593Smuzhiyun static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5655*4882a593Smuzhiyun {
5656*4882a593Smuzhiyun u8 num_reports = skb->data[0];
5657*4882a593Smuzhiyun void *ptr = &skb->data[1];
5658*4882a593Smuzhiyun
5659*4882a593Smuzhiyun hci_dev_lock(hdev);
5660*4882a593Smuzhiyun
5661*4882a593Smuzhiyun while (num_reports--) {
5662*4882a593Smuzhiyun struct hci_ev_le_advertising_info *ev = ptr;
5663*4882a593Smuzhiyun s8 rssi;
5664*4882a593Smuzhiyun
5665*4882a593Smuzhiyun if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5666*4882a593Smuzhiyun bt_dev_err(hdev, "Malicious advertising data.");
5667*4882a593Smuzhiyun break;
5668*4882a593Smuzhiyun }
5669*4882a593Smuzhiyun
5670*4882a593Smuzhiyun if (ev->length <= HCI_MAX_AD_LENGTH &&
5671*4882a593Smuzhiyun ev->data + ev->length <= skb_tail_pointer(skb)) {
5672*4882a593Smuzhiyun rssi = ev->data[ev->length];
5673*4882a593Smuzhiyun process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5674*4882a593Smuzhiyun ev->bdaddr_type, NULL, 0, rssi,
5675*4882a593Smuzhiyun ev->data, ev->length, false);
5676*4882a593Smuzhiyun } else {
5677*4882a593Smuzhiyun bt_dev_err(hdev, "Dropping invalid advertising data");
5678*4882a593Smuzhiyun }
5679*4882a593Smuzhiyun
5680*4882a593Smuzhiyun ptr += sizeof(*ev) + ev->length + 1;
5681*4882a593Smuzhiyun }
5682*4882a593Smuzhiyun
5683*4882a593Smuzhiyun hci_dev_unlock(hdev);
5684*4882a593Smuzhiyun }
5685*4882a593Smuzhiyun
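/* Map an extended advertising report event type onto the closest
 * legacy PDU type, or LE_ADV_INVALID if no sensible mapping exists.
 */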
5686*4882a593Smuzhiyun static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5687*4882a593Smuzhiyun {
5688*4882a593Smuzhiyun if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5689*4882a593Smuzhiyun switch (evt_type) {
5690*4882a593Smuzhiyun case LE_LEGACY_ADV_IND:
5691*4882a593Smuzhiyun return LE_ADV_IND;
5692*4882a593Smuzhiyun case LE_LEGACY_ADV_DIRECT_IND:
5693*4882a593Smuzhiyun return LE_ADV_DIRECT_IND;
5694*4882a593Smuzhiyun case LE_LEGACY_ADV_SCAN_IND:
5695*4882a593Smuzhiyun return LE_ADV_SCAN_IND;
5696*4882a593Smuzhiyun case LE_LEGACY_NONCONN_IND:
5697*4882a593Smuzhiyun return LE_ADV_NONCONN_IND;
5698*4882a593Smuzhiyun case LE_LEGACY_SCAN_RSP_ADV:
5699*4882a593Smuzhiyun case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5700*4882a593Smuzhiyun return LE_ADV_SCAN_RSP;
5701*4882a593Smuzhiyun }
5702*4882a593Smuzhiyun
5703*4882a593Smuzhiyun goto invalid;
5704*4882a593Smuzhiyun }
5705*4882a593Smuzhiyun
5706*4882a593Smuzhiyun if (evt_type & LE_EXT_ADV_CONN_IND) {
5707*4882a593Smuzhiyun if (evt_type & LE_EXT_ADV_DIRECT_IND)
5708*4882a593Smuzhiyun return LE_ADV_DIRECT_IND;
5709*4882a593Smuzhiyun
5710*4882a593Smuzhiyun return LE_ADV_IND;
5711*4882a593Smuzhiyun }
5712*4882a593Smuzhiyun
5713*4882a593Smuzhiyun if (evt_type & LE_EXT_ADV_SCAN_RSP)
5714*4882a593Smuzhiyun return LE_ADV_SCAN_RSP;
5715*4882a593Smuzhiyun
5716*4882a593Smuzhiyun if (evt_type & LE_EXT_ADV_SCAN_IND)
5717*4882a593Smuzhiyun return LE_ADV_SCAN_IND;
5718*4882a593Smuzhiyun
5719*4882a593Smuzhiyun if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5720*4882a593Smuzhiyun evt_type & LE_EXT_ADV_DIRECT_IND)
5721*4882a593Smuzhiyun return LE_ADV_NONCONN_IND;
5722*4882a593Smuzhiyun
5723*4882a593Smuzhiyun invalid:
5724*4882a593Smuzhiyun bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5725*4882a593Smuzhiyun evt_type);
5726*4882a593Smuzhiyun
5727*4882a593Smuzhiyun return LE_ADV_INVALID;
5728*4882a593Smuzhiyun }
5729*4882a593Smuzhiyun
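/* LE Extended Advertising Report event: translate each report's event
 * type to its legacy equivalent and feed it to process_adv_report().
 */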
5730*4882a593Smuzhiyun static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5731*4882a593Smuzhiyun {
5732*4882a593Smuzhiyun u8 num_reports = skb->data[0];
5733*4882a593Smuzhiyun void *ptr = &skb->data[1];
5734*4882a593Smuzhiyun
5735*4882a593Smuzhiyun hci_dev_lock(hdev);
5736*4882a593Smuzhiyun
5737*4882a593Smuzhiyun while (num_reports--) {
5738*4882a593Smuzhiyun struct hci_ev_le_ext_adv_report *ev = ptr;
5739*4882a593Smuzhiyun u8 legacy_evt_type;
5740*4882a593Smuzhiyun u16 evt_type;
5741*4882a593Smuzhiyun
5742*4882a593Smuzhiyun evt_type = __le16_to_cpu(ev->evt_type);
5743*4882a593Smuzhiyun legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5744*4882a593Smuzhiyun if (legacy_evt_type != LE_ADV_INVALID) {
5745*4882a593Smuzhiyun process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5746*4882a593Smuzhiyun ev->bdaddr_type, NULL, 0, ev->rssi,
5747*4882a593Smuzhiyun ev->data, ev->length,
5748*4882a593Smuzhiyun !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5749*4882a593Smuzhiyun }
5750*4882a593Smuzhiyun
5751*4882a593Smuzhiyun ptr += sizeof(*ev) + ev->length;
5752*4882a593Smuzhiyun }
5753*4882a593Smuzhiyun
5754*4882a593Smuzhiyun hci_dev_unlock(hdev);
5755*4882a593Smuzhiyun }
5756*4882a593Smuzhiyun
5757*4882a593Smuzhiyun static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5758*4882a593Smuzhiyun struct sk_buff *skb)
5759*4882a593Smuzhiyun {
5760*4882a593Smuzhiyun struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5761*4882a593Smuzhiyun struct hci_conn *conn;
5762*4882a593Smuzhiyun
5763*4882a593Smuzhiyun BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5764*4882a593Smuzhiyun
5765*4882a593Smuzhiyun hci_dev_lock(hdev);
5766*4882a593Smuzhiyun
5767*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5768*4882a593Smuzhiyun if (conn) {
5769*4882a593Smuzhiyun if (!ev->status)
5770*4882a593Smuzhiyun memcpy(conn->features[0], ev->features, 8);
5771*4882a593Smuzhiyun
5772*4882a593Smuzhiyun if (conn->state == BT_CONFIG) {
5773*4882a593Smuzhiyun __u8 status;
5774*4882a593Smuzhiyun
5775*4882a593Smuzhiyun /* If the local controller supports slave-initiated
5776*4882a593Smuzhiyun * features exchange, but the remote controller does
5777*4882a593Smuzhiyun * not, then it is possible that the error code 0x1a
5778*4882a593Smuzhiyun * for unsupported remote feature is returned.
5779*4882a593Smuzhiyun *
5780*4882a593Smuzhiyun * In this specific case, allow the connection to
5781*4882a593Smuzhiyun * transition into connected state and mark it as
5782*4882a593Smuzhiyun * successful.
5783*4882a593Smuzhiyun */
5784*4882a593Smuzhiyun if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5785*4882a593Smuzhiyun !conn->out && ev->status == 0x1a)
5786*4882a593Smuzhiyun status = 0x00;
5787*4882a593Smuzhiyun else
5788*4882a593Smuzhiyun status = ev->status;
5789*4882a593Smuzhiyun
5790*4882a593Smuzhiyun conn->state = BT_CONNECTED;
5791*4882a593Smuzhiyun hci_connect_cfm(conn, status);
5792*4882a593Smuzhiyun hci_conn_drop(conn);
5793*4882a593Smuzhiyun }
5794*4882a593Smuzhiyun }
5795*4882a593Smuzhiyun
5796*4882a593Smuzhiyun hci_dev_unlock(hdev);
5797*4882a593Smuzhiyun }
5798*4882a593Smuzhiyun
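/* LE Long Term Key Request event: look up the stored LTK for the
 * connection and reply with it (zero-padded to the full key length),
 * or send a negative reply if no matching key is found. STKs are
 * single-use and removed once handed to the controller.
 */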
5799*4882a593Smuzhiyun static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5800*4882a593Smuzhiyun {
5801*4882a593Smuzhiyun struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5802*4882a593Smuzhiyun struct hci_cp_le_ltk_reply cp;
5803*4882a593Smuzhiyun struct hci_cp_le_ltk_neg_reply neg;
5804*4882a593Smuzhiyun struct hci_conn *conn;
5805*4882a593Smuzhiyun struct smp_ltk *ltk;
5806*4882a593Smuzhiyun
5807*4882a593Smuzhiyun BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5808*4882a593Smuzhiyun
5809*4882a593Smuzhiyun hci_dev_lock(hdev);
5810*4882a593Smuzhiyun
5811*4882a593Smuzhiyun conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5812*4882a593Smuzhiyun if (conn == NULL)
5813*4882a593Smuzhiyun goto not_found;
5814*4882a593Smuzhiyun
5815*4882a593Smuzhiyun ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5816*4882a593Smuzhiyun if (!ltk)
5817*4882a593Smuzhiyun goto not_found;
5818*4882a593Smuzhiyun
5819*4882a593Smuzhiyun if (smp_ltk_is_sc(ltk)) {
5820*4882a593Smuzhiyun /* With SC both EDiv and Rand are set to zero */
5821*4882a593Smuzhiyun if (ev->ediv || ev->rand)
5822*4882a593Smuzhiyun goto not_found;
5823*4882a593Smuzhiyun } else {
5824*4882a593Smuzhiyun /* For non-SC keys check that EDiv and Rand match */
5825*4882a593Smuzhiyun if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5826*4882a593Smuzhiyun goto not_found;
5827*4882a593Smuzhiyun }
5828*4882a593Smuzhiyun
5829*4882a593Smuzhiyun memcpy(cp.ltk, ltk->val, ltk->enc_size);
5830*4882a593Smuzhiyun memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5831*4882a593Smuzhiyun cp.handle = cpu_to_le16(conn->handle);
5832*4882a593Smuzhiyun
5833*4882a593Smuzhiyun conn->pending_sec_level = smp_ltk_sec_level(ltk);
5834*4882a593Smuzhiyun
5835*4882a593Smuzhiyun conn->enc_key_size = ltk->enc_size;
5836*4882a593Smuzhiyun
5837*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5838*4882a593Smuzhiyun
5839*4882a593Smuzhiyun /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5840*4882a593Smuzhiyun * temporary key used to encrypt a connection following
5841*4882a593Smuzhiyun * pairing. It is used during the Encrypted Session Setup to
5842*4882a593Smuzhiyun * distribute the keys. Later, security can be re-established
5843*4882a593Smuzhiyun * using a distributed LTK.
5844*4882a593Smuzhiyun */
5845*4882a593Smuzhiyun if (ltk->type == SMP_STK) {
5846*4882a593Smuzhiyun set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5847*4882a593Smuzhiyun list_del_rcu(&ltk->list);
5848*4882a593Smuzhiyun kfree_rcu(ltk, rcu);
5849*4882a593Smuzhiyun } else {
5850*4882a593Smuzhiyun clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5851*4882a593Smuzhiyun }
5852*4882a593Smuzhiyun
5853*4882a593Smuzhiyun hci_dev_unlock(hdev);
5854*4882a593Smuzhiyun
5855*4882a593Smuzhiyun return;
5856*4882a593Smuzhiyun
5857*4882a593Smuzhiyun not_found:
5858*4882a593Smuzhiyun neg.handle = ev->handle;
5859*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5860*4882a593Smuzhiyun hci_dev_unlock(hdev);
5861*4882a593Smuzhiyun }
5862*4882a593Smuzhiyun
5863*4882a593Smuzhiyun static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5864*4882a593Smuzhiyun u8 reason)
5865*4882a593Smuzhiyun {
5866*4882a593Smuzhiyun struct hci_cp_le_conn_param_req_neg_reply cp;
5867*4882a593Smuzhiyun
5868*4882a593Smuzhiyun cp.handle = cpu_to_le16(handle);
5869*4882a593Smuzhiyun cp.reason = reason;
5870*4882a593Smuzhiyun
5871*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5872*4882a593Smuzhiyun &cp);
5873*4882a593Smuzhiyun }
5874*4882a593Smuzhiyun
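/* LE Remote Connection Parameter Request event: send a negative reply
 * for unknown connections or invalid parameters; otherwise accept the
 * request and, in the master role, store the new parameters and inform
 * user space via mgmt.
 */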
5875*4882a593Smuzhiyun static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5876*4882a593Smuzhiyun struct sk_buff *skb)
5877*4882a593Smuzhiyun {
5878*4882a593Smuzhiyun struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5879*4882a593Smuzhiyun struct hci_cp_le_conn_param_req_reply cp;
5880*4882a593Smuzhiyun struct hci_conn *hcon;
5881*4882a593Smuzhiyun u16 handle, min, max, latency, timeout;
5882*4882a593Smuzhiyun
5883*4882a593Smuzhiyun handle = le16_to_cpu(ev->handle);
5884*4882a593Smuzhiyun min = le16_to_cpu(ev->interval_min);
5885*4882a593Smuzhiyun max = le16_to_cpu(ev->interval_max);
5886*4882a593Smuzhiyun latency = le16_to_cpu(ev->latency);
5887*4882a593Smuzhiyun timeout = le16_to_cpu(ev->timeout);
5888*4882a593Smuzhiyun
5889*4882a593Smuzhiyun hcon = hci_conn_hash_lookup_handle(hdev, handle);
5890*4882a593Smuzhiyun if (!hcon || hcon->state != BT_CONNECTED)
5891*4882a593Smuzhiyun return send_conn_param_neg_reply(hdev, handle,
5892*4882a593Smuzhiyun HCI_ERROR_UNKNOWN_CONN_ID);
5893*4882a593Smuzhiyun
5894*4882a593Smuzhiyun if (hci_check_conn_params(min, max, latency, timeout))
5895*4882a593Smuzhiyun return send_conn_param_neg_reply(hdev, handle,
5896*4882a593Smuzhiyun HCI_ERROR_INVALID_LL_PARAMS);
5897*4882a593Smuzhiyun
5898*4882a593Smuzhiyun if (hcon->role == HCI_ROLE_MASTER) {
5899*4882a593Smuzhiyun struct hci_conn_params *params;
5900*4882a593Smuzhiyun u8 store_hint;
5901*4882a593Smuzhiyun
5902*4882a593Smuzhiyun hci_dev_lock(hdev);
5903*4882a593Smuzhiyun
5904*4882a593Smuzhiyun params = hci_conn_params_lookup(hdev, &hcon->dst,
5905*4882a593Smuzhiyun hcon->dst_type);
5906*4882a593Smuzhiyun if (params) {
5907*4882a593Smuzhiyun params->conn_min_interval = min;
5908*4882a593Smuzhiyun params->conn_max_interval = max;
5909*4882a593Smuzhiyun params->conn_latency = latency;
5910*4882a593Smuzhiyun params->supervision_timeout = timeout;
5911*4882a593Smuzhiyun store_hint = 0x01;
5912*4882a593Smuzhiyun } else {
5913*4882a593Smuzhiyun store_hint = 0x00;
5914*4882a593Smuzhiyun }
5915*4882a593Smuzhiyun
5916*4882a593Smuzhiyun hci_dev_unlock(hdev);
5917*4882a593Smuzhiyun
5918*4882a593Smuzhiyun mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5919*4882a593Smuzhiyun store_hint, min, max, latency, timeout);
5920*4882a593Smuzhiyun }
5921*4882a593Smuzhiyun
5922*4882a593Smuzhiyun cp.handle = ev->handle;
5923*4882a593Smuzhiyun cp.interval_min = ev->interval_min;
5924*4882a593Smuzhiyun cp.interval_max = ev->interval_max;
5925*4882a593Smuzhiyun cp.latency = ev->latency;
5926*4882a593Smuzhiyun cp.timeout = ev->timeout;
5927*4882a593Smuzhiyun cp.min_ce_len = 0;
5928*4882a593Smuzhiyun cp.max_ce_len = 0;
5929*4882a593Smuzhiyun
5930*4882a593Smuzhiyun hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5931*4882a593Smuzhiyun }
5932*4882a593Smuzhiyun
5933*4882a593Smuzhiyun static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5934*4882a593Smuzhiyun struct sk_buff *skb)
5935*4882a593Smuzhiyun {
5936*4882a593Smuzhiyun u8 num_reports = skb->data[0];
5937*4882a593Smuzhiyun struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
5938*4882a593Smuzhiyun
5939*4882a593Smuzhiyun if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5940*4882a593Smuzhiyun return;
5941*4882a593Smuzhiyun
5942*4882a593Smuzhiyun hci_dev_lock(hdev);
5943*4882a593Smuzhiyun
5944*4882a593Smuzhiyun for (; num_reports; num_reports--, ev++)
5945*4882a593Smuzhiyun process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5946*4882a593Smuzhiyun ev->bdaddr_type, &ev->direct_addr,
5947*4882a593Smuzhiyun ev->direct_addr_type, ev->rssi, NULL, 0,
5948*4882a593Smuzhiyun false);
5949*4882a593Smuzhiyun
5950*4882a593Smuzhiyun hci_dev_unlock(hdev);
5951*4882a593Smuzhiyun }
5952*4882a593Smuzhiyun
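/* Handle the LE PHY Update Complete event: record the negotiated TX and RX
 * PHYs on the affected connection.
 */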
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

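/* Dispatch LE Meta events to their subevent-specific handlers. */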
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}

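/* Check whether the given skb carries the Command Complete event matching
 * the pending opcode (or the explicitly requested event) and, if so,
 * advance it past the headers so only the return parameters remain.
 */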
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

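/* If the controller is suspended and this is the first event seen since
 * suspend, record the wake reason and the address of the peer (if any)
 * that triggered the wakeup, for later reporting over mgmt.
 */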
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * BlueZ mgmt API docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

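/* Main entry point for incoming HCI event packets: record the wake reason
 * when suspended, dispatch the event to its handler and complete any
 * pending HCI request that was waiting for it.
 */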
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}