Lines matching refs: hdev

37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)  in hci_req_init()  argument
40 req->hdev = hdev; in hci_req_init()
49 bool hci_req_status_pend(struct hci_dev *hdev) in hci_req_status_pend() argument
51 return hdev->req_status == HCI_REQ_PEND; in hci_req_status_pend()
57 struct hci_dev *hdev = req->hdev; in req_run() local
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags); in req_run()
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); in req_run()
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); in req_run()
87 queue_work(hdev->workqueue, &hdev->cmd_work); in req_run()
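
The req_run() lines above are the asynchronous half of the request machinery: queued commands are spliced from req->cmd_q onto hdev->cmd_q under the queue lock, then cmd_work is scheduled. A minimal sketch of how a caller might drive this through hci_req_init()/hci_req_add()/hci_req_run(); the example_* names and the choice of command are illustrative only, not taken from this listing:

/* Build a one-command request and submit it asynchronously.
 * example_complete() is a hypothetical hci_req_complete_t callback
 * that runs once the controller answers the queued command.
 */
static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x", opcode, status);
}

static int example_async_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 scan = SCAN_DISABLED;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* Splices req.cmd_q onto hdev->cmd_q and kicks cmd_work. */
	return hci_req_run(&req, example_complete);
}
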
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, in hci_req_sync_complete() argument
105 BT_DBG("%s result 0x%2.2x", hdev->name, result); in hci_req_sync_complete()
107 if (hdev->req_status == HCI_REQ_PEND) { in hci_req_sync_complete()
108 hdev->req_result = result; in hci_req_sync_complete()
109 hdev->req_status = HCI_REQ_DONE; in hci_req_sync_complete()
111 hdev->req_skb = skb_get(skb); in hci_req_sync_complete()
112 wake_up_interruptible(&hdev->req_wait_q); in hci_req_sync_complete()
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err) in hci_req_sync_cancel() argument
118 BT_DBG("%s err 0x%2.2x", hdev->name, err); in hci_req_sync_cancel()
120 if (hdev->req_status == HCI_REQ_PEND) { in hci_req_sync_cancel()
121 hdev->req_result = err; in hci_req_sync_cancel()
122 hdev->req_status = HCI_REQ_CANCELED; in hci_req_sync_cancel()
123 wake_up_interruptible(&hdev->req_wait_q); in hci_req_sync_cancel()
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, in __hci_cmd_sync_ev() argument
134 BT_DBG("%s", hdev->name); in __hci_cmd_sync_ev()
136 hci_req_init(&req, hdev); in __hci_cmd_sync_ev()
140 hdev->req_status = HCI_REQ_PEND; in __hci_cmd_sync_ev()
146 err = wait_event_interruptible_timeout(hdev->req_wait_q, in __hci_cmd_sync_ev()
147 hdev->req_status != HCI_REQ_PEND, timeout); in __hci_cmd_sync_ev()
152 switch (hdev->req_status) { in __hci_cmd_sync_ev()
154 err = -bt_to_errno(hdev->req_result); in __hci_cmd_sync_ev()
158 err = -hdev->req_result; in __hci_cmd_sync_ev()
166 hdev->req_status = hdev->req_result = 0; in __hci_cmd_sync_ev()
167 skb = hdev->req_skb; in __hci_cmd_sync_ev()
168 hdev->req_skb = NULL; in __hci_cmd_sync_ev()
170 BT_DBG("%s end: err %d", hdev->name, err); in __hci_cmd_sync_ev()
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, in __hci_cmd_sync() argument
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout); in __hci_cmd_sync()
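
__hci_cmd_sync() and __hci_cmd_sync_ev() form the synchronous single-command path: the caller sleeps on hdev->req_wait_q until hci_req_sync_complete() sets HCI_REQ_DONE and stashes the response in hdev->req_skb. A sketch of a typical caller, assuming hdev->req_lock is already held (which the leading underscores imply); the wrapper name is made up:

/* Hypothetical helper: synchronously read the local version.
 * On success the returned skb carries struct hci_rp_read_local_version;
 * on failure __hci_cmd_sync() returns an ERR_PTR.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* ... parse skb->data here ... */
	kfree_skb(skb);
	return 0;
}
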
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, in __hci_req_sync() argument
199 BT_DBG("%s start", hdev->name); in __hci_req_sync()
201 hci_req_init(&req, hdev); in __hci_req_sync()
203 hdev->req_status = HCI_REQ_PEND; in __hci_req_sync()
214 hdev->req_status = 0; in __hci_req_sync()
233 err = wait_event_interruptible_timeout(hdev->req_wait_q, in __hci_req_sync()
234 hdev->req_status != HCI_REQ_PEND, timeout); in __hci_req_sync()
239 switch (hdev->req_status) { in __hci_req_sync()
241 err = -bt_to_errno(hdev->req_result); in __hci_req_sync()
243 *hci_status = hdev->req_result; in __hci_req_sync()
247 err = -hdev->req_result; in __hci_req_sync()
259 kfree_skb(hdev->req_skb); in __hci_req_sync()
260 hdev->req_skb = NULL; in __hci_req_sync()
261 hdev->req_status = hdev->req_result = 0; in __hci_req_sync()
263 BT_DBG("%s end: err %d", hdev->name, err); in __hci_req_sync()
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, in hci_req_sync() argument
275 hci_req_sync_lock(hdev); in hci_req_sync()
280 if (test_bit(HCI_UP, &hdev->flags)) in hci_req_sync()
281 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); in hci_req_sync()
284 hci_req_sync_unlock(hdev); in hci_req_sync()
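
hci_req_sync() wraps __hci_req_sync() with hci_req_sync_lock()/hci_req_sync_unlock() and only runs the request builder while HCI_UP is set. A sketch of the callback pattern it expects, mirroring builders such as update_scan() later in this listing; example_set_scan() and example_enable_page_scan() are illustrative names:

/* Request builder: called with a prepared struct hci_request and the
 * opaque opt value that was passed to hci_req_sync().
 */
static int example_set_scan(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int example_enable_page_scan(struct hci_dev *hdev)
{
	/* Blocks for up to HCI_CMD_TIMEOUT; the final u8 * argument
	 * would receive the HCI status if the caller needed it.
	 */
	return hci_req_sync(hdev, example_set_scan, SCAN_PAGE,
			    HCI_CMD_TIMEOUT, NULL);
}
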
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, in hci_prepare_cmd() argument
319 struct hci_dev *hdev = req->hdev; in hci_req_add_ev() local
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); in hci_req_add_ev()
330 skb = hci_prepare_cmd(hdev, opcode, plen, param); in hci_req_add_ev()
332 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", in hci_req_add_ev()
354 struct hci_dev *hdev = req->hdev; in __hci_req_write_fast_connectable() local
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) in __hci_req_write_fast_connectable()
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2) in __hci_req_write_fast_connectable()
370 type = hdev->def_page_scan_type; in __hci_req_write_fast_connectable()
371 acp.interval = cpu_to_le16(hdev->def_page_scan_int); in __hci_req_write_fast_connectable()
374 acp.window = cpu_to_le16(hdev->def_page_scan_window); in __hci_req_write_fast_connectable()
376 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || in __hci_req_write_fast_connectable()
377 __cpu_to_le16(hdev->page_scan_window) != acp.window) in __hci_req_write_fast_connectable()
381 if (hdev->page_scan_type != type) in __hci_req_write_fast_connectable()
393 struct hci_dev *hdev = req->hdev; in __hci_update_background_scan() local
395 if (!test_bit(HCI_UP, &hdev->flags) || in __hci_update_background_scan()
396 test_bit(HCI_INIT, &hdev->flags) || in __hci_update_background_scan()
397 hci_dev_test_flag(hdev, HCI_SETUP) || in __hci_update_background_scan()
398 hci_dev_test_flag(hdev, HCI_CONFIG) || in __hci_update_background_scan()
399 hci_dev_test_flag(hdev, HCI_AUTO_OFF) || in __hci_update_background_scan()
400 hci_dev_test_flag(hdev, HCI_UNREGISTER)) in __hci_update_background_scan()
404 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) in __hci_update_background_scan()
408 if (hdev->discovery.state != DISCOVERY_STOPPED) in __hci_update_background_scan()
418 hci_discovery_filter_clear(hdev); in __hci_update_background_scan()
420 BT_DBG("%s ADV monitoring is %s", hdev->name, in __hci_update_background_scan()
421 hci_is_adv_monitoring(hdev) ? "on" : "off"); in __hci_update_background_scan()
423 if (list_empty(&hdev->pend_le_conns) && in __hci_update_background_scan()
424 list_empty(&hdev->pend_le_reports) && in __hci_update_background_scan()
425 !hci_is_adv_monitoring(hdev)) { in __hci_update_background_scan()
432 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) in __hci_update_background_scan()
437 BT_DBG("%s stopping background scanning", hdev->name); in __hci_update_background_scan()
447 if (hci_lookup_le_connect(hdev)) in __hci_update_background_scan()
453 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) in __hci_update_background_scan()
458 BT_DBG("%s starting background scanning", hdev->name); in __hci_update_background_scan()
464 struct hci_dev *hdev = req->hdev; in __hci_req_update_name() local
467 memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); in __hci_req_update_name()
474 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) in create_uuid16_list() argument
482 list_for_each_entry(uuid, &hdev->uuids, list) { in create_uuid16_list()
516 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) in create_uuid32_list() argument
524 list_for_each_entry(uuid, &hdev->uuids, list) { in create_uuid32_list()
549 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) in create_uuid128_list() argument
557 list_for_each_entry(uuid, &hdev->uuids, list) { in create_uuid128_list()
582 static void create_eir(struct hci_dev *hdev, u8 *data) in create_eir() argument
587 name_len = strlen(hdev->dev_name); in create_eir()
600 memcpy(ptr + 2, hdev->dev_name, name_len); in create_eir()
605 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { in create_eir()
608 ptr[2] = (u8) hdev->inq_tx_power; in create_eir()
613 if (hdev->devid_source > 0) { in create_eir()
617 put_unaligned_le16(hdev->devid_source, ptr + 2); in create_eir()
618 put_unaligned_le16(hdev->devid_vendor, ptr + 4); in create_eir()
619 put_unaligned_le16(hdev->devid_product, ptr + 6); in create_eir()
620 put_unaligned_le16(hdev->devid_version, ptr + 8); in create_eir()
625 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); in create_eir()
626 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); in create_eir()
627 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); in create_eir()
632 struct hci_dev *hdev = req->hdev; in __hci_req_update_eir() local
635 if (!hdev_is_powered(hdev)) in __hci_req_update_eir()
638 if (!lmp_ext_inq_capable(hdev)) in __hci_req_update_eir()
641 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) in __hci_req_update_eir()
644 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) in __hci_req_update_eir()
649 create_eir(hdev, cp.data); in __hci_req_update_eir()
651 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) in __hci_req_update_eir()
654 memcpy(hdev->eir, cp.data, sizeof(cp.data)); in __hci_req_update_eir()
661 struct hci_dev *hdev = req->hdev; in hci_req_add_le_scan_disable() local
663 if (hdev->scanning_paused) { in hci_req_add_le_scan_disable()
664 bt_dev_dbg(hdev, "Scanning is paused for suspend"); in hci_req_add_le_scan_disable()
668 if (use_ext_scan(hdev)) { in hci_req_add_le_scan_disable()
684 if (use_ll_privacy(hdev) && in hci_req_add_le_scan_disable()
685 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && in hci_req_add_le_scan_disable()
686 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { in hci_req_add_le_scan_disable()
701 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr, in del_from_white_list()
705 if (use_ll_privacy(req->hdev) && in del_from_white_list()
706 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) { in del_from_white_list()
709 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); in del_from_white_list()
728 struct hci_dev *hdev = req->hdev; in add_to_white_list() local
731 if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr, in add_to_white_list()
736 if (*num_entries >= hdev->le_white_list_size) in add_to_white_list()
741 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && in add_to_white_list()
742 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) { in add_to_white_list()
747 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, in add_to_white_list()
755 bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr, in add_to_white_list()
759 if (use_ll_privacy(hdev) && in add_to_white_list()
760 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) { in add_to_white_list()
763 irk = hci_find_irk_by_addr(hdev, &params->addr, in add_to_white_list()
772 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) in add_to_white_list()
773 memcpy(cp.local_irk, hdev->irk, 16); in add_to_white_list()
787 struct hci_dev *hdev = req->hdev; in update_white_list() local
797 bool allow_rpa = hdev->suspended; in update_white_list()
799 if (use_ll_privacy(hdev) && in update_white_list()
800 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) in update_white_list()
809 list_for_each_entry(b, &hdev->le_white_list, list) { in update_white_list()
810 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, in update_white_list()
813 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, in update_white_list()
827 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && in update_white_list()
828 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { in update_white_list()
845 list_for_each_entry(params, &hdev->pend_le_conns, action) { in update_white_list()
854 list_for_each_entry(params, &hdev->pend_le_reports, action) { in update_white_list()
864 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended) in update_white_list()
871 static bool scan_use_rpa(struct hci_dev *hdev) in scan_use_rpa() argument
873 return hci_dev_test_flag(hdev, HCI_PRIVACY); in scan_use_rpa()
880 struct hci_dev *hdev = req->hdev; in hci_req_start_scan() local
882 if (hdev->scanning_paused) { in hci_req_start_scan()
883 bt_dev_dbg(hdev, "Scanning is paused for suspend"); in hci_req_start_scan()
887 if (use_ll_privacy(hdev) && in hci_req_start_scan()
888 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && in hci_req_start_scan()
898 if (use_ext_scan(hdev)) { in hci_req_start_scan()
914 if (scan_1m(hdev) || scan_2m(hdev)) { in hci_req_start_scan()
926 if (scan_coded(hdev)) { in hci_req_start_scan()
969 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) in hci_is_le_conn_scanning() argument
971 struct hci_conn_hash *h = &hdev->conn_hash; in hci_is_le_conn_scanning()
995 struct hci_dev *hdev = req->hdev; in hci_req_add_le_passive_scan() local
1002 if (hdev->scanning_paused) { in hci_req_add_le_passive_scan()
1003 bt_dev_dbg(hdev, "Scanning is paused for suspend"); in hci_req_add_le_passive_scan()
1013 if (hci_update_random_address(req, false, scan_use_rpa(hdev), in hci_req_add_le_passive_scan()
1032 if (hci_dev_test_flag(hdev, HCI_PRIVACY) && in hci_req_add_le_passive_scan()
1033 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) in hci_req_add_le_passive_scan()
1036 if (hdev->suspended) { in hci_req_add_le_passive_scan()
1037 window = hdev->le_scan_window_suspend; in hci_req_add_le_passive_scan()
1038 interval = hdev->le_scan_int_suspend; in hci_req_add_le_passive_scan()
1039 } else if (hci_is_le_conn_scanning(hdev)) { in hci_req_add_le_passive_scan()
1040 window = hdev->le_scan_window_connect; in hci_req_add_le_passive_scan()
1041 interval = hdev->le_scan_int_connect; in hci_req_add_le_passive_scan()
1042 } else if (hci_is_adv_monitoring(hdev)) { in hci_req_add_le_passive_scan()
1043 window = hdev->le_scan_window_adv_monitor; in hci_req_add_le_passive_scan()
1044 interval = hdev->le_scan_int_adv_monitor; in hci_req_add_le_passive_scan()
1046 window = hdev->le_scan_window; in hci_req_add_le_passive_scan()
1047 interval = hdev->le_scan_interval; in hci_req_add_le_passive_scan()
1050 bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy); in hci_req_add_le_passive_scan()
1055 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) in get_adv_instance_scan_rsp_len() argument
1063 adv_instance = hci_find_adv_instance(hdev, instance); in get_adv_instance_scan_rsp_len()
1092 struct hci_dev *hdev = req->hdev; in hci_req_set_event_filter() local
1098 list_for_each_entry(b, &hdev->whitelist, list) { in hci_req_set_event_filter()
1109 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); in hci_req_set_event_filter()
1120 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN)) in hci_req_config_le_suspend_scan()
1127 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks); in hci_req_config_le_suspend_scan()
1130 static void cancel_adv_timeout(struct hci_dev *hdev) in cancel_adv_timeout() argument
1132 if (hdev->adv_instance_timeout) { in cancel_adv_timeout()
1133 hdev->adv_instance_timeout = 0; in cancel_adv_timeout()
1134 cancel_delayed_work(&hdev->adv_instance_expire); in cancel_adv_timeout()
1141 bt_dev_dbg(req->hdev, "Suspending advertising instances"); in hci_suspend_adv_instances()
1149 if (!ext_adv_capable(req->hdev)) in hci_suspend_adv_instances()
1150 cancel_adv_timeout(req->hdev); in hci_suspend_adv_instances()
1158 bt_dev_dbg(req->hdev, "Resuming advertising instances"); in hci_resume_adv_instances()
1160 if (ext_adv_capable(req->hdev)) { in hci_resume_adv_instances()
1162 list_for_each_entry(adv, &req->hdev->adv_instances, list) { in hci_resume_adv_instances()
1172 req->hdev->cur_adv_instance, in hci_resume_adv_instances()
1177 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode) in suspend_req_complete() argument
1179 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode, in suspend_req_complete()
1181 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || in suspend_req_complete()
1182 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { in suspend_req_complete()
1183 wake_up(&hdev->suspend_wait_q); in suspend_req_complete()
1188 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) in hci_req_prepare_suspend() argument
1196 if (next == hdev->suspend_state) { in hci_req_prepare_suspend()
1197 bt_dev_dbg(hdev, "Same state before and after: %d", next); in hci_req_prepare_suspend()
1201 hdev->suspend_state = next; in hci_req_prepare_suspend()
1202 hci_req_init(&req, hdev); in hci_req_prepare_suspend()
1206 hdev->suspended = true; in hci_req_prepare_suspend()
1209 old_state = hdev->discovery.state; in hci_req_prepare_suspend()
1211 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks); in hci_req_prepare_suspend()
1212 hci_discovery_set_state(hdev, DISCOVERY_STOPPING); in hci_req_prepare_suspend()
1213 queue_work(hdev->req_workqueue, &hdev->discov_update); in hci_req_prepare_suspend()
1216 hdev->discovery_paused = true; in hci_req_prepare_suspend()
1217 hdev->discovery_old_state = old_state; in hci_req_prepare_suspend()
1220 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); in hci_req_prepare_suspend()
1222 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks); in hci_req_prepare_suspend()
1223 cancel_delayed_work(&hdev->discov_off); in hci_req_prepare_suspend()
1224 queue_delayed_work(hdev->req_workqueue, in hci_req_prepare_suspend()
1225 &hdev->discov_off, 0); in hci_req_prepare_suspend()
1229 if (hdev->adv_instance_cnt) in hci_req_prepare_suspend()
1232 hdev->advertising_paused = true; in hci_req_prepare_suspend()
1233 hdev->advertising_old_state = old_state; in hci_req_prepare_suspend()
1239 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) in hci_req_prepare_suspend()
1243 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); in hci_req_prepare_suspend()
1246 hdev->scanning_paused = true; in hci_req_prepare_suspend()
1253 list_for_each_entry(conn, &hdev->conn_hash.list, list) { in hci_req_prepare_suspend()
1259 bt_dev_dbg(hdev, in hci_req_prepare_suspend()
1262 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks); in hci_req_prepare_suspend()
1266 hdev->scanning_paused = false; in hci_req_prepare_suspend()
1272 hdev->scanning_paused = true; in hci_req_prepare_suspend()
1275 hdev->suspended = false; in hci_req_prepare_suspend()
1276 hdev->scanning_paused = false; in hci_req_prepare_suspend()
1283 hdev->advertising_paused = false; in hci_req_prepare_suspend()
1284 if (hdev->advertising_old_state) { in hci_req_prepare_suspend()
1286 hdev->suspend_tasks); in hci_req_prepare_suspend()
1287 hci_dev_set_flag(hdev, HCI_ADVERTISING); in hci_req_prepare_suspend()
1288 queue_work(hdev->req_workqueue, in hci_req_prepare_suspend()
1289 &hdev->discoverable_update); in hci_req_prepare_suspend()
1290 hdev->advertising_old_state = 0; in hci_req_prepare_suspend()
1294 if (hdev->adv_instance_cnt) in hci_req_prepare_suspend()
1298 hdev->discovery_paused = false; in hci_req_prepare_suspend()
1299 if (hdev->discovery_old_state != DISCOVERY_STOPPED && in hci_req_prepare_suspend()
1300 hdev->discovery_old_state != DISCOVERY_STOPPING) { in hci_req_prepare_suspend()
1301 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks); in hci_req_prepare_suspend()
1302 hci_discovery_set_state(hdev, DISCOVERY_STARTING); in hci_req_prepare_suspend()
1303 queue_work(hdev->req_workqueue, &hdev->discov_update); in hci_req_prepare_suspend()
1309 hdev->suspend_state = next; in hci_req_prepare_suspend()
1312 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); in hci_req_prepare_suspend()
1313 wake_up(&hdev->suspend_wait_q); in hci_req_prepare_suspend()
1316 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) in get_cur_adv_instance_scan_rsp_len() argument
1318 u8 instance = hdev->cur_adv_instance; in get_cur_adv_instance_scan_rsp_len()
1325 adv_instance = hci_find_adv_instance(hdev, instance); in get_cur_adv_instance_scan_rsp_len()
1337 if (ext_adv_capable(req->hdev)) { in __hci_req_disable_advertising()
1347 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) in get_adv_instance_flags() argument
1361 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) in get_adv_instance_flags()
1364 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) in get_adv_instance_flags()
1366 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) in get_adv_instance_flags()
1372 adv_instance = hci_find_adv_instance(hdev, instance); in get_adv_instance_flags()
1381 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) in adv_use_rpa() argument
1384 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) in adv_use_rpa()
1388 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) in adv_use_rpa()
1395 hci_dev_test_flag(hdev, HCI_BONDABLE)) in adv_use_rpa()
1404 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) in is_advertising_allowed() argument
1407 if (hci_conn_num(hdev, LE_LINK) == 0) in is_advertising_allowed()
1411 if (hdev->conn_hash.le_num_slave > 0) { in is_advertising_allowed()
1413 if (!connectable && !(hdev->le_states[2] & 0x10)) in is_advertising_allowed()
1419 if (connectable && (!(hdev->le_states[4] & 0x40) || in is_advertising_allowed()
1420 !(hdev->le_states[2] & 0x20))) in is_advertising_allowed()
1425 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) { in is_advertising_allowed()
1427 if (!connectable && !(hdev->le_states[2] & 0x02)) in is_advertising_allowed()
1433 if (connectable && (!(hdev->le_states[4] & 0x08) || in is_advertising_allowed()
1434 !(hdev->le_states[2] & 0x08))) in is_advertising_allowed()
1443 struct hci_dev *hdev = req->hdev; in __hci_req_enable_advertising() local
1450 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance); in __hci_req_enable_advertising()
1456 mgmt_get_connectable(hdev); in __hci_req_enable_advertising()
1458 if (!is_advertising_allowed(hdev, connectable)) in __hci_req_enable_advertising()
1461 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) in __hci_req_enable_advertising()
1469 hci_dev_clear_flag(hdev, HCI_LE_ADV); in __hci_req_enable_advertising()
1476 adv_use_rpa(hdev, flags), in __hci_req_enable_advertising()
1485 adv_min_interval = hdev->le_adv_min_interval; in __hci_req_enable_advertising()
1486 adv_max_interval = hdev->le_adv_max_interval; in __hci_req_enable_advertising()
1488 if (get_cur_adv_instance_scan_rsp_len(hdev)) in __hci_req_enable_advertising()
1493 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || in __hci_req_enable_advertising()
1494 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { in __hci_req_enable_advertising()
1498 adv_min_interval = hdev->le_adv_min_interval; in __hci_req_enable_advertising()
1499 adv_max_interval = hdev->le_adv_max_interval; in __hci_req_enable_advertising()
1506 cp.channel_map = hdev->le_adv_channel_map; in __hci_req_enable_advertising()
1513 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) in append_local_name() argument
1523 complete_len = strlen(hdev->dev_name); in append_local_name()
1526 hdev->dev_name, complete_len + 1); in append_local_name()
1529 short_len = strlen(hdev->short_name); in append_local_name()
1532 hdev->short_name, short_len + 1); in append_local_name()
1540 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); in append_local_name()
1550 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) in append_appearance() argument
1552 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); in append_appearance()
1555 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) in create_default_scan_rsp_data() argument
1559 if (hdev->appearance) { in create_default_scan_rsp_data()
1560 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); in create_default_scan_rsp_data()
1563 return append_local_name(hdev, ptr, scan_rsp_len); in create_default_scan_rsp_data()
1566 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, in create_instance_scan_rsp_data() argument
1573 adv_instance = hci_find_adv_instance(hdev, instance); in create_instance_scan_rsp_data()
1579 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) { in create_instance_scan_rsp_data()
1580 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); in create_instance_scan_rsp_data()
1589 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len); in create_instance_scan_rsp_data()
1596 struct hci_dev *hdev = req->hdev; in __hci_req_update_scan_rsp_data() local
1599 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) in __hci_req_update_scan_rsp_data()
1602 if (ext_adv_capable(hdev)) { in __hci_req_update_scan_rsp_data()
1608 len = create_instance_scan_rsp_data(hdev, instance, in __hci_req_update_scan_rsp_data()
1611 len = create_default_scan_rsp_data(hdev, cp.data); in __hci_req_update_scan_rsp_data()
1613 if (hdev->scan_rsp_data_len == len && in __hci_req_update_scan_rsp_data()
1614 !memcmp(cp.data, hdev->scan_rsp_data, len)) in __hci_req_update_scan_rsp_data()
1617 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); in __hci_req_update_scan_rsp_data()
1618 hdev->scan_rsp_data_len = len; in __hci_req_update_scan_rsp_data()
1633 len = create_instance_scan_rsp_data(hdev, instance, in __hci_req_update_scan_rsp_data()
1636 len = create_default_scan_rsp_data(hdev, cp.data); in __hci_req_update_scan_rsp_data()
1638 if (hdev->scan_rsp_data_len == len && in __hci_req_update_scan_rsp_data()
1639 !memcmp(cp.data, hdev->scan_rsp_data, len)) in __hci_req_update_scan_rsp_data()
1642 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); in __hci_req_update_scan_rsp_data()
1643 hdev->scan_rsp_data_len = len; in __hci_req_update_scan_rsp_data()
1651 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) in create_instance_adv_data() argument
1659 adv_instance = hci_find_adv_instance(hdev, instance); in create_instance_adv_data()
1664 instance_flags = get_adv_instance_flags(hdev, instance); in create_instance_adv_data()
1683 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) in create_instance_adv_data()
1691 flags |= mgmt_get_adv_discov_flags(hdev); in create_instance_adv_data()
1717 if (ext_adv_capable(hdev)) { in create_instance_adv_data()
1721 adv_tx_power = hdev->adv_tx_power; in create_instance_adv_data()
1723 adv_tx_power = hdev->adv_tx_power; in create_instance_adv_data()
1742 struct hci_dev *hdev = req->hdev; in __hci_req_update_adv_data() local
1745 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) in __hci_req_update_adv_data()
1748 if (ext_adv_capable(hdev)) { in __hci_req_update_adv_data()
1753 len = create_instance_adv_data(hdev, instance, cp.data); in __hci_req_update_adv_data()
1756 if (hdev->adv_data_len == len && in __hci_req_update_adv_data()
1757 memcmp(cp.data, hdev->adv_data, len) == 0) in __hci_req_update_adv_data()
1760 memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); in __hci_req_update_adv_data()
1761 hdev->adv_data_len = len; in __hci_req_update_adv_data()
1774 len = create_instance_adv_data(hdev, instance, cp.data); in __hci_req_update_adv_data()
1777 if (hdev->adv_data_len == len && in __hci_req_update_adv_data()
1778 memcmp(cp.data, hdev->adv_data, len) == 0) in __hci_req_update_adv_data()
1781 memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); in __hci_req_update_adv_data()
1782 hdev->adv_data_len = len; in __hci_req_update_adv_data()
1790 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) in hci_req_update_adv_data() argument
1794 hci_req_init(&req, hdev); in hci_req_update_adv_data()
1800 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status, in enable_addr_resolution_complete() argument
1803 BT_DBG("%s status %u", hdev->name, status); in enable_addr_resolution_complete()
1806 void hci_req_disable_address_resolution(struct hci_dev *hdev) in hci_req_disable_address_resolution() argument
1811 if (!use_ll_privacy(hdev) && in hci_req_disable_address_resolution()
1812 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) in hci_req_disable_address_resolution()
1815 hci_req_init(&req, hdev); in hci_req_disable_address_resolution()
1822 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) in adv_enable_complete() argument
1824 BT_DBG("%s status %u", hdev->name, status); in adv_enable_complete()
1827 void hci_req_reenable_advertising(struct hci_dev *hdev) in hci_req_reenable_advertising() argument
1831 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && in hci_req_reenable_advertising()
1832 list_empty(&hdev->adv_instances)) in hci_req_reenable_advertising()
1835 hci_req_init(&req, hdev); in hci_req_reenable_advertising()
1837 if (hdev->cur_adv_instance) { in hci_req_reenable_advertising()
1838 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, in hci_req_reenable_advertising()
1841 if (ext_adv_capable(hdev)) { in hci_req_reenable_advertising()
1855 struct hci_dev *hdev = container_of(work, struct hci_dev, in adv_timeout_expire() local
1861 BT_DBG("%s", hdev->name); in adv_timeout_expire()
1863 hci_dev_lock(hdev); in adv_timeout_expire()
1865 hdev->adv_instance_timeout = 0; in adv_timeout_expire()
1867 instance = hdev->cur_adv_instance; in adv_timeout_expire()
1871 hci_req_init(&req, hdev); in adv_timeout_expire()
1873 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false); in adv_timeout_expire()
1875 if (list_empty(&hdev->adv_instances)) in adv_timeout_expire()
1881 hci_dev_unlock(hdev); in adv_timeout_expire()
1884 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, in hci_get_random_address() argument
1901 if (use_ll_privacy(hdev)) in hci_get_random_address()
1908 !bacmp(&adv_instance->random_addr, &hdev->rpa)) in hci_get_random_address()
1913 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && in hci_get_random_address()
1914 !bacmp(&hdev->random_addr, &hdev->rpa)) in hci_get_random_address()
1918 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); in hci_get_random_address()
1920 bt_dev_err(hdev, "failed to generate new RPA"); in hci_get_random_address()
1924 bacpy(rand_addr, &hdev->rpa); in hci_get_random_address()
1926 to = msecs_to_jiffies(hdev->rpa_timeout * 1000); in hci_get_random_address()
1928 queue_delayed_work(hdev->workqueue, in hci_get_random_address()
1931 queue_delayed_work(hdev->workqueue, in hci_get_random_address()
1932 &hdev->rpa_expired, to); in hci_get_random_address()
1955 if (bacmp(&hdev->bdaddr, &nrpa)) in hci_get_random_address()
1979 struct hci_dev *hdev = req->hdev; in __hci_req_setup_ext_adv_instance() local
1989 adv_instance = hci_find_adv_instance(hdev, instance); in __hci_req_setup_ext_adv_instance()
1996 flags = get_adv_instance_flags(hdev, instance); in __hci_req_setup_ext_adv_instance()
2002 mgmt_get_connectable(hdev); in __hci_req_setup_ext_adv_instance()
2004 if (!is_advertising_allowed(hdev, connectable)) in __hci_req_setup_ext_adv_instance()
2011 err = hci_get_random_address(hdev, !connectable, in __hci_req_setup_ext_adv_instance()
2012 adv_use_rpa(hdev, flags), adv_instance, in __hci_req_setup_ext_adv_instance()
2020 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); in __hci_req_setup_ext_adv_instance()
2021 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); in __hci_req_setup_ext_adv_instance()
2030 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) { in __hci_req_setup_ext_adv_instance()
2043 cp.channel_map = hdev->le_adv_channel_map; in __hci_req_setup_ext_adv_instance()
2070 if (!bacmp(&random_addr, &hdev->random_addr)) in __hci_req_setup_ext_adv_instance()
2089 struct hci_dev *hdev = req->hdev; in __hci_req_enable_ext_advertising() local
2096 adv_instance = hci_find_adv_instance(hdev, instance); in __hci_req_enable_ext_advertising()
2134 struct hci_dev *hdev = req->hdev; in __hci_req_disable_ext_adv_instance() local
2141 if (instance > 0 && !hci_find_adv_instance(hdev, instance)) in __hci_req_disable_ext_adv_instance()
2163 struct hci_dev *hdev = req->hdev; in __hci_req_remove_ext_adv_instance() local
2166 if (instance > 0 && !hci_find_adv_instance(hdev, instance)) in __hci_req_remove_ext_adv_instance()
2176 struct hci_dev *hdev = req->hdev; in __hci_req_start_ext_adv() local
2177 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance); in __hci_req_start_ext_adv()
2199 struct hci_dev *hdev = req->hdev; in __hci_req_schedule_adv_instance() local
2203 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || in __hci_req_schedule_adv_instance()
2204 list_empty(&hdev->adv_instances)) in __hci_req_schedule_adv_instance()
2207 if (hdev->adv_instance_timeout) in __hci_req_schedule_adv_instance()
2210 adv_instance = hci_find_adv_instance(hdev, instance); in __hci_req_schedule_adv_instance()
2236 if (!ext_adv_capable(hdev)) { in __hci_req_schedule_adv_instance()
2237 hdev->adv_instance_timeout = timeout; in __hci_req_schedule_adv_instance()
2238 queue_delayed_work(hdev->req_workqueue, in __hci_req_schedule_adv_instance()
2239 &hdev->adv_instance_expire, in __hci_req_schedule_adv_instance()
2247 if (!force && hdev->cur_adv_instance == instance && in __hci_req_schedule_adv_instance()
2248 hci_dev_test_flag(hdev, HCI_LE_ADV)) in __hci_req_schedule_adv_instance()
2251 hdev->cur_adv_instance = instance; in __hci_req_schedule_adv_instance()
2252 if (ext_adv_capable(hdev)) { in __hci_req_schedule_adv_instance()
2274 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, in hci_req_clear_adv_instance() argument
2283 if (!instance || hdev->cur_adv_instance == instance) in hci_req_clear_adv_instance()
2284 cancel_adv_timeout(hdev); in hci_req_clear_adv_instance()
2290 if (instance && hdev->cur_adv_instance == instance) in hci_req_clear_adv_instance()
2291 next_instance = hci_get_next_instance(hdev, instance); in hci_req_clear_adv_instance()
2294 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, in hci_req_clear_adv_instance()
2300 err = hci_remove_adv_instance(hdev, rem_inst); in hci_req_clear_adv_instance()
2302 mgmt_advertising_removed(sk, hdev, rem_inst); in hci_req_clear_adv_instance()
2305 adv_instance = hci_find_adv_instance(hdev, instance); in hci_req_clear_adv_instance()
2314 err = hci_remove_adv_instance(hdev, instance); in hci_req_clear_adv_instance()
2316 mgmt_advertising_removed(sk, hdev, instance); in hci_req_clear_adv_instance()
2320 if (!req || !hdev_is_powered(hdev) || in hci_req_clear_adv_instance()
2321 hci_dev_test_flag(hdev, HCI_ADVERTISING)) in hci_req_clear_adv_instance()
2324 if (next_instance && !ext_adv_capable(hdev)) in hci_req_clear_adv_instance()
2331 struct hci_dev *hdev = req->hdev; in set_random_addr() local
2343 if (hci_dev_test_flag(hdev, HCI_LE_ADV) || in set_random_addr()
2344 hci_lookup_le_connect(hdev)) { in set_random_addr()
2346 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); in set_random_addr()
2356 struct hci_dev *hdev = req->hdev; in hci_update_random_address() local
2369 if (use_ll_privacy(hdev)) in hci_update_random_address()
2374 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && in hci_update_random_address()
2375 !bacmp(&hdev->random_addr, &hdev->rpa)) in hci_update_random_address()
2378 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); in hci_update_random_address()
2380 bt_dev_err(hdev, "failed to generate new RPA"); in hci_update_random_address()
2384 set_random_addr(req, &hdev->rpa); in hci_update_random_address()
2386 to = msecs_to_jiffies(hdev->rpa_timeout * 1000); in hci_update_random_address()
2387 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); in hci_update_random_address()
2410 if (bacmp(&hdev->bdaddr, &nrpa)) in hci_update_random_address()
2428 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || in hci_update_random_address()
2429 !bacmp(&hdev->bdaddr, BDADDR_ANY) || in hci_update_random_address()
2430 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && in hci_update_random_address()
2431 bacmp(&hdev->static_addr, BDADDR_ANY))) { in hci_update_random_address()
2433 if (bacmp(&hdev->static_addr, &hdev->random_addr)) in hci_update_random_address()
2435 &hdev->static_addr); in hci_update_random_address()
2447 static bool disconnected_whitelist_entries(struct hci_dev *hdev) in disconnected_whitelist_entries() argument
2451 list_for_each_entry(b, &hdev->whitelist, list) { in disconnected_whitelist_entries()
2454 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); in disconnected_whitelist_entries()
2467 struct hci_dev *hdev = req->hdev; in __hci_req_update_scan() local
2470 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) in __hci_req_update_scan()
2473 if (!hdev_is_powered(hdev)) in __hci_req_update_scan()
2476 if (mgmt_powering_down(hdev)) in __hci_req_update_scan()
2479 if (hdev->scanning_paused) in __hci_req_update_scan()
2482 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || in __hci_req_update_scan()
2483 disconnected_whitelist_entries(hdev)) in __hci_req_update_scan()
2488 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) in __hci_req_update_scan()
2491 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && in __hci_req_update_scan()
2492 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) in __hci_req_update_scan()
2500 hci_dev_lock(req->hdev); in update_scan()
2502 hci_dev_unlock(req->hdev); in update_scan()
2508 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update); in scan_update_work() local
2510 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); in scan_update_work()
2515 struct hci_dev *hdev = req->hdev; in connectable_update() local
2517 hci_dev_lock(hdev); in connectable_update()
2525 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) in connectable_update()
2526 __hci_req_update_adv_data(req, hdev->cur_adv_instance); in connectable_update()
2529 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || in connectable_update()
2530 !list_empty(&hdev->adv_instances)) { in connectable_update()
2531 if (ext_adv_capable(hdev)) in connectable_update()
2532 __hci_req_start_ext_adv(req, hdev->cur_adv_instance); in connectable_update()
2539 hci_dev_unlock(hdev); in connectable_update()
2546 struct hci_dev *hdev = container_of(work, struct hci_dev, in connectable_update_work() local
2550 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status); in connectable_update_work()
2551 mgmt_set_connectable_complete(hdev, status); in connectable_update_work()
2554 static u8 get_service_classes(struct hci_dev *hdev) in get_service_classes() argument
2559 list_for_each_entry(uuid, &hdev->uuids, list) in get_service_classes()
2567 struct hci_dev *hdev = req->hdev; in __hci_req_update_class() local
2570 BT_DBG("%s", hdev->name); in __hci_req_update_class()
2572 if (!hdev_is_powered(hdev)) in __hci_req_update_class()
2575 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) in __hci_req_update_class()
2578 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) in __hci_req_update_class()
2581 cod[0] = hdev->minor_class; in __hci_req_update_class()
2582 cod[1] = hdev->major_class; in __hci_req_update_class()
2583 cod[2] = get_service_classes(hdev); in __hci_req_update_class()
2585 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) in __hci_req_update_class()
2588 if (memcmp(cod, hdev->dev_class, 3) == 0) in __hci_req_update_class()
2596 struct hci_dev *hdev = req->hdev; in write_iac() local
2599 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) in write_iac()
2602 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { in write_iac()
2604 cp.num_iac = min_t(u8, hdev->num_iac, 2); in write_iac()
2625 struct hci_dev *hdev = req->hdev; in discoverable_update() local
2627 hci_dev_lock(hdev); in discoverable_update()
2629 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { in discoverable_update()
2638 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { in discoverable_update()
2644 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { in discoverable_update()
2645 if (ext_adv_capable(hdev)) in discoverable_update()
2652 hci_dev_unlock(hdev); in discoverable_update()
2659 struct hci_dev *hdev = container_of(work, struct hci_dev, in discoverable_update_work() local
2663 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status); in discoverable_update_work()
2664 mgmt_set_discoverable_complete(hdev, status); in discoverable_update_work()
2698 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2) in __hci_abort_conn()
2736 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) in abort_conn_complete() argument
2747 hci_req_init(&req, conn->hdev); in hci_abort_conn()
2753 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err); in hci_abort_conn()
2762 hci_dev_lock(req->hdev); in update_bg_scan()
2764 hci_dev_unlock(req->hdev); in update_bg_scan()
2770 struct hci_dev *hdev = container_of(work, struct hci_dev, in bg_scan_update() local
2776 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status); in bg_scan_update()
2780 hci_dev_lock(hdev); in bg_scan_update()
2782 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); in bg_scan_update()
2786 hci_dev_unlock(hdev); in bg_scan_update()
2802 BT_DBG("%s", req->hdev->name); in bredr_inquiry()
2804 hci_dev_lock(req->hdev); in bredr_inquiry()
2805 hci_inquiry_cache_flush(req->hdev); in bredr_inquiry()
2806 hci_dev_unlock(req->hdev); in bredr_inquiry()
2810 if (req->hdev->discovery.limited) in bredr_inquiry()
2824 struct hci_dev *hdev = container_of(work, struct hci_dev, in le_scan_disable_work() local
2828 BT_DBG("%s", hdev->name); in le_scan_disable_work()
2830 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) in le_scan_disable_work()
2833 cancel_delayed_work(&hdev->le_scan_restart); in le_scan_disable_work()
2835 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status); in le_scan_disable_work()
2837 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x", in le_scan_disable_work()
2842 hdev->discovery.scan_start = 0; in le_scan_disable_work()
2852 if (hdev->discovery.type == DISCOV_TYPE_LE) in le_scan_disable_work()
2855 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) in le_scan_disable_work()
2858 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { in le_scan_disable_work()
2859 if (!test_bit(HCI_INQUIRY, &hdev->flags) && in le_scan_disable_work()
2860 hdev->discovery.state != DISCOVERY_RESOLVING) in le_scan_disable_work()
2866 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN, in le_scan_disable_work()
2869 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status); in le_scan_disable_work()
2876 hci_dev_lock(hdev); in le_scan_disable_work()
2877 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); in le_scan_disable_work()
2878 hci_dev_unlock(hdev); in le_scan_disable_work()
2883 struct hci_dev *hdev = req->hdev; in le_scan_restart() local
2886 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) in le_scan_restart()
2889 if (hdev->scanning_paused) { in le_scan_restart()
2890 bt_dev_dbg(hdev, "Scanning is paused for suspend"); in le_scan_restart()
2896 if (use_ext_scan(hdev)) { in le_scan_restart()
2919 struct hci_dev *hdev = container_of(work, struct hci_dev, in le_scan_restart_work() local
2924 BT_DBG("%s", hdev->name); in le_scan_restart_work()
2926 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); in le_scan_restart_work()
2928 bt_dev_err(hdev, "failed to restart LE scan: status %d", in le_scan_restart_work()
2933 hci_dev_lock(hdev); in le_scan_restart_work()
2935 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || in le_scan_restart_work()
2936 !hdev->discovery.scan_start) in le_scan_restart_work()
2944 duration = hdev->discovery.scan_duration; in le_scan_restart_work()
2945 scan_start = hdev->discovery.scan_start; in le_scan_restart_work()
2960 queue_delayed_work(hdev->req_workqueue, in le_scan_restart_work()
2961 &hdev->le_scan_disable, timeout); in le_scan_restart_work()
2964 hci_dev_unlock(hdev); in le_scan_restart_work()
2970 struct hci_dev *hdev = req->hdev; in active_scan() local
2978 BT_DBG("%s", hdev->name); in active_scan()
2984 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) in active_scan()
2991 err = hci_update_random_address(req, true, scan_use_rpa(hdev), in active_scan()
2997 hdev->le_scan_window_discovery, own_addr_type, in active_scan()
3006 BT_DBG("%s", req->hdev->name); in interleaved_discov()
3015 static void start_discovery(struct hci_dev *hdev, u8 *status) in start_discovery() argument
3019 BT_DBG("%s type %u", hdev->name, hdev->discovery.type); in start_discovery()
3021 switch (hdev->discovery.type) { in start_discovery()
3023 if (!hci_dev_test_flag(hdev, HCI_INQUIRY)) in start_discovery()
3024 hci_req_sync(hdev, bredr_inquiry, in start_discovery()
3038 &hdev->quirks)) { in start_discovery()
3044 hci_req_sync(hdev, interleaved_discov, in start_discovery()
3045 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT, in start_discovery()
3050 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); in start_discovery()
3051 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, in start_discovery()
3056 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, in start_discovery()
3067 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout)); in start_discovery()
3074 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && in start_discovery()
3075 hdev->discovery.result_filtering) { in start_discovery()
3076 hdev->discovery.scan_start = jiffies; in start_discovery()
3077 hdev->discovery.scan_duration = timeout; in start_discovery()
3080 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, in start_discovery()
3086 struct hci_dev *hdev = req->hdev; in hci_req_stop_discovery() local
3087 struct discovery_state *d = &hdev->discovery; in hci_req_stop_discovery()
3092 BT_DBG("%s state %u", hdev->name, hdev->discovery.state); in hci_req_stop_discovery()
3095 if (test_bit(HCI_INQUIRY, &hdev->flags)) in hci_req_stop_discovery()
3098 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { in hci_req_stop_discovery()
3099 cancel_delayed_work(&hdev->le_scan_disable); in hci_req_stop_discovery()
3106 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { in hci_req_stop_discovery()
3117 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, in hci_req_stop_discovery()
3133 hci_dev_lock(req->hdev); in stop_discovery()
3135 hci_dev_unlock(req->hdev); in stop_discovery()
3142 struct hci_dev *hdev = container_of(work, struct hci_dev, in discov_update() local
3146 switch (hdev->discovery.state) { in discov_update()
3148 start_discovery(hdev, &status); in discov_update()
3149 mgmt_start_discovery_complete(hdev, status); in discov_update()
3151 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); in discov_update()
3153 hci_discovery_set_state(hdev, DISCOVERY_FINDING); in discov_update()
3156 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status); in discov_update()
3157 mgmt_stop_discovery_complete(hdev, status); in discov_update()
3159 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); in discov_update()
3169 struct hci_dev *hdev = container_of(work, struct hci_dev, in discov_off() local
3172 BT_DBG("%s", hdev->name); in discov_off()
3174 hci_dev_lock(hdev); in discov_off()
3181 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); in discov_off()
3182 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); in discov_off()
3183 hdev->discov_timeout = 0; in discov_off()
3185 hci_dev_unlock(hdev); in discov_off()
3187 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL); in discov_off()
3188 mgmt_new_settings(hdev); in discov_off()
3193 struct hci_dev *hdev = req->hdev; in powered_update_hci() local
3196 hci_dev_lock(hdev); in powered_update_hci()
3198 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && in powered_update_hci()
3199 !lmp_host_ssp_capable(hdev)) { in powered_update_hci()
3204 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { in powered_update_hci()
3212 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && in powered_update_hci()
3213 lmp_bredr_capable(hdev)) { in powered_update_hci()
3222 if (cp.le != lmp_host_le_capable(hdev) || in powered_update_hci()
3223 cp.simul != lmp_host_le_br_capable(hdev)) in powered_update_hci()
3228 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { in powered_update_hci()
3233 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || in powered_update_hci()
3234 list_empty(&hdev->adv_instances)) { in powered_update_hci()
3237 if (ext_adv_capable(hdev)) { in powered_update_hci()
3249 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { in powered_update_hci()
3250 if (!ext_adv_capable(hdev)) in powered_update_hci()
3256 } else if (!list_empty(&hdev->adv_instances)) { in powered_update_hci()
3259 adv_instance = list_first_entry(&hdev->adv_instances, in powered_update_hci()
3267 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); in powered_update_hci()
3268 if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) in powered_update_hci()
3272 if (lmp_bredr_capable(hdev)) { in powered_update_hci()
3273 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) in powered_update_hci()
3283 hci_dev_unlock(hdev); in powered_update_hci()
3287 int __hci_req_hci_power_on(struct hci_dev *hdev) in __hci_req_hci_power_on() argument
3294 smp_register(hdev); in __hci_req_hci_power_on()
3296 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT, in __hci_req_hci_power_on()
3300 void hci_request_setup(struct hci_dev *hdev) in hci_request_setup() argument
3302 INIT_WORK(&hdev->discov_update, discov_update); in hci_request_setup()
3303 INIT_WORK(&hdev->bg_scan_update, bg_scan_update); in hci_request_setup()
3304 INIT_WORK(&hdev->scan_update, scan_update_work); in hci_request_setup()
3305 INIT_WORK(&hdev->connectable_update, connectable_update_work); in hci_request_setup()
3306 INIT_WORK(&hdev->discoverable_update, discoverable_update_work); in hci_request_setup()
3307 INIT_DELAYED_WORK(&hdev->discov_off, discov_off); in hci_request_setup()
3308 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); in hci_request_setup()
3309 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); in hci_request_setup()
3310 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); in hci_request_setup()
3313 void hci_request_cancel_all(struct hci_dev *hdev) in hci_request_cancel_all() argument
3315 hci_req_sync_cancel(hdev, ENODEV); in hci_request_cancel_all()
3317 cancel_work_sync(&hdev->discov_update); in hci_request_cancel_all()
3318 cancel_work_sync(&hdev->bg_scan_update); in hci_request_cancel_all()
3319 cancel_work_sync(&hdev->scan_update); in hci_request_cancel_all()
3320 cancel_work_sync(&hdev->connectable_update); in hci_request_cancel_all()
3321 cancel_work_sync(&hdev->discoverable_update); in hci_request_cancel_all()
3322 cancel_delayed_work_sync(&hdev->discov_off); in hci_request_cancel_all()
3323 cancel_delayed_work_sync(&hdev->le_scan_disable); in hci_request_cancel_all()
3324 cancel_delayed_work_sync(&hdev->le_scan_restart); in hci_request_cancel_all()
3326 if (hdev->adv_instance_timeout) { in hci_request_cancel_all()
3327 cancel_delayed_work_sync(&hdev->adv_instance_expire); in hci_request_cancel_all()
3328 hdev->adv_instance_timeout = 0; in hci_request_cancel_all()