Lines matching full:qca (free-text search). All hits below are from drivers/bluetooth/hci_qca.c, the Linux kernel HCI UART driver for Qualcomm/QCA Bluetooth SoCs; each line shows the source line number, the matching text, and its enclosing function.

189 * QCA Bluetooth chipset
204 * Platform data for the QCA Bluetooth power driver.
275 struct qca_data *qca = hu->priv; in serial_clock_vote() local
278 bool old_vote = (qca->tx_vote | qca->rx_vote); in serial_clock_vote()
283 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
286 qca->vote_off_ms += diff; in serial_clock_vote()
288 qca->vote_on_ms += diff; in serial_clock_vote()
292 qca->tx_vote = true; in serial_clock_vote()
293 qca->tx_votes_on++; in serial_clock_vote()
297 qca->rx_vote = true; in serial_clock_vote()
298 qca->rx_votes_on++; in serial_clock_vote()
302 qca->tx_vote = false; in serial_clock_vote()
303 qca->tx_votes_off++; in serial_clock_vote()
307 qca->rx_vote = false; in serial_clock_vote()
308 qca->rx_votes_off++; in serial_clock_vote()
316 new_vote = qca->rx_vote | qca->tx_vote; in serial_clock_vote()
327 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
330 qca->votes_on++; in serial_clock_vote()
331 qca->vote_off_ms += diff; in serial_clock_vote()
333 qca->votes_off++; in serial_clock_vote()
334 qca->vote_on_ms += diff; in serial_clock_vote()
336 qca->vote_last_jif = jiffies; in serial_clock_vote()
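The serial_clock_vote() hits above are the driver's UART clock-vote bookkeeping: the TX and RX paths vote independently, the effective vote is their OR, and each transition charges the elapsed interval to the accumulator for the state being left (the 283-288 hits are the stats-update path, which only charges time without changing votes). A minimal userspace model of the transition accounting, with illustrative names standing in for jiffies and for the hci_ibs_lock serialization:

#include <stdbool.h>
#include <stdint.h>

struct vote_stats {
	bool tx_vote, rx_vote;
	uint64_t votes_on, votes_off;
	uint32_t vote_on_ms, vote_off_ms;
	uint64_t vote_last_ms;	/* stands in for vote_last_jif */
};

static void vote_update(struct vote_stats *s, bool tx, bool rx, uint64_t now_ms)
{
	bool old_vote = s->tx_vote | s->rx_vote;
	bool new_vote;

	s->tx_vote = tx;
	s->rx_vote = rx;
	new_vote = s->tx_vote | s->rx_vote;

	if (new_vote != old_vote) {
		/* Charge the elapsed interval to the state being left. */
		uint32_t diff = now_ms - s->vote_last_ms;

		if (new_vote) {
			s->votes_on++;
			s->vote_off_ms += diff;	/* clock was off until now */
		} else {
			s->votes_off++;
			s->vote_on_ms += diff;	/* clock was on until now */
		}
		s->vote_last_ms = now_ms;
	}
}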
347 struct qca_data *qca = hu->priv; in send_hci_ibs_cmd() local
360 skb_queue_tail(&qca->txq, skb); in send_hci_ibs_cmd()
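For context, the enclosing helper is tiny: it wraps a single HCI_IBS byte (sleep/wake/ack indication) in an skb and queues it on txq for the UART work loop to transmit. Reconstructed from mainline hci_qca.c; exact text may vary by kernel version:

static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* The whole packet is the one-byte IBS command. */
	skb_put_u8(skb, cmd);

	skb_queue_tail(&qca->txq, skb);

	return err;
}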
367 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_device() local
369 struct hci_uart *hu = qca->hu; in qca_wq_awake_device()
378 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
384 qca->ibs_sent_wakes++; in qca_wq_awake_device()
387 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in qca_wq_awake_device()
388 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in qca_wq_awake_device()
390 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
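The awake-device worker ties the IBS TX pieces together: vote the serial clock on, send HCI_IBS_WAKE_IND under hci_ibs_lock, arm the wake retransmit timer, then kick the transmitter. Reconstructed from mainline for context:

static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;
	unsigned long flags;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock before touching the wire. */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer in case the WAKE is never acked. */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}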
398 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_awake_rx() local
400 struct hci_uart *hu = qca->hu; in qca_wq_awake_rx()
407 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
408 qca->rx_ibs_state = HCI_IBS_RX_AWAKE; in qca_wq_awake_rx()
416 qca->ibs_sent_wacks++; in qca_wq_awake_rx()
418 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
426 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_rx_clock_vote_off() local
428 struct hci_uart *hu = qca->hu; in qca_wq_serial_rx_clock_vote_off()
437 struct qca_data *qca = container_of(work, struct qca_data, in qca_wq_serial_tx_clock_vote_off() local
439 struct hci_uart *hu = qca->hu; in qca_wq_serial_tx_clock_vote_off()
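Both vote-off workers reduce to a single serial_clock_vote() call; the RX variant is shown below, reconstructed from mainline (the TX one is identical with ws_tx_vote_off and HCI_IBS_TX_VOTE_CLOCK_OFF):

static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}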
454 struct qca_data *qca = from_timer(qca, t, tx_idle_timer); in hci_ibs_tx_idle_timeout() local
455 struct hci_uart *hu = qca->hu; in hci_ibs_tx_idle_timeout()
458 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
460 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_tx_idle_timeout()
463 switch (qca->tx_ibs_state) { in hci_ibs_tx_idle_timeout()
470 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in hci_ibs_tx_idle_timeout()
471 qca->ibs_sent_slps++; in hci_ibs_tx_idle_timeout()
472 queue_work(qca->workqueue, &qca->ws_tx_vote_off); in hci_ibs_tx_idle_timeout()
478 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
482 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_tx_idle_timeout()
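Between hits 463 and 478 the handler is a switch on tx_ibs_state, and only HCI_IBS_TX_AWAKE does real work: send a sleep indication, mark the TX side asleep, and queue the clock vote-off. A sketch of the switch body, reconstructed from mainline:

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX idle, go to sleep */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}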
487 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer); in hci_ibs_wake_retrans_timeout() local
488 struct hci_uart *hu = qca->hu; in hci_ibs_wake_retrans_timeout()
493 hu, qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
495 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_wake_retrans_timeout()
499 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in hci_ibs_wake_retrans_timeout()
500 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
504 switch (qca->tx_ibs_state) { in hci_ibs_wake_retrans_timeout()
512 qca->ibs_sent_wakes++; in hci_ibs_wake_retrans_timeout()
513 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in hci_ibs_wake_retrans_timeout()
514 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in hci_ibs_wake_retrans_timeout()
520 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
524 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
533 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump_timeout() local
535 struct hci_uart *hu = qca->hu; in qca_controller_memdump_timeout()
537 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
538 if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump_timeout()
539 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_controller_memdump_timeout()
540 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_controller_memdump_timeout()
548 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
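This timeout worker fires when the controller stops sending dump fragments: if collection was still marked in progress it declares QCA_MEMDUMP_TIMEOUT and, unless a hardware-error event is already being handled, injects a reset so the device and driver recover. Reconstructed from roughly v5.10-era mainline; the hci_reset_dev() injection is version-dependent:

static void qca_controller_memdump_timeout(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ctrl_memdump_timeout.work);
	struct hci_uart *hu = qca->hu;

	mutex_lock(&qca->hci_memdump_lock);
	if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
			/* Inject hw error event to reset the device
			 * and driver.
			 */
			hci_reset_dev(hu->hdev);
		}
	}

	mutex_unlock(&qca->hci_memdump_lock);
}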
556 struct qca_data *qca; in qca_open() local
563 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); in qca_open()
564 if (!qca) in qca_open()
567 skb_queue_head_init(&qca->txq); in qca_open()
568 skb_queue_head_init(&qca->tx_wait_q); in qca_open()
569 skb_queue_head_init(&qca->rx_memdump_q); in qca_open()
570 spin_lock_init(&qca->hci_ibs_lock); in qca_open()
571 mutex_init(&qca->hci_memdump_lock); in qca_open()
572 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); in qca_open()
573 if (!qca->workqueue) { in qca_open()
574 BT_ERR("QCA Workqueue not initialized properly"); in qca_open()
575 kfree(qca); in qca_open()
579 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); in qca_open()
580 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); in qca_open()
581 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); in qca_open()
582 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); in qca_open()
583 INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump); in qca_open()
584 INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout, in qca_open()
586 init_waitqueue_head(&qca->suspend_wait_q); in qca_open()
588 qca->hu = hu; in qca_open()
589 init_completion(&qca->drop_ev_comp); in qca_open()
592 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_open()
593 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in qca_open()
595 qca->vote_last_jif = jiffies; in qca_open()
597 hu->priv = qca; in qca_open()
609 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); in qca_open()
610 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; in qca_open()
612 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); in qca_open()
613 qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS; in qca_open()
616 qca->tx_idle_delay, qca->wake_retrans); in qca_open()
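Two details worth noting in the qca_open() hits: the workqueue is allocated with alloc_ordered_workqueue(), so the IBS wake/vote workers and the memdump worker serialize against one another by construction, and the only failure path (hits 572-575) frees the half-initialized qca_data. The surrounding error handling presumably reads:

	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}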
624 struct qca_data *qca = hu->priv; in qca_debugfs_init() local
635 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); in qca_debugfs_init()
636 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); in qca_debugfs_init()
638 &qca->ibs_sent_slps); in qca_debugfs_init()
640 &qca->ibs_sent_wakes); in qca_debugfs_init()
642 &qca->ibs_sent_wacks); in qca_debugfs_init()
644 &qca->ibs_recv_slps); in qca_debugfs_init()
646 &qca->ibs_recv_wakes); in qca_debugfs_init()
648 &qca->ibs_recv_wacks); in qca_debugfs_init()
649 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); in qca_debugfs_init()
650 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); in qca_debugfs_init()
651 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); in qca_debugfs_init()
652 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); in qca_debugfs_init()
653 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); in qca_debugfs_init()
654 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); in qca_debugfs_init()
655 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); in qca_debugfs_init()
656 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); in qca_debugfs_init()
657 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); in qca_debugfs_init()
658 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); in qca_debugfs_init()
662 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); in qca_debugfs_init()
664 &qca->tx_idle_delay); in qca_debugfs_init()
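Everything from tx_ibs_state through vote_off_ms is created read-only; mode then flips to writable for the two IBS tunables at hits 662-664. A sketch of the framing around the calls above, assuming the mainline mode values (0444/0644) and the mainline entry name "tx_idle_delay" for the elided line 663:

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = 0444;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	/* ... the read-only states and counters listed above ... */

	/* read/write */
	mode = 0644;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);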
670 struct qca_data *qca = hu->priv; in qca_flush() local
672 BT_DBG("hu %p qca flush", hu); in qca_flush()
674 skb_queue_purge(&qca->tx_wait_q); in qca_flush()
675 skb_queue_purge(&qca->txq); in qca_flush()
683 struct qca_data *qca = hu->priv; in qca_close() local
685 BT_DBG("hu %p qca close", hu); in qca_close()
689 skb_queue_purge(&qca->tx_wait_q); in qca_close()
690 skb_queue_purge(&qca->txq); in qca_close()
691 skb_queue_purge(&qca->rx_memdump_q); in qca_close()
692 destroy_workqueue(qca->workqueue); in qca_close()
693 del_timer_sync(&qca->tx_idle_timer); in qca_close()
694 del_timer_sync(&qca->wake_retrans_timer); in qca_close()
695 qca->hu = NULL; in qca_close()
697 kfree_skb(qca->rx_skb); in qca_close()
701 kfree(qca); in qca_close()
711 struct qca_data *qca = hu->priv; in device_want_to_wakeup() local
715 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
717 qca->ibs_recv_wakes++; in device_want_to_wakeup()
720 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_want_to_wakeup()
721 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
725 switch (qca->rx_ibs_state) { in device_want_to_wakeup()
730 queue_work(qca->workqueue, &qca->ws_awake_rx); in device_want_to_wakeup()
731 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
742 qca->ibs_sent_wacks++; in device_want_to_wakeup()
748 qca->rx_ibs_state); in device_want_to_wakeup()
752 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
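The RX wake handler acks an HCI_IBS_WAKE_IND differently per state: if the RX side is still asleep it defers to the ws_awake_rx worker, which votes the clock on before acking; if already awake it acks inline, since IBS traffic does not count as TX activity. Reconstructed switch and tail, from mainline:

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Wake the clock first; the worker sends the ack. */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);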
763 struct qca_data *qca = hu->priv; in device_want_to_sleep() local
765 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); in device_want_to_sleep()
767 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
769 qca->ibs_recv_slps++; in device_want_to_sleep()
771 switch (qca->rx_ibs_state) { in device_want_to_sleep()
774 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in device_want_to_sleep()
776 queue_work(qca->workqueue, &qca->ws_rx_vote_off); in device_want_to_sleep()
785 qca->rx_ibs_state); in device_want_to_sleep()
789 wake_up_interruptible(&qca->suspend_wait_q); in device_want_to_sleep()
791 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
799 struct qca_data *qca = hu->priv; in device_woke_up() local
804 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_woke_up()
806 qca->ibs_recv_wacks++; in device_woke_up()
809 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_woke_up()
810 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
814 switch (qca->tx_ibs_state) { in device_woke_up()
818 qca->tx_ibs_state); in device_woke_up()
823 while ((skb = skb_dequeue(&qca->tx_wait_q))) in device_woke_up()
824 skb_queue_tail(&qca->txq, skb); in device_woke_up()
827 del_timer(&qca->wake_retrans_timer); in device_woke_up()
828 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in device_woke_up()
829 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in device_woke_up()
830 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; in device_woke_up()
836 qca->tx_ibs_state); in device_woke_up()
840 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
852 struct qca_data *qca = hu->priv; in qca_enqueue() local
854 BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb, in qca_enqueue()
855 qca->tx_ibs_state); in qca_enqueue()
857 if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_enqueue()
867 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_enqueue()
873 if (!test_bit(QCA_IBS_ENABLED, &qca->flags) || in qca_enqueue()
874 test_bit(QCA_SUSPENDING, &qca->flags)) { in qca_enqueue()
875 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
876 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
881 switch (qca->tx_ibs_state) { in qca_enqueue()
884 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
885 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in qca_enqueue()
886 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in qca_enqueue()
892 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
894 qca->tx_ibs_state = HCI_IBS_TX_WAKING; in qca_enqueue()
896 queue_work(qca->workqueue, &qca->ws_awake_device); in qca_enqueue()
902 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
907 qca->tx_ibs_state); in qca_enqueue()
912 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
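The enqueue path is the TX side of the IBS state machine: AWAKE sends directly and re-arms the idle timer, ASLEEP parks the skb on tx_wait_q and schedules the wake worker, and WAKING just parks the skb until the controller acks the wake. Reconstructed switch; note the default case drops the packet (older trees use kfree_skb(), newer ones an IRQ-safe variant):

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}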
968 struct qca_data *qca = container_of(work, struct qca_data, in qca_controller_memdump() local
970 struct hci_uart *hu = qca->hu; in qca_controller_memdump()
973 struct qca_memdump_data *qca_memdump = qca->qca_memdump; in qca_controller_memdump()
982 while ((skb = skb_dequeue(&qca->rx_memdump_q))) { in qca_controller_memdump()
984 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump()
988 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_controller_memdump()
989 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_controller_memdump()
990 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
998 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1002 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1005 qca->memdump_state = QCA_MEMDUMP_COLLECTING; in qca_controller_memdump()
1018 clear_bit(QCA_IBS_ENABLED, &qca->flags); in qca_controller_memdump()
1019 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1026 qca->qca_memdump = NULL; in qca_controller_memdump()
1027 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1031 bt_dev_info(hu->hdev, "QCA collecting dump of size:%u", in qca_controller_memdump()
1033 queue_delayed_work(qca->workqueue, in qca_controller_memdump()
1034 &qca->ctrl_memdump_timeout, in qca_controller_memdump()
1051 bt_dev_err(hu->hdev, "QCA: Discarding other packets"); in qca_controller_memdump()
1054 qca->qca_memdump = NULL; in qca_controller_memdump()
1055 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1070 bt_dev_err(hu->hdev, "QCA controller missed packet:%d", in qca_controller_memdump()
1076 "QCA memdump received %d, no space for missed packet", in qca_controller_memdump()
1091 "QCA memdump unexpected packet %d", in qca_controller_memdump()
1094 "QCA memdump packet %d with length %d", in qca_controller_memdump()
1104 "QCA memdump received %d, no space for packet %d", in qca_controller_memdump()
1107 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1111 "QCA memdump Done, received %d, total %d", in qca_controller_memdump()
1117 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1118 kfree(qca->qca_memdump); in qca_controller_memdump()
1119 qca->qca_memdump = NULL; in qca_controller_memdump()
1120 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1121 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1124 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
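The worker above reassembles the crash dump from vendor-specific events: fragment 0 announces the total size (the buffer is vmalloc'd to match), each later fragment carries a sequence number, gaps are zero-filled one fixed-size slot per missed packet, and sequence 0xffff marks the end. A compact userspace model of that loop; the names are illustrative (the driver keeps this state in struct qca_memdump_data) and FRAG_SLOT stands in for QCA_DUMP_PACKET_SIZE:

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#define LAST_SEQ  0xffff	/* QCA_LAST_SEQUENCE_NUM */
#define FRAG_SLOT 255		/* stand-in for QCA_DUMP_PACKET_SIZE */

struct memdump {
	uint8_t  *buf;		/* vmalloc'd memdump_buf_head */
	uint32_t  size;		/* announced in fragment 0 */
	uint32_t  received;
	uint16_t  next_seq;
};

/* Returns true once the final fragment (seq == LAST_SEQ) lands. */
static bool memdump_rx(struct memdump *md, uint16_t seq,
		       const uint8_t *frag, uint32_t len)
{
	while (seq > md->next_seq && seq != LAST_SEQ &&
	       md->received + FRAG_SLOT <= md->size) {
		/* "controller missed packet": zero-fill its slot */
		memset(md->buf + md->received, 0, FRAG_SLOT);
		md->received += FRAG_SLOT;
		md->next_seq++;
	}

	if (md->received + len <= md->size) {
		memcpy(md->buf + md->received, frag, len);
		md->received += len;
	}	/* else: "no space for packet" */

	md->next_seq = seq + 1;
	return seq == LAST_SEQ;
}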
1133 struct qca_data *qca = hu->priv; in qca_controller_memdump_event() local
1135 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_controller_memdump_event()
1136 skb_queue_tail(&qca->rx_memdump_q, skb); in qca_controller_memdump_event()
1137 queue_work(qca->workqueue, &qca->ctrl_memdump_evt); in qca_controller_memdump_event()
1145 struct qca_data *qca = hu->priv; in qca_recv_event() local
1147 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { in qca_recv_event()
1161 complete(&qca->drop_ev_comp); in qca_recv_event()
1211 struct qca_data *qca = hu->priv; in qca_recv() local
1216 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, in qca_recv()
1218 if (IS_ERR(qca->rx_skb)) { in qca_recv()
1219 int err = PTR_ERR(qca->rx_skb); in qca_recv()
1221 qca->rx_skb = NULL; in qca_recv()
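These hits are the standard H:4 reassembly pattern: h4_recv_buf() accumulates bytes into qca->rx_skb across calls, dispatches complete frames through the driver's qca_recv_pkts table (which adds the one-byte IBS packets to the usual HCI types), and returns an ERR_PTR on framing errors. The full function, reconstructed from mainline:

static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}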
1230 struct qca_data *qca = hu->priv; in qca_dequeue() local
1232 return skb_dequeue(&qca->txq); in qca_dequeue()
1274 struct qca_data *qca = hu->priv; in qca_set_baudrate() local
1293 skb_queue_tail(&qca->txq, skb); in qca_set_baudrate()
1298 while (!skb_queue_empty(&qca->txq)) in qca_set_baudrate()
1398 struct qca_data *qca = hu->priv; in qca_set_speed() local
1419 reinit_completion(&qca->drop_ev_comp); in qca_set_speed()
1420 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1439 if (!wait_for_completion_timeout(&qca->drop_ev_comp, in qca_set_speed()
1446 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1455 struct qca_data *qca = hu->priv; in qca_send_crashbuffer() local
1472 skb_queue_tail(&qca->txq, skb); in qca_send_crashbuffer()
1481 struct qca_data *qca = hu->priv; in qca_wait_for_dump_collection() local
1483 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION, in qca_wait_for_dump_collection()
1486 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_wait_for_dump_collection()
1492 struct qca_data *qca = hu->priv; in qca_hw_error() local
1494 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_hw_error()
1495 set_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1496 bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); in qca_hw_error()
1498 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_hw_error()
1499 /* If hardware error event received for other than QCA in qca_hw_error()
1505 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_hw_error()
1508 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_hw_error()
1516 mutex_lock(&qca->hci_memdump_lock); in qca_hw_error()
1517 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1519 if (qca->qca_memdump) { in qca_hw_error()
1520 vfree(qca->qca_memdump->memdump_buf_head); in qca_hw_error()
1521 kfree(qca->qca_memdump); in qca_hw_error()
1522 qca->qca_memdump = NULL; in qca_hw_error()
1524 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_hw_error()
1525 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_hw_error()
1527 mutex_unlock(&qca->hci_memdump_lock); in qca_hw_error()
1529 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_hw_error()
1530 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1531 cancel_work_sync(&qca->ctrl_memdump_evt); in qca_hw_error()
1532 skb_queue_purge(&qca->rx_memdump_q); in qca_hw_error()
1535 clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1541 struct qca_data *qca = hu->priv; in qca_cmd_timeout() local
1543 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_cmd_timeout()
1544 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_cmd_timeout()
1545 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_cmd_timeout()
1548 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_cmd_timeout()
1556 mutex_lock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1557 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_cmd_timeout()
1558 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_cmd_timeout()
1559 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_cmd_timeout()
1566 mutex_unlock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1649 struct qca_data *qca = hu->priv; in qca_setup() local
1662 clear_bit(QCA_IBS_ENABLED, &qca->flags); in qca_setup()
1672 qca->memdump_state = QCA_MEMDUMP_IDLE; in qca_setup()
1679 clear_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_setup()
1702 /* Get QCA version information */ in qca_setup()
1708 bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); in qca_setup()
1713 set_bit(QCA_IBS_ENABLED, &qca->flags); in qca_setup()
1753 .name = "QCA",
1808 struct qca_data *qca = hu->priv; in qca_power_shutdown() local
1816 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
1817 clear_bit(QCA_IBS_ENABLED, &qca->flags); in qca_power_shutdown()
1819 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
1841 struct qca_data *qca = hu->priv; in qca_power_off() local
1847 del_timer_sync(&qca->wake_retrans_timer); in qca_power_off()
1848 del_timer_sync(&qca->tx_idle_timer); in qca_power_off()
1852 && qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_power_off()
1904 static int qca_init_regulators(struct qca_power *qca, in qca_init_regulators() argument
1911 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL); in qca_init_regulators()
1918 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk); in qca_init_regulators()
1928 qca->vreg_bulk = bulk; in qca_init_regulators()
1929 qca->num_vregs = num_vregs; in qca_init_regulators()
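The regulator init hits follow the devm bulk pattern: allocate one regulator_bulk_data per supply, fill in the supply names (the elided loop around 1913-1916), fetch them all in one call, then program each supply's load current before recording the bulk handle on qca_power. Reconstructed from mainline:

static int qca_init_regulators(struct qca_power *qca,
			       const struct qca_vreg *vregs, size_t num_vregs)
{
	struct regulator_bulk_data *bulk;
	int ret;
	int i;

	bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		bulk[i].supply = vregs[i].name;

	ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
	if (ret < 0)
		return ret;

	for (i = 0; i < num_vregs; i++) {
		ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
		if (ret)
			return ret;
	}

	qca->vreg_bulk = bulk;
	qca->num_vregs = num_vregs;

	return 0;
}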
2068 BT_ERR("QCA send IBS_WAKE_IND error: %d", ret); in qca_serdev_shutdown()
2078 BT_ERR("QCA send EDL_RESET_REQ error: %d", ret); in qca_serdev_shutdown()
2091 struct qca_data *qca = hu->priv; in qca_suspend() local
2097 set_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
2100 if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) in qca_suspend()
2103 cancel_work_sync(&qca->ws_awake_device); in qca_suspend()
2104 cancel_work_sync(&qca->ws_awake_rx); in qca_suspend()
2106 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in qca_suspend()
2109 switch (qca->tx_ibs_state) { in qca_suspend()
2111 del_timer(&qca->wake_retrans_timer); in qca_suspend()
2114 del_timer(&qca->tx_idle_timer); in qca_suspend()
2125 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_suspend()
2126 qca->ibs_sent_slps++; in qca_suspend()
2134 BT_ERR("Spurious tx state %d", qca->tx_ibs_state); in qca_suspend()
2139 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_suspend()
2153 ret = wait_event_interruptible_timeout(qca->suspend_wait_q, in qca_suspend()
2154 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, in qca_suspend()
2164 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
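After forcing the TX side asleep, suspend still has to wait for the controller's own sleep indication: device_want_to_sleep() (hit 789) wakes suspend_wait_q once rx_ibs_state reaches HCI_IBS_RX_ASLEEP. A sketch of the tail, with SLEEP_ACK_TIMEOUT standing in for the driver's actual jiffies constant:

	ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
					       qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
					       SLEEP_ACK_TIMEOUT);
	if (ret > 0)
		return 0;
	if (ret == 0)
		ret = -ETIMEDOUT;

error:	/* earlier failure paths goto here as well */
	clear_bit(QCA_SUSPENDING, &qca->flags);	/* hit 2164 */
	return ret;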
2174 struct qca_data *qca = hu->priv; in qca_resume() local
2176 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_resume()