
Lines Matching +full:fw +full:- +full:init +full:- +full:baudrate

1 // SPDX-License-Identifier: GPL-2.0-only
5 * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
13 * Written by Ohad Ben-Cohen <ohad@bencohen.org>
63 /* max retry count when init fails */
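
The in-band sleep protocol named in the header above rides on three single-byte messages sent over the same UART as normal HCI traffic. The values below match the defines in this driver; the trailing comments are editorial summaries.

    /* HCI_IBS transport messages, one byte each on the wire */
    #define HCI_IBS_SLEEP_IND 0xFE  /* sender is about to sleep */
    #define HCI_IBS_WAKE_IND  0xFD  /* ask the peer to wake up */
    #define HCI_IBS_WAKE_ACK  0xFC  /* acknowledge a wake indication */
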
240 if (hu->serdev) { in qca_soc_type()
241 struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev); in qca_soc_type()
243 soc_type = qsd->btsoc_type; in qca_soc_type()
253 if (hu->serdev) { in qca_get_firmware_name()
254 struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev); in qca_get_firmware_name()
256 return qsd->firmware_name; in qca_get_firmware_name()
281 struct qca_data *qca = hu->priv; in serial_clock_vote()
284 bool old_vote = (qca->tx_vote | qca->rx_vote); in serial_clock_vote()
289 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
292 qca->vote_off_ms += diff; in serial_clock_vote()
294 qca->vote_on_ms += diff; in serial_clock_vote()
298 qca->tx_vote = true; in serial_clock_vote()
299 qca->tx_votes_on++; in serial_clock_vote()
303 qca->rx_vote = true; in serial_clock_vote()
304 qca->rx_votes_on++; in serial_clock_vote()
308 qca->tx_vote = false; in serial_clock_vote()
309 qca->tx_votes_off++; in serial_clock_vote()
313 qca->rx_vote = false; in serial_clock_vote()
314 qca->rx_votes_off++; in serial_clock_vote()
322 new_vote = qca->rx_vote | qca->tx_vote; in serial_clock_vote()
326 __serial_clock_on(hu->tty); in serial_clock_vote()
328 __serial_clock_off(hu->tty); in serial_clock_vote()
333 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); in serial_clock_vote()
336 qca->votes_on++; in serial_clock_vote()
337 qca->vote_off_ms += diff; in serial_clock_vote()
339 qca->votes_off++; in serial_clock_vote()
340 qca->vote_on_ms += diff; in serial_clock_vote()
342 qca->vote_last_jif = jiffies; in serial_clock_vote()
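
serial_clock_vote() above keeps the UART clock running while either the TX or the RX path holds a vote, and accumulates on/off time per aggregate state. A minimal, runnable model of just the aggregate-vote rule (names hypothetical, statistics omitted):

    #include <stdbool.h>
    #include <stdio.h>

    struct votes { bool tx, rx, clock_on; };

    /* The clock follows the OR of both votes; it only switches when the
     * aggregate changes, mirroring __serial_clock_on()/__serial_clock_off().
     */
    static void vote(struct votes *v, bool tx, bool rx)
    {
        bool old_vote = v->tx || v->rx;
        bool new_vote;

        v->tx = tx;
        v->rx = rx;
        new_vote = v->tx || v->rx;
        if (new_vote != old_vote)
            v->clock_on = new_vote;
    }

    int main(void)
    {
        struct votes v = { false, false, false };

        vote(&v, true, false);  /* TX on: clock turns on */
        vote(&v, true, true);   /* RX joins: no transition */
        vote(&v, false, true);  /* TX off: clock stays on */
        vote(&v, false, false); /* last vote gone: clock off */
        printf("clock_on=%d\n", v.clock_on);
        return 0;
    }
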
353 struct qca_data *qca = hu->priv; in send_hci_ibs_cmd()
360 return -ENOMEM; in send_hci_ibs_cmd()
366 skb_queue_tail(&qca->txq, skb); in send_hci_ibs_cmd()
375 struct hci_uart *hu = qca->hu; in qca_wq_awake_device()
384 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
390 qca->ibs_sent_wakes++; in qca_wq_awake_device()
393 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in qca_wq_awake_device()
394 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in qca_wq_awake_device()
396 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_device()
406 struct hci_uart *hu = qca->hu; in qca_wq_awake_rx()
413 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
414 qca->rx_ibs_state = HCI_IBS_RX_AWAKE; in qca_wq_awake_rx()
422 qca->ibs_sent_wacks++; in qca_wq_awake_rx()
424 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_wq_awake_rx()
434 struct hci_uart *hu = qca->hu; in qca_wq_serial_rx_clock_vote_off()
445 struct hci_uart *hu = qca->hu; in qca_wq_serial_tx_clock_vote_off()
461 struct hci_uart *hu = qca->hu; in hci_ibs_tx_idle_timeout()
464 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
466 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_tx_idle_timeout()
469 switch (qca->tx_ibs_state) { in hci_ibs_tx_idle_timeout()
476 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in hci_ibs_tx_idle_timeout()
477 qca->ibs_sent_slps++; in hci_ibs_tx_idle_timeout()
478 queue_work(qca->workqueue, &qca->ws_tx_vote_off); in hci_ibs_tx_idle_timeout()
484 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_tx_idle_timeout()
488 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_tx_idle_timeout()
494 struct hci_uart *hu = qca->hu; in hci_ibs_wake_retrans_timeout()
499 hu, qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
501 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in hci_ibs_wake_retrans_timeout()
505 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in hci_ibs_wake_retrans_timeout()
506 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
510 switch (qca->tx_ibs_state) { in hci_ibs_wake_retrans_timeout()
518 qca->ibs_sent_wakes++; in hci_ibs_wake_retrans_timeout()
519 retrans_delay = msecs_to_jiffies(qca->wake_retrans); in hci_ibs_wake_retrans_timeout()
520 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); in hci_ibs_wake_retrans_timeout()
526 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); in hci_ibs_wake_retrans_timeout()
530 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in hci_ibs_wake_retrans_timeout()
541 struct hci_uart *hu = qca->hu; in qca_controller_memdump_timeout()
543 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
544 if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { in qca_controller_memdump_timeout()
545 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_controller_memdump_timeout()
546 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_controller_memdump_timeout()
550 hci_reset_dev(hu->hdev); in qca_controller_memdump_timeout()
554 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump_timeout()
567 return -EOPNOTSUPP; in qca_open()
571 return -ENOMEM; in qca_open()
573 skb_queue_head_init(&qca->txq); in qca_open()
574 skb_queue_head_init(&qca->tx_wait_q); in qca_open()
575 skb_queue_head_init(&qca->rx_memdump_q); in qca_open()
576 spin_lock_init(&qca->hci_ibs_lock); in qca_open()
577 mutex_init(&qca->hci_memdump_lock); in qca_open()
578 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); in qca_open()
579 if (!qca->workqueue) { in qca_open()
582 return -ENOMEM; in qca_open()
585 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); in qca_open()
586 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); in qca_open()
587 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); in qca_open()
588 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); in qca_open()
589 INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump); in qca_open()
590 INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout, in qca_open()
592 init_waitqueue_head(&qca->suspend_wait_q); in qca_open()
594 qca->hu = hu; in qca_open()
595 init_completion(&qca->drop_ev_comp); in qca_open()
597 /* Assume we start with both sides asleep -- extra wakes OK */ in qca_open()
598 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_open()
599 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in qca_open()
601 qca->vote_last_jif = jiffies; in qca_open()
603 hu->priv = qca; in qca_open()
605 if (hu->serdev) { in qca_open()
606 qcadev = serdev_device_get_drvdata(hu->serdev); in qca_open()
608 if (qca_is_wcn399x(qcadev->btsoc_type)) in qca_open()
609 hu->init_speed = qcadev->init_speed; in qca_open()
611 if (qcadev->oper_speed) in qca_open()
612 hu->oper_speed = qcadev->oper_speed; in qca_open()
615 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); in qca_open()
616 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; in qca_open()
618 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); in qca_open()
619 qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS; in qca_open()
622 qca->tx_idle_delay, qca->wake_retrans); in qca_open()
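
qca_open() arms its wake-retransmit and TX-idle timers with timer_setup() and (re)arms them later via mod_timer(). A hedged sketch of that pairing, with hypothetical names:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct my_ctx {
        struct timer_list idle_timer;
    };

    static void my_idle_fn(struct timer_list *t)
    {
        /* from_timer() recovers the enclosing context from the timer */
        struct my_ctx *ctx = from_timer(ctx, t, idle_timer);

        /* ... declare the link idle ... */
        (void)ctx;
    }

    static void my_arm(struct my_ctx *ctx, unsigned int delay_ms)
    {
        timer_setup(&ctx->idle_timer, my_idle_fn, 0);
        mod_timer(&ctx->idle_timer, jiffies + msecs_to_jiffies(delay_ms));
    }
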
630 struct qca_data *qca = hu->priv; in qca_debugfs_init()
634 if (!hdev->debugfs) in qca_debugfs_init()
637 if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) in qca_debugfs_init()
640 ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); in qca_debugfs_init()
644 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); in qca_debugfs_init()
645 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); in qca_debugfs_init()
647 &qca->ibs_sent_slps); in qca_debugfs_init()
649 &qca->ibs_sent_wakes); in qca_debugfs_init()
651 &qca->ibs_sent_wacks); in qca_debugfs_init()
653 &qca->ibs_recv_slps); in qca_debugfs_init()
655 &qca->ibs_recv_wakes); in qca_debugfs_init()
657 &qca->ibs_recv_wacks); in qca_debugfs_init()
658 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); in qca_debugfs_init()
659 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); in qca_debugfs_init()
660 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); in qca_debugfs_init()
661 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); in qca_debugfs_init()
662 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); in qca_debugfs_init()
663 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); in qca_debugfs_init()
664 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); in qca_debugfs_init()
665 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); in qca_debugfs_init()
666 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); in qca_debugfs_init()
667 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); in qca_debugfs_init()
671 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); in qca_debugfs_init()
673 &qca->tx_idle_delay); in qca_debugfs_init()
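
Each statistic above becomes one file under hdev->debugfs/ibs, with the file type chosen to match the width of the backing variable; the debugfs_create_*() helpers return void, so creation is fire-and-forget. A minimal sketch with hypothetical names:

    #include <linux/debugfs.h>

    static u32 my_counter;
    static bool my_flag;

    static void my_debugfs_init(struct dentry *parent)
    {
        struct dentry *dir = debugfs_create_dir("stats", parent);

        /* file type matches the backing variable */
        debugfs_create_u32("my_counter", 0444, dir, &my_counter);
        debugfs_create_bool("my_flag", 0444, dir, &my_flag);
    }
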
679 struct qca_data *qca = hu->priv; in qca_flush()
683 skb_queue_purge(&qca->tx_wait_q); in qca_flush()
684 skb_queue_purge(&qca->txq); in qca_flush()
692 struct qca_data *qca = hu->priv; in qca_close()
698 skb_queue_purge(&qca->tx_wait_q); in qca_close()
699 skb_queue_purge(&qca->txq); in qca_close()
700 skb_queue_purge(&qca->rx_memdump_q); in qca_close()
701 destroy_workqueue(qca->workqueue); in qca_close()
702 del_timer_sync(&qca->tx_idle_timer); in qca_close()
703 del_timer_sync(&qca->wake_retrans_timer); in qca_close()
704 qca->hu = NULL; in qca_close()
706 kfree_skb(qca->rx_skb); in qca_close()
708 hu->priv = NULL; in qca_close()
715 /* Called upon a wake-up-indication from the device.
720 struct qca_data *qca = hu->priv; in device_want_to_wakeup()
724 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
726 qca->ibs_recv_wakes++; in device_want_to_wakeup()
729 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_want_to_wakeup()
730 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
734 switch (qca->rx_ibs_state) { in device_want_to_wakeup()
736 /* Make sure clock is on - we may have turned clock off since in device_want_to_wakeup()
739 queue_work(qca->workqueue, &qca->ws_awake_rx); in device_want_to_wakeup()
740 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
751 qca->ibs_sent_wacks++; in device_want_to_wakeup()
757 qca->rx_ibs_state); in device_want_to_wakeup()
761 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_wakeup()
767 /* Called upon a sleep-indication from the device.
772 struct qca_data *qca = hu->priv; in device_want_to_sleep()
774 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); in device_want_to_sleep()
776 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
778 qca->ibs_recv_slps++; in device_want_to_sleep()
780 switch (qca->rx_ibs_state) { in device_want_to_sleep()
783 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; in device_want_to_sleep()
785 queue_work(qca->workqueue, &qca->ws_rx_vote_off); in device_want_to_sleep()
794 qca->rx_ibs_state); in device_want_to_sleep()
798 wake_up_interruptible(&qca->suspend_wait_q); in device_want_to_sleep()
800 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_want_to_sleep()
803 /* Called upon wake-up-acknowledgement from the device
808 struct qca_data *qca = hu->priv; in device_woke_up()
813 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in device_woke_up()
815 qca->ibs_recv_wacks++; in device_woke_up()
817 /* Don't react to the wake-up-acknowledgment when suspending. */ in device_woke_up()
818 if (test_bit(QCA_SUSPENDING, &qca->flags)) { in device_woke_up()
819 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
823 switch (qca->tx_ibs_state) { in device_woke_up()
827 qca->tx_ibs_state); in device_woke_up()
832 while ((skb = skb_dequeue(&qca->tx_wait_q))) in device_woke_up()
833 skb_queue_tail(&qca->txq, skb); in device_woke_up()
836 del_timer(&qca->wake_retrans_timer); in device_woke_up()
837 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in device_woke_up()
838 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in device_woke_up()
839 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; in device_woke_up()
845 qca->tx_ibs_state); in device_woke_up()
849 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in device_woke_up()
861 struct qca_data *qca = hu->priv; in qca_enqueue()
864 qca->tx_ibs_state); in qca_enqueue()
866 if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_enqueue()
868 bt_dev_dbg(hu->hdev, "SSR is in progress"); in qca_enqueue()
876 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_enqueue()
879 * Out-of-Band (GPIO control) sleep is selected. in qca_enqueue()
882 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_enqueue()
883 test_bit(QCA_SUSPENDING, &qca->flags)) { in qca_enqueue()
884 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
885 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
890 switch (qca->tx_ibs_state) { in qca_enqueue()
893 skb_queue_tail(&qca->txq, skb); in qca_enqueue()
894 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); in qca_enqueue()
895 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); in qca_enqueue()
901 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
903 qca->tx_ibs_state = HCI_IBS_TX_WAKING; in qca_enqueue()
905 queue_work(qca->workqueue, &qca->ws_awake_device); in qca_enqueue()
911 skb_queue_tail(&qca->tx_wait_q, skb); in qca_enqueue()
916 qca->tx_ibs_state); in qca_enqueue()
921 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_enqueue()
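
Between qca_enqueue(), device_woke_up() and hci_ibs_tx_idle_timeout(), the TX side forms a three-state machine: ASLEEP (queue the packet and request a wake), WAKING (hold packets in tx_wait_q until the ack arrives), AWAKE (queue directly and re-arm the idle timer). A runnable model of just the transitions (a sketch, not the driver's code):

    #include <stdio.h>

    enum tx_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

    /* packet enqueued while asleep: send HCI_IBS_WAKE_IND, start waking */
    static enum tx_state on_enqueue(enum tx_state s)
    {
        return s == TX_ASLEEP ? TX_WAKING : s;
    }

    /* wake ack received: move tx_wait_q into txq and transmit */
    static enum tx_state on_wake_ack(enum tx_state s)
    {
        return s == TX_WAKING ? TX_AWAKE : s;
    }

    /* idle timer fired: send HCI_IBS_SLEEP_IND and go back to sleep */
    static enum tx_state on_idle_timeout(enum tx_state s)
    {
        return s == TX_AWAKE ? TX_ASLEEP : s;
    }

    int main(void)
    {
        enum tx_state s = TX_ASLEEP;

        s = on_enqueue(s);      /* ASLEEP -> WAKING */
        s = on_wake_ack(s);     /* WAKING -> AWAKE */
        s = on_idle_timeout(s); /* AWAKE -> ASLEEP */
        printf("final state=%d\n", s);
        return 0;
    }
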
969 if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE) in qca_recv_acl_data()
979 struct hci_uart *hu = qca->hu; in qca_controller_memdump()
982 struct qca_memdump_data *qca_memdump = qca->qca_memdump; in qca_controller_memdump()
991 while ((skb = skb_dequeue(&qca->rx_memdump_q))) { in qca_controller_memdump()
993 mutex_lock(&qca->hci_memdump_lock); in qca_controller_memdump()
997 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_controller_memdump()
998 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_controller_memdump()
999 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1007 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1011 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1014 qca->memdump_state = QCA_MEMDUMP_COLLECTING; in qca_controller_memdump()
1015 cmd_hdr = (void *) skb->data; in qca_controller_memdump()
1016 seq_no = __le16_to_cpu(cmd_hdr->seq_no); in qca_controller_memdump()
1027 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_controller_memdump()
1028 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1029 dump = (void *) skb->data; in qca_controller_memdump()
1030 dump_size = __le32_to_cpu(dump->dump_size); in qca_controller_memdump()
1032 bt_dev_err(hu->hdev, "Rx invalid memdump size"); in qca_controller_memdump()
1035 qca->qca_memdump = NULL; in qca_controller_memdump()
1036 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1040 bt_dev_info(hu->hdev, "QCA collecting dump of size:%u", in qca_controller_memdump()
1042 queue_delayed_work(qca->workqueue, in qca_controller_memdump()
1043 &qca->ctrl_memdump_timeout, in qca_controller_memdump()
1049 qca_memdump->ram_dump_size = dump_size; in qca_controller_memdump()
1050 qca_memdump->memdump_buf_head = memdump_buf; in qca_controller_memdump()
1051 qca_memdump->memdump_buf_tail = memdump_buf; in qca_controller_memdump()
1054 memdump_buf = qca_memdump->memdump_buf_tail; in qca_controller_memdump()
1060 bt_dev_err(hu->hdev, "QCA: Discarding other packets"); in qca_controller_memdump()
1063 qca->qca_memdump = NULL; in qca_controller_memdump()
1064 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1076 while ((seq_no > qca_memdump->current_seq_no + 1) && in qca_controller_memdump()
1079 bt_dev_err(hu->hdev, "QCA controller missed packet:%d", in qca_controller_memdump()
1080 qca_memdump->current_seq_no); in qca_controller_memdump()
1081 rx_size = qca_memdump->received_dump; in qca_controller_memdump()
1083 if (rx_size > qca_memdump->ram_dump_size) { in qca_controller_memdump()
1084 bt_dev_err(hu->hdev, in qca_controller_memdump()
1086 qca_memdump->received_dump); in qca_controller_memdump()
1091 qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE; in qca_controller_memdump()
1092 qca_memdump->current_seq_no++; in qca_controller_memdump()
1095 rx_size = qca_memdump->received_dump + skb->len; in qca_controller_memdump()
1096 if (rx_size <= qca_memdump->ram_dump_size) { in qca_controller_memdump()
1098 (seq_no != qca_memdump->current_seq_no)) in qca_controller_memdump()
1099 bt_dev_err(hu->hdev, in qca_controller_memdump()
1102 bt_dev_dbg(hu->hdev, in qca_controller_memdump()
1104 seq_no, skb->len); in qca_controller_memdump()
1105 memcpy(memdump_buf, (unsigned char *)skb->data, in qca_controller_memdump()
1106 skb->len); in qca_controller_memdump()
1107 memdump_buf = memdump_buf + skb->len; in qca_controller_memdump()
1108 qca_memdump->memdump_buf_tail = memdump_buf; in qca_controller_memdump()
1109 qca_memdump->current_seq_no = seq_no + 1; in qca_controller_memdump()
1110 qca_memdump->received_dump += skb->len; in qca_controller_memdump()
1112 bt_dev_err(hu->hdev, in qca_controller_memdump()
1114 qca_memdump->received_dump, seq_no); in qca_controller_memdump()
1116 qca->qca_memdump = qca_memdump; in qca_controller_memdump()
1119 bt_dev_info(hu->hdev, in qca_controller_memdump()
1121 qca_memdump->received_dump, in qca_controller_memdump()
1122 qca_memdump->ram_dump_size); in qca_controller_memdump()
1123 memdump_buf = qca_memdump->memdump_buf_head; in qca_controller_memdump()
1124 dev_coredumpv(&hu->serdev->dev, memdump_buf, in qca_controller_memdump()
1125 qca_memdump->received_dump, GFP_KERNEL); in qca_controller_memdump()
1126 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_controller_memdump()
1127 kfree(qca->qca_memdump); in qca_controller_memdump()
1128 qca->qca_memdump = NULL; in qca_controller_memdump()
1129 qca->memdump_state = QCA_MEMDUMP_COLLECTED; in qca_controller_memdump()
1130 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_controller_memdump()
1133 mutex_unlock(&qca->hci_memdump_lock); in qca_controller_memdump()
1142 struct qca_data *qca = hu->priv; in qca_controller_memdump_event()
1144 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_controller_memdump_event()
1145 skb_queue_tail(&qca->rx_memdump_q, skb); in qca_controller_memdump_event()
1146 queue_work(qca->workqueue, &qca->ctrl_memdump_evt); in qca_controller_memdump_event()
1154 struct qca_data *qca = hu->priv; in qca_recv_event()
1156 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { in qca_recv_event()
1157 struct hci_event_hdr *hdr = (void *)skb->data; in qca_recv_event()
1159 /* For the WCN3990 the vendor command for a baudrate change in qca_recv_event()
1162 * new baudrate. The event is received and properly decoded in qca_recv_event()
1163 * after changing the baudrate of the host port. It needs to in qca_recv_event()
1169 if (hdr->evt == HCI_EV_VENDOR) in qca_recv_event()
1170 complete(&qca->drop_ev_comp); in qca_recv_event()
1181 if ((skb->data[0] == HCI_VENDOR_PKT) && in qca_recv_event()
1182 (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE)) in qca_recv_event()
1220 struct qca_data *qca = hu->priv; in qca_recv()
1222 if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) in qca_recv()
1223 return -EUNATCH; in qca_recv()
1225 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, in qca_recv()
1227 if (IS_ERR(qca->rx_skb)) { in qca_recv()
1228 int err = PTR_ERR(qca->rx_skb); in qca_recv()
1229 bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); in qca_recv()
1230 qca->rx_skb = NULL; in qca_recv()
1239 struct qca_data *qca = hu->priv; in qca_dequeue()
1241 return skb_dequeue(&qca->txq); in qca_dequeue()
1280 static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) in qca_set_baudrate() argument
1283 struct qca_data *qca = hu->priv; in qca_set_baudrate()
1287 if (baudrate > QCA_BAUDRATE_3200000) in qca_set_baudrate()
1288 return -EINVAL; in qca_set_baudrate()
1290 cmd[4] = baudrate; in qca_set_baudrate()
1294 bt_dev_err(hdev, "Failed to allocate baudrate packet"); in qca_set_baudrate()
1295 return -ENOMEM; in qca_set_baudrate()
1298 /* Assign commands to change baudrate and packet type. */ in qca_set_baudrate()
1302 skb_queue_tail(&qca->txq, skb); in qca_set_baudrate()
1305 /* Wait for the baudrate change request to be sent */ in qca_set_baudrate()
1307 while (!skb_queue_empty(&qca->txq)) in qca_set_baudrate()
1310 if (hu->serdev) in qca_set_baudrate()
1311 serdev_device_wait_until_sent(hu->serdev, in qca_set_baudrate()
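
For reference, a sketch of the command layout consistent with the cmd[4] assignment above (byte values as in this driver's source; the opcode decode is editorial):

    /* QCA change-baudrate vendor command (H4 HCI command packet):
     *   byte 0: 0x01       command packet indicator
     *   byte 1: 0x48 \  opcode 0xfc48 (vendor OGF 0x3f, OCF 0x048),
     *   byte 2: 0xFC /  little-endian
     *   byte 3: 0x01       parameter length
     *   byte 4: 0x00       patched with one of the QCA_BAUDRATE_* codes
     */
    u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
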
1325 if (hu->serdev) in host_set_baudrate()
1326 serdev_device_set_baudrate(hu->serdev, speed); in host_set_baudrate()
1338 * at the required baudrate to the wcn3990. The wcn3990 has an external in qca_send_power_pulse()
1339 * circuit at the Tx pin which decodes pulses sent at a specific baudrate. in qca_send_power_pulse()
1340 * For example, wcn3990 supports RF COEX antenna for both Wi-Fi/BT in qca_send_power_pulse()
1342 * Wi-Fi/BT. Powering up the power sources will not enable BT, until in qca_send_power_pulse()
1347 bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd); in qca_send_power_pulse()
1349 serdev_device_write_flush(hu->serdev); in qca_send_power_pulse()
1351 ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd)); in qca_send_power_pulse()
1353 bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd); in qca_send_power_pulse()
1357 serdev_device_wait_until_sent(hu->serdev, timeout); in qca_send_power_pulse()
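
The flush/write/wait-until-sent sequence above guarantees the pulse byte actually leaves the UART FIFO before the power state changes. A hedged sketch of the same sequence as a standalone helper (names hypothetical):

    #include <linux/serdev.h>
    #include <linux/types.h>

    static int send_pulse(struct serdev_device *serdev, u8 cmd,
                          unsigned long timeout)
    {
        int ret;

        serdev_device_write_flush(serdev);          /* drop stale TX data */
        ret = serdev_device_write_buf(serdev, &cmd, sizeof(cmd));
        if (ret < 0)
            return ret;
        serdev_device_wait_until_sent(serdev, timeout); /* drain the FIFO */
        return 0;
    }
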
1375 if (hu->init_speed) in qca_get_speed()
1376 speed = hu->init_speed; in qca_get_speed()
1377 else if (hu->proto->init_speed) in qca_get_speed()
1378 speed = hu->proto->init_speed; in qca_get_speed()
1380 if (hu->oper_speed) in qca_get_speed()
1381 speed = hu->oper_speed; in qca_get_speed()
1382 else if (hu->proto->oper_speed) in qca_get_speed()
1383 speed = hu->proto->oper_speed; in qca_get_speed()
1394 return -EINVAL; in qca_check_speeds()
1398 return -EINVAL; in qca_check_speeds()
1407 struct qca_data *qca = hu->priv; in qca_set_speed()
1422 * changing the baudrate of chip and host. in qca_set_speed()
1428 reinit_completion(&qca->drop_ev_comp); in qca_set_speed()
1429 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
1433 bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed); in qca_set_speed()
1434 ret = qca_set_baudrate(hu->hdev, qca_baudrate); in qca_set_speed()
1446 * for the baudrate change command. in qca_set_speed()
1448 if (!wait_for_completion_timeout(&qca->drop_ev_comp, in qca_set_speed()
1450 bt_dev_err(hu->hdev, in qca_set_speed()
1451 "Failed to change controller baudrate\n"); in qca_set_speed()
1452 ret = -ETIMEDOUT; in qca_set_speed()
1455 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); in qca_set_speed()
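
The QCA_DROP_VENDOR_EVENT handling above pairs with qca_recv_event(): the speed-change path re-initializes a completion before issuing the vendor command, and the receive path completes it once the (dropped) vendor event arrives at the new baudrate. A hedged sketch of that handshake, with hypothetical names and timeout:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct my_ctx {
        struct completion drop_ev;
    };

    /* speed-change side: arm, switch baudrate, wait for the vendor event */
    static int my_change_speed(struct my_ctx *ctx)
    {
        reinit_completion(&ctx->drop_ev);
        /* ... send vendor command and retune the host UART ... */
        if (!wait_for_completion_timeout(&ctx->drop_ev,
                                         msecs_to_jiffies(500)))
            return -ETIMEDOUT;
        return 0;
    }

    /* receive side: called when the vendor event is recognized and dropped */
    static void my_on_vendor_event(struct my_ctx *ctx)
    {
        complete(&ctx->drop_ev);
    }
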
1464 struct qca_data *qca = hu->priv; in qca_send_crashbuffer()
1469 bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet"); in qca_send_crashbuffer()
1470 return -ENOMEM; in qca_send_crashbuffer()
1480 bt_dev_info(hu->hdev, "crash the soc to collect controller dump"); in qca_send_crashbuffer()
1481 skb_queue_tail(&qca->txq, skb); in qca_send_crashbuffer()
1490 struct qca_data *qca = hu->priv; in qca_wait_for_dump_collection()
1492 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION, in qca_wait_for_dump_collection()
1495 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_wait_for_dump_collection()
1501 struct qca_data *qca = hu->priv; in qca_hw_error()
1503 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_hw_error()
1504 set_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1505 bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); in qca_hw_error()
1507 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_hw_error()
1514 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_hw_error()
1517 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_hw_error()
1525 mutex_lock(&qca->hci_memdump_lock); in qca_hw_error()
1526 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1527 bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout"); in qca_hw_error()
1528 if (qca->qca_memdump) { in qca_hw_error()
1529 vfree(qca->qca_memdump->memdump_buf_head); in qca_hw_error()
1530 kfree(qca->qca_memdump); in qca_hw_error()
1531 qca->qca_memdump = NULL; in qca_hw_error()
1533 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_hw_error()
1534 cancel_delayed_work(&qca->ctrl_memdump_timeout); in qca_hw_error()
1536 mutex_unlock(&qca->hci_memdump_lock); in qca_hw_error()
1538 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || in qca_hw_error()
1539 qca->memdump_state == QCA_MEMDUMP_COLLECTED) { in qca_hw_error()
1540 cancel_work_sync(&qca->ctrl_memdump_evt); in qca_hw_error()
1541 skb_queue_purge(&qca->rx_memdump_q); in qca_hw_error()
1544 clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); in qca_hw_error()
1550 struct qca_data *qca = hu->priv; in qca_cmd_timeout()
1552 set_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_cmd_timeout()
1553 if (qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_cmd_timeout()
1554 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); in qca_cmd_timeout()
1557 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { in qca_cmd_timeout()
1565 mutex_lock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1566 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { in qca_cmd_timeout()
1567 qca->memdump_state = QCA_MEMDUMP_TIMEOUT; in qca_cmd_timeout()
1568 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { in qca_cmd_timeout()
1572 hci_reset_dev(hu->hdev); in qca_cmd_timeout()
1575 mutex_unlock(&qca->hci_memdump_lock); in qca_cmd_timeout()
1586 qcadev = serdev_device_get_drvdata(hu->serdev); in qca_wcn3990_init()
1587 if (!qcadev->bt_power->vregs_on) { in qca_wcn3990_init()
1588 serdev_device_close(hu->serdev); in qca_wcn3990_init()
1593 ret = serdev_device_open(hu->serdev); in qca_wcn3990_init()
1595 bt_dev_err(hu->hdev, "failed to open port"); in qca_wcn3990_init()
1616 serdev_device_close(hu->serdev); in qca_wcn3990_init()
1617 ret = serdev_device_open(hu->serdev); in qca_wcn3990_init()
1619 bt_dev_err(hu->hdev, "failed to open port"); in qca_wcn3990_init()
1633 struct qca_data *qca = hu->priv; in qca_power_on()
1636 /* Non-serdev device usually is powered by external power in qca_power_on()
1639 if (!hu->serdev) in qca_power_on()
1645 qcadev = serdev_device_get_drvdata(hu->serdev); in qca_power_on()
1646 if (qcadev->bt_en) { in qca_power_on()
1647 gpiod_set_value_cansleep(qcadev->bt_en, 1); in qca_power_on()
1653 clear_bit(QCA_BT_OFF, &qca->flags); in qca_power_on()
1659 struct hci_dev *hdev = hu->hdev; in qca_setup()
1660 struct qca_data *qca = hu->priv; in qca_setup()
1672 clear_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1674 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1679 set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); in qca_setup()
1684 qca->memdump_state = QCA_MEMDUMP_IDLE; in qca_setup()
1691 clear_bit(QCA_SSR_TRIGGERED, &qca->flags); in qca_setup()
1694 set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); in qca_setup()
1725 clear_bit(QCA_IBS_DISABLED, &qca->flags); in qca_setup()
1727 hu->hdev->hw_error = qca_hw_error; in qca_setup()
1728 hu->hdev->cmd_timeout = qca_cmd_timeout; in qca_setup()
1729 } else if (ret == -ENOENT) { in qca_setup()
1730 /* No patch/nvm-config found, run with original fw/config */ in qca_setup()
1731 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1733 } else if (ret == -EAGAIN) { in qca_setup()
1735 * Userspace firmware loader will return -EAGAIN in case no in qca_setup()
1736 * patch/nvm-config is found, so run with original fw/config. in qca_setup()
1738 set_bit(QCA_ROM_FW, &qca->flags); in qca_setup()
1743 if (hu->serdev) { in qca_setup()
1744 serdev_device_close(hu->serdev); in qca_setup()
1745 ret = serdev_device_open(hu->serdev); in qca_setup()
1758 hu->hdev->set_bdaddr = qca_set_bdaddr_rome; in qca_setup()
1760 hu->hdev->set_bdaddr = qca_set_bdaddr; in qca_setup()
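
The -ENOENT/-EAGAIN branches above treat a missing patch/nvm-config as non-fatal and fall back to the controller's ROM firmware. A hedged illustration of that policy around request_firmware() (helper name and flow are hypothetical, not the driver's exact path):

    #include <linux/errno.h>
    #include <linux/firmware.h>

    static int my_load_patch(struct device *dev, const char *name)
    {
        const struct firmware *fw;
        int ret = request_firmware(&fw, name, dev);

        /* no patch found: run with the original (ROM) fw/config */
        if (ret == -ENOENT || ret == -EAGAIN)
            return 0;
        if (ret)
            return ret;

        /* ... download fw->data (fw->size bytes) to the controller ... */
        release_firmware(fw);
        return 0;
    }
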
1823 struct qca_data *qca = hu->priv; in qca_power_shutdown()
1831 spin_lock_irqsave(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
1832 set_bit(QCA_IBS_DISABLED, &qca->flags); in qca_power_shutdown()
1834 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_power_shutdown()
1836 /* Non-serdev device usually is powered by external power in qca_power_shutdown()
1839 if (!hu->serdev) in qca_power_shutdown()
1842 qcadev = serdev_device_get_drvdata(hu->serdev); in qca_power_shutdown()
1848 } else if (qcadev->bt_en) { in qca_power_shutdown()
1849 gpiod_set_value_cansleep(qcadev->bt_en, 0); in qca_power_shutdown()
1852 set_bit(QCA_BT_OFF, &qca->flags); in qca_power_shutdown()
1858 struct qca_data *qca = hu->priv; in qca_power_off()
1861 hu->hdev->hw_error = NULL; in qca_power_off()
1862 hu->hdev->cmd_timeout = NULL; in qca_power_off()
1864 del_timer_sync(&qca->wake_retrans_timer); in qca_power_off()
1865 del_timer_sync(&qca->tx_idle_timer); in qca_power_off()
1869 && qca->memdump_state == QCA_MEMDUMP_IDLE) { in qca_power_off()
1880 struct qca_power *power = qcadev->bt_power; in qca_regulator_enable()
1884 if (power->vregs_on) in qca_regulator_enable()
1887 BT_DBG("enabling %d regulators", power->num_vregs); in qca_regulator_enable()
1889 ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk); in qca_regulator_enable()
1893 power->vregs_on = true; in qca_regulator_enable()
1895 ret = clk_prepare_enable(qcadev->susclk); in qca_regulator_enable()
1909 power = qcadev->bt_power; in qca_regulator_disable()
1912 if (!power->vregs_on) in qca_regulator_disable()
1915 regulator_bulk_disable(power->num_vregs, power->vreg_bulk); in qca_regulator_disable()
1916 power->vregs_on = false; in qca_regulator_disable()
1918 clk_disable_unprepare(qcadev->susclk); in qca_regulator_disable()
1928 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL); in qca_init_regulators()
1930 return -ENOMEM; in qca_init_regulators()
1935 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk); in qca_init_regulators()
1945 qca->vreg_bulk = bulk; in qca_init_regulators()
1946 qca->num_vregs = num_vregs; in qca_init_regulators()
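
qca_init_regulators() uses the bulk regulator API so all supplies are acquired and enabled as a set. A hedged sketch of the same pattern (supply names are hypothetical examples):

    #include <linux/kernel.h>
    #include <linux/regulator/consumer.h>

    static const char * const my_supplies[] = { "vddio", "vddch0" };

    static int my_power_init(struct device *dev,
                             struct regulator_bulk_data *bulk)
    {
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(my_supplies); i++)
            bulk[i].supply = my_supplies[i];

        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(my_supplies), bulk);
        if (ret)
            return ret;

        /* enables the whole set; the core rolls back on failure */
        return regulator_bulk_enable(ARRAY_SIZE(my_supplies), bulk);
    }
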
1959 qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL); in qca_serdev_probe()
1961 return -ENOMEM; in qca_serdev_probe()
1963 qcadev->serdev_hu.serdev = serdev; in qca_serdev_probe()
1964 data = device_get_match_data(&serdev->dev); in qca_serdev_probe()
1966 device_property_read_string(&serdev->dev, "firmware-name", in qca_serdev_probe()
1967 &qcadev->firmware_name); in qca_serdev_probe()
1968 device_property_read_u32(&serdev->dev, "max-speed", in qca_serdev_probe()
1969 &qcadev->oper_speed); in qca_serdev_probe()
1970 if (!qcadev->oper_speed) in qca_serdev_probe()
1973 if (data && qca_is_wcn399x(data->soc_type)) { in qca_serdev_probe()
1974 qcadev->btsoc_type = data->soc_type; in qca_serdev_probe()
1975 qcadev->bt_power = devm_kzalloc(&serdev->dev, in qca_serdev_probe()
1978 if (!qcadev->bt_power) in qca_serdev_probe()
1979 return -ENOMEM; in qca_serdev_probe()
1981 qcadev->bt_power->dev = &serdev->dev; in qca_serdev_probe()
1982 err = qca_init_regulators(qcadev->bt_power, data->vregs, in qca_serdev_probe()
1983 data->num_vregs); in qca_serdev_probe()
1985 BT_ERR("Failed to init regulators: %d", err); in qca_serdev_probe()
1989 qcadev->bt_power->vregs_on = false; in qca_serdev_probe()
1991 qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); in qca_serdev_probe()
1992 if (IS_ERR(qcadev->susclk)) { in qca_serdev_probe()
1993 dev_err(&serdev->dev, "failed to acquire clk\n"); in qca_serdev_probe()
1994 return PTR_ERR(qcadev->susclk); in qca_serdev_probe()
1997 err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); in qca_serdev_probe()
2004 qcadev->btsoc_type = data->soc_type; in qca_serdev_probe()
2006 qcadev->btsoc_type = QCA_ROME; in qca_serdev_probe()
2008 qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", in qca_serdev_probe()
2010 if (IS_ERR_OR_NULL(qcadev->bt_en)) { in qca_serdev_probe()
2011 dev_warn(&serdev->dev, "failed to acquire enable gpio\n"); in qca_serdev_probe()
2015 qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); in qca_serdev_probe()
2016 if (IS_ERR(qcadev->susclk)) { in qca_serdev_probe()
2017 dev_warn(&serdev->dev, "failed to acquire clk\n"); in qca_serdev_probe()
2018 return PTR_ERR(qcadev->susclk); in qca_serdev_probe()
2020 err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); in qca_serdev_probe()
2024 err = clk_prepare_enable(qcadev->susclk); in qca_serdev_probe()
2028 err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); in qca_serdev_probe()
2031 clk_disable_unprepare(qcadev->susclk); in qca_serdev_probe()
2036 hdev = qcadev->serdev_hu.hdev; in qca_serdev_probe()
2039 set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); in qca_serdev_probe()
2040 hdev->shutdown = qca_power_off; in qca_serdev_probe()
2047 if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH) in qca_serdev_probe()
2049 &hdev->quirks); in qca_serdev_probe()
2051 if (data->capabilities & QCA_CAP_VALID_LE_STATES) in qca_serdev_probe()
2052 set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); in qca_serdev_probe()
2061 struct qca_power *power = qcadev->bt_power; in qca_serdev_remove()
2063 if (qca_is_wcn399x(qcadev->btsoc_type) && power->vregs_on) in qca_serdev_remove()
2064 qca_power_shutdown(&qcadev->serdev_hu); in qca_serdev_remove()
2065 else if (qcadev->susclk) in qca_serdev_remove()
2066 clk_disable_unprepare(qcadev->susclk); in qca_serdev_remove()
2068 hci_uart_unregister_device(&qcadev->serdev_hu); in qca_serdev_remove()
2077 struct hci_uart *hu = &qcadev->serdev_hu; in qca_serdev_shutdown()
2078 struct hci_dev *hdev = hu->hdev; in qca_serdev_shutdown()
2079 struct qca_data *qca = hu->priv; in qca_serdev_shutdown()
2083 if (qcadev->btsoc_type == QCA_QCA6390) { in qca_serdev_shutdown()
2084 if (test_bit(QCA_BT_OFF, &qca->flags) || in qca_serdev_shutdown()
2085 !test_bit(HCI_RUNNING, &hdev->flags)) in qca_serdev_shutdown()
2114 struct hci_uart *hu = &qcadev->serdev_hu; in qca_suspend()
2115 struct qca_data *qca = hu->priv; in qca_suspend()
2122 set_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
2125 * support in-band sleep in qca_suspend()
2127 if (test_bit(QCA_ROM_FW, &qca->flags)) in qca_suspend()
2134 if (test_bit(QCA_BT_OFF, &qca->flags) && in qca_suspend()
2135 !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) in qca_suspend()
2138 if (test_bit(QCA_IBS_DISABLED, &qca->flags) || in qca_suspend()
2139 test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { in qca_suspend()
2140 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? in qca_suspend()
2144 /* The QCA_IBS_DISABLED flag is set during FW download in qca_suspend()
2146 * and cleared after the FW download completes. in qca_suspend()
2148 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, in qca_suspend()
2151 if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { in qca_suspend()
2152 bt_dev_err(hu->hdev, "SSR or FW download timed out"); in qca_suspend()
2153 ret = -ETIMEDOUT; in qca_suspend()
2158 cancel_work_sync(&qca->ws_awake_device); in qca_suspend()
2159 cancel_work_sync(&qca->ws_awake_rx); in qca_suspend()
2161 spin_lock_irqsave_nested(&qca->hci_ibs_lock, in qca_suspend()
2164 switch (qca->tx_ibs_state) { in qca_suspend()
2166 del_timer(&qca->wake_retrans_timer); in qca_suspend()
2169 del_timer(&qca->tx_idle_timer); in qca_suspend()
2171 serdev_device_write_flush(hu->serdev); in qca_suspend()
2173 ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd)); in qca_suspend()
2180 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; in qca_suspend()
2181 qca->ibs_sent_slps++; in qca_suspend()
2189 BT_ERR("Spurious tx state %d", qca->tx_ibs_state); in qca_suspend()
2190 ret = -EINVAL; in qca_suspend()
2194 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); in qca_suspend()
2200 serdev_device_wait_until_sent(hu->serdev, in qca_suspend()
2208 ret = wait_event_interruptible_timeout(qca->suspend_wait_q, in qca_suspend()
2209 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, in qca_suspend()
2212 ret = -ETIMEDOUT; in qca_suspend()
2219 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_suspend()
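
qca_suspend() above gates on QCA_IBS_DISABLED being cleared by the FW-download/memdump path, using wait_on_bit_timeout() and then re-testing the bit to detect a timeout. The same gating pattern in isolation (names hypothetical):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/wait_bit.h>

    /* Wait up to timeout_ms for another context to clear @bit in @flags. */
    static int wait_bit_cleared(unsigned long *flags, int bit,
                                unsigned int timeout_ms)
    {
        wait_on_bit_timeout(flags, bit, TASK_UNINTERRUPTIBLE,
                            msecs_to_jiffies(timeout_ms));

        /* the wait can return early; the bit itself is the truth */
        return test_bit(bit, flags) ? -ETIMEDOUT : 0;
    }
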
2228 struct hci_uart *hu = &qcadev->serdev_hu; in qca_resume()
2229 struct qca_data *qca = hu->priv; in qca_resume()
2231 clear_bit(QCA_SUSPENDING, &qca->flags); in qca_resume()
2240 { .compatible = "qcom,qca6174-bt" },
2241 { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
2242 { .compatible = "qcom,qca9377-bt" },
2243 { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
2244 { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
2245 { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},