/*
 * Copyright (c) 2018 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "rsi_main.h"
#include "rsi_coex.h"
#include "rsi_mgmt.h"
#include "rsi_hal.h"

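/**
 * rsi_coex_determine_coex_q() - This function determines which coex TX queue
 *				 to serve next. The WLAN queue has the highest
 *				 priority, followed by the BT queue and then
 *				 the common queue.
 * @coex_cb: Pointer to the coex control block.
 *
 * Return: Queue to be served, or RSI_COEX_Q_INVALID if all queues are empty.
 */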
static enum rsi_coex_queues rsi_coex_determine_coex_q
			(struct rsi_coex_ctrl_block *coex_cb)
{
	enum rsi_coex_queues q_num = RSI_COEX_Q_INVALID;

	if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_COMMON]) > 0)
		q_num = RSI_COEX_Q_COMMON;
	if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_BT]) > 0)
		q_num = RSI_COEX_Q_BT;
	if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_WLAN]) > 0)
		q_num = RSI_COEX_Q_WLAN;

	return q_num;
}

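/**
 * rsi_coex_sched_tx_pkts() - This function services the coex TX queues until
 *			      they are empty. Only the BT queue is ever
 *			      enqueued to, so this effectively drains pending
 *			      BT packets to the device.
 * @coex_cb: Pointer to the coex control block.
 *
 * Return: None.
 */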
static void rsi_coex_sched_tx_pkts(struct rsi_coex_ctrl_block *coex_cb)
{
	enum rsi_coex_queues coex_q = RSI_COEX_Q_INVALID;
	struct sk_buff *skb;

	do {
		coex_q = rsi_coex_determine_coex_q(coex_cb);
		rsi_dbg(INFO_ZONE, "queue = %d\n", coex_q);

		if (coex_q == RSI_COEX_Q_BT) {
			skb = skb_dequeue(&coex_cb->coex_tx_qs[RSI_COEX_Q_BT]);
			rsi_send_bt_pkt(coex_cb->priv, skb);
		}
	} while (coex_q != RSI_COEX_Q_INVALID);
}

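/**
 * rsi_coex_scheduler_thread() - This function is the coex TX scheduler
 *				 thread. It waits for a TX event, schedules
 *				 any queued packets and repeats until the
 *				 thread is asked to stop.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */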
static void rsi_coex_scheduler_thread(struct rsi_common *common)
{
	struct rsi_coex_ctrl_block *coex_cb =
		(struct rsi_coex_ctrl_block *)common->coex_cb;
	u32 timeout = EVENT_WAIT_FOREVER;

	do {
		rsi_wait_event(&coex_cb->coex_tx_thread.event, timeout);
		rsi_reset_event(&coex_cb->coex_tx_thread.event);

		rsi_coex_sched_tx_pkts(coex_cb);
	} while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0);

	complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
}

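/**
 * rsi_coex_recv_pkt() - This function handles messages received on the
 *			 common (coex) queue, such as the card ready and
 *			 sleep notify indications.
 * @common: Pointer to the driver private structure.
 * @msg: Pointer to the received message.
 *
 * Return: 0 always.
 */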
int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg)
{
	u8 msg_type = msg[RSI_RX_DESC_MSG_TYPE_OFFSET];

	switch (msg_type) {
	case COMMON_CARD_READY_IND:
		rsi_dbg(INFO_ZONE, "common card ready received\n");
		common->hibernate_resume = false;
		rsi_handle_card_ready(common, msg);
		break;
	case SLEEP_NOTIFY_IND:
		rsi_dbg(INFO_ZONE, "sleep notify received\n");
		rsi_mgmt_pkt_recv(common, msg);
		break;
	}

	return 0;
}

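/**
 * rsi_map_coex_q() - This function maps a HAL queue number to the
 *		      corresponding coex queue.
 * @hal_queue: HAL queue number.
 *
 * Return: Mapped coex queue, or RSI_COEX_Q_INVALID for an unknown queue.
 */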
static inline int rsi_map_coex_q(u8 hal_queue)
{
	switch (hal_queue) {
	case RSI_COEX_Q:
		return RSI_COEX_Q_COMMON;
	case RSI_WLAN_Q:
		return RSI_COEX_Q_WLAN;
	case RSI_BT_Q:
		return RSI_COEX_Q_BT;
	}
	return RSI_COEX_Q_INVALID;
}

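/**
 * rsi_coex_send_pkt() - This function sends a packet through the coex
 *			 module. BT packets are queued for the coex TX
 *			 thread; WLAN and common packets are handed to the
 *			 HAL directly.
 * @priv: Pointer to the driver private structure.
 * @skb: Socket buffer to be sent.
 * @hal_queue: HAL queue on which the packet is to be sent.
 *
 * Return: 0 on success, negative error code on failure.
 */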
int rsi_coex_send_pkt(void *priv, struct sk_buff *skb, u8 hal_queue)
{
	struct rsi_common *common = (struct rsi_common *)priv;
	struct rsi_coex_ctrl_block *coex_cb =
		(struct rsi_coex_ctrl_block *)common->coex_cb;
	struct skb_info *tx_params = NULL;
	enum rsi_coex_queues coex_q;
	int status;

	coex_q = rsi_map_coex_q(hal_queue);
	if (coex_q == RSI_COEX_Q_INVALID) {
		rsi_dbg(ERR_ZONE, "Invalid coex queue\n");
		return -EINVAL;
	}
	if (coex_q != RSI_COEX_Q_COMMON &&
	    coex_q != RSI_COEX_Q_WLAN) {
		skb_queue_tail(&coex_cb->coex_tx_qs[coex_q], skb);
		rsi_set_event(&coex_cb->coex_tx_thread.event);
		return 0;
	}
	if (common->iface_down) {
		tx_params =
			(struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data;

		if (!(tx_params->flags & INTERNAL_MGMT_PKT)) {
			rsi_indicate_tx_status(common->priv, skb, -EINVAL);
			return 0;
		}
	}

	/* Send packet to hal */
	if (skb->priority == MGMT_SOFT_Q)
		status = rsi_send_mgmt_pkt(common, skb);
	else
		status = rsi_send_data_pkt(common, skb);

	return status;
}

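/**
 * rsi_coex_attach() - This function allocates the coex control block,
 *		       initializes the coex TX queues and starts the coex
 *		       TX scheduler thread.
 * @common: Pointer to the driver private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */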
int rsi_coex_attach(struct rsi_common *common)
{
	struct rsi_coex_ctrl_block *coex_cb;
	int cnt;

	coex_cb = kzalloc(sizeof(*coex_cb), GFP_KERNEL);
	if (!coex_cb)
		return -ENOMEM;

	common->coex_cb = (void *)coex_cb;
	coex_cb->priv = common;

	/* Initialize co-ex queues */
	for (cnt = 0; cnt < NUM_COEX_TX_QUEUES; cnt++)
		skb_queue_head_init(&coex_cb->coex_tx_qs[cnt]);
	rsi_init_event(&coex_cb->coex_tx_thread.event);

	/* Initialize co-ex thread */
	if (rsi_create_kthread(common,
			       &coex_cb->coex_tx_thread,
			       rsi_coex_scheduler_thread,
			       "Coex-Tx-Thread")) {
		rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
		/* Free the control block so it is not leaked on failure */
		kfree(coex_cb);
		common->coex_cb = NULL;
		return -EINVAL;
	}
	return 0;
}

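/**
 * rsi_coex_detach() - This function stops the coex TX thread, purges the
 *		       coex TX queues and frees the coex control block.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */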
void rsi_coex_detach(struct rsi_common *common)
{
	struct rsi_coex_ctrl_block *coex_cb =
		(struct rsi_coex_ctrl_block *)common->coex_cb;
	int cnt;

	rsi_kill_thread(&coex_cb->coex_tx_thread);

	for (cnt = 0; cnt < NUM_COEX_TX_QUEUES; cnt++)
		skb_queue_purge(&coex_cb->coex_tx_qs[cnt]);

	kfree(coex_cb);
}