/*
 * Copyright (c) 2018 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "rsi_main.h"
#include "rsi_coex.h"
#include "rsi_mgmt.h"
#include "rsi_hal.h"

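/**
 * rsi_coex_determine_coex_q() - Select the next coex TX queue to service.
 * @coex_cb: Coex control block holding the per-queue skb lists.
 *
 * The queues are checked in the order common, BT, WLAN, and a later
 * non-empty queue overrides an earlier one, so WLAN traffic is picked
 * ahead of BT, which is picked ahead of common packets.
 *
 * Return: Queue to service next, or RSI_COEX_Q_INVALID if all are empty.
 */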
static enum rsi_coex_queues rsi_coex_determine_coex_q
			(struct rsi_coex_ctrl_block *coex_cb)
{
	enum rsi_coex_queues q_num = RSI_COEX_Q_INVALID;

	if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_COMMON]) > 0)
		q_num = RSI_COEX_Q_COMMON;
	if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_BT]) > 0)
		q_num = RSI_COEX_Q_BT;
	if (skb_queue_len(&coex_cb->coex_tx_qs[RSI_COEX_Q_WLAN]) > 0)
		q_num = RSI_COEX_Q_WLAN;

	return q_num;
}

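/**
 * rsi_coex_sched_tx_pkts() - Drain the coex TX queues.
 * @coex_cb: Coex control block whose queues are to be drained.
 *
 * Repeatedly picks the next queue to service and, for the BT queue,
 * dequeues one packet per iteration and hands it to rsi_send_bt_pkt().
 * The loop ends once no queue reports pending packets.
 */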
static void rsi_coex_sched_tx_pkts(struct rsi_coex_ctrl_block *coex_cb)
{
	enum rsi_coex_queues coex_q = RSI_COEX_Q_INVALID;
	struct sk_buff *skb;

	do {
		coex_q = rsi_coex_determine_coex_q(coex_cb);
		rsi_dbg(INFO_ZONE, "queue = %d\n", coex_q);

		if (coex_q == RSI_COEX_Q_BT) {
			skb = skb_dequeue(&coex_cb->coex_tx_qs[RSI_COEX_Q_BT]);
			rsi_send_bt_pkt(coex_cb->priv, skb);
		}
	} while (coex_q != RSI_COEX_Q_INVALID);
}

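/**
 * rsi_coex_scheduler_thread() - Coex TX kthread body.
 * @common: Driver private data structure.
 *
 * Sleeps on the coex TX thread event and, on each wakeup, resets the
 * event and schedules any queued packets. Runs until thread_done is set,
 * then terminates through kthread_complete_and_exit().
 */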
static void rsi_coex_scheduler_thread(struct rsi_common *common)
{
	struct rsi_coex_ctrl_block *coex_cb = common->coex_cb;
	u32 timeout = EVENT_WAIT_FOREVER;

	do {
		rsi_wait_event(&coex_cb->coex_tx_thread.event, timeout);
		rsi_reset_event(&coex_cb->coex_tx_thread.event);

		rsi_coex_sched_tx_pkts(coex_cb);
	} while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0);

	kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
}

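/**
 * rsi_coex_recv_pkt() - Dispatch a received coex/common message.
 * @common: Driver private data structure.
 * @msg: Received message, starting with the RX descriptor.
 *
 * Handles COMMON_CARD_READY_IND by clearing the hibernate-resume flag and
 * running the card-ready handler, and forwards SLEEP_NOTIFY_IND to the
 * management packet receive path. Other message types are ignored.
 *
 * Return: 0 always.
 */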
int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg)
{
	u8 msg_type = msg[RSI_RX_DESC_MSG_TYPE_OFFSET];

	switch (msg_type) {
	case COMMON_CARD_READY_IND:
		rsi_dbg(INFO_ZONE, "common card ready received\n");
		common->hibernate_resume = false;
		rsi_handle_card_ready(common, msg);
		break;
	case SLEEP_NOTIFY_IND:
		rsi_dbg(INFO_ZONE, "sleep notify received\n");
		rsi_mgmt_pkt_recv(common, msg);
		break;
	}

	return 0;
}

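/**
 * rsi_map_coex_q() - Map a HAL queue number to a coex queue.
 * @hal_queue: HAL queue identifier from the TX path.
 *
 * Return: Matching rsi_coex_queues value, or RSI_COEX_Q_INVALID if the
 *         HAL queue is not handled by the coex layer.
 */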
static inline int rsi_map_coex_q(u8 hal_queue)
{
	switch (hal_queue) {
	case RSI_COEX_Q:
		return RSI_COEX_Q_COMMON;
	case RSI_WLAN_Q:
		return RSI_COEX_Q_WLAN;
	case RSI_BT_Q:
		return RSI_COEX_Q_BT;
	}
	return RSI_COEX_Q_INVALID;
}

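/**
 * rsi_coex_send_pkt() - TX entry point when coex mode is enabled.
 * @priv: Driver private data structure (struct rsi_common *).
 * @skb: Packet to be transmitted.
 * @hal_queue: HAL queue the packet is destined for.
 *
 * BT packets are queued to the coex TX thread, which serializes their
 * transmission. WLAN and common packets bypass the queues: if the
 * interface is down, non-internal packets are completed with an error
 * status; otherwise the packet is sent directly as a management or data
 * packet depending on its queue priority.
 *
 * Return: 0 on success, negative error code on failure.
 */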
int rsi_coex_send_pkt(void *priv, struct sk_buff *skb, u8 hal_queue)
{
	struct rsi_common *common = priv;
	struct rsi_coex_ctrl_block *coex_cb = common->coex_cb;
	struct skb_info *tx_params = NULL;
	enum rsi_coex_queues coex_q;
	int status;

	coex_q = rsi_map_coex_q(hal_queue);
	if (coex_q == RSI_COEX_Q_INVALID) {
		rsi_dbg(ERR_ZONE, "Invalid coex queue\n");
		return -EINVAL;
	}
	if (coex_q != RSI_COEX_Q_COMMON &&
	    coex_q != RSI_COEX_Q_WLAN) {
		skb_queue_tail(&coex_cb->coex_tx_qs[coex_q], skb);
		rsi_set_event(&coex_cb->coex_tx_thread.event);
		return 0;
	}
	if (common->iface_down) {
		tx_params =
			(struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data;

		if (!(tx_params->flags & INTERNAL_MGMT_PKT)) {
			rsi_indicate_tx_status(common->priv, skb, -EINVAL);
			return 0;
		}
	}

	/* Send packet to hal */
	if (skb->priority == MGMT_SOFT_Q)
		status = rsi_send_mgmt_pkt(common, skb);
	else
		status = rsi_send_data_pkt(common, skb);

	return status;
}

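/**
 * rsi_coex_attach() - Allocate and initialize the coex control block.
 * @common: Driver private data structure.
 *
 * Allocates the control block, initializes the per-queue skb lists and
 * the TX thread event, and starts the coex TX kthread.
 *
 * Return: 0 on success, -ENOMEM or -EINVAL on failure.
 */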
int rsi_coex_attach(struct rsi_common *common)
{
	struct rsi_coex_ctrl_block *coex_cb;
	int cnt;

	coex_cb = kzalloc(sizeof(*coex_cb), GFP_KERNEL);
	if (!coex_cb)
		return -ENOMEM;

	common->coex_cb = (void *)coex_cb;
	coex_cb->priv = common;

	/* Initialize co-ex queues */
	for (cnt = 0; cnt < NUM_COEX_TX_QUEUES; cnt++)
		skb_queue_head_init(&coex_cb->coex_tx_qs[cnt]);
	rsi_init_event(&coex_cb->coex_tx_thread.event);

	/* Initialize co-ex thread */
	if (rsi_create_kthread(common,
			       &coex_cb->coex_tx_thread,
			       rsi_coex_scheduler_thread,
			       "Coex-Tx-Thread")) {
		rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
		kfree(coex_cb);
		return -EINVAL;
	}
	return 0;
}

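/**
 * rsi_coex_detach() - Tear down the coex control block.
 * @common: Driver private data structure.
 *
 * Stops the coex TX kthread, drops any packets still queued for
 * transmission, and frees the control block.
 */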
void rsi_coex_detach(struct rsi_common *common)
{
	struct rsi_coex_ctrl_block *coex_cb = common->coex_cb;
	int cnt;

	rsi_kill_thread(&coex_cb->coex_tx_thread);

	for (cnt = 0; cnt < NUM_COEX_TX_QUEUES; cnt++)
		skb_queue_purge(&coex_cb->coex_tx_qs[cnt]);

	kfree(coex_cb);
}