1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6 #include <linux/skbuff.h>
7 #include <linux/ctype.h>
8 #include <net/mac80211.h>
9 #include <net/cfg80211.h>
10 #include <linux/completion.h>
11 #include <linux/if_ether.h>
12 #include <linux/types.h>
13 #include <linux/pci.h>
14 #include <linux/uuid.h>
15 #include <linux/time.h>
16 #include <linux/of.h>
17 #include "core.h"
18 #include "debug.h"
19 #include "mac.h"
20 #include "hw.h"
21 #include "peer.h"
22 #include "p2p.h"
23
/* Parse state for WMI_SERVICE_READY_EVENTID: records whether the service
 * bitmap TLV (WMI_TAG_ARRAY_UINT32) has already been consumed so it is
 * copied only once.
 */
struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};
27
/* Accumulator for DMA ring capability TLVs seen while parsing a
 * service ready (ext/ext2) event.
 */
struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;	/* number of entries in dma_ring_caps */
};
32
/* Host-order copy of the fixed part of the service ready ext event,
 * filled by ath12k_pull_svc_ready_ext().
 */
struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;	/* HE PPE thresholds */
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};
43
/* Parse state for WMI_SERVICE_READY_EXT_EVENTID. Pointers reference TLVs
 * inside the received skb; the *_done flags track which TLV arrays have
 * already been handled as the iterator walks the event.
 */
struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};
65
/* Host-order copy of the fixed parameters carried by the service ready
 * ext2 event (EHT caps, MSDU queue limits, etc.).
 */
struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};
80
/* Parse state for WMI_SERVICE_READY_EXT2_EVENTID; *_done flags mark the
 * TLV arrays already processed by the iterator.
 */
struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
};
88
/* Parse state for WMI_READY_EVENTID: counts extra MAC address entries
 * seen while iterating the event TLVs.
 */
struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};
92
/* Parse state for the DMA buffer release event: fixed params plus
 * pointers into the skb for the buffer-entry and metadata TLV arrays.
 */
struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;	/* entries behind buf_entry */
	u32 num_meta;		/* entries behind meta_data */
	bool buf_entry_done;
	bool meta_data_done;
};
102
/* Minimum acceptable payload length for a TLV tag; enforced by
 * ath12k_wmi_tlv_iter() against firmware-provided lengths.
 */
struct ath12k_wmi_tlv_policy {
	size_t min_len;
};
106
/* Parse state for the management rx event: fixed header plus a pointer
 * to the frame body TLV inside the skb.
 */
struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};
112
/* Per-tag minimum-length table, indexed by WMI TLV tag. A zero min_len
 * (or a tag outside this table) means no length validation is applied by
 * ath12k_wmi_tlv_iter().
 */
static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
};
175
ath12k_wmi_tlv_hdr(u32 cmd,u32 len)176 static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
177 {
178 return le32_encode_bits(cmd, WMI_TLV_TAG) |
179 le32_encode_bits(len, WMI_TLV_LEN);
180 }
181
ath12k_wmi_tlv_cmd_hdr(u32 cmd,u32 len)182 static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
183 {
184 return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
185 }
186
/* Populate the WMI resource config defaults for QCN9274-family targets.
 * Scales per-radio limits (vdevs, peers, beacon offload) by the number
 * of radios reported by the target.
 */
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
	config->num_peers = ab->num_radios *
		ath12k_core_get_max_peers_per_radio(ab);
	config->num_tids = ath12k_core_get_max_num_tids(ab);
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	/* chainmask: one bit per RF chain advertised by the target */
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	/* Raw mode (no hardware decap) is only used when explicitly enabled */
	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates host supports peer map v3 and unmap v2 support */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
	config->ema_max_vap_cnt = ab->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	/* reserve additional beacon-offload vdevs for EMA VAPs */
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}
238
ath12k_wmi_init_wcn7850(struct ath12k_base * ab,struct ath12k_wmi_resource_config_arg * config)239 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
240 struct ath12k_wmi_resource_config_arg *config)
241 {
242 config->num_vdevs = 4;
243 config->num_peers = 16;
244 config->num_tids = 32;
245
246 config->num_offload_peers = 3;
247 config->num_offload_reorder_buffs = 3;
248 config->num_peer_keys = TARGET_NUM_PEER_KEYS;
249 config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
250 config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
251 config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
252 config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
253 config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
254 config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
255 config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
256 config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
257 config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
258 config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
259 config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
260 config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
261 config->num_mcast_groups = 0;
262 config->num_mcast_table_elems = 0;
263 config->mcast2ucast_mode = 0;
264 config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
265 config->num_wds_entries = 0;
266 config->dma_burst_size = 0;
267 config->rx_skip_defrag_timeout_dup_detection_check = 0;
268 config->vow_config = TARGET_VOW_CONFIG;
269 config->gtk_offload_max_vdev = 2;
270 config->num_msdu_desc = 0x400;
271 config->beacon_tx_offload_max_vdev = 2;
272 config->rx_batchmode = TARGET_RX_BATCHMODE;
273
274 config->peer_map_unmap_version = 0x1;
275 config->use_pdev_id = 1;
276 config->max_frag_entries = 0xa;
277 config->num_tdls_vdevs = 0x1;
278 config->num_tdls_conn_table_entries = 8;
279 config->beacon_tx_offload_max_vdev = 0x2;
280 config->num_multicast_filter_entries = 0x20;
281 config->num_wow_filters = 0x16;
282 config->num_keep_alive_pattern = 0;
283 }
284
/* Map each hw mode to its priority constant (<mode>_PRI) so the parser
 * can pick the preferred hw mode among those advertised by firmware.
 */
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
298
/* Walk a firmware-supplied TLV buffer, invoking @iter for each TLV.
 *
 * Validates, for every element, that (a) enough bytes remain for a TLV
 * header, (b) the claimed payload length fits in the remaining buffer,
 * and (c) the payload meets the per-tag minimum length from
 * ath12k_wmi_tlv_policies. Stops and returns @iter's error if it
 * returns non-zero.
 *
 * Returns 0 on a fully-parsed buffer, -EINVAL on malformed TLVs.
 */
static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;	/* kept only for error-offset reporting */
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* firmware-claimed payload must fit in what is left */
		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		/* enforce the per-tag minimum payload length, if one is set */
		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
348
/* Iterator callback that indexes each TLV payload by tag into the
 * caller-supplied table (@data is const void *tb[WMI_TAG_MAX]).
 * A later TLV with the same tag overwrites the earlier pointer.
 */
static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	/* silently ignore tags beyond the table */
	if (tag >= WMI_TAG_MAX)
		return 0;

	tb[tag] = ptr;

	return 0;
}
359
/* Parse @ptr/@len into the tag-indexed table @tb (size WMI_TAG_MAX).
 * Returns 0 on success or the ath12k_wmi_tlv_iter() error.
 *
 * Fix: the first parameter was misleadingly named "ar", which in ath12k
 * denotes a struct ath12k radio; it is a struct ath12k_base, so rename
 * it "ab" per driver convention (internal name only, callers unaffected).
 */
static int ath12k_wmi_tlv_parse(struct ath12k_base *ab, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ab, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}
366
367 static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base * ab,struct sk_buff * skb,gfp_t gfp)368 ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
369 struct sk_buff *skb, gfp_t gfp)
370 {
371 const void **tb;
372 int ret;
373
374 tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
375 if (!tb)
376 return ERR_PTR(-ENOMEM);
377
378 ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
379 if (ret) {
380 kfree(tb);
381 return ERR_PTR(ret);
382 }
383
384 return tb;
385 }
386
/* Prepend the WMI command header to @skb and hand it to HTC without
 * waiting for tx credits. On HTC failure the header push is undone so
 * the caller may retry or free the skb unchanged.
 *
 * Returns 0 on success, -ENOMEM if there is no headroom, or the
 * ath12k_htc_send() error.
 */
static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int err;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));

	err = ath12k_htc_send(&ab->htc, wmi->eid, skb);
	if (err) {
		/* restore the skb to its pre-push state */
		skb_pull(skb, sizeof(struct wmi_cmd_hdr));
		return err;
	}

	return 0;
}
413
/* Send a WMI command, sleeping up to WMI_SEND_TIMEOUT_HZ for HTC tx
 * credits. Returns 0 on success, -EAGAIN on credit timeout, -ESHUTDOWN
 * if a crash flush is in progress, or the underlying send error.
 */
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	/* The condition is a GCC statement expression: each wakeup retries
	 * the send; the wait ends when the result is anything other than
	 * -EAGAIN (i.e. success, a hard error, or -ESHUTDOWN on crash
	 * flush) or the timeout elapses.
	 */
	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}
436
/* Copy the fixed part of the service ready ext event (@ptr) into @arg,
 * converting from firmware little-endian to host order.
 *
 * Returns 0 on success, -EINVAL if the TLV was absent (@ptr is NULL).
 */
static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	/* per-spatial-stream HE PPE threshold words */
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}
463
/* Extract the per-pdev MAC/PHY capabilities for (@hw_mode_id, @phy_id)
 * from the parsed service ready ext TLVs in @svc, filling @pdev and the
 * corresponding ab->fw_pdev slot.
 *
 * The mac_phy caps array is laid out per hw mode: the phy index of the
 * requested mode is found by summing the phys (fls of phy_id_map) of all
 * preceding hw modes, then adding @phy_id.
 *
 * Returns 0 on success, -EINVAL if the required TLVs are missing, the
 * hw mode is unknown, or @phy_id is out of range.
 */
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	/* accumulate phy_idx over hw modes preceding the requested one */
	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	/* requested hw mode not advertised by firmware */
	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	/* record this firmware pdev in the next free fw_pdev slot */
	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
	} else {
		return -EINVAL;
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		/* 6 GHz reuses the firmware's 5 GHz capability fields */
		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}
604
605 static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev * wmi_handle,const struct ath12k_wmi_soc_hal_reg_caps_params * reg_caps,const struct ath12k_wmi_hal_reg_caps_ext_params * ext_caps,u8 phy_idx,struct ath12k_wmi_hal_reg_capabilities_ext_arg * param)606 ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
607 const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
608 const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
609 u8 phy_idx,
610 struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
611 {
612 const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
613
614 if (!reg_caps || !ext_caps)
615 return -EINVAL;
616
617 if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
618 return -EINVAL;
619
620 ext_reg_cap = &ext_caps[phy_idx];
621
622 param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
623 param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
624 param->eeprom_reg_domain_ext =
625 le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
626 param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
627 param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
628 /* check if param->wireless_mode is needed */
629 param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
630 param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
631 param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
632 param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
633
634 return 0;
635 }
636
/* Copy the fixed WMI_TAG_SERVICE_READY_EVENT TLV (@evt_buf) into the
 * host-order target capability struct @cap.
 *
 * Returns 0 on success, -EINVAL if the TLV was absent.
 */
static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed by NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}
669
670 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
671 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
672 * 4-byte word.
673 */
/* Expand the packed firmware service bitmap into wmi_ab->svc_map.
 * @wmi_svc_bm has WMI_SERVICE_BM_SIZE words, each carrying
 * WMI_SERVICE_BITS_IN_SIZE32 service bits in its low bits.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	/* j counts services continuously across words; the inner do/while
	 * runs exactly WMI_SERVICE_BITS_IN_SIZE32 times per word because
	 * it exits when ++j wraps to the next word boundary.
	 */
	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}
686
/* TLV iterator callback for WMI_SERVICE_READY_EVENTID: pulls the fixed
 * target caps TLV and the first UINT32 array (the service bitmap).
 * Subsequent UINT32 arrays are ignored via the wmi_svc_bitmap_done flag.
 *
 * Returns 0 to continue iteration, -EINVAL on a malformed TLV.
 */
static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			/* bitmap must cover all WMI_SERVICE_BM_SIZE words */
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}
720
ath12k_service_ready_event(struct ath12k_base * ab,struct sk_buff * skb)721 static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
722 {
723 struct ath12k_wmi_svc_ready_parse svc_ready = { };
724 int ret;
725
726 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
727 ath12k_wmi_svc_rdy_parse,
728 &svc_ready);
729 if (ret) {
730 ath12k_warn(ab, "failed to parse tlv %d\n", ret);
731 return ret;
732 }
733
734 return 0;
735 }
736
ath12k_wmi_mgmt_get_freq(struct ath12k * ar,struct ieee80211_tx_info * info)737 static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
738 struct ieee80211_tx_info *info)
739 {
740 struct ath12k_base *ab = ar->ab;
741 u32 freq = 0;
742
743 if (ab->hw_params->single_pdev_only &&
744 ar->scan.is_roc &&
745 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
746 freq = ar->scan.roc_freq;
747
748 return freq;
749 }
750
/* Allocate a zeroed, 4-byte-rounded WMI skb with WMI_SKB_HEADROOM
 * reserved for the command header. Returns NULL on allocation failure.
 */
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct ath12k_base *ab = wmi_ab->ab;
	u32 aligned_len = roundup(len, 4);
	struct sk_buff *skb;

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + aligned_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	/* zero the payload area handed back to the caller */
	memset(skb_put(skb, aligned_len), 0, aligned_len);

	return skb;
}
770
/* Send a management frame to firmware via WMI_MGMT_TX_SEND_CMDID.
 * The frame payload is DMA-mapped by the caller (paddr in the skb cb);
 * at most WMI_MGMT_SEND_DOWNLD_LEN bytes are also copied inline into
 * the command's byte-array TLV.
 *
 * Returns 0 on success; on failure the command skb is freed here but
 * @frame remains owned by the caller.
 */
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

	/* inline download is capped; firmware DMAs the rest via paddr */
	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
816
/* ath12k_wmi_vdev_create - send WMI_VDEV_CREATE_CMDID to firmware
 * @ar: radio handle
 * @macaddr: MAC address to assign to the new vdev
 * @args: creation parameters (interface id, type/subtype, pdev id,
 *        MBSSID settings, per-band chain counts, stats id)
 *
 * The command TLV is followed by a WMI_TAG_ARRAY_STRUCT holding one
 * ath12k_wmi_vdev_txrx_streams_params entry per band (2 GHz then 5 GHz).
 * On send failure the skb is freed here; returns 0 or a negative errno.
 */
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	/* Mark the stats id valid only when the caller assigned one */
	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	/* First array entry: 2 GHz chain configuration */
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	/* Second array entry: 5 GHz chain configuration */
	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
896
/* Ask firmware to tear down the vdev identified by @vdev_id via
 * WMI_VDEV_DELETE_CMDID. The skb is released here only when the send
 * fails; returns 0 or a negative errno.
 */
int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *del_cmd;
	struct sk_buff *buf;
	int ret;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*del_cmd));
	if (!buf)
		return -ENOMEM;

	del_cmd = (struct wmi_vdev_delete_cmd *)buf->data;
	del_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						     sizeof(*del_cmd));
	del_cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, buf, WMI_VDEV_DELETE_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
	dev_kfree_skb(buf);
	return ret;
}
923
/* Send WMI_VDEV_STOP_CMDID for @vdev_id. Frees the skb on send failure.
 * Returns 0 or a negative errno.
 */
int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *stop_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*stop_cmd));
	if (!msg)
		return -ENOMEM;

	stop_cmd = (struct wmi_vdev_stop_cmd *)msg->data;
	stop_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						      sizeof(*stop_cmd));
	stop_cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_VDEV_STOP_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
	dev_kfree_skb(msg);
	return ret;
}
951
/* Send WMI_VDEV_DOWN_CMDID for @vdev_id. Frees the skb on send failure.
 * Returns 0 or a negative errno.
 */
int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *down_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*down_cmd));
	if (!msg)
		return -ENOMEM;

	down_cmd = (struct wmi_vdev_down_cmd *)msg->data;
	down_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						      sizeof(*down_cmd));
	down_cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_VDEV_DOWN_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
	dev_kfree_skb(msg);
	return ret;
}
979
/* ath12k_wmi_put_wmi_channel - encode vdev start args into a WMI channel TLV
 * @chan: channel params structure to populate (zeroed first)
 * @arg: vdev start request carrying freq, mode, flags and power limits
 *
 * Fills frequency, band centers, phy mode, capability/DFS flags and
 * regulatory power fields in the firmware's expected layout.
 */
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	u32 center_freq1 = arg->band_center_freq1;

	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(center_freq1);
	if (arg->mode == MODE_11BE_EHT160) {
		/* For EHT160, freq1 is shifted by 40 MHz toward the primary
		 * frequency (presumably the center of the 80 MHz segment
		 * holding the primary channel) while the original 160 MHz
		 * center is reported in freq2 -- NOTE(review): confirm this
		 * split-center convention against the firmware interface.
		 */
		if (arg->freq > center_freq1)
			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
		else
			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);

		chan->band_center_freq2 = cpu_to_le32(center_freq1);
	} else if (arg->mode == MODE_11BE_EHT80_80) {
		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
	} else {
		chan->band_center_freq2 = 0;
	}

	/* Phy mode plus one flag bit per optional capability */
	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	/* max_power is encoded twice: once per reg_info word */
	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}
1029
/* ath12k_wmi_vdev_start - send a vdev (re)start request to firmware
 * @ar: radio handle
 * @arg: start parameters (channel, intervals, streams, SSID, flags)
 * @restart: true for WMI_VDEV_RESTART_REQUEST_CMDID, false for
 *           WMI_VDEV_START_REQUEST_CMDID
 *
 * Buffer layout: start_request cmd TLV, WMI_TAG_CHANNEL TLV, then an
 * empty WMI_TAG_ARRAY_STRUCT for P2P NoA descriptors (the array header
 * must be present even when no descriptors follow). SSID and the
 * hidden-SSID/PMF flags are only programmed on a fresh start.
 * Frees the skb on send failure; returns 0 or a negative errno.
 */
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	/* Reject SSIDs that would overflow the fixed-size field */
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

	if (!restart) {
		/* SSID and privacy flags only apply to an initial start */
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	/* LDPC RX is always advertised */
	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	/* put_wmi_channel() memsets the struct, so the TLV header is
	 * written afterwards
	 */
	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	/* Zero-length NoA descriptor array header */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}
1116
ath12k_wmi_vdev_up(struct ath12k * ar,struct ath12k_wmi_vdev_up_params * params)1117 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
1118 {
1119 struct ath12k_wmi_pdev *wmi = ar->wmi;
1120 struct wmi_vdev_up_cmd *cmd;
1121 struct sk_buff *skb;
1122 int ret;
1123
1124 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1125 if (!skb)
1126 return -ENOMEM;
1127
1128 cmd = (struct wmi_vdev_up_cmd *)skb->data;
1129
1130 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
1131 sizeof(*cmd));
1132 cmd->vdev_id = cpu_to_le32(params->vdev_id);
1133 cmd->vdev_assoc_id = cpu_to_le32(params->aid);
1134
1135 ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
1136
1137 if (params->tx_bssid) {
1138 ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
1139 cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
1140 cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
1141 }
1142
1143 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1144 "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1145 params->vdev_id, params->aid, params->bssid);
1146
1147 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
1148 if (ret) {
1149 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
1150 dev_kfree_skb(skb);
1151 }
1152
1153 return ret;
1154 }
1155
ath12k_wmi_send_peer_create_cmd(struct ath12k * ar,struct ath12k_wmi_peer_create_arg * arg)1156 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
1157 struct ath12k_wmi_peer_create_arg *arg)
1158 {
1159 struct ath12k_wmi_pdev *wmi = ar->wmi;
1160 struct wmi_peer_create_cmd *cmd;
1161 struct sk_buff *skb;
1162 int ret;
1163
1164 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1165 if (!skb)
1166 return -ENOMEM;
1167
1168 cmd = (struct wmi_peer_create_cmd *)skb->data;
1169 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
1170 sizeof(*cmd));
1171
1172 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
1173 cmd->peer_type = cpu_to_le32(arg->peer_type);
1174 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1175
1176 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1177 "WMI peer create vdev_id %d peer_addr %pM\n",
1178 arg->vdev_id, arg->peer_addr);
1179
1180 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
1181 if (ret) {
1182 ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
1183 dev_kfree_skb(skb);
1184 }
1185
1186 return ret;
1187 }
1188
/* Remove the firmware peer entry for @peer_addr on @vdev_id
 * (WMI_PEER_DELETE_CMDID). Frees the skb on send failure; returns 0 or
 * a negative errno.
 */
int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *del_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*del_cmd));
	if (!msg)
		return -ENOMEM;

	del_cmd = (struct wmi_peer_delete_cmd *)msg->data;
	del_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						     sizeof(*del_cmd));
	del_cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(del_cmd->peer_macaddr.addr, peer_addr);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_PEER_DELETE_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
	dev_kfree_skb(msg);
	return ret;
}
1220
ath12k_wmi_send_pdev_set_regdomain(struct ath12k * ar,struct ath12k_wmi_pdev_set_regdomain_arg * arg)1221 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
1222 struct ath12k_wmi_pdev_set_regdomain_arg *arg)
1223 {
1224 struct ath12k_wmi_pdev *wmi = ar->wmi;
1225 struct wmi_pdev_set_regdomain_cmd *cmd;
1226 struct sk_buff *skb;
1227 int ret;
1228
1229 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1230 if (!skb)
1231 return -ENOMEM;
1232
1233 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1234 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
1235 sizeof(*cmd));
1236
1237 cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
1238 cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
1239 cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
1240 cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
1241 cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
1242 cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
1243 cmd->pdev_id = cpu_to_le32(arg->pdev_id);
1244
1245 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1246 "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
1247 arg->current_rd_in_use, arg->current_rd_2g,
1248 arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
1249
1250 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1251 if (ret) {
1252 ath12k_warn(ar->ab,
1253 "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1254 dev_kfree_skb(skb);
1255 }
1256
1257 return ret;
1258 }
1259
/* Set one firmware peer parameter (WMI_PEER_SET_PARAM_CMDID) for the
 * given peer/vdev. Frees the skb on send failure; returns 0 or a
 * negative errno.
 */
int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *sp_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*sp_cmd));
	if (!msg)
		return -ENOMEM;

	sp_cmd = (struct wmi_peer_set_param_cmd *)msg->data;
	sp_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						    sizeof(*sp_cmd));
	sp_cmd->vdev_id = cpu_to_le32(vdev_id);
	sp_cmd->param_id = cpu_to_le32(param_id);
	sp_cmd->param_value = cpu_to_le32(param_val);
	ether_addr_copy(sp_cmd->peer_macaddr.addr, peer_addr);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_PEER_SET_PARAM_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
	dev_kfree_skb(msg);
	return ret;
}
1292
/* ath12k_wmi_send_peer_flush_tids_cmd - flush queued frames for peer TIDs
 * @ar: radio handle
 * @peer_addr: peer MAC address
 * @peer_tid_bitmap: bitmap of TIDs to flush
 * @vdev_id: vdev the peer belongs to
 *
 * Sends WMI_PEER_FLUSH_TIDS_CMDID. Frees the skb on send failure;
 * returns 0 or a negative errno.
 */
int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1328
/* ath12k_wmi_peer_rx_reorder_queue_setup - point firmware at a RX reorder queue
 * @ar: radio handle
 * @vdev_id: vdev the peer belongs to
 * @addr: peer MAC address
 * @paddr: DMA address of the reorder queue descriptor
 * @tid: traffic identifier the queue serves (also used as queue number)
 * @ba_window_size_valid: non-zero when @ba_window_size should be applied
 * @ba_window_size: block-ack window size
 *
 * Sends WMI_PEER_REORDER_QUEUE_SETUP_CMDID with the queue's physical
 * address split into lo/hi 32-bit halves. Frees the skb on send
 * failure; returns 0 or a negative errno.
 */
int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1370
/* ath12k_wmi_rx_reord_queue_remove - tear down peer RX reorder queues
 * @ar: radio handle
 * @arg: peer MAC, vdev id and bitmap of TIDs whose queues are removed
 *
 * Sends WMI_PEER_REORDER_QUEUE_REMOVE_CMDID. Frees the skb on send
 * failure; returns 0 or a negative errno.
 *
 * NOTE(review): the debug and warn messages below lack a trailing
 * newline, unlike the rest of this file -- candidate cleanup.
 */
int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}
1406
/* Set one pdev-level firmware parameter (WMI_PDEV_SET_PARAM_CMDID).
 * Frees the skb on send failure; returns 0 or a negative errno.
 */
int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *sp_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*sp_cmd));
	if (!msg)
		return -ENOMEM;

	sp_cmd = (struct wmi_pdev_set_param_cmd *)msg->data;
	sp_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						    sizeof(*sp_cmd));
	sp_cmd->pdev_id = cpu_to_le32(pdev_id);
	sp_cmd->param_id = cpu_to_le32(param_id);
	sp_cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_PDEV_SET_PARAM_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
	dev_kfree_skb(msg);
	return ret;
}
1438
/* ath12k_wmi_pdev_set_ps_mode - enable/disable station powersave on a vdev
 * @ar: radio handle
 * @vdev_id: target vdev
 * @enable: powersave mode value to program
 *
 * Sends WMI_STA_POWERSAVE_MODE_CMDID. Frees the skb on send failure;
 * returns 0 or a negative errno.
 */
int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		/* Fixed copy/paste: the previous message blamed
		 * WMI_PDEV_SET_PARAM although the command sent here is
		 * WMI_STA_POWERSAVE_MODE.
		 */
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1468
/* Request firmware to suspend a pdev (WMI_PDEV_SUSPEND_CMDID) with the
 * given suspend option. Frees the skb on send failure; returns 0 or a
 * negative errno.
 */
int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
			    u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *sus_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*sus_cmd));
	if (!msg)
		return -ENOMEM;

	sus_cmd = (struct wmi_pdev_suspend_cmd *)msg->data;
	sus_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
						     sizeof(*sus_cmd));
	sus_cmd->suspend_opt = cpu_to_le32(suspend_opt);
	sus_cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_PDEV_SUSPEND_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
	dev_kfree_skb(msg);
	return ret;
}
1500
/* Request firmware to resume a suspended pdev (WMI_PDEV_RESUME_CMDID).
 * Frees the skb on send failure; returns 0 or a negative errno.
 */
int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *res_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*res_cmd));
	if (!msg)
		return -ENOMEM;

	res_cmd = (struct wmi_pdev_resume_cmd *)msg->data;
	res_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
						     sizeof(*res_cmd));
	res_cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_PDEV_RESUME_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
	dev_kfree_skb(msg);
	return ret;
}
1529
1530 /* TODO FW Support for the cmd is not available yet.
1531 * Can be tested once the command and corresponding
1532 * event is implemented in FW
1533 */
/* ath12k_wmi_pdev_bss_chan_info_request - query BSS channel survey info
 * @ar: radio handle
 * @type: kind of channel-info request to issue
 *
 * Sends WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID for this radio's pdev.
 * Frees the skb on send failure; returns 0 or a negative errno.
 */
int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
					  enum wmi_bss_chan_info_req_type type)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
						 sizeof(*cmd));
	cmd->req_type = cpu_to_le32(type);
	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bss chan info req type %d\n", type);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1566
/* Program one AP powersave parameter for a peer
 * (WMI_AP_PS_PEER_PARAM_CMDID). Frees the skb on send failure; returns
 * 0 or a negative errno.
 */
int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
					struct ath12k_wmi_ap_ps_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_ap_ps_peer_cmd *ps_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*ps_cmd));
	if (!msg)
		return -ENOMEM;

	ps_cmd = (struct wmi_ap_ps_peer_cmd *)msg->data;
	ps_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
						    sizeof(*ps_cmd));
	ps_cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ps_cmd->param = cpu_to_le32(arg->param);
	ps_cmd->value = cpu_to_le32(arg->value);
	ether_addr_copy(ps_cmd->peer_macaddr.addr, peer_addr);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
		   arg->vdev_id, peer_addr, arg->param, arg->value);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_AP_PS_PEER_PARAM_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab,
		    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
	dev_kfree_skb(msg);
	return ret;
}
1601
/* ath12k_wmi_set_sta_ps_param - set one station powersave parameter
 * @ar: radio handle
 * @vdev_id: target vdev
 * @param: parameter id
 * @param_value: value to program
 *
 * Sends WMI_STA_POWERSAVE_PARAM_CMDID. Frees the skb on send failure;
 * returns 0 or a negative errno.
 */
int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
				u32 param, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param = cpu_to_le32(param);
	cmd->value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set sta ps vdev_id %d param %d value %d\n",
		   vdev_id, param, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
	if (ret) {
		/* Added missing trailing newline (printk convention) */
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1634
/* ath12k_wmi_force_fw_hang_cmd - ask firmware to crash itself (debug aid)
 * @ar: radio handle
 * @type: hang type understood by firmware
 * @delay_time_ms: delay before the forced hang takes effect
 *
 * Sends WMI_FORCE_FW_HANG_CMDID. Frees the skb on send failure;
 * returns 0 or a negative errno.
 */
int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
						 len);

	cmd->type = cpu_to_le32(type);
	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);

	if (ret) {
		/* Lower-cased and newline-terminated to match every other
		 * warn message in this file.
		 */
		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}
1663
/* Set one vdev-level firmware parameter (WMI_VDEV_SET_PARAM_CMDID).
 * Frees the skb on send failure; returns 0 or a negative errno.
 */
int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
				  u32 param_id, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_param_cmd *sp_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*sp_cmd));
	if (!msg)
		return -ENOMEM;

	sp_cmd = (struct wmi_vdev_set_param_cmd *)msg->data;
	sp_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
						    sizeof(*sp_cmd));
	sp_cmd->vdev_id = cpu_to_le32(vdev_id);
	sp_cmd->param_id = cpu_to_le32(param_id);
	sp_cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, msg, WMI_VDEV_SET_PARAM_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab,
		    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
	dev_kfree_skb(msg);
	return ret;
}
1697
ath12k_wmi_send_pdev_temperature_cmd(struct ath12k * ar)1698 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1699 {
1700 struct ath12k_wmi_pdev *wmi = ar->wmi;
1701 struct wmi_get_pdev_temperature_cmd *cmd;
1702 struct sk_buff *skb;
1703 int ret;
1704
1705 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1706 if (!skb)
1707 return -ENOMEM;
1708
1709 cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1710 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1711 sizeof(*cmd));
1712 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1713
1714 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1715 "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1716
1717 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1718 if (ret) {
1719 ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1720 dev_kfree_skb(skb);
1721 }
1722
1723 return ret;
1724 }
1725
/* ath12k_wmi_send_bcn_offload_control_cmd - control beacon tx offload
 * @ar: radio handle
 * @vdev_id: target vdev
 * @bcn_ctrl_op: beacon control operation code passed to firmware
 *
 * Sends WMI_BCN_OFFLOAD_CTRL_CMDID. Frees the skb on send failure;
 * returns 0 or a negative errno.
 */
int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
					    u32 vdev_id, u32 bcn_ctrl_op)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_bcn_offload_ctrl_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
		   vdev_id, bcn_ctrl_op);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1758
/* ath12k_wmi_p2p_go_bcn_ie - install the P2P IE into GO beacons
 * @ar: radio handle
 * @vdev_id: P2P GO vdev
 * @p2p_ie: full P2P information element (tag, length, payload); the
 *          caller must pass a well-formed IE -- the length byte at
 *          p2p_ie[1] is trusted here without validation
 *
 * Sends WMI_P2P_GO_SET_BEACON_IE with the IE in a trailing byte-array
 * TLV, padded to a 4-byte boundary. Frees the skb on send failure;
 * returns 0 or a negative errno.
 */
int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
			     const u8 *p2p_ie)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
	size_t p2p_ie_len, aligned_len;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;

	/* IE total length = 2 header bytes + payload length byte */
	p2p_ie_len = p2p_ie[1] + 2;
	aligned_len = roundup(p2p_ie_len, sizeof(u32));

	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);

	ptr += sizeof(*cmd);
	tlv = ptr;
	/* NOTE(review): other byte-array TLVs in this file use
	 * ath12k_wmi_tlv_hdr() for their headers; this one uses the
	 * _cmd_ variant -- confirm the intended encoded length.
	 */
	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
					     aligned_len);
	memcpy(tlv->value, p2p_ie, p2p_ie_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1800
/* Send a beacon template to firmware via WMI_BCN_TMPL_CMDID.
 * @offs supplies the TIM / CSA countdown / MBSSID IE offsets inside @bcn.
 * @ema_args is optional: when non-NULL, the EMA beacon index/count are
 * encoded into cmd->ema_params with first/last markers.
 * Layout: fixed cmd TLV, then an (empty) bcn_prb_info TLV, then the beacon
 * bytes as a 4-byte padded ARRAY_BYTE TLV.
 * Returns 0 or negative errno; skb freed on send failure.
 */
ath12k_wmi_bcn_tmpl(struct ath12k * ar,u32 vdev_id,struct ieee80211_mutable_offsets * offs,struct sk_buff * bcn,struct ath12k_wmi_bcn_tmpl_ema_arg * ema_args)1801 int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
1802 struct ieee80211_mutable_offsets *offs,
1803 struct sk_buff *bcn,
1804 struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1805 {
1806 struct ath12k_wmi_pdev *wmi = ar->wmi;
1807 struct wmi_bcn_tmpl_cmd *cmd;
1808 struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1809 struct wmi_tlv *tlv;
1810 struct sk_buff *skb;
1811 u32 ema_params = 0;
1812 void *ptr;
1813 int ret, len;
1814 size_t aligned_len = roundup(bcn->len, 4);
1815
1816 len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1817
1818 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1819 if (!skb)
1820 return -ENOMEM;
1821
1822 cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1823 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1824 sizeof(*cmd));
1825 cmd->vdev_id = cpu_to_le32(vdev_id);
1826 cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
/* mac80211 provides two countdown offsets: [0] CSA, [1] extended CSA */
1827 cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
1828 cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
1829 cmd->buf_len = cpu_to_le32(bcn->len);
1830 cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
1831 if (ema_args) {
1832 u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
1833 u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
/* mark the first and last templates of the EMA profile set */
1834 if (ema_args->bcn_index == 0)
1835 u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
1836 if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
1837 u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
1838 cmd->ema_params = cpu_to_le32(ema_params);
1839 }
1840
1841 ptr = skb->data + sizeof(*cmd);
1842
/* mandatory-but-unused probe/beacon info TLV; caps and erp stay zero */
1843 bcn_prb_info = ptr;
1844 len = sizeof(*bcn_prb_info);
1845 bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
1846 len);
1847 bcn_prb_info->caps = 0;
1848 bcn_prb_info->erp = 0;
1849
1850 ptr += sizeof(*bcn_prb_info);
1851
1852 tlv = ptr;
1853 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
1854 memcpy(tlv->value, bcn->data, bcn->len);
1855
1856 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
1857 if (ret) {
1858 ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
1859 dev_kfree_skb(skb);
1860 }
1861
1862 return ret;
1863 }
1864
/* Install a cipher key for a peer on @arg->vdev_id via
 * WMI_VDEV_INSTALL_KEY_CMDID.  The key bytes travel in a 4-byte padded
 * ARRAY_BYTE TLV; the real key length is carried in cmd->key_len.
 * Returns 0 or negative errno; skb freed on send failure.
 */
ath12k_wmi_vdev_install_key(struct ath12k * ar,struct wmi_vdev_install_key_arg * arg)1865 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
1866 struct wmi_vdev_install_key_arg *arg)
1867 {
1868 struct ath12k_wmi_pdev *wmi = ar->wmi;
1869 struct wmi_vdev_install_key_cmd *cmd;
1870 struct wmi_tlv *tlv;
1871 struct sk_buff *skb;
1872 int ret, len, key_len_aligned;
1873
1874 /* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
1875 * length is specified in cmd->key_len.
1876 */
1877 key_len_aligned = roundup(arg->key_len, 4);
1878
1879 len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
1880
1881 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1882 if (!skb)
1883 return -ENOMEM;
1884
1885 cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1886 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
1887 sizeof(*cmd));
1888 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1889 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
1890 cmd->key_idx = cpu_to_le32(arg->key_idx);
1891 cmd->key_flags = cpu_to_le32(arg->key_flags);
1892 cmd->key_cipher = cpu_to_le32(arg->key_cipher);
1893 cmd->key_len = cpu_to_le32(arg->key_len);
1894 cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
1895 cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
1896
/* replay sequence counter is optional; 0 means "not provided" */
1897 if (arg->key_rsc_counter)
1898 cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
1899
1900 tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
1901 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
1902 memcpy(tlv->value, arg->key_data, arg->key_len);
1903
1904 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1905 "WMI vdev install key idx %d cipher %d len %d\n",
1906 arg->key_idx, arg->key_cipher, arg->key_len);
1907
1908 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1909 if (ret) {
1910 ath12k_warn(ar->ab,
1911 "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
1912 dev_kfree_skb(skb);
1913 }
1914
1915 return ret;
1916 }
1917
/* Translate the driver-side peer capability booleans in @arg into the
 * WMI_PEER_* / WMI_PEER_EXT_* bitmasks of the peer-assoc command.
 * WME-dependent flags (QoS, bandwidth, HT/VHT/HE/EHT, MIMO PS, TWT) are
 * only applied when arg->is_wme_set.  Security-related flags are then
 * layered on top: AUTH may be suppressed pending 4-way handshake unless
 * HW crypto is disabled, and safe mode strips the handshake requirements.
 */
ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd * cmd,struct ath12k_wmi_peer_assoc_arg * arg,bool hw_crypto_disabled)1918 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
1919 struct ath12k_wmi_peer_assoc_arg *arg,
1920 bool hw_crypto_disabled)
1921 {
1922 cmd->peer_flags = 0;
1923 cmd->peer_flags_ext = 0;
1924
1925 if (arg->is_wme_set) {
1926 if (arg->qos_flag)
1927 cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
1928 if (arg->apsd_flag)
1929 cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
1930 if (arg->ht_flag)
1931 cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
1932 if (arg->bw_40)
1933 cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
1934 if (arg->bw_80)
1935 cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
1936 if (arg->bw_160)
1937 cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
/* 320 MHz exists only in the extended flag word */
1938 if (arg->bw_320)
1939 cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
1940
1941 /* Typically if STBC is enabled for VHT it should be enabled
1942 * for HT as well
1943 **/
1944 if (arg->stbc_flag)
1945 cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
1946
1947 /* Typically if LDPC is enabled for VHT it should be enabled
1948 * for HT as well
1949 **/
1950 if (arg->ldpc_flag)
1951 cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
1952
1953 if (arg->static_mimops_flag)
1954 cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
1955 if (arg->dynamic_mimops_flag)
1956 cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
1957 if (arg->spatial_mux_flag)
1958 cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
1959 if (arg->vht_flag)
1960 cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
1961 if (arg->he_flag)
1962 cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
1963 if (arg->twt_requester)
1964 cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
1965 if (arg->twt_responder)
1966 cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
1967 if (arg->eht_flag)
1968 cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
1969 }
1970
1971 /* Suppress authorization for all AUTH modes that need 4-way handshake
1972 * (during re-association).
1973 * Authorization will be done for these modes on key installation.
1974 */
1975 if (arg->auth_flag)
1976 cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
1977 if (arg->need_ptk_4_way) {
1978 cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
/* with HW crypto active, defer AUTH until the key is installed */
1979 if (!hw_crypto_disabled)
1980 cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
1981 }
1982 if (arg->need_gtk_2_way)
1983 cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
1984 /* safe mode bypass the 4-way handshake */
1985 if (arg->safe_mode_enabled)
1986 cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
1987 WMI_PEER_NEED_GTK_2_WAY));
1988
1989 if (arg->is_pmf_enabled)
1990 cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
1991
1992 /* Disable AMSDU for station transmit, if user configures it */
1993 /* Disable AMSDU for AP transmit to 11n Stations, if user configures
1994 * it
1995 * if (arg->amsdu_disable) Add after FW support
1996 **/
1997
1998 /* Target asserts if node is marked HT and all MCS is set to 0.
1999 * Mark the node as non-HT if all the mcs rates are disabled through
2000 * iwpriv
2001 **/
2002 if (arg->peer_ht_rates.num_rates == 0)
2003 cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
2004 }
2005
/* Build and send WMI_PEER_ASSOC_CMDID describing a (re)associating peer.
 * TLV layout, in order (pointer walk below must match the length math):
 *   fixed cmd, legacy-rate bytes, HT-rate bytes, VHT rate-set struct,
 *   HE rate-set array, zero-length MLO header array, EHT rate-set array,
 *   zero-length ML partner-link array.
 * Returns 0 or negative errno; skb freed on send failure.
 */
ath12k_wmi_send_peer_assoc_cmd(struct ath12k * ar,struct ath12k_wmi_peer_assoc_arg * arg)2006 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
2007 struct ath12k_wmi_peer_assoc_arg *arg)
2008 {
2009 struct ath12k_wmi_pdev *wmi = ar->wmi;
2010 struct wmi_peer_assoc_complete_cmd *cmd;
2011 struct ath12k_wmi_vht_rate_set_params *mcs;
2012 struct ath12k_wmi_he_rate_set_params *he_mcs;
2013 struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2014 struct sk_buff *skb;
2015 struct wmi_tlv *tlv;
2016 void *ptr;
2017 u32 peer_legacy_rates_align;
2018 u32 peer_ht_rates_align;
2019 int i, ret, len;
2020
/* rate byte arrays are padded to 4-byte multiples for their TLVs */
2021 peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2022 sizeof(u32));
2023 peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2024 sizeof(u32));
2025
/* total message length; the two trailing TLV_HDR_SIZEs are the
 * zero-length MLO header and ML partner-link arrays
 */
2026 len = sizeof(*cmd) +
2027 TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2028 TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2029 sizeof(*mcs) + TLV_HDR_SIZE +
2030 (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2031 TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
2032 TLV_HDR_SIZE + TLV_HDR_SIZE;
2033
2034 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2035 if (!skb)
2036 return -ENOMEM;
2037
2038 ptr = skb->data;
2039
2040 cmd = ptr;
2041 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2042 sizeof(*cmd));
2043
2044 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2045
2046 cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2047 cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2048 cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2049
2050 ath12k_wmi_copy_peer_flags(cmd, arg,
2051 test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2052 &ar->ab->dev_flags));
2053
2054 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2055
2056 cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2057 cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2058 cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2059 cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2060 cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2061 cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2062 cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2063 cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2064
2065 /* Update 11ax capabilities */
2066 cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2067 cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2068 cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2069 cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2070 cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2071 for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2072 cmd->peer_he_cap_phy[i] =
2073 cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2074 cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2075 cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2076 for (i = 0; i < WMI_MAX_NUM_SS; i++)
2077 cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2078 cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2079
2080 /* Update 11be capabilities */
2081 memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2082 arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2083 0);
2084 memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2085 arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2086 0);
2087 memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2088 &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2089
2090 /* Update peer legacy rate information */
2091 ptr += sizeof(*cmd);
2092
2093 tlv = ptr;
2094 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2095
2096 ptr += TLV_HDR_SIZE;
2097
2098 cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2099 memcpy(ptr, arg->peer_legacy_rates.rates,
2100 arg->peer_legacy_rates.num_rates);
2101
2102 /* Update peer HT rate information */
2103 ptr += peer_legacy_rates_align;
2104
2105 tlv = ptr;
2106 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2107 ptr += TLV_HDR_SIZE;
2108 cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2109 memcpy(ptr, arg->peer_ht_rates.rates,
2110 arg->peer_ht_rates.num_rates);
2111
2112 /* VHT Rates */
2113 ptr += peer_ht_rates_align;
2114
2115 mcs = ptr;
2116
2117 mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2118 sizeof(*mcs));
2119
2120 cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2121
2122 /* Update bandwidth-NSS mapping */
2123 cmd->peer_bw_rxnss_override = 0;
2124 cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2125
/* VHT rate-set TLV is always present but only filled for VHT peers */
2126 if (arg->vht_capable) {
2127 mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2128 mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2129 mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2130 mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2131 }
2132
2133 /* HE Rates */
2134 cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2135 cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2136
2137 ptr += sizeof(*mcs);
2138
2139 len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2140
2141 tlv = ptr;
2142 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2143 ptr += TLV_HDR_SIZE;
2144
2145 /* Loop through the HE rate set */
2146 for (i = 0; i < arg->peer_he_mcs_count; i++) {
2147 he_mcs = ptr;
2148 he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2149 sizeof(*he_mcs));
2150
2151 he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2152 he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2153 ptr += sizeof(*he_mcs);
2154 }
2155
2156 /* MLO header tag with 0 length */
2157 len = 0;
2158 tlv = ptr;
2159 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2160 ptr += TLV_HDR_SIZE;
2161
2162 /* Loop through the EHT rate set */
2163 len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2164 tlv = ptr;
2165 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2166 ptr += TLV_HDR_SIZE;
2167
2168 for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2169 eht_mcs = ptr;
2170 eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
2171 sizeof(*eht_mcs));
2172
2173 eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2174 eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2175 ptr += sizeof(*eht_mcs);
2176 }
2177
2178 /* ML partner links tag with 0 length */
2179 len = 0;
2180 tlv = ptr;
2181 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2182 ptr += TLV_HDR_SIZE;
2183
/* NOTE(review): the __le32 cmd->* fields are passed to %x/%d directly
 * (no le32_to_cpu); values will print byte-swapped on big-endian hosts
 * and trip sparse — confirm whether that is acceptable for debug output.
 */
2184 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2185 "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
2186 cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2187 cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2188 cmd->peer_listen_intval, cmd->peer_ht_caps,
2189 cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2190 cmd->peer_mpdu_density,
2191 cmd->peer_vht_caps, cmd->peer_he_cap_info,
2192 cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2193 cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2194 cmd->peer_he_cap_phy[2],
2195 cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2196 cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2197 cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2198 cmd->peer_eht_cap_phy[2]);
2199
2200 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2201 if (ret) {
2202 ath12k_warn(ar->ab,
2203 "failed to send WMI_PEER_ASSOC_CMDID\n");
2204 dev_kfree_skb(skb);
2205 }
2206
2207 return ret;
2208 }
2209
ath12k_wmi_start_scan_init(struct ath12k * ar,struct ath12k_wmi_scan_req_arg * arg)2210 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2211 struct ath12k_wmi_scan_req_arg *arg)
2212 {
2213 /* setup commonly used values */
2214 arg->scan_req_id = 1;
2215 arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2216 arg->dwell_time_active = 50;
2217 arg->dwell_time_active_2g = 0;
2218 arg->dwell_time_passive = 150;
2219 arg->dwell_time_active_6g = 70;
2220 arg->dwell_time_passive_6g = 70;
2221 arg->min_rest_time = 50;
2222 arg->max_rest_time = 500;
2223 arg->repeat_probe_time = 0;
2224 arg->probe_spacing_time = 0;
2225 arg->idle_time = 0;
2226 arg->max_scan_time = 20000;
2227 arg->probe_delay = 5;
2228 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2229 WMI_SCAN_EVENT_COMPLETED |
2230 WMI_SCAN_EVENT_BSS_CHANNEL |
2231 WMI_SCAN_EVENT_FOREIGN_CHAN |
2232 WMI_SCAN_EVENT_DEQUEUED;
2233 arg->scan_f_chan_stat_evnt = 1;
2234 arg->num_bssid = 1;
2235
2236 /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
2237 * ZEROs in probe request
2238 */
2239 eth_broadcast_addr(arg->bssid_list[0].addr);
2240 }
2241
/* Translate the scan_ev_* / scan_f_* booleans in @arg into the
 * notify_scan_events and scan_ctrl_flags bitmasks of the start-scan
 * command, and fold in the adaptive dwell mode.  Flags are ORed on top
 * of whatever the caller already put in notify_scan_events.
 */
ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd * cmd,struct ath12k_wmi_scan_req_arg * arg)2242 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2243 struct ath12k_wmi_scan_req_arg *arg)
2244 {
2245 /* Scan events subscription */
2246 if (arg->scan_ev_started)
2247 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2248 if (arg->scan_ev_completed)
2249 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2250 if (arg->scan_ev_bss_chan)
2251 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2252 if (arg->scan_ev_foreign_chan)
2253 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2254 if (arg->scan_ev_dequeued)
2255 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2256 if (arg->scan_ev_preempted)
2257 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2258 if (arg->scan_ev_start_failed)
2259 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2260 if (arg->scan_ev_restarted)
2261 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2262 if (arg->scan_ev_foreign_chn_exit)
2263 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2264 if (arg->scan_ev_suspended)
2265 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2266 if (arg->scan_ev_resumed)
2267 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2268
2269 /** Set scan control flags */
2270 cmd->scan_ctrl_flags = 0;
2271 if (arg->scan_f_passive)
2272 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2273 if (arg->scan_f_strict_passive_pch)
2274 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2275 if (arg->scan_f_promisc_mode)
2276 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2277 if (arg->scan_f_capture_phy_err)
2278 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2279 if (arg->scan_f_half_rate)
2280 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2281 if (arg->scan_f_quarter_rate)
2282 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2283 if (arg->scan_f_cck_rates)
2284 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2285 if (arg->scan_f_ofdm_rates)
2286 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2287 if (arg->scan_f_chan_stat_evnt)
2288 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2289 if (arg->scan_f_filter_prb_req)
2290 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2291 if (arg->scan_f_bcast_probe)
2292 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2293 if (arg->scan_f_offchan_mgmt_tx)
2294 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2295 if (arg->scan_f_offchan_data_tx)
2296 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2297 if (arg->scan_f_force_active_dfs_chn)
2298 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2299 if (arg->scan_f_add_tpc_ie_in_probe)
2300 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2301 if (arg->scan_f_add_ds_ie_in_probe)
2302 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2303 if (arg->scan_f_add_spoofed_mac_in_probe)
2304 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2305 if (arg->scan_f_add_rand_seq_in_probe)
2306 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2307 if (arg->scan_f_en_ie_whitelist_in_probe)
2308 cmd->scan_ctrl_flags |=
2309 cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2310
/* adaptive dwell mode occupies a multi-bit field inside the flags word */
2311 cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2312 WMI_SCAN_DWELL_MODE_MASK);
2313 }
2314
ath12k_wmi_send_scan_start_cmd(struct ath12k * ar,struct ath12k_wmi_scan_req_arg * arg)2315 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2316 struct ath12k_wmi_scan_req_arg *arg)
2317 {
2318 struct ath12k_wmi_pdev *wmi = ar->wmi;
2319 struct wmi_start_scan_cmd *cmd;
2320 struct ath12k_wmi_ssid_params *ssid = NULL;
2321 struct ath12k_wmi_mac_addr_params *bssid;
2322 struct sk_buff *skb;
2323 struct wmi_tlv *tlv;
2324 void *ptr;
2325 int i, ret, len;
2326 u32 *tmp_ptr, extraie_len_with_pad = 0;
2327 struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2328 struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2329
2330 len = sizeof(*cmd);
2331
2332 len += TLV_HDR_SIZE;
2333 if (arg->num_chan)
2334 len += arg->num_chan * sizeof(u32);
2335
2336 len += TLV_HDR_SIZE;
2337 if (arg->num_ssids)
2338 len += arg->num_ssids * sizeof(*ssid);
2339
2340 len += TLV_HDR_SIZE;
2341 if (arg->num_bssid)
2342 len += sizeof(*bssid) * arg->num_bssid;
2343
2344 if (arg->num_hint_bssid)
2345 len += TLV_HDR_SIZE +
2346 arg->num_hint_bssid * sizeof(*hint_bssid);
2347
2348 if (arg->num_hint_s_ssid)
2349 len += TLV_HDR_SIZE +
2350 arg->num_hint_s_ssid * sizeof(*s_ssid);
2351
2352 len += TLV_HDR_SIZE;
2353 if (arg->extraie.len)
2354 extraie_len_with_pad =
2355 roundup(arg->extraie.len, sizeof(u32));
2356 if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2357 len += extraie_len_with_pad;
2358 } else {
2359 ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
2360 arg->extraie.len);
2361 extraie_len_with_pad = 0;
2362 }
2363
2364 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2365 if (!skb)
2366 return -ENOMEM;
2367
2368 ptr = skb->data;
2369
2370 cmd = ptr;
2371 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2372 sizeof(*cmd));
2373
2374 cmd->scan_id = cpu_to_le32(arg->scan_id);
2375 cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2376 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2377 cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2378 cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2379
2380 ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2381
2382 cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2383 cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2384 cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2385 cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2386 cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2387 cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2388 cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2389 cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2390 cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2391 cmd->idle_time = cpu_to_le32(arg->idle_time);
2392 cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2393 cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2394 cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2395 cmd->num_chan = cpu_to_le32(arg->num_chan);
2396 cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2397 cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2398 cmd->ie_len = cpu_to_le32(arg->extraie.len);
2399 cmd->n_probes = cpu_to_le32(arg->n_probes);
2400
2401 ptr += sizeof(*cmd);
2402
2403 len = arg->num_chan * sizeof(u32);
2404
2405 tlv = ptr;
2406 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2407 ptr += TLV_HDR_SIZE;
2408 tmp_ptr = (u32 *)ptr;
2409
2410 memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);
2411
2412 ptr += len;
2413
2414 len = arg->num_ssids * sizeof(*ssid);
2415 tlv = ptr;
2416 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2417
2418 ptr += TLV_HDR_SIZE;
2419
2420 if (arg->num_ssids) {
2421 ssid = ptr;
2422 for (i = 0; i < arg->num_ssids; ++i) {
2423 ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2424 memcpy(ssid->ssid, arg->ssid[i].ssid,
2425 arg->ssid[i].ssid_len);
2426 ssid++;
2427 }
2428 }
2429
2430 ptr += (arg->num_ssids * sizeof(*ssid));
2431 len = arg->num_bssid * sizeof(*bssid);
2432 tlv = ptr;
2433 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2434
2435 ptr += TLV_HDR_SIZE;
2436 bssid = ptr;
2437
2438 if (arg->num_bssid) {
2439 for (i = 0; i < arg->num_bssid; ++i) {
2440 ether_addr_copy(bssid->addr,
2441 arg->bssid_list[i].addr);
2442 bssid++;
2443 }
2444 }
2445
2446 ptr += arg->num_bssid * sizeof(*bssid);
2447
2448 len = extraie_len_with_pad;
2449 tlv = ptr;
2450 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2451 ptr += TLV_HDR_SIZE;
2452
2453 if (extraie_len_with_pad)
2454 memcpy(ptr, arg->extraie.ptr,
2455 arg->extraie.len);
2456
2457 ptr += extraie_len_with_pad;
2458
2459 if (arg->num_hint_s_ssid) {
2460 len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2461 tlv = ptr;
2462 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2463 ptr += TLV_HDR_SIZE;
2464 s_ssid = ptr;
2465 for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2466 s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2467 s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2468 s_ssid++;
2469 }
2470 ptr += len;
2471 }
2472
2473 if (arg->num_hint_bssid) {
2474 len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2475 tlv = ptr;
2476 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2477 ptr += TLV_HDR_SIZE;
2478 hint_bssid = ptr;
2479 for (i = 0; i < arg->num_hint_bssid; ++i) {
2480 hint_bssid->freq_flags =
2481 arg->hint_bssid[i].freq_flags;
2482 ether_addr_copy(&arg->hint_bssid[i].bssid.addr[0],
2483 &hint_bssid->bssid.addr[0]);
2484 hint_bssid++;
2485 }
2486 }
2487
2488 ret = ath12k_wmi_cmd_send(wmi, skb,
2489 WMI_START_SCAN_CMDID);
2490 if (ret) {
2491 ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2492 dev_kfree_skb(skb);
2493 }
2494
2495 return ret;
2496 }
2497
ath12k_wmi_send_scan_stop_cmd(struct ath12k * ar,struct ath12k_wmi_scan_cancel_arg * arg)2498 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2499 struct ath12k_wmi_scan_cancel_arg *arg)
2500 {
2501 struct ath12k_wmi_pdev *wmi = ar->wmi;
2502 struct wmi_stop_scan_cmd *cmd;
2503 struct sk_buff *skb;
2504 int ret;
2505
2506 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2507 if (!skb)
2508 return -ENOMEM;
2509
2510 cmd = (struct wmi_stop_scan_cmd *)skb->data;
2511
2512 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2513 sizeof(*cmd));
2514
2515 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2516 cmd->requestor = cpu_to_le32(arg->requester);
2517 cmd->scan_id = cpu_to_le32(arg->scan_id);
2518 cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2519 /* stop the scan with the corresponding scan_id */
2520 if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2521 /* Cancelling all scans */
2522 cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2523 } else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2524 /* Cancelling VAP scans */
2525 cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2526 } else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2527 /* Cancelling specific scan */
2528 cmd->req_type = WMI_SCAN_STOP_ONE;
2529 } else {
2530 ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
2531 arg->req_type);
2532 dev_kfree_skb(skb);
2533 return -EINVAL;
2534 }
2535
2536 ret = ath12k_wmi_cmd_send(wmi, skb,
2537 WMI_STOP_SCAN_CMDID);
2538 if (ret) {
2539 ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2540 dev_kfree_skb(skb);
2541 }
2542
2543 return ret;
2544 }
2545
/* Push the channel list to firmware via WMI_SCAN_CHAN_LIST_CMDID.
 * The full list is split across multiple messages if it exceeds the
 * per-message maximum; every message after the first carries the
 * "append" flag so firmware extends rather than replaces its list.
 * Consumes arg->nallchans (decremented to 0 on success).
 * Returns 0 or a negative errno; the current skb is freed on failure.
 */
ath12k_wmi_send_scan_chan_list_cmd(struct ath12k * ar,struct ath12k_wmi_scan_chan_list_arg * arg)2546 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2547 struct ath12k_wmi_scan_chan_list_arg *arg)
2548 {
2549 struct ath12k_wmi_pdev *wmi = ar->wmi;
2550 struct wmi_scan_chan_list_cmd *cmd;
2551 struct sk_buff *skb;
2552 struct ath12k_wmi_channel_params *chan_info;
2553 struct ath12k_wmi_channel_arg *channel_arg;
2554 struct wmi_tlv *tlv;
2555 void *ptr;
2556 int i, ret, len;
2557 u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2558 __le32 *reg1, *reg2;
2559
2560 channel_arg = &arg->channel[0];
2561 while (arg->nallchans) {
2562 len = sizeof(*cmd) + TLV_HDR_SIZE;
/* how many channel structs fit in one WMI message */
2563 max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2564 sizeof(*chan_info);
2565
2566 num_send_chans = min(arg->nallchans, max_chan_limit);
2567
2568 arg->nallchans -= num_send_chans;
2569 len += sizeof(*chan_info) * num_send_chans;
2570
2571 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2572 if (!skb)
2573 return -ENOMEM;
2574
2575 cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2576 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2577 sizeof(*cmd));
2578 cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2579 cmd->num_scan_chans = cpu_to_le32(num_send_chans);
/* follow-up chunks append to the list already sent */
2580 if (num_sends)
2581 cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2582
/* NOTE(review): cmd->pdev_id is __le32 passed to %d without
 * le32_to_cpu — prints byte-swapped on big-endian; debug-only.
 */
2583 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2584 "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2585 num_send_chans, len, cmd->pdev_id, num_sends);
2586
2587 ptr = skb->data + sizeof(*cmd);
2588
2589 len = sizeof(*chan_info) * num_send_chans;
2590 tlv = ptr;
/* NOTE(review): other ARRAY_STRUCT headers in this file use
 * ath12k_wmi_tlv_hdr(); cmd_hdr encodes len - TLV_HDR_SIZE —
 * confirm the shorter length is what firmware expects here.
 */
2591 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
2592 len);
2593 ptr += TLV_HDR_SIZE;
2594
2595 for (i = 0; i < num_send_chans; ++i) {
2596 chan_info = ptr;
2597 memset(chan_info, 0, sizeof(*chan_info));
2598 len = sizeof(*chan_info);
2599 chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2600 len);
2601
2602 reg1 = &chan_info->reg_info_1;
2603 reg2 = &chan_info->reg_info_2;
2604 chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2605 chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2606 chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2607
2608 if (channel_arg->is_chan_passive)
2609 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
/* highest supported PHY wins: HE > VHT > HT */
2610 if (channel_arg->allow_he)
2611 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2612 else if (channel_arg->allow_vht)
2613 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2614 else if (channel_arg->allow_ht)
2615 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2616 if (channel_arg->half_rate)
2617 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2618 if (channel_arg->quarter_rate)
2619 chan_info->info |=
2620 cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2621
2622 if (channel_arg->psc_channel)
2623 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2624
2625 if (channel_arg->dfs_set)
2626 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2627
2628 chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2629 WMI_CHAN_INFO_MODE);
2630 *reg1 |= le32_encode_bits(channel_arg->minpower,
2631 WMI_CHAN_REG_INFO1_MIN_PWR);
2632 *reg1 |= le32_encode_bits(channel_arg->maxpower,
2633 WMI_CHAN_REG_INFO1_MAX_PWR);
2634 *reg1 |= le32_encode_bits(channel_arg->maxregpower,
2635 WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2636 *reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2637 WMI_CHAN_REG_INFO1_REG_CLS);
2638 *reg2 |= le32_encode_bits(channel_arg->antennamax,
2639 WMI_CHAN_REG_INFO2_ANT_MAX);
2640
2641 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2642 "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2643 i, chan_info->mhz, chan_info->info);
2644
2645 ptr += sizeof(*chan_info);
2646
2647 channel_arg++;
2648 }
2649
2650 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2651 if (ret) {
2652 ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2653 dev_kfree_skb(skb);
2654 return ret;
2655 }
2656
2657 num_sends++;
2658 }
2659
2660 return 0;
2661 }
2662
/* Program per-AC WMM parameters (AIFS, CWmin/max, TXOP, ACM, no-ack)
 * for @vdev_id via WMI_VDEV_SET_WMM_PARAMS_CMDID.  One inline TLV is
 * filled per access category (BE/BK/VI/VO).
 * Returns 0 or a negative errno; the skb is freed on send failure.
 */
ath12k_wmi_send_wmm_update_cmd(struct ath12k * ar,u32 vdev_id,struct wmi_wmm_params_all_arg * param)2663 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2664 struct wmi_wmm_params_all_arg *param)
2665 {
2666 struct ath12k_wmi_pdev *wmi = ar->wmi;
2667 struct wmi_vdev_set_wmm_params_cmd *cmd;
2668 struct wmi_wmm_params *wmm_param;
2669 struct wmi_wmm_params_arg *wmi_wmm_arg;
2670 struct sk_buff *skb;
2671 int ret, ac;
2672
2673 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2674 if (!skb)
2675 return -ENOMEM;
2676
2677 cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2678 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2679 sizeof(*cmd));
2680
2681 cmd->vdev_id = cpu_to_le32(vdev_id);
/* 0 selects the default (non-broadcast) WMM parameter set */
2682 cmd->wmm_param_type = 0;
2683
2684 for (ac = 0; ac < WME_NUM_AC; ac++) {
/* NOTE(review): no default case — wmi_wmm_arg would be used
 * uninitialized if WME_NUM_AC ever grew beyond these four ACs.
 */
2685 switch (ac) {
2686 case WME_AC_BE:
2687 wmi_wmm_arg = &param->ac_be;
2688 break;
2689 case WME_AC_BK:
2690 wmi_wmm_arg = &param->ac_bk;
2691 break;
2692 case WME_AC_VI:
2693 wmi_wmm_arg = &param->ac_vi;
2694 break;
2695 case WME_AC_VO:
2696 wmi_wmm_arg = &param->ac_vo;
2697 break;
2698 }
2699
2700 wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
/* NOTE(review): the inner per-AC TLV reuses the outer command's
 * tag (WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) — confirm this is the tag
 * firmware expects for the embedded wmm_params structs.
 */
2701 wmm_param->tlv_header =
2702 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2703 sizeof(*wmm_param));
2704
2705 wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2706 wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2707 wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2708 wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2709 wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2710 wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2711
2712 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2713 "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2714 ac, wmm_param->aifs, wmm_param->cwmin,
2715 wmm_param->cwmax, wmm_param->txoplimit,
2716 wmm_param->acm, wmm_param->no_ack);
2717 }
2718 ret = ath12k_wmi_cmd_send(wmi, skb,
2719 WMI_VDEV_SET_WMM_PARAMS_CMDID);
2720 if (ret) {
2721 ath12k_warn(ar->ab,
2722 "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
2723 dev_kfree_skb(skb);
2724 }
2725
2726 return ret;
2727 }
2728
/* Ask firmware to enable DFS PHY-error offload processing on a pdev.
 *
 * Returns 0 on success or a negative error code; the buffer is released
 * here only when the send fails.
 */
int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
						  u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_dfs_phyerr_offload_cmd *cmd;
	struct sk_buff *buf;
	int err;

	/* Fixed-size command, a single TLV with no trailing payload. */
	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_dfs_phyerr_offload_cmd *)buf->data;
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
				       sizeof(*cmd));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);

	err = ath12k_wmi_cmd_send(wmi, buf,
				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
		dev_kfree_skb(buf);
	}

	return err;
}
2761
/* Push a BIOS-provided parameter blob to firmware.
 *
 * Layout: fixed command TLV followed by one WMI_TAG_ARRAY_BYTE TLV
 * carrying @buf (padded to a u32 boundary; cmd->length keeps the real
 * unpadded size).
 *
 * Returns 0 on success or a negative error code; the skb is freed here
 * only when the send fails.
 */
int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
			    const u8 *buf, size_t buf_len)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_pdev_set_bios_interface_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *ptr;
	u32 len, len_aligned;
	int ret;

	len_aligned = roundup(buf_len, sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->param_type_id = cpu_to_le32(param_id);
	cmd->length = cpu_to_le32(buf_len);

	ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
	ptr += TLV_HDR_SIZE;
	memcpy(ptr, buf, buf_len);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
				  skb,
				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
			    param_id, ret);
		dev_kfree_skb(skb);
	}

	/* Was "return 0", which reported success to the caller even after a
	 * failed send; propagate the error like the sibling BIOS helpers do.
	 */
	return ret;
}
2805
ath12k_wmi_set_bios_sar_cmd(struct ath12k_base * ab,const u8 * psar_table)2806 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
2807 {
2808 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
2809 struct wmi_pdev_set_bios_sar_table_cmd *cmd;
2810 struct wmi_tlv *tlv;
2811 struct sk_buff *skb;
2812 int ret;
2813 u8 *buf_ptr;
2814 u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
2815 const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
2816 const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
2817
2818 sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
2819 sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
2820 sizeof(u32));
2821 len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
2822 TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
2823
2824 skb = ath12k_wmi_alloc_skb(wmi_ab, len);
2825 if (!skb)
2826 return -ENOMEM;
2827
2828 cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
2829 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
2830 sizeof(*cmd));
2831 cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
2832 cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
2833 cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
2834
2835 buf_ptr = skb->data + sizeof(*cmd);
2836 tlv = (struct wmi_tlv *)buf_ptr;
2837 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
2838 sar_table_len_aligned);
2839 buf_ptr += TLV_HDR_SIZE;
2840 memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
2841
2842 buf_ptr += sar_table_len_aligned;
2843 tlv = (struct wmi_tlv *)buf_ptr;
2844 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
2845 sar_dbs_backoff_len_aligned);
2846 buf_ptr += TLV_HDR_SIZE;
2847 memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
2848
2849 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
2850 skb,
2851 WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
2852 if (ret) {
2853 ath12k_warn(ab,
2854 "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n",
2855 ret);
2856 dev_kfree_skb(skb);
2857 }
2858
2859 return ret;
2860 }
2861
ath12k_wmi_set_bios_geo_cmd(struct ath12k_base * ab,const u8 * pgeo_table)2862 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
2863 {
2864 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
2865 struct wmi_pdev_set_bios_geo_table_cmd *cmd;
2866 struct wmi_tlv *tlv;
2867 struct sk_buff *skb;
2868 int ret;
2869 u8 *buf_ptr;
2870 u32 len, sar_geo_len_aligned;
2871 const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
2872
2873 sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
2874 len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
2875
2876 skb = ath12k_wmi_alloc_skb(wmi_ab, len);
2877 if (!skb)
2878 return -ENOMEM;
2879
2880 cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
2881 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
2882 sizeof(*cmd));
2883 cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
2884 cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
2885
2886 buf_ptr = skb->data + sizeof(*cmd);
2887 tlv = (struct wmi_tlv *)buf_ptr;
2888 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
2889 buf_ptr += TLV_HDR_SIZE;
2890 memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
2891
2892 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
2893 skb,
2894 WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
2895 if (ret) {
2896 ath12k_warn(ab,
2897 "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
2898 ret);
2899 dev_kfree_skb(skb);
2900 }
2901
2902 return ret;
2903 }
2904
/* Tear down a block-ack session with a peer by sending WMI_DELBA_SEND.
 *
 * Returns 0 on success or a negative error code; the skb is released
 * here only when the send fails.
 */
int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 initiator, u32 reason)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_delba_send_cmd *cmd;
	struct sk_buff *buf;
	int err;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_delba_send_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->initiator = cpu_to_le32(initiator);
	cmd->reasoncode = cpu_to_le32(reason);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
		   vdev_id, mac, tid, initiator, reason);

	err = ath12k_wmi_cmd_send(wmi, buf, WMI_DELBA_SEND_CMDID);

	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
		dev_kfree_skb(buf);
	}

	return err;
}
2940
/* Tell firmware which status code to use when responding to a peer's
 * ADDBA request for the given TID.
 *
 * Returns 0 on success or a negative error code; the skb is released
 * here only when the send fails.
 */
int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			      u32 tid, u32 status)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_setresponse_cmd *cmd;
	struct sk_buff *buf;
	int err;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_addba_setresponse_cmd *)buf->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
				       sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->statuscode = cpu_to_le32(status);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
		   vdev_id, mac, tid, status);

	err = ath12k_wmi_cmd_send(wmi, buf, WMI_ADDBA_SET_RESP_CMDID);

	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
		dev_kfree_skb(buf);
	}

	return err;
}
2976
/* Initiate an ADDBA (block-ack setup) toward a peer for the given TID
 * with the requested reorder buffer size.
 *
 * Returns 0 on success or a negative error code; the skb is released
 * here only when the send fails.
 */
int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *buf;
	int err;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->buffersize = cpu_to_le32(buf_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	err = ath12k_wmi_cmd_send(wmi, buf, WMI_ADDBA_SEND_CMDID);

	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(buf);
	}

	return err;
}
3011
/* Clear any configured ADDBA response state for a peer on this vdev.
 *
 * Returns 0 on success or a negative error code; the skb is released
 * here only when the send fails.
 */
int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_clear_resp_cmd *cmd;
	struct sk_buff *buf;
	int err;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)buf->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
				       sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	err = ath12k_wmi_cmd_send(wmi, buf, WMI_ADDBA_CLEAR_RESP_CMDID);

	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(buf);
	}

	return err;
}
3044
ath12k_wmi_send_init_country_cmd(struct ath12k * ar,struct ath12k_wmi_init_country_arg * arg)3045 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3046 struct ath12k_wmi_init_country_arg *arg)
3047 {
3048 struct ath12k_wmi_pdev *wmi = ar->wmi;
3049 struct wmi_init_country_cmd *cmd;
3050 struct sk_buff *skb;
3051 int ret;
3052
3053 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3054 if (!skb)
3055 return -ENOMEM;
3056
3057 cmd = (struct wmi_init_country_cmd *)skb->data;
3058 cmd->tlv_header =
3059 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3060 sizeof(*cmd));
3061
3062 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3063
3064 switch (arg->flags) {
3065 case ALPHA_IS_SET:
3066 cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
3067 memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3068 break;
3069 case CC_IS_SET:
3070 cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3071 cmd->cc_info.country_code =
3072 cpu_to_le32(arg->cc_info.country_code);
3073 break;
3074 case REGDMN_IS_SET:
3075 cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3076 cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3077 break;
3078 default:
3079 ret = -EINVAL;
3080 goto out;
3081 }
3082
3083 ret = ath12k_wmi_cmd_send(wmi, skb,
3084 WMI_SET_INIT_COUNTRY_CMDID);
3085
3086 out:
3087 if (ret) {
3088 ath12k_warn(ar->ab,
3089 "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
3090 ret);
3091 dev_kfree_skb(skb);
3092 }
3093
3094 return ret;
3095 }
3096
/* Enable TWT (Target Wake Time) scheduling on the given pdev.
 *
 * Every scheduler tunable is filled from the ATH12K_TWT_DEF_* defaults;
 * only the pdev id varies between calls.
 *
 * Returns 0 on success or a negative error code; the skb is freed here
 * only when the send fails.
 */
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);
	/* Default firmware TWT scheduler tuning follows. */
	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
	cmd->congestion_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
	cmd->congestion_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
	cmd->congestion_thresh_critical =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
	cmd->interference_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
	cmd->interference_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
	cmd->no_of_bcast_mcast_slots =
		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
	cmd->remove_sta_slot_interval =
		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
	/* TODO add MBSSID support */
	cmd->mbss_support = 0;

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
3149
3150 int
ath12k_wmi_send_twt_disable_cmd(struct ath12k * ar,u32 pdev_id)3151 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3152 {
3153 struct ath12k_wmi_pdev *wmi = ar->wmi;
3154 struct ath12k_base *ab = wmi->wmi_ab->ab;
3155 struct wmi_twt_disable_params_cmd *cmd;
3156 struct sk_buff *skb;
3157 int ret, len;
3158
3159 len = sizeof(*cmd);
3160
3161 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3162 if (!skb)
3163 return -ENOMEM;
3164
3165 cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3166 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3167 len);
3168 cmd->pdev_id = cpu_to_le32(pdev_id);
3169
3170 ret = ath12k_wmi_cmd_send(wmi, skb,
3171 WMI_TWT_DISABLE_CMDID);
3172 if (ret) {
3173 ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
3174 dev_kfree_skb(skb);
3175 }
3176 return ret;
3177 }
3178
3179 int
ath12k_wmi_send_obss_spr_cmd(struct ath12k * ar,u32 vdev_id,struct ieee80211_he_obss_pd * he_obss_pd)3180 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3181 struct ieee80211_he_obss_pd *he_obss_pd)
3182 {
3183 struct ath12k_wmi_pdev *wmi = ar->wmi;
3184 struct ath12k_base *ab = wmi->wmi_ab->ab;
3185 struct wmi_obss_spatial_reuse_params_cmd *cmd;
3186 struct sk_buff *skb;
3187 int ret, len;
3188
3189 len = sizeof(*cmd);
3190
3191 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3192 if (!skb)
3193 return -ENOMEM;
3194
3195 cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3196 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3197 len);
3198 cmd->vdev_id = cpu_to_le32(vdev_id);
3199 cmd->enable = cpu_to_le32(he_obss_pd->enable);
3200 cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3201 cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3202
3203 ret = ath12k_wmi_cmd_send(wmi, skb,
3204 WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3205 if (ret) {
3206 ath12k_warn(ab,
3207 "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
3208 dev_kfree_skb(skb);
3209 }
3210 return ret;
3211 }
3212
/* Configure BSS-color collision detection for a vdev.
 *
 * @enable selects detection vs. disable; @bss_color is the current color
 * and @period the detection interval in ms.
 *
 * Returns 0 on success or a negative error code; the skb is freed here
 * only when the send fails.
 */
int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
				  u8 bss_color, u32 period,
				  bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
	cmd->current_bss_color = cpu_to_le32(bss_color);
	cmd->detection_period_ms = cpu_to_le32(period);
	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
	cmd->free_slot_expiry_time_ms = 0;
	cmd->flags = 0;

	/* Convert the __le32 command fields back to host order for logging;
	 * passing them raw printed byte-swapped values on big-endian hosts.
	 */
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   le32_to_cpu(cmd->vdev_id), le32_to_cpu(cmd->evt_type),
		   le32_to_cpu(cmd->current_bss_color),
		   le32_to_cpu(cmd->detection_period_ms),
		   le32_to_cpu(cmd->scan_period_ms));

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
3254
/* Enable or disable the BSS color change announcement feature on a vdev.
 *
 * Returns 0 on success or a negative error code; the skb is freed here
 * only when the send fails.
 */
int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
						bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = enable ? cpu_to_le32(1) : 0;

	/* Log the host-endian inputs; the cmd fields are __le32 and would
	 * print byte-swapped on big-endian hosts.
	 */
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   vdev_id, enable);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
3288
/* Install the FILS discovery frame template for a vdev.
 *
 * The template bytes from @tmpl follow the fixed command TLV as one
 * WMI_TAG_ARRAY_BYTE TLV padded to a 4-byte boundary; cmd->buf_len keeps
 * the unpadded length.
 *
 * Returns 0 on success or a negative error code; the skb is released
 * here only when the send fails.
 */
int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_fils_discovery_tmpl_cmd *cmd;
	size_t padded_len = roundup(tmpl->len, 4);
	struct wmi_tlv *tlv;
	struct sk_buff *buf;
	int total_len;
	void *pos;
	int err;

	total_len = sizeof(*cmd) + TLV_HDR_SIZE + padded_len;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	buf = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, total_len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	pos = buf->data + sizeof(*cmd);
	tlv = pos;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, padded_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	err = ath12k_wmi_cmd_send(ar->wmi, buf, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (err) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(buf);
	}
	return err;
}
3329
/* Install the probe response frame template for a vdev.
 *
 * Layout: fixed command TLV, a zeroed bcn_prb_info TLV, then the template
 * bytes as a WMI_TAG_ARRAY_BYTE TLV padded to a 4-byte boundary.
 * cmd->buf_len records the unpadded template length.
 *
 * Returns 0 on success or a negative error code; the skb is released
 * here only when the send fails.
 */
int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	size_t padded_len = roundup(tmpl->len, 4);
	struct ath12k_wmi_bcn_prb_info_params *prb_info;
	struct wmi_probe_tmpl_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *buf;
	int total_len;
	void *pos;
	int err;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	total_len = sizeof(*cmd) + sizeof(*prb_info) + TLV_HDR_SIZE + padded_len;

	buf = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, total_len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	pos = buf->data + sizeof(*cmd);

	/* The bcn_prb_info block is mandatory but carries no data here. */
	prb_info = pos;
	prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
						      sizeof(*prb_info));
	prb_info->caps = 0;
	prb_info->erp = 0;

	pos += sizeof(*prb_info);

	tlv = pos;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, padded_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	err = ath12k_wmi_cmd_send(ar->wmi, buf, WMI_PRB_TMPL_CMDID);
	if (err) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(buf);
	}
	return err;
}
3380
/* Enable FILS discovery or unsolicited broadcast probe response
 * transmission on a vdev at the given interval (in TUs).
 *
 * Returns 0 on success or a negative error code; the skb is released
 * here only when the send fails.
 */
int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
			      bool unsol_bcast_probe_resp_enabled)
{
	struct wmi_fils_discovery_cmd *cmd;
	int cmd_len = sizeof(*cmd);
	struct sk_buff *buf;
	int err;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set %s interval to %u TU\n",
		   vdev_id, unsol_bcast_probe_resp_enabled ?
		   "unsolicited broadcast probe response" : "FILS discovery",
		   interval);

	buf = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, cmd_len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
						 cmd_len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->interval = cpu_to_le32(interval);
	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);

	err = ath12k_wmi_cmd_send(ar->wmi, buf, WMI_ENABLE_FILS_CMDID);
	if (err) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
			    vdev_id);
		dev_kfree_skb(buf);
	}
	return err;
}
3415
3416 static void
ath12k_fill_band_to_mac_param(struct ath12k_base * soc,struct ath12k_wmi_pdev_band_arg * arg)3417 ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
3418 struct ath12k_wmi_pdev_band_arg *arg)
3419 {
3420 u8 i;
3421 struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3422 struct ath12k_pdev *pdev;
3423
3424 for (i = 0; i < soc->num_radios; i++) {
3425 pdev = &soc->pdevs[i];
3426 hal_reg_cap = &soc->hal_reg_cap[i];
3427 arg[i].pdev_id = pdev->pdev_id;
3428
3429 switch (pdev->cap.supported_bands) {
3430 case WMI_HOST_WLAN_2G_5G_CAP:
3431 arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3432 arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3433 break;
3434 case WMI_HOST_WLAN_2G_CAP:
3435 arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3436 arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3437 break;
3438 case WMI_HOST_WLAN_5G_CAP:
3439 arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3440 arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3441 break;
3442 default:
3443 break;
3444 }
3445 }
3446 }
3447
/* Translate the host-side resource configuration (@tg_cfg) into the
 * little-endian on-wire WMI resource config structure (@wmi_cfg) used by
 * the INIT command. Pure one-to-one field mapping plus a few flag fields
 * that are composed from multiple sources.
 */
static void
ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
				struct ath12k_wmi_resource_config_arg *tg_cfg)
{
	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
	wmi_cfg->num_offload_reorder_buffs =
		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
	wmi_cfg->roam_offload_max_ap_profiles =
		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
	wmi_cfg->num_tdls_conn_table_entries =
		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
	wmi_cfg->beacon_tx_offload_max_vdev =
		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
	wmi_cfg->num_multicast_filter_entries =
		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
	/* flag1 folds the ATF setting together with 64-bit bss channel info. */
	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
	/* Advertise reg CC ext event support as a single host-service bit. */
	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
3520
ath12k_init_cmd_send(struct ath12k_wmi_pdev * wmi,struct ath12k_wmi_init_cmd_arg * arg)3521 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3522 struct ath12k_wmi_init_cmd_arg *arg)
3523 {
3524 struct ath12k_base *ab = wmi->wmi_ab->ab;
3525 struct sk_buff *skb;
3526 struct wmi_init_cmd *cmd;
3527 struct ath12k_wmi_resource_config_params *cfg;
3528 struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3529 struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3530 struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3531 struct wmi_tlv *tlv;
3532 size_t ret, len;
3533 void *ptr;
3534 u32 hw_mode_len = 0;
3535 u16 idx;
3536
3537 if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3538 hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3539 (arg->num_band_to_mac * sizeof(*band_to_mac));
3540
3541 len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3542 (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3543
3544 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3545 if (!skb)
3546 return -ENOMEM;
3547
3548 cmd = (struct wmi_init_cmd *)skb->data;
3549
3550 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3551 sizeof(*cmd));
3552
3553 ptr = skb->data + sizeof(*cmd);
3554 cfg = ptr;
3555
3556 ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
3557
3558 cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3559 sizeof(*cfg));
3560
3561 ptr += sizeof(*cfg);
3562 host_mem_chunks = ptr + TLV_HDR_SIZE;
3563 len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3564
3565 for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3566 host_mem_chunks[idx].tlv_header =
3567 ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3568 len);
3569
3570 host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3571 host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3572 host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3573
3574 ath12k_dbg(ab, ATH12K_DBG_WMI,
3575 "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3576 arg->mem_chunks[idx].req_id,
3577 (u64)arg->mem_chunks[idx].paddr,
3578 arg->mem_chunks[idx].len);
3579 }
3580 cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3581 len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3582
3583 /* num_mem_chunks is zero */
3584 tlv = ptr;
3585 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3586 ptr += TLV_HDR_SIZE + len;
3587
3588 if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3589 hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3590 hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3591 sizeof(*hw_mode));
3592
3593 hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3594 hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3595
3596 ptr += sizeof(*hw_mode);
3597
3598 len = arg->num_band_to_mac * sizeof(*band_to_mac);
3599 tlv = ptr;
3600 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3601
3602 ptr += TLV_HDR_SIZE;
3603 len = sizeof(*band_to_mac);
3604
3605 for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3606 band_to_mac = (void *)ptr;
3607
3608 band_to_mac->tlv_header =
3609 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3610 len);
3611 band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3612 band_to_mac->start_freq =
3613 cpu_to_le32(arg->band_to_mac[idx].start_freq);
3614 band_to_mac->end_freq =
3615 cpu_to_le32(arg->band_to_mac[idx].end_freq);
3616 ptr += sizeof(*band_to_mac);
3617 }
3618 }
3619
3620 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
3621 if (ret) {
3622 ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
3623 dev_kfree_skb(skb);
3624 }
3625
3626 return ret;
3627 }
3628
ath12k_wmi_pdev_lro_cfg(struct ath12k * ar,int pdev_id)3629 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
3630 int pdev_id)
3631 {
3632 struct ath12k_wmi_pdev_lro_config_cmd *cmd;
3633 struct sk_buff *skb;
3634 int ret;
3635
3636 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3637 if (!skb)
3638 return -ENOMEM;
3639
3640 cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
3641 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
3642 sizeof(*cmd));
3643
3644 get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
3645 get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
3646
3647 cmd->pdev_id = cpu_to_le32(pdev_id);
3648
3649 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3650 "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
3651
3652 ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
3653 if (ret) {
3654 ath12k_warn(ar->ab,
3655 "failed to send lro cfg req wmi cmd\n");
3656 goto err;
3657 }
3658
3659 return 0;
3660 err:
3661 dev_kfree_skb(skb);
3662 return ret;
3663 }
3664
ath12k_wmi_wait_for_service_ready(struct ath12k_base * ab)3665 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
3666 {
3667 unsigned long time_left;
3668
3669 time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
3670 WMI_SERVICE_READY_TIMEOUT_HZ);
3671 if (!time_left)
3672 return -ETIMEDOUT;
3673
3674 return 0;
3675 }
3676
ath12k_wmi_wait_for_unified_ready(struct ath12k_base * ab)3677 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
3678 {
3679 unsigned long time_left;
3680
3681 time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
3682 WMI_SERVICE_READY_TIMEOUT_HZ);
3683 if (!time_left)
3684 return -ETIMEDOUT;
3685
3686 return 0;
3687 }
3688
ath12k_wmi_set_hw_mode(struct ath12k_base * ab,enum wmi_host_hw_mode_config_type mode)3689 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
3690 enum wmi_host_hw_mode_config_type mode)
3691 {
3692 struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
3693 struct sk_buff *skb;
3694 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3695 int len;
3696 int ret;
3697
3698 len = sizeof(*cmd);
3699
3700 skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3701 if (!skb)
3702 return -ENOMEM;
3703
3704 cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
3705
3706 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3707 sizeof(*cmd));
3708
3709 cmd->pdev_id = WMI_PDEV_ID_SOC;
3710 cmd->hw_mode_index = cpu_to_le32(mode);
3711
3712 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
3713 if (ret) {
3714 ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
3715 dev_kfree_skb(skb);
3716 }
3717
3718 return ret;
3719 }
3720
ath12k_wmi_cmd_init(struct ath12k_base * ab)3721 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
3722 {
3723 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3724 struct ath12k_wmi_init_cmd_arg arg = {};
3725
3726 if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
3727 ab->wmi_ab.svc_map))
3728 arg.res_cfg.is_reg_cc_ext_event_supported = true;
3729
3730 ab->hw_params->wmi_init(ab, &arg.res_cfg);
3731 ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
3732
3733 arg.num_mem_chunks = wmi_ab->num_mem_chunks;
3734 arg.hw_mode_id = wmi_ab->preferred_hw_mode;
3735 arg.mem_chunks = wmi_ab->mem_chunks;
3736
3737 if (ab->hw_params->single_pdev_only)
3738 arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
3739
3740 arg.num_band_to_mac = ab->num_radios;
3741 ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
3742
3743 ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
3744
3745 return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
3746 }
3747
ath12k_wmi_vdev_spectral_conf(struct ath12k * ar,struct ath12k_wmi_vdev_spectral_conf_arg * arg)3748 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
3749 struct ath12k_wmi_vdev_spectral_conf_arg *arg)
3750 {
3751 struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
3752 struct sk_buff *skb;
3753 int ret;
3754
3755 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3756 if (!skb)
3757 return -ENOMEM;
3758
3759 cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
3760 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
3761 sizeof(*cmd));
3762 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3763 cmd->scan_count = cpu_to_le32(arg->scan_count);
3764 cmd->scan_period = cpu_to_le32(arg->scan_period);
3765 cmd->scan_priority = cpu_to_le32(arg->scan_priority);
3766 cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
3767 cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
3768 cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
3769 cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
3770 cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
3771 cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
3772 cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
3773 cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
3774 cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
3775 cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
3776 cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
3777 cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
3778 cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
3779 cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
3780 cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
3781
3782 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3783 "WMI spectral scan config cmd vdev_id 0x%x\n",
3784 arg->vdev_id);
3785
3786 ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3787 WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
3788 if (ret) {
3789 ath12k_warn(ar->ab,
3790 "failed to send spectral scan config wmi cmd\n");
3791 goto err;
3792 }
3793
3794 return 0;
3795 err:
3796 dev_kfree_skb(skb);
3797 return ret;
3798 }
3799
/* Enable/trigger spectral scan on a vdev via
 * WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID.
 *
 * Returns 0 on success or a negative error code; the skb is freed here
 * on send failure.
 */
int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	struct ath12k_wmi_vdev_spectral_enable_cmd *en_cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*en_cmd));
	if (!skb)
		return -ENOMEM;

	en_cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
	en_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
						    sizeof(*en_cmd));

	en_cmd->vdev_id = cpu_to_le32(vdev_id);
	en_cmd->trigger_cmd = cpu_to_le32(trigger);
	en_cmd->enable_cmd = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral enable cmd vdev id 0x%x\n",
		   vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral enable wmi cmd\n");
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}
3836
ath12k_wmi_pdev_dma_ring_cfg(struct ath12k * ar,struct ath12k_wmi_pdev_dma_ring_cfg_arg * arg)3837 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
3838 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
3839 {
3840 struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
3841 struct sk_buff *skb;
3842 int ret;
3843
3844 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3845 if (!skb)
3846 return -ENOMEM;
3847
3848 cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
3849 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
3850 sizeof(*cmd));
3851
3852 cmd->pdev_id = cpu_to_le32(arg->pdev_id);
3853 cmd->module_id = cpu_to_le32(arg->module_id);
3854 cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
3855 cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
3856 cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
3857 cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
3858 cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
3859 cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
3860 cmd->num_elems = cpu_to_le32(arg->num_elems);
3861 cmd->buf_size = cpu_to_le32(arg->buf_size);
3862 cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
3863 cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
3864
3865 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3866 "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
3867 arg->pdev_id);
3868
3869 ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3870 WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
3871 if (ret) {
3872 ath12k_warn(ar->ab,
3873 "failed to send dma ring cfg req wmi cmd\n");
3874 goto err;
3875 }
3876
3877 return 0;
3878 err:
3879 dev_kfree_skb(skb);
3880 return ret;
3881 }
3882
/* Per-element validator for the DMA buffer release entry array: checks
 * the tag and bounds the count against what the fixed params announced.
 */
static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	u32 max_entries;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
		return -EPROTO;

	max_entries = le32_to_cpu(arg->fixed.num_buf_release_entry);
	if (arg->num_buf_entry >= max_entries)
		return -ENOBUFS;

	arg->num_buf_entry++;

	return 0;
}
3898
/* Per-element validator for the DMA buffer release meta data array:
 * checks the tag and bounds the count against the fixed params.
 */
static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	u32 max_meta;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
		return -EPROTO;

	max_meta = le32_to_cpu(arg->fixed.num_meta_data_entry);
	if (arg->num_meta >= max_meta)
		return -ENOBUFS;

	arg->num_meta++;

	return 0;
}
3915
/* Top-level TLV parser for the DMA buffer release event.
 *
 * The event carries one fixed-params TLV followed by two
 * WMI_TAG_ARRAY_STRUCT TLVs whose meaning is positional: the first
 * array holds the buffer entries, the second the spectral meta data.
 * The buf_entry_done/meta_data_done flags in @arg track which of the
 * two arrays the current TLV corresponds to.
 *
 * Returns 0 on success or a negative error from the nested iteration.
 */
static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
				    u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
	u32 pdev_id;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		fixed = ptr;
		arg->fixed = *fixed;
		/* Translate the firmware pdev id to host numbering, keeping
		 * the stored copy in little endian like the rest of fixed.
		 */
		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		if (!arg->buf_entry_done) {
			/* First array TLV: buffer entries */
			arg->num_buf_entry = 0;
			arg->buf_entry = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_entry_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
				return ret;
			}

			arg->buf_entry_done = true;
		} else if (!arg->meta_data_done) {
			/* Second array TLV: meta data entries */
			arg->num_meta = 0;
			arg->meta_data = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_meta_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
					    ret);
				return ret;
			}

			arg->meta_data_done = true;
		}
		break;
	default:
		/* Ignore unknown TLVs for forward compatibility */
		break;
	}
	return 0;
}
3968
ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base * ab,struct sk_buff * skb)3969 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
3970 struct sk_buff *skb)
3971 {
3972 struct ath12k_wmi_dma_buf_release_arg arg = {};
3973 struct ath12k_dbring_buf_release_event param;
3974 int ret;
3975
3976 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
3977 ath12k_wmi_dma_buf_parse,
3978 &arg);
3979 if (ret) {
3980 ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
3981 return;
3982 }
3983
3984 param.fixed = arg.fixed;
3985 param.buf_entry = arg.buf_entry;
3986 param.num_buf_entry = arg.num_buf_entry;
3987 param.meta_data = arg.meta_data;
3988 param.num_meta = arg.num_meta;
3989
3990 ret = ath12k_dbring_buffer_release_event(ab, ¶m);
3991 if (ret) {
3992 ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
3993 return;
3994 }
3995 }
3996
/* Per-element parser for the hw mode capabilities array.
 *
 * Counts advertised hw modes and accumulates the number of PHYs across
 * all modes into tot_phy_id, so later parsers know how many MAC/PHY
 * capability entries to expect.
 */
static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	u32 phy_map = 0;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
		return -ENOBUFS;

	/* NOTE(review): ptr appears to point at the hw_mode_id member (just
	 * past the TLV header), so container_of recovers the start of the
	 * enclosing struct — confirm against the TLV iterator contract.
	 */
	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
				   hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	/* fls() of the phy bitmap yields the PHY count only when the map is
	 * a contiguous run of low bits — presumably guaranteed by firmware;
	 * TODO confirm.
	 */
	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
	svc_rdy_ext->tot_phy_id += fls(phy_map);

	return 0;
}
4020
/* Walk the hw mode capabilities array, then select the most preferred
 * hw mode among those the firmware advertises (lower value in
 * ath12k_hw_mode_pri_map means higher priority), recording both the
 * chosen mode and its capability entry.
 *
 * Returns 0 on success, -EINVAL when no usable mode was found, or a
 * negative error from the nested TLV iteration.
 */
static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
				   u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	const struct ath12k_wmi_hw_mode_cap_params *cap;
	enum wmi_host_hw_mode_config_type mode, pref;
	int ret;
	u32 i;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = ptr;

	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
		cap = &svc_rdy_ext->hw_mode_caps[i];
		mode = le32_to_cpu(cap->hw_mode_id);

		if (mode >= WMI_HOST_HW_MODE_MAX)
			continue;

		pref = soc->wmi_ab.preferred_hw_mode;
		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *cap;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
	}

	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
		   soc->wmi_ab.preferred_hw_mode);

	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
		return -EINVAL;

	return 0;
}
4063
/* Per-element parser for the MAC/PHY capabilities array.
 *
 * On the first element, allocates a flat buffer sized for tot_phy_id
 * entries (GFP_ATOMIC, matching the rest of this parse path); each
 * element is then copied in at its index. The buffer is freed by the
 * service-ready-ext event handler.
 */
static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	/* Never copy more than the host structure can hold */
	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		/* NOTE(review): the allocation is tot_phy_id * len, but the
		 * memcpy destination below advances in full-struct strides;
		 * this only lines up when len == sizeof(the struct). Confirm
		 * firmware never sends shorter entries.
		 */
		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
						    GFP_ATOMIC);
		if (!svc_rdy_ext->mac_phy_caps)
			return -ENOMEM;
	}

	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}
4088
/* Per-element validator for the extended HAL register capabilities
 * array: checks the tag and bounds the count against num_phy.
 */
static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u32 num_phy;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return -EPROTO;

	num_phy = svc_rdy_ext->arg.num_phy;
	if (svc_rdy_ext->n_ext_hal_reg_caps >= num_phy)
		return -ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;

	return 0;
}
4104
/* Walk the extended HAL register capabilities array and pull one
 * register capability per PHY into soc->hal_reg_cap, indexed by phy_id.
 * Fixes a mangled '&reg_cap' token in the original source.
 *
 * Returns 0 on success or a negative error code.
 */
static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = ptr;
	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}

		/* phy_id indexes a fixed-size array; reject out-of-range ids */
		if (reg_cap.phy_id >= MAX_RADIOS) {
			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
			return -EINVAL;
		}

		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
	}
	return 0;
}
4143
/* Parse WMI_TAG_SOC_HAL_REG_CAPABILITIES from the service ready ext
 * event and extract per-radio MAC/PHY capabilities.
 *
 * Walks the phy_id_map of the preferred hw mode, pulling one radio's
 * capabilities per set bit. For single_pdev_only targets, every PHY's
 * capability is folded into pdevs[0] and num_radios is forced to 1.
 */
static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
						 u16 len, const void *ptr,
						 void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = ptr;
	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);

	soc->num_radios = 0;
	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
	soc->fw_pdev_count = 0;

	/* One iteration per PHY; num_radios doubles as the PHY index */
	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For single_pdev_only targets,
		 * save mac_phy capability in the same pdev
		 */
		if (soc->hw_params->single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	if (soc->hw_params->single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}
4194
/* Per-element counter for the DMA ring capabilities array; rejects any
 * element that is not a DMA ring capability TLV.
 */
static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *dma_parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return -EPROTO;

	dma_parse->n_dma_ring_caps++;

	return 0;
}
4207
/* Allocate ab->db_caps to hold num_cap direct-buffer ring capability
 * entries. kcalloc gives an overflow-checked multiplication and a
 * zeroed table; GFP_ATOMIC kept from the original (caller context is
 * the WMI event parse path — presumably may not sleep, confirm).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
					u32 num_cap)
{
	struct ath12k_dbring_cap *caps;

	caps = kcalloc(num_cap, sizeof(*caps), GFP_ATOMIC);
	if (!caps)
		return -ENOMEM;

	ab->db_caps = caps;
	ab->num_db_cap = num_cap;

	return 0;
}
4224
/* Free the direct-buffer ring capability table and reset the count so
 * a subsequent service-ready parse can repopulate it safely.
 */
static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
	ab->num_db_cap = 0;
}
4231
/* Parse the DMA ring capabilities array TLV: count the entries, allocate
 * ab->db_caps, then convert each firmware entry into host order. The
 * table is only populated once; later arrays (e.g. from the ext2 event)
 * are ignored with a warning.
 *
 * Returns 0 on success or a negative error; on a bad module id the
 * freshly allocated table is freed again.
 */
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
				    u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
	struct ath12k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
	/* The array TLV payload is a packed array of cap entries */
	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
				  ath12k_wmi_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	if (ab->num_db_cap) {
		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}

	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		/* module_id indexes fixed-size per-module state downstream */
		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
			ath12k_warn(ab, "Invalid module id %d\n",
				    le32_to_cpu(dma_caps[i].module_id));
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
		dir_buff_caps[i].pdev_id =
			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
	}

	return 0;

free_dir_buff:
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}
4286
/* Top-level TLV parser for the WMI service ready ext event.
 *
 * The event's WMI_TAG_ARRAY_STRUCT TLVs are positional; the *_done
 * flags in @data track which array the current TLV is, in firmware
 * order: hw mode caps, mac/phy caps, ext hal reg caps, two chainmask
 * arrays (skipped), oem dma ring caps (skipped), dma ring caps.
 *
 * Returns 0 on success or the first nested parser error.
 */
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->arg);
		if (ret) {
			ath12k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = ptr;
		svc_rdy_ext->arg.num_hw_modes =
			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
							    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_mac_phy_caps_parse,
						  svc_rdy_ext);
			if (ret) {
				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
				return ret;
			}

			svc_rdy_ext->mac_phy_done = true;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			/* chainmask combo array: acknowledged but unused */
			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			/* chainmask capability array: acknowledged but unused */
			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			/* OEM dma ring caps: acknowledged but unused */
			svc_rdy_ext->oem_dma_ring_cap_done = true;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = true;
		}
		break;

	default:
		break;
	}
	return 0;
}
4363
ath12k_service_ready_ext_event(struct ath12k_base * ab,struct sk_buff * skb)4364 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4365 struct sk_buff *skb)
4366 {
4367 struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4368 int ret;
4369
4370 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4371 ath12k_wmi_svc_rdy_ext_parse,
4372 &svc_rdy_ext);
4373 if (ret) {
4374 ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4375 goto err;
4376 }
4377
4378 if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4379 complete(&ab->wmi_ab.service_ready);
4380
4381 kfree(svc_rdy_ext.mac_phy_caps);
4382 return 0;
4383
4384 err:
4385 kfree(svc_rdy_ext.mac_phy_caps);
4386 ath12k_wmi_free_dbring_caps(ab);
4387 return ret;
4388 }
4389
ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev * wmi_handle,const void * ptr,struct ath12k_wmi_svc_rdy_ext2_arg * arg)4390 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
4391 const void *ptr,
4392 struct ath12k_wmi_svc_rdy_ext2_arg *arg)
4393 {
4394 const struct wmi_service_ready_ext2_event *ev = ptr;
4395
4396 if (!ev)
4397 return -EINVAL;
4398
4399 arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
4400 arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
4401 arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
4402 arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
4403 arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
4404 arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
4405 arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
4406 arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
4407 return 0;
4408 }
4409
/* Copy firmware-reported EHT capabilities for one band into the pdev's
 * band capability structure, converting each field from little endian.
 *
 * For 6 GHz, the 320 MHz support bit already present in
 * eht_cap_phy_info[0] is saved before the copy and OR'd back afterwards
 * so a flag set by an earlier parse step is not lost.
 */
static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
				      const __le32 cap_mac_info[],
				      const __le32 cap_phy_info[],
				      const __le32 supp_mcs[],
				      const struct ath12k_wmi_ppe_threshold_params *ppet,
				      __le32 cap_info_internal)
{
	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
	u32 support_320mhz;
	u8 i;

	if (band == NL80211_BAND_6GHZ)
		support_320mhz = cap_band->eht_cap_phy_info[0] &
				 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;

	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);

	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);

	if (band == NL80211_BAND_6GHZ)
		cap_band->eht_cap_phy_info[0] |= support_320mhz;

	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
	/* 160/320 MHz MCS sets only exist for the 5/6 GHz bands */
	if (band != NL80211_BAND_2GHZ) {
		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
	}

	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);

	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
}
4449
/* Populate a pdev's per-band EHT capabilities from one extended MAC/PHY
 * capability entry.
 *
 * For single_pdev_only targets the WMI_HOST_HW_MODE_SINGLE entry only
 * contributes the 6 GHz 320 MHz support bit; otherwise the supported
 * bands are looked up from the matching fw_pdev. The 6 GHz band is
 * populated from the 5 GHz capability arrays — presumably firmware
 * reports 5/6 GHz combined here; confirm against the WMI spec.
 *
 * Returns 0 on success, -EINVAL when no matching fw_pdev is found.
 */
static int
ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
				      const struct ath12k_wmi_caps_ext_params *caps,
				      struct ath12k_pdev *pdev)
{
	struct ath12k_band_cap *cap_band;
	u32 bands, support_320mhz;
	int i;

	if (ab->hw_params->single_pdev_only) {
		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
			/* Merge only the 320 MHz bit; other caps came from
			 * the preferred hw mode's entry.
			 */
			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
					 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
			cap_band->eht_cap_phy_info[0] |= support_320mhz;
			return 0;
		}

		for (i = 0; i < ab->fw_pdev_count; i++) {
			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];

			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
				bands = fw_pdev->supported_bands;
				break;
			}
		}

		if (i == ab->fw_pdev_count)
			return -EINVAL;
	} else {
		bands = pdev->cap.supported_bands;
	}

	if (bands & WMI_HOST_WLAN_2G_CAP) {
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
					  caps->eht_cap_mac_info_2ghz,
					  caps->eht_cap_phy_info_2ghz,
					  caps->eht_supp_mcs_ext_2ghz,
					  &caps->eht_ppet_2ghz,
					  caps->eht_cap_info_internal);
	}

	if (bands & WMI_HOST_WLAN_5G_CAP) {
		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
					  caps->eht_cap_mac_info_5ghz,
					  caps->eht_cap_phy_info_5ghz,
					  caps->eht_supp_mcs_ext_5ghz,
					  &caps->eht_ppet_5ghz,
					  caps->eht_cap_info_internal);

		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
					  caps->eht_cap_mac_info_5ghz,
					  caps->eht_cap_phy_info_5ghz,
					  caps->eht_supp_mcs_ext_5ghz,
					  &caps->eht_ppet_5ghz,
					  caps->eht_cap_info_internal);
	}

	return 0;
}
4511
/* Per-element handler for the extended MAC/PHY capabilities array:
 * locate the pdev this entry belongs to and apply its EHT caps.
 *
 * Fixes two defects in the original: the ath12k_warn() arguments were
 * swapped relative to the "for pdev %d: %d" format (ret was printed as
 * the pdev id and vice versa), and hw_mode_id (a __le32) was compared
 * to WMI_HOST_HW_MODE_SINGLE without byte-order conversion.
 */
static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
					   u16 len, const void *ptr,
					   void *data)
{
	const struct ath12k_wmi_caps_ext_params *caps = ptr;
	int i = 0, ret;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
		return -EPROTO;

	if (ab->hw_params->single_pdev_only) {
		/* Only entries for the preferred hw mode, or the SINGLE
		 * mode entry (used for 320 MHz merging), are relevant.
		 */
		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
		    le32_to_cpu(caps->hw_mode_id) != WMI_HOST_HW_MODE_SINGLE)
			return 0;
	} else {
		for (i = 0; i < ab->num_radios; i++) {
			if (ab->pdevs[i].pdev_id ==
			    ath12k_wmi_caps_ext_get_pdev_id(caps))
				break;
		}

		if (i == ab->num_radios)
			return -EINVAL;
	}

	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
	if (ret) {
		ath12k_warn(ab,
			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
			    ab->pdevs[i].pdev_id, ret);
		return ret;
	}

	return 0;
}
4547
/* Parse one TLV of the WMI_SERVICE_READY_EXT2 event.
 *
 * Invoked per-TLV by ath12k_wmi_tlv_iter(); @data carries the
 * struct ath12k_wmi_svc_rdy_ext2_parse state across invocations.
 *
 * The event contains several WMI_TAG_ARRAY_STRUCT TLVs in a fixed
 * firmware-defined order; the parse->*_done flags record which arrays
 * have already been consumed so each one is routed to the right handler.
 */
static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
		/* Fixed portion of the event: extract the ext2 parameters */
		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
						 &parse->arg);
		if (ret) {
			ath12k_warn(ab,
				    "failed to extract wmi service ready ext2 parameters: %d\n",
				    ret);
			return ret;
		}
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!parse->dma_ring_cap_done) {
			/* First array: DMA ring capabilities */
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &parse->dma_caps_parse);
			if (ret)
				return ret;

			parse->dma_ring_cap_done = true;
		} else if (!parse->spectral_bin_scaling_done) {
			/* TODO: This is a place-holder as WMI tag for
			 * spectral scaling is before
			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
			 */
			parse->spectral_bin_scaling_done = true;
		} else if (!parse->mac_phy_caps_ext_done) {
			/* Third array: per-pdev extended MAC PHY capabilities */
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_tlv_mac_phy_caps_ext,
						  parse);
			if (ret) {
				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
					    ret);
				return ret;
			}

			parse->mac_phy_caps_ext_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}
4601
ath12k_service_ready_ext2_event(struct ath12k_base * ab,struct sk_buff * skb)4602 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4603 struct sk_buff *skb)
4604 {
4605 struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4606 int ret;
4607
4608 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4609 ath12k_wmi_svc_rdy_ext2_parse,
4610 &svc_rdy_ext2);
4611 if (ret) {
4612 ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4613 goto err;
4614 }
4615
4616 complete(&ab->wmi_ab.service_ready);
4617
4618 return 0;
4619
4620 err:
4621 ath12k_wmi_free_dbring_caps(ab);
4622 return ret;
4623 }
4624
ath12k_pull_vdev_start_resp_tlv(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_vdev_start_resp_event * vdev_rsp)4625 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4626 struct wmi_vdev_start_resp_event *vdev_rsp)
4627 {
4628 const void **tb;
4629 const struct wmi_vdev_start_resp_event *ev;
4630 int ret;
4631
4632 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4633 if (IS_ERR(tb)) {
4634 ret = PTR_ERR(tb);
4635 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4636 return ret;
4637 }
4638
4639 ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4640 if (!ev) {
4641 ath12k_warn(ab, "failed to fetch vdev start resp ev");
4642 kfree(tb);
4643 return -EPROTO;
4644 }
4645
4646 *vdev_rsp = *ev;
4647
4648 kfree(tb);
4649 return 0;
4650 }
4651
4652 static struct ath12k_reg_rule
create_ext_reg_rules_from_wmi(u32 num_reg_rules,struct ath12k_wmi_reg_rule_ext_params * wmi_reg_rule)4653 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
4654 struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
4655 {
4656 struct ath12k_reg_rule *reg_rule_ptr;
4657 u32 count;
4658
4659 reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
4660 GFP_ATOMIC);
4661
4662 if (!reg_rule_ptr)
4663 return NULL;
4664
4665 for (count = 0; count < num_reg_rules; count++) {
4666 reg_rule_ptr[count].start_freq =
4667 le32_get_bits(wmi_reg_rule[count].freq_info,
4668 REG_RULE_START_FREQ);
4669 reg_rule_ptr[count].end_freq =
4670 le32_get_bits(wmi_reg_rule[count].freq_info,
4671 REG_RULE_END_FREQ);
4672 reg_rule_ptr[count].max_bw =
4673 le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4674 REG_RULE_MAX_BW);
4675 reg_rule_ptr[count].reg_power =
4676 le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4677 REG_RULE_REG_PWR);
4678 reg_rule_ptr[count].ant_gain =
4679 le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4680 REG_RULE_ANT_GAIN);
4681 reg_rule_ptr[count].flags =
4682 le32_get_bits(wmi_reg_rule[count].flag_info,
4683 REG_RULE_FLAGS);
4684 reg_rule_ptr[count].psd_flag =
4685 le32_get_bits(wmi_reg_rule[count].psd_power_info,
4686 REG_RULE_PSD_INFO);
4687 reg_rule_ptr[count].psd_eirp =
4688 le32_get_bits(wmi_reg_rule[count].psd_power_info,
4689 REG_RULE_PSD_EIRP);
4690 }
4691
4692 return reg_rule_ptr;
4693 }
4694
ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params * rule,u32 num_reg_rules)4695 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
4696 u32 num_reg_rules)
4697 {
4698 u8 num_invalid_5ghz_rules = 0;
4699 u32 count, start_freq;
4700
4701 for (count = 0; count < num_reg_rules; count++) {
4702 start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
4703
4704 if (start_freq >= ATH12K_MIN_6G_FREQ)
4705 num_invalid_5ghz_rules++;
4706 }
4707
4708 return num_invalid_5ghz_rules;
4709 }
4710
/* Parse a WMI_REG_CHAN_LIST_CC_EXT event into @reg_info.
 *
 * The event carries per-country regulatory rules for 2 GHz, 5 GHz and
 * the three 6 GHz AP power modes (LPI/SP/VLP) plus per-client-type
 * 6 GHz rules. The rule arrays follow the fixed event structure back
 * to back; each stage below advances ext_wmi_reg_rule past the rules
 * it consumed. Each parsed rule set is copied into freshly allocated
 * arrays owned by @reg_info.
 *
 * Returns 0 on success, -EPROTO/-EINVAL for malformed events, -ENOMEM
 * on allocation failure.
 */
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
						   struct sk_buff *skb,
						   struct ath12k_reg_info *reg_info)
{
	const void **tb;
	const struct wmi_reg_chan_list_cc_ext_event *ev;
	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
	u32 num_2g_reg_rules, num_5g_reg_rules;
	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
	u8 num_invalid_5ghz_ext_rules;
	u32 total_reg_rules = 0;
	int ret, i, j;

	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
		kfree(tb);
		return -EPROTO;
	}

	/* Copy the rule counts for every band / AP power mode / client type */
	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
	}

	/* Validate counts against host limits before any allocation */
	num_2g_reg_rules = reg_info->num_2g_reg_rules;
	total_reg_rules += num_2g_reg_rules;
	num_5g_reg_rules = reg_info->num_5g_reg_rules;
	total_reg_rules += num_5g_reg_rules;

	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
		kfree(tb);
		return -EINVAL;
	}

	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];

		if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
				    i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
			kfree(tb);
			return -EINVAL;
		}

		total_reg_rules += num_6g_reg_rules_ap[i];
	}

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
			reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];

		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
			reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];

		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
			reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];

		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
				    i);
			kfree(tb);
			return -EINVAL;
		}
	}

	if (!total_reg_rules) {
		ath12k_warn(ab, "No reg rules available\n");
		kfree(tb);
		return -EINVAL;
	}

	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);

	/* Fixed event fields: country/domain identification */
	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
	reg_info->num_phy = le32_to_cpu(ev->num_phy);
	reg_info->phy_id = le32_to_cpu(ev->phy_id);
	reg_info->ctry_code = le32_to_cpu(ev->country_id);
	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);

	/* Map the WMI status code to the host enum */
	switch (le32_to_cpu(ev->status_code)) {
	case WMI_REG_SET_CC_STATUS_PASS:
		reg_info->status_code = REG_SET_CC_STATUS_PASS;
		break;
	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
		break;
	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
		break;
	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
		break;
	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
		break;
	case WMI_REG_SET_CC_STATUS_FAIL:
		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
		break;
	}

	reg_info->is_ext_reg_event = true;

	/* Per-band min/max bandwidths */
	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
		   __func__, reg_info->alpha2, reg_info->dfs_region,
		   reg_info->min_bw_2g, reg_info->max_bw_2g,
		   reg_info->min_bw_5g, reg_info->max_bw_5g,
		   reg_info->phybitmap);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "num_2g_reg_rules %d num_5g_reg_rules %d",
		   num_2g_reg_rules, num_5g_reg_rules);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);

	/* The rule arrays start immediately after the fixed event struct
	 * plus its TLV header; each stage below consumes its rules and
	 * advances the cursor.
	 */
	ext_wmi_reg_rule =
		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
			+ sizeof(*ev)
			+ sizeof(struct wmi_tlv));

	/* Stage 1: 2 GHz rules */
	if (num_2g_reg_rules) {
		reg_info->reg_rules_2g_ptr =
			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_2g_ptr) {
			kfree(tb);
			ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n");
			return -ENOMEM;
		}
	}

	ext_wmi_reg_rule += num_2g_reg_rules;

	/* Firmware might include 6 GHz reg rule in 5 GHz rule list
	 * for few countries along with separate 6 GHz rule.
	 * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list
	 * causes intersect check to be true, and same rules will be
	 * shown multiple times in iw cmd.
	 * Hence, avoid parsing 6 GHz rule from 5 GHz reg rule list
	 */
	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
								       num_5g_reg_rules);

	if (num_invalid_5ghz_ext_rules) {
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
			   reg_info->alpha2, reg_info->num_5g_reg_rules,
			   num_invalid_5ghz_ext_rules);

		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
		reg_info->num_5g_reg_rules = num_5g_reg_rules;
	}

	/* Stage 2: 5 GHz rules (minus any duplicated 6 GHz entries) */
	if (num_5g_reg_rules) {
		reg_info->reg_rules_5g_ptr =
			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_5g_ptr) {
			kfree(tb);
			ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n");
			return -ENOMEM;
		}
	}

	/* We have adjusted the number of 5 GHz reg rules above. But still those
	 * many rules needs to be adjusted in ext_wmi_reg_rule.
	 *
	 * NOTE: num_invalid_5ghz_ext_rules will be 0 for rest other cases.
	 */
	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);

	/* Stage 3: 6 GHz AP rules, one array per power mode */
	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
		reg_info->reg_rules_6g_ap_ptr[i] =
			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
						      ext_wmi_reg_rule);

		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
			kfree(tb);
			ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n");
			return -ENOMEM;
		}

		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
	}

	/* Stage 4: 6 GHz client rules, per power mode and client type */
	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
			reg_info->reg_rules_6g_client_ptr[j][i] =
				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
							      ext_wmi_reg_rule);

			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
				kfree(tb);
				ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n");
				return -ENOMEM;
			}

			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
		}
	}

	/* Remaining fixed 6 GHz metadata */
	reg_info->client_type = le32_to_cpu(ev->client_type);
	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
		le32_to_cpu(ev->domain_code_6g_ap_lpi);
	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
		le32_to_cpu(ev->domain_code_6g_ap_sp);
	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
		le32_to_cpu(ev->domain_code_6g_ap_vlp);

	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
	}

	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
		   reg_info->client_type, reg_info->domain_code_6g_super_id);

	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");

	kfree(tb);
	return 0;
}
5017
ath12k_pull_peer_del_resp_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_peer_delete_resp_event * peer_del_resp)5018 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5019 struct wmi_peer_delete_resp_event *peer_del_resp)
5020 {
5021 const void **tb;
5022 const struct wmi_peer_delete_resp_event *ev;
5023 int ret;
5024
5025 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5026 if (IS_ERR(tb)) {
5027 ret = PTR_ERR(tb);
5028 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5029 return ret;
5030 }
5031
5032 ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5033 if (!ev) {
5034 ath12k_warn(ab, "failed to fetch peer delete resp ev");
5035 kfree(tb);
5036 return -EPROTO;
5037 }
5038
5039 memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5040
5041 peer_del_resp->vdev_id = ev->vdev_id;
5042 ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5043 ev->peer_macaddr.addr);
5044
5045 kfree(tb);
5046 return 0;
5047 }
5048
/* Extract the vdev id from a vdev delete response event.
 * Returns 0 on success, -EPROTO if the TLV is missing.
 */
static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id)
{
	const struct wmi_vdev_delete_resp_event *ev;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
	if (ev)
		*vdev_id = le32_to_cpu(ev->vdev_id);
	else
		ath12k_warn(ab, "failed to fetch vdev delete resp ev");

	kfree(tb);
	return ev ? 0 : -EPROTO;
}
5076
/* Extract vdev id and tx status from an offloaded beacon tx status
 * event. Returns 0 on success, -EPROTO if the TLV is missing.
 */
static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id, u32 *tx_status)
{
	const struct wmi_bcn_tx_status_event *ev;
	const void **tb;
	int ret = 0;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch bcn tx status ev");
		ret = -EPROTO;
		goto free_tb;
	}

	*vdev_id = le32_to_cpu(ev->vdev_id);
	*tx_status = le32_to_cpu(ev->tx_status);

free_tb:
	kfree(tb);
	return ret;
}
5105
/* Extract the vdev id from a vdev stopped event.
 * Returns 0 on success, -EPROTO if the TLV is missing.
 */
static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
					      u32 *vdev_id)
{
	const struct wmi_vdev_stopped_event *ev;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
	if (ev)
		*vdev_id = le32_to_cpu(ev->vdev_id);
	else
		ath12k_warn(ab, "failed to fetch vdev stop ev");

	kfree(tb);
	return ev ? 0 : -EPROTO;
}
5132
/* TLV iterator callback: remember the mgmt RX fixed header and the
 * first frame-buffer byte array; later byte arrays are ignored.
 */
static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_mgmt_rx_parse *parse = data;

	if (tag == WMI_TAG_MGMT_RX_HDR) {
		parse->fixed = ptr;
	} else if (tag == WMI_TAG_ARRAY_BYTE && !parse->frame_buf_done) {
		parse->frame_buf = ptr;
		parse->frame_buf_done = true;
	}

	return 0;
}
5152
/* Extract management-frame RX metadata and reposition the skb payload.
 *
 * Walks the event TLVs to find the fixed RX header (WMI_TAG_MGMT_RX_HDR)
 * and the frame bytes (first WMI_TAG_ARRAY_BYTE), copies the header
 * fields into @hdr, then reshapes @skb in place so its data starts at
 * the 802.11 frame and its length equals hdr->buf_len.
 *
 * Returns 0 on success or a negative error code.
 */
static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
					  struct sk_buff *skb,
					  struct ath12k_wmi_mgmt_rx_arg *hdr)
{
	struct wmi_tlv_mgmt_rx_parse parse = { };
	const struct ath12k_wmi_mgmt_rx_params *ev;
	const u8 *frame;
	int i, ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_tlv_mgmt_rx_parse,
				  &parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
		return ret;
	}

	ev = parse.fixed;
	frame = parse.frame_buf;

	/* Both the fixed header and the frame-buffer TLV are mandatory */
	if (!ev || !frame) {
		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
		return -EPROTO;
	}

	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
	hdr->channel = le32_to_cpu(ev->channel);
	hdr->snr = le32_to_cpu(ev->snr);
	hdr->rate = le32_to_cpu(ev->rate);
	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
	hdr->buf_len = le32_to_cpu(ev->buf_len);
	hdr->status = le32_to_cpu(ev->status);
	hdr->flags = le32_to_cpu(ev->flags);
	hdr->rssi = a_sle32_to_cpu(ev->rssi);	/* signed field */
	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);

	/* Per-chain RSSI values */
	for (i = 0; i < ATH_MAX_ANTENNA; i++)
		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);

	/* Reject events whose claimed frame length exceeds the skb */
	if (skb->len < (frame - skb->data) + hdr->buf_len) {
		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, hdr->buf_len);

	return 0;
}
5206
/* Complete a management-frame TX: look up the pending msdu by @desc_id,
 * unmap its DMA buffer, report TX status to mac80211 and update the
 * pending-mgmt counter.
 *
 * @status: 0 means the frame was acked (when an ACK was expected).
 * Returns 0 on success, -ENOENT if @desc_id has no pending msdu.
 */
static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
				    u32 status)
{
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;
	int num_mgmt;

	/* Lookup and removal must both happen under the idr lock */
	spin_lock_bh(&ar->txmgmt_idr_lock);
	msdu = idr_find(&ar->txmgmt_idr, desc_id);

	if (!msdu) {
		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
			    desc_id);
		spin_unlock_bh(&ar->txmgmt_idr_lock);
		return -ENOENT;
	}

	idr_remove(&ar->txmgmt_idr, desc_id);
	spin_unlock_bh(&ar->txmgmt_idr_lock);

	skb_cb = ATH12K_SKB_CB(msdu);
	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status*/
	info->status.rates[0].idx = -1;

	/* Mark ACKed only if an ACK was expected and firmware reported success */
	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
		info->flags |= IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);

	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);

	/* WARN when we received this event without doing any mgmt tx */
	if (num_mgmt < 0)
		WARN_ON_ONCE(1);

	/* Wake waiters (e.g. vdev teardown) once the queue drains */
	if (!num_mgmt)
		wake_up(&ar->txmgmt_empty_waitq);

	return 0;
}
5253
ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_mgmt_tx_compl_event * param)5254 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
5255 struct sk_buff *skb,
5256 struct wmi_mgmt_tx_compl_event *param)
5257 {
5258 const void **tb;
5259 const struct wmi_mgmt_tx_compl_event *ev;
5260 int ret;
5261
5262 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5263 if (IS_ERR(tb)) {
5264 ret = PTR_ERR(tb);
5265 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5266 return ret;
5267 }
5268
5269 ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
5270 if (!ev) {
5271 ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
5272 kfree(tb);
5273 return -EPROTO;
5274 }
5275
5276 param->pdev_id = ev->pdev_id;
5277 param->desc_id = ev->desc_id;
5278 param->status = ev->status;
5279
5280 kfree(tb);
5281 return 0;
5282 }
5283
ath12k_wmi_event_scan_started(struct ath12k * ar)5284 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
5285 {
5286 lockdep_assert_held(&ar->data_lock);
5287
5288 switch (ar->scan.state) {
5289 case ATH12K_SCAN_IDLE:
5290 case ATH12K_SCAN_RUNNING:
5291 case ATH12K_SCAN_ABORTING:
5292 ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5293 ath12k_scan_state_str(ar->scan.state),
5294 ar->scan.state);
5295 break;
5296 case ATH12K_SCAN_STARTING:
5297 ar->scan.state = ATH12K_SCAN_RUNNING;
5298
5299 if (ar->scan.is_roc)
5300 ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
5301
5302 complete(&ar->scan.started);
5303 break;
5304 }
5305 }
5306
ath12k_wmi_event_scan_start_failed(struct ath12k * ar)5307 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
5308 {
5309 lockdep_assert_held(&ar->data_lock);
5310
5311 switch (ar->scan.state) {
5312 case ATH12K_SCAN_IDLE:
5313 case ATH12K_SCAN_RUNNING:
5314 case ATH12K_SCAN_ABORTING:
5315 ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5316 ath12k_scan_state_str(ar->scan.state),
5317 ar->scan.state);
5318 break;
5319 case ATH12K_SCAN_STARTING:
5320 complete(&ar->scan.started);
5321 __ath12k_mac_scan_finish(ar);
5322 break;
5323 }
5324 }
5325
ath12k_wmi_event_scan_completed(struct ath12k * ar)5326 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
5327 {
5328 lockdep_assert_held(&ar->data_lock);
5329
5330 switch (ar->scan.state) {
5331 case ATH12K_SCAN_IDLE:
5332 case ATH12K_SCAN_STARTING:
5333 /* One suspected reason scan can be completed while starting is
5334 * if firmware fails to deliver all scan events to the host,
5335 * e.g. when transport pipe is full. This has been observed
5336 * with spectral scan phyerr events starving wmi transport
5337 * pipe. In such case the "scan completed" event should be (and
5338 * is) ignored by the host as it may be just firmware's scan
5339 * state machine recovering.
5340 */
5341 ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5342 ath12k_scan_state_str(ar->scan.state),
5343 ar->scan.state);
5344 break;
5345 case ATH12K_SCAN_RUNNING:
5346 case ATH12K_SCAN_ABORTING:
5347 __ath12k_mac_scan_finish(ar);
5348 break;
5349 }
5350 }
5351
ath12k_wmi_event_scan_bss_chan(struct ath12k * ar)5352 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
5353 {
5354 lockdep_assert_held(&ar->data_lock);
5355
5356 switch (ar->scan.state) {
5357 case ATH12K_SCAN_IDLE:
5358 case ATH12K_SCAN_STARTING:
5359 ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5360 ath12k_scan_state_str(ar->scan.state),
5361 ar->scan.state);
5362 break;
5363 case ATH12K_SCAN_RUNNING:
5364 case ATH12K_SCAN_ABORTING:
5365 ar->scan_channel = NULL;
5366 break;
5367 }
5368 }
5369
/* Handle a firmware "on foreign channel" notification: record the
 * current off-channel and, for remain-on-channel requests, complete
 * the on-channel waiter when the target frequency is reached.
 */
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);

	lockdep_assert_held(&ar->data_lock);

	if (ar->scan.state != ATH12K_SCAN_RUNNING &&
	    ar->scan.state != ATH12K_SCAN_ABORTING) {
		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		return;
	}

	ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);

	if (ar->scan.is_roc && ar->scan.roc_freq == freq)
		complete(&ar->scan.on_channel);
}
5393
5394 static const char *
ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,enum wmi_scan_completion_reason reason)5395 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5396 enum wmi_scan_completion_reason reason)
5397 {
5398 switch (type) {
5399 case WMI_SCAN_EVENT_STARTED:
5400 return "started";
5401 case WMI_SCAN_EVENT_COMPLETED:
5402 switch (reason) {
5403 case WMI_SCAN_REASON_COMPLETED:
5404 return "completed";
5405 case WMI_SCAN_REASON_CANCELLED:
5406 return "completed [cancelled]";
5407 case WMI_SCAN_REASON_PREEMPTED:
5408 return "completed [preempted]";
5409 case WMI_SCAN_REASON_TIMEDOUT:
5410 return "completed [timedout]";
5411 case WMI_SCAN_REASON_INTERNAL_FAILURE:
5412 return "completed [internal err]";
5413 case WMI_SCAN_REASON_MAX:
5414 break;
5415 }
5416 return "completed [unknown]";
5417 case WMI_SCAN_EVENT_BSS_CHANNEL:
5418 return "bss channel";
5419 case WMI_SCAN_EVENT_FOREIGN_CHAN:
5420 return "foreign channel";
5421 case WMI_SCAN_EVENT_DEQUEUED:
5422 return "dequeued";
5423 case WMI_SCAN_EVENT_PREEMPTED:
5424 return "preempted";
5425 case WMI_SCAN_EVENT_START_FAILED:
5426 return "start failed";
5427 case WMI_SCAN_EVENT_RESTARTED:
5428 return "restarted";
5429 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5430 return "foreign channel exit";
5431 default:
5432 return "unknown";
5433 }
5434 }
5435
ath12k_pull_scan_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_scan_event * scan_evt_param)5436 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
5437 struct wmi_scan_event *scan_evt_param)
5438 {
5439 const void **tb;
5440 const struct wmi_scan_event *ev;
5441 int ret;
5442
5443 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5444 if (IS_ERR(tb)) {
5445 ret = PTR_ERR(tb);
5446 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5447 return ret;
5448 }
5449
5450 ev = tb[WMI_TAG_SCAN_EVENT];
5451 if (!ev) {
5452 ath12k_warn(ab, "failed to fetch scan ev");
5453 kfree(tb);
5454 return -EPROTO;
5455 }
5456
5457 scan_evt_param->event_type = ev->event_type;
5458 scan_evt_param->reason = ev->reason;
5459 scan_evt_param->channel_freq = ev->channel_freq;
5460 scan_evt_param->scan_req_id = ev->scan_req_id;
5461 scan_evt_param->scan_id = ev->scan_id;
5462 scan_evt_param->vdev_id = ev->vdev_id;
5463 scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
5464
5465 kfree(tb);
5466 return 0;
5467 }
5468
ath12k_pull_peer_sta_kickout_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_peer_sta_kickout_arg * arg)5469 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
5470 struct wmi_peer_sta_kickout_arg *arg)
5471 {
5472 const void **tb;
5473 const struct wmi_peer_sta_kickout_event *ev;
5474 int ret;
5475
5476 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5477 if (IS_ERR(tb)) {
5478 ret = PTR_ERR(tb);
5479 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5480 return ret;
5481 }
5482
5483 ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
5484 if (!ev) {
5485 ath12k_warn(ab, "failed to fetch peer sta kickout ev");
5486 kfree(tb);
5487 return -EPROTO;
5488 }
5489
5490 arg->mac_addr = ev->peer_macaddr.addr;
5491
5492 kfree(tb);
5493 return 0;
5494 }
5495
ath12k_pull_roam_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_roam_event * roam_ev)5496 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
5497 struct wmi_roam_event *roam_ev)
5498 {
5499 const void **tb;
5500 const struct wmi_roam_event *ev;
5501 int ret;
5502
5503 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5504 if (IS_ERR(tb)) {
5505 ret = PTR_ERR(tb);
5506 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5507 return ret;
5508 }
5509
5510 ev = tb[WMI_TAG_ROAM_EVENT];
5511 if (!ev) {
5512 ath12k_warn(ab, "failed to fetch roam ev");
5513 kfree(tb);
5514 return -EPROTO;
5515 }
5516
5517 roam_ev->vdev_id = ev->vdev_id;
5518 roam_ev->reason = ev->reason;
5519 roam_ev->rssi = ev->rssi;
5520
5521 kfree(tb);
5522 return 0;
5523 }
5524
freq_to_idx(struct ath12k * ar,int freq)5525 static int freq_to_idx(struct ath12k *ar, int freq)
5526 {
5527 struct ieee80211_supported_band *sband;
5528 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5529 int band, ch, idx = 0;
5530
5531 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5532 if (!ar->mac.sbands[band].channels)
5533 continue;
5534
5535 sband = hw->wiphy->bands[band];
5536 if (!sband)
5537 continue;
5538
5539 for (ch = 0; ch < sband->n_channels; ch++, idx++)
5540 if (sband->channels[ch].center_freq == freq)
5541 goto exit;
5542 }
5543
5544 exit:
5545 return idx;
5546 }
5547
ath12k_pull_chan_info_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_chan_info_event * ch_info_ev)5548 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5549 struct wmi_chan_info_event *ch_info_ev)
5550 {
5551 const void **tb;
5552 const struct wmi_chan_info_event *ev;
5553 int ret;
5554
5555 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5556 if (IS_ERR(tb)) {
5557 ret = PTR_ERR(tb);
5558 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5559 return ret;
5560 }
5561
5562 ev = tb[WMI_TAG_CHAN_INFO_EVENT];
5563 if (!ev) {
5564 ath12k_warn(ab, "failed to fetch chan info ev");
5565 kfree(tb);
5566 return -EPROTO;
5567 }
5568
5569 ch_info_ev->err_code = ev->err_code;
5570 ch_info_ev->freq = ev->freq;
5571 ch_info_ev->cmd_flags = ev->cmd_flags;
5572 ch_info_ev->noise_floor = ev->noise_floor;
5573 ch_info_ev->rx_clear_count = ev->rx_clear_count;
5574 ch_info_ev->cycle_count = ev->cycle_count;
5575 ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
5576 ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
5577 ch_info_ev->rx_frame_count = ev->rx_frame_count;
5578 ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
5579 ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
5580 ch_info_ev->vdev_id = ev->vdev_id;
5581
5582 kfree(tb);
5583 return 0;
5584 }
5585
5586 static int
ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_pdev_bss_chan_info_event * bss_ch_info_ev)5587 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5588 struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
5589 {
5590 const void **tb;
5591 const struct wmi_pdev_bss_chan_info_event *ev;
5592 int ret;
5593
5594 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5595 if (IS_ERR(tb)) {
5596 ret = PTR_ERR(tb);
5597 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5598 return ret;
5599 }
5600
5601 ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5602 if (!ev) {
5603 ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
5604 kfree(tb);
5605 return -EPROTO;
5606 }
5607
5608 bss_ch_info_ev->pdev_id = ev->pdev_id;
5609 bss_ch_info_ev->freq = ev->freq;
5610 bss_ch_info_ev->noise_floor = ev->noise_floor;
5611 bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5612 bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5613 bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5614 bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5615 bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5616 bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5617 bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5618 bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5619 bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5620 bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5621
5622 kfree(tb);
5623 return 0;
5624 }
5625
5626 static int
ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_vdev_install_key_complete_arg * arg)5627 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
5628 struct wmi_vdev_install_key_complete_arg *arg)
5629 {
5630 const void **tb;
5631 const struct wmi_vdev_install_key_compl_event *ev;
5632 int ret;
5633
5634 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5635 if (IS_ERR(tb)) {
5636 ret = PTR_ERR(tb);
5637 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5638 return ret;
5639 }
5640
5641 ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
5642 if (!ev) {
5643 ath12k_warn(ab, "failed to fetch vdev install key compl ev");
5644 kfree(tb);
5645 return -EPROTO;
5646 }
5647
5648 arg->vdev_id = le32_to_cpu(ev->vdev_id);
5649 arg->macaddr = ev->peer_macaddr.addr;
5650 arg->key_idx = le32_to_cpu(ev->key_idx);
5651 arg->key_flags = le32_to_cpu(ev->key_flags);
5652 arg->status = le32_to_cpu(ev->status);
5653
5654 kfree(tb);
5655 return 0;
5656 }
5657
ath12k_pull_peer_assoc_conf_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_peer_assoc_conf_arg * peer_assoc_conf)5658 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
5659 struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
5660 {
5661 const void **tb;
5662 const struct wmi_peer_assoc_conf_event *ev;
5663 int ret;
5664
5665 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5666 if (IS_ERR(tb)) {
5667 ret = PTR_ERR(tb);
5668 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5669 return ret;
5670 }
5671
5672 ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
5673 if (!ev) {
5674 ath12k_warn(ab, "failed to fetch peer assoc conf ev");
5675 kfree(tb);
5676 return -EPROTO;
5677 }
5678
5679 peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
5680 peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
5681
5682 kfree(tb);
5683 return 0;
5684 }
5685
5686 static int
ath12k_pull_pdev_temp_ev(struct ath12k_base * ab,struct sk_buff * skb,const struct wmi_pdev_temperature_event * ev)5687 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5688 const struct wmi_pdev_temperature_event *ev)
5689 {
5690 const void **tb;
5691 int ret;
5692
5693 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5694 if (IS_ERR(tb)) {
5695 ret = PTR_ERR(tb);
5696 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5697 return ret;
5698 }
5699
5700 ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
5701 if (!ev) {
5702 ath12k_warn(ab, "failed to fetch pdev temp ev");
5703 kfree(tb);
5704 return -EPROTO;
5705 }
5706
5707 kfree(tb);
5708 return 0;
5709 }
5710
ath12k_wmi_op_ep_tx_credits(struct ath12k_base * ab)5711 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
5712 {
5713 /* try to send pending beacons first. they take priority */
5714 wake_up(&ab->wmi_ab.tx_credits_wq);
5715 }
5716
/* HTC transmit-completion callback for WMI command buffers: once HTC has
 * finished with the command skb there is nothing left to do but free it.
 */
static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}
5722
/* Return true when @alpha is the world regulatory domain code "00" or the
 * "na" (not available) placeholder, i.e. not a real country code.
 */
static bool ath12k_reg_is_world_alpha(char *alpha)
{
	return (alpha[0] == '0' && alpha[1] == '0') ||
	       (alpha[0] == 'n' && alpha[1] == 'a');
}
5733
/* Handle a WMI regulatory channel-list (country code) event.
 *
 * Builds a new ieee80211_regdomain from the firmware-provided rules and
 * either queues it for application via regd_update_work (after mac
 * registration) or stores it as the default regd to apply at registration
 * time. Runs in WMI event context, hence the GFP_ATOMIC allocation.
 *
 * Returns 0 on success or a negative errno. Internal failures fall through
 * to a WARN_ON() fallback because the driver and firmware regdomains must
 * stay in sync (see comment at the fallback label).
 */
static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_reg_info *reg_info = NULL;
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int ret = 0, pdev_idx, i, j;
	struct ath12k *ar;

	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
	if (!reg_info) {
		ret = -ENOMEM;
		goto fallback;
	}

	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);

	if (ret) {
		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
		goto fallback;
	}

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested ctry,
		 * fw retains the current regd. We print a failure info
		 * and return from here.
		 */
		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
		goto mem_free;
	}

	pdev_idx = reg_info->phy_id;

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params->single_pdev_only &&
		    pdev_idx < ab->hw_params->num_rxdma_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
		    reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e a default regd was already set during initialization
	 * and the regd coming from this event has a valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath12k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath12k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	/* base_lock protects the default_regd/new_regd pointers which are
	 * also touched from the regd update work and mac registration paths.
	 */
	spin_lock_bh(&ab->base_lock);
	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
		/* Once mac is registered, ar is valid and all CC events from
		 * fw is considered to be received due to user requests
		 * currently.
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		queue_work(ab->workqueue, &ar->regd_update_work);
	} else {
		/* Multiple events for the same *ar is not expected. But we
		 * can still clear any previously stored default_regd if we
		 * are receiving this event for the same radio by mistake.
		 * NULL pointer handling will be taken care by kfree itself.
		 */
		kfree(ab->default_regd[pdev_idx]);
		/* This regd would be applied during mac registration */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock_bh(&ab->base_lock);

	goto mem_free;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again if fw has succeeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
	WARN_ON(1);
mem_free:
	/* Free the per-band rule arrays attached by the pull helper;
	 * kfree(NULL) is a no-op for any that were never allocated.
	 */
	if (reg_info) {
		kfree(reg_info->reg_rules_2g_ptr);
		kfree(reg_info->reg_rules_5g_ptr);
		if (reg_info->is_ext_reg_event) {
			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
				kfree(reg_info->reg_rules_6g_ap_ptr[i]);

			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
		}
		kfree(reg_info);
	}
	return ret;
}
5856
/* TLV iterator callback for the WMI ready event.
 *
 * Two tags are of interest:
 *  - WMI_TAG_READY_EVENT: the fixed-size header carrying init status, the
 *    base MAC address and the count of extra per-pdev MAC addresses.
 *  - WMI_TAG_ARRAY_FIXED_STRUCT: the optional list of extra MAC addresses,
 *    assigned one per radio when enough were provided.
 *
 * @data points to the caller's struct ath12k_wmi_rdy_parse. Always returns
 * 0 so iteration continues over unknown tags.
 */
static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data)
{
	struct ath12k_wmi_rdy_parse *rdy_parse = data;
	struct wmi_ready_event fixed_param;
	struct ath12k_wmi_mac_addr_params *addr_list;
	struct ath12k_pdev *pdev;
	u32 num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		/* Copy at most sizeof(fixed_param) bytes: older firmware may
		 * send a shorter event, so zero-fill first and bound the copy
		 * by the TLV length.
		 */
		memset(&fixed_param, 0, sizeof(fixed_param));
		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
		       min_t(u16, sizeof(fixed_param), len));
		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
		rdy_parse->num_extra_mac_addr =
			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);

		ether_addr_copy(ab->mac_addr,
				fixed_param.ready_event_min.mac_addr.addr);
		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
		ab->wmi_ready = true;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		/* Only use per-pdev addresses when there are multiple radios
		 * and firmware supplied at least one address per radio.
		 */
		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
			break;

		for (i = 0; i < ab->num_radios; i++) {
			pdev = &ab->pdevs[i];
			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
		}
		ab->pdevs_macaddr_valid = true;
		break;
	default:
		break;
	}

	return 0;
}
5900
ath12k_ready_event(struct ath12k_base * ab,struct sk_buff * skb)5901 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
5902 {
5903 struct ath12k_wmi_rdy_parse rdy_parse = { };
5904 int ret;
5905
5906 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5907 ath12k_wmi_rdy_parse, &rdy_parse);
5908 if (ret) {
5909 ath12k_warn(ab, "failed to parse tlv %d\n", ret);
5910 return ret;
5911 }
5912
5913 complete(&ab->wmi_ab.unified_ready);
5914 return 0;
5915 }
5916
ath12k_peer_delete_resp_event(struct ath12k_base * ab,struct sk_buff * skb)5917 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
5918 {
5919 struct wmi_peer_delete_resp_event peer_del_resp;
5920 struct ath12k *ar;
5921
5922 if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
5923 ath12k_warn(ab, "failed to extract peer delete resp");
5924 return;
5925 }
5926
5927 rcu_read_lock();
5928 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
5929 if (!ar) {
5930 ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
5931 peer_del_resp.vdev_id);
5932 rcu_read_unlock();
5933 return;
5934 }
5935
5936 complete(&ar->peer_delete_done);
5937 rcu_read_unlock();
5938 ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
5939 peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
5940 }
5941
ath12k_vdev_delete_resp_event(struct ath12k_base * ab,struct sk_buff * skb)5942 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
5943 struct sk_buff *skb)
5944 {
5945 struct ath12k *ar;
5946 u32 vdev_id = 0;
5947
5948 if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
5949 ath12k_warn(ab, "failed to extract vdev delete resp");
5950 return;
5951 }
5952
5953 rcu_read_lock();
5954 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
5955 if (!ar) {
5956 ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
5957 vdev_id);
5958 rcu_read_unlock();
5959 return;
5960 }
5961
5962 complete(&ar->vdev_delete_done);
5963
5964 rcu_read_unlock();
5965
5966 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
5967 vdev_id);
5968 }
5969
/* Translate a WMI vdev start response status code into a human-readable
 * string for log messages. Unrecognized codes yield "unknown".
 */
static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
{
	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_INVALID_VDEVID)
		return "invalid vdev id";
	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_NOT_SUPPORTED)
		return "not supported";
	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_DFS_VIOLATION)
		return "dfs violation";
	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN)
		return "invalid regdomain";

	return "unknown";
}
5985
ath12k_vdev_start_resp_event(struct ath12k_base * ab,struct sk_buff * skb)5986 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
5987 {
5988 struct wmi_vdev_start_resp_event vdev_start_resp;
5989 struct ath12k *ar;
5990 u32 status;
5991
5992 if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
5993 ath12k_warn(ab, "failed to extract vdev start resp");
5994 return;
5995 }
5996
5997 rcu_read_lock();
5998 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
5999 if (!ar) {
6000 ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6001 vdev_start_resp.vdev_id);
6002 rcu_read_unlock();
6003 return;
6004 }
6005
6006 ar->last_wmi_vdev_start_status = 0;
6007
6008 status = le32_to_cpu(vdev_start_resp.status);
6009
6010 if (WARN_ON_ONCE(status)) {
6011 ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
6012 status, ath12k_wmi_vdev_resp_print(status));
6013 ar->last_wmi_vdev_start_status = status;
6014 }
6015
6016 complete(&ar->vdev_setup_done);
6017
6018 rcu_read_unlock();
6019
6020 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
6021 vdev_start_resp.vdev_id);
6022 }
6023
ath12k_bcn_tx_status_event(struct ath12k_base * ab,struct sk_buff * skb)6024 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
6025 {
6026 u32 vdev_id, tx_status;
6027
6028 if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
6029 ath12k_warn(ab, "failed to extract bcn tx status");
6030 return;
6031 }
6032 }
6033
ath12k_vdev_stopped_event(struct ath12k_base * ab,struct sk_buff * skb)6034 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
6035 {
6036 struct ath12k *ar;
6037 u32 vdev_id = 0;
6038
6039 if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6040 ath12k_warn(ab, "failed to extract vdev stopped event");
6041 return;
6042 }
6043
6044 rcu_read_lock();
6045 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6046 if (!ar) {
6047 ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6048 vdev_id);
6049 rcu_read_unlock();
6050 return;
6051 }
6052
6053 complete(&ar->vdev_setup_done);
6054
6055 rcu_read_unlock();
6056
6057 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6058 }
6059
/* Handle a WMI management-frame RX event.
 *
 * Extracts the RX metadata, fills in the mac80211 RX status (band,
 * frequency, signal, rate index, decryption flags) and hands the frame to
 * mac80211 via ieee80211_rx_ni(). The skb is consumed on every path:
 * either delivered to mac80211 or freed on error/filtering.
 */
static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
	struct ath12k *ar;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u16 fc;
	struct ieee80211_supported_band *sband;

	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
		ath12k_warn(ab, "failed to extract mgmt rx event");
		dev_kfree_skb(skb);
		return;
	}

	memset(status, 0, sizeof(*status));

	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
		   rx_ev.status);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);

	if (!ar) {
		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
			    rx_ev.pdev_id);
		dev_kfree_skb(skb);
		goto exit;
	}

	/* Drop frames during CAC (radar detection) and frames firmware
	 * flagged with decrypt/key-cache/CRC errors.
	 */
	if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
			     WMI_RX_STATUS_ERR_CRC))) {
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Classify the band: 6 GHz is identified by frequency, 2/5 GHz by
	 * channel number ranges.
	 */
	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
	    rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
		status->band = NL80211_BAND_6GHZ;
		status->freq = rx_ev.chan_freq;
	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
		status->band = NL80211_BAND_2GHZ;
	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
		status->band = NL80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.phy_mode == MODE_11B &&
	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);

	sband = &ar->mac.sbands[status->band];

	/* For 6 GHz the frequency was already taken from the event above */
	if (status->band != NL80211_BAND_6GHZ)
		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
							      status->band);

	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
	/* rate is reported in units of 100 kbps */
	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* Firmware is guaranteed to report all essential management frames via
	 * WMI while it can deliver some extra via HTT. Since there can be
	 * duplicates split the reporting wrt monitor/sniffing.
	 */
	status->flag |= RX_FLAG_SKIP_MONITOR;

	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
	 * including group privacy action frames.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			/* Clear the protected bit so mac80211 does not try to
			 * decrypt a frame firmware already decrypted.
			 */
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	if (ieee80211_is_beacon(hdr->frame_control))
		ath12k_mac_handle_beacon(ar, skb);

	ath12k_dbg(ab, ATH12K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath12k_dbg(ab, ATH12K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);

exit:
	rcu_read_unlock();
}
6173
ath12k_mgmt_tx_compl_event(struct ath12k_base * ab,struct sk_buff * skb)6174 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
6175 {
6176 struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
6177 struct ath12k *ar;
6178
6179 if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
6180 ath12k_warn(ab, "failed to extract mgmt tx compl event");
6181 return;
6182 }
6183
6184 rcu_read_lock();
6185 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
6186 if (!ar) {
6187 ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
6188 tx_compl_param.pdev_id);
6189 goto exit;
6190 }
6191
6192 wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
6193 le32_to_cpu(tx_compl_param.status));
6194
6195 ath12k_dbg(ab, ATH12K_DBG_MGMT,
6196 "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
6197 tx_compl_param.pdev_id, tx_compl_param.desc_id,
6198 tx_compl_param.status);
6199
6200 exit:
6201 rcu_read_unlock();
6202 }
6203
/* Find the radio whose scan state machine is in @state for @vdev_id.
 *
 * Walks the active pdevs and compares each radio's scan state and scan
 * vdev under its data_lock. Must be called under rcu_read_lock() (pdevs
 * are RCU-dereferenced). Returns the matching ar or NULL.
 */
static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
						  u32 vdev_id,
						  enum ath12k_scan_state state)
{
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	bool found;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = rcu_dereference(ab->pdevs_active[i]);
		if (!pdev || !pdev->ar)
			continue;

		ar = pdev->ar;

		spin_lock_bh(&ar->data_lock);
		found = ar->scan.state == state &&
			ar->scan.vdev_id == vdev_id;
		spin_unlock_bh(&ar->data_lock);

		if (found)
			return ar;
	}

	return NULL;
}
6228
/* Handle a WMI scan event: locate the owning radio and drive the driver's
 * scan state machine (started/completed/bss-chan/foreign-chan/...).
 *
 * The state-machine helpers are called with ar->data_lock held since they
 * read and modify ar->scan state.
 */
static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k *ar;
	struct wmi_scan_event scan_ev = {0};

	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
		ath12k_warn(ab, "failed to extract scan event");
		return;
	}

	rcu_read_lock();

	/* In case the scan was cancelled, ex. during interface teardown,
	 * the interface will not be found in active interfaces.
	 * Rather, in such scenarios, iterate over the active pdev's to
	 * search 'ar' if the corresponding 'ar' scan is ABORTING and the
	 * aborting scan's vdev id matches this event info.
	 */
	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
						 ATH12K_SCAN_ABORTING);
		if (!ar)
			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
							 ATH12K_SCAN_RUNNING);
	} else {
		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
	}

	if (!ar) {
		ath12k_warn(ab, "Received scan event for unknown vdev");
		rcu_read_unlock();
		return;
	}

	/* data_lock protects ar->scan state read by the str helper and
	 * modified by the event handlers below.
	 */
	spin_lock_bh(&ar->data_lock);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
						  le32_to_cpu(scan_ev.reason)),
		   le32_to_cpu(scan_ev.event_type),
		   le32_to_cpu(scan_ev.reason),
		   le32_to_cpu(scan_ev.channel_freq),
		   le32_to_cpu(scan_ev.scan_req_id),
		   le32_to_cpu(scan_ev.scan_id),
		   le32_to_cpu(scan_ev.vdev_id),
		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (le32_to_cpu(scan_ev.event_type)) {
	case WMI_SCAN_EVENT_STARTED:
		ath12k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath12k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath12k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath12k_warn(ab, "received scan start failure event\n");
		ath12k_wmi_event_scan_start_failed(ar);
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		__ath12k_mac_scan_finish(ar);
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
	case WMI_SCAN_EVENT_RESTARTED:
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
	default:
		/* Remaining event types are informational only */
		break;
	}

	spin_unlock_bh(&ar->data_lock);

	rcu_read_unlock();
}
6309
/* Handle a WMI peer-STA-kickout event: find the peer by MAC address,
 * resolve its station entry and report low ack to mac80211 so the station
 * gets disconnected.
 *
 * Lookups are done under ab->base_lock (peer table) inside an RCU read
 * section (ar lookup), and both are held across the ieee80211 call via the
 * shared exit path.
 */
static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_sta_kickout_arg arg = {};
	struct ieee80211_sta *sta;
	struct ath12k_peer *peer;
	struct ath12k *ar;

	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
		ath12k_warn(ab, "failed to extract peer sta kickout event");
		return;
	}

	rcu_read_lock();

	/* base_lock protects the peer table */
	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);

	if (!peer) {
		ath12k_warn(ab, "peer not found %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
			    peer->vdev_id);
		goto exit;
	}

	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
					   arg.mac_addr, NULL);
	if (!sta) {
		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
		   arg.mac_addr);

	/* Report a low ack count so mac80211 tears the station down */
	ieee80211_report_low_ack(sta, 10);

exit:
	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}
6358
ath12k_roam_event(struct ath12k_base * ab,struct sk_buff * skb)6359 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
6360 {
6361 struct wmi_roam_event roam_ev = {};
6362 struct ath12k *ar;
6363 u32 vdev_id;
6364 u8 roam_reason;
6365
6366 if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
6367 ath12k_warn(ab, "failed to extract roam event");
6368 return;
6369 }
6370
6371 vdev_id = le32_to_cpu(roam_ev.vdev_id);
6372 roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
6373 WMI_ROAM_REASON_MASK);
6374
6375 ath12k_dbg(ab, ATH12K_DBG_WMI,
6376 "wmi roam event vdev %u reason %d rssi %d\n",
6377 vdev_id, roam_reason, roam_ev.rssi);
6378
6379 rcu_read_lock();
6380 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6381 if (!ar) {
6382 ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
6383 rcu_read_unlock();
6384 return;
6385 }
6386
6387 if (roam_reason >= WMI_ROAM_REASON_MAX)
6388 ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
6389 roam_reason, vdev_id);
6390
6391 switch (roam_reason) {
6392 case WMI_ROAM_REASON_BEACON_MISS:
6393 ath12k_mac_handle_beacon_miss(ar, vdev_id);
6394 break;
6395 case WMI_ROAM_REASON_BETTER_AP:
6396 case WMI_ROAM_REASON_LOW_RSSI:
6397 case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
6398 case WMI_ROAM_REASON_HO_FAILED:
6399 ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
6400 roam_reason, vdev_id);
6401 break;
6402 }
6403
6404 rcu_read_unlock();
6405 }
6406
/* Handle a WMI channel-info event delivered during a scan: record per-
 * channel survey data (noise floor, busy/total time) into ar->survey[].
 *
 * The survey index is derived from the reported frequency via
 * freq_to_idx(); events arriving outside an active scan, or the final
 * END_RESP marker, are ignored.
 */
static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_chan_info_event ch_info_ev = {0};
	struct ath12k *ar;
	struct survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	u32 cc_freq_hz = ab->cc_freq_hz;

	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
		ath12k_warn(ab, "failed to extract chan info event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
		   ch_info_ev.mac_clk_mhz);

	/* END_RESP only marks the end of reporting; nothing to record */
	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
		return;
	}

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
			    ch_info_ev.vdev_id);
		rcu_read_unlock();
		return;
	}
	/* data_lock protects ar->scan state and ar->survey[] */
	spin_lock_bh(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_STARTING:
		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
		goto exit;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		break;
	}

	/* freq_to_idx() returns the total channel count when the frequency
	 * is unknown, so bounds-check before indexing ar->survey[].
	 */
	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
			    ch_info_ev.freq, idx);
		goto exit;
	}

	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
	 * HW channel counters frequency value
	 */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);

	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
		survey = &ar->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
				 SURVEY_INFO_TIME_BUSY;
		/* Convert HW cycle counters to time using the counter clock */
		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
					    cc_freq_hz);
	}
exit:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
6480
/* Handler for WMI_PDEV_BSS_CHAN_INFO_EVENTID: firmware reports 64-bit
 * cycle counters (split into high/low 32-bit words) for the home
 * channel; convert them to survey time statistics and wake up the
 * waiter on ar->bss_survey_done.
 */
static void
ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
	struct survey_info *survey;
	struct ath12k *ar;
	/* HW channel counter frequency in Hz; divisor to turn cycle
	 * counts into time
	 */
	u32 cc_freq_hz = ab->cc_freq_hz;
	u64 busy, total, tx, rx, rx_bss;
	int idx;

	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
		ath12k_warn(ab, "failed to extract pdev bss chan info event");
		return;
	}

	/* Reassemble each 64-bit counter from its high/low __le32 halves */
	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);

	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.cycle_count_low);

	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);

	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);

	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
		   bss_ch_info_ev.noise_floor, busy, total,
		   tx, rx, rx_bss);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));

	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
			    bss_ch_info_ev.pdev_id);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&ar->data_lock);
	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
			    bss_ch_info_ev.freq, idx);
		goto exit;
	}

	survey = &ar->survey[idx];

	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
	survey->time      = div_u64(total, cc_freq_hz);
	survey->time_busy = div_u64(busy, cc_freq_hz);
	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
	survey->time_tx   = div_u64(tx, cc_freq_hz);
	survey->filled |= (SURVEY_INFO_NOISE_DBM |
			   SURVEY_INFO_TIME |
			   SURVEY_INFO_TIME_BUSY |
			   SURVEY_INFO_TIME_RX |
			   SURVEY_INFO_TIME_TX);
exit:
	spin_unlock_bh(&ar->data_lock);
	/* Completion is signalled even when the frequency was out of
	 * range so the requester does not block until timeout.
	 */
	complete(&ar->bss_survey_done);

	rcu_read_unlock();
}
6553
ath12k_vdev_install_key_compl_event(struct ath12k_base * ab,struct sk_buff * skb)6554 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
6555 struct sk_buff *skb)
6556 {
6557 struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
6558 struct ath12k *ar;
6559
6560 if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
6561 ath12k_warn(ab, "failed to extract install key compl event");
6562 return;
6563 }
6564
6565 ath12k_dbg(ab, ATH12K_DBG_WMI,
6566 "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
6567 install_key_compl.key_idx, install_key_compl.key_flags,
6568 install_key_compl.macaddr, install_key_compl.status);
6569
6570 rcu_read_lock();
6571 ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
6572 if (!ar) {
6573 ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
6574 install_key_compl.vdev_id);
6575 rcu_read_unlock();
6576 return;
6577 }
6578
6579 ar->install_key_status = 0;
6580
6581 if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
6582 ath12k_warn(ab, "install key failed for %pM status %d\n",
6583 install_key_compl.macaddr, install_key_compl.status);
6584 ar->install_key_status = install_key_compl.status;
6585 }
6586
6587 complete(&ar->install_key_done);
6588 rcu_read_unlock();
6589 }
6590
/* TLV iterator callback for WMI_SERVICE_AVAILABLE_EVENTID: decodes the
 * extended (and ext2) firmware service bitmaps into ab->wmi_ab.svc_map.
 *
 * Fix: the ext bitmap debug print previously dumped the raw __le32
 * words; convert them with le32_to_cpu() as the ext2 branch already
 * does, so the output is correct on big-endian hosts too.
 */
static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
					  u16 tag, u16 len,
					  const void *ptr,
					  void *data)
{
	const struct wmi_service_available_event *ev;
	__le32 *wmi_ext2_service_bitmap;
	int i, j;
	u16 expected_len;

	/* Both tags carry at least one full bitmap segment */
	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
	if (len < expected_len) {
		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
			    len, tag);
		return -EINVAL;
	}

	switch (tag) {
	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
		ev = (struct wmi_service_available_event *)ptr;
		/* Walk each 32-bit segment, setting svc_map bit j for
		 * every service bit the firmware advertises.
		 */
		for (i = 0, j = WMI_MAX_SERVICE;
		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
		     i++) {
			do {
				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					set_bit(j, ab->wmi_ab.svc_map);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
			   le32_to_cpu(ev->wmi_service_segment_bitmap[0]),
			   le32_to_cpu(ev->wmi_service_segment_bitmap[1]),
			   le32_to_cpu(ev->wmi_service_segment_bitmap[2]),
			   le32_to_cpu(ev->wmi_service_segment_bitmap[3]));
		break;
	case WMI_TAG_ARRAY_UINT32:
		wmi_ext2_service_bitmap = (__le32 *)ptr;
		for (i = 0, j = WMI_MAX_EXT_SERVICE;
		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
		     i++) {
			do {
				if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					set_bit(j, ab->wmi_ab.svc_map);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
			   __le32_to_cpu(wmi_ext2_service_bitmap[0]),
			   __le32_to_cpu(wmi_ext2_service_bitmap[1]),
			   __le32_to_cpu(wmi_ext2_service_bitmap[2]),
			   __le32_to_cpu(wmi_ext2_service_bitmap[3]));
		break;
	}
	return 0;
}
6650
ath12k_service_available_event(struct ath12k_base * ab,struct sk_buff * skb)6651 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
6652 {
6653 int ret;
6654
6655 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6656 ath12k_wmi_tlv_services_parser,
6657 NULL);
6658 return ret;
6659 }
6660
ath12k_peer_assoc_conf_event(struct ath12k_base * ab,struct sk_buff * skb)6661 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
6662 {
6663 struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
6664 struct ath12k *ar;
6665
6666 if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
6667 ath12k_warn(ab, "failed to extract peer assoc conf event");
6668 return;
6669 }
6670
6671 ath12k_dbg(ab, ATH12K_DBG_WMI,
6672 "peer assoc conf ev vdev id %d macaddr %pM\n",
6673 peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
6674
6675 rcu_read_lock();
6676 ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
6677
6678 if (!ar) {
6679 ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
6680 peer_assoc_conf.vdev_id);
6681 rcu_read_unlock();
6682 return;
6683 }
6684
6685 complete(&ar->peer_assoc_done);
6686 rcu_read_unlock();
6687 }
6688
/* Handler for WMI_UPDATE_STATS_EVENTID - intentionally a no-op;
 * firmware stats are received but not consumed by this driver yet.
 */
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
}
6692
6693 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
6694 * is not part of BDF CTL(Conformance test limits) table entries.
6695 */
ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base * ab,struct sk_buff * skb)6696 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
6697 struct sk_buff *skb)
6698 {
6699 const void **tb;
6700 const struct wmi_pdev_ctl_failsafe_chk_event *ev;
6701 int ret;
6702
6703 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6704 if (IS_ERR(tb)) {
6705 ret = PTR_ERR(tb);
6706 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6707 return;
6708 }
6709
6710 ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
6711 if (!ev) {
6712 ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
6713 kfree(tb);
6714 return;
6715 }
6716
6717 ath12k_dbg(ab, ATH12K_DBG_WMI,
6718 "pdev ctl failsafe check ev status %d\n",
6719 ev->ctl_failsafe_status);
6720
6721 /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
6722 * to 10 dBm else the CTL power entry in the BDF would be picked up.
6723 */
6724 if (ev->ctl_failsafe_status != 0)
6725 ath12k_warn(ab, "pdev ctl failsafe failure status %d",
6726 ev->ctl_failsafe_status);
6727
6728 kfree(tb);
6729 }
6730
/* Complete a pending channel switch announcement on every vdev listed
 * in the event, once the firmware's countdown has expired.
 */
static void
ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
					  const struct ath12k_wmi_pdev_csa_event *ev,
					  const u32 *vdev_ids)
{
	int i;
	struct ath12k_vif *arvif;

	/* Finish CSA only when the remaining switch count reaches zero */
	if (ev->current_switch_count)
		return;

	rcu_read_lock();
	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);

		if (!arvif) {
			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
				    vdev_ids[i]);
			continue;
		}

		/* Only interfaces that are up with a CSA in progress */
		if (arvif->is_up && arvif->vif->bss_conf.csa_active)
			ieee80211_csa_finish(arvif->vif, 0);
	}
	rcu_read_unlock();
}
6758
/* Handler for WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: extracts the
 * CSA event header plus its vdev id array and hands them to the CSA
 * switch count processor.
 */
static void
ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct ath12k_wmi_pdev_csa_event *ev;
	const u32 *vdev_ids;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	/* Both the event struct and the trailing vdev id array are
	 * required; bail out if either TLV is missing.
	 */
	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];

	if (!ev || !vdev_ids) {
		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
		   ev->current_switch_count, ev->pdev_id,
		   ev->num_vdevs);

	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);

	kfree(tb);
}
6793
/* Handler for WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: forwards a radar
 * detection to mac80211 unless radar events are blocked for debugging.
 */
static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	const void **tb;
	const struct ath12k_wmi_pdev_radar_event *ev;
	struct ath12k *ar;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];

	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
		   ev->freq_offset, ev->sidx);

	rcu_read_lock();

	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));

	if (!ar) {
		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
			    ev->pdev_id);
		goto exit;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
		   ev->pdev_id);

	/* dfs_block_radar_events is a debugfs knob that suppresses the
	 * notification to mac80211 (no CAC abort / channel move).
	 */
	if (ar->dfs_block_radar_events)
		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
	else
		ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);

exit:
	rcu_read_unlock();

	kfree(tb);
}
6846
/* Handler for WMI_PDEV_TEMPERATURE_EVENTID: currently only logs the
 * reported temperature and validates the pdev id; no thermal state is
 * updated here yet.
 */
static void
ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ath12k *ar;
	struct wmi_pdev_temperature_event ev = {0};

	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
		ath12k_warn(ab, "failed to extract pdev temperature event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);

	rcu_read_lock();

	/* Lookup is validation-only; 'ar' is not used further */
	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
		goto exit;
	}

exit:
	rcu_read_unlock();
}
6873
ath12k_fils_discovery_event(struct ath12k_base * ab,struct sk_buff * skb)6874 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
6875 struct sk_buff *skb)
6876 {
6877 const void **tb;
6878 const struct wmi_fils_discovery_event *ev;
6879 int ret;
6880
6881 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6882 if (IS_ERR(tb)) {
6883 ret = PTR_ERR(tb);
6884 ath12k_warn(ab,
6885 "failed to parse FILS discovery event tlv %d\n",
6886 ret);
6887 return;
6888 }
6889
6890 ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
6891 if (!ev) {
6892 ath12k_warn(ab, "failed to fetch FILS discovery event\n");
6893 kfree(tb);
6894 return;
6895 }
6896
6897 ath12k_warn(ab,
6898 "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
6899 ev->vdev_id, ev->fils_tt, ev->tbtt);
6900
6901 kfree(tb);
6902 }
6903
ath12k_probe_resp_tx_status_event(struct ath12k_base * ab,struct sk_buff * skb)6904 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
6905 struct sk_buff *skb)
6906 {
6907 const void **tb;
6908 const struct wmi_probe_resp_tx_status_event *ev;
6909 int ret;
6910
6911 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6912 if (IS_ERR(tb)) {
6913 ret = PTR_ERR(tb);
6914 ath12k_warn(ab,
6915 "failed to parse probe response transmission status event tlv: %d\n",
6916 ret);
6917 return;
6918 }
6919
6920 ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
6921 if (!ev) {
6922 ath12k_warn(ab,
6923 "failed to fetch probe response transmission status event");
6924 kfree(tb);
6925 return;
6926 }
6927
6928 if (ev->tx_status)
6929 ath12k_warn(ab,
6930 "Probe response transmission failed for vdev_id %u, status %u\n",
6931 ev->vdev_id, ev->tx_status);
6932
6933 kfree(tb);
6934 }
6935
/* Handler for WMI_P2P_NOA_EVENTID: pushes the firmware's updated P2P
 * Notice-of-Absence attribute into the vdev's beacon data.
 *
 * Returns 0 on success or a negative errno on parse/lookup failure.
 */
static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
				    struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_p2p_noa_event *ev;
	const struct ath12k_wmi_p2p_noa_info *noa;
	struct ath12k *ar;
	int ret, vdev_id;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
		return ret;
	}

	/* Both the event header and the NoA descriptor are mandatory */
	ev = tb[WMI_TAG_P2P_NOA_EVENT];
	noa = tb[WMI_TAG_P2P_NOA_INFO];

	if (!ev || !noa) {
		ret = -EPROTO;
		goto out;
	}

	vdev_id = __le32_to_cpu(ev->vdev_id);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
			    vdev_id);
		ret = -EINVAL;
		goto unlock;
	}

	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);

	ret = 0;

unlock:
	rcu_read_unlock();
out:
	kfree(tb);
	return ret;
}
6985
ath12k_rfkill_state_change_event(struct ath12k_base * ab,struct sk_buff * skb)6986 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
6987 struct sk_buff *skb)
6988 {
6989 const struct wmi_rfkill_state_change_event *ev;
6990 const void **tb;
6991 int ret;
6992
6993 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6994 if (IS_ERR(tb)) {
6995 ret = PTR_ERR(tb);
6996 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6997 return;
6998 }
6999
7000 ev = tb[WMI_TAG_RFKILL_EVENT];
7001 if (!ev) {
7002 kfree(tb);
7003 return;
7004 }
7005
7006 ath12k_dbg(ab, ATH12K_DBG_MAC,
7007 "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
7008 le32_to_cpu(ev->gpio_pin_num),
7009 le32_to_cpu(ev->int_type),
7010 le32_to_cpu(ev->radio_state));
7011
7012 spin_lock_bh(&ab->base_lock);
7013 ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
7014 spin_unlock_bh(&ab->base_lock);
7015
7016 queue_work(ab->workqueue, &ab->rfkill_work);
7017 kfree(tb);
7018 }
7019
/* Handler for WMI_DIAG_EVENTID: forward the raw diag payload to the
 * tracing subsystem; no in-driver parsing.
 */
static void
ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
}
7025
ath12k_wmi_twt_enable_event(struct ath12k_base * ab,struct sk_buff * skb)7026 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
7027 struct sk_buff *skb)
7028 {
7029 const void **tb;
7030 const struct wmi_twt_enable_event *ev;
7031 int ret;
7032
7033 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7034 if (IS_ERR(tb)) {
7035 ret = PTR_ERR(tb);
7036 ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
7037 ret);
7038 return;
7039 }
7040
7041 ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
7042 if (!ev) {
7043 ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
7044 goto exit;
7045 }
7046
7047 ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
7048 le32_to_cpu(ev->pdev_id),
7049 le32_to_cpu(ev->status));
7050
7051 exit:
7052 kfree(tb);
7053 }
7054
ath12k_wmi_twt_disable_event(struct ath12k_base * ab,struct sk_buff * skb)7055 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
7056 struct sk_buff *skb)
7057 {
7058 const void **tb;
7059 const struct wmi_twt_disable_event *ev;
7060 int ret;
7061
7062 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7063 if (IS_ERR(tb)) {
7064 ret = PTR_ERR(tb);
7065 ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
7066 ret);
7067 return;
7068 }
7069
7070 ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
7071 if (!ev) {
7072 ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
7073 goto exit;
7074 }
7075
7076 ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
7077 le32_to_cpu(ev->pdev_id),
7078 le32_to_cpu(ev->status));
7079
7080 exit:
7081 kfree(tb);
7082 }
7083
/* TLV iterator callback for the WoW wakeup host event: records the
 * wake reason and, for page-fault wakeups, validates and dumps the
 * attached packet buffer.
 *
 * Fixes: (1) typo in the warning message ("wo" -> "wow");
 * (2) "pf_len > len - sizeof(pf_len)" promoted the subtraction to
 * size_t, so a truncated TLV with len < 4 wrapped around and defeated
 * the bounds check - reject short TLVs explicitly first.
 */
static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
					    u16 tag, u16 len,
					    const void *ptr, void *data)
{
	const struct wmi_wow_ev_pg_fault_param *pf_param;
	const struct wmi_wow_ev_param *param;
	struct wmi_wow_ev_arg *arg = data;
	int pf_len;

	switch (tag) {
	case WMI_TAG_WOW_EVENT_INFO:
		param = ptr;
		arg->wake_reason = le32_to_cpu(param->wake_reason);
		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
			   arg->wake_reason, wow_reason(arg->wake_reason));
		break;

	case WMI_TAG_ARRAY_BYTE:
		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
			/* TLV must at least hold the length field itself */
			if (len < sizeof(pf_len)) {
				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
					    len);
				return -EINVAL;
			}
			pf_param = ptr;
			pf_len = le32_to_cpu(pf_param->len);
			if (pf_len > (int)(len - sizeof(pf_len)) ||
			    pf_len < 0) {
				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
					    pf_len);
				return -EINVAL;
			}
			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
				   pf_len);
			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
					"wow_reason_page_fault packet present",
					"wow_pg_fault ",
					pf_param->data,
					pf_len);
		}
		break;
	default:
		break;
	}

	return 0;
}
7126
ath12k_wmi_event_wow_wakeup_host(struct ath12k_base * ab,struct sk_buff * skb)7127 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
7128 {
7129 struct wmi_wow_ev_arg arg = { };
7130 int ret;
7131
7132 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7133 ath12k_wmi_wow_wakeup_host_parse,
7134 &arg);
7135 if (ret) {
7136 ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
7137 ret);
7138 return;
7139 }
7140
7141 complete(&ab->wow.wakeup_completed);
7142 }
7143
/* Handler for WMI_GTK_OFFLOAD_STATUS_EVENTID: stores the firmware's
 * GTK replay counter on the vif and notifies the supplicant of the
 * rekey via mac80211.
 */
static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	const struct wmi_gtk_offload_status_event *ev;
	struct ath12k_vif *arvif;
	__be64 replay_ctr_be;
	u64 replay_ctr;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch gtk offload status ev");
		kfree(tb);
		return;
	}

	rcu_read_lock();
	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
	if (!arvif) {
		rcu_read_unlock();
		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
			    le32_to_cpu(ev->vdev_id));
		kfree(tb);
		return;
	}

	replay_ctr = le64_to_cpu(ev->replay_ctr);
	arvif->rekey_data.replay_ctr = replay_ctr;
	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
		   le32_to_cpu(ev->refresh_cnt), replay_ctr);

	/* supplicant expects big-endian replay counter */
	replay_ctr_be = cpu_to_be64(replay_ctr);

	ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
				   (void *)&replay_ctr_be, GFP_ATOMIC);

	rcu_read_unlock();

	kfree(tb);
}
7193
/* Top-level WMI event dispatcher: strips the command header from the
 * incoming HTC skb, decodes the event id and routes the payload to the
 * matching handler. The skb is consumed here, except for MGMT_RX whose
 * handler takes ownership.
 */
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);

	/* Drop runt frames that cannot even hold the WMI header */
	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
		goto out;

	switch (id) {
	/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		ath12k_service_ready_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		ath12k_service_ready_ext_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		ath12k_service_ready_ext2_event(ab, skb);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		ath12k_reg_chan_list_event(ab, skb);
		break;
	case WMI_READY_EVENTID:
		ath12k_ready_event(ab, skb);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		ath12k_peer_delete_resp_event(ab, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath12k_vdev_start_resp_event(ab, skb);
		break;
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath12k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath12k_vdev_stopped_event(ab, skb);
		break;
	case WMI_MGMT_RX_EVENTID:
		ath12k_mgmt_rx_event(ab, skb);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath12k_mgmt_tx_compl_event(ab, skb);
		break;
	case WMI_SCAN_EVENTID:
		ath12k_scan_event(ab, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath12k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath12k_roam_event(ab, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath12k_chan_info_event(ab, skb);
		break;
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath12k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath12k_vdev_install_key_compl_event(ab, skb);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		ath12k_service_available_event(ab, skb);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath12k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath12k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath12k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath12k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath12k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_RFKILL_STATE_CHANGE_EVENTID:
		ath12k_rfkill_state_change_event(ab, skb);
		break;
	case WMI_TWT_ENABLE_EVENTID:
		ath12k_wmi_twt_enable_event(ab, skb);
		break;
	case WMI_TWT_DISABLE_EVENTID:
		ath12k_wmi_twt_disable_event(ab, skb);
		break;
	case WMI_P2P_NOA_EVENTID:
		ath12k_wmi_p2p_noa_event(ab, skb);
		break;
	/* add Unsupported events here */
	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "ignoring unsupported event 0x%x\n", id);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath12k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_DIAG_EVENTID:
		ath12k_wmi_diag_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath12k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath12k_wmi_gtk_offload_status_event(ab, skb);
		break;
	/* TODO: Add remaining events */
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
7329
/* Connect one pdev's WMI control endpoint to the HTC layer and record
 * the endpoint id and max message length for later command sends.
 *
 * Improvement: the service id lookup table is read-only, so make it
 * static const instead of rebuilding it on the stack for every call.
 */
static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
					   u32 pdev_idx)
{
	static const u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
				      ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
				      ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
	struct ath12k_htc_svc_conn_req conn_req = {};
	struct ath12k_htc_svc_conn_resp conn_resp = {};
	int status;

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = svc_id[pdev_idx];

	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
	if (status) {
		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;

	return 0;
}
7361
7362 static int
ath12k_wmi_send_unit_test_cmd(struct ath12k * ar,struct wmi_unit_test_cmd ut_cmd,u32 * test_args)7363 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
7364 struct wmi_unit_test_cmd ut_cmd,
7365 u32 *test_args)
7366 {
7367 struct ath12k_wmi_pdev *wmi = ar->wmi;
7368 struct wmi_unit_test_cmd *cmd;
7369 struct sk_buff *skb;
7370 struct wmi_tlv *tlv;
7371 void *ptr;
7372 u32 *ut_cmd_args;
7373 int buf_len, arg_len;
7374 int ret;
7375 int i;
7376
7377 arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
7378 buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
7379
7380 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
7381 if (!skb)
7382 return -ENOMEM;
7383
7384 cmd = (struct wmi_unit_test_cmd *)skb->data;
7385 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
7386 sizeof(ut_cmd));
7387
7388 cmd->vdev_id = ut_cmd.vdev_id;
7389 cmd->module_id = ut_cmd.module_id;
7390 cmd->num_args = ut_cmd.num_args;
7391 cmd->diag_token = ut_cmd.diag_token;
7392
7393 ptr = skb->data + sizeof(ut_cmd);
7394
7395 tlv = ptr;
7396 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
7397
7398 ptr += TLV_HDR_SIZE;
7399
7400 ut_cmd_args = ptr;
7401 for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
7402 ut_cmd_args[i] = test_args[i];
7403
7404 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
7405 "WMI unit test : module %d vdev %d n_args %d token %d\n",
7406 cmd->module_id, cmd->vdev_id, cmd->num_args,
7407 cmd->diag_token);
7408
7409 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
7410
7411 if (ret) {
7412 ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
7413 ret);
7414 dev_kfree_skb(skb);
7415 }
7416
7417 return ret;
7418 }
7419
ath12k_wmi_simulate_radar(struct ath12k * ar)7420 int ath12k_wmi_simulate_radar(struct ath12k *ar)
7421 {
7422 struct ath12k_vif *arvif;
7423 u32 dfs_args[DFS_MAX_TEST_ARGS];
7424 struct wmi_unit_test_cmd wmi_ut;
7425 bool arvif_found = false;
7426
7427 list_for_each_entry(arvif, &ar->arvifs, list) {
7428 if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
7429 arvif_found = true;
7430 break;
7431 }
7432 }
7433
7434 if (!arvif_found)
7435 return -EINVAL;
7436
7437 dfs_args[DFS_TEST_CMDID] = 0;
7438 dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
7439 /* Currently we could pass segment_id(b0 - b1), chirp(b2)
7440 * freq offset (b3 - b10) to unit test. For simulation
7441 * purpose this can be set to 0 which is valid.
7442 */
7443 dfs_args[DFS_TEST_RADAR_PARAM] = 0;
7444
7445 wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
7446 wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
7447 wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
7448 wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
7449
7450 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
7451
7452 return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
7453 }
7454
ath12k_wmi_connect(struct ath12k_base * ab)7455 int ath12k_wmi_connect(struct ath12k_base *ab)
7456 {
7457 u32 i;
7458 u8 wmi_ep_count;
7459
7460 wmi_ep_count = ab->htc.wmi_ep_count;
7461 if (wmi_ep_count > ab->hw_params->max_radios)
7462 return -1;
7463
7464 for (i = 0; i < wmi_ep_count; i++)
7465 ath12k_connect_pdev_htc_service(ab, i);
7466
7467 return 0;
7468 }
7469
/* Tear down per-pdev WMI state; currently only sanity-checks the id
 * since no pdev-specific resources are allocated yet.
 */
static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
{
	if (WARN_ON(pdev_id >= MAX_RADIOS))
		return;

	/* TODO: Deinit any pdev specific wmi resource */
}
7477
/* Initialize the per-pdev WMI handle and link it to the SoC WMI state. */
int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
			   u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi;

	if (pdev_id >= ab->hw_params->max_radios)
		return -EINVAL;

	wmi = &ab->wmi_ab.wmi[pdev_id];
	wmi->wmi_ab = &ab->wmi_ab;
	ab->wmi_ab.ab = ab;

	/* TODO: Init remaining resource specific to pdev */

	return 0;
}
7495
ath12k_wmi_attach(struct ath12k_base * ab)7496 int ath12k_wmi_attach(struct ath12k_base *ab)
7497 {
7498 int ret;
7499
7500 ret = ath12k_wmi_pdev_attach(ab, 0);
7501 if (ret)
7502 return ret;
7503
7504 ab->wmi_ab.ab = ab;
7505 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
7506
7507 /* It's overwritten when service_ext_ready is handled */
7508 if (ab->hw_params->single_pdev_only)
7509 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
7510
7511 /* TODO: Init remaining wmi soc resources required */
7512 init_completion(&ab->wmi_ab.service_ready);
7513 init_completion(&ab->wmi_ab.unified_ready);
7514
7515 return 0;
7516 }
7517
ath12k_wmi_detach(struct ath12k_base * ab)7518 void ath12k_wmi_detach(struct ath12k_base *ab)
7519 {
7520 int i;
7521
7522 /* TODO: Deinit wmi resource specific to SOC as required */
7523
7524 for (i = 0; i < ab->htc.wmi_ep_count; i++)
7525 ath12k_wmi_pdev_detach(ab, i);
7526
7527 ath12k_wmi_free_dbring_caps(ab);
7528 }
7529
ath12k_wmi_hw_data_filter_cmd(struct ath12k * ar,struct wmi_hw_data_filter_arg * arg)7530 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
7531 {
7532 struct wmi_hw_data_filter_cmd *cmd;
7533 struct sk_buff *skb;
7534 int len;
7535
7536 len = sizeof(*cmd);
7537 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7538
7539 if (!skb)
7540 return -ENOMEM;
7541
7542 cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
7543 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
7544 sizeof(*cmd));
7545 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
7546 cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
7547
7548 /* Set all modes in case of disable */
7549 if (arg->enable)
7550 cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
7551 else
7552 cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
7553
7554 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
7555 "wmi hw data filter enable %d filter_bitmap 0x%x\n",
7556 arg->enable, arg->hw_filter_bitmap);
7557
7558 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
7559 }
7560
ath12k_wmi_wow_host_wakeup_ind(struct ath12k * ar)7561 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
7562 {
7563 struct wmi_wow_host_wakeup_cmd *cmd;
7564 struct sk_buff *skb;
7565 size_t len;
7566
7567 len = sizeof(*cmd);
7568 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7569 if (!skb)
7570 return -ENOMEM;
7571
7572 cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
7573 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
7574 sizeof(*cmd));
7575
7576 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
7577
7578 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
7579 }
7580
ath12k_wmi_wow_enable(struct ath12k * ar)7581 int ath12k_wmi_wow_enable(struct ath12k *ar)
7582 {
7583 struct wmi_wow_enable_cmd *cmd;
7584 struct sk_buff *skb;
7585 int len;
7586
7587 len = sizeof(*cmd);
7588 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7589 if (!skb)
7590 return -ENOMEM;
7591
7592 cmd = (struct wmi_wow_enable_cmd *)skb->data;
7593 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
7594 sizeof(*cmd));
7595
7596 cmd->enable = cpu_to_le32(1);
7597 cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
7598 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
7599
7600 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
7601 }
7602
/* Add or remove a single WoW wakeup event for a vdev.
 *
 * @enable non-zero adds the event, zero removes it.
 */
int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
				    enum wmi_wow_wakeup_event event,
				    u32 enable)
{
	struct wmi_wow_add_del_event_cmd *cmd;
	struct sk_buff *skb;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->is_add = cpu_to_le32(enable);
	/* event is a bit position in the firmware wakeup bitmap */
	cmd->event_bitmap = cpu_to_le32((1 << event));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
		   wow_wakeup_event(event), enable, vdev_id);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
}
7628
/* Install a WoW bitmap wakeup pattern on @vdev_id.
 *
 * The command carries one populated bitmap pattern TLV followed by the
 * mandatory (empty) ipv4 sync, ipv6 sync, magic and info-timeout TLV
 * arrays and a single-element ratelimit interval TLV; the TLV order
 * matches the firmware ABI and must not change.
 *
 * @pattern and @mask must both be @pattern_len bytes long;
 * @pattern_offset is the byte offset in the frame where matching starts.
 *
 * Returns 0 on success or a negative error value.
 */
int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
			       const u8 *pattern, const u8 *mask,
			       int pattern_len, int pattern_offset)
{
	struct wmi_wow_add_pattern_cmd *cmd;
	struct wmi_wow_bitmap_pattern_params *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	/* pattern_len is caller-supplied and is used as a memcpy length
	 * into the fixed-size patternbuf/bitmaskbuf arrays below; reject
	 * negative or oversized lengths instead of overflowing them.
	 */
	if (pattern_len < 0 ||
	    (size_t)pattern_len > sizeof(bitmap->patternbuf))
		return -EINVAL;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*bitmap) +			/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	/* cmd */
	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pattern_id = cpu_to_le32(pattern_id);
	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));

	ptr += sizeof(*tlv);

	bitmap = ptr;
	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
						    sizeof(*bitmap));
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
	bitmap->pattern_len = cpu_to_le32(pattern_len);
	bitmap->bitmask_len = cpu_to_le32(pattern_len);
	bitmap->pattern_id = cpu_to_le32(pattern_id);

	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
		   vdev_id, pattern_id, pattern_offset, pattern_len);

	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
			bitmap->patternbuf, pattern_len);
	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
			bitmap->bitmaskbuf, pattern_len);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}
7720
/* Remove a previously installed WoW bitmap pattern from a vdev. */
int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
{
	struct wmi_wow_del_pattern_cmd *cmd;
	struct sk_buff *skb;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pattern_id = cpu_to_le32(pattern_id);
	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
}
7744
/* Build (but do not send) the NLO config command that starts PNO
 * (preferred network offload) scanning on a vdev.
 *
 * Layout: fixed wmi_wow_nlo_config_cmd, then a TLV array of
 * nlo_configured_params (one per configured network), then a TLV array
 * of u32 channels taken from network 0.
 *
 * Returns the filled skb, or ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): assumes the caller has validated uc_networks_count,
 * channel_count and each ssid_len against the WMI maxima — confirm at
 * the call site; oversized values would overrun the TLV buffers here.
 */
static struct sk_buff *
ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
				   struct wmi_pno_scan_req_arg *pno)
{
	struct nlo_configured_params *nlo_list;
	size_t len, nlo_list_len, channel_list_len;
	struct wmi_wow_nlo_config_cmd *cmd;
	__le32 *channel_list;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_params(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);

	if (pno->enable_pno_scan_randomization) {
		/* MAC randomization for probe requests sent during PNO */
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_params(nlo_list) */
	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < pno->uc_networks_count; i++) {
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
						     sizeof(*nlo_list));

		nlo_list[i].ssid.valid = cpu_to_le32(1);
		nlo_list[i].ssid.ssid.ssid_len =
			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		/* rssi_threshold of 0 means "no threshold"; values at or
		 * below -300 are treated as invalid and skipped.
		 */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
			nlo_list[i].rssi_cond.rssi =
				cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	/* channel list TLV — channels are taken from network 0 only */
	ptr += nlo_list_len;
	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
	ptr += sizeof(*tlv);
	channel_list = ptr;

	for (i = 0; i < pno->a_networks[0].channel_count; i++)
		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}
7850
/* Build (but do not send) the NLO config command that stops PNO
 * scanning on @vdev_id. Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
							 u32 vdev_id)
{
	struct wmi_wow_nlo_config_cmd *cmd;
	struct sk_buff *skb;
	size_t len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);

	return skb;
}
7873
/* Start or stop PNO on a vdev depending on @pno_scan->enable. */
int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req_arg *pno_scan)
{
	struct sk_buff *skb;

	skb = pno_scan->enable ?
	      ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan) :
	      ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);

	/* Both generators fail only with ERR_PTR(-ENOMEM). */
	if (IS_ERR_OR_NULL(skb))
		return -ENOMEM;

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}
7889
/* Append an NS (neighbour solicitation) offload TLV array at *ptr and
 * advance *ptr past it.
 *
 * With @ext false this emits the first WMI_MAX_NS_OFFLOADS tuples; with
 * @ext true it emits the overflow tuples [WMI_MAX_NS_OFFLOADS,
 * ipv6_count). When @enable is false the tuples are emitted with no
 * valid flags set, which clears the offload in firmware.
 *
 * NOTE(review): with ext=true this assumes ipv6_count >
 * WMI_MAX_NS_OFFLOADS (ns_ext_tuples would underflow otherwise) —
 * confirm callers only pass ext=true in that case.
 */
static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
				       struct wmi_arp_ns_offload_arg *offload,
				       void **ptr,
				       bool enable,
				       bool ext)
{
	struct wmi_ns_offload_params *ns;
	struct wmi_tlv *tlv;
	void *buf_ptr = *ptr;
	u32 ns_cnt, ns_ext_tuples;
	int i, max_offloads;

	ns_cnt = offload->ipv6_count;

	tlv = buf_ptr;

	if (ext) {
		/* overflow tuples beyond the first WMI_MAX_NS_OFFLOADS */
		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 ns_ext_tuples * sizeof(*ns));
		i = WMI_MAX_NS_OFFLOADS;
		max_offloads = offload->ipv6_count;
	} else {
		/* base array is always full-size; unused slots stay invalid */
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
		i = 0;
		max_offloads = WMI_MAX_NS_OFFLOADS;
	}

	buf_ptr += sizeof(*tlv);

	for (; i < max_offloads; i++) {
		ns = buf_ptr;
		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
							sizeof(*ns));

		if (enable) {
			/* only tuples with a configured address are valid */
			if (i < ns_cnt)
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);

			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);

			if (offload->ipv6_type[i])
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);

			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);

			if (!is_zero_ether_addr(ns->target_mac.addr))
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
				   "wmi index %d ns_solicited %pI6 target %pI6",
				   i, ns->solicitation_ipaddr,
				   ns->target_ipaddr[0]);
		}

		buf_ptr += sizeof(*ns);
	}

	/* hand the advanced cursor back to the caller */
	*ptr = buf_ptr;
}
7952
/* Append the ARP offload TLV array at *ptr and advance *ptr past it.
 * All WMI_MAX_ARP_OFFLOADS slots are always emitted; only the first
 * ipv4_count are marked valid when @enable is set.
 */
static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
					struct wmi_arp_ns_offload_arg *offload,
					void **ptr,
					bool enable)
{
	struct wmi_arp_offload_params *arp;
	struct wmi_tlv *tlv;
	void *buf = *ptr;
	int i;

	/* fill arp tuple */
	tlv = buf;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
	buf += sizeof(*tlv);

	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
		arp = buf;
		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
							 sizeof(*arp));

		if (enable && i < offload->ipv4_count) {
			/* Copy the target ip addr and flags */
			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
				   arp->target_ipaddr);
		}

		buf += sizeof(*arp);
	}

	*ptr = buf;
}
7988
/* Configure ARP and NS (IPv6 neighbour solicitation) offload for a vdev.
 *
 * The command always carries WMI_MAX_NS_OFFLOADS NS tuples and
 * WMI_MAX_ARP_OFFLOADS ARP tuples; if more IPv6 addresses are
 * configured than WMI_MAX_NS_OFFLOADS, the remainder is sent in an
 * additional extended NS TLV array.
 *
 * @enable false clears the offloads in firmware.
 * Returns 0 on success or a negative error value.
 * NOTE(review): ns_cnt is u8 while ipv6_count is wider — assumes the
 * caller bounds ipv6_count below 256; confirm at the call site.
 */
int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
			      struct ath12k_vif *arvif,
			      struct wmi_arp_ns_offload_arg *offload,
			      bool enable)
{
	struct wmi_set_arp_ns_offload_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *buf_ptr;
	size_t len;
	u8 ns_cnt, ns_ext_tuples = 0;

	ns_cnt = offload->ipv6_count;

	/* fixed command + full-size NS array + full-size ARP array */
	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
	      sizeof(*tlv) +
	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);

	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
		len += sizeof(*tlv) +
		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
	}

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	buf_ptr = skb->data;
	cmd = buf_ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
						 sizeof(*cmd));
	cmd->flags = cpu_to_le32(0);
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);

	buf_ptr += sizeof(*cmd);

	/* TLV order is fixed by the firmware ABI: NS, ARP, then extended NS.
	 * Use bool literals for the bool 'ext' parameter.
	 */
	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);

	if (ns_ext_tuples)
		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}
8037
/* Enable or disable GTK rekey offload on a vdev. When enabling, the
 * KCK/KEK and the current replay counter cached in arvif->rekey_data
 * are handed to firmware.
 */
int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
				 struct ath12k_vif *arvif, bool enable)
{
	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct sk_buff *skb;
	__le64 replay_ctr;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);

	if (!enable) {
		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
	} else {
		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);

		/* the length in rekey_data and cmd is equal */
		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));

		/* replay counter goes out little-endian */
		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
		memcpy(cmd->replay_ctr, &replay_ctr, sizeof(replay_ctr));
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
		   arvif->vdev_id, enable);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}
8074
ath12k_wmi_gtk_rekey_getinfo(struct ath12k * ar,struct ath12k_vif * arvif)8075 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
8076 struct ath12k_vif *arvif)
8077 {
8078 struct wmi_gtk_rekey_offload_cmd *cmd;
8079 struct sk_buff *skb;
8080 int len;
8081
8082 len = sizeof(*cmd);
8083 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8084 if (!skb)
8085 return -ENOMEM;
8086
8087 cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
8088 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
8089 cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
8090 cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
8091
8092 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
8093 arvif->vdev_id);
8094 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
8095 }
8096
ath12k_wmi_sta_keepalive(struct ath12k * ar,const struct wmi_sta_keepalive_arg * arg)8097 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
8098 const struct wmi_sta_keepalive_arg *arg)
8099 {
8100 struct wmi_sta_keepalive_arp_resp_params *arp;
8101 struct ath12k_wmi_pdev *wmi = ar->wmi;
8102 struct wmi_sta_keepalive_cmd *cmd;
8103 struct sk_buff *skb;
8104 size_t len;
8105
8106 len = sizeof(*cmd) + sizeof(*arp);
8107 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
8108 if (!skb)
8109 return -ENOMEM;
8110
8111 cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
8112 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
8113 cmd->vdev_id = cpu_to_le32(arg->vdev_id);
8114 cmd->enabled = cpu_to_le32(arg->enabled);
8115 cmd->interval = cpu_to_le32(arg->interval);
8116 cmd->method = cpu_to_le32(arg->method);
8117
8118 arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
8119 arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
8120 sizeof(*arp));
8121 if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
8122 arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
8123 arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
8124 arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
8125 ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
8126 }
8127
8128 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
8129 "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
8130 arg->vdev_id, arg->enabled, arg->method, arg->interval);
8131
8132 return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
8133 }
8134