1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Broadcom Dongle Host Driver (DHD), common DHD core.
4 *
5 * Copyright (C) 1999-2019, Broadcom.
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: dhd_common.c 826445 2019-06-20 04:47:47Z $
29 */
30 #include <typedefs.h>
31 #include <osl.h>
32
33 #include <epivers.h>
34 #include <bcmutils.h>
35 #include <bcmstdlib_s.h>
36
37 #include <bcmendian.h>
38 #include <dngl_stats.h>
39 #include <dhd.h>
40 #include <dhd_ip.h>
41 #include <bcmevent.h>
42 #include <dhdioctl.h>
43
44 #ifdef PCIE_FULL_DONGLE
45 #include <bcmmsgbuf.h>
46 #endif /* PCIE_FULL_DONGLE */
47
48 #ifdef SHOW_LOGTRACE
49 #include <event_log.h>
50 #endif /* SHOW_LOGTRACE */
51
52 #ifdef BCMPCIE
53 #include <dhd_flowring.h>
54 #endif // endif
55
56 #include <dhd_bus.h>
57 #include <dhd_proto.h>
58 #include <bcmsdbus.h>
59 #include <dhd_dbg.h>
60 #include <802.1d.h>
61 #include <dhd_debug.h>
62 #include <dhd_dbg_ring.h>
63 #include <dhd_mschdbg.h>
64 #include <msgtrace.h>
65 #include <dhd_config.h>
66 #include <wl_android.h>
67
68 #ifdef WL_CFG80211
69 #include <wl_cfg80211.h>
70 #endif // endif
71 #if defined(PNO_SUPPORT)
72 #include <dhd_pno.h>
73 #endif /* PNO_SUPPORT */
74 #ifdef RTT_SUPPORT
75 #include <dhd_rtt.h>
76 #endif // endif
77
78 #ifdef DNGL_EVENT_SUPPORT
79 #include <dnglevent.h>
80 #endif // endif
81
82 #define htod32(i) (i)
83 #define htod16(i) (i)
84 #define dtoh32(i) (i)
85 #define dtoh16(i) (i)
86 #define htodchanspec(i) (i)
87 #define dtohchanspec(i) (i)
88
89 #ifdef PROP_TXSTATUS
90 #include <wlfc_proto.h>
91 #include <dhd_wlfc.h>
92 #endif // endif
93
94 #if defined(DHD_POST_EAPOL_M1_AFTER_ROAM_EVT)
95 #include <dhd_linux.h>
96 #endif // endif
97
98 #ifdef DHD_L2_FILTER
99 #include <dhd_l2_filter.h>
100 #endif /* DHD_L2_FILTER */
101
102 #ifdef DHD_PSTA
103 #include <dhd_psta.h>
104 #endif /* DHD_PSTA */
105
106 #ifdef DHD_WET
107 #include <dhd_wet.h>
108 #endif /* DHD_WET */
109
110 #ifdef DHD_LOG_DUMP
111 #include <dhd_dbg.h>
112 #endif /* DHD_LOG_DUMP */
113
114 #ifdef DHD_LOG_PRINT_RATE_LIMIT
115 int log_print_threshold = 0;
116 #endif /* DHD_LOG_PRINT_RATE_LIMIT */
117 int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;// | DHD_EVENT_VAL
118 /* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
119 // | DHD_PKT_MON_VAL;
120
121 #if defined(WL_WIRELESS_EXT)
122 #include <wl_iw.h>
123 #endif // endif
124
125 #ifdef DHD_ULP
126 #include <dhd_ulp.h>
127 #endif /* DHD_ULP */
128
129 #ifdef DHD_DEBUG
130 #include <sdiovar.h>
131 #endif /* DHD_DEBUG */
132
133 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
134 #include <linux/pm_runtime.h>
135 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
136
137 #ifdef CSI_SUPPORT
138 #include <dhd_csi.h>
139 #endif /* CSI_SUPPORT */
140
141 #ifdef SOFTAP
142 char fw_path2[MOD_PARAM_PATHLEN];
143 extern bool softap_enabled;
144 #endif // endif
145
146 #ifdef SHOW_LOGTRACE
147 #define BYTES_AHEAD_NUM 10 /* the address in the map file precedes the matched string by this many bytes */
148 #define READ_NUM_BYTES 1000 /* number of bytes to read from the map file per pass */
149 #define GO_BACK_FILE_POS_NUM_BYTES 100 /* move the file position back by this many bytes between passes */
150 static char *ramstart_str = " text_start"; /* map file string that carries the ramstart address */
151 static char *rodata_start_str = " rodata_start"; /* map file string that carries the rodata start address */
152 static char *rodata_end_str = " rodata_end"; /* map file string that carries the rodata end address */
153 #define RAMSTART_BIT 0x01
154 #define RDSTART_BIT 0x02
155 #define RDEND_BIT 0x04
156 #define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
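/*
 * Illustrative sketch (not part of the driver build): how the marker strings
 * and bit flags above could be combined while scanning a map file line of
 * the assumed form "<hex addr> <symbol>" for the ramstart and rodata
 * boundaries. The real map-file scanner lives elsewhere in the driver.
 */
#if 0 /* example only */
static uint32
dhd_example_match_map_line(char *line, uint32 *addr, uint32 seen)
{
	char *end = NULL;
	uint32 val = (uint32)bcm_strtoul(line, &end, 16);

	BCM_REFERENCE(end);

	if (bcmstrstr(line, ramstart_str)) {
		*addr = val;
		seen |= RAMSTART_BIT;
	} else if (bcmstrstr(line, rodata_start_str)) {
		*addr = val;
		seen |= RDSTART_BIT;
	} else if (bcmstrstr(line, rodata_end_str)) {
		*addr = val;
		seen |= RDEND_BIT;
	}

	return seen;	/* caller stops scanning once seen == ALL_MAP_VAL */
}
#endif /* example only */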
157 #endif /* SHOW_LOGTRACE */
158
159 #ifdef SHOW_LOGTRACE
160 /* the fw file path is taken from either the module parameter at
161 * insmod time or is defined as a constant of different values
162 * for different platforms
163 */
164 extern char *st_str_file_path;
165 #endif /* SHOW_LOGTRACE */
166
167 #define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
168
169 #ifdef EWP_EDL
170 typedef struct msg_hdr_edl {
171 uint32 infobuf_ver;
172 info_buf_payload_hdr_t pyld_hdr;
173 msgtrace_hdr_t trace_hdr;
174 } msg_hdr_edl_t;
175 #endif /* EWP_EDL */
176
177 /* Last connection success/failure status */
178 uint32 dhd_conn_event;
179 uint32 dhd_conn_status;
180 uint32 dhd_conn_reason;
181
182 extern int dhd_iscan_request(void * dhdp, uint16 action);
183 extern void dhd_ind_scan_confirm(void *h, bool status);
184 extern int dhd_iscan_in_progress(void *h);
185 void dhd_iscan_lock(void);
186 void dhd_iscan_unlock(void);
187 extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
188 #if !defined(AP) && defined(WLP2P)
189 extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
190 #endif // endif
191
192 extern int dhd_socram_dump(struct dhd_bus *bus);
193 extern void dhd_set_packet_filter(dhd_pub_t *dhd);
194
195 #ifdef DNGL_EVENT_SUPPORT
196 static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
197 bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
198 static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
199 size_t pktlen);
200 #endif /* DNGL_EVENT_SUPPORT */
201
202 #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
203
204 bool ap_cfg_running = FALSE;
205 bool ap_fw_loaded = FALSE;
206
207 /* Version string to report */
208 #ifdef DHD_DEBUG
209 #ifndef SRCBASE
210 #define SRCBASE "drivers/net/wireless/bcmdhd"
211 #endif // endif
212 #define DHD_COMPILED "\nCompiled in " SRCBASE
213 #endif /* DHD_DEBUG */
214
215 #define CHIPID_MISMATCH 8
216
217 #if defined(DHD_DEBUG)
218 const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
219 #else
220 const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR;
221 #endif // endif
222 char fw_version[FW_VER_STR_LEN] = "\0";
223 char clm_version[CLM_VER_STR_LEN] = "\0";
224
225 char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
226
227 void dhd_set_timer(void *bus, uint wdtick);
228
229 static char* ioctl2str(uint32 ioctl);
230
231 /* IOVar table */
232 enum {
233 IOV_VERSION = 1,
234 IOV_WLMSGLEVEL,
235 IOV_MSGLEVEL,
236 IOV_BCMERRORSTR,
237 IOV_BCMERROR,
238 IOV_WDTICK,
239 IOV_DUMP,
240 IOV_CLEARCOUNTS,
241 IOV_LOGDUMP,
242 IOV_LOGCAL,
243 IOV_LOGSTAMP,
244 IOV_GPIOOB,
245 IOV_IOCTLTIMEOUT,
246 IOV_CONS,
247 IOV_DCONSOLE_POLL,
248 #if defined(DHD_DEBUG)
249 IOV_DHD_JOIN_TIMEOUT_DBG,
250 IOV_SCAN_TIMEOUT,
251 IOV_MEM_DEBUG,
252 #ifdef BCMPCIE
253 IOV_FLOW_RING_DEBUG,
254 #endif /* BCMPCIE */
255 #endif /* defined(DHD_DEBUG) */
256 #ifdef PROP_TXSTATUS
257 IOV_PROPTXSTATUS_ENABLE,
258 IOV_PROPTXSTATUS_MODE,
259 IOV_PROPTXSTATUS_OPT,
260 IOV_PROPTXSTATUS_MODULE_IGNORE,
261 IOV_PROPTXSTATUS_CREDIT_IGNORE,
262 IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
263 IOV_PROPTXSTATUS_RXPKT_CHK,
264 #endif /* PROP_TXSTATUS */
265 IOV_BUS_TYPE,
266 IOV_CHANGEMTU,
267 IOV_HOSTREORDER_FLOWS,
268 #ifdef DHDTCPACK_SUPPRESS
269 IOV_TCPACK_SUPPRESS,
270 #endif /* DHDTCPACK_SUPPRESS */
271 IOV_AP_ISOLATE,
272 #ifdef DHD_L2_FILTER
273 IOV_DHCP_UNICAST,
274 IOV_BLOCK_PING,
275 IOV_PROXY_ARP,
276 IOV_GRAT_ARP,
277 IOV_BLOCK_TDLS,
278 #endif /* DHD_L2_FILTER */
279 IOV_DHD_IE,
280 #ifdef DHD_PSTA
281 IOV_PSTA,
282 #endif /* DHD_PSTA */
283 #ifdef DHD_WET
284 IOV_WET,
285 IOV_WET_HOST_IPV4,
286 IOV_WET_HOST_MAC,
287 #endif /* DHD_WET */
288 IOV_CFG80211_OPMODE,
289 IOV_ASSERT_TYPE,
290 IOV_LMTEST,
291 #ifdef DHD_MCAST_REGEN
292 IOV_MCAST_REGEN_BSS_ENABLE,
293 #endif // endif
294 #ifdef SHOW_LOGTRACE
295 IOV_DUMP_TRACE_LOG,
296 #endif /* SHOW_LOGTRACE */
297 IOV_DONGLE_TRAP_TYPE,
298 IOV_DONGLE_TRAP_INFO,
299 IOV_BPADDR,
300 IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
301 #if defined(DHD_LOG_DUMP)
302 IOV_LOG_DUMP,
303 #endif /* DHD_LOG_DUMP */
304 IOV_TPUT_TEST,
305 IOV_FIS_TRIGGER,
306 IOV_DEBUG_BUF_DEST_STAT,
307 #ifdef DHD_DEBUG
308 IOV_INDUCE_ERROR,
309 #endif /* DHD_DEBUG */
310 #ifdef WL_IFACE_MGMT_CONF
311 #ifdef WL_CFG80211
312 #ifdef WL_NANP2P
313 IOV_CONC_DISC,
314 #endif /* WL_NANP2P */
315 #ifdef WL_IFACE_MGMT
316 IOV_IFACE_POLICY,
317 #endif /* WL_IFACE_MGMT */
318 #endif /* WL_CFG80211 */
319 #endif /* WL_IFACE_MGMT_CONF */
320 #ifdef RTT_GEOFENCE_CONT
321 #if defined(RTT_SUPPORT) && defined(WL_NAN)
322 IOV_RTT_GEOFENCE_TYPE_OVRD,
323 #endif /* RTT_SUPPORT && WL_NAN */
324 #endif /* RTT_GEOFENCE_CONT */
325 IOV_LAST
326 };
327
328 const bcm_iovar_t dhd_iovars[] = {
329 /* name varid flags flags2 type minlen */
330 {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, sizeof(dhd_version)},
331 {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0 },
332 #ifdef DHD_DEBUG
333 {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0},
334 {"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 },
335 #ifdef BCMPCIE
336 {"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
337 #endif /* BCMPCIE */
338 #endif /* DHD_DEBUG */
339 {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN},
340 {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0},
341 {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0},
342 {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN},
343 {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0},
344 {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0},
345 {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0},
346 {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0},
347 {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0},
348 #ifdef PROP_TXSTATUS
349 {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
350 /*
351 set the proptxstatus operation mode:
352 0 - Do not do any proptxstatus flow control
353 1 - Use implied credit from a packet status
354 2 - Use explicit credit
355 */
356 {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 },
357 {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 },
358 {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
359 {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
360 {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
361 {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
362 #endif /* PROP_TXSTATUS */
363 {"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
364 {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
365 {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
366 (WLHOST_REORDERDATA_MAXFLOWS + 1) },
367 #ifdef DHDTCPACK_SUPPRESS
368 {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 },
369 #endif /* DHDTCPACK_SUPPRESS */
370 #ifdef DHD_L2_FILTER
371 {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
372 #endif /* DHD_L2_FILTER */
373 {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
374 #ifdef DHD_L2_FILTER
375 {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
376 {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
377 {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
378 {"block_tdls", IOV_BLOCK_TDLS, (0), 0, IOVT_BOOL, 0},
379 #endif /* DHD_L2_FILTER */
380 {"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
381 #ifdef DHD_PSTA
382 /* PSTA/PSR Mode configuration. 0: DISABLED 1: PSTA 2: PSR */
383 {"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
384 #endif /* DHD_PSTA */
385 #ifdef DHD_WET
386 /* WET Mode configuration. 0: DISABLED 1: WET */
387 {"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
388 {"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
389 {"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
390 #endif /* DHD_WET */
391 {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 },
392 {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
393 {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 },
394 #ifdef DHD_MCAST_REGEN
395 {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
396 #endif // endif
397 #ifdef SHOW_LOGTRACE
398 {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) },
399 #endif /* SHOW_LOGTRACE */
400 {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
401 {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
402 #ifdef DHD_DEBUG
403 {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
404 #endif /* DHD_DEBUG */
405 {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
406 MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
407 #if defined(DHD_LOG_DUMP)
408 {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
409 #endif /* DHD_LOG_DUMP */
410 {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
411 #ifdef DHD_DEBUG
412 {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
413 #endif /* DHD_DEBUG */
414 #ifdef WL_IFACE_MGMT_CONF
415 #ifdef WL_CFG80211
416 #ifdef WL_NANP2P
417 {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
418 #endif /* WL_NANP2P */
419 #ifdef WL_IFACE_MGMT
420 {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
421 #endif /* WL_IFACE_MGMT */
422 #endif /* WL_CFG80211 */
423 #endif /* WL_IFACE_MGMT_CONF */
424 #ifdef RTT_GEOFENCE_CONT
425 #if defined(RTT_SUPPORT) && defined(WL_NAN)
426 {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0},
427 #endif /* RTT_SUPPORT && WL_NAN */
428 #endif /* RTT_GEOFENCE_CONT */
429 {NULL, 0, 0, 0, 0, 0 }
430 };
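/*
 * Illustrative sketch (not part of the driver build): how a dispatcher could
 * locate an entry in the dhd_iovars table above before handing it to
 * dhd_doiovar(). bcm_iovar_lookup() is the generic bcmutils table walker;
 * the lookup name "ptxmode" is only an example.
 */
#if 0 /* example only */
static int
dhd_example_lookup_iovar(const char *name)
{
	const bcm_iovar_t *vi = bcm_iovar_lookup(dhd_iovars, name);

	/* e.g. name = "ptxmode" resolves to IOV_PROPTXSTATUS_MODE */
	return vi ? (int)vi->varid : BCME_UNSUPPORTED;
}
#endif /* example only */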
431
432 #define DHD_IOVAR_BUF_SIZE 128
433
434 bool
435 dhd_query_bus_erros(dhd_pub_t *dhdp)
436 {
437 bool ret = FALSE;
438
439 if (dhdp->dongle_reset) {
440 DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
441 __FUNCTION__));
442 ret = TRUE;
443 }
444
445 if (dhdp->dongle_trap_occured) {
446 DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
447 __FUNCTION__));
448 ret = TRUE;
449 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
450 dhd_os_send_hang_message(dhdp);
451 }
452
453 if (dhdp->iovar_timeout_occured) {
454 DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
455 __FUNCTION__));
456 ret = TRUE;
457 }
458
459 #ifdef PCIE_FULL_DONGLE
460 if (dhdp->d3ack_timeout_occured) {
461 DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
462 __FUNCTION__));
463 ret = TRUE;
464 }
465 if (dhdp->livelock_occured) {
466 DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
467 __FUNCTION__));
468 ret = TRUE;
469 }
470
471 if (dhdp->pktid_audit_failed) {
472 DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
473 __FUNCTION__));
474 ret = TRUE;
475 }
476 #endif /* PCIE_FULL_DONGLE */
477
478 if (dhdp->iface_op_failed) {
479 DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
480 __FUNCTION__));
481 ret = TRUE;
482 }
483
484 if (dhdp->scan_timeout_occurred) {
485 DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
486 __FUNCTION__));
487 ret = TRUE;
488 }
489
490 if (dhdp->scan_busy_occurred) {
491 DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
492 __FUNCTION__));
493 ret = TRUE;
494 }
495
496 #ifdef DNGL_AXI_ERROR_LOGGING
497 if (dhdp->axi_error) {
498 DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
499 __FUNCTION__));
500 ret = TRUE;
501 }
502 #endif /* DNGL_AXI_ERROR_LOGGING */
503
504 if (dhd_bus_get_linkdown(dhdp)) {
505 DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
506 __FUNCTION__));
507 ret = TRUE;
508 }
509
510 if (dhd_bus_get_cto(dhdp)) {
511 DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
512 __FUNCTION__));
513 ret = TRUE;
514 }
515
516 return ret;
517 }
518
519 void
520 dhd_clear_bus_errors(dhd_pub_t *dhdp)
521 {
522 if (!dhdp)
523 return;
524
525 dhdp->dongle_reset = FALSE;
526 dhdp->dongle_trap_occured = FALSE;
527 dhdp->iovar_timeout_occured = FALSE;
528 #ifdef PCIE_FULL_DONGLE
529 dhdp->d3ack_timeout_occured = FALSE;
530 dhdp->livelock_occured = FALSE;
531 dhdp->pktid_audit_failed = FALSE;
532 #endif // endif
533 dhdp->iface_op_failed = FALSE;
534 dhdp->scan_timeout_occurred = FALSE;
535 dhdp->scan_busy_occurred = FALSE;
536 }
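/*
 * Illustrative sketch (not part of the driver build): a hypothetical caller
 * that uses the two helpers above to gate a bus transaction on pending
 * errors and to clear the sticky flags once a recovery path has completed.
 */
#if 0 /* example only */
static int
dhd_example_guarded_bus_op(dhd_pub_t *dhdp)
{
	if (dhd_query_bus_erros(dhdp)) {
		/* a fatal trap/timeout/link error is pending; leave the dongle alone */
		return BCME_ERROR;
	}

	/* ... issue ioctls/iovars to the dongle here ... */

	/* once recovery (e.g. a reinit) has succeeded, reset the sticky flags */
	dhd_clear_bus_errors(dhdp);
	return BCME_OK;
}
#endif /* example only */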
537
538 #ifdef DHD_SSSR_DUMP
539
540 /* This can be overwritten by module parameter defined in dhd_linux.c */
541 uint support_sssr_dump = TRUE;
542
543 int
544 dhd_sssr_mempool_init(dhd_pub_t *dhd)
545 {
546 dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
547 if (dhd->sssr_mempool == NULL) {
548 DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
549 __FUNCTION__));
550 return BCME_ERROR;
551 }
552 return BCME_OK;
553 }
554
555 void
556 dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
557 {
558 if (dhd->sssr_mempool) {
559 MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
560 dhd->sssr_mempool = NULL;
561 }
562 }
563
564 void
565 dhd_dump_sssr_reg_info(sssr_reg_info_v1_t *sssr_reg_info)
566 {
567 }
568
569 int
570 dhd_get_sssr_reg_info(dhd_pub_t *dhd)
571 {
572 int ret;
573 /* get sssr_reg_info from firmware */
574 memset((void *)&dhd->sssr_reg_info, 0, sizeof(dhd->sssr_reg_info));
575 ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)&dhd->sssr_reg_info,
576 sizeof(dhd->sssr_reg_info), FALSE);
577 if (ret < 0) {
578 DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
579 __FUNCTION__, ret));
580 return BCME_ERROR;
581 }
582
583 dhd_dump_sssr_reg_info(&dhd->sssr_reg_info);
584 return BCME_OK;
585 }
586
587 uint32
588 dhd_get_sssr_bufsize(dhd_pub_t *dhd)
589 {
590 int i;
591 uint32 sssr_bufsize = 0;
592 /* Sum up the save/restore region sizes of all D11 cores */
593 for (i = 0; i < MAX_NUM_D11CORES; i++) {
594 sssr_bufsize += dhd->sssr_reg_info.mac_regs[i].sr_size;
595 }
596 sssr_bufsize += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
597
598 /* Double the size as different dumps will be saved before and after SR */
599 sssr_bufsize = 2 * sssr_bufsize;
600
601 return sssr_bufsize;
602 }
603
604 int
605 dhd_sssr_dump_init(dhd_pub_t *dhd)
606 {
607 int i;
608 uint32 sssr_bufsize;
609 uint32 mempool_used = 0;
610
611 dhd->sssr_inited = FALSE;
612
613 if (!support_sssr_dump) {
614 DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
615 return BCME_OK;
616 }
617
618 /* check if sssr mempool is allocated */
619 if (dhd->sssr_mempool == NULL) {
620 DHD_ERROR(("%s: sssr_mempool is not allocated\n",
621 __FUNCTION__));
622 return BCME_ERROR;
623 }
624
625 /* Get SSSR reg info */
626 if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
627 DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
628 return BCME_ERROR;
629 }
630
631 /* Validate structure version */
632 if (dhd->sssr_reg_info.version > SSSR_REG_INFO_VER_1) {
633 DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n",
634 __FUNCTION__, (int)dhd->sssr_reg_info.version, SSSR_REG_INFO_VER));
635 return BCME_ERROR;
636 }
637
638 /* Validate structure length */
639 if (dhd->sssr_reg_info.length < sizeof(sssr_reg_info_v0_t)) {
640 DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n",
641 __FUNCTION__, (int)dhd->sssr_reg_info.length,
642 (int)sizeof(dhd->sssr_reg_info)));
643 return BCME_ERROR;
644 }
645
646 /* validate fifo size */
647 sssr_bufsize = dhd_get_sssr_bufsize(dhd);
648 if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
649 DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
650 __FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
651 return BCME_ERROR;
652 }
653
654 /* init all pointers to NULL */
655 for (i = 0; i < MAX_NUM_D11CORES; i++) {
656 dhd->sssr_d11_before[i] = NULL;
657 dhd->sssr_d11_after[i] = NULL;
658 }
659 dhd->sssr_dig_buf_before = NULL;
660 dhd->sssr_dig_buf_after = NULL;
661
662 /* Allocate memory */
663 for (i = 0; i < MAX_NUM_D11CORES; i++) {
664 if (dhd->sssr_reg_info.mac_regs[i].sr_size) {
665 dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
666 mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
667
668 dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
669 mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
670 }
671 }
672
673 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
674 dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
675 mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
676
677 dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
678 mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
679 } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
680 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
681 dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
682 mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
683
684 dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
685 mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
686 }
687
688 dhd->sssr_inited = TRUE;
689
690 return BCME_OK;
691
692 }
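/*
 * Illustrative sketch (not part of the driver build): the carving scheme used
 * by dhd_sssr_dump_init() above. Each region gets a "before" and an "after"
 * slice of sr_size bytes out of the single preallocated sssr_mempool, which
 * is why dhd_get_sssr_bufsize() doubles the summed sizes.
 */
#if 0 /* example only */
static uint32
dhd_example_sssr_carve(uint8 *mempool, uint32 offset, uint32 sr_size,
	uint32 **before, uint32 **after)
{
	*before = (uint32 *)(mempool + offset);
	offset += sr_size;
	*after = (uint32 *)(mempool + offset);
	offset += sr_size;

	return offset;	/* 2 * sr_size consumed per region */
}
#endif /* example only */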
693
694 void
695 dhd_sssr_dump_deinit(dhd_pub_t *dhd)
696 {
697 int i;
698
699 dhd->sssr_inited = FALSE;
700 /* init all pointers to NULL */
701 for (i = 0; i < MAX_NUM_D11CORES; i++) {
702 dhd->sssr_d11_before[i] = NULL;
703 dhd->sssr_d11_after[i] = NULL;
704 }
705 dhd->sssr_dig_buf_before = NULL;
706 dhd->sssr_dig_buf_after = NULL;
707
708 return;
709 }
710
711 void
712 dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
713 {
714 bool print_info = FALSE;
715 int dump_mode;
716
717 if (!dhd || !path) {
718 DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
719 __FUNCTION__));
720 return;
721 }
722
723 if (!dhd->sssr_dump_collected) {
724 /* SSSR dump is not collected */
725 return;
726 }
727
728 dump_mode = dhd->sssr_dump_mode;
729
730 if (bcmstrstr(path, "core_0_before")) {
731 if (dhd->sssr_d11_outofreset[0] &&
732 dump_mode == SSSR_DUMP_MODE_SSSR) {
733 print_info = TRUE;
734 }
735 } else if (bcmstrstr(path, "core_0_after")) {
736 if (dhd->sssr_d11_outofreset[0]) {
737 print_info = TRUE;
738 }
739 } else if (bcmstrstr(path, "core_1_before")) {
740 if (dhd->sssr_d11_outofreset[1] &&
741 dump_mode == SSSR_DUMP_MODE_SSSR) {
742 print_info = TRUE;
743 }
744 } else if (bcmstrstr(path, "core_1_after")) {
745 if (dhd->sssr_d11_outofreset[1]) {
746 print_info = TRUE;
747 }
748 } else {
749 print_info = TRUE;
750 }
751
752 if (print_info) {
753 DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
754 path, FILE_NAME_HAL_TAG));
755 }
756 }
757 #endif /* DHD_SSSR_DUMP */
758
759 #ifdef DHD_FW_COREDUMP
760 void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
761 {
762 if (!dhd_pub->soc_ram) {
763 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
764 dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
765 DHD_PREALLOC_MEMDUMP_RAM, length);
766 #else
767 dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);
768 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
769 }
770
771 if (dhd_pub->soc_ram == NULL) {
772 DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
773 __FUNCTION__));
774 dhd_pub->soc_ram_length = 0;
775 } else {
776 memset(dhd_pub->soc_ram, 0, length);
777 dhd_pub->soc_ram_length = length;
778 }
779
780 /* soc_ram free handled in dhd_{free,clear} */
781 return dhd_pub->soc_ram;
782 }
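/*
 * Illustrative sketch (not part of the driver build): a hypothetical memdump
 * collection path that asks dhd_get_fwdump_buf() above for a zeroed snapshot
 * buffer of the dongle RAM size. The actual copy of SOC RAM into the buffer
 * is bus specific and is only indicated by a placeholder comment here.
 */
#if 0 /* example only */
static int
dhd_example_collect_memdump(dhd_pub_t *dhd_pub, uint32 ramsize)
{
	uint8 *buf = (uint8 *)dhd_get_fwdump_buf(dhd_pub, ramsize);

	if (buf == NULL)
		return BCME_NOMEM;

	/* ... bus layer would copy 'ramsize' bytes of SOC RAM into 'buf' here ... */
	return BCME_OK;
}
#endif /* example only */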
783 #endif /* DHD_FW_COREDUMP */
784
785 /* to NDIS developer, the structure dhd_common is redundant,
786 * please do NOT merge it back from other branches !!!
787 */
788
789 int
790 dhd_common_socram_dump(dhd_pub_t *dhdp)
791 {
792 #ifdef BCMDBUS
793 return 0;
794 #else
795 return dhd_socram_dump(dhdp->bus);
796 #endif /* BCMDBUS */
797 }
798
799 int
800 dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
801 {
802 struct bcmstrbuf b;
803 struct bcmstrbuf *strbuf = &b;
804
805 if (!dhdp || !dhdp->prot || !buf) {
806 return BCME_ERROR;
807 }
808
809 bcm_binit(strbuf, buf, buflen);
810
811 /* Base DHD info */
812 bcm_bprintf(strbuf, "%s\n", dhd_version);
813 bcm_bprintf(strbuf, "\n");
814 bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
815 dhdp->up, dhdp->txoff, dhdp->busstate);
816 bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
817 dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
818 bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
819 dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
820 bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
821
822 bcm_bprintf(strbuf, "dongle stats:\n");
823 bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
824 dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
825 dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
826 bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
827 dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
828 dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
829 bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
830
831 bcm_bprintf(strbuf, "bus stats:\n");
832 bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
833 dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
834 bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
835 dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
836 bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
837 dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
838 bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
839 dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
840 bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
841 dhdp->rx_readahead_cnt, dhdp->tx_realloc);
842 bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
843 dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
844 bcm_bprintf(strbuf, "tx_big_packets %lu\n",
845 dhdp->tx_big_packets);
846 bcm_bprintf(strbuf, "\n");
847 #ifdef DMAMAP_STATS
848 /* Add DMA MAP info */
849 bcm_bprintf(strbuf, "DMA MAP stats: \n");
850 bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
851 dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
852 dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
853 #ifndef IOCTLRESP_USE_CONSTMEM
854 bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
855 dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
856 #endif /* !IOCTLRESP_USE_CONSTMEM */
857 bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
858 "TSBUF RX: %lu size %luK\n",
859 dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
860 dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
861 dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
862 bcm_bprintf(strbuf, "Total : %luK \n",
863 KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
864 dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
865 dhdp->dma_stats.tsbuf_rx_sz));
866 #endif /* DMAMAP_STATS */
867 bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
868 /* Add any prot info */
869 dhd_prot_dump(dhdp, strbuf);
870 bcm_bprintf(strbuf, "\n");
871
872 /* Add any bus info */
873 dhd_bus_dump(dhdp, strbuf);
874
875 #if defined(DHD_LB_STATS)
876 dhd_lb_stats_dump(dhdp, strbuf);
877 #endif /* DHD_LB_STATS */
878 #ifdef DHD_WET
879 if (dhd_get_wet_mode(dhdp)) {
880 bcm_bprintf(strbuf, "Wet Dump:\n");
881 dhd_wet_dump(dhdp, strbuf);
882 }
883 #endif /* DHD_WET */
884
885 /* return remaining buffer length */
886 return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
887 }
888
889 void
890 dhd_dump_to_kernelog(dhd_pub_t *dhdp)
891 {
892 char buf[512];
893
894 DHD_ERROR(("F/W version: %s\n", fw_version));
895 bcm_bprintf_bypass = TRUE;
896 dhd_dump(dhdp, buf, sizeof(buf));
897 bcm_bprintf_bypass = FALSE;
898 }
899
900 int
901 dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
902 {
903 wl_ioctl_t ioc;
904
905 ioc.cmd = cmd;
906 ioc.buf = arg;
907 ioc.len = len;
908 ioc.set = set;
909
910 return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
911 }
912
913 int
914 dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
915 int cmd, uint8 set, int ifidx)
916 {
917 char iovbuf[WLC_IOCTL_SMLEN];
918 int ret = -1;
919
920 memset(iovbuf, 0, sizeof(iovbuf));
921 if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
922 ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
923 if (!ret) {
924 *pval = ltoh32(*((uint*)iovbuf));
925 } else {
926 DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
927 __FUNCTION__, name, ret));
928 }
929 } else {
930 DHD_ERROR(("%s: mkiovar %s failed\n",
931 __FUNCTION__, name));
932 }
933
934 return ret;
935 }
936
937 int
938 dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
939 int cmd, uint8 set, int ifidx)
940 {
941 char iovbuf[WLC_IOCTL_SMLEN];
942 int ret = -1;
943 int lval = htol32(val);
944 uint len;
945
946 len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
947
948 if (len) {
949 ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
950 if (ret) {
951 DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
952 __FUNCTION__, name, ret));
953 }
954 } else {
955 DHD_ERROR(("%s: mkiovar %s failed\n",
956 __FUNCTION__, name));
957 }
958
959 return ret;
960 }
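/*
 * Illustrative sketch (not part of the driver build): typical use of the two
 * int-iovar wrappers above to read and then update a firmware integer iovar.
 * The iovar name "mpc" is only an example of a common dongle integer iovar.
 */
#if 0 /* example only */
static void
dhd_example_int_iovar(dhd_pub_t *dhd_pub)
{
	uint val = 0;

	/* GET path: cmd = WLC_GET_VAR, set = FALSE */
	if (dhd_wl_ioctl_get_intiovar(dhd_pub, "mpc", &val, WLC_GET_VAR, FALSE, 0) == 0) {
		DHD_ERROR(("mpc is currently %u\n", val));
	}

	/* SET path: cmd = WLC_SET_VAR, set = TRUE */
	(void)dhd_wl_ioctl_set_intiovar(dhd_pub, "mpc", 0, WLC_SET_VAR, TRUE, 0);
}
#endif /* example only */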
961
962 static struct ioctl2str_s {
963 uint32 ioctl;
964 char *name;
965 } ioctl2str_array[] = {
966 {WLC_UP, "UP"},
967 {WLC_DOWN, "DOWN"},
968 {WLC_SET_PROMISC, "SET_PROMISC"},
969 {WLC_SET_INFRA, "SET_INFRA"},
970 {WLC_SET_AUTH, "SET_AUTH"},
971 {WLC_SET_SSID, "SET_SSID"},
972 {WLC_RESTART, "RESTART"},
973 {WLC_SET_CHANNEL, "SET_CHANNEL"},
974 {WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
975 {WLC_SET_KEY, "SET_KEY"},
976 {WLC_SCAN, "SCAN"},
977 {WLC_DISASSOC, "DISASSOC"},
978 {WLC_REASSOC, "REASSOC"},
979 {WLC_SET_COUNTRY, "SET_COUNTRY"},
980 {WLC_SET_WAKE, "SET_WAKE"},
981 {WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
982 {WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
983 {WLC_SET_WSEC, "SET_WSEC"},
984 {WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
985 {WLC_SET_RADAR, "SET_RADAR"},
986 {0, NULL}
987 };
988
989 static char *
990 ioctl2str(uint32 ioctl)
991 {
992 struct ioctl2str_s *p = ioctl2str_array;
993
994 while (p->name != NULL) {
995 if (p->ioctl == ioctl) {
996 return p->name;
997 }
998 p++;
999 }
1000
1001 return "";
1002 }
1003
1004 /**
1005 * @param ioc IO control struct, members are partially used by this function.
1006 * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
1007 * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
1008 */
1009 int
1010 dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
1011 {
1012 int ret = BCME_ERROR;
1013 unsigned long flags;
1014 #ifdef DUMP_IOCTL_IOV_LIST
1015 dhd_iov_li_t *iov_li;
1016 #endif /* DUMP_IOCTL_IOV_LIST */
1017 int hostsleep_set = 0;
1018 int hostsleep_val = 0;
1019
1020 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1021 DHD_OS_WAKE_LOCK(dhd_pub);
1022 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
1023 DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
1024 DHD_OS_WAKE_UNLOCK(dhd_pub);
1025 return BCME_ERROR;
1026 }
1027 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1028
1029 #ifdef KEEPIF_ON_DEVICE_RESET
1030 if (ioc->cmd == WLC_GET_VAR) {
1031 dbus_config_t config;
1032 config.general_param = 0;
1033 if (buf) {
1034 if (!strcmp(buf, "wowl_activate")) {
1035 /* 1 (TRUE) after decreased by 1 */
1036 config.general_param = 2;
1037 } else if (!strcmp(buf, "wowl_clear")) {
1038 /* 0 (FALSE) after decreased by 1 */
1039 config.general_param = 1;
1040 }
1041 }
1042 if (config.general_param) {
1043 config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
1044 config.general_param--;
1045 dbus_set_config(dhd_pub->dbus, &config);
1046 }
1047 }
1048 #endif /* KEEPIF_ON_DEVICE_RESET */
1049
1050 if (dhd_os_proto_block(dhd_pub))
1051 {
1052 #ifdef DHD_LOG_DUMP
1053 int slen, val, lval, min_len;
1054 char *msg, tmp[64];
1055
1056 /* WLC_GET_VAR */
1057 if (ioc->cmd == WLC_GET_VAR && buf) {
1058 min_len = MIN(sizeof(tmp) - 1, strlen(buf));
1059 memset(tmp, 0, sizeof(tmp));
1060 bcopy(buf, tmp, min_len);
1061 tmp[min_len] = '\0';
1062 }
1063 #endif /* DHD_LOG_DUMP */
1064
1065 #ifdef DHD_DISCONNECT_TRACE
1066 if ((WLC_DISASSOC == ioc->cmd) || (WLC_DOWN == ioc->cmd) ||
1067 (WLC_DISASSOC_MYAP == ioc->cmd)) {
1068 DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
1069 }
1070 #endif /* DHD_DISCONNECT_TRACE */
1071
1072 /* logging of iovars that are sent to the dongle, ./dhd msglevel +iovar */
1073 if (ioc->set == TRUE) {
1074 char *pars = (char *)buf; // points at user buffer
1075 if (ioc->cmd == WLC_SET_VAR && buf) {
1076 DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
1077 if (ioc->len > 1 + sizeof(uint32)) {
1078 // skip iovar name:
1079 pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
1080 pars++; // skip NULL character
1081 }
1082 } else {
1083 DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
1084 ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
1085 }
1086 if (pars != NULL) {
1087 DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
1088 } else {
1089 DHD_DNGL_IOVAR_SET((" NULL\n"));
1090 }
1091 }
1092
1093 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1094 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
1095 DHD_INFO(("%s: returning as busstate=%d\n",
1096 __FUNCTION__, dhd_pub->busstate));
1097 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1098 dhd_os_proto_unblock(dhd_pub);
1099 return -ENODEV;
1100 }
1101 DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
1102 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1103
1104 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1105 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
1106 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
1107 __FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
1108 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
1109 dhd_os_busbusy_wake(dhd_pub);
1110 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1111 dhd_os_proto_unblock(dhd_pub);
1112 return -ENODEV;
1113 }
1114 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1115
1116 #ifdef DUMP_IOCTL_IOV_LIST
1117 if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
1118 if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
1119 DHD_ERROR(("iovar dump list item allocation Failed\n"));
1120 } else {
1121 iov_li->cmd = ioc->cmd;
1122 if (buf)
1123 bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
1124 dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
1125 &iov_li->list);
1126 }
1127 }
1128 #endif /* DUMP_IOCTL_IOV_LIST */
1129
1130 if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len,
1131 &hostsleep_set, &hostsleep_val, &ret))
1132 goto exit;
1133 ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
1134 dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret);
1135
1136 #ifdef DUMP_IOCTL_IOV_LIST
1137 if (ret == -ETIMEDOUT) {
1138 DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
1139 IOV_LIST_MAX_LEN));
1140 dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
1141 }
1142 #endif /* DUMP_IOCTL_IOV_LIST */
1143 #ifdef DHD_LOG_DUMP
1144 if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
1145 buf != NULL) {
1146 if (buf) {
1147 lval = 0;
1148 slen = strlen(buf) + 1;
1149 msg = (char*)buf;
1150 if (len >= slen + sizeof(lval)) {
1151 if (ioc->cmd == WLC_GET_VAR) {
1152 msg = tmp;
1153 lval = *(int*)buf;
1154 } else {
1155 min_len = MIN(ioc->len - slen, sizeof(int));
1156 bcopy((msg + slen), &lval, min_len);
1157 }
1158 if (!strncmp(msg, "cur_etheraddr",
1159 strlen("cur_etheraddr"))) {
1160 lval = 0;
1161 }
1162 }
1163 DHD_IOVAR_MEM((
1164 "%s: cmd: %d, msg: %s val: 0x%x,"
1165 " len: %d, set: %d, txn-id: %d\n",
1166 ioc->cmd == WLC_GET_VAR ?
1167 "WLC_GET_VAR" : "WLC_SET_VAR",
1168 ioc->cmd, msg, lval, ioc->len, ioc->set,
1169 dhd_prot_get_ioctl_trans_id(dhd_pub)));
1170 } else {
1171 DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
1172 ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
1173 ioc->cmd, ioc->len, ioc->set,
1174 dhd_prot_get_ioctl_trans_id(dhd_pub)));
1175 }
1176 } else {
1177 slen = ioc->len;
1178 if (buf != NULL && slen != 0) {
1179 if (slen >= 4) {
1180 val = *(int*)buf;
1181 } else if (slen >= 2) {
1182 val = *(short*)buf;
1183 } else {
1184 val = *(char*)buf;
1185 }
1186 /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
1187 if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION)
1188 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
1189 "set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
1190 } else {
1191 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
1192 }
1193 }
1194 #endif /* DHD_LOG_DUMP */
1195 if (ret && dhd_pub->up) {
1196 /* Send hang event only if dhd_open() was success */
1197 dhd_os_check_hang(dhd_pub, ifidx, ret);
1198 }
1199
1200 if (ret == -ETIMEDOUT && !dhd_pub->up) {
1201 DHD_ERROR(("%s: 'resumed on timeout' error "
1202 "occurred before the interface was"
1203 " brought up\n", __FUNCTION__));
1204 }
1205
1206 exit:
1207 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1208 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
1209 dhd_os_busbusy_wake(dhd_pub);
1210 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1211
1212 dhd_os_proto_unblock(dhd_pub);
1213
1214 }
1215
1216 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1217 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
1218 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));
1219
1220 DHD_OS_WAKE_UNLOCK(dhd_pub);
1221 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1222
1223 #ifdef WL_MONITOR
1224 /* Intercept monitor ioctl here, add/del monitor if */
1225 if (ret == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
1226 int val = 0;
1227 if (buf != NULL && len != 0) {
1228 if (len >= 4) {
1229 val = *(int*)buf;
1230 } else if (len >= 2) {
1231 val = *(short*)buf;
1232 } else {
1233 val = *(char*)buf;
1234 }
1235 }
1236 dhd_set_monitor(dhd_pub, ifidx, val);
1237 }
1238 #endif /* WL_MONITOR */
1239
1240 return ret;
1241 }
1242
1243 uint wl_get_port_num(wl_io_pport_t *io_pport)
1244 {
1245 return 0;
1246 }
1247
1248 /* Get bssidx from iovar params
1249 * Input: dhd_pub - pointer to dhd_pub_t
1250 * params - IOVAR params
1251 * Output: idx - BSS index
1252 * val - pointer to the IOVAR arguments
1253 */
1254 static int
1255 dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
1256 {
1257 char *prefix = "bsscfg:";
1258 uint32 bssidx;
1259
1260 if (!(strncmp(params, prefix, strlen(prefix)))) {
1261 /* per bss setting should be prefixed with 'bsscfg:' */
1262 const char *p = params + strlen(prefix);
1263
1264 /* Skip Name */
1265 while (*p != '\0')
1266 p++;
1267 /* consider null */
1268 p = p + 1;
1269 bcopy(p, &bssidx, sizeof(uint32));
1270 /* Get corresponding dhd index */
1271 bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
1272
1273 if (bssidx >= DHD_MAX_IFS) {
1274 DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
1275 return BCME_ERROR;
1276 }
1277
1278 /* skip bss idx */
1279 p += sizeof(uint32);
1280 *val = p;
1281 *idx = bssidx;
1282 } else {
1283 DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
1284 return BCME_ERROR;
1285 }
1286
1287 return BCME_OK;
1288 }
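/*
 * Illustrative sketch (not part of the driver build): the buffer layout that
 * dhd_iovar_parse_bssidx() above expects for per-BSS iovars, i.e.
 * "bsscfg:<name>\0<uint32 bssidx><iovar args>". The iovar name "ap_isolate"
 * and the helper itself are purely illustrative.
 */
#if 0 /* example only */
static int
dhd_example_build_bsscfg_iovar(char *buf, int buflen, uint32 bssidx)
{
	const char *prefix = "bsscfg:";
	const char *name = "ap_isolate";
	int off = 0;
	int need = (int)(strlen(prefix) + strlen(name) + 1 + sizeof(uint32));

	if (buflen < need)
		return BCME_BUFTOOSHORT;

	/* "bsscfg:" prefix, then the iovar name including its terminating NUL */
	memcpy(buf + off, prefix, strlen(prefix));
	off += (int)strlen(prefix);
	memcpy(buf + off, name, strlen(name) + 1);
	off += (int)strlen(name) + 1;
	/* 32-bit bss index right after the NUL; the iovar arguments follow it */
	memcpy(buf + off, &bssidx, sizeof(bssidx));
	off += (int)sizeof(bssidx);

	return off;	/* offset at which the iovar arguments would start */
}
#endif /* example only */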
1289
1290 #if defined(DHD_DEBUG) && defined(BCMDBUS)
1291 /* USB Device console input function */
1292 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
1293 {
1294 DHD_TRACE(("%s \n", __FUNCTION__));
1295
1296 return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
1297
1298 }
1299 #endif /* DHD_DEBUG && BCMDBUS */
1300
1301 #ifdef DHD_DEBUG
1302 int
1303 dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
1304 {
1305 unsigned long int_arg = 0;
1306 char *p;
1307 char *end_ptr = NULL;
1308 dhd_dbg_mwli_t *mw_li;
1309 dll_t *item, *next;
1310 /* check if mwalloc, mwquery or mwfree was supplied an argument after a space */
1311 p = bcmstrstr((char *)msg, " ");
1312 if (p != NULL) {
1313 /* space should be converted to null as separation flag for firmware */
1314 *p = '\0';
1315 /* store the argument in int_arg */
1316 int_arg = bcm_strtoul(p+1, &end_ptr, 10);
1317 }
1318
1319 if (!p && !strcmp(msg, "query")) {
1320 /* let's query the list internally */
1321 if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
1322 DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
1323 } else {
1324 for (item = dll_head_p(&dhd->mw_list_head);
1325 !dll_end(&dhd->mw_list_head, item); item = next) {
1326 next = dll_next_p(item);
1327 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1328 DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
1329 }
1330 }
1331 } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
1332 int32 alloc_handle;
1333 /* convert size into KB and append as integer */
1334 *((int32 *)(p+1)) = int_arg*1024;
1335 *(p+1+sizeof(int32)) = '\0';
1336
1337 /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
1338 * 1 byte for the null character
1339 */
1340 msglen = strlen(msg) + sizeof(int32) + 1;
1341 if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
1342 DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
1343 }
1344
1345 /* returned allocated handle from dongle, basically address of the allocated unit */
1346 alloc_handle = *((int32 *)msg);
1347
1348 /* add a node in the list with tuple <id, handle, size> */
1349 if (alloc_handle == 0) {
1350 DHD_ERROR(("Requested size could not be allocated\n"));
1351 } else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
1352 DHD_ERROR(("mw list item allocation Failed\n"));
1353 } else {
1354 mw_li->id = dhd->mw_id++;
1355 mw_li->handle = alloc_handle;
1356 mw_li->size = int_arg;
1357 /* append the node in the list */
1358 dll_append(&dhd->mw_list_head, &mw_li->list);
1359 }
1360 } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
1361 /* inform dongle to free wasted chunk */
1362 int handle = 0;
1363 int size = 0;
1364 for (item = dll_head_p(&dhd->mw_list_head);
1365 !dll_end(&dhd->mw_list_head, item); item = next) {
1366 next = dll_next_p(item);
1367 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1368
1369 if (mw_li->id == (int)int_arg) {
1370 handle = mw_li->handle;
1371 size = mw_li->size;
1372 dll_delete(item);
1373 MFREE(dhd->osh, mw_li, sizeof(*mw_li));
1374 if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
1375 /* reset the id */
1376 dhd->mw_id = 0;
1377 }
1378 }
1379 }
1380 if (handle) {
1381 int len;
1382 /* append the free handle and the chunk size in first 8 bytes
1383 * after the command and null character
1384 */
1385 *((int32 *)(p+1)) = handle;
1386 *((int32 *)((p+1)+sizeof(int32))) = size;
1387 /* append null as terminator */
1388 *(p+1+2*sizeof(int32)) = '\0';
1389 /* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size
1390 * + 1 byte for the null character
1391 */
1392 len = strlen(msg) + 2*sizeof(int32) + 1;
1393 /* send iovar to free the chunk */
1394 if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
1395 DHD_ERROR(("IOCTL failed for memdebug free\n"));
1396 }
1397 } else {
1398 DHD_ERROR(("specified id does not exist\n"));
1399 }
1400 } else {
1401 /* for all the wrong argument formats */
1402 return BCME_BADARG;
1403 }
1404 return 0;
1405 }
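/*
 * Illustrative sketch (not part of the driver build): the command strings
 * accepted by dhd_mem_debug() above. "alloc" takes a size in KB, "query"
 * lists the wasted chunks as <id, size> pairs, and "free" takes an id
 * reported by "query". The buffers are oversized because the handler
 * rewrites them in place when talking to the dongle.
 */
#if 0 /* example only */
static void
dhd_example_mem_debug(dhd_pub_t *dhd)
{
	char alloc_cmd[32] = "alloc 4";	/* waste a 4 KB chunk in the dongle */
	char query_cmd[32] = "query";	/* list currently wasted chunks */
	char free_cmd[32] = "free 0";	/* free the chunk with id 0 */

	(void)dhd_mem_debug(dhd, (uchar *)alloc_cmd, (uint)strlen(alloc_cmd));
	(void)dhd_mem_debug(dhd, (uchar *)query_cmd, (uint)strlen(query_cmd));
	(void)dhd_mem_debug(dhd, (uchar *)free_cmd, (uint)strlen(free_cmd));
}
#endif /* example only */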
1406 extern void
1407 dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
1408 {
1409 dll_t *item;
1410 dhd_dbg_mwli_t *mw_li;
1411 while (!(dll_empty(list_head))) {
1412 item = dll_head_p(list_head);
1413 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1414 dll_delete(item);
1415 MFREE(dhd->osh, mw_li, sizeof(*mw_li));
1416 }
1417 }
1418 #ifdef BCMPCIE
1419 int
1420 dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
1421 {
1422 flow_ring_table_t *flow_ring_table;
1423 char *cmd;
1424 char *end_ptr = NULL;
1425 uint8 prio;
1426 uint16 flowid;
1427 int i;
1428 int ret = 0;
1429 cmd = bcmstrstr(msg, " ");
1430 BCM_REFERENCE(prio);
1431 if (cmd != NULL) {
1432 /* in order to use string operations append null */
1433 *cmd = '\0';
1434 } else {
1435 DHD_ERROR(("missing: create/delete args\n"));
1436 return BCME_ERROR;
1437 }
1438 if (cmd && !strcmp(msg, "create")) {
1439 /* extract <"source address", "destination address", "priority"> */
1440 uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
1441 BCM_REFERENCE(sa);
1442 BCM_REFERENCE(da);
1443 msg = msg + strlen("create") + 1;
1444 /* fill ethernet source address */
1445 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1446 sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
1447 if (*end_ptr == ':') {
1448 msg = (end_ptr + 1);
1449 } else if (i != 5) {
1450 DHD_ERROR(("not a valid source mac addr\n"));
1451 return BCME_ERROR;
1452 }
1453 }
1454 if (*end_ptr != ' ') {
1455 DHD_ERROR(("missing: destination mac addr\n"));
1456 return BCME_ERROR;
1457 } else {
1458 /* skip space */
1459 msg = end_ptr + 1;
1460 }
1461 /* fill ethernet destination address */
1462 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1463 da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
1464 if (*end_ptr == ':') {
1465 msg = (end_ptr + 1);
1466 } else if (i != 5) {
1467 DHD_ERROR(("not a valid destination mac addr\n"));
1468 return BCME_ERROR;
1469 }
1470 }
1471 if (*end_ptr != ' ') {
1472 DHD_ERROR(("missing: priority\n"));
1473 return BCME_ERROR;
1474 } else {
1475 msg = end_ptr + 1;
1476 }
1477 /* parse priority */
1478 prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
1479 if (prio > MAXPRIO) {
1480 DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
1481 __FUNCTION__));
1482 return BCME_ERROR;
1483 }
1484
1485 if (*end_ptr != '\0') {
1486 DHD_ERROR(("msg not terminated with NULL character\n"));
1487 return BCME_ERROR;
1488 }
1489 ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
1490 if (ret != BCME_OK) {
1491 DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
1492 return BCME_ERROR;
1493 }
1494 return BCME_OK;
1495
1496 } else if (cmd && !strcmp(msg, "delete")) {
1497 msg = msg + strlen("delete") + 1;
1498 /* parse flowid */
1499 flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
1500 if (*end_ptr != '\0') {
1501 DHD_ERROR(("msg not terminated with NULL character\n"));
1502 return BCME_ERROR;
1503 }
1504
1505 /* Find flowid from ifidx 0 since this IOVAR creates flowrings with ifidx 0 */
1506 if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
1507 {
1508 DHD_ERROR(("%s : Attempting to delete flowid %u which was not created\n", __FUNCTION__, flowid));
1509 return BCME_ERROR;
1510 }
1511
1512 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
1513 ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
1514 if (ret != BCME_OK) {
1515 DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
1516 return BCME_ERROR;
1517 }
1518 return BCME_OK;
1519 }
1520 DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
1521 return BCME_ERROR;
1522 }
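/*
 * Illustrative sketch (not part of the driver build): the command strings
 * parsed by dhd_flow_ring_debug() above. "create" takes colon-separated
 * source and destination MAC addresses followed by a 0-7 priority;
 * "delete" takes the flow id of a previously created ring. The addresses
 * and the flow id below are placeholders.
 */
#if 0 /* example only */
static void
dhd_example_flow_ring_debug(dhd_pub_t *dhd)
{
	char create_cmd[] = "create 00:11:22:33:44:55 66:77:88:99:aa:bb 3";
	char delete_cmd[] = "delete 2";

	(void)dhd_flow_ring_debug(dhd, create_cmd, (uint)strlen(create_cmd));
	(void)dhd_flow_ring_debug(dhd, delete_cmd, (uint)strlen(delete_cmd));
}
#endif /* example only */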
1523 #endif /* BCMPCIE */
1524 #endif /* DHD_DEBUG */
1525
1526 static int
1527 dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
1528 void *params, int plen, void *arg, int len, int val_size)
1529 {
1530 int bcmerror = 0;
1531 int32 int_val = 0;
1532 uint32 dhd_ver_len, bus_api_rev_len;
1533
1534 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1535 DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
1536
1537 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
1538 goto exit;
1539
1540 if (plen >= (int)sizeof(int_val))
1541 bcopy(params, &int_val, sizeof(int_val));
1542
1543 switch (actionid) {
1544 case IOV_GVAL(IOV_VERSION):
1545 /* Need to have checked buffer length */
1546 dhd_ver_len = strlen(dhd_version);
1547 bus_api_rev_len = strlen(bus_api_revision);
1548 if (dhd_ver_len)
1549 bcm_strncpy_s((char*)arg, dhd_ver_len, dhd_version, dhd_ver_len);
1550 if (bus_api_rev_len)
1551 bcm_strncat_s((char*)arg + dhd_ver_len, bus_api_rev_len, bus_api_revision,
1552 bus_api_rev_len);
1553 #if defined(BCMSDIO) && defined(PKT_STATICS)
1554 dhd_bus_clear_txpktstatics(dhd_pub->bus);
1555 #endif
1556 break;
1557
1558 case IOV_GVAL(IOV_WLMSGLEVEL):
1559 printf("android_msg_level=0x%x\n", android_msg_level);
1560 printf("config_msg_level=0x%x\n", config_msg_level);
1561 #if defined(WL_WIRELESS_EXT)
1562 int_val = (int32)iw_msg_level;
1563 bcopy(&int_val, arg, val_size);
1564 printf("iw_msg_level=0x%x\n", iw_msg_level);
1565 #endif
1566 #ifdef WL_CFG80211
1567 int_val = (int32)wl_dbg_level;
1568 bcopy(&int_val, arg, val_size);
1569 printf("cfg_msg_level=0x%x\n", wl_dbg_level);
1570 #endif
1571 break;
1572
1573 case IOV_SVAL(IOV_WLMSGLEVEL):
1574 if (int_val & DHD_ANDROID_VAL) {
1575 android_msg_level = (uint)(int_val & 0xFFFF);
1576 printf("android_msg_level=0x%x\n", android_msg_level);
1577 }
1578 if (int_val & DHD_CONFIG_VAL) {
1579 config_msg_level = (uint)(int_val & 0xFFFF);
1580 printf("config_msg_level=0x%x\n", config_msg_level);
1581 }
1582 #if defined(WL_WIRELESS_EXT)
1583 if (int_val & DHD_IW_VAL) {
1584 iw_msg_level = (uint)(int_val & 0xFFFF);
1585 printf("iw_msg_level=0x%x\n", iw_msg_level);
1586 }
1587 #endif
1588 #ifdef WL_CFG80211
1589 if (int_val & DHD_CFG_VAL) {
1590 wl_cfg80211_enable_trace((u32)(int_val & 0xFFFF));
1591 }
1592 #endif
1593 break;
1594
1595 case IOV_GVAL(IOV_MSGLEVEL):
1596 int_val = (int32)dhd_msg_level;
1597 bcopy(&int_val, arg, val_size);
1598 #if defined(BCMSDIO) && defined(PKT_STATICS)
1599 dhd_bus_dump_txpktstatics(dhd_pub->bus);
1600 #endif
1601 break;
1602
1603 case IOV_SVAL(IOV_MSGLEVEL):
1604 dhd_msg_level = int_val;
1605 break;
1606
1607 case IOV_GVAL(IOV_BCMERRORSTR):
1608 bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
1609 ((char *)arg)[BCME_STRLEN - 1] = 0x00;
1610 break;
1611
1612 case IOV_GVAL(IOV_BCMERROR):
1613 int_val = (int32)dhd_pub->bcmerror;
1614 bcopy(&int_val, arg, val_size);
1615 break;
1616
1617 #ifndef BCMDBUS
1618 case IOV_GVAL(IOV_WDTICK):
1619 int_val = (int32)dhd_watchdog_ms;
1620 bcopy(&int_val, arg, val_size);
1621 break;
1622 #endif /* !BCMDBUS */
1623
1624 case IOV_SVAL(IOV_WDTICK):
1625 if (!dhd_pub->up) {
1626 bcmerror = BCME_NOTUP;
1627 break;
1628 }
1629
1630 dhd_watchdog_ms = (uint)int_val;
1631
1632 dhd_os_wd_timer(dhd_pub, (uint)int_val);
1633 break;
1634
1635 case IOV_GVAL(IOV_DUMP):
1636 if (dhd_dump(dhd_pub, arg, len) <= 0)
1637 bcmerror = BCME_ERROR;
1638 else
1639 bcmerror = BCME_OK;
1640 break;
1641
1642 #ifndef BCMDBUS
1643 case IOV_GVAL(IOV_DCONSOLE_POLL):
1644 int_val = (int32)dhd_pub->dhd_console_ms;
1645 bcopy(&int_val, arg, val_size);
1646 break;
1647
1648 case IOV_SVAL(IOV_DCONSOLE_POLL):
1649 dhd_pub->dhd_console_ms = (uint)int_val;
1650 break;
1651
1652 #if defined(DHD_DEBUG)
1653 case IOV_SVAL(IOV_CONS):
1654 if (len > 0) {
1655 #ifdef CONSOLE_DPC
1656 bcmerror = dhd_bus_txcons(dhd_pub, arg, len - 1);
1657 #else
1658 bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
1659 #endif
1660 }
1661 break;
1662 #endif /* DHD_DEBUG */
1663 #endif /* !BCMDBUS */
1664
1665 case IOV_SVAL(IOV_CLEARCOUNTS):
1666 dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
1667 dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
1668 dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
1669 dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
1670 dhd_pub->tx_dropped = 0;
1671 dhd_pub->rx_dropped = 0;
1672 dhd_pub->tx_pktgetfail = 0;
1673 dhd_pub->rx_pktgetfail = 0;
1674 dhd_pub->rx_readahead_cnt = 0;
1675 dhd_pub->tx_realloc = 0;
1676 dhd_pub->wd_dpc_sched = 0;
1677 dhd_pub->tx_big_packets = 0;
1678 memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
1679 dhd_bus_clearcounts(dhd_pub);
1680 #ifdef PROP_TXSTATUS
1681 /* clear proptxstatus related counters */
1682 dhd_wlfc_clear_counts(dhd_pub);
1683 #endif /* PROP_TXSTATUS */
1684 #if defined(DHD_LB_STATS)
1685 DHD_LB_STATS_RESET(dhd_pub);
1686 #endif /* DHD_LB_STATS */
1687 break;
1688
1689 case IOV_GVAL(IOV_IOCTLTIMEOUT): {
1690 int_val = (int32)dhd_os_get_ioctl_resp_timeout();
1691 bcopy(&int_val, arg, sizeof(int_val));
1692 break;
1693 }
1694
1695 case IOV_SVAL(IOV_IOCTLTIMEOUT): {
1696 if (int_val <= 0)
1697 bcmerror = BCME_BADARG;
1698 else
1699 dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
1700 break;
1701 }
1702
1703 #ifdef PROP_TXSTATUS
1704 case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
1705 bool wlfc_enab = FALSE;
1706 bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
1707 if (bcmerror != BCME_OK)
1708 goto exit;
1709 int_val = wlfc_enab ? 1 : 0;
1710 bcopy(&int_val, arg, val_size);
1711 break;
1712 }
1713 case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
1714 bool wlfc_enab = FALSE;
1715 bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
1716 if (bcmerror != BCME_OK)
1717 goto exit;
1718
1719 /* wlfc is already set as desired */
1720 if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
1721 goto exit;
1722
1723 if (int_val == TRUE)
1724 bcmerror = dhd_wlfc_init(dhd_pub);
1725 else
1726 bcmerror = dhd_wlfc_deinit(dhd_pub);
1727
1728 break;
1729 }
1730 case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
1731 bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
1732 if (bcmerror != BCME_OK)
1733 goto exit;
1734 bcopy(&int_val, arg, val_size);
1735 break;
1736
1737 case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
1738 dhd_wlfc_set_mode(dhd_pub, int_val);
1739 break;
1740
1741 case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
1742 bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
1743 if (bcmerror != BCME_OK)
1744 goto exit;
1745 bcopy(&int_val, arg, val_size);
1746 break;
1747
1748 case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
1749 dhd_wlfc_set_module_ignore(dhd_pub, int_val);
1750 break;
1751
1752 case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
1753 bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
1754 if (bcmerror != BCME_OK)
1755 goto exit;
1756 bcopy(&int_val, arg, val_size);
1757 break;
1758
1759 case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
1760 dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
1761 break;
1762
1763 case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
1764 bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
1765 if (bcmerror != BCME_OK)
1766 goto exit;
1767 bcopy(&int_val, arg, val_size);
1768 break;
1769
1770 case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
1771 dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
1772 break;
1773
1774 case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
1775 bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
1776 if (bcmerror != BCME_OK)
1777 goto exit;
1778 bcopy(&int_val, arg, val_size);
1779 break;
1780
1781 case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
1782 dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
1783 break;
1784
1785 #endif /* PROP_TXSTATUS */
1786
1787 case IOV_GVAL(IOV_BUS_TYPE):
1788 /* The dhd application queries the driver to check whether the bus is usb, sdio or pcie. */
1789 #ifdef BCMDBUS
1790 int_val = BUS_TYPE_USB;
1791 #endif // endif
1792 #ifdef BCMSDIO
1793 int_val = BUS_TYPE_SDIO;
1794 #endif // endif
1795 #ifdef PCIE_FULL_DONGLE
1796 int_val = BUS_TYPE_PCIE;
1797 #endif // endif
1798 bcopy(&int_val, arg, val_size);
1799 break;
1800
1801 case IOV_SVAL(IOV_CHANGEMTU):
1802 int_val &= 0xffff;
1803 bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
1804 break;
1805
1806 case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
1807 {
1808 uint i = 0;
1809 uint8 *ptr = (uint8 *)arg;
1810 uint8 count = 0;
1811
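/* Output layout: the first byte of the result buffer holds the number of
* active reorder flows; it is followed by one flow_id byte per flow.
*/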
1812 ptr++;
1813 for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
1814 if (dhd_pub->reorder_bufs[i] != NULL) {
1815 *ptr = dhd_pub->reorder_bufs[i]->flow_id;
1816 ptr++;
1817 count++;
1818 }
1819 }
1820 ptr = (uint8 *)arg;
1821 *ptr = count;
1822 break;
1823 }
1824 #ifdef DHDTCPACK_SUPPRESS
1825 case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
1826 int_val = (uint32)dhd_pub->tcpack_sup_mode;
1827 bcopy(&int_val, arg, val_size);
1828 break;
1829 }
1830 case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
1831 bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
1832 break;
1833 }
1834 #endif /* DHDTCPACK_SUPPRESS */
1835
1836 #ifdef DHD_L2_FILTER
1837 case IOV_GVAL(IOV_DHCP_UNICAST): {
1838 uint32 bssidx;
1839 const char *val;
1840 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1841 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
1842 __FUNCTION__, name));
1843 bcmerror = BCME_BADARG;
1844 break;
1845 }
1846 int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
1847 memcpy(arg, &int_val, val_size);
1848 break;
1849 }
1850 case IOV_SVAL(IOV_DHCP_UNICAST): {
1851 uint32 bssidx;
1852 const char *val;
1853 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1854 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
1855 __FUNCTION__, name));
1856 bcmerror = BCME_BADARG;
1857 break;
1858 }
1859 memcpy(&int_val, val, sizeof(int_val));
1860 bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
1861 break;
1862 }
1863 case IOV_GVAL(IOV_BLOCK_PING): {
1864 uint32 bssidx;
1865 const char *val;
1866
1867 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1868 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
1869 bcmerror = BCME_BADARG;
1870 break;
1871 }
1872 int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
1873 memcpy(arg, &int_val, val_size);
1874 break;
1875 }
1876 case IOV_SVAL(IOV_BLOCK_PING): {
1877 uint32 bssidx;
1878 const char *val;
1879
1880 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1881 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
1882 bcmerror = BCME_BADARG;
1883 break;
1884 }
1885 memcpy(&int_val, val, sizeof(int_val));
1886 bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
1887 break;
1888 }
1889 case IOV_GVAL(IOV_PROXY_ARP): {
1890 uint32 bssidx;
1891 const char *val;
1892
1893 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1894 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
1895 bcmerror = BCME_BADARG;
1896 break;
1897 }
1898 int_val = dhd_get_parp_status(dhd_pub, bssidx);
1899 bcopy(&int_val, arg, val_size);
1900 break;
1901 }
1902 case IOV_SVAL(IOV_PROXY_ARP): {
1903 uint32 bssidx;
1904 const char *val;
1905
1906 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1907 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
1908 bcmerror = BCME_BADARG;
1909 break;
1910 }
1911 bcopy(val, &int_val, sizeof(int_val));
1912
1913 /* Issue an iovar request to WL to update the proxy arp capability bit
1914 * in the Extended Capability IE of beacons/probe responses.
1915 */
1916 bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
1917 NULL, 0, TRUE);
1918 if (bcmerror == BCME_OK) {
1919 dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
1920 }
1921 break;
1922 }
1923 case IOV_GVAL(IOV_GRAT_ARP): {
1924 uint32 bssidx;
1925 const char *val;
1926
1927 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1928 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
1929 bcmerror = BCME_BADARG;
1930 break;
1931 }
1932 int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
1933 memcpy(arg, &int_val, val_size);
1934 break;
1935 }
1936 case IOV_SVAL(IOV_GRAT_ARP): {
1937 uint32 bssidx;
1938 const char *val;
1939
1940 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1941 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
1942 bcmerror = BCME_BADARG;
1943 break;
1944 }
1945 memcpy(&int_val, val, sizeof(int_val));
1946 bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
1947 break;
1948 }
1949 case IOV_GVAL(IOV_BLOCK_TDLS): {
1950 uint32 bssidx;
1951 const char *val;
1952
1953 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1954 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
1955 bcmerror = BCME_BADARG;
1956 break;
1957 }
1958 int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
1959 memcpy(arg, &int_val, val_size);
1960 break;
1961 }
1962 case IOV_SVAL(IOV_BLOCK_TDLS): {
1963 uint32 bssidx;
1964 const char *val;
1965
1966 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1967 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
1968 bcmerror = BCME_BADARG;
1969 break;
1970 }
1971 memcpy(&int_val, val, sizeof(int_val));
1972 bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
1973 break;
1974 }
1975 #endif /* DHD_L2_FILTER */
1976 case IOV_SVAL(IOV_DHD_IE): {
1977 uint32 bssidx;
1978 const char *val;
1979
1980 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1981 DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
1982 bcmerror = BCME_BADARG;
1983 break;
1984 }
1985
1986 break;
1987 }
1988 case IOV_GVAL(IOV_AP_ISOLATE): {
1989 uint32 bssidx;
1990 const char *val;
1991
1992 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1993 DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
1994 bcmerror = BCME_BADARG;
1995 break;
1996 }
1997
1998 int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
1999 bcopy(&int_val, arg, val_size);
2000 break;
2001 }
2002 case IOV_SVAL(IOV_AP_ISOLATE): {
2003 uint32 bssidx;
2004 const char *val;
2005
2006 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2007 DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
2008 bcmerror = BCME_BADARG;
2009 break;
2010 }
2011
2012 ASSERT(val);
2013 bcopy(val, &int_val, sizeof(uint32));
2014 dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
2015 break;
2016 }
2017 #ifdef DHD_PSTA
2018 case IOV_GVAL(IOV_PSTA): {
2019 int_val = dhd_get_psta_mode(dhd_pub);
2020 bcopy(&int_val, arg, val_size);
2021 break;
2022 }
2023 case IOV_SVAL(IOV_PSTA): {
2024 if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
2025 dhd_set_psta_mode(dhd_pub, int_val);
2026 } else {
2027 bcmerror = BCME_RANGE;
2028 }
2029 break;
2030 }
2031 #endif /* DHD_PSTA */
2032 #ifdef DHD_WET
2033 case IOV_GVAL(IOV_WET):
2034 int_val = dhd_get_wet_mode(dhd_pub);
2035 bcopy(&int_val, arg, val_size);
2036 break;
2037
2038 case IOV_SVAL(IOV_WET):
2039 if (int_val == 0 || int_val == 1) {
2040 dhd_set_wet_mode(dhd_pub, int_val);
2041 /* Delete the WET DB when disabled */
2042 if (!int_val) {
2043 dhd_wet_sta_delete_list(dhd_pub);
2044 }
2045 } else {
2046 bcmerror = BCME_RANGE;
2047 }
2048 break;
2049 case IOV_SVAL(IOV_WET_HOST_IPV4):
2050 dhd_set_wet_host_ipv4(dhd_pub, params, plen);
2051 break;
2052 case IOV_SVAL(IOV_WET_HOST_MAC):
2053 dhd_set_wet_host_mac(dhd_pub, params, plen);
2054 break;
2055 #endif /* DHD_WET */
2056 #ifdef DHD_MCAST_REGEN
2057 case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
2058 uint32 bssidx;
2059 const char *val;
2060
2061 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2062 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
2063 bcmerror = BCME_BADARG;
2064 break;
2065 }
2066
2067 int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
2068 bcopy(&int_val, arg, val_size);
2069 break;
2070 }
2071
2072 case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
2073 uint32 bssidx;
2074 const char *val;
2075
2076 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2077 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
2078 bcmerror = BCME_BADARG;
2079 break;
2080 }
2081
2082 ASSERT(val);
2083 bcopy(val, &int_val, sizeof(uint32));
2084 dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
2085 break;
2086 }
2087 #endif /* DHD_MCAST_REGEN */
2088
2089 case IOV_GVAL(IOV_CFG80211_OPMODE): {
2090 int_val = (int32)dhd_pub->op_mode;
2091 bcopy(&int_val, arg, sizeof(int_val));
2092 break;
2093 }
2094 case IOV_SVAL(IOV_CFG80211_OPMODE): {
2095 if (int_val <= 0)
2096 bcmerror = BCME_BADARG;
2097 else
2098 dhd_pub->op_mode = int_val;
2099 break;
2100 }
2101
2102 case IOV_GVAL(IOV_ASSERT_TYPE):
2103 int_val = g_assert_type;
2104 bcopy(&int_val, arg, val_size);
2105 break;
2106
2107 case IOV_SVAL(IOV_ASSERT_TYPE):
2108 g_assert_type = (uint32)int_val;
2109 break;
2110
2111 #if !defined(MACOSX_DHD)
2112 case IOV_GVAL(IOV_LMTEST): {
2113 *(uint32 *)arg = (uint32)lmtest;
2114 break;
2115 }
2116
2117 case IOV_SVAL(IOV_LMTEST): {
2118 uint32 val = *(uint32 *)arg;
2119 if (val > 50)
2120 bcmerror = BCME_BADARG;
2121 else {
2122 lmtest = (uint)val;
2123 DHD_ERROR(("%s: lmtest %s\n",
2124 __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
2125 }
2126 break;
2127 }
2128 #endif // endif
2129
2130 #ifdef SHOW_LOGTRACE
2131 case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
2132 trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
2133 dhd_dbg_ring_t *dbg_verbose_ring = NULL;
2134
2135 dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
2136 if (dbg_verbose_ring == NULL) {
2137 DHD_ERROR(("dbg_verbose_ring is NULL\n"));
2138 bcmerror = BCME_UNSUPPORTED;
2139 break;
2140 }
2141
2142 if (trace_buf_info != NULL) {
2143 bzero(trace_buf_info, sizeof(trace_buf_info_t));
2144 dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
2145 } else {
2146 DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
2147 bcmerror = BCME_NOMEM;
2148 }
2149 break;
2150 }
2151 #endif /* SHOW_LOGTRACE */
2152 #ifdef DHD_DEBUG
2153 #if defined(BCMSDIO) || defined(BCMPCIE)
2154 case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
2155 if (dhd_pub->dongle_trap_occured)
2156 int_val = ltoh32(dhd_pub->last_trap_info.type);
2157 else
2158 int_val = 0;
2159 bcopy(&int_val, arg, val_size);
2160 break;
2161
2162 case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
2163 {
2164 struct bcmstrbuf strbuf;
2165 bcm_binit(&strbuf, arg, len);
2166 if (dhd_pub->dongle_trap_occured == FALSE) {
2167 bcm_bprintf(&strbuf, "no trap recorded\n");
2168 break;
2169 }
2170 dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
2171 break;
2172 }
2173
2174 case IOV_GVAL(IOV_BPADDR):
2175 {
2176 sdreg_t sdreg;
2177 uint32 addr, size;
2178
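/* The sdreg_t fields are overloaded here: 'offset' carries the backplane
* address to access and 'func' carries the access size.
*/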
2179 memcpy(&sdreg, params, sizeof(sdreg));
2180
2181 addr = sdreg.offset;
2182 size = sdreg.func;
2183
2184 bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
2185 (uint *)&int_val, TRUE);
2186
2187 memcpy(arg, &int_val, sizeof(int32));
2188
2189 break;
2190 }
2191
2192 case IOV_SVAL(IOV_BPADDR):
2193 {
2194 sdreg_t sdreg;
2195 uint32 addr, size;
2196
2197 memcpy(&sdreg, params, sizeof(sdreg));
2198
2199 addr = sdreg.offset;
2200 size = sdreg.func;
2201
2202 bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
2203 (uint *)&sdreg.value,
2204 FALSE);
2205
2206 break;
2207 }
2208 #endif /* BCMSDIO || BCMPCIE */
2209 #ifdef BCMPCIE
2210 case IOV_SVAL(IOV_FLOW_RING_DEBUG):
2211 {
2212 bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
2213 break;
2214 }
2215 #endif /* BCMPCIE */
2216 case IOV_SVAL(IOV_MEM_DEBUG):
2217 if (len > 0) {
2218 bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
2219 }
2220 break;
2221 #endif /* DHD_DEBUG */
2222 #if defined(DHD_LOG_DUMP)
2223 case IOV_GVAL(IOV_LOG_DUMP):
2224 {
2225 dhd_prot_debug_info_print(dhd_pub);
2226 dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
2227 break;
2228 }
2229 #endif /* DHD_LOG_DUMP */
2230 case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
2231 {
2232 if (dhd_pub->debug_buf_dest_support) {
2233 debug_buf_dest_stat_t *debug_buf_dest_stat =
2234 (debug_buf_dest_stat_t *)arg;
2235 memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
2236 sizeof(dhd_pub->debug_buf_dest_stat));
2237 } else {
2238 bcmerror = BCME_DISABLED;
2239 }
2240 break;
2241 }
2242
2243 #ifdef DHD_DEBUG
2244 case IOV_SVAL(IOV_INDUCE_ERROR): {
2245 if (int_val >= DHD_INDUCE_ERROR_MAX) {
2246 DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
2247 } else {
2248 dhd_pub->dhd_induce_error = (uint16)int_val;
2249 }
2250 break;
2251 }
2252 #endif /* DHD_DEBUG */
2253
2254 #ifdef WL_IFACE_MGMT_CONF
2255 #ifdef WL_CFG80211
2256 #ifdef WL_NANP2P
2257 case IOV_GVAL(IOV_CONC_DISC): {
2258 int_val = wl_cfg80211_get_iface_conc_disc(
2259 dhd_linux_get_primary_netdev(dhd_pub));
2260 bcopy(&int_val, arg, sizeof(int_val));
2261 break;
2262 }
2263 case IOV_SVAL(IOV_CONC_DISC): {
2264 bcmerror = wl_cfg80211_set_iface_conc_disc(
2265 dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
2266 break;
2267 }
2268 #endif /* WL_NANP2P */
2269 #ifdef WL_IFACE_MGMT
2270 case IOV_GVAL(IOV_IFACE_POLICY): {
2271 int_val = wl_cfg80211_get_iface_policy(
2272 dhd_linux_get_primary_netdev(dhd_pub));
2273 bcopy(&int_val, arg, sizeof(int_val));
2274 break;
2275 }
2276 case IOV_SVAL(IOV_IFACE_POLICY): {
2277 bcmerror = wl_cfg80211_set_iface_policy(
2278 dhd_linux_get_primary_netdev(dhd_pub),
2279 arg, len);
2280 break;
2281 }
2282 #endif /* WL_IFACE_MGMT */
2283 #endif /* WL_CFG80211 */
2284 #endif /* WL_IFACE_MGMT_CONF */
2285 #ifdef RTT_GEOFENCE_CONT
2286 #if defined(RTT_SUPPORT) && defined(WL_NAN)
2287 case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
2288 bool enable = 0;
2289 dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable);
2290 int_val = enable ? 1 : 0;
2291 bcopy(&int_val, arg, val_size);
2292 break;
2293 }
2294 case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
2295 bool enable = *(bool *)arg;
2296 dhd_rtt_set_geofence_cont_ind(dhd_pub, enable);
2297 break;
2298 }
2299 #endif /* RTT_SUPPORT && WL_NAN */
2300 #endif /* RTT_GEOFENCE_CONT */
2301 default:
2302 bcmerror = BCME_UNSUPPORTED;
2303 break;
2304 }
2305
2306 exit:
2307 DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
2308 return bcmerror;
2309 }
2310
2311 /* Store the status of a connection attempt for later retrieval by an iovar */
2312 void
2313 dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
2314 {
2315 /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
2316 * because an encryption/rsn mismatch results in both events, and
2317 * the important information is in the WLC_E_PRUNE.
2318 */
2319 if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
2320 dhd_conn_event == WLC_E_PRUNE)) {
2321 dhd_conn_event = event;
2322 dhd_conn_status = status;
2323 dhd_conn_reason = reason;
2324 }
2325 }
2326
2327 bool
2328 dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
2329 {
2330 void *p;
2331 int eprec = -1; /* precedence to evict from */
2332 bool discard_oldest;
2333
2334 /* Fast case, precedence queue is not full and we are also not
2335 * exceeding total queue length
2336 */
2337 if (!pktqprec_full(q, prec) && !pktq_full(q)) {
2338 pktq_penq(q, prec, pkt);
2339 return TRUE;
2340 }
2341
2342 /* Determine precedence from which to evict packet, if any */
2343 if (pktqprec_full(q, prec))
2344 eprec = prec;
2345 else if (pktq_full(q)) {
2346 p = pktq_peek_tail(q, &eprec);
2347 ASSERT(p);
2348 if (eprec > prec || eprec < 0)
2349 return FALSE;
2350 }
2351
2352 /* Evict if needed */
2353 if (eprec >= 0) {
2354 /* Detect queueing to unconfigured precedence */
2355 ASSERT(!pktqprec_empty(q, eprec));
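/* The per-precedence discard policy bitmap (wme_dp) selects whether the
* oldest or the newest packet of the evicted precedence is dropped.
*/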
2356 discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
2357 if (eprec == prec && !discard_oldest)
2358 return FALSE; /* refuse newer (incoming) packet */
2359 /* Evict packet according to discard policy */
2360 p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
2361 ASSERT(p);
2362 #ifdef DHDTCPACK_SUPPRESS
2363 if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
2364 DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
2365 __FUNCTION__, __LINE__));
2366 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
2367 }
2368 #endif /* DHDTCPACK_SUPPRESS */
2369 PKTFREE(dhdp->osh, p, TRUE);
2370 }
2371
2372 /* Enqueue */
2373 p = pktq_penq(q, prec, pkt);
2374 ASSERT(p);
2375
2376 return TRUE;
2377 }
2378
2379 /*
2380 * Functions to drop proper pkts from queue:
2381 * If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
2382 * If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
2383 * If can't find pkts matching upper 2 cases, drop first pkt anyway
2384 */
2385 bool
2386 dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
2387 {
2388 struct pktq_prec *q = NULL;
2389 void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
2390 pkt_frag_t frag_info;
2391
2392 ASSERT(dhdp && pq);
2393 ASSERT(prec >= 0 && prec < pq->num_prec);
2394
2395 q = &pq->q[prec];
2396 p = q->head;
2397
2398 if (p == NULL)
2399 return FALSE;
2400
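/* Walk the precedence queue looking either for a non-fragmented packet or
* for a complete first..last fragment sequence that can be dropped as a unit.
*/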
2401 while (p) {
2402 frag_info = pkt_frag_info(dhdp->osh, p);
2403 if (frag_info == DHD_PKT_FRAG_NONE) {
2404 break;
2405 } else if (frag_info == DHD_PKT_FRAG_FIRST) {
2406 if (first) {
2407 /* No last frag pkt, use prev as last */
2408 last = prev;
2409 break;
2410 } else {
2411 first = p;
2412 prev_first = prev;
2413 }
2414 } else if (frag_info == DHD_PKT_FRAG_LAST) {
2415 if (first) {
2416 last = p;
2417 break;
2418 }
2419 }
2420
2421 prev = p;
2422 p = PKTLINK(p);
2423 }
2424
2425 if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
2426 /* Not found matching pkts, use oldest */
2427 prev = NULL;
2428 p = q->head;
2429 frag_info = 0;
2430 }
2431
2432 if (frag_info == DHD_PKT_FRAG_NONE) {
2433 first = last = p;
2434 prev_first = prev;
2435 }
2436
2437 p = first;
2438 while (p) {
2439 next = PKTLINK(p);
2440 q->n_pkts--;
2441 pq->n_pkts_tot--;
2442
2443 #ifdef WL_TXQ_STALL
2444 q->dequeue_count++;
2445 #endif // endif
2446
2447 PKTSETLINK(p, NULL);
2448
2449 if (fn)
2450 fn(dhdp, prec, p, TRUE);
2451
2452 if (p == last)
2453 break;
2454
2455 p = next;
2456 }
2457
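/* Unlink the dropped span (first..last) from the precedence queue and fix
* up the head/tail pointers.
*/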
2458 if (prev_first == NULL) {
2459 if ((q->head = next) == NULL)
2460 q->tail = NULL;
2461 } else {
2462 PKTSETLINK(prev_first, next);
2463 if (!next)
2464 q->tail = prev_first;
2465 }
2466
2467 return TRUE;
2468 }
2469
2470 static int
2471 dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
2472 void *params, int plen, void *arg, int len, bool set)
2473 {
2474 int bcmerror = 0;
2475 int val_size;
2476 const bcm_iovar_t *vi = NULL;
2477 uint32 actionid;
2478
2479 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2480
2481 ASSERT(name);
2482 ASSERT(len >= 0);
2483
2484 /* Get MUST have return space */
2485 ASSERT(set || (arg && len));
2486
2487 /* Set does NOT take qualifiers */
2488 ASSERT(!set || (!params && !plen));
2489
2490 if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
2491 bcmerror = BCME_UNSUPPORTED;
2492 goto exit;
2493 }
2494
2495 DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
2496 name, (set ? "set" : "get"), len, plen));
2497
2498 /* set up 'params' pointer in case this is a set command so that
2499 * the convenience int and bool code can be common to set and get
2500 */
2501 if (params == NULL) {
2502 params = arg;
2503 plen = len;
2504 }
2505
2506 if (vi->type == IOVT_VOID)
2507 val_size = 0;
2508 else if (vi->type == IOVT_BUFFER)
2509 val_size = len;
2510 else
2511 /* all other types are integer sized */
2512 val_size = sizeof(int);
2513
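/* Encode the get/set direction together with the variable id so that
* dhd_doiovar() can dispatch on a single action value.
*/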
2514 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
2515
2516 bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
2517
2518 exit:
2519 return bcmerror;
2520 }
2521
2522 int
2523 dhd_ioctl(dhd_pub_t *dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
2524 {
2525 int bcmerror = 0;
2526 unsigned long flags;
2527
2528 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2529
2530 if (!buf) {
2531 return BCME_BADARG;
2532 }
2533
2534 dhd_os_dhdiovar_lock(dhd_pub);
2535 switch (ioc->cmd) {
2536 case DHD_GET_MAGIC:
2537 if (buflen < sizeof(int))
2538 bcmerror = BCME_BUFTOOSHORT;
2539 else
2540 *(int*)buf = DHD_IOCTL_MAGIC;
2541 break;
2542
2543 case DHD_GET_VERSION:
2544 if (buflen < sizeof(int))
2545 bcmerror = BCME_BUFTOOSHORT;
2546 else
2547 *(int*)buf = DHD_IOCTL_VERSION;
2548 break;
2549
2550 case DHD_GET_VAR:
2551 case DHD_SET_VAR:
2552 {
2553 char *arg;
2554 uint arglen;
2555
2556 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2557 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
2558 bcmstricmp((char *)buf, "devreset")) {
2559 /* On platforms like FC19 the FW download is done via IOCTL,
2560 * so do not return an error for IOCTLs issued before the FW
2561 * download has completed.
2562 */
2563 if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
2564 DHD_ERROR(("%s: returning as busstate=%d\n",
2565 __FUNCTION__, dhd_pub->busstate));
2566 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2567 dhd_os_dhdiovar_unlock(dhd_pub);
2568 return -ENODEV;
2569 }
2570 }
2571 DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
2572 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2573
2574 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2575 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
2576 /* When Suspend/Resume is exercised via the pcie_suspend IOVAR,
2577 * continue to execute that IOVAR; return from here for all other
2578 * IOVARs. pciecfgreg, devreset and sdio_suspend are also allowed
2579 * to go through.
2580 */
2581 if (bcmstricmp((char *)buf, "pcie_suspend") &&
2582 bcmstricmp((char *)buf, "pciecfgreg") &&
2583 bcmstricmp((char *)buf, "devreset") &&
2584 bcmstricmp((char *)buf, "sdio_suspend")) {
2585 DHD_ERROR(("%s: bus is in suspend(%d)"
2586 "or suspending(0x%x) state\n",
2587 __FUNCTION__, dhd_pub->busstate,
2588 dhd_pub->dhd_bus_busy_state));
2589 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2590 dhd_os_busbusy_wake(dhd_pub);
2591 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2592 dhd_os_dhdiovar_unlock(dhd_pub);
2593 return -ENODEV;
2594 }
2595 }
2596 /* During the devreset ioctl, we call dhdpcie_advertise_bus_cleanup,
2597 * which waits for all busy contexts to finish within a fixed time and
2598 * calls ASSERT on timeout. Since DHD_BUS_BUSY_SET_IN_DHD_IOVAR was set
2599 * above for this ioctl, clear the IOVAR busy state here to avoid that
2600 * ASSERT. The "devreset" ioctl is not used on production platforms;
2601 * it is only used in FC19 setups.
2602 */
2603 if (!bcmstricmp((char *)buf, "devreset") ||
2604 #ifdef BCMPCIE
2605 (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
2606 !bcmstricmp((char *)buf, "dwnldstate")) ||
2607 #endif /* BCMPCIE */
2608 FALSE)
2609 {
2610 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2611 }
2612 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2613
2614 /* scan past the name to any arguments */
2615 for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
2616 ;
2617
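/* If the loop stopped because the buffer ran out rather than on the NUL
* terminator, the iovar name is not properly terminated.
*/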
2618 if (*arg) {
2619 bcmerror = BCME_BUFTOOSHORT;
2620 goto unlock_exit;
2621 }
2622
2623 /* account for the NUL terminator */
2624 arg++, arglen--;
2625 /* call with the appropriate arguments */
2626 if (ioc->cmd == DHD_GET_VAR) {
2627 bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
2628 buf, buflen, IOV_GET);
2629 } else {
2630 bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
2631 arg, arglen, IOV_SET);
2632 }
2633 if (bcmerror != BCME_UNSUPPORTED) {
2634 goto unlock_exit;
2635 }
2636
2637 /* not in generic table, try protocol module */
2638 if (ioc->cmd == DHD_GET_VAR) {
2639 bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
2640 arglen, buf, buflen, IOV_GET);
2641 } else {
2642 bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
2643 NULL, 0, arg, arglen, IOV_SET);
2644 }
2645 if (bcmerror != BCME_UNSUPPORTED) {
2646 goto unlock_exit;
2647 }
2648
2649 /* if still not found, try bus module */
2650 if (ioc->cmd == DHD_GET_VAR) {
2651 bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
2652 arg, arglen, buf, buflen, IOV_GET);
2653 } else {
2654 bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
2655 NULL, 0, arg, arglen, IOV_SET);
2656 }
2657 if (bcmerror != BCME_UNSUPPORTED) {
2658 goto unlock_exit;
2659 }
2660
2661 }
2662 goto unlock_exit;
2663
2664 default:
2665 bcmerror = BCME_UNSUPPORTED;
2666 }
2667 dhd_os_dhdiovar_unlock(dhd_pub);
2668 return bcmerror;
2669
2670 unlock_exit:
2671 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2672 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2673 dhd_os_busbusy_wake(dhd_pub);
2674 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2675 dhd_os_dhdiovar_unlock(dhd_pub);
2676 return bcmerror;
2677 }
2678
2679 #ifdef SHOW_EVENTS
2680
2681 static void
2682 wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
2683 void *raw_event_ptr, char *eventmask)
2684 {
2685 uint i, status, reason;
2686 bool group = FALSE, flush_txq = FALSE, link = FALSE;
2687 bool host_data = FALSE; /* prints event data after the case when set */
2688 const char *auth_str;
2689 const char *event_name;
2690 uchar *buf;
2691 char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
2692 uint event_type, flags, auth_type, datalen;
2693
2694 event_type = ntoh32(event->event_type);
2695 flags = ntoh16(event->flags);
2696 status = ntoh32(event->status);
2697 reason = ntoh32(event->reason);
2698 BCM_REFERENCE(reason);
2699 auth_type = ntoh32(event->auth_type);
2700 datalen = ntoh32(event->datalen);
2701
2702 /* debug dump of event messages */
2703 snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));
2704
2705 event_name = bcmevent_get_name(event_type);
2706 BCM_REFERENCE(event_name);
2707
2708 if (flags & WLC_EVENT_MSG_LINK)
2709 link = TRUE;
2710 if (flags & WLC_EVENT_MSG_GROUP)
2711 group = TRUE;
2712 if (flags & WLC_EVENT_MSG_FLUSHTXQ)
2713 flush_txq = TRUE;
2714
2715 switch (event_type) {
2716 case WLC_E_START:
2717 case WLC_E_DEAUTH:
2718 case WLC_E_DISASSOC:
2719 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2720 break;
2721
2722 case WLC_E_ASSOC_IND:
2723 case WLC_E_REASSOC_IND:
2724
2725 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2726
2727 break;
2728
2729 case WLC_E_ASSOC:
2730 case WLC_E_REASSOC:
2731 if (status == WLC_E_STATUS_SUCCESS) {
2732 DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
2733 } else if (status == WLC_E_STATUS_TIMEOUT) {
2734 DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
2735 } else if (status == WLC_E_STATUS_FAIL) {
2736 DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
2737 event_name, eabuf, (int)status, (int)reason));
2738 } else {
2739 DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
2740 event_name, eabuf, (int)status));
2741 }
2742
2743 break;
2744
2745 case WLC_E_DEAUTH_IND:
2746 case WLC_E_DISASSOC_IND:
2747 DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
2748 break;
2749
2750 case WLC_E_AUTH:
2751 case WLC_E_AUTH_IND:
2752 if (auth_type == DOT11_OPEN_SYSTEM)
2753 auth_str = "Open System";
2754 else if (auth_type == DOT11_SHARED_KEY)
2755 auth_str = "Shared Key";
2756 else if (auth_type == DOT11_SAE)
2757 auth_str = "SAE";
2758 else {
2759 snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
2760 auth_str = err_msg;
2761 }
2762
2763 if (event_type == WLC_E_AUTH_IND) {
2764 DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
2765 } else if (status == WLC_E_STATUS_SUCCESS) {
2766 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
2767 event_name, eabuf, auth_str));
2768 } else if (status == WLC_E_STATUS_TIMEOUT) {
2769 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
2770 event_name, eabuf, auth_str));
2771 } else if (status == WLC_E_STATUS_FAIL) {
2772 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
2773 event_name, eabuf, auth_str, (int)status, (int)reason));
2774 } else if (status == WLC_E_STATUS_NO_ACK) {
2775 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
2776 event_name, eabuf, auth_str));
2777 } else {
2778 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
2779 event_name, eabuf, auth_str, (int)status, (int)reason));
2780 }
2781 BCM_REFERENCE(auth_str);
2782
2783 break;
2784
2785 case WLC_E_JOIN:
2786 case WLC_E_ROAM:
2787 case WLC_E_SET_SSID:
2788 if (status == WLC_E_STATUS_SUCCESS) {
2789 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2790 } else {
2791 if (status == WLC_E_STATUS_FAIL) {
2792 DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
2793 } else if (status == WLC_E_STATUS_NO_NETWORKS) {
2794 DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
2795 } else {
2796 DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
2797 event_name, (int)status));
2798 }
2799 }
2800 break;
2801
2802 case WLC_E_BEACON_RX:
2803 if (status == WLC_E_STATUS_SUCCESS) {
2804 DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
2805 } else if (status == WLC_E_STATUS_FAIL) {
2806 DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
2807 } else {
2808 DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
2809 }
2810 break;
2811
2812 case WLC_E_LINK:
2813 DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d\n",
2814 event_name, link?"UP":"DOWN", flags, status));
2815 BCM_REFERENCE(link);
2816 break;
2817
2818 case WLC_E_MIC_ERROR:
2819 DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
2820 event_name, eabuf, group, flush_txq));
2821 BCM_REFERENCE(group);
2822 BCM_REFERENCE(flush_txq);
2823 break;
2824
2825 case WLC_E_ICV_ERROR:
2826 case WLC_E_UNICAST_DECODE_ERROR:
2827 case WLC_E_MULTICAST_DECODE_ERROR:
2828 DHD_EVENT(("MACEVENT: %s, MAC %s\n",
2829 event_name, eabuf));
2830 break;
2831
2832 case WLC_E_TXFAIL:
2833 DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
2834 break;
2835
2836 case WLC_E_ASSOC_REQ_IE:
2837 case WLC_E_ASSOC_RESP_IE:
2838 case WLC_E_PMKID_CACHE:
2839 DHD_EVENT(("MACEVENT: %s\n", event_name));
2840 break;
2841
2842 case WLC_E_SCAN_COMPLETE:
2843 DHD_EVENT(("MACEVENT: %s\n", event_name));
2844 break;
2845 case WLC_E_RSSI_LQM:
2846 case WLC_E_PFN_NET_FOUND:
2847 case WLC_E_PFN_NET_LOST:
2848 case WLC_E_PFN_SCAN_COMPLETE:
2849 case WLC_E_PFN_SCAN_NONE:
2850 case WLC_E_PFN_SCAN_ALLGONE:
2851 case WLC_E_PFN_GSCAN_FULL_RESULT:
2852 case WLC_E_PFN_SSID_EXT:
2853 DHD_EVENT(("PNOEVENT: %s\n", event_name));
2854 break;
2855
2856 case WLC_E_PFN_SCAN_BACKOFF:
2857 case WLC_E_PFN_BSSID_SCAN_BACKOFF:
2858 DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
2859 event_name, (int)status, (int)reason));
2860 break;
2861
2862 case WLC_E_PSK_SUP:
2863 case WLC_E_PRUNE:
2864 DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
2865 event_name, (int)status, (int)reason));
2866 break;
2867
2868 #ifdef WIFI_ACT_FRAME
2869 case WLC_E_ACTION_FRAME:
2870 DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
2871 break;
2872 #endif /* WIFI_ACT_FRAME */
2873
2874 #ifdef SHOW_LOGTRACE
2875 case WLC_E_TRACE:
2876 {
2877 dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
2878 break;
2879 }
2880 #endif /* SHOW_LOGTRACE */
2881
2882 case WLC_E_RSSI:
2883 DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
2884 break;
2885
2886 case WLC_E_SERVICE_FOUND:
2887 case WLC_E_P2PO_ADD_DEVICE:
2888 case WLC_E_P2PO_DEL_DEVICE:
2889 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2890 break;
2891
2892 #ifdef BT_WIFI_HANDOBER
2893 case WLC_E_BT_WIFI_HANDOVER_REQ:
2894 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2895 break;
2896 #endif // endif
2897
2898 case WLC_E_CCA_CHAN_QUAL:
2899 if (datalen) {
2900 cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data;
2901 if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) {
2902 cca_only_chan_qual_event_t *cca_only_event =
2903 (cca_only_chan_qual_event_t *)cca_event;
2904 BCM_REFERENCE(cca_only_event);
2905 DHD_EVENT((
2906 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2907 " channel 0x%02x\n",
2908 event_name, event_type, eabuf, (int)status,
2909 (int)reason, (int)auth_type, cca_event->chanspec));
2910 DHD_EVENT((
2911 "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
2912 " ts 0x%08x)\n",
2913 cca_only_event->cca_busy_ext.duration,
2914 cca_only_event->cca_busy_ext.congest_ibss,
2915 cca_only_event->cca_busy_ext.congest_obss,
2916 cca_only_event->cca_busy_ext.interference,
2917 cca_only_event->cca_busy_ext.timestamp));
2918 DHD_EVENT((
2919 "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
2920 cca_only_event->cca_busy_nopm.duration,
2921 cca_only_event->cca_busy_nopm.congest_ibss,
2922 cca_only_event->cca_busy_nopm.congest_obss,
2923 cca_only_event->cca_busy_nopm.interference));
2924 DHD_EVENT((
2925 "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
2926 cca_only_event->cca_busy_pm.duration,
2927 cca_only_event->cca_busy_pm.congest_ibss,
2928 cca_only_event->cca_busy_pm.congest_obss,
2929 cca_only_event->cca_busy_pm.interference));
2930 } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
2931 DHD_EVENT((
2932 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2933 " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
2934 " ts 0x%08x)\n",
2935 event_name, event_type, eabuf, (int)status,
2936 (int)reason, (int)auth_type, cca_event->chanspec,
2937 cca_event->cca_busy_ext.duration,
2938 cca_event->cca_busy_ext.congest_ibss,
2939 cca_event->cca_busy_ext.congest_obss,
2940 cca_event->cca_busy_ext.interference,
2941 cca_event->cca_busy_ext.timestamp));
2942 } else if (cca_event->id == WL_CHAN_QUAL_CCA) {
2943 DHD_EVENT((
2944 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2945 " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
2946 event_name, event_type, eabuf, (int)status,
2947 (int)reason, (int)auth_type, cca_event->chanspec,
2948 cca_event->cca_busy.duration,
2949 cca_event->cca_busy.congest,
2950 cca_event->cca_busy.timestamp));
2951 } else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
2952 (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
2953 DHD_EVENT((
2954 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2955 " channel 0x%02x (NF[%d] %ddB)\n",
2956 event_name, event_type, eabuf, (int)status,
2957 (int)reason, (int)auth_type, cca_event->chanspec,
2958 cca_event->id, cca_event->noise));
2959 } else {
2960 DHD_EVENT((
2961 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2962 " channel 0x%02x (unknown ID %d)\n",
2963 event_name, event_type, eabuf, (int)status,
2964 (int)reason, (int)auth_type, cca_event->chanspec,
2965 cca_event->id));
2966 }
2967 }
2968 break;
2969 case WLC_E_ESCAN_RESULT:
2970 {
2971 wl_escan_result_v2_t *escan_result =
2972 (wl_escan_result_v2_t *)event_data;
2973 BCM_REFERENCE(escan_result);
2974 if ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT)) {
2975 DHD_EVENT(("MACEVENT: %s %d, status %d sync-id %u\n",
2976 event_name, event_type, (int)status,
2977 dtoh16(escan_result->sync_id)));
2978 } else {
2979 DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
2980 event_name, event_type, eabuf, (int)status));
2981 }
2982
2983 break;
2984 }
2985 case WLC_E_IF:
2986 {
2987 struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
2988 BCM_REFERENCE(ifevent);
2989
2990 DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n",
2991 event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
2992 break;
2993 }
2994 #ifdef SHOW_LOGTRACE
2995 case WLC_E_MSCH:
2996 {
2997 wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
2998 break;
2999 }
3000 #endif /* SHOW_LOGTRACE */
3001
3002 case WLC_E_PSK_AUTH:
3003 DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
3004 event_name, eabuf, status, reason));
3005 break;
3006 case WLC_E_AGGR_EVENT:
3007 {
3008 event_aggr_data_t *aggrbuf = event_data;
3009 int j = 0, len = 0;
3010 uint8 *data = aggrbuf->data;
3011 DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
3012 event_name, aggrbuf->num_events, aggrbuf->len));
3013 for (j = 0; j < aggrbuf->num_events; j++)
3014 {
3015 wl_event_msg_t * sub_event = (wl_event_msg_t *)data;
3016 if (len > aggrbuf->len) {
3017 DHD_ERROR(("%s: Aggr events corrupted!",
3018 __FUNCTION__));
3019 break;
3020 }
3021 DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
3022 len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
3023 sizeof(wl_event_msg_t)), sizeof(uint64));
3024 buf = (uchar *)(data + sizeof(wl_event_msg_t));
3025 BCM_REFERENCE(buf);
3026 DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
3027 for (i = 0; i < ntoh32(sub_event->datalen); i++) {
3028 DHD_EVENT((" 0x%02x ", buf[i]));
3029 }
3030 data = aggrbuf->data + len;
3031 }
3032 DHD_EVENT(("\n"));
3033 }
3034 break;
3035 case WLC_E_NAN_CRITICAL:
3036 {
3037 DHD_LOG_MEM(("MACEVENT: %s, type:%d\n", event_name, reason));
3038 break;
3039 }
3040 case WLC_E_NAN_NON_CRITICAL:
3041 {
3042 DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
3043 break;
3044 }
3045 case WLC_E_PROXD:
3046 {
3047 wl_proxd_event_t *proxd = (wl_proxd_event_t*)event_data;
3048 DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
3049 event_name, proxd->type, reason));
3050 break;
3051 }
3052 case WLC_E_RPSNOA:
3053 {
3054 rpsnoa_stats_t *stat = event_data;
3055 if (datalen == sizeof(*stat)) {
3056 DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
3057 (stat->band == WLC_BAND_2G) ? "2G":"5G",
3058 stat->state, stat->last_pps));
3059 }
3060 break;
3061 }
3062 case WLC_E_PHY_CAL:
3063 {
3064 DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
3065 break;
3066 }
3067 case WLC_E_WA_LQM:
3068 {
3069 wl_event_wa_lqm_t *event_wa_lqm = (wl_event_wa_lqm_t *)event_data;
3070 bcm_xtlv_t *subevent;
3071 wl_event_wa_lqm_basic_t *elqm_basic;
3072
3073 if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
3074 (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
3075 DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
3076 event_name, event_wa_lqm->ver, event_wa_lqm->len));
3077 break;
3078 }
3079
3080 subevent = (bcm_xtlv_t *)event_wa_lqm->subevent;
3081 if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
3082 (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
3083 DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
3084 event_name, subevent->id, subevent->len));
3085 break;
3086 }
3087
3088 elqm_basic = (wl_event_wa_lqm_basic_t *)subevent->data;
3089 BCM_REFERENCE(elqm_basic);
3090 DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
3091 event_name, elqm_basic->rssi, elqm_basic->snr,
3092 elqm_basic->tx_rate, elqm_basic->rx_rate));
3093 break;
3094 }
3095 default:
3096 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
3097 event_name, event_type, eabuf, (int)status, (int)reason,
3098 (int)auth_type));
3099 break;
3100 }
3101
3102 /* show any appended data if message level is set to bytes or host_data is set */
3103 if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
3104 buf = (uchar *) event_data;
3105 BCM_REFERENCE(buf);
3106 DHD_EVENT((" data (%d) : ", datalen));
3107 for (i = 0; i < datalen; i++) {
3108 DHD_EVENT((" 0x%02x ", buf[i]));
3109 }
3110 DHD_EVENT(("\n"));
3111 }
3112 } /* wl_show_host_event */
3113 #endif /* SHOW_EVENTS */
3114
3115 #ifdef DNGL_EVENT_SUPPORT
3116 /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
3117 int
3118 dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
3119 {
3120 bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
3121
3122 dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
3123 return BCME_OK;
3124 }
3125
3126 #ifdef PARSE_DONGLE_HOST_EVENT
3127 typedef struct hck_id_to_str_s {
3128 uint32 id;
3129 char *name;
3130 } hck_id_to_str_t;
3131
3132 hck_id_to_str_t hck_sw_id_to_str[] = {
3133 {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
3134 {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
3135 {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
3136 {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
3137 {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
3138 {WL_HC_DD_PHY, "WL_HC_DD_PHY"},
3139 {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
3140 {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
3141 {0, NULL}
3142 };
3143
3144 hck_id_to_str_t hck_pcie_module_to_str[] = {
3145 {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
3146 {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
3147 {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
3148 {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
3149 {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
3150 {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
3151 {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
3152 {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
3153 {0, NULL}
3154 };
3155
3156 hck_id_to_str_t hck_rx_stall_v2_to_str[] = {
3157 {BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"},
3158 {BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"},
3159 {BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"},
3160 {BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"},
3161 {BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"},
3162 {BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"},
3163 {BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"},
3164 {0, NULL}
3165 };
3166
3167 static void
3168 dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
3169 {
3170 while (hck->name != NULL) {
3171 if (hck->id == id) {
3172 DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
3173 return;
3174 }
3175 hck++;
3176 }
3177 }
3178
3179 void
3180 dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc)
3181 {
3182
3183 wl_rx_hc_info_v2_t *hck_rx_stall_v2;
3184 uint16 id;
3185
3186 id = ltoh16(wl_hc->id);
3187
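/* RX stall v2 reports carry a full wl_rx_hc_info_v2_t record; all other
* common SW health-check ids are simply mapped to their names.
*/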
3188 if (id == WL_HC_DD_RX_STALL_V2) {
3189 /* map the hck_rx_stall_v2 structure to the value of the XTLV */
3190 hck_rx_stall_v2 =
3191 (wl_rx_hc_info_v2_t*)wl_hc;
3192 DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d"
3193 " drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n",
3194 hck_rx_stall_v2->type,
3195 hck_rx_stall_v2->length,
3196 hck_rx_stall_v2->if_idx,
3197 hck_rx_stall_v2->ac,
3198 hck_rx_stall_v2->rx_hc_pkts,
3199 hck_rx_stall_v2->rx_hc_dropped_all,
3200 hck_rx_stall_v2->rx_hc_alert_th,
3201 hck_rx_stall_v2->reason,
3202 ETHER_TO_MACF(hck_rx_stall_v2->peer_ea)));
3203 dhd_print_dongle_hck_id(
3204 ltoh32(hck_rx_stall_v2->reason),
3205 hck_rx_stall_v2_to_str);
3206 } else {
3207 dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
3208 hck_sw_id_to_str);
3209 }
3210
3211 }
3212
3213 #endif /* PARSE_DONGLE_HOST_EVENT */
3214
3215 void
3216 dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
3217 bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
3218 {
3219 uint8 *p = (uint8 *)(event + 1);
3220 uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
3221 uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
3222 uint16 version = ntoh16_ua((void *)&dngl_event->version);
3223
3224 DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
3225 if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
3226 return;
3227 }
3228 if (version != BCM_DNGL_EVENT_MSG_VERSION) {
3229 DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
3230 version, BCM_DNGL_EVENT_MSG_VERSION));
3231 return;
3232 }
3233 switch (type) {
3234 case DNGL_E_SOCRAM_IND:
3235 {
3236 bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
3237 uint16 tag = ltoh32(socramind_ptr->tag);
3238 uint16 taglen = ltoh32(socramind_ptr->length);
3239 p = (uint8 *)socramind_ptr->value;
3240 DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
3241 switch (tag) {
3242 case SOCRAM_IND_ASSERT_TAG:
3243 {
3244 /*
3245 * The payload consists of -
3246 * null terminated function name padded till 32 bit boundary +
3247 * Line number - (32 bits)
3248 * Caller address (32 bits)
3249 */
3250 char *fnname = (char *)p;
3251 if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
3252 sizeof(uint32) * 2)) {
3253 DHD_ERROR(("Wrong length:%d\n", datalen));
3254 return;
3255 }
3256 DHD_EVENT(("ASSRT Function:%s ", p));
3257 p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
3258 DHD_EVENT(("Line:%d ", *(uint32 *)p));
3259 p += sizeof(uint32);
3260 DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
3261 #ifdef PARSE_DONGLE_HOST_EVENT
3262 DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
3263 #endif /* PARSE_DONGLE_HOST_EVENT */
3264 break;
3265 }
3266 case SOCRAM_IND_TAG_HEALTH_CHECK:
3267 {
3268 bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
3269 DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
3270 ltoh32(dngl_hc->top_module_tag),
3271 ltoh32(dngl_hc->top_module_len),
3272 datalen));
3273 if (DHD_EVENT_ON()) {
3274 prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
3275 + BCM_XTLV_HDR_SIZE, datalen));
3276 }
3277 #ifdef DHD_LOG_DUMP
3278 memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
3279 memcpy(dhdp->health_chk_event_data, p,
3280 MIN(ltoh32(dngl_hc->top_module_len),
3281 HEALTH_CHK_BUF_SIZE));
3282 #endif /* DHD_LOG_DUMP */
3283 p = (uint8 *)dngl_hc->value;
3284
3285 switch (ltoh32(dngl_hc->top_module_tag)) {
3286 case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
3287 {
3288 bcm_dngl_pcie_hc_t *pcie_hc;
3289 pcie_hc = (bcm_dngl_pcie_hc_t *)p;
3290 BCM_REFERENCE(pcie_hc);
3291 if (ltoh32(dngl_hc->top_module_len) <
3292 sizeof(bcm_dngl_pcie_hc_t)) {
3293 DHD_ERROR(("Wrong length:%d\n",
3294 ltoh32(dngl_hc->top_module_len)));
3295 return;
3296 }
3297 DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
3298 " control:0x%x\n",
3299 ltoh32(pcie_hc->version),
3300 ltoh32(pcie_hc->pcie_err_ind_type),
3301 ltoh32(pcie_hc->pcie_flag),
3302 ltoh32(pcie_hc->pcie_control_reg)));
3303 #ifdef PARSE_DONGLE_HOST_EVENT
3304 dhd_print_dongle_hck_id(
3305 ltoh32(pcie_hc->pcie_err_ind_type),
3306 hck_pcie_module_to_str);
3307 #endif /* PARSE_DONGLE_HOST_EVENT */
3308 break;
3309 }
3310 #ifdef HCHK_COMMON_SW_EVENT
3311 case HCHK_SW_ENTITY_WL_PRIMARY:
3312 case HCHK_SW_ENTITY_WL_SECONDARY:
3313 {
3314 bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;
3315
3316 if (ltoh32(dngl_hc->top_module_len) <
3317 sizeof(bcm_xtlv_t)) {
3318 DHD_ERROR(("WL SW HC Wrong length:%d\n",
3319 ltoh32(dngl_hc->top_module_len)));
3320 return;
3321 }
3322 BCM_REFERENCE(wl_hc);
3323 DHD_EVENT(("WL SW HC type %d len %d\n",
3324 ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
3325
3326 #ifdef PARSE_DONGLE_HOST_EVENT
3327 dhd_parse_hck_common_sw_event(wl_hc);
3328 #endif /* PARSE_DONGLE_HOST_EVENT */
3329 break;
3330
3331 }
3332 #endif /* HCHK_COMMON_SW_EVENT */
3333 default:
3334 {
3335 DHD_ERROR(("%s:Unknown module TAG:%d\n",
3336 __FUNCTION__,
3337 ltoh32(dngl_hc->top_module_tag)));
3338 break;
3339 }
3340 }
3341 break;
3342 }
3343 default:
3344 DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
3345 if (p && DHD_EVENT_ON()) {
3346 prhex("SOCRAMIND", p, taglen);
3347 }
3348 break;
3349 }
3350 break;
3351 }
3352 default:
3353 DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
3354 if (p && DHD_EVENT_ON()) {
3355 prhex("SOCRAMIND", p, datalen);
3356 }
3357 break;
3358 }
3359 #ifndef BCMDBUS
3360 #ifdef DHD_FW_COREDUMP
3361 if (dhdp->memdump_enabled) {
3362 dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
3363 if (dhd_socram_dump(dhdp->bus)) {
3364 DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
3365 }
3366 }
3367 #else
3368 dhd_dbg_send_urgent_evt(dhdp, p, datalen);
3369 #endif /* DHD_FW_COREDUMP */
3370 #endif /* !BCMDBUS */
3371 }
3372
3373 #endif /* DNGL_EVENT_SUPPORT */
3374
3375 /* Stub for now. Will become a real function once the shim
3376 * is integrated into Android, Linux, etc.
3377 */
3378 int
3379 wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
3380 {
3381 return BCME_OK;
3382 }
3383
3384 int
3385 wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
3386 uint pktlen, void **data_ptr, void *raw_event)
3387 {
3388 wl_evt_pport_t evt_pport;
3389 wl_event_msg_t event;
3390 bcm_event_msg_u_t evu;
3391 int ret;
3392
3393 /* make sure it is a BRCM event pkt and record event data */
3394 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
3395 if (ret != BCME_OK) {
3396 return ret;
3397 }
3398
3399 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
3400
3401 /* convert event from network order to host order */
3402 wl_event_to_host_order(&event);
3403
3404 /* record event params to evt_pport */
3405 evt_pport.dhd_pub = dhd_pub;
3406 evt_pport.ifidx = ifidx;
3407 evt_pport.pktdata = pktdata;
3408 evt_pport.data_ptr = data_ptr;
3409 evt_pport.raw_event = raw_event;
3410 evt_pport.data_len = pktlen;
3411
3412 ret = wl_event_process_default(&event, &evt_pport);
3413
3414 return ret;
3415 } /* wl_event_process */
3416
3417 /* Check whether packet is a BRCM event pkt. If it is, record event data. */
3418 int
3419 wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
3420 {
3421 int ret;
3422
3423 ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
3424 if (ret != BCME_OK) {
3425 DHD_ERROR(("%s: Invalid event frame, err = %d\n",
3426 __FUNCTION__, ret));
3427 }
3428
3429 return ret;
3430 }
3431
3432 int
3433 wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
3434 wl_event_msg_t *event, void **data_ptr, void *raw_event)
3435 {
3436 bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
3437 bcm_event_msg_u_t evu;
3438 uint8 *event_data;
3439 uint32 type, status, datalen, reason;
3440 uint16 flags;
3441 uint evlen;
3442 int ret;
3443 uint16 usr_subtype;
3444 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3445 dhd_if_t *ifp = NULL;
3446 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3447
3448 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
3449 if (ret != BCME_OK) {
3450 return ret;
3451 }
3452
3453 usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
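/* Dispatch on the BCM event header subtype: WL events are handled below,
* dongle events are routed to dngl_host_event().
*/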
3454 switch (usr_subtype) {
3455 case BCMILCP_BCM_SUBTYPE_EVENT:
3456 memcpy(event, &evu.event, sizeof(wl_event_msg_t));
3457 *data_ptr = &pvt_data[1];
3458 break;
3459 case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
3460 #ifdef DNGL_EVENT_SUPPORT
3461 /* If it is a DNGL event process it first */
3462 if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
3463 /*
3464 * Purposely return an error so the DNGL event is not also
3465 * processed as a BRCM event.
3466 */
3467 return BCME_ERROR;
3468 }
3469 #endif /* DNGL_EVENT_SUPPORT */
3470 return BCME_NOTFOUND;
3471 default:
3472 return BCME_NOTFOUND;
3473 }
3474
3475 /* start wl_event_msg process */
3476 event_data = *data_ptr;
3477 type = ntoh32_ua((void *)&event->event_type);
3478 flags = ntoh16_ua((void *)&event->flags);
3479 status = ntoh32_ua((void *)&event->status);
3480 reason = ntoh32_ua((void *)&event->reason);
3481 datalen = ntoh32_ua((void *)&event->datalen);
3482 evlen = datalen + sizeof(bcm_event_t);
3483
3484 switch (type) {
3485 #ifdef PROP_TXSTATUS
3486 case WLC_E_FIFO_CREDIT_MAP:
3487 dhd_wlfc_enable(dhd_pub);
3488 dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
3489 WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
3490 "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
3491 event_data[2],
3492 event_data[3], event_data[4], event_data[5]));
3493 break;
3494
3495 case WLC_E_BCMC_CREDIT_SUPPORT:
3496 dhd_wlfc_BCMCCredit_support_event(dhd_pub);
3497 break;
3498 #ifdef LIMIT_BORROW
3499 case WLC_E_ALLOW_CREDIT_BORROW:
3500 dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
3501 break;
3502 #endif /* LIMIT_BORROW */
3503 #endif /* PROP_TXSTATUS */
3504
3505 case WLC_E_ULP:
3506 #ifdef DHD_ULP
3507 {
3508 wl_ulp_event_t *ulp_evt = (wl_ulp_event_t *)event_data;
3509
3510 /* Flush and disable console messages */
3511 if (ulp_evt->ulp_dongle_action == WL_ULP_DISABLE_CONSOLE) {
3512 #ifdef DHD_ULP_NOT_USED
3513 dhd_bus_ulp_disable_console(dhd_pub);
3514 #endif /* DHD_ULP_NOT_USED */
3515 }
3516 if (ulp_evt->ulp_dongle_action == WL_ULP_UCODE_DOWNLOAD) {
3517 dhd_bus_ucode_download(dhd_pub->bus);
3518 }
3519 }
3520 #endif /* DHD_ULP */
3521 break;
3522 case WLC_E_TDLS_PEER_EVENT:
3523 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
3524 {
3525 dhd_tdls_event_handler(dhd_pub, event);
3526 }
3527 #endif // endif
3528 break;
3529
3530 case WLC_E_IF:
3531 {
3532 struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
3533
3534 /* Ignore the event if NOIF is set */
3535 if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
3536 DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
3537 return (BCME_UNSUPPORTED);
3538 }
3539 #ifdef PCIE_FULL_DONGLE
3540 dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
3541 ifevent->opcode, ifevent->role);
3542 #endif // endif
3543 #ifdef PROP_TXSTATUS
3544 {
3545 uint8* ea = pvt_data->eth.ether_dhost;
3546 WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n"
3547 ifevent->ifidx,
3548 ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
3549 ((ifevent->role == 0) ? "STA":"AP "),
3550 MAC2STRDBG(ea)));
3551 (void)ea;
3552
3553 if (ifevent->opcode == WLC_E_IF_CHANGE)
3554 dhd_wlfc_interface_event(dhd_pub,
3555 eWLFC_MAC_ENTRY_ACTION_UPDATE,
3556 ifevent->ifidx, ifevent->role, ea);
3557 else
3558 dhd_wlfc_interface_event(dhd_pub,
3559 ((ifevent->opcode == WLC_E_IF_ADD) ?
3560 eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
3561 ifevent->ifidx, ifevent->role, ea);
3562
3563 /* dhd has already created an interface for index 0 by default */
3564 if (ifevent->ifidx == 0)
3565 break;
3566 }
3567 #endif /* PROP_TXSTATUS */
3568
3569 if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
3570 if (ifevent->opcode == WLC_E_IF_ADD) {
3571 if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
3572 event->addr.octet)) {
3573
3574 DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
3575 __FUNCTION__, ifevent->ifidx, event->ifname));
3576 return (BCME_ERROR);
3577 }
3578 } else if (ifevent->opcode == WLC_E_IF_DEL) {
3579 #ifdef PCIE_FULL_DONGLE
3580 /* Delete flowrings unconditionally for i/f delete */
3581 dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3582 event->ifname));
3583 #endif /* PCIE_FULL_DONGLE */
3584 dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
3585 event->addr.octet);
3586 } else if (ifevent->opcode == WLC_E_IF_CHANGE) {
3587 #ifdef WL_CFG80211
3588 dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
3589 event->addr.octet);
3590 #endif /* WL_CFG80211 */
3591 }
3592 } else {
3593 #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
3594 DHD_INFO(("%s: Invalid ifidx %d for %s\n",
3595 __FUNCTION__, ifevent->ifidx, event->ifname));
3596 #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
3597 }
3598 /* send up the if event: btamp user needs it */
3599 *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
3600 /* push up to external supp/auth */
3601 dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
3602 break;
3603 }
3604
3605 case WLC_E_NDIS_LINK:
3606 break;
3607 case WLC_E_PFN_NET_FOUND:
3608 case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
3609 case WLC_E_PFN_NET_LOST:
3610 break;
3611 #if defined(PNO_SUPPORT)
3612 case WLC_E_PFN_BSSID_NET_FOUND:
3613 case WLC_E_PFN_BEST_BATCHING:
3614 dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
3615 break;
3616 #endif // endif
3617 #if defined(RTT_SUPPORT)
3618 case WLC_E_PROXD:
3619 #ifndef WL_CFG80211
3620 dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
3621 #endif /* WL_CFG80211 */
3622 break;
3623 #endif /* RTT_SUPPORT */
3624 /* These are what external supplicant/authenticator wants */
3625 case WLC_E_ASSOC_IND:
3626 case WLC_E_AUTH_IND:
3627 case WLC_E_REASSOC_IND:
3628 dhd_findadd_sta(dhd_pub,
3629 dhd_ifname2idx(dhd_pub->info, event->ifname),
3630 &event->addr.octet);
3631 break;
3632 #ifndef BCMDBUS
3633 #if defined(DHD_FW_COREDUMP)
3634 case WLC_E_PSM_WATCHDOG:
3635 DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
3636 if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
3637 DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
3638 }
3639 break;
3640 #endif // endif
3641 #endif /* !BCMDBUS */
3642 case WLC_E_NATOE_NFCT:
3643 #ifdef WL_NATOE
3644 DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
3645 dhd_natoe_ct_event(dhd_pub, event_data);
3646 #endif /* WL_NATOE */
3647 break;
3648 #ifdef WL_NAN
3649 case WLC_E_SLOTTED_BSS_PEER_OP:
3650 DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
3651 "" MACDBG ", status = %d\n",
3652 __FUNCTION__, MAC2STRDBG(event->addr.octet), status));
3653 if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
3654 dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3655 event->ifname), &event->addr.octet);
3656 } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
3657 uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
3658 BCM_REFERENCE(ifindex);
3659 dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3660 event->ifname), &event->addr.octet);
3661 #ifdef PCIE_FULL_DONGLE
3662 dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
3663 (char *)&event->addr.octet[0]);
3664 #endif // endif
3665 } else {
3666 DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
3667 __FUNCTION__, status));
3668 }
3669 break;
3670 #endif /* WL_NAN */
3671 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3672 case WLC_E_REASSOC:
3673 ifp = dhd_get_ifp(dhd_pub, event->ifidx);
3674
3675 if (!ifp)
3676 break;
3677
3678 /* Consider STA role only since roam is disabled on P2P GC.
3679 * Drop EAPOL M1 frame only if roam is done to same BSS.
3680 */
3681 if ((status == WLC_E_STATUS_SUCCESS) &&
3682 IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
3683 wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
3684 ifp->recv_reassoc_evt = TRUE;
3685 }
3686 break;
3687 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3688 #if defined(CSI_SUPPORT)
3689 case WLC_E_CSI:
3690 dhd_csi_event_handler(dhd_pub, event, (void *)event_data);
3691 break;
3692 #endif /* CSI_SUPPORT */
3693 case WLC_E_LINK:
3694 #ifdef PCIE_FULL_DONGLE
3695 if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3696 event->ifname), (uint8)flags) != BCME_OK) {
3697 DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
3698 __FUNCTION__));
3699 break;
3700 }
3701 if (!flags) {
3702 DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
3703 __FUNCTION__));
3704 /* Delete all sta and flowrings */
3705 dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
3706 dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3707 event->ifname));
3708 }
3709 /* fall through */
3710 #endif /* PCIE_FULL_DONGLE */
3711 case WLC_E_DEAUTH:
3712 case WLC_E_DEAUTH_IND:
3713 case WLC_E_DISASSOC:
3714 case WLC_E_DISASSOC_IND:
3715 #ifdef PCIE_FULL_DONGLE
3716 if (type != WLC_E_LINK) {
3717 uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
3718 uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
3719 uint8 del_sta = TRUE;
3720 #ifdef WL_CFG80211
3721 if (role == WLC_E_IF_ROLE_STA &&
3722 !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
3723 !wl_cfg80211_is_event_from_connected_bssid(
3724 dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
3725 del_sta = FALSE;
3726 }
3727 #endif /* WL_CFG80211 */
3728 DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
3729 __FUNCTION__, type, flags, status, role, del_sta));
3730
3731 if (del_sta) {
3732 DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
3733 __FUNCTION__, MAC2STRDBG(event->addr.octet)));
3734
3735 dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3736 event->ifname), &event->addr.octet);
3737 /* Delete all flowrings for STA and P2P Client */
3738 if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
3739 dhd_flow_rings_delete(dhd_pub, ifindex);
3740 } else {
3741 dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
3742 (char *)&event->addr.octet[0]);
3743 }
3744 }
3745 }
3746 #endif /* PCIE_FULL_DONGLE */
3747 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3748 /* fall through */
3749 ifp = dhd_get_ifp(dhd_pub, event->ifidx);
3750 if (ifp) {
3751 ifp->recv_reassoc_evt = FALSE;
3752 ifp->post_roam_evt = FALSE;
3753 }
3754 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3755 /* fall through */
3756 default:
3757 *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
3758 #ifdef DHD_UPDATE_INTF_MAC
3759 if ((WLC_E_LINK==type)&&(WLC_EVENT_MSG_LINK&flags)) {
3760 dhd_event_ifchange(dhd_pub->info,
3761 (struct wl_event_data_if *)event,
3762 event->ifname,
3763 event->addr.octet);
3764 }
3765 #endif /* DHD_UPDATE_INTF_MAC */
3766 /* push up to external supp/auth */
3767 dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
3768 DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
3769 __FUNCTION__, type, flags, status));
3770 BCM_REFERENCE(flags);
3771 BCM_REFERENCE(status);
3772 BCM_REFERENCE(reason);
3773
3774 break;
3775 }
3776 #if defined(STBAP)
3777 /* For routers, EAPD will be working on these events.
3778 * Overwrite the interface name so that the event is pushed
3779 * to the host with its registered interface name
3780 */
3781 memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
3782 #endif // endif
3783
3784 #ifdef DHD_STATUS_LOGGING
3785 if (dhd_pub->statlog) {
3786 dhd_statlog_process_event(dhd_pub, type, *ifidx,
3787 status, reason, flags);
3788 }
3789 #endif /* DHD_STATUS_LOGGING */
3790
3791 #ifdef SHOW_EVENTS
3792 if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
3793 wl_show_host_event(dhd_pub, event,
3794 (void *)event_data, raw_event, dhd_pub->enable_log);
3795 }
3796 #endif /* SHOW_EVENTS */
3797
3798 return (BCME_OK);
3799 } /* wl_process_host_event */
3800
3801 int
3802 wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
3803 wl_event_msg_t *event, void **data_ptr, void *raw_event)
3804 {
3805 return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
3806 raw_event);
3807 }
3808
3809 void
3810 dhd_print_buf(void *pbuf, int len, int bytes_per_line)
3811 {
3812 #ifdef DHD_DEBUG
3813 int i, j = 0;
3814 unsigned char *buf = pbuf;
3815
3816 if (bytes_per_line == 0) {
3817 bytes_per_line = len;
3818 }
3819
3820 for (i = 0; i < len; i++) {
3821 printf("%2.2x", *buf++);
3822 j++;
3823 if (j == bytes_per_line) {
3824 printf("\n");
3825 j = 0;
3826 } else {
3827 printf(":");
3828 }
3829 }
3830 printf("\n");
3831 #endif /* DHD_DEBUG */
3832 }
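/*
 * Illustrative output (DHD_DEBUG builds only): dumping a 6-byte MAC with
 * bytes_per_line = 3, e.g. dhd_print_buf(ea, 6, 3), prints
 *
 *   00:11:22
 *   33:44:55
 *
 * followed by a trailing blank line; bytes_per_line = 0 puts everything on
 * one line.
 */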
3833 #ifndef strtoul
3834 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
3835 #endif // endif
3836
3837 /* Convert user's input in hex pattern to byte-size mask */
3838 int
3839 wl_pattern_atoh(char *src, char *dst)
3840 {
3841 int i;
3842 if (strncmp(src, "0x", 2) != 0 &&
3843 strncmp(src, "0X", 2) != 0) {
3844 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
3845 return -1;
3846 }
3847 src = src + 2; /* Skip past 0x */
3848 if (strlen(src) % 2 != 0) {
3849 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
3850 return -1;
3851 }
3852 for (i = 0; *src != '\0'; i++) {
3853 char num[3];
3854 bcm_strncpy_s(num, sizeof(num), src, 2);
3855 num[2] = '\0';
3856 dst[i] = (uint8)strtoul(num, NULL, 16);
3857 src += 2;
3858 }
3859 return i;
3860 }
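/*
 * Example (illustrative): wl_pattern_atoh("0xff00aa", dst) stores the bytes
 * 0xff, 0x00, 0xaa into dst and returns 3. This variant does not bound-check
 * dst, so callers must size dst for strlen(src)/2 bytes; pattern_atoh_len()
 * below is the length-checked counterpart.
 */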
3861
3862 #if defined(PKT_FILTER_SUPPORT)
3863 int
3864 pattern_atoh_len(char *src, char *dst, int len)
3865 {
3866 int i;
3867 if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
3868 strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
3869 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
3870 return -1;
3871 }
3872 src = src + HD_PREFIX_SIZE; /* Skip past 0x */
3873 if (strlen(src) % HD_BYTE_SIZE != 0) {
3874 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
3875 return -1;
3876 }
3877 for (i = 0; *src != '\0'; i++) {
3878 char num[HD_BYTE_SIZE + 1];
3879
3880 if (i > len - 1) {
3881 DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
3882 return -1;
3883 }
3884 bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
3885 num[HD_BYTE_SIZE] = '\0';
3886 dst[i] = (uint8)strtoul(num, NULL, 16);
3887 src += HD_BYTE_SIZE;
3888 }
3889 return i;
3890 }
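/*
 * pattern_atoh_len() behaves like wl_pattern_atoh() but rejects input that
 * would overflow the destination: e.g. pattern_atoh_len("0x0102", dst, 1)
 * returns -1 because two bytes do not fit in a one-byte buffer.
 */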
3891 #endif // endif
3892
3893 #ifdef PKT_FILTER_SUPPORT
3894 void
3895 dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
3896 {
3897 char *argv[8];
3898 int i = 0;
3899 const char *str;
3900 int buf_len;
3901 int str_len;
3902 char *arg_save = 0, *arg_org = 0;
3903 int rc;
3904 char buf[32] = {0};
3905 wl_pkt_filter_enable_t enable_parm;
3906 wl_pkt_filter_enable_t * pkt_filterp;
3907
3908 if (!arg)
3909 return;
3910
3911 if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
3912 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
3913 goto fail;
3914 }
3915 arg_org = arg_save;
3916 memcpy(arg_save, arg, strlen(arg) + 1);
3917
3918 argv[i] = bcmstrtok(&arg_save, " ", 0);
3919
3920 i = 0;
3921 if (argv[i] == NULL) {
3922 DHD_ERROR(("No args provided\n"));
3923 goto fail;
3924 }
3925
3926 str = "pkt_filter_enable";
3927 str_len = strlen(str);
3928 bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
3929 buf[ sizeof(buf) - 1 ] = '\0';
3930 buf_len = str_len + 1;
3931
3932 pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
3933
3934 /* Parse packet filter id. */
3935 enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
3936 if (dhd_conf_del_pkt_filter(dhd, enable_parm.id))
3937 goto fail;
3938
3939 /* Parse enable/disable value. */
3940 enable_parm.enable = htod32(enable);
3941
3942 buf_len += sizeof(enable_parm);
3943 memcpy((char *)pkt_filterp,
3944 &enable_parm,
3945 sizeof(enable_parm));
3946
3947 /* Enable/disable the specified filter. */
3948 rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
3949 rc = rc >= 0 ? 0 : rc;
3950 if (rc) {
3951 DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n",
3952 __FUNCTION__, enable?"enable":"disable", arg, rc));
3953 dhd_set_packet_filter(dhd);
3954 rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
3955 rc = rc >= 0 ? 0 : rc;
3956 if (rc) {
3957 DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
3958 __FUNCTION__, arg, rc));
3959 } else {
3960 DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
3961 __FUNCTION__, arg));
3962 }
3963 }
3964 else
3965 DHD_TRACE(("%s: successfully %s pktfilter %s\n",
3966 __FUNCTION__, enable?"enable":"disable", arg));
3967
3968 /* Control the master mode */
3969 rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
3970 master_mode, WLC_SET_VAR, TRUE, 0);
3971 rc = rc >= 0 ? 0 : rc;
3972 if (rc)
3973 DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
3974 __FUNCTION__, master_mode, rc));
3975
3976 fail:
3977 if (arg_org)
3978 MFREE(dhd->osh, arg_org, strlen(arg) + 1);
3979 }
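/*
 * The iovar buffer assembled above follows the common WLC_SET_VAR layout:
 * the NUL-terminated iovar name followed by the fixed-size parameter struct:
 *
 *   "pkt_filter_enable\0" | wl_pkt_filter_enable_t { id, enable }
 *
 * Illustrative call (the id must match a filter added earlier via
 * dhd_pktfilter_offload_set(); master_mode is the host's pkt_filter_mode
 * policy value, commonly 0 or 1):
 *
 *   dhd_pktfilter_offload_enable(dhd, "100", TRUE, 1);
 */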
3980
3981 /* Packet filter section: extended filters have named offsets, add table here */
3982 typedef struct {
3983 char *name;
3984 uint16 base;
3985 } wl_pfbase_t;
3986
3987 static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
3988
3989 static int
3990 wl_pkt_filter_base_parse(char *name)
3991 {
3992 uint i;
3993 char *bname, *uname;
3994
3995 for (i = 0; i < ARRAYSIZE(basenames); i++) {
3996 bname = basenames[i].name;
3997 for (uname = name; *uname; bname++, uname++) {
3998 if (*bname != bcm_toupper(*uname)) {
3999 break;
4000 }
4001 }
4002 if (!*uname && !*bname) {
4003 break;
4004 }
4005 }
4006
4007 if (i < ARRAYSIZE(basenames)) {
4008 return basenames[i].base;
4009 } else {
4010 return -1;
4011 }
4012 }
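/*
 * Case-insensitive lookup of a symbolic packet-filter base name against the
 * WL_PKT_FILTER_BASE_NAMES table from the WL headers. For example, assuming
 * the table contains an entry named "ETH_H", wl_pkt_filter_base_parse("eth_h")
 * returns that entry's base offset; unknown names return -1.
 */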
4013
4014 void
4015 dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
4016 {
4017 const char *str;
4018 wl_pkt_filter_t pkt_filter;
4019 wl_pkt_filter_t *pkt_filterp;
4020 int buf_len;
4021 int str_len;
4022 int rc = -1;
4023 uint32 mask_size;
4024 uint32 pattern_size;
4025 char *argv[MAXPKT_ARG] = {0}, * buf = 0;
4026 int i = 0;
4027 char *arg_save = 0, *arg_org = 0;
4028
4029 if (!arg)
4030 return;
4031
4032 if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
4033 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
4034 goto fail;
4035 }
4036
4037 arg_org = arg_save;
4038
4039 if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
4040 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
4041 goto fail;
4042 }
4043
4044 memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
4045 memcpy(arg_save, arg, strlen(arg) + 1);
4046
4047 if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
4048 DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
4049 goto fail;
4050 }
4051
4052 argv[i] = bcmstrtok(&arg_save, " ", 0);
4053 while (argv[i++]) {
4054 if (i >= MAXPKT_ARG) {
4055 DHD_ERROR(("Invalid args provided\n"));
4056 goto fail;
4057 }
4058 argv[i] = bcmstrtok(&arg_save, " ", 0);
4059 }
4060
4061 i = 0;
4062 if (argv[i] == NULL) {
4063 DHD_ERROR(("No args provided\n"));
4064 goto fail;
4065 }
4066
4067 str = "pkt_filter_add";
4068 str_len = strlen(str);
4069 bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
4070 buf[ str_len ] = '\0';
4071 buf_len = str_len + 1;
4072
4073 pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
4074
4075 /* Parse packet filter id. */
4076 pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
4077
4078 if (argv[++i] == NULL) {
4079 DHD_ERROR(("Polarity not provided\n"));
4080 goto fail;
4081 }
4082
4083 /* Parse filter polarity. */
4084 pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
4085
4086 if (argv[++i] == NULL) {
4087 DHD_ERROR(("Filter type not provided\n"));
4088 goto fail;
4089 }
4090
4091 /* Parse filter type. */
4092 pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
4093
4094 if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
4095 if (argv[++i] == NULL) {
4096 DHD_ERROR(("Offset not provided\n"));
4097 goto fail;
4098 }
4099
4100 /* Parse pattern filter offset. */
4101 pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
4102
4103 if (argv[++i] == NULL) {
4104 DHD_ERROR(("Bitmask not provided\n"));
4105 goto fail;
4106 }
4107
4108 /* Parse pattern filter mask. */
4109 rc = wl_pattern_atoh(argv[i],
4110 (char *) pkt_filterp->u.pattern.mask_and_pattern);
4111
4112 if (rc == -1) {
4113 DHD_ERROR(("Rejecting: %s\n", argv[i]));
4114 goto fail;
4115 }
4116 mask_size = htod32(rc);
4117 if (argv[++i] == NULL) {
4118 DHD_ERROR(("Pattern not provided\n"));
4119 goto fail;
4120 }
4121
4122 /* Parse pattern filter pattern. */
4123 rc = wl_pattern_atoh(argv[i],
4124 (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]);
4125
4126 if (rc == -1) {
4127 DHD_ERROR(("Rejecting: %s\n", argv[i]));
4128 goto fail;
4129 }
4130 pattern_size = htod32(rc);
4131 if (mask_size != pattern_size) {
4132 DHD_ERROR(("Mask and pattern not the same size\n"));
4133 goto fail;
4134 }
4135
4136 pkt_filter.u.pattern.size_bytes = mask_size;
4137 buf_len += WL_PKT_FILTER_FIXED_LEN;
4138 buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
4139
4140 /* Keep-alive attributes are set in local variable (keep_alive_pkt), and
4141 * then memcpy'ed into buffer (keep_alive_pktp) since there is no
4142 * guarantee that the buffer is properly aligned.
4143 */
4144 memcpy((char *)pkt_filterp,
4145 &pkt_filter,
4146 WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
4147 } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
4148 int list_cnt = 0;
4149 char *endptr = NULL;
4150 wl_pkt_filter_pattern_listel_t *pf_el =
4151 (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
4152
4153 while (argv[++i] != NULL) {
4154 /* Check valid buffer size. */
4155 if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
4156 DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
4157 goto fail;
4158 }
4159
4160 /* Parse pattern filter base and offset. */
4161 if (bcm_isdigit(*argv[i])) {
4162 /* Numeric base */
4163 rc = strtoul(argv[i], &endptr, 0);
4164 } else {
4165 endptr = strchr(argv[i], ':');
4166 if (endptr) {
4167 *endptr = '\0';
4168 rc = wl_pkt_filter_base_parse(argv[i]);
4169 if (rc == -1) {
4170 printf("Invalid base %s\n", argv[i]);
4171 goto fail;
4172 }
4173 *endptr = ':';
4174 }
4175 }
4176
4177 if (endptr == NULL) {
4178 printf("Invalid [base:]offset format: %s\n", argv[i]);
4179 goto fail;
4180 }
4181
4182 if (*endptr == ':') {
4183 pf_el->base_offs = htod16(rc);
4184 rc = strtoul(endptr + 1, &endptr, 0);
4185 } else {
4186 /* Must have had a numeric offset only */
4187 pf_el->base_offs = htod16(0);
4188 }
4189
4190 if (*endptr) {
4191 printf("Invalid [base:]offset format: %s\n", argv[i]);
4192 goto fail;
4193 }
4194 if (rc > 0x0000FFFF) {
4195 printf("Offset too large\n");
4196 goto fail;
4197 }
4198 pf_el->rel_offs = htod16(rc);
4199
4200 /* Clear match_flag (may be set in parsing which follows) */
4201 pf_el->match_flags = htod16(0);
4202
4203 /* Parse pattern filter mask and pattern directly into ioctl buffer */
4204 if (argv[++i] == NULL) {
4205 printf("Bitmask not provided\n");
4206 goto fail;
4207 }
4208 rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
4209 if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
4210 printf("Rejecting: %s\n", argv[i]);
4211 goto fail;
4212 }
4213 mask_size = htod16(rc);
4214
4215 if (argv[++i] == NULL) {
4216 printf("Pattern not provided\n");
4217 goto fail;
4218 }
4219
4220 if (*argv[i] == '!') {
4221 pf_el->match_flags =
4222 htod16(WL_PKT_FILTER_MFLAG_NEG);
4223 (argv[i])++;
4224 }
4225 if (*argv[i] == '\0') {
4226 printf("Pattern not provided\n");
4227 goto fail;
4228 }
4229 rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]);
4230 if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
4231 printf("Rejecting: %s\n", argv[i]);
4232 goto fail;
4233 }
4234 pattern_size = htod16(rc);
4235
4236 if (mask_size != pattern_size) {
4237 printf("Mask and pattern not the same size\n");
4238 goto fail;
4239 }
4240
4241 pf_el->size_bytes = mask_size;
4242
4243 /* Account for the size of this pattern element */
4244 buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
4245
4246 /* Move to next element location in ioctl buffer */
4247 pf_el = (wl_pkt_filter_pattern_listel_t*)
4248 ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
4249
4250 /* Count list element */
4251 list_cnt++;
4252 }
4253
4254 /* Account for initial fixed size, and copy initial fixed fields */
4255 buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
4256
4257 if (buf_len > MAX_PKTFLT_BUF_SIZE) {
4258 DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
4259 goto fail;
4260 }
4261 /* Update list count and total size */
4262 pkt_filter.u.patlist.list_cnt = list_cnt;
4263 pkt_filter.u.patlist.PAD1[0] = 0;
4264 pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
4265 pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
4266
4267 memcpy((char *)pkt_filterp, &pkt_filter,
4268 WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
4269 } else {
4270 DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
4271 goto fail;
4272 }
4273
4274 rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
4275 rc = rc >= 0 ? 0 : rc;
4276
4277 if (rc)
4278 DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n",
4279 __FUNCTION__, arg, rc));
4280 else
4281 DHD_TRACE(("%s: successfully added pktfilter %s\n",
4282 __FUNCTION__, arg));
4283
4284 fail:
4285 if (arg_org)
4286 MFREE(dhd->osh, arg_org, strlen(arg) + 1);
4287
4288 if (buf)
4289 MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
4290 }
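/*
 * 'arg' is a space-separated filter spec: "<id> <polarity> <type> ...".
 * Type 0/1 filters then take "<offset> <mask> <pattern>", while type 2/6
 * (pattern-list) filters take repeated "[base:]offset <mask> <pattern>"
 * triples. Illustrative example only (matches ethertype 0x0806 / ARP at
 * byte offset 12 of the Ethernet header):
 *
 *   dhd_pktfilter_offload_set(dhd, "102 0 0 12 0xFFFF 0x0806");
 */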
4291
4292 void
4293 dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
4294 {
4295 int ret;
4296
4297 ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
4298 id, WLC_SET_VAR, TRUE, 0);
4299 if (ret < 0) {
4300 DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
4301 __FUNCTION__, id, ret));
4302 }
4303 else
4304 DHD_TRACE(("%s: successfully deleted pktfilter %d\n",
4305 __FUNCTION__, id));
4306 }
4307 #endif /* PKT_FILTER_SUPPORT */
4308
4309 /* ========================== */
4310 /* ==== ARP OFFLOAD SUPPORT = */
4311 /* ========================== */
4312 #ifdef ARP_OFFLOAD_SUPPORT
4313 void
4314 dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
4315 {
4316 int retcode;
4317
4318 retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
4319 arp_mode, WLC_SET_VAR, TRUE, 0);
4320
4321 retcode = retcode >= 0 ? 0 : retcode;
4322 if (retcode)
4323 DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
4324 __FUNCTION__, arp_mode, retcode));
4325 else
4326 DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n",
4327 __FUNCTION__, arp_mode));
4328 }
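/*
 * arp_mode is passed straight through as the firmware "arp_ol" bitmask.
 * The authoritative bit definitions live in the WL ioctl headers; as a rough
 * guide, the low bits select the ARP agent, snooping and host/peer
 * auto-reply, so values like 0xb are commonly seen. Treat any specific value
 * here as an assumption and confirm against the headers for the firmware in
 * use.
 */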
4329
4330 void
4331 dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
4332 {
4333 int retcode;
4334 #ifdef WL_CFG80211
4335 /* Do not enable arp offload in case of non-STA interfaces active */
4336 if (arp_enable &&
4337 (wl_cfg80211_check_vif_in_use(dhd_linux_get_primary_netdev(dhd)))) {
4338 DHD_TRACE(("%s: Virtual interfaces active, ignore arp offload request \n",
4339 __FUNCTION__));
4340 return;
4341 }
4342 #endif /* WL_CFG80211 */
4343 retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
4344 arp_enable, WLC_SET_VAR, TRUE, 0);
4345
4346 retcode = retcode >= 0 ? 0 : retcode;
4347 if (retcode)
4348 DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
4349 __FUNCTION__, arp_enable, retcode));
4350 else
4351 #ifdef DHD_LOG_DUMP
4352 DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n",
4353 __FUNCTION__, arp_enable));
4354 #else
4355 DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n",
4356 __FUNCTION__, arp_enable));
4357 #endif /* DHD_LOG_DUMP */
4358 if (arp_enable) {
4359 uint32 version;
4360 retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
4361 &version, WLC_GET_VAR, FALSE, 0);
4362 if (retcode) {
4363 DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
4364 __FUNCTION__, retcode));
4365 dhd->arp_version = 1;
4366 }
4367 else {
4368 DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
4369 dhd->arp_version = version;
4370 }
4371 }
4372 }
4373
4374 void
4375 dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
4376 {
4377 int ret = 0;
4378
4379 if (dhd == NULL) return;
4380 if (dhd->arp_version == 1)
4381 idx = 0;
4382
4383 ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
4384 if (ret < 0)
4385 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
4386 else {
4387 #ifdef DHD_LOG_DUMP
4388 DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__));
4389 #else
4390 DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__));
4391 #endif /* DHD_LOG_DUMP */
4392 }
4393 /* mac address isn't cleared here but it will be cleared after dongle off */
4394 dhd->hmac_updated = 0;
4395 }
4396
4397 void
4398 dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
4399 {
4400 int ret = 0;
4401
4402 if (dhd == NULL) return;
4403 if (dhd->arp_version == 1)
4404 idx = 0;
4405
4406 ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
4407 if (ret < 0)
4408 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
4409 else {
4410 #ifdef DHD_LOG_DUMP
4411 DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__));
4412 #else
4413 DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__));
4414 #endif /* DHD_LOG_DUMP */
4415 }
4416 }
4417
4418 void
4419 dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
4420 {
4421 int ret;
4422
4423 if (dhd == NULL) return;
4424 if (dhd->arp_version == 1)
4425 idx = 0;
4426
4427 ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
4428 NULL, 0, TRUE);
4429 if (ret < 0)
4430 DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
4431 else {
4432 /* mac address is updated in the dongle */
4433 dhd->hmac_updated = 1;
4434 #ifdef DHD_LOG_DUMP
4435 DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__));
4436 #else
4437 DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__));
4438 #endif /* DHD_LOG_DUMP */
4439 }
4440 }
4441
4442 int
4443 dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
4444 {
4445 int ret, i;
4446 uint32 *ptr32 = buf;
4447 bool clr_bottom = FALSE;
4448
4449 if (!buf)
4450 return -1;
4451 if (dhd == NULL) return -1;
4452 if (dhd->arp_version == 1)
4453 idx = 0;
4454
4455 ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
4456 FALSE);
4457 if (ret) {
4458 DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n",
4459 __FUNCTION__, ret));
4460
4461 return -1;
4462 }
4463
4464 /* clean up the buf: zero every entry after the first empty slot */
4465 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
4466 if (!clr_bottom) {
4467 if (*ptr32 == 0)
4468 clr_bottom = TRUE;
4469 } else {
4470 *ptr32 = 0;
4471 }
4472 ptr32++;
4473 }
4474
4475 return 0;
4476 }
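/*
 * The "arp_hostip" GET fills 'buf' with up to MAX_IPV4_ENTRIES uint32 IPv4
 * addresses; the loop above zeroes every slot after the first empty one, so
 * callers can treat the table as zero-terminated. Illustrative use:
 *
 *   uint32 table[MAX_IPV4_ENTRIES];
 *   int i;
 *   if (dhd_arp_get_arp_hostip_table(dhd, table, sizeof(table), 0) == 0)
 *       for (i = 0; i < MAX_IPV4_ENTRIES && table[i]; i++)
 *           DHD_ARPOE(("host ip[%d] = 0x%08x\n", i, table[i]));
 */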
4477 #endif /* ARP_OFFLOAD_SUPPORT */
4478
4479 /*
4480 * Neighbor Discovery Offload: enable NDO feature
4481 * Called by ipv6 event handler when interface comes up/goes down
4482 */
4483 int
4484 dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
4485 {
4486 int retcode;
4487
4488 if (dhd == NULL)
4489 return -1;
4490
4491 #if defined(WL_CFG80211) && defined(WL_NAN)
4492 if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
4493 /* If nan dp is active, skip NDO */
4494 DHD_INFO(("Active NAN DP, skip NDO\n"));
4495 return 0;
4496 }
4497 #endif /* WL_CFG80211 && WL_NAN */
4498 #ifdef WL_CFG80211
4499 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
4500 /* NDO disable on STA+SOFTAP mode */
4501 ndo_enable = FALSE;
4502 }
4503 #endif /* WL_CFG80211 */
4504 retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
4505 ndo_enable, WLC_SET_VAR, TRUE, 0);
4506 if (retcode)
4507 DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
4508 __FUNCTION__, ndo_enable, retcode));
4509 else
4510 DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
4511 __FUNCTION__, ndo_enable));
4512
4513 return retcode;
4514 }
4515
4516 /*
4517 * Neighbor Discovery Offload: add a host IPv6 address
4518 * Called by the ipv6 event handler when an interface address is added
4519 */
4520 int
4521 dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
4522 {
4523 int iov_len = 0;
4524 char iovbuf[DHD_IOVAR_BUF_SIZE];
4525 int retcode;
4526
4527 if (dhd == NULL)
4528 return -1;
4529
4530 iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
4531 IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
4532 if (!iov_len) {
4533 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4534 __FUNCTION__, sizeof(iovbuf)));
4535 return -1;
4536 }
4537 retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4538
4539 if (retcode)
4540 DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
4541 __FUNCTION__, retcode));
4542 else
4543 DHD_TRACE(("%s: ndo ipaddr entry added \n",
4544 __FUNCTION__));
4545
4546 return retcode;
4547 }
4548
4549 /*
4550 * Neighbor Discovery Offload: clear the host IPv6 address list
4551 * Called by the ipv6 event handler when the interface goes down
4552 */
4553 int
4554 dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
4555 {
4556 int iov_len = 0;
4557 char iovbuf[DHD_IOVAR_BUF_SIZE];
4558 int retcode;
4559
4560 if (dhd == NULL)
4561 return -1;
4562
4563 iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
4564 0, iovbuf, sizeof(iovbuf));
4565 if (!iov_len) {
4566 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4567 __FUNCTION__, sizeof(iovbuf)));
4568 return -1;
4569 }
4570 retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4571
4572 if (retcode)
4573 DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
4574 __FUNCTION__, retcode));
4575 else
4576 DHD_TRACE(("%s: ndo ipaddr entry removed \n",
4577 __FUNCTION__));
4578
4579 return retcode;
4580 }
4581 /* Enhanced ND offload */
4582 uint16
4583 dhd_ndo_get_version(dhd_pub_t *dhdp)
4584 {
4585 char iovbuf[DHD_IOVAR_BUF_SIZE];
4586 wl_nd_hostip_t ndo_get_ver;
4587 int iov_len;
4588 int retcode;
4589 uint16 ver = 0;
4590
4591 if (dhdp == NULL) {
4592 return BCME_ERROR;
4593 }
4594
4595 memset(&iovbuf, 0, sizeof(iovbuf));
4596 ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
4597 ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
4598 ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
4599 ndo_get_ver.u.version = 0;
4600 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
4601 WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
4602
4603 if (!iov_len) {
4604 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4605 __FUNCTION__, sizeof(iovbuf)));
4606 return BCME_ERROR;
4607 }
4608
4609 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);
4610
4611 if (retcode) {
4612 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4613 /* ver iovar not supported. NDO version is 0 */
4614 ver = 0;
4615 } else {
4616 wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;
4617
4618 if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
4619 (dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
4620 (dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
4621 + sizeof(uint16))) {
4622 /* nd_hostip iovar version */
4623 ver = dtoh16(ndo_ver_ret->u.version);
4624 }
4625
4626 DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
4627 }
4628
4629 return ver;
4630 }
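/*
 * Version probe pattern: a return of 0 means the firmware only understands
 * the legacy "nd_hostip" payload (a bare IPv6 address, as used by
 * dhd_ndo_add_ip()/dhd_ndo_remove_ip() above); a non-zero value indicates the
 * typed wl_nd_hostip_t operations below (add/remove by address or by
 * unicast/anycast type) are available.
 */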
4631
4632 int
4633 dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
4634 {
4635 char iovbuf[DHD_IOVAR_BUF_SIZE];
4636 wl_nd_hostip_t ndo_add_addr;
4637 int iov_len;
4638 int retcode;
4639
4640 if (dhdp == NULL || ipv6addr == 0) {
4641 return BCME_ERROR;
4642 }
4643
4644 /* wl_nd_hostip_t fixed param */
4645 ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4646 ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
4647 ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
4648 /* wl_nd_host_ip_addr_t param for add */
4649 memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
4650 ndo_add_addr.u.host_ip.type = type;
4651
4652 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
4653 WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
4654 if (!iov_len) {
4655 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4656 __FUNCTION__, sizeof(iovbuf)));
4657 return BCME_ERROR;
4658 }
4659
4660 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4661 if (retcode) {
4662 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4663 #ifdef NDO_CONFIG_SUPPORT
4664 if (retcode == BCME_NORESOURCE) {
4665 /* number of host ip addr exceeds FW capacity, Deactivate ND offload */
4666 DHD_INFO(("%s: Host IP count exceed device capacity,"
4667 "ND offload deactivated\n", __FUNCTION__));
4668 dhdp->ndo_host_ip_overflow = TRUE;
4669 dhd_ndo_enable(dhdp, FALSE);
4670 }
4671 #endif /* NDO_CONFIG_SUPPORT */
4672 } else {
4673 DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
4674 }
4675
4676 return retcode;
4677 }
4678
4679 int
4680 dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
4681 {
4682 char iovbuf[DHD_IOVAR_BUF_SIZE];
4683 wl_nd_hostip_t ndo_del_addr;
4684 int iov_len;
4685 int retcode;
4686
4687 if (dhdp == NULL || ipv6addr == 0) {
4688 return BCME_ERROR;
4689 }
4690
4691 /* wl_nd_hostip_t fixed param */
4692 ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4693 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
4694 ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
4695 /* wl_nd_host_ip_addr_t param for del */
4696 memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
4697 ndo_del_addr.u.host_ip.type = 0; /* don't care */
4698
4699 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
4700 WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
4701
4702 if (!iov_len) {
4703 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4704 __FUNCTION__, sizeof(iovbuf)));
4705 return BCME_ERROR;
4706 }
4707
4708 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4709 if (retcode) {
4710 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4711 } else {
4712 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
4713 }
4714
4715 return retcode;
4716 }
4717
4718 int
4719 dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
4720 {
4721 char iovbuf[DHD_IOVAR_BUF_SIZE];
4722 wl_nd_hostip_t ndo_del_addr;
4723 int iov_len;
4724 int retcode;
4725
4726 if (dhdp == NULL) {
4727 return BCME_ERROR;
4728 }
4729
4730 /* wl_nd_hostip_t fixed param */
4731 ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4732 if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
4733 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
4734 } else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
4735 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
4736 } else {
4737 return BCME_BADARG;
4738 }
4739 ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
4740
4741 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
4742 iovbuf, sizeof(iovbuf));
4743
4744 if (!iov_len) {
4745 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4746 __FUNCTION__, sizeof(iovbuf)));
4747 return BCME_ERROR;
4748 }
4749
4750 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4751 if (retcode) {
4752 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4753 } else {
4754 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
4755 }
4756
4757 return retcode;
4758 }
4759
4760 int
4761 dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
4762 {
4763 char iovbuf[DHD_IOVAR_BUF_SIZE];
4764 int iov_len;
4765 int retcode;
4766
4767 if (dhdp == NULL) {
4768 return BCME_ERROR;
4769 }
4770
4771 iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
4772 iovbuf, sizeof(iovbuf));
4773
4774 if (!iov_len) {
4775 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4776 __FUNCTION__, sizeof(iovbuf)));
4777 return BCME_ERROR;
4778 }
4779
4780 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
4781 if (retcode)
4782 DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
4783 __FUNCTION__, enable, retcode));
4784 else {
4785 DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
4786 __FUNCTION__, enable));
4787 }
4788
4789 return retcode;
4790 }
4791 #ifdef SIMPLE_ISCAN
4792
4793 uint iscan_thread_id = 0;
4794 iscan_buf_t * iscan_chain = 0;
4795
4796 iscan_buf_t *
4797 dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
4798 {
4799 iscan_buf_t *iscanbuf_alloc = 0;
4800 iscan_buf_t *iscanbuf_head;
4801
4802 DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
4803 dhd_iscan_lock();
4804
4805 iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
4806 if (iscanbuf_alloc == NULL)
4807 goto fail;
4808
4809 iscanbuf_alloc->next = NULL;
4810 iscanbuf_head = *iscanbuf;
4811
4812 DHD_ISCAN(("%s: addr of allocated node = 0x%X"
4813 "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
4814 __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
4815
4816 if (iscanbuf_head == NULL) {
4817 *iscanbuf = iscanbuf_alloc;
4818 DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
4819 goto fail;
4820 }
4821
4822 while (iscanbuf_head->next)
4823 iscanbuf_head = iscanbuf_head->next;
4824
4825 iscanbuf_head->next = iscanbuf_alloc;
4826
4827 fail:
4828 dhd_iscan_unlock();
4829 return iscanbuf_alloc;
4830 }
4831
4832 void
4833 dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
4834 {
4835 iscan_buf_t *iscanbuf_free = 0;
4836 iscan_buf_t *iscanbuf_prv = 0;
4837 iscan_buf_t *iscanbuf_cur;
4838 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4839 DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
4840
4841 dhd_iscan_lock();
4842
4843 iscanbuf_cur = iscan_chain;
4844
4845 /* If iscan_delete is null then delete the entire
4846 * chain or else delete specific one provided
4847 */
4848 if (!iscan_delete) {
4849 while (iscanbuf_cur) {
4850 iscanbuf_free = iscanbuf_cur;
4851 iscanbuf_cur = iscanbuf_cur->next;
4852 iscanbuf_free->next = 0;
4853 MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
4854 }
4855 iscan_chain = 0;
4856 } else {
4857 while (iscanbuf_cur) {
4858 if (iscanbuf_cur == iscan_delete)
4859 break;
4860 iscanbuf_prv = iscanbuf_cur;
4861 iscanbuf_cur = iscanbuf_cur->next;
4862 }
4863 if (iscanbuf_prv)
4864 iscanbuf_prv->next = iscan_delete->next;
4865
4866 iscan_delete->next = 0;
4867 MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
4868
4869 if (!iscanbuf_prv)
4870 iscan_chain = 0;
4871 }
4872 dhd_iscan_unlock();
4873 }
4874
4875 iscan_buf_t *
4876 dhd_iscan_result_buf(void)
4877 {
4878 return iscan_chain;
4879 }
4880
4881 int
4882 dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
4883 {
4884 int rc = -1;
4885 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4886 char *buf = NULL;
4887 char iovar[] = "iscan";
4888 uint32 allocSize = 0;
4889 wl_ioctl_t ioctl;
4890 int len;
4891
4892 if (pParams) {
4893 allocSize = (size + strlen(iovar) + 1);
4894 if ((allocSize < size) || (allocSize < strlen(iovar)))
4895 {
4896 DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
4897 __FUNCTION__, allocSize, size, strlen(iovar)));
4898 goto cleanUp;
4899 }
4900 buf = MALLOC(dhd->osh, allocSize);
4901
4902 if (buf == NULL)
4903 {
4904 DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
4905 goto cleanUp;
4906 }
4907 ioctl.cmd = WLC_SET_VAR;
4908 len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
4909 if (len == 0) {
4910 rc = BCME_BUFTOOSHORT;
4911 goto cleanUp;
4912 }
4913 rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
4914 }
4915
4916 cleanUp:
4917 if (buf) {
4918 MFREE(dhd->osh, buf, allocSize);
4919 }
4920
4921 return rc;
4922 }
4923
4924 static int
4925 dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
4926 {
4927 wl_iscan_results_t *list_buf;
4928 wl_iscan_results_t list;
4929 wl_scan_results_t *results;
4930 iscan_buf_t *iscan_cur;
4931 int status = -1;
4932 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4933 int rc;
4934 wl_ioctl_t ioctl;
4935 int len;
4936
4937 DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
4938
4939 iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
4940 if (!iscan_cur) {
4941 DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
4942 dhd_iscan_free_buf(dhdp, 0);
4943 dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
4944 dhd_ind_scan_confirm(dhdp, FALSE);
4945 goto fail;
4946 }
4947
4948 dhd_iscan_lock();
4949
4950 memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
4951 list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
4952 results = &list_buf->results;
4953 results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
4954 results->version = 0;
4955 results->count = 0;
4956
4957 memset(&list, 0, sizeof(list));
4958 list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
4959 len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
4960 iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
4961 if (len == 0) {
4962 dhd_iscan_free_buf(dhdp, 0);
4963 dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
4964 dhd_ind_scan_confirm(dhdp, FALSE);
4965 status = BCME_BUFTOOSHORT;
4966 goto fail;
4967 }
4968 ioctl.cmd = WLC_GET_VAR;
4969 ioctl.set = FALSE;
4970 rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
4971
4972 results->buflen = dtoh32(results->buflen);
4973 results->version = dtoh32(results->version);
4974 *scan_count = results->count = dtoh32(results->count);
4975 status = dtoh32(list_buf->status);
4976 DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));
4977
4978 dhd_iscan_unlock();
4979
4980 if (!(*scan_count)) {
4981 /* TODO: race condition when FLUSH already called */
4982 dhd_iscan_free_buf(dhdp, 0);
4983 }
4984 fail:
4985 return status;
4986 }
4987
4988 #endif /* SIMPLE_ISCAN */
4989
4990 /*
4991 * returns = TRUE if associated, FALSE if not associated
4992 */
4993 bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
4994 {
4995 char bssid[6], zbuf[6];
4996 int ret = -1;
4997
4998 bzero(bssid, 6);
4999 bzero(zbuf, 6);
5000
5001 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
5002 ETHER_ADDR_LEN, FALSE, ifidx);
5003 DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
5004
5005 if (ret == BCME_NOTASSOCIATED) {
5006 DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
5007 }
5008
5009 if (retval)
5010 *retval = ret;
5011
5012 if (ret < 0)
5013 return FALSE;
5014
5015 if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
5016 DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
5017 return FALSE;
5018 }
5019 return TRUE;
5020 }
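/*
 * Illustrative use, checking association on the primary interface only:
 *
 *   int err;
 *   bool assoc = dhd_is_associated(dhd, 0, &err);
 *
 * assoc is TRUE when WLC_GET_BSSID succeeds and returns a non-zero BSSID;
 * err (optional, may be NULL) receives the raw ioctl result.
 */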
5021
5022 /* Function to estimate possible DTIM_SKIP value */
5023 #if defined(BCMPCIE)
5024 int
5025 dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
5026 {
5027 int bcn_li_dtim = 1; /* default: no DTIM skip */
5028 int ret = -1;
5029 int allowed_skip_dtim_cnt = 0;
5030
5031 if (dhd->disable_dtim_in_suspend) {
5032 DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
5033 bcn_li_dtim = 0;
5034 return bcn_li_dtim;
5035 }
5036
5037 /* Check if associated */
5038 if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
5039 DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
5040 return bcn_li_dtim;
5041 }
5042
5043 if (dtim_period == NULL || bcn_interval == NULL)
5044 return bcn_li_dtim;
5045
5046 /* read associated AP beacon interval */
5047 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
5048 bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
5049 DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
5050 return bcn_li_dtim;
5051 }
5052
5053 /* read associated AP dtim setup */
5054 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
5055 dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
5056 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
5057 return bcn_li_dtim;
5058 }
5059
5060 /* if not associated just return */
5061 if (*dtim_period == 0) {
5062 return bcn_li_dtim;
5063 }
5064
5065 if (dhd->max_dtim_enable) {
5066 bcn_li_dtim =
5067 (int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
5068 if (bcn_li_dtim == 0) {
5069 bcn_li_dtim = 1;
5070 }
5071 } else {
5072 /* attempt to use platform-defined dtim skip interval */
5073 bcn_li_dtim = dhd->suspend_bcn_li_dtim;
5074
5075 /* check if sta listen interval fits into AP dtim */
5076 if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
5077 /* AP DTIM too big for our listen interval: no DTIM skipping */
5078 bcn_li_dtim = NO_DTIM_SKIP;
5079 DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
5080 __FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
5081 return bcn_li_dtim;
5082 }
5083
5084 if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
5085 allowed_skip_dtim_cnt =
5086 MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
5087 bcn_li_dtim =
5088 (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
5089 }
5090
5091 if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
5092 /* Round up dtim_skip to fit into STAs Listen Interval */
5093 bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
5094 DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
5095 }
5096 }
5097
5098 if (dhd->conf->suspend_bcn_li_dtim >= 0)
5099 bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
5100 DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
5101 __FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));
5102
5103 return bcn_li_dtim;
5104 }
5105 #else /* BCMPCIE */
5106 int
5107 dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
5108 {
5109 int bcn_li_dtim = 1; /* default: no DTIM skip */
5110 int ret = -1;
5111 int dtim_period = 0;
5112 int ap_beacon = 0;
5113 int allowed_skip_dtim_cnt = 0;
5114
5115 if (dhd->disable_dtim_in_suspend) {
5116 DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
5117 bcn_li_dtim = 0;
5118 goto exit;
5119 }
5120
5121 /* Check if associated */
5122 if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
5123 DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
5124 goto exit;
5125 }
5126
5127 /* read associated AP beacon interval */
5128 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
5129 &ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
5130 DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
5131 goto exit;
5132 }
5133
5134 /* read associated ap's dtim setup */
5135 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
5136 &dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
5137 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
5138 goto exit;
5139 }
5140
5141 /* if not associated just exit */
5142 if (dtim_period == 0) {
5143 goto exit;
5144 }
5145
5146 if (dhd->max_dtim_enable) {
5147 bcn_li_dtim =
5148 (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
5149 if (bcn_li_dtim == 0) {
5150 bcn_li_dtim = 1;
5151 }
5152 } else {
5153 /* attempt to use platform-defined dtim skip interval */
5154 bcn_li_dtim = dhd->suspend_bcn_li_dtim;
5155
5156 /* check if sta listen interval fits into AP dtim */
5157 if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
5158 /* AP DTIM too big for our listen interval: no DTIM skipping */
5159 bcn_li_dtim = NO_DTIM_SKIP;
5160 DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
5161 __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
5162 goto exit;
5163 }
5164
5165 if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
5166 allowed_skip_dtim_cnt =
5167 MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
5168 bcn_li_dtim =
5169 (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
5170 }
5171
5172 if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
5173 /* Round up dtim_skip to fit into STAs Listen Interval */
5174 bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
5175 DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
5176 }
5177 }
5178
5179 if (dhd->conf->suspend_bcn_li_dtim >= 0)
5180 bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
5181 DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
5182 __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
5183
5184 exit:
5185 return bcn_li_dtim;
5186 }
5187 #endif /* BCMPCIE */
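/*
 * Worked example for the DTIM-skip arithmetic above (numbers illustrative;
 * MAX_DTIM_ALLOWED_INTERVAL and CUSTOM_LISTEN_INTERVAL are platform tunables):
 * with an AP beacon interval of 100 TU and DTIM period 1, the max_dtim_enable
 * path computes bcn_li_dtim = MAX_DTIM_ALLOWED_INTERVAL / (1 * 100), e.g. 9
 * if the allowed interval were 900. The non-max path instead starts from
 * dhd->suspend_bcn_li_dtim and only trims it so that bcn_li_dtim * dtim_period
 * still fits within CUSTOM_LISTEN_INTERVAL and the allowed interval.
 */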
5188
5189 #ifdef CONFIG_SILENT_ROAM
5190 int
5191 dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
5192 {
5193 int ret = BCME_OK;
5194 wlc_sroam_t *psroam;
5195 wlc_sroam_info_t *sroam;
5196 uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
5197
5198 /* Check if associated */
5199 if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
5200 DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
5201 return ret;
5202 }
5203
5204 if (set && (dhd->op_mode &
5205 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
5206 DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
5207 return ret;
5208 }
5209
5210 if (!dhd->sroam_turn_on) {
5211 DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
5212 return ret;
5213 }
5214 psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
5215 if (!psroam) {
5216 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
5217 return BCME_NOMEM;
5218 }
5219
5220 ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
5221 if (ret < 0) {
5222 DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
5223 goto done;
5224 }
5225
5226 if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
5227 ret = BCME_VERSION;
5228 goto done;
5229 }
5230
5231 sroam = (wlc_sroam_info_t *)psroam->data;
5232 sroam->sroam_on = set;
5233 DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
5234
5235 ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
5236 if (ret < 0) {
5237 DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
5238 }
5239
5240 done:
5241 if (psroam) {
5242 MFREE(dhd->osh, psroam, sroamlen);
5243 }
5244 return ret;
5245
5246 }
5247 #endif /* CONFIG_SILENT_ROAM */
5248
5249 /* Check if the mode supports STA MODE */
5250 bool dhd_support_sta_mode(dhd_pub_t *dhd)
5251 {
5252
5253 #ifdef WL_CFG80211
5254 if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
5255 return FALSE;
5256 else
5257 #endif /* WL_CFG80211 */
5258 return TRUE;
5259 }
5260
5261 #if defined(KEEP_ALIVE)
5262 int dhd_keep_alive_onoff(dhd_pub_t *dhd)
5263 {
5264 char buf[32] = {0};
5265 const char *str;
5266 wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
5267 wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
5268 int buf_len;
5269 int str_len;
5270 int res = -1;
5271
5272 if (!dhd_support_sta_mode(dhd))
5273 return res;
5274
5275 DHD_TRACE(("%s execution\n", __FUNCTION__));
5276
5277 str = "mkeep_alive";
5278 str_len = strlen(str);
5279 strncpy(buf, str, sizeof(buf) - 1);
5280 buf[ sizeof(buf) - 1 ] = '\0';
5281 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
5282 mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period;
5283 buf_len = str_len + 1;
5284 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
5285 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
5286 /* Setup keep alive zero for null packet generation */
5287 mkeep_alive_pkt.keep_alive_id = 0;
5288 mkeep_alive_pkt.len_bytes = 0;
5289 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
5290 bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
5291 /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
5292 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
5293 * guarantee that the buffer is properly aligned.
5294 */
5295 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
5296
5297 res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
5298
5299 return res;
5300 }
5301 #endif /* defined(KEEP_ALIVE) */
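/*
 * The "mkeep_alive" buffer above uses the usual name + struct layout:
 *
 *   "mkeep_alive\0" | wl_mkeep_alive_pkt_t { version, length, period_msec,
 *                                            keep_alive_id = 0, len_bytes = 0 }
 *
 * With len_bytes == 0, keep-alive slot 0 is programmed to emit null packets
 * every dhd->conf->keep_alive_period milliseconds.
 */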
5302 #define CSCAN_TLV_TYPE_SSID_IE 'S'
5303 /*
5304 * SSIDs list parsing from cscan tlv list
5305 */
5306 int
5307 wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
5308 {
5309 char* str;
5310 int idx = 0;
5311 uint8 len;
5312
5313 if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
5314 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
5315 return BCME_BADARG;
5316 }
5317 str = *list_str;
5318 while (*bytes_left > 0) {
5319 if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
5320 *list_str = str;
5321 DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
5322 return idx;
5323 }
5324
5325 if (idx >= max) {
5326 DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
5327 return BCME_BADARG;
5328 }
5329
5330 /* Get proper CSCAN_TLV_TYPE_SSID_IE */
5331 *bytes_left -= 1;
5332 if (*bytes_left == 0) {
5333 DHD_ERROR(("%s no length field.\n", __FUNCTION__));
5334 return BCME_BADARG;
5335 }
5336 str += 1;
5337 ssid[idx].rssi_thresh = 0;
5338 ssid[idx].flags = 0;
5339 len = str[0];
5340 if (len == 0) {
5341 /* Broadcast SSID */
5342 ssid[idx].SSID_len = 0;
5343 memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
5344 *bytes_left -= 1;
5345 str += 1;
5346
5347 DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
5348 } else if (len <= DOT11_MAX_SSID_LEN) {
5349 /* Get proper SSID size */
5350 ssid[idx].SSID_len = len;
5351 *bytes_left -= 1;
5352 /* Get SSID */
5353 if (ssid[idx].SSID_len > *bytes_left) {
5354 DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
5355 __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
5356 return BCME_BADARG;
5357 }
5358 str += 1;
5359 memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
5360
5361 *bytes_left -= ssid[idx].SSID_len;
5362 str += ssid[idx].SSID_len;
5363 ssid[idx].hidden = TRUE;
5364
5365 DHD_TRACE(("%s :size=%d left=%d\n",
5366 (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
5367 } else {
5368 DHD_ERROR(("### SSID size more than %d\n", str[0]));
5369 return BCME_BADARG;
5370 }
5371 idx++;
5372 }
5373
5374 *list_str = str;
5375 return idx;
5376 }
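/*
 * Expected TLV stream: repeated records of CSCAN_TLV_TYPE_SSID_IE ('S'),
 * a one-byte length, then that many SSID bytes; a zero length denotes a
 * broadcast (wildcard) SSID. For example, the 8 bytes
 *
 *   'S', 4, 't', 'e', 's', 't', 'S', 0
 *
 * parse into two entries: SSID "test" (marked hidden) and a broadcast entry.
 */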
5377
5378 #if defined(WL_WIRELESS_EXT)
5379 /* Android ComboSCAN support */
5380
5381 /*
5382 * data parsing from ComboScan tlv list
5383 */
5384 int
5385 wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
5386 int input_size, int *bytes_left)
5387 {
5388 char* str;
5389 uint16 short_temp;
5390 uint32 int_temp;
5391
5392 if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
5393 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
5394 return -1;
5395 }
5396 str = *list_str;
5397
5398 /* Clean all dest bytes */
5399 memset(dst, 0, dst_size);
5400 if (*bytes_left > 0) {
5401
5402 if (str[0] != token) {
5403 DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
5404 __FUNCTION__, token, str[0], *bytes_left));
5405 return -1;
5406 }
5407
5408 *bytes_left -= 1;
5409 str += 1;
5410
5411 if (input_size == 1) {
5412 memcpy(dst, str, input_size);
5413 }
5414 else if (input_size == 2) {
5415 memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
5416 input_size);
5417 }
5418 else if (input_size == 4) {
5419 memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
5420 input_size);
5421 }
5422
5423 *bytes_left -= input_size;
5424 str += input_size;
5425 *list_str = str;
5426 return 1;
5427 }
5428 return 1;
5429 }
5430
5431 /*
5432 * channel list parsing from cscan tlv list
5433 */
5434 int
5435 wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
5436 int channel_num, int *bytes_left)
5437 {
5438 char* str;
5439 int idx = 0;
5440
5441 if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
5442 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
5443 return -1;
5444 }
5445 str = *list_str;
5446
5447 while (*bytes_left > 0) {
5448
5449 if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
5450 *list_str = str;
5451 DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
5452 return idx;
5453 }
5454 /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
5455 *bytes_left -= 1;
5456 str += 1;
5457
5458 if (str[0] == 0) {
5459 /* All channels */
5460 channel_list[idx] = 0x0;
5461 }
5462 else {
5463 channel_list[idx] = (uint16)str[0];
5464 DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
5465 }
5466 *bytes_left -= 1;
5467 str += 1;
5468
5469 if (idx++ > 255) {
5470 DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
5471 return -1;
5472 }
5473 }
5474
5475 *list_str = str;
5476 return idx;
5477 }
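/*
 * Channel TLVs are pairs of CSCAN_TLV_TYPE_CHANNEL_IE followed by a one-byte
 * channel number, with 0 meaning "all channels". Assuming the channel tag
 * byte is 'C', the sequence 'C',1,'C',6,'C',11 yields channel_list = {1,6,11}
 * and a return value of 3.
 */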
5478
5479 /* Parse a comma-separated list from list_str into the ssid array, starting
5480 * at index idx. Max specifies the size of the ssid array. Returns the
5481 * updated idx; if the returned idx >= max, not all SSIDs fit and the
5482 * excess were not copied. Returns -1 on a NULL list or an over-long SSID.
5483 */
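/*
 * Illustrative example (SSID names are hypothetical): given "home-ap,guest-ap"
 * and idx = 0, ssid[0] and ssid[1] are filled and 2 is returned. Parsing also
 * stops when the GET_CHANNEL tag is found, leaving *list_str just past it.
 */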
5484 int
5485 wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
5486 {
5487 char* str, *ptr;
5488
5489 if ((list_str == NULL) || (*list_str == NULL))
5490 return -1;
5491
5492 for (str = *list_str; str != NULL; str = ptr) {
5493
5494 /* check for next TAG */
5495 if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
5496 *list_str = str + strlen(GET_CHANNEL);
5497 return idx;
5498 }
5499
5500 if ((ptr = strchr(str, ',')) != NULL) {
5501 *ptr++ = '\0';
5502 }
5503
5504 if (strlen(str) > DOT11_MAX_SSID_LEN) {
5505 DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
5506 return -1;
5507 }
5508
5509 if (strlen(str) == 0)
5510 ssid[idx].SSID_len = 0;
5511
5512 if (idx < max) {
5513 bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
5514 strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
5515 ssid[idx].SSID_len = strlen(str);
5516 }
5517 idx++;
5518 }
5519 return idx;
5520 }
5521
5522 /*
5523 * Parse channel list from iwpriv CSCAN
5524 */
5525 int
5526 wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
5527 {
5528 int num;
5529 int val;
5530 char* str;
5531 char* endptr = NULL;
5532
5533 if ((list_str == NULL)||(*list_str == NULL))
5534 return -1;
5535
5536 str = *list_str;
5537 num = 0;
5538 while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
5539 val = (int)strtoul(str, &endptr, 0);
5540 if (endptr == str) {
5541 printf("could not parse channel number starting at"
5542 " substring \"%s\" in list:\n%s\n",
5543 str, *list_str);
5544 return -1;
5545 }
5546 str = endptr + strspn(endptr, " ,");
5547
5548 if (num == channel_num) {
5549 DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
5550 channel_num, *list_str));
5551 return -1;
5552 }
5553
5554 channel_list[num++] = (uint16)val;
5555 }
5556 *list_str = str;
5557 return num;
5558 }
5559 #endif /* WL_WIRELESS_EXT */
5560
5561 /* Given filename and download type, returns a buffer pointer and length
5562 * for download to f/w. Type can be FW or NVRAM.
5563 *
5564 */
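/*
 * Calling convention (as used below): on entry *length holds the maximum
 * number of bytes to read, on success it is updated to the number of bytes
 * actually placed in *buffer, and the buffer is typically released again
 * with dhd_free_download_buffer().
 */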
5565 int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
5566 char ** buffer, int *length)
5567
5568 {
5569 int ret = BCME_ERROR;
5570 int len = 0;
5571 int file_len;
5572 void *image = NULL;
5573 uint8 *buf = NULL;
5574
5575 /* Point to cache if available. */
5576 /* No Valid cache found on this call */
5577 if (!len) {
5578 file_len = *length;
5579 *length = 0;
5580
5581 if (file_path) {
5582 image = dhd_os_open_image1(dhd, file_path);
5583 if (image == NULL) {
5584 printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
5585 goto err;
5586 }
5587 }
5588
5589 buf = MALLOCZ(dhd->osh, file_len);
5590 if (buf == NULL) {
5591 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
5592 __FUNCTION__, file_len));
5593 goto err;
5594 }
5595
5596 /* Download image */
5597 len = dhd_os_get_image_block((char *)buf, file_len, image);
5598 if ((len <= 0 || len > file_len)) {
5599 MFREE(dhd->osh, buf, file_len);
5600 goto err;
5601 }
5602 }
5603
5604 ret = BCME_OK;
5605 *length = len;
5606 *buffer = (char *)buf;
5607
5608 /* Cache if first call. */
5609
5610 err:
5611 if (image)
5612 dhd_os_close_image1(dhd, image);
5613
5614 return ret;
5615 }
5616
5617 int
5618 dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_type,
5619 unsigned char *dload_buf, int len)
5620 {
5621 struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
5622 int err = 0;
5623 int dload_data_offset;
5624 static char iovar_buf[WLC_IOCTL_MEDLEN];
5625 int iovar_len;
5626
5627 memset(iovar_buf, 0, sizeof(iovar_buf));
5628
5629 dload_data_offset = OFFSETOF(wl_dload_data_t, data);
5630 dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
5631 dload_ptr->dload_type = dload_type;
5632 dload_ptr->len = htod32(len - dload_data_offset);
5633 dload_ptr->crc = 0;
5634 len = ROUNDUP(len, 8);
5635
5636 iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
5637 (uint)len, iovar_buf, sizeof(iovar_buf));
5638 if (iovar_len == 0) {
5639 DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
5640 __FUNCTION__, iovar));
5641 return BCME_BUFTOOSHORT;
5642 }
5643
5644 err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
5645 iovar_len, IOV_SET, 0);
5646
5647 return err;
5648 }
5649
5650 int
5651 dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
5652 uint32 len, char *iovar)
5653
5654 {
5655 int chunk_len;
5656 int size2alloc;
5657 unsigned char *new_buf;
5658 int err = 0, data_offset;
5659 uint16 dl_flag = DL_BEGIN;
5660
5661 data_offset = OFFSETOF(wl_dload_data_t, data);
5662 size2alloc = data_offset + MAX_CHUNK_LEN;
5663 size2alloc = ROUNDUP(size2alloc, 8);
5664
5665 if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
5666 do {
5667 chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
5668 MAX_CHUNK_LEN, buf);
5669 if (chunk_len < 0) {
5670 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
5671 __FUNCTION__, chunk_len));
5672 err = BCME_ERROR;
5673 goto exit;
5674 }
5675 if (len - chunk_len == 0)
5676 dl_flag |= DL_END;
5677
5678 err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
5679 new_buf, data_offset + chunk_len);
5680
5681 dl_flag &= ~DL_BEGIN;
5682
5683 len = len - chunk_len;
5684 } while ((len > 0) && (err == 0));
5685 } else {
5686 err = BCME_NOMEM;
5687 }
5688 exit:
5689 if (new_buf) {
5690 MFREE(dhd->osh, new_buf, size2alloc);
5691 }
5692 return err;
5693 }
5694
5695 int
5696 dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
5697 {
5698 return 0;
5699 }
5700
5701 int
5702 dhd_check_current_clm_data(dhd_pub_t *dhd)
5703 {
5704 char iovbuf[WLC_IOCTL_SMLEN];
5705 wl_country_t *cspec;
5706 int err = BCME_OK;
5707
5708 memset(iovbuf, 0, sizeof(iovbuf));
5709 err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
5710 if (err == 0) {
5711 err = BCME_BUFTOOSHORT;
5712 DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
5713 return err;
5714 }
5715 err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
5716 if (err) {
5717 DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
5718 return err;
5719 }
5720 cspec = (wl_country_t *)iovbuf;
5721 if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
5722 DHD_ERROR(("%s: ----- This FW does not include CLM data -----\n",
5723 __FUNCTION__));
5724 return FALSE;
5725 }
5726 DHD_ERROR(("%s: ----- This FW includes CLM data -----\n",
5727 __FUNCTION__));
5728 return TRUE;
5729 }
5730
5731 int
5732 dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
5733 {
5734 char *clm_blob_path;
5735 int len;
5736 char *memblock = NULL;
5737 int err = BCME_OK;
5738 char iovbuf[WLC_IOCTL_SMLEN];
5739 int status = FALSE;
5740
5741 if (clm_path && clm_path[0] != '\0') {
5742 if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
5743 DHD_ERROR(("clm path exceeds max len\n"));
5744 return BCME_ERROR;
5745 }
5746 clm_blob_path = clm_path;
5747 DHD_TRACE(("clm path from module param:%s\n", clm_path));
5748 } else {
5749 clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
5750 }
5751
5752 /* If CLM blob file is found on the filesystem, download the file.
5753 * After CLM file download or If the blob file is not present,
5754 * validate the country code before proceeding with the initialization.
5755 * If country code is not valid, fail the initialization.
5756 */
5757 memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
5758 if (memblock == NULL) {
5759 printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_blob_path);
5760 #if defined(DHD_BLOB_EXISTENCE_CHECK)
5761 if (dhd->is_blob) {
5762 err = BCME_ERROR;
5763 } else {
5764 status = dhd_check_current_clm_data(dhd);
5765 if (status == TRUE) {
5766 err = BCME_OK;
5767 } else {
5768 err = status;
5769 }
5770 }
5771 #endif /* DHD_BLOB_EXISTENCE_CHECK */
5772 goto exit;
5773 }
5774
5775 len = dhd_os_get_image_size(memblock);
5776
5777 if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
5778 status = dhd_check_current_clm_data(dhd);
5779 if (status == TRUE) {
5780 #if defined(DHD_BLOB_EXISTENCE_CHECK)
5781 if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
5782 if (dhd->is_blob) {
5783 err = BCME_ERROR;
5784 }
5785 goto exit;
5786 }
5787 #else
5788 DHD_ERROR(("%s: CLM already exist in F/W, "
5789 "new CLM data will be added to the end of existing CLM data!\n",
5790 __FUNCTION__));
5791 #endif /* DHD_BLOB_EXISTENCE_CHECK */
5792 } else if (status != FALSE) {
5793 err = status;
5794 goto exit;
5795 }
5796
5797 /* Found blob file. Download the file */
5798 DHD_TRACE(("clm file download from %s \n", clm_blob_path));
5799 err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
5800 if (err) {
5801 DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
5802 /* Retrieve clmload_status and print */
5803 memset(iovbuf, 0, sizeof(iovbuf));
5804 len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
5805 if (len == 0) {
5806 err = BCME_BUFTOOSHORT;
5807 goto exit;
5808 }
5809 err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
5810 if (err) {
5811 DHD_ERROR(("%s: clmload_status get failed err=%d \n",
5812 __FUNCTION__, err));
5813 } else {
5814 DHD_ERROR(("%s: clmload_status: %d \n",
5815 __FUNCTION__, *((int *)iovbuf)));
5816 if (*((int *)iovbuf) == CHIPID_MISMATCH) {
5817 DHD_ERROR(("Chip ID mismatch error \n"));
5818 }
5819 }
5820 err = BCME_ERROR;
5821 goto exit;
5822 } else {
5823 DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
5824 }
5825 } else {
5826 DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
5827 }
5828
5829 /* Verify country code */
5830 status = dhd_check_current_clm_data(dhd);
5831
5832 if (status != TRUE) {
5833 /* Country code not initialized or CLM download not proper */
5834 DHD_ERROR(("country code not initialized\n"));
5835 err = status;
5836 }
5837 exit:
5838
5839 if (memblock) {
5840 dhd_os_close_image1(dhd, memblock);
5841 }
5842
5843 return err;
5844 }
5845
5846 void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
5847 {
5848 MFREE(dhd->osh, buffer, length);
5849 }
5850
5851 #ifdef SHOW_LOGTRACE
5852 int
5853 dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
5854 dhd_event_log_t *event_log)
5855 {
5856 uint32 *lognums = NULL;
5857 char *logstrs = NULL;
5858 logstr_trailer_t *trailer = NULL;
5859 int ram_index = 0;
5860 char **fmts = NULL;
5861 int num_fmts = 0;
5862 bool match_fail = TRUE;
5863 int32 i = 0;
5864 uint8 *pfw_id = NULL;
5865 uint32 fwid = 0;
5866 void *file = NULL;
5867 int file_len = 0;
5868 char fwid_str[FWID_STR_LEN];
5869 uint32 hdr_logstrs_size = 0;
5870
5871 /* Read last three words in the logstrs.bin file */
5872 trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
5873 sizeof(logstr_trailer_t));
5874
5875 if (trailer->log_magic == LOGSTRS_MAGIC) {
5876 /*
5877 * logstrs.bin has a header.
5878 */
5879 if (trailer->version == 1) {
5880 logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
5881 logstrs_size - sizeof(logstr_header_v1_t));
5882 DHD_INFO(("%s: logstr header version = %u\n",
5883 __FUNCTION__, hdr_v1->version));
5884 num_fmts = hdr_v1->rom_logstrs_offset / sizeof(uint32);
5885 ram_index = (hdr_v1->ram_lognums_offset -
5886 hdr_v1->rom_lognums_offset) / sizeof(uint32);
5887 lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
5888 logstrs = (char *) &raw_fmts[hdr_v1->rom_logstrs_offset];
5889 hdr_logstrs_size = hdr_v1->logstrs_size;
5890 } else if (trailer->version == 2) {
5891 logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
5892 sizeof(logstr_header_t));
5893 DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
5894 __FUNCTION__, hdr->trailer.version, hdr->trailer.flags));
5895
5896 /* For ver. 2 of the header, need to match fwid of
5897 * both logstrs.bin and fw bin
5898 */
5899
5900 /* read the FWID from fw bin */
5901 file = dhd_os_open_image1(NULL, st_str_file_path);
5902 if (!file) {
5903 DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
5904 goto error;
5905 }
5906 file_len = dhd_os_get_image_size(file);
5907 if (file_len <= 0) {
5908 DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
5909 goto error;
5910 }
5911 /* fwid is at the end of fw bin in string format */
5912 if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
5913 DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
5914 goto error;
5915 }
5916
5917 memset(fwid_str, 0, sizeof(fwid_str));
5918 if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
5919 DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
5920 goto error;
5921 }
5922 pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
5923 FWID_STR_1, strlen(FWID_STR_1));
5924 if (!pfw_id) {
5925 pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
5926 FWID_STR_2, strlen(FWID_STR_2));
5927 if (!pfw_id) {
5928 DHD_ERROR(("%s: could not find id in FW bin!\n",
5929 __FUNCTION__));
5930 goto error;
5931 }
5932 }
5933 /* search for the '-' in the fw id str, after which the
5934 * actual 4 byte fw id is present
5935 */
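/*
 * Illustrative only: the tail of the firmware image is assumed to carry an
 * id string such as "... FWID 01-1a2b3c4d" (the exact FWID_STR_1/FWID_STR_2
 * markers are defined elsewhere); the hex digits after the '-' are parsed
 * below as the 32-bit fwid.
 */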
5936 while (pfw_id && *pfw_id != '-') {
5937 ++pfw_id;
5938 }
5939 ++pfw_id;
5940 fwid = bcm_strtoul((char *)pfw_id, NULL, 16);
5941
5942 /* check if fw id in logstrs.bin matches the fw one */
5943 if (hdr->trailer.fw_id != fwid) {
5944 DHD_ERROR(("%s: logstr id does not match FW!"
5945 "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
5946 __FUNCTION__, hdr->trailer.fw_id, fwid));
5947 goto error;
5948 }
5949
5950 match_fail = FALSE;
5951 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
5952 ram_index = (hdr->ram_lognums_offset -
5953 hdr->rom_lognums_offset) / sizeof(uint32);
5954 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
5955 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
5956 hdr_logstrs_size = hdr->logstrs_size;
5957
5958 error:
5959 if (file) {
5960 dhd_os_close_image1(NULL, file);
5961 }
5962 if (match_fail) {
5963 return BCME_DECERR;
5964 }
5965 } else {
5966 DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
5967 trailer->version));
5968 return BCME_ERROR;
5969 }
5970 if (logstrs_size != hdr_logstrs_size) {
5971 DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
5972 return BCME_ERROR;
5973 }
5974 } else {
5975 /*
5976 * Legacy logstrs.bin format without header.
5977 */
5978 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
5979
5980 /* Legacy RAM-only logstrs.bin format:
5981 * - RAM 'lognums' section
5982 * - RAM 'logstrs' section.
5983 *
5984 * 'lognums' is an array of indexes for the strings in the
5985 * 'logstrs' section. The first uint32 is an index to the
5986 * start of 'logstrs'. Therefore, if this index is divided
5987 * by 'sizeof(uint32)' it provides the number of logstr
5988 * entries.
5989 */
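/*
 * Layout implied by the parsing below:
 *   offset 0            : uint32 lognums[num_fmts]
 *   offset num_fmts * 4 : concatenated format strings ('logstrs')
 * with num_fmts = lognums[0] / sizeof(uint32), because lognums[0] is the
 * file offset of the first format string.
 */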
5990 ram_index = 0;
5991 lognums = (uint32 *) raw_fmts;
5992 logstrs = (char *) &raw_fmts[num_fmts << 2];
5993 }
5994 if (num_fmts)
5995 fmts = MALLOC(osh, num_fmts * sizeof(char *));
5996 if (fmts == NULL) {
5997 DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
5998 return BCME_ERROR;
5999 }
6000 event_log->fmts_size = num_fmts * sizeof(char *);
6001
6002 for (i = 0; i < num_fmts; i++) {
6003 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
6004 * (they are 0-indexed relative to 'rom_logstrs_offset').
6005 *
6006 * RAM lognums are already indexed to point to the correct RAM logstrs (they
6007 * are 0-indexed relative to the start of the logstrs.bin file).
6008 */
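/* Once i reaches ram_index, the string base is switched from the ROM
 * logstrs region to the start of raw_fmts so that the RAM lognums, which
 * are absolute file offsets, resolve correctly.
 */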
6009 if (i == ram_index) {
6010 logstrs = raw_fmts;
6011 }
6012 fmts[i] = &logstrs[lognums[i]];
6013 }
6014 event_log->fmts = fmts;
6015 event_log->raw_fmts_size = logstrs_size;
6016 event_log->raw_fmts = raw_fmts;
6017 event_log->num_fmts = num_fmts;
6018 return BCME_OK;
6019 } /* dhd_parse_logstrs_file */
6020
6021 int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
6022 uint32 *rodata_end)
6023 {
6024 char *raw_fmts = NULL, *raw_fmts_loc = NULL;
6025 uint32 read_size = READ_NUM_BYTES;
6026 int error = 0;
6027 char * cptr = NULL;
6028 char c;
6029 uint8 count = 0;
6030
6031 *ramstart = 0;
6032 *rodata_start = 0;
6033 *rodata_end = 0;
6034
6035 /* Allocate 1 byte more than read_size to terminate it with NULL */
6036 raw_fmts = MALLOCZ(osh, read_size + 1);
6037 if (raw_fmts == NULL) {
6038 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6039 goto fail;
6040 }
6041
6042 /* read ram start, rodata_start and rodata_end values from map file */
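/*
 * A matching map line is assumed to look roughly like
 * "001e0000 T text_start" (hex address, one type character, symbol name),
 * which is what the "%x %c ..." sscanf patterns below expect.
 */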
6043 while (count != ALL_MAP_VAL)
6044 {
6045 error = dhd_os_read_file(file, raw_fmts, read_size);
6046 if (error < 0) {
6047 DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
6048 error));
6049 goto fail;
6050 }
6051
6052 /* End raw_fmts with NULL as strstr expects NULL terminated strings */
6053 raw_fmts[read_size] = '\0';
6054
6055 /* Get ramstart address */
6056 raw_fmts_loc = raw_fmts;
6057 if (!(count & RAMSTART_BIT) &&
6058 (cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
6059 strlen(ramstart_str)))) {
6060 cptr = cptr - BYTES_AHEAD_NUM;
6061 sscanf(cptr, "%x %c text_start", ramstart, &c);
6062 count |= RAMSTART_BIT;
6063 }
6064
6065 /* Get ram rodata start address */
6066 raw_fmts_loc = raw_fmts;
6067 if (!(count & RDSTART_BIT) &&
6068 (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
6069 strlen(rodata_start_str)))) {
6070 cptr = cptr - BYTES_AHEAD_NUM;
6071 sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
6072 count |= RDSTART_BIT;
6073 }
6074
6075 /* Get ram rodata end address */
6076 raw_fmts_loc = raw_fmts;
6077 if (!(count & RDEND_BIT) &&
6078 (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
6079 strlen(rodata_end_str)))) {
6080 cptr = cptr - BYTES_AHEAD_NUM;
6081 sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
6082 count |= RDEND_BIT;
6083 }
6084
6085 if (error < (int)read_size) {
6086 /*
6087 * Because the file position is rewound by GO_BACK_FILE_POS_NUM_BYTES
6088 * before each subsequent read, a string that is split across two
6089 * reads is not missed. Therefore a short read here (a return value
6090 * smaller than read_size) means EOF was reached, so stop reading
6091 * further.
6092 */
6093 break;
6094 }
6095 memset(raw_fmts, 0, read_size);
6096 /*
6097 * Rewind by a predefined number of bytes so that a string and its
6098 * address are not missed even if they are split across the next read.
6099 */
6100 dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
6101 }
6102
6103 fail:
6104 if (raw_fmts) {
6105 MFREE(osh, raw_fmts, read_size + 1);
6106 raw_fmts = NULL;
6107 }
6108 if (count == ALL_MAP_VAL) {
6109 return BCME_OK;
6110 }
6111 else {
6112 DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
6113 count));
6114 return BCME_ERROR;
6115 }
6116
6117 } /* dhd_parse_map_file */
6118
6119 #ifdef PCIE_FULL_DONGLE
6120 int
6121 dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
6122 dhd_event_log_t *event_data)
6123 {
6124 uint32 infobuf_version;
6125 info_buf_payload_hdr_t *payload_hdr_ptr;
6126 uint16 payload_hdr_type;
6127 uint16 payload_hdr_length;
6128
6129 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
6130
6131 if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
6132 DHD_ERROR(("%s: infobuf too small for version field\n",
6133 __FUNCTION__));
6134 goto exit;
6135 }
6136 infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
6137 PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
6138 if (infobuf_version != PCIE_INFOBUF_V1) {
6139 DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
6140 __FUNCTION__, infobuf_version));
6141 goto exit;
6142 }
6143
6144 /* Version 1 infobuf has a single type/length (and then value) field */
6145 if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
6146 DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
6147 __FUNCTION__));
6148 goto exit;
6149 }
6150 /* Process/parse the common info payload header (type/length) */
6151 payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
6152 payload_hdr_type = ltoh16(payload_hdr_ptr->type);
6153 payload_hdr_length = ltoh16(payload_hdr_ptr->length);
6154 if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
6155 DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
6156 __FUNCTION__, payload_hdr_type));
6157 goto exit;
6158 }
6159 PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));
6160
6161 /* Validate that the specified length isn't bigger than the
6162 * provided data.
6163 */
6164 if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
6165 DHD_ERROR(("%s: infobuf logtrace length is bigger"
6166 " than actual buffer data\n", __FUNCTION__));
6167 goto exit;
6168 }
6169 dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
6170 event_data, payload_hdr_length);
6171
6172 return BCME_OK;
6173
6174 exit:
6175 return BCME_ERROR;
6176 } /* dhd_event_logtrace_infobuf_pkt_process */
6177 #endif /* PCIE_FULL_DONGLE */
6178 #endif /* SHOW_LOGTRACE */
6179
6180 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
6181
6182 /* Handle TDLS events in dhd_common.c
6183 */
6184 int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
6185 {
6186 int ret = BCME_OK;
6187 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
6188 #pragma GCC diagnostic push
6189 #pragma GCC diagnostic ignored "-Wcast-qual"
6190 #endif // endif
6191 ret = dhd_tdls_update_peer_info(dhd_pub, event);
6192 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
6193 #pragma GCC diagnostic pop
6194 #endif // endif
6195 return ret;
6196 }
6197
6198 int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
6199 {
6200 tdls_peer_node_t *cur = NULL, *prev = NULL;
6201 if (!dhd_pub)
6202 return BCME_ERROR;
6203 cur = dhd_pub->peer_tbl.node;
6204
6205 if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
6206 return BCME_ERROR;
6207
6208 while (cur != NULL) {
6209 prev = cur;
6210 cur = cur->next;
6211 MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
6212 }
6213 dhd_pub->peer_tbl.tdls_peer_count = 0;
6214 dhd_pub->peer_tbl.node = NULL;
6215 return BCME_OK;
6216 }
6217 #endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
6218
6219 /* pretty hex print a contiguous buffer
6220 * based on the debug level specified
6221 */
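/*
 * Each flushed line has the form
 *   " 00a0: de ad be ef 00 11 22 33 44 55 66 77 88 99 aa bb"
 * i.e. a 16-byte row prefixed with its offset into the buffer.
 */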
6222 void
6223 dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
6224 {
6225 char line[128], *p;
6226 int len = sizeof(line);
6227 int nchar;
6228 uint i;
6229
6230 if (msg && (msg[0] != '\0')) {
6231 if (dbg_level == DHD_ERROR_VAL)
6232 DHD_ERROR(("%s:\n", msg));
6233 else if (dbg_level == DHD_INFO_VAL)
6234 DHD_INFO(("%s:\n", msg));
6235 else if (dbg_level == DHD_TRACE_VAL)
6236 DHD_TRACE(("%s:\n", msg));
6237 }
6238
6239 p = line;
6240 for (i = 0; i < nbytes; i++) {
6241 if (i % 16 == 0) {
6242 nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
6243 p += nchar;
6244 len -= nchar;
6245 }
6246 if (len > 0) {
6247 nchar = snprintf(p, len, "%02x ", buf[i]);
6248 p += nchar;
6249 len -= nchar;
6250 }
6251
6252 if (i % 16 == 15) {
6253 /* flush line */
6254 if (dbg_level == DHD_ERROR_VAL)
6255 DHD_ERROR(("%s:\n", line));
6256 else if (dbg_level == DHD_INFO_VAL)
6257 DHD_INFO(("%s:\n", line));
6258 else if (dbg_level == DHD_TRACE_VAL)
6259 DHD_TRACE(("%s:\n", line));
6260 p = line;
6261 len = sizeof(line);
6262 }
6263 }
6264
6265 /* flush last partial line */
6266 if (p != line) {
6267 if (dbg_level == DHD_ERROR_VAL)
6268 DHD_ERROR(("%s:\n", line));
6269 else if (dbg_level == DHD_INFO_VAL)
6270 DHD_INFO(("%s:\n", line));
6271 else if (dbg_level == DHD_TRACE_VAL)
6272 DHD_TRACE(("%s:\n", line));
6273 }
6274 }
6275
6276 #ifdef DUMP_IOCTL_IOV_LIST
6277 void
6278 dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
6279 {
6280 dll_t *item;
6281 dhd_iov_li_t *iov_li;
6282 dhd->dump_iovlist_len++;
6283
6284 if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
6285 item = dll_head_p(list_head);
6286 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6287 dll_delete(item);
6288 MFREE(dhd->osh, iov_li, sizeof(*iov_li));
6289 dhd->dump_iovlist_len--;
6290 }
6291 dll_append(list_head, node);
6292 }
6293
6294 void
6295 dhd_iov_li_print(dll_t *list_head)
6296 {
6297 dhd_iov_li_t *iov_li;
6298 dll_t *item, *next;
6299 uint8 index = 0;
6300 for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
6301 next = dll_next_p(item);
6302 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6303 DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
6304 }
6305 }
6306
6307 void
6308 dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
6309 {
6310 dll_t *item;
6311 dhd_iov_li_t *iov_li;
6312 while (!(dll_empty(list_head))) {
6313 item = dll_head_p(list_head);
6314 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6315 dll_delete(item);
6316 MFREE(dhd->osh, iov_li, sizeof(*iov_li));
6317 }
6318 }
6319 #endif /* DUMP_IOCTL_IOV_LIST */
6320
6321 /* configurations of ecounters to be enabled by default in FW */
6322 static ecounters_cfg_t ecounters_cfg_tbl[] = {
6323 /* Global ecounters */
6324 {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
6325 // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
6326 // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},
6327
6328 /* Slice specific ecounters */
6329 {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
6330 {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
6331 {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},
6332
6333 /* Interface specific ecounters */
6334 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
6335 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
6336 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
6337 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},
6338
6339 /* secondary interface */
6340 };
6341
6342 static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
6343 /* Interface specific event ecounters */
6344 {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
6345 };
6346
6347 /* Accepts an argument to -s, -g or -f and creates an XTLV */
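/*
 * The request built here is one WL_ECOUNTERS_XTLV_REPORT_REQ container XTLV
 * whose payload is an ecounters_stats_types_report_req_t header followed by
 * a single zero-length inner XTLV whose id is 'stats_rep'.
 */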
6348 int
6349 dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
6350 uint16 stats_rep, uint8 **xtlv)
6351 {
6352 uint8 *req_xtlv = NULL;
6353 ecounters_stats_types_report_req_t *req;
6354 bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
6355 ecountersv2_xtlv_list_elt_t temp;
6356 uint16 xtlv_len = 0, total_len = 0;
6357 int rc = BCME_OK;
6358
6359 /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
6360 temp.id = stats_rep;
6361 temp.len = 0;
6362
6363 /* Hence len/data = 0/NULL */
6364 xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;
6365
6366 /* Total length of the container */
6367 total_len = BCM_XTLV_HDR_SIZE +
6368 OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;
6369
6370 /* Now allocate a structure for the entire request */
6371 if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
6372 rc = BCME_NOMEM;
6373 goto fail;
6374 }
6375
6376 /* container XTLV context */
6377 bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
6378 BCM_XTLV_OPTION_ALIGN32);
6379
6380 /* Fill other XTLVs in the container. Leave space for XTLV headers */
6381 req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
6382 req->flags = type;
6383 if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
6384 req->slice_mask = 0x1 << if_slice_idx;
6385 } else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
6386 req->if_index = if_slice_idx;
6387 }
6388
6389 /* Fill remaining XTLVs */
6390 bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
6391 BCM_XTLV_OPTION_ALIGN32);
6392 if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
6393 DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
6394 rc = BCME_ERROR;
6395 goto fail;
6396 }
6397
6398 /* fill the top level container and get done with the XTLV container */
6399 rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
6400 bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
6401 stats_types_req));
6402
6403 if (rc) {
6404 DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
6405 goto fail;
6406 }
6407
6408 fail:
6409 if (rc && req_xtlv) {
6410 MFREE(dhd->osh, req_xtlv, total_len);
6411 req_xtlv = NULL;
6412 }
6413
6414 /* update the xtlv pointer */
6415 *xtlv = req_xtlv;
6416 return rc;
6417 }
6418
6419 int
6420 dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
6421 {
6422 wl_el_set_type_t logset_type, logset_op;
6423 int ret = BCME_ERROR;
6424 int i = 0, err = 0;
6425
6426 if (!dhd || !logset_mask)
6427 return BCME_BADARG;
6428
6429 *logset_mask = 0;
6430 memset(&logset_type, 0, sizeof(logset_type));
6431 memset(&logset_op, 0, sizeof(logset_op));
6432 logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
6433 logset_type.len = htod16(sizeof(wl_el_set_type_t));
6434 for (i = 0; i < dhd->event_log_max_sets; i++) {
6435 logset_type.set = i;
6436 err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
6437 sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
6438 /* the iovar may return 'unsupported' error if a log set number is not present
6439 * in the fw, so we should not return on error !
6440 */
6441 if (err == BCME_OK &&
6442 logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
6443 *logset_mask |= 0x01u << i;
6444 ret = BCME_OK;
6445 DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
6446 }
6447 }
6448
6449 return ret;
6450 }
6451
6452 static int
6453 dhd_ecounter_autoconfig(dhd_pub_t *dhd)
6454 {
6455 int rc = BCME_OK;
6456 uint32 buf;
6457 rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
6458
6459 if (rc != BCME_OK) {
6460
6461 if (rc != BCME_UNSUPPORTED) {
6462 DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
6463 rc = BCME_OK;
6464 } else {
6465 DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
6466 }
6467 }
6468
6469 return rc;
6470 }
6471
6472 int
6473 dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
6474 {
6475 int rc = BCME_OK;
6476 if (enable) {
6477 if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
6478 if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
6479 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
6480 } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
6481 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
6482 }
6483 }
6484 } else {
6485 if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
6486 DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
6487 } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
6488 DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
6489 }
6490 }
6491 return rc;
6492 }
6493
6494 int
6495 dhd_start_ecounters(dhd_pub_t *dhd)
6496 {
6497 uint8 i = 0;
6498 uint8 *start_ptr;
6499 int rc = BCME_OK;
6500 bcm_xtlv_t *elt;
6501 ecounters_config_request_v2_t *req = NULL;
6502 ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
6503 ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
6504 uint16 total_processed_containers_len = 0;
6505
6506 for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
6507 ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];
6508
6509 if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
6510 MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
6511 DHD_ERROR(("Ecounters v2: No memory to process\n"));
6512 goto fail;
6513 }
6514
6515 rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
6516 ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);
6517
6518 if (rc) {
6519 DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
6520 ecounter_stat->stats_rep, rc));
6521
6522 /* Free allocated memory and go to fail to release any memories allocated
6523 * in previous iterations. Note that list_elt->data gets populated in
6524 * dhd_create_ecounters_params() and gets freed there itself.
6525 */
6526 MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6527 list_elt = NULL;
6528 goto fail;
6529 }
6530 elt = (bcm_xtlv_t *) list_elt->data;
6531
6532 /* Put the elements in the order they are processed */
6533 if (processed_containers_list == NULL) {
6534 processed_containers_list = list_elt;
6535 } else {
6536 tail->next = list_elt;
6537 }
6538 tail = list_elt;
6539 /* Size of the XTLV returned */
6540 total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
6541 }
6542
6543 /* Now create ecounters config request with totallength */
6544 req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
6545 total_processed_containers_len);
6546
6547 if (req == NULL) {
6548 rc = BCME_NOMEM;
6549 goto fail;
6550 }
6551
6552 req->version = ECOUNTERS_VERSION_2;
6553 req->logset = EVENT_LOG_SET_ECOUNTERS;
6554 req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
6555 req->num_reports = ECOUNTERS_NUM_REPORTS;
6556 req->len = total_processed_containers_len +
6557 OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
6558
6559 /* Copy config */
6560 start_ptr = req->ecounters_xtlvs;
6561
6562 /* Now go element by element in the list */
6563 while (processed_containers_list) {
6564 list_elt = processed_containers_list;
6565
6566 elt = (bcm_xtlv_t *)list_elt->data;
6567
6568 memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6569 start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6570 processed_containers_list = processed_containers_list->next;
6571
6572 /* Free allocated memories */
6573 MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6574 MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6575 }
6576
6577 if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
6578 DHD_ERROR(("failed to start ecounters\n"));
6579 }
6580
6581 fail:
6582 if (req) {
6583 MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
6584 }
6585
6586 /* Now go element by element in the list */
6587 while (processed_containers_list) {
6588 list_elt = processed_containers_list;
6589 elt = (bcm_xtlv_t *)list_elt->data;
6590 processed_containers_list = processed_containers_list->next;
6591
6592 /* Free allocated memories */
6593 MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6594 MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6595 }
6596 return rc;
6597 }
6598
6599 int
6600 dhd_stop_ecounters(dhd_pub_t *dhd)
6601 {
6602 int rc = BCME_OK;
6603 ecounters_config_request_v2_t *req;
6604
6605 /* Now create ecounters config request with totallength */
6606 req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
6607
6608 if (req == NULL) {
6609 rc = BCME_NOMEM;
6610 goto fail;
6611 }
6612
6613 req->version = ECOUNTERS_VERSION_2;
6614 req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
6615
6616 if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
6617 DHD_ERROR(("failed to stop ecounters\n"));
6618 }
6619
6620 fail:
6621 if (req) {
6622 MFREE(dhd->osh, req, sizeof(*req));
6623 }
6624 return rc;
6625 }
6626
6627 /* configured event_id_array for event ecounters */
6628 typedef struct event_id_array {
6629 uint8 event_id;
6630 uint8 str_idx;
6631 } event_id_array_t;
6632
6633 /* get event id array only from event_ecounters_cfg_tbl[] */
6634 static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
6635 {
6636 uint8 i;
6637 uint8 idx = 0;
6638 int32 prev_evt_id = -1;
6639
6640 for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) {
6641 if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) {
6642 if (prev_evt_id >= 0)
6643 idx++;
6644 event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id;
6645 event_array[idx].str_idx = i;
6646 }
6647 prev_evt_id = event_ecounters_cfg_tbl[i].event_id;
6648 }
6649 return idx;
6650 }
6651
6652 /* Per event id, limit the number of requested XTLVs (wl_ifstats_xtlv_id entries for up to two interfaces) */
6653 #define ECNTRS_MAX_XTLV_NUM (31 * 2)
6654
6655 int
6656 dhd_start_event_ecounters(dhd_pub_t *dhd)
6657 {
6658 uint8 i, j = 0;
6659 uint8 event_id_cnt = 0;
6660 uint16 processed_containers_len = 0;
6661 uint16 max_xtlv_len = 0;
6662 int rc = BCME_OK;
6663 uint8 *ptr;
6664 uint8 *data;
6665 event_id_array_t *id_array;
6666 bcm_xtlv_t *elt = NULL;
6667 event_ecounters_config_request_v2_t *req = NULL;
6668
6669 id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
6670 ARRAYSIZE(event_ecounters_cfg_tbl));
6671
6672 if (id_array == NULL) {
6673 rc = BCME_NOMEM;
6674 goto fail;
6675 }
6676 event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
6677
6678 max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
6679 OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
6680 ECNTRS_MAX_XTLV_NUM);
6681
6682 /* Now create ecounters config request with max allowed length */
6683 req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
6684 sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
6685
6686 if (req == NULL) {
6687 rc = BCME_NOMEM;
6688 goto fail;
6689 }
6690
6691 for (i = 0; i <= event_id_cnt; i++) {
6692 /* req initialization by event id */
6693 req->version = ECOUNTERS_VERSION_2;
6694 req->logset = EVENT_LOG_SET_ECOUNTERS;
6695 req->event_id = id_array[i].event_id;
6696 req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
6697 req->len = 0;
6698 processed_containers_len = 0;
6699
6700 /* Copy config */
6701 ptr = req->ecounters_xtlvs;
6702
6703 for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
6704 event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
6705 if (id_array[i].event_id != event_ecounter_stat->event_id)
6706 break;
6707
6708 rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
6709 event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
6710 &data);
6711
6712 if (rc) {
6713 DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
6714 __FUNCTION__, event_ecounter_stat->stats_rep, rc));
6715 goto fail;
6716 }
6717
6718 elt = (bcm_xtlv_t *)data;
6719
6720 memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6721 ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6722 processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
6723
6724 /* Free allocated memories alloced by dhd_create_ecounters_params */
6725 MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6726
6727 if (processed_containers_len > max_xtlv_len) {
6728 DHD_ERROR(("%s XTLV COUNT EXCEEDS THE ALLOWED MAXIMUM!!\n",
6729 __FUNCTION__));
6730 rc = BCME_BADLEN;
6731 goto fail;
6732 }
6733 }
6734
6735 req->len = processed_containers_len +
6736 OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
6737
6738 DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
6739 __FUNCTION__, req->version, req->logset, req->event_id,
6740 req->flags, req->len));
6741
6742 rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
6743
6744 if (rc < 0) {
6745 DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
6746 req->event_id, rc));
6747 goto fail;
6748 }
6749 }
6750
6751 fail:
6752 /* Free allocated memories */
6753 if (req) {
6754 MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
6755 }
6756 if (id_array) {
6757 MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
6758 ARRAYSIZE(event_ecounters_cfg_tbl));
6759 }
6760
6761 return rc;
6762 }
6763
6764 int
6765 dhd_stop_event_ecounters(dhd_pub_t *dhd)
6766 {
6767 int rc = BCME_OK;
6768 event_ecounters_config_request_v2_t *req;
6769
6770 /* Now create ecounters config request with totallength */
6771 req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
6772
6773 if (req == NULL) {
6774 rc = BCME_NOMEM;
6775 goto fail;
6776 }
6777
6778 req->version = ECOUNTERS_VERSION_2;
6779 req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
6780 req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
6781
6782 if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
6783 DHD_ERROR(("failed to stop event_ecounters\n"));
6784 }
6785
6786 fail:
6787 if (req) {
6788 MFREE(dhd->osh, req, sizeof(*req));
6789 }
6790 return rc;
6791 }
6792
6793 #ifdef DHD_LOG_DUMP
6794 int
6795 dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
6796 log_dump_section_hdr_t *sec_hdr,
6797 char *text_hdr, int buflen, uint32 sec_type)
6798 {
6799 uint32 rlen = 0;
6800 uint32 data_len = 0;
6801 void *data = NULL;
6802 unsigned long flags = 0;
6803 int ret = 0;
6804 dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
6805 int pos = 0;
6806 int fpos_sechdr = 0;
6807
6808 if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
6809 return BCME_BADARG;
6810 }
6811 /* do not allow further writes to the ring
6812 * till we flush it
6813 */
6814 DHD_DBG_RING_LOCK(ring->lock, flags);
6815 ring->state = RING_SUSPEND;
6816 DHD_DBG_RING_UNLOCK(ring->lock, flags);
6817
6818 if (dhdp->concise_dbg_buf) {
6819 /* re-use concise debug buffer temporarily
6820 * to pull ring data, to write
6821 * record by record to file
6822 */
6823 data_len = CONCISE_DUMP_BUFLEN;
6824 data = dhdp->concise_dbg_buf;
6825 ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
6826 /* write the section header now with zero length,
6827 * once the correct length is found out, update
6828 * it later
6829 */
6830 fpos_sechdr = pos;
6831 sec_hdr->type = sec_type;
6832 sec_hdr->length = 0;
6833 ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
6834 sizeof(*sec_hdr), &pos);
6835 do {
6836 rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
6837 if (rlen > 0) {
6838 /* write the log */
6839 ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
6840 }
6841 DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
6842 } while ((rlen > 0));
6843 /* now update the section header length in the file */
6844 /* Complete ring size is dumped by HAL, hence updating length to ring size */
6845 sec_hdr->length = ring->ring_size;
6846 ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
6847 sizeof(*sec_hdr), &fpos_sechdr);
6848 } else {
6849 DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
6850 }
6851 DHD_DBG_RING_LOCK(ring->lock, flags);
6852 ring->state = RING_ACTIVE;
6853 /* Resetting both read and write pointer,
6854 * since all items are read.
6855 */
6856 ring->rp = ring->wp = 0;
6857 DHD_DBG_RING_UNLOCK(ring->lock, flags);
6858
6859 return ret;
6860 }
6861
6862 int
6863 dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
6864 unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
6865 char *text_hdr, uint32 sec_type)
6866 {
6867 uint32 rlen = 0;
6868 uint32 data_len = 0, total_len = 0;
6869 void *data = NULL;
6870 unsigned long fpos_sechdr = 0;
6871 unsigned long flags = 0;
6872 int ret = 0;
6873 dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
6874
6875 if (!dhdp || !ring || !file || !sec_hdr ||
6876 !file_posn || !text_hdr)
6877 return BCME_BADARG;
6878
6879 /* do not allow further writes to the ring
6880 * till we flush it
6881 */
6882 DHD_DBG_RING_LOCK(ring->lock, flags);
6883 ring->state = RING_SUSPEND;
6884 DHD_DBG_RING_UNLOCK(ring->lock, flags);
6885
6886 if (dhdp->concise_dbg_buf) {
6887 /* re-use concise debug buffer temporarily
6888 * to pull ring data, to write
6889 * record by record to file
6890 */
6891 data_len = CONCISE_DUMP_BUFLEN;
6892 data = dhdp->concise_dbg_buf;
6893 dhd_os_write_file_posn(file, file_posn, text_hdr,
6894 strlen(text_hdr));
6895 /* write the section header now with zero length,
6896 * once the correct length is found out, update
6897 * it later
6898 */
6899 dhd_init_sec_hdr(sec_hdr);
6900 fpos_sechdr = *file_posn;
6901 sec_hdr->type = sec_type;
6902 sec_hdr->length = 0;
6903 dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
6904 sizeof(*sec_hdr));
6905 do {
6906 rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
6907 if (rlen > 0) {
6908 /* write the log */
6909 ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
6910 if (ret < 0) {
6911 DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
6912 DHD_DBG_RING_LOCK(ring->lock, flags);
6913 ring->state = RING_ACTIVE;
6914 DHD_DBG_RING_UNLOCK(ring->lock, flags);
6915 return BCME_ERROR;
6916 }
6917 }
6918 total_len += rlen;
6919 } while (rlen > 0);
6920 /* now update the section header length in the file */
6921 sec_hdr->length = total_len;
6922 dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
6923 } else {
6924 DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
6925 }
6926
6927 DHD_DBG_RING_LOCK(ring->lock, flags);
6928 ring->state = RING_ACTIVE;
6929 /* Resetting both read and write pointer,
6930 * since all items are read.
6931 */
6932 ring->rp = ring->wp = 0;
6933 DHD_DBG_RING_UNLOCK(ring->lock, flags);
6934 return BCME_OK;
6935 }
6936
6937 /* logdump cookie */
6938 #define MAX_LOGUDMP_COOKIE_CNT 10u
6939 #define LOGDUMP_COOKIE_STR_LEN 50u
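/* With these values the cookie ring holds at most 10 fixed 50-byte records;
 * dhd_logdump_cookie_init() verifies that the caller's buffer can hold
 * dhd_ring_get_hdr_size() plus that payload.
 */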
6940 int
6941 dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
6942 {
6943 uint32 ring_size;
6944
6945 if (!dhdp || !buf) {
6946 DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
6947 return BCME_ERROR;
6948 }
6949
6950 ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
6951 if (buf_size < ring_size) {
6952 DHD_ERROR(("BUF SIZE IS TOO SHORT: req:%d buf_size:%d\n",
6953 ring_size, buf_size));
6954 return BCME_ERROR;
6955 }
6956
6957 dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
6958 LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
6959 DHD_RING_TYPE_FIXED);
6960 if (!dhdp->logdump_cookie) {
6961 DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
6962 return BCME_ERROR;
6963 }
6964
6965 return BCME_OK;
6966 }
6967
6968 void
6969 dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
6970 {
6971 if (!dhdp) {
6972 return;
6973 }
6974 if (dhdp->logdump_cookie) {
6975 dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
6976 }
6977
6978 return;
6979 }
6980
6981 void
6982 dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
6983 {
6984 char *ptr;
6985
6986 if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
6987 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
6988 " type = %p, cookie_cfg:%p\n", __FUNCTION__,
6989 dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
6990 return;
6991 }
6992 ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
6993 if (ptr == NULL) {
6994 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
6995 return;
6996 }
6997 scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
6998 return;
6999 }
7000
7001 int
7002 dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
7003 {
7004 char *ptr;
7005
7006 if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
7007 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
7008 "cookie=%p cookie_cfg:%p\n", __FUNCTION__,
7009 dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
7010 return BCME_ERROR;
7011 }
7012 ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
7013 if (ptr == NULL) {
7014 DHD_ERROR(("%s : Skip to get due to locking\n", __FUNCTION__));
7015 return BCME_ERROR;
7016 }
7017 memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
7018 dhd_ring_free_first(dhdp->logdump_cookie);
7019 return BCME_OK;
7020 }
7021
7022 int
7023 dhd_logdump_cookie_count(dhd_pub_t *dhdp)
7024 {
7025 if (!dhdp || !dhdp->logdump_cookie) {
7026 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
7027 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
7028 return 0;
7029 }
7030 return dhd_ring_get_cur_size(dhdp->logdump_cookie);
7031 }
7032
7033 static inline int
7034 __dhd_log_dump_cookie_to_file(
7035 dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
7036 char *buf, uint32 buf_size)
7037 {
7038
7039 uint32 remain = buf_size;
7040 int ret = BCME_ERROR;
7041 char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7042 log_dump_section_hdr_t sec_hdr;
7043 uint32 read_idx;
7044 uint32 write_idx;
7045
7046 read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7047 write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7048 while (dhd_logdump_cookie_count(dhdp) > 0) {
7049 memset(tmp_buf, 0, sizeof(tmp_buf));
7050 ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7051 if (ret != BCME_OK) {
7052 return ret;
7053 }
7054 remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
7055 }
7056 dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7057 dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7058
7059 ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
7060 if (ret < 0) {
7061 DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
7062 return ret;
7063 }
7064 sec_hdr.magic = LOG_DUMP_MAGIC;
7065 sec_hdr.timestamp = local_clock();
7066 sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
7067 sec_hdr.length = buf_size - remain;
7068
7069 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
7070 if (ret < 0) {
7071 DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
7072 return ret;
7073 }
7074
7075 ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
7076 if (ret < 0) {
7077 DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
7078 }
7079
7080 return ret;
7081 }
7082
7083 uint32
7084 dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
7085 {
7086 int len = 0;
7087 char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7088 log_dump_section_hdr_t sec_hdr;
7089 char *buf = NULL;
7090 int ret = BCME_ERROR;
7091 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7092 uint32 read_idx;
7093 uint32 write_idx;
7094 uint32 remain;
7095
7096 remain = buf_size;
7097
7098 if (!dhdp || !dhdp->logdump_cookie) {
7099 DHD_ERROR(("%s At least one ptr is NULL "
7100 "dhdp = %p cookie %p\n",
7101 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
7102 goto exit;
7103 }
7104
7105 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7106 if (!buf) {
7107 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7108 goto exit;
7109 }
7110
7111 read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7112 write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7113 while (dhd_logdump_cookie_count(dhdp) > 0) {
7114 memset(tmp_buf, 0, sizeof(tmp_buf));
7115 ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7116 if (ret != BCME_OK) {
7117 goto exit;
7118 }
7119 remain -= (uint32)strlen(tmp_buf);
7120 }
7121 dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7122 dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7123 len += strlen(COOKIE_LOG_HDR);
7124 len += sizeof(sec_hdr);
7125 len += (buf_size - remain);
7126 exit:
7127 if (buf)
7128 MFREE(dhdp->osh, buf, buf_size);
7129 return len;
7130 }
7131
7132 int
7133 dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
7134 {
7135 int ret = BCME_ERROR;
7136 char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7137 log_dump_section_hdr_t sec_hdr;
7138 char *buf = NULL;
7139 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7140 int pos = 0;
7141 uint32 read_idx;
7142 uint32 write_idx;
7143 uint32 remain;
7144
7145 remain = buf_size;
7146
7147 if (!dhdp || !dhdp->logdump_cookie) {
7148 DHD_ERROR(("%s At least one ptr is NULL "
7149 "dhdp = %p cookie %p\n",
7150 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
7151 goto exit;
7152 }
7153
7154 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7155 if (!buf) {
7156 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7157 goto exit;
7158 }
7159
7160 read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7161 write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7162 while (dhd_logdump_cookie_count(dhdp) > 0) {
7163 memset(tmp_buf, 0, sizeof(tmp_buf));
7164 ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7165 if (ret != BCME_OK) {
7166 goto exit;
7167 }
7168 remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
7169 }
7170 dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7171 dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7172 ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
7173 sec_hdr.magic = LOG_DUMP_MAGIC;
7174 sec_hdr.timestamp = local_clock();
7175 sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
7176 sec_hdr.length = buf_size - remain;
7177 ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
7178 ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
7179 exit:
7180 if (buf)
7181 MFREE(dhdp->osh, buf, buf_size);
7182 return ret;
7183 }
7184
7185 int
7186 dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
7187 {
7188 char *buf;
7189 int ret = BCME_ERROR;
7190 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7191
7192 if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
7193 DHD_ERROR(("%s At least one ptr is NULL "
7194 "dhdp = %p cookie %p fp = %p f_pos = %p\n",
7195 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
7196 return ret;
7197 }
7198
7199 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7200 if (!buf) {
7201 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7202 return ret;
7203 }
7204 ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
7205 MFREE(dhdp->osh, buf, buf_size);
7206
7207 return ret;
7208 }
7209
7210 #endif /* DHD_LOG_DUMP */
7211
7212 #ifdef DHD_LOG_DUMP
7213 #define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
7214 void
7215 dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
7216 {
7217 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7218 log_dump_type_t *flush_type;
7219 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7220 uint64 current_time_sec;
7221
7222 if (!dhdp) {
7223 DHD_ERROR(("dhdp is NULL !\n"));
7224 return;
7225 }
7226
7227 if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
7228 DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
7229 return;
7230 }
7231
7232 current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
7233
7234 DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
7235 __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
7236 DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
7237
7238 if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
7239 DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
7240 __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
7241 return;
7242 }
7243
7244 clear_debug_dump_time(dhdp->debug_dump_time_str);
7245 /* */
7246 dhdp->debug_dump_subcmd = subcmd;
7247
7248 dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
7249
7250 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7251 /* flush_type is freed at do_dhd_log_dump function */
7252 flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
7253 if (flush_type) {
7254 *flush_type = DLD_BUF_TYPE_ALL;
7255 dhd_schedule_log_dump(dhdp, flush_type);
7256 } else {
7257 DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
7258 return;
7259 }
7260 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7261
7262 /* Inside dhd_mem_dump, event notification will be sent to HAL and
7263 * from other context DHD pushes memdump, debug_dump and pktlog dump
7264 * to HAL and HAL will write into file
7265 */
7266 #if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
7267 dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
7268 dhd_bus_mem_dump(dhdp);
7269 #endif /* BCMPCIE && DHD_FW_COREDUMP */
7270
7271 }
7272 #endif /* DHD_LOG_DUMP */

#ifdef EWP_EDL
/* For now we allocate memory for the EDL ring with DMA_ALLOC_CONSISTENT.
 * The reason is that on hikey, trying to DMA_MAP preallocated memory
 * fails with an 'out of space in SWIOTLB' error.
 */
int
dhd_edl_mem_init(dhd_pub_t *dhd)
{
	int ret = 0;

	memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
	ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	return BCME_OK;
}

/* NOTE: dhd_edl_mem_deinit() need not be called explicitly, because the dma_buf
 * for EDL is freed in dhd_prot_detach_edl_rings(), which is called during de-init.
 */
void
dhd_edl_mem_deinit(dhd_pub_t *dhd)
{
	if (dhd->edl_ring_mem.va != NULL)
		dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
}

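/* Process one work item received on the EDL ring: validate the infobuf
 * version, type and length fields, hand the embedded event log trace data
 * to dhd_dbg_trace_evnt_handler(), and optionally send the raw info buffer
 * up the network stack when logtrace_pkt_sendup is enabled.
 */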
int
dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
		void *evt_decode_data)
{
	msg_hdr_edl_t *msg = NULL;
	cmn_msg_hdr_t *cmn_msg_hdr = NULL;
	uint8 *buf = NULL;

	if (!data || !dhdp || !evt_decode_data) {
		DHD_ERROR(("%s: invalid args !\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* format of data in each work item in the EDL ring:
	 * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
	 * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
	 */
	cmn_msg_hdr = (cmn_msg_hdr_t *)data;
	msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
	buf = (uint8 *)msg;
	/* validate the fields */
	if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
		DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
			" expected (0x%x)\n", __FUNCTION__,
			ltoh32(msg->infobuf_ver), PCIE_INFOBUF_V1));
		return BCME_VERSION;
	}

	/* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
	if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
		DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
			__FUNCTION__));
		return BCME_BUFTOOLONG;
	}

	if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
			__FUNCTION__, ltoh16(msg->pyld_hdr.type)));
		return BCME_BADOPTION;
	}

	if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
		DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
			" than available buffer size %u\n", __FUNCTION__,
			ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
		return BCME_BADLEN;
	}

	/* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
	buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
	dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
		ltoh16(msg->pyld_hdr.length));

	/* if 'dhdp->logtrace_pkt_sendup' is set, alloc an skb, copy the
	 * event data to the skb and send it up the stack
	 */
	if (dhdp->logtrace_pkt_sendup) {
		DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
			(uint32)(ltoh16(msg->pyld_hdr.length) +
			sizeof(info_buf_payload_hdr_t) + 4)));
		dhd_sendup_info_buf(dhdp, (uint8 *)msg);
	}

	return BCME_OK;
}
#endif /* EWP_EDL */

#if defined(SHOW_LOGTRACE)
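/* Read the firmware version string embedded near the end of the firmware
 * image file: seek to the last FW_VER_STR_LEN bytes, search for the
 * FW_VER_STR marker, and copy the full version string into the global
 * fw_version buffer so it is available even if the FW download later fails.
 */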
int
dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
{
	void *file = NULL;
	int size = 0;
	char buf[FW_VER_STR_LEN];
	char *str = NULL;
	int ret = BCME_OK;

	if (!fwpath)
		return BCME_BADARG;

	file = dhd_os_open_image1(dhdp, fwpath);
	if (!file) {
		ret = BCME_ERROR;
		goto exit;
	}
	size = dhd_os_get_image_size(file);
	if (!size) {
		ret = BCME_ERROR;
		goto exit;
	}

	/* seek to the last FW_VER_STR_LEN bytes of the file */
	if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
		ret = BCME_ERROR;
		goto exit;
	}

	/* read those last bytes of the file into a buffer */
	memset(buf, 0, FW_VER_STR_LEN);
	if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
		ret = BCME_ERROR;
		goto exit;
	}
	/* search for 'Version' in the buffer */
	str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
	if (!str) {
		ret = BCME_ERROR;
		goto exit;
	}
	/* walk backwards over printable ASCII characters to find the start
	 * of the version string
	 */
	while (str != buf &&
		(*str >= ' ' && *str <= '~')) {
		--str;
	}
	/* undo the final decrement, so that str points to the first
	 * printable character of the version string
	 */
	++str;

	if (strlen(str) > (FW_VER_STR_LEN - 1)) {
		ret = BCME_BADLEN;
		goto exit;
	}

	DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
	/* copy to a global variable, so that if the FW load fails, the
	 * core capture logs still contain the FW version read from the file
	 */
	memset(fw_version, 0, FW_VER_STR_LEN);
	strlcpy(fw_version, str, FW_VER_STR_LEN);

exit:
	if (file)
		dhd_os_close_image1(dhdp, file);

	return ret;
}
#endif /* SHOW_LOGTRACE */

#if defined(DHD_H2D_LOG_TIME_SYNC)
/*
 * Helper function:
 * Used for syncing dongle console message time with host printk time.
 */
void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
{
	uint64 ts;

	/*
	 * local_clock() returns time in nanoseconds.
	 * The dongle understands time only in milliseconds.
	 */
	ts = local_clock();
	/* nanoseconds to milliseconds */
	do_div(ts, 1000000);
	if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
		DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
		/* stop host-dongle console time syncing */
		dhd->dhd_rte_time_sync_ms = 0;
	}
}
#endif /* DHD_H2D_LOG_TIME_SYNC */

#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
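/* Enable or disable HE (802.11ax) support in the dongle: pack the
 * WL_HE_CMD_ENAB sub-command and the requested value into an XTLV and
 * set it through the "he" iovar.
 */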
int
dhd_control_he_enab(dhd_pub_t *dhd, uint8 he_enab)
{
	int ret = BCME_OK;
	bcm_xtlv_t *pxtlv = NULL;
	uint8 mybuf[DHD_IOVAR_BUF_SIZE];
	uint16 mybuf_len = sizeof(mybuf);
	pxtlv = (bcm_xtlv_t *)mybuf;

	ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
		&he_enab, BCM_XTLV_OPTION_ALIGN32);

	if (ret != BCME_OK) {
		/* log the packing error before mapping it to -EINVAL */
		DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
		return -EINVAL;
	}

	ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
			__FUNCTION__, he_enab, bcmerrorstr(ret)));
	} else {
		DHD_ERROR(("%s he_enab (%d) set succeeded\n", __FUNCTION__, he_enab));
	}

	return ret;
}
#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
