1 /*
2 * Neighbor Awareness Networking
3 *
4 * Copyright (C) 1999-2019, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions
16 * of the license of that module. An independent module is a module which is
17 * not derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Open:>>
26 *
27 * $Id: wl_cfgnan.c 825970 2019-06-18 05:28:31Z $
28 */
29
30 #ifdef WL_NAN
31 #include <bcmutils.h>
32 #include <bcmendian.h>
33 #include <bcmwifi_channels.h>
34 #include <nan.h>
35 #include <bcmiov.h>
36 #include <net/rtnetlink.h>
37
38 #include <wl_cfg80211.h>
39 #include <wl_cfgscan.h>
40 #include <wl_android.h>
41 #include <wl_cfgnan.h>
42
43 #include <dngl_stats.h>
44 #include <dhd.h>
45 #ifdef RTT_SUPPORT
46 #include <dhd_rtt.h>
47 #endif /* RTT_SUPPORT */
48 #include <wl_cfgvendor.h>
49 #include <bcmbloom.h>
50 #include <wl_cfgp2p.h>
51 #ifdef RTT_SUPPORT
52 #include <dhd_rtt.h>
53 #endif /* RTT_SUPPORT */
54 #include <bcmstdlib_s.h>
55
56 #define NAN_RANGE_REQ_EVNT 1
57 #define NAN_RAND_MAC_RETRIES 10
58 #define NAN_SCAN_DWELL_TIME_DELTA_MS 10
59
60 #ifdef WL_NAN_DISC_CACHE
61 /* Disc Cache Parameters update Flags */
62 #define NAN_DISC_CACHE_PARAM_SDE_CONTROL 0x0001
63
64 static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void *data,
65 u16 *disc_cache_update_flags);
66 static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
67 uint8 local_subid);
68 static nan_disc_result_cache *
69 wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
70 struct ether_addr *peer);
71 #endif /* WL_NAN_DISC_CACHE */
72 static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg,
73 wl_nan_instance_id_t sub_id);
74 static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);
75
76 static int wl_cfgnan_get_capability(struct net_device *ndev,
77 struct bcm_cfg80211 *cfg,
78 nan_hal_capabilities_t *capabilities);
79
80 static int32 wl_cfgnan_notify_disc_with_ranging(
81 struct bcm_cfg80211 *cfg, nan_ranging_inst_t *rng_inst,
82 nan_event_data_t *nan_event_data, uint32 distance);
83
84 static void
85 wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
86 nan_ranging_inst_t *rng_inst);
87
88 static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
89 nan_event_data_t *nan_event_data);
90
91 void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
92 struct ether_addr *peer_addr);
93
94 static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg);
95
96 static void
97 wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
98 nan_ranging_inst_t *ranging_inst);
99
100 #ifdef RTT_SUPPORT
101 static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
102 struct ether_addr *peer, int reason);
103 #endif /* RTT_SUPPORT */
104
/* Map a WL_NAN_ROLE_* value to its printable name for debug logging.
 * Each C2S() entry expands to a "case X: return #X;" arm; any value
 * without an entry falls through to "WL_NAN_ROLE_UNKNOWN".
 */
static const char *nan_role_to_str(u8 role)
{
	switch (role) {
	C2S(WL_NAN_ROLE_AUTO)
	C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC)
	C2S(WL_NAN_ROLE_NON_MASTER_SYNC)
	C2S(WL_NAN_ROLE_MASTER)
	C2S(WL_NAN_ROLE_ANCHOR_MASTER)
	default:
		return "WL_NAN_ROLE_UNKNOWN";
	}
}
117
/* Map a WL_NAN_EVENT_* firmware event code to its printable name for
 * debug logging. Each C2S() entry expands to a "case X: return #X;" arm;
 * unrecognized codes return "WL_NAN_EVENT_UNKNOWN".
 */
static const char *nan_event_to_str(u16 cmd)
{
	switch (cmd) {
	C2S(WL_NAN_EVENT_START)
	C2S(WL_NAN_EVENT_DISCOVERY_RESULT)
	C2S(WL_NAN_EVENT_TERMINATED)
	C2S(WL_NAN_EVENT_RECEIVE)
	C2S(WL_NAN_EVENT_MERGE)
	C2S(WL_NAN_EVENT_STOP)
	C2S(WL_NAN_EVENT_PEER_DATAPATH_IND)
	C2S(WL_NAN_EVENT_DATAPATH_ESTB)
	C2S(WL_NAN_EVENT_SDF_RX)
	C2S(WL_NAN_EVENT_DATAPATH_END)
	C2S(WL_NAN_EVENT_RNG_REQ_IND)
	C2S(WL_NAN_EVENT_RNG_RPT_IND)
	C2S(WL_NAN_EVENT_RNG_TERM_IND)
	C2S(WL_NAN_EVENT_TXS)
	C2S(WL_NAN_EVENT_INVALID)

	default:
		return "WL_NAN_EVENT_UNKNOWN";
	}
}
141
142 static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
143 struct bcm_cfg80211 *cfg,
144 bcm_iov_batch_buf_t *nan_buf,
145 uint16 nan_buf_size, uint32 *status,
146 uint8 *resp_buf, uint16 resp_buf_len);
/* Allocate the next free NAN service instance id (1-based).
 *
 * @cfg:       driver private data holding the svc_inst_id_mask bitmap
 * @p_inst_id: out parameter, receives the allocated id in [1, NAN_ID_MAX]
 *
 * Scans all NAN_ID_MAX bitmap slots starting at the last allocation
 * point and wrapping around, so ids released below inst_id_start are
 * reused instead of falsely reporting exhaustion (the previous linear
 * scan stopped at NAN_ID_MAX and ignored freed lower slots).
 *
 * Returns BCME_OK, -EINVAL on a NULL out pointer, or BCME_NORESOURCE
 * when every slot is taken.
 */
int wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
{
	s32 ret = BCME_OK;
	uint8 i;
	uint8 slot;
	uint8 start;

	if (p_inst_id == NULL) {
		WL_ERR(("Invalid arguments\n"));
		ret = -EINVAL;
		goto exit;
	}

	if (cfg->nancfg.inst_id_start == NAN_ID_MAX) {
		WL_ERR(("Consumed all IDs, resetting the counter\n"));
		cfg->nancfg.inst_id_start = 0;
	}

	/* Wrap-around scan: begin where the last allocation left off and
	 * visit every slot exactly once.
	 */
	start = cfg->nancfg.inst_id_start;
	for (i = 0; i < NAN_ID_MAX; i++) {
		slot = (start + i) % NAN_ID_MAX;
		if (isclr(cfg->nancfg.svc_inst_id_mask, slot)) {
			setbit(cfg->nancfg.svc_inst_id_mask, slot);
			/* ids handed to callers are 1-based */
			*p_inst_id = slot + 1;
			cfg->nancfg.inst_id_start = *p_inst_id;
			WL_DBG(("Instance ID=%d\n", *p_inst_id));
			goto exit;
		}
	}
	WL_ERR(("Allocated maximum IDs\n"));
	ret = BCME_NORESOURCE;
exit:
	return ret;
}
176
/* Release a previously allocated NAN service instance id.
 *
 * @cfg:     driver private data holding the svc_inst_id_mask bitmap
 * @inst_id: 1-based id obtained from wl_cfgnan_generate_inst_id()
 *
 * Returns BCME_OK, or BCME_BADARG for id 0 (ids start at 1; without
 * this guard, inst_id - 1 would index the bitmap at -1).
 */
int wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id)
{
	s32 ret = BCME_OK;

	if (inst_id == 0) {
		WL_ERR(("%s: Invalid svc instance id %d\n", __FUNCTION__, inst_id));
		return BCME_BADARG;
	}
	WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__, inst_id));
	clrbit(cfg->nancfg.svc_inst_id_mask, inst_id - 1);
	return ret;
}
/* Parse a NAN Service Descriptor Extension Attribute (SDEA) from an
 * event buffer into tlv_data.
 *
 * @osh:      osl handle used for the sde_svc_info allocation
 * @p_attr:   start of the SDEA in the event buffer
 * @len:      remaining bytes at p_attr
 * @tlv_data: output; sde_control_flag and (optionally) sde_svc_info are
 *            filled in. sde_svc_info.data is allocated here and freed
 *            on the fail path.
 *
 * Returns BCME_OK or a BCME_* error (BUFTOOSHORT, BADLEN, NOMEM).
 */
s32 wl_cfgnan_parse_sdea_data(osl_t *osh, const uint8 *p_attr, uint16 len,
	nan_event_data_t *tlv_data)
{
	const wifi_nan_svc_desc_ext_attr_t *nan_svc_desc_ext_attr = NULL;
	uint8 offset;
	s32 ret = BCME_OK;

	/* service descriptor ext attributes */
	nan_svc_desc_ext_attr = (const wifi_nan_svc_desc_ext_attr_t *)p_attr;

	/* attribute ID */
	WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr->id));

	/* attribute length */
	WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
	/* pick up the SDE control flags only when this SDEA refers to the
	 * publish instance currently being processed
	 */
	if (nan_svc_desc_ext_attr->instance_id == tlv_data->pub_id) {
		tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
	}
	/* NOTE(review): the fixed header above is dereferenced before this
	 * length check runs — assumes callers guarantee at least
	 * sizeof(wifi_nan_svc_desc_ext_attr_t) bytes; confirm at call sites.
	 */
	offset = sizeof(*nan_svc_desc_ext_attr);
	if (offset > len) {
		WL_ERR(("Invalid event buffer len\n"));
		ret = BCME_BUFTOOSHORT;
		goto fail;
	}
	p_attr += offset;
	len -= offset;

	if (tlv_data->sde_control_flag & NAN_SC_RANGE_LIMITED) {
		WL_TRACE(("> svc_control: range limited present\n"));
	}
	if (tlv_data->sde_control_flag & NAN_SDE_CF_SVC_UPD_IND_PRESENT) {
		WL_TRACE(("> svc_control: sdea svc specific info present\n"));
		/* 16-bit little-endian service info length at bytes 1..2;
		 * byte 0 is presumably the Service Update Indicator — TODO
		 * confirm against the SDEA layout. NOTE(review): these two
		 * bytes are read without a preceding check that len >= 3.
		 */
		tlv_data->sde_svc_info.dlen = (p_attr[1] | (p_attr[0x2] << 0x8));
		WL_TRACE(
			("> sdea svc info len: 0x%02x\n", tlv_data->sde_svc_info.dlen));
		if (!tlv_data->sde_svc_info.dlen ||
			tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
			/* must be able to handle null msg which is not error */
			tlv_data->sde_svc_info.dlen = 0;
			WL_ERR(("sde data length is invalid\n"));
			ret = BCME_BADLEN;
			goto fail;
		}

		if (tlv_data->sde_svc_info.dlen > 0) {
			tlv_data->sde_svc_info.data =
				MALLOCZ(osh, tlv_data->sde_svc_info.dlen);
			if (!tlv_data->sde_svc_info.data) {
				WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
				tlv_data->sde_svc_info.dlen = 0;
				ret = BCME_NOMEM;
				goto fail;
			}
			/* advance read pointer, consider sizeof of Service Update
			 * Indicator. NOTE(review): this computes
			 * sizeof(dlen) - 1 bytes — i.e. it skips the indicator
			 * byte only, not the 2-byte length field read above;
			 * looks intentional for this layout but verify.
			 */
			offset = sizeof(tlv_data->sde_svc_info.dlen) - 1;
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			ret = memcpy_s(tlv_data->sde_svc_info.data,
				tlv_data->sde_svc_info.dlen, p_attr,
				tlv_data->sde_svc_info.dlen);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy sde_svc_info\n"));
				goto fail;
			}
		} else {
			/* must be able to handle null msg which is not error
			 * (unreachable here: a zero dlen already failed the
			 * validity check above)
			 */
			tlv_data->sde_svc_info.dlen = 0;
			WL_DBG(("%s: sdea svc info length is zero, null info data\n",
				__FUNCTION__));
		}
	}
	return ret;
fail:
	/* release any partial allocation so the caller sees a clean tlv */
	if (tlv_data->sde_svc_info.data) {
		MFREE(osh, tlv_data->sde_svc_info.data, tlv_data->sde_svc_info.dlen);
		tlv_data->sde_svc_info.data = NULL;
	}

	WL_DBG(("Parse SDEA event data, status = %d\n", ret));
	return ret;
}
271
272 /*
273 * This attribute contains some mandatory fields and some optional fields
274 * depending on the content of the service discovery request.
275 */
wl_cfgnan_parse_sda_data(osl_t * osh,const uint8 * p_attr,uint16 len,nan_event_data_t * tlv_data)276 s32 wl_cfgnan_parse_sda_data(osl_t *osh, const uint8 *p_attr, uint16 len,
277 nan_event_data_t *tlv_data)
278 {
279 uint8 svc_control = 0, offset = 0;
280 s32 ret = BCME_OK;
281 const wifi_nan_svc_descriptor_attr_t *nan_svc_desc_attr = NULL;
282
283 /* service descriptor attributes */
284 nan_svc_desc_attr = (const wifi_nan_svc_descriptor_attr_t *)p_attr;
285 /* attribute ID */
286 WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr->id));
287
288 /* attribute length */
289 WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));
290
291 /* service ID */
292 ret = memcpy_s(tlv_data->svc_name, sizeof(tlv_data->svc_name),
293 nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
294 if (ret != BCME_OK) {
295 WL_ERR(("Failed to copy svc_hash_name:\n"));
296 return ret;
297 }
298 WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));
299
300 /* local instance ID */
301 tlv_data->local_inst_id = nan_svc_desc_attr->instance_id;
302 WL_TRACE(("> local instance id: 0x%02x\n", tlv_data->local_inst_id));
303
304 /* requestor instance ID */
305 tlv_data->requestor_id = nan_svc_desc_attr->requestor_id;
306 WL_TRACE(("> requestor id: 0x%02x\n", tlv_data->requestor_id));
307
308 /* service control */
309 svc_control = nan_svc_desc_attr->svc_control;
310 if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) {
311 WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
312 } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE) {
313 WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
314 } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_FOLLOWUP) {
315 WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
316 }
317 offset = sizeof(*nan_svc_desc_attr);
318 if (offset > len) {
319 WL_ERR(("Invalid event buffer len\n"));
320 ret = BCME_BUFTOOSHORT;
321 goto fail;
322 }
323 p_attr += offset;
324 len -= offset;
325
326 /*
327 * optional fields:
328 * must be in order following by service descriptor attribute format
329 */
330
331 /* binding bitmap */
332 if (svc_control & NAN_SC_BINDING_BITMAP_PRESENT) {
333 uint16 bitmap = 0;
334 WL_TRACE(("> svc_control: binding bitmap present\n"));
335
336 /* Copy binding bitmap */
337 ret = memcpy_s(&bitmap, sizeof(bitmap), p_attr, NAN_BINDING_BITMAP_LEN);
338 if (ret != BCME_OK) {
339 WL_ERR(("Failed to copy bit map\n"));
340 return ret;
341 }
342 WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));
343
344 if (NAN_BINDING_BITMAP_LEN > len) {
345 WL_ERR(("Invalid event buffer len\n"));
346 ret = BCME_BUFTOOSHORT;
347 goto fail;
348 }
349 p_attr += NAN_BINDING_BITMAP_LEN;
350 len -= NAN_BINDING_BITMAP_LEN;
351 }
352
353 /* matching filter */
354 if (svc_control & NAN_SC_MATCHING_FILTER_PRESENT) {
355 WL_TRACE(("> svc_control: matching filter present\n"));
356
357 tlv_data->tx_match_filter.dlen = *p_attr++;
358 WL_TRACE(("> matching filter len: 0x%02x\n",
359 tlv_data->tx_match_filter.dlen));
360
361 if (!tlv_data->tx_match_filter.dlen ||
362 tlv_data->tx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
363 tlv_data->tx_match_filter.dlen = 0;
364 WL_ERR(("tx match filter length is invalid\n"));
365 ret = -EINVAL;
366 goto fail;
367 }
368 tlv_data->tx_match_filter.data =
369 MALLOCZ(osh, tlv_data->tx_match_filter.dlen);
370 if (!tlv_data->tx_match_filter.data) {
371 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
372 tlv_data->tx_match_filter.dlen = 0;
373 ret = -ENOMEM;
374 goto fail;
375 }
376 ret = memcpy_s(tlv_data->tx_match_filter.data,
377 tlv_data->tx_match_filter.dlen, p_attr,
378 tlv_data->tx_match_filter.dlen);
379 if (ret != BCME_OK) {
380 WL_ERR(("Failed to copy tx match filter data\n"));
381 goto fail;
382 }
383 /* advance read pointer */
384 offset = tlv_data->tx_match_filter.dlen;
385 if (offset > len) {
386 WL_ERR(("Invalid event buffer\n"));
387 ret = BCME_BUFTOOSHORT;
388 goto fail;
389 }
390 p_attr += offset;
391 len -= offset;
392 }
393
394 /* service response filter */
395 if (svc_control & NAN_SC_SR_FILTER_PRESENT) {
396 WL_TRACE(("> svc_control: service response filter present\n"));
397
398 tlv_data->rx_match_filter.dlen = *p_attr++;
399 WL_TRACE(("> sr match filter len: 0x%02x\n",
400 tlv_data->rx_match_filter.dlen));
401
402 if (!tlv_data->rx_match_filter.dlen ||
403 tlv_data->rx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
404 tlv_data->rx_match_filter.dlen = 0;
405 WL_ERR(
406 ("%s: sr matching filter length is invalid\n", __FUNCTION__));
407 ret = BCME_BADLEN;
408 goto fail;
409 }
410 tlv_data->rx_match_filter.data =
411 MALLOCZ(osh, tlv_data->rx_match_filter.dlen);
412 if (!tlv_data->rx_match_filter.data) {
413 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
414 tlv_data->rx_match_filter.dlen = 0;
415 ret = BCME_NOMEM;
416 goto fail;
417 }
418
419 ret = memcpy_s(tlv_data->rx_match_filter.data,
420 tlv_data->rx_match_filter.dlen, p_attr,
421 tlv_data->rx_match_filter.dlen);
422 if (ret != BCME_OK) {
423 WL_ERR(("Failed to copy rx match filter data\n"));
424 goto fail;
425 }
426
427 /* advance read pointer */
428 offset = tlv_data->rx_match_filter.dlen;
429 if (offset > len) {
430 WL_ERR(("Invalid event buffer len\n"));
431 ret = BCME_BUFTOOSHORT;
432 goto fail;
433 }
434 p_attr += offset;
435 len -= offset;
436 }
437
438 /* service specific info */
439 if (svc_control & NAN_SC_SVC_INFO_PRESENT) {
440 WL_TRACE(("> svc_control: svc specific info present\n"));
441
442 tlv_data->svc_info.dlen = *p_attr++;
443 WL_TRACE(("> svc info len: 0x%02x\n", tlv_data->svc_info.dlen));
444
445 if (!tlv_data->svc_info.dlen ||
446 tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
447 /* must be able to handle null msg which is not error */
448 tlv_data->svc_info.dlen = 0;
449 WL_ERR(("sde data length is invalid\n"));
450 ret = BCME_BADLEN;
451 goto fail;
452 }
453
454 if (tlv_data->svc_info.dlen > 0) {
455 tlv_data->svc_info.data = MALLOCZ(osh, tlv_data->svc_info.dlen);
456 if (!tlv_data->svc_info.data) {
457 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
458 tlv_data->svc_info.dlen = 0;
459 ret = BCME_NOMEM;
460 goto fail;
461 }
462 ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
463 p_attr, tlv_data->svc_info.dlen);
464 if (ret != BCME_OK) {
465 WL_ERR(("Failed to copy svc info\n"));
466 goto fail;
467 }
468
469 /* advance read pointer */
470 offset = tlv_data->svc_info.dlen;
471 if (offset > len) {
472 WL_ERR(("Invalid event buffer len\n"));
473 ret = BCME_BUFTOOSHORT;
474 goto fail;
475 }
476 p_attr += offset;
477 len -= offset;
478 } else {
479 /* must be able to handle null msg which is not error */
480 tlv_data->svc_info.dlen = 0;
481 WL_TRACE(("%s: svc info length is zero, null info data\n",
482 __FUNCTION__));
483 }
484 }
485
486 /*
487 * discovery range limited:
488 * If set to 1, the pub/sub msg is limited in range to close proximity.
489 * If set to 0, the pub/sub msg is not limited in range.
490 * Valid only when the message is either of a publish or a sub.
491 */
492 if (svc_control & NAN_SC_RANGE_LIMITED) {
493 if (((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) ||
494 ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE)) {
495 WL_TRACE(("> svc_control: range limited present\n"));
496 } else {
497 WL_TRACE(("range limited is only valid on pub or sub\n"));
498 }
499
500 /* send up */
501
502 /* advance read pointer */
503 p_attr++;
504 }
505 return ret;
506 fail:
507 if (tlv_data->tx_match_filter.data) {
508 MFREE(osh, tlv_data->tx_match_filter.data,
509 tlv_data->tx_match_filter.dlen);
510 tlv_data->tx_match_filter.data = NULL;
511 }
512 if (tlv_data->rx_match_filter.data) {
513 MFREE(osh, tlv_data->rx_match_filter.data,
514 tlv_data->rx_match_filter.dlen);
515 tlv_data->rx_match_filter.data = NULL;
516 }
517 if (tlv_data->svc_info.data) {
518 MFREE(osh, tlv_data->svc_info.data, tlv_data->svc_info.dlen);
519 tlv_data->svc_info.data = NULL;
520 }
521
522 WL_DBG(("Parse SDA event data, status = %d\n", ret));
523 return ret;
524 }
525
/* Parse service-discovery attribute data carried in a NAN firmware event.
 *
 * @osh:      osl handle passed to the SDA/SDEA sub-parsers for allocation
 * @len:      number of valid bytes at @data
 * @data:     event payload
 * @tlv_data: output event data, filled in place
 * @type:     WL_NAN_XTLV_SD_* id selecting the payload layout
 *
 * For DISC_RESULTS and FUP_RECEIVED the payload is a fixed header
 * followed by an attribute list that is walked entry by entry; each
 * entry is dispatched to wl_cfgnan_parse_sda_data() or
 * wl_cfgnan_parse_sdea_data() by its leading attribute id byte.
 * Returns BCME_OK or a BCME_* error.
 */
static s32 wl_cfgnan_parse_sd_attr_data(osl_t *osh, uint16 len,
	const uint8 *data,
	nan_event_data_t *tlv_data, uint16 type)
{
	const uint8 *p_attr = data;
	uint16 offset = 0;
	s32 ret = BCME_OK;
	const wl_nan_event_disc_result_t *ev_disc = NULL;
	const wl_nan_event_replied_t *ev_replied = NULL;
	const wl_nan_ev_receive_t *ev_fup = NULL;

	/*
	 * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
	 */
	if (type == WL_NAN_XTLV_SD_DISC_RESULTS) {
		u8 iter;
		ev_disc = (const wl_nan_event_disc_result_t *)p_attr;

		WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));

		/* fixed discovery-result header: ids, rssi, publisher NMI */
		tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
		tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
		tlv_data->publish_rssi = ev_disc->publish_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN, &ev_disc->pub_mac,
			ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
		WL_TRACE(("subscribe d: %d\n", ev_disc->sub_id));
		WL_TRACE(("publish mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_disc->pub_mac.octet)));
		WL_TRACE(("publish rssi: %d\n", (int8)ev_disc->publish_rssi));
		WL_TRACE(("attribute no: %d\n", ev_disc->attr_num));
		WL_TRACE(("attribute len: %d\n", (uint16)ev_disc->attr_list_len));

		/* advance to the service descricptor */
		offset = OFFSETOF(wl_nan_event_disc_result_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;

		/* walk the attribute list; each entry is
		 * <1-byte id><2-byte LE length><payload>.
		 * NOTE(review): the id and length bytes (p_attr[0..2]) are
		 * read before the offset-vs-len check below — assumes at
		 * least 3 bytes remain per advertised attribute; confirm.
		 */
		iter = ev_disc->attr_num;
		while (iter) {
			if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sda_data failed,"
						"error = %d \n",
						ret));
					goto fail;
				}
			}

			if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
						"error = %d \n",
						ret));
					goto fail;
				}
			}
			/* entry size = id byte + 2-byte length field +
			 * little-endian payload length at bytes 1..2
			 */
			offset = (sizeof(*p_attr) + sizeof(ev_disc->attr_list_len) +
				(p_attr[1] | (p_attr[0x2] << 0x8)));
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			iter--;
		}
	} else if (type == WL_NAN_XTLV_SD_FUP_RECEIVED) {
		uint8 iter;
		ev_fup = (const wl_nan_ev_receive_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));

		/* fixed follow-up header: ids, rssi, peer NMI */
		tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
		tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
		tlv_data->fup_rssi = ev_fup->fup_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
			&ev_fup->remote_addr, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("local id: %d\n", ev_fup->local_id));
		WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
		WL_TRACE(("peer mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_fup->remote_addr.octet)));
		WL_TRACE(("peer rssi: %d\n", (int8)ev_fup->fup_rssi));
		WL_TRACE(("attribute no: %d\n", ev_fup->attr_num));
		WL_TRACE(("attribute len: %d\n", ev_fup->attr_list_len));

		/* advance to the service descriptor which is attr_list[0] */
		offset = OFFSETOF(wl_nan_ev_receive_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;

		/* same attribute-list walk as the discovery-result path */
		iter = ev_fup->attr_num;
		while (iter) {
			if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sda_data failed,"
						"error = %d \n",
						ret));
					goto fail;
				}
			}

			if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
						"error = %d \n",
						ret));
					goto fail;
				}
			}
			/* entry size = id byte + 2-byte length field + payload */
			offset = (sizeof(*p_attr) + sizeof(ev_fup->attr_list_len) +
				(p_attr[1] | (p_attr[0x2] << 0x8)));
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			iter--;
		}
	} else if (type == WL_NAN_XTLV_SD_SDF_RX) {
		/*
		 * SDF followed by nan2_pub_act_frame_t and
		 * wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
		 */
		const nan2_pub_act_frame_t *nan_pub_af =
			(const nan2_pub_act_frame_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));

		/* nan2_pub_act_frame_t: trace the action-frame header only */
		WL_TRACE(("pub category: 0x%02x\n", nan_pub_af->category_id));
		WL_TRACE(("pub action: 0x%02x\n", nan_pub_af->action_field));
		WL_TRACE(("nan oui: %2x-%2x-%2x\n", nan_pub_af->oui[0],
			nan_pub_af->oui[1], nan_pub_af->oui[0x2]));
		WL_TRACE(("oui type: 0x%02x\n", nan_pub_af->oui_type));
		WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af->oui_sub_type));

		offset = sizeof(*nan_pub_af);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		/* skip past the action-frame header; remainder not parsed here */
		p_attr += offset;
		len -= offset;
	} else if (type == WL_NAN_XTLV_SD_REPLIED) {
		ev_replied = (const wl_nan_event_replied_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));

		/* fixed replied-event header: ids, rssi, subscriber NMI */
		tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
		tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
		tlv_data->sub_rssi = ev_replied->sub_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
			&ev_replied->sub_mac, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
		WL_TRACE(("subscribe d: %d\n", ev_replied->sub_id));
		WL_TRACE(("Subscriber mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_replied->sub_mac.octet)));
		WL_TRACE(("subscribe rssi: %d\n", (int8)ev_replied->sub_rssi));
		WL_TRACE(("attribute no: %d\n", ev_replied->attr_num));
		WL_TRACE(("attribute len: %d\n", (uint16)ev_replied->attr_list_len));

		/* advance to the service descriptor which is attr_list[0] */
		offset = OFFSETOF(wl_nan_event_replied_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
		/* single SDA follows; failure here is logged but not fatal.
		 * NOTE(review): the log message says "sdea" but this calls
		 * the SDA parser.
		 */
		ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
		if (unlikely(ret)) {
			WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
				"error = %d \n",
				ret));
		}
	}

fail:
	/* shared exit: success and failure both return ret */
	return ret;
}
745
746 /* Based on each case of tlv type id, fill into tlv data */
/* Based on each case of tlv type id, fill into tlv data.
 *
 * xTLV unpack callback invoked once per TLV in a NAN event batch.
 * @ctx:  nan_parse_event_ctx_t carrying cfg (for the osl handle) and the
 *        output nan_event_data_t
 * @data: TLV payload
 * @type: WL_NAN_XTLV_* id of this TLV
 * @len:  payload length in bytes
 *
 * Returns BCME_OK, or a BCME_* error for empty data, allocation/copy
 * failure, or an unhandled TLV type.
 *
 * Fix vs. previous revision: on a svc_info copy failure the allocation
 * is released and dlen zeroed, so the tlv is left in a consistent empty
 * state (matching the SDA/SDEA parser fail paths) rather than holding a
 * partially-filled buffer with a non-zero length.
 */
int wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *data, uint16 type,
	uint16 len)
{
	nan_parse_event_ctx_t *ctx_tlv_data = ((nan_parse_event_ctx_t *)(ctx));
	nan_event_data_t *tlv_data =
		((nan_event_data_t *)(ctx_tlv_data->nan_evt_data));
	int ret = BCME_OK;

	NAN_DBG_ENTER();
	if (!data || !len) {
		WL_ERR(("data length is invalid\n"));
		ret = BCME_ERROR;
		goto fail;
	}

	switch (type) {
	/*
	 * Need to parse service descript attributes including service control,
	 * when Follow up or Discovery result come
	 */
	case WL_NAN_XTLV_SD_FUP_RECEIVED:
	case WL_NAN_XTLV_SD_DISC_RESULTS: {
		ret = wl_cfgnan_parse_sd_attr_data(ctx_tlv_data->cfg->osh, len,
			data, tlv_data, type);
		break;
	}
	case WL_NAN_XTLV_SD_SVC_INFO: {
		/* raw service-specific info: copy the whole payload */
		tlv_data->svc_info.data = MALLOCZ(ctx_tlv_data->cfg->osh, len);
		if (!tlv_data->svc_info.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			tlv_data->svc_info.dlen = 0;
			ret = BCME_NOMEM;
			goto fail;
		}
		tlv_data->svc_info.dlen = len;
		ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
			data, tlv_data->svc_info.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy svc info data\n"));
			/* release the buffer so the caller never sees a
			 * stale allocation paired with a non-zero dlen
			 */
			MFREE(ctx_tlv_data->cfg->osh, tlv_data->svc_info.data,
				tlv_data->svc_info.dlen);
			tlv_data->svc_info.data = NULL;
			tlv_data->svc_info.dlen = 0;
			goto fail;
		}
		break;
	}
	default:
		WL_ERR(("Not available for tlv type = 0x%x\n", type));
		ret = BCME_ERROR;
		break;
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}
799
/* Compute the 32-bit-aligned batch subcommand length for a payload of
 * data_size bytes and verify it fits in the remaining iov buffer.
 *
 * @nan_iov_len: bytes still available in the iov buffer
 * @data_size:   payload size of the subcommand
 * @subcmd_len:  out; receives header + aligned payload size (written
 *               even when the buffer is too small)
 *
 * Returns BCME_OK, BCME_NOMEM when the subcommand would not fit, or
 * BCME_ERROR for a NULL out pointer.
 */
int wl_cfg_nan_check_cmd_len(uint16 nan_iov_len, uint16 data_size,
	uint16 *subcmd_len)
{
	uint16 needed;

	if (subcmd_len == NULL) {
		WL_ERR(("Invalid subcmd_len\n"));
		return BCME_ERROR;
	}

	needed = OFFSETOF(bcm_iov_batch_subcmd_t, data) +
		ALIGN_SIZE(data_size, 0x4);
	*subcmd_len = needed;
	if (needed > nan_iov_len) {
		WL_ERR(("%s: Buf short, requested:%d, available:%d\n", __FUNCTION__,
			needed, nan_iov_len));
		return BCME_NOMEM;
	}
	return BCME_OK;
}
819
/* Configure which WL_NAN_EVENT_* events the firmware delivers to the host.
 *
 * @ndev:           primary net device
 * @cfg:            driver private data
 * @event_ind_flag: framework indication flags (e.g. suppress cluster-join)
 * @disable_events: when true, mask everything except the stop event
 *
 * Flow: enable WLC_E_NAN delivery, then (unless disabling) read the
 * current mask from firmware, adjust the host-required bits, and write
 * the result back via a batched "event_mask" iovar.
 *
 * Fix vs. previous revision: the event_mask clear used
 * WL_NAN_EVMASK_EXTN_VER (a version constant) as the memset_s size, so
 * the WL_NAN_EVMASK_EXTN_LEN-byte array was left mostly uninitialized —
 * notably on the disable path, where the stale stack bytes were then
 * sent to firmware. The clear now covers the full array.
 *
 * Returns BCME_OK or a BCME_* / ioctl error.
 */
int wl_cfgnan_config_eventmask(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint8 event_ind_flag,
	bool disable_events)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	uint32 status;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 event_mask[WL_NAN_EVMASK_EXTN_LEN];
	wl_nan_evmask_extn_t *evmask;
	uint16 evmask_cmd_len;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();

	/* same src and dest len here: clear the whole mask array
	 * (was sized with WL_NAN_EVMASK_EXTN_VER — a typo that left the
	 * mask uninitialized)
	 */
	(void)memset_s(event_mask, WL_NAN_EVMASK_EXTN_LEN, 0,
		WL_NAN_EVMASK_EXTN_LEN);
	evmask_cmd_len =
		OFFSETOF(wl_nan_evmask_extn_t, evmask) + WL_NAN_EVMASK_EXTN_LEN;
	/* turn on WLC_E_NAN delivery before touching the sub-event mask */
	ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
	if (unlikely(ret)) {
		WL_ERR((" nan event enable failed, error = %d \n", ret));
		goto fail;
	}

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size, evmask_cmd_len, &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	/* build the single event_mask subcommand */
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
	sub_cmd->len = sizeof(sub_cmd->u.options) + evmask_cmd_len;
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
	evmask->ver = WL_NAN_EVMASK_EXTN_VER;
	evmask->len = WL_NAN_EVMASK_EXTN_LEN;
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;

	if (disable_events) {
		WL_DBG(("Disabling all nan events..except stop event\n"));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
	} else {
		/*
		 * Android framework event mask configuration:
		 * read the current mask from firmware first, then adjust.
		 */
		nan_buf->is_set = false;
		memset(resp_buf, 0, sizeof(resp_buf));
		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
			(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(
				("get nan event mask failed ret %d status %d \n", ret, status));
			goto fail;
		}
		sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
		evmask = (wl_nan_evmask_extn_t *)sub_cmd_resp->data;

		/* check the response buff */
		/* same src and dest len here */
		(void)memcpy_s(&event_mask, WL_NAN_EVMASK_EXTN_LEN,
			(uint8 *)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN);

		if (event_ind_flag) {
			if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
				WL_DBG(("Need to add disc mac addr change event\n"));
			}
			/* BIT2 - Disable nan cluster join indication (OTA). */
			if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
				clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_MERGE));
			}
		}

		/* events the host always needs */
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISCOVERY_RESULT));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RECEIVE));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TERMINATED));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TXS));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_DATAPATH_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_ESTB));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_END));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_REQ_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_TERM_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISC_CACHE_TIMEOUT));
		/* Disable below events by default */
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF));
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_RPT_IND));
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DW_END));
	}

	/* write the final mask back to firmware */
	nan_buf->is_set = true;
	evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
	/* same src and dest len here */
	(void)memcpy_s((uint8 *)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN,
		&event_mask, WL_NAN_EVMASK_EXTN_LEN);

	nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("set nan event mask failed ret %d status %d \n", ret, status));
		goto fail;
	}
	WL_DBG(("set nan event mask successfull\n"));

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}
949
/*
 * wl_cfgnan_set_nan_avail() - read, and if absent configure, a NAN
 * availability schedule of the given type.
 *
 * First issues a GET of WL_NAN_CMD_CFG_AVAIL. If the firmware reports
 * BCME_NOTFOUND, a default wl_avail_t with one committed entry is built
 * in the same batch buffer and pushed down with a SET. When the firmware
 * supports autodam, the schedule is owned by the firmware and this
 * function is a no-op.
 *
 * @ndev:       net device to issue the "nan" iovar on
 * @cfg:        cfg80211 driver context (allocator handle, 5G capability)
 * @cmd_data:   user-supplied avail parameters (period, duration, bitmap,
 *              peer NMI for ranging)
 * @avail_type: WL_AVAIL_LOCAL/NDC/RANGING etc.
 *
 * Returns BCME_OK on success (or when autodam owns the schedule),
 * a BCME_* error otherwise.
 *
 * Fix vs. original: avail->length, entry->flags and entry->u.channel_info
 * were each endian-converted twice (a no-op on little-endian, a corruption
 * on big-endian hosts), and sub_cmd->len was computed from the already
 * converted avail->length. Each field is now converted exactly once, and
 * sub_cmd->len is derived from the host-endian value.
 */
static int wl_cfgnan_set_nan_avail(struct net_device *ndev,
        struct bcm_cfg80211 *cfg,
        nan_avail_cmd_data *cmd_data,
        uint8 avail_type)
{
    bcm_iov_batch_buf_t *nan_buf = NULL;
    s32 ret = BCME_OK;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    wl_nan_iov_t *nan_iov_data = NULL;
    wl_avail_t *avail = NULL;
    wl_avail_entry_t *entry;    /* used for filling entry structure */
    uint8 *p;                   /* tracking pointer */
    uint8 i;
    u32 status;
    int c;
    /* NAN OUI (50:6f:9a) based NDC id; last two bytes left zero */
    char ndc_id[ETHER_ADDR_LEN] = {0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0};
    dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
    char *a = WL_AVAIL_BIT_MAP;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

    NAN_DBG_ENTER();

    /* Do not disturb avail if dam is supported */
    if (FW_SUPPORTED(dhdp, autodam)) {
        WL_DBG(("DAM is supported, avail modification not allowed\n"));
        return ret;
    }

    if (avail_type < WL_AVAIL_LOCAL || avail_type > WL_AVAIL_TYPE_MAX) {
        WL_ERR(("Invalid availability type\n"));
        ret = BCME_USAGE_ERROR;
        goto fail;
    }

    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
    if (!nan_iov_data) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
    nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len, sizeof(*avail),
            &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }
    avail = (wl_avail_t *)sub_cmd->data;

    /* populate wl_avail_type */
    avail->flags = avail_type;
    if (avail_type == WL_AVAIL_RANGING) {
        ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN, &cmd_data->peer_nmi,
                ETHER_ADDR_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy peer nmi\n"));
            goto fail;
        }
    }

    sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
    sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

    nan_buf->is_set = false;
    nan_buf->count++;
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);

    WL_TRACE(("Read wl nan avail status\n"));

    memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
            (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret)) {
        WL_ERR(("\n Get nan avail failed ret %d, status %d \n", ret, status));
        goto fail;
    }

    if (status == BCME_NOTFOUND) {
        /* Not configured yet: rebuild the batch buffer as a SET with a
         * default schedule.
         */
        nan_buf->count = 0;
        nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
        /* NOTE(review): nan_iov_len was already reduced by this offset for
         * the GET above; subtracting it again matches the original code's
         * sizing but looks like double-accounting - confirm against the
         * firmware's accepted buffer sizes before changing.
         */
        nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

        sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

        avail = (wl_avail_t *)sub_cmd->data;
        p = avail->entry;

        /* populate wl_avail fields */
        avail->length = OFFSETOF(wl_avail_t, entry);
        avail->flags = avail_type;
        avail->num_entries = 0;
        avail->id = 0;
        entry = (wl_avail_entry_t *)p;
        entry->flags = WL_AVAIL_ENTRY_COM;

        /* set default values for optional parameters */
        entry->start_offset = 0;
        entry->u.band = 0;

        if (cmd_data->avail_period) {
            entry->period = cmd_data->avail_period;
        } else {
            entry->period = WL_AVAIL_PERIOD_1024;
        }

        /* usage bits 0x3 = committed; bit duration from caller or 16 TU */
        if (cmd_data->duration != NAN_BAND_INVALID) {
            entry->flags |=
                (0x3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
                (cmd_data->duration << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
        } else {
            entry->flags |=
                (0x3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
                (WL_AVAIL_BIT_DUR_16 << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
        }
        entry->bitmap_len = 0;

        if (avail_type == WL_AVAIL_LOCAL) {
            entry->flags |= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT;
            /* Check for 5g support, based on that choose 5g channel.
             * Endian conversion of channel_info and flags happens exactly
             * once, below; the original converted them here as well, which
             * double-swaps on big-endian hosts.
             */
            if (cfg->support_5g) {
                entry->u.channel_info = wf_channel2chspec(
                        WL_AVAIL_CHANNEL_5G, WL_AVAIL_BANDWIDTH_5G);
            } else {
                entry->u.channel_info = wf_channel2chspec(
                        WL_AVAIL_CHANNEL_2G, WL_AVAIL_BANDWIDTH_2G);
            }
        }

        if (cfg->support_5g) {
            a = WL_5G_AVAIL_BIT_MAP;
        }

        /* point to bitmap value for processing */
        if (cmd_data->bmap) {
            for (c = (WL_NAN_EVENT_CLEAR_BIT - 1); c >= 0; c--) {
                i = cmd_data->bmap >> c;
                if (i & 1) {
                    setbit(entry->bitmap, (WL_NAN_EVENT_CLEAR_BIT - c - 1));
                }
            }
        } else {
            /* NOTE(review): loop bound uses strlen(WL_AVAIL_BIT_MAP) even
             * when 'a' points at WL_5G_AVAIL_BIT_MAP - assumes both maps
             * have the same length; confirm against their definitions.
             */
            for (i = 0; i < strlen(WL_AVAIL_BIT_MAP); i++) {
                if (*a == '1') {
                    setbit(entry->bitmap, i);
                }
                a++;
            }
        }

        /* account for partially filled most significant byte */
        entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
        if (avail_type == WL_AVAIL_NDC) {
            ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN, ndc_id,
                    ETHER_ADDR_LEN);
            if (ret != BCME_OK) {
                WL_ERR(("Failed to copy ndc id\n"));
                goto fail;
            }
        } else if (avail_type == WL_AVAIL_RANGING) {
            ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN, &cmd_data->peer_nmi,
                    ETHER_ADDR_LEN);
            if (ret != BCME_OK) {
                WL_ERR(("Failed to copy peer nmi\n"));
                goto fail;
            }
        }

        /* update wl_avail and populate wl_avail_entry */
        entry->length = OFFSETOF(wl_avail_entry_t, bitmap) + entry->bitmap_len;
        avail->num_entries++;
        avail->length += entry->length;
        /* advance pointer for next entry */
        p += entry->length;

        /* update avail_len only if there are avail entries */
        if (avail->num_entries) {
            nan_iov_data->nan_iov_len -= avail->length;
        }

        sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
        /* compute the sub-command length while avail->length is still in
         * host byte order (the original added the dongle-endian value)
         */
        sub_cmd->len = sizeof(sub_cmd->u.options) + avail->length;
        sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

        /* convert to dongle endianness - each field exactly once */
        entry->length = htod16(entry->length);
        entry->start_offset = htod16(entry->start_offset);
        entry->u.channel_info = htod32(entry->u.channel_info);
        entry->flags = htod16(entry->flags);
        avail->flags = htod16(avail->flags);
        avail->length = htod16(avail->length);

        nan_buf->is_set = true;
        nan_buf->count++;

        /* Reduce the iov_len size by subcmd_len */
        nan_iov_data->nan_iov_len -= subcmd_len;
        nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);

        ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
                (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
        if (unlikely(ret) || unlikely(status)) {
            WL_ERR(
                ("\n set nan avail failed ret %d status %d \n", ret, status));
            ret = status;
            goto fail;
        }
    } else if (status == BCME_OK) {
        WL_DBG(("Avail type [%d] found to be configured\n", avail_type));
    } else {
        WL_ERR(("set nan avail failed ret %d status %d \n", ret, status));
    }

fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }
    if (nan_iov_data) {
        MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
    }

    NAN_DBG_EXIT();
    return ret;
}
1195
/*
 * wl_cfgnan_config_control_flag() - read-modify-write one bit-flag in the
 * firmware's NAN config control word (WL_NAN_CMD_CFG_NAN_CONFIG).
 *
 * Issues a GET of the current control word, ORs in (set == true) or clears
 * (set == false) @flag, then issues a SET of the updated word. The GET and
 * SET reuse the same batch buffer; only is_set and the payload change
 * between the two calls, so the statement order here is load-bearing.
 *
 * @ndev:   net device to issue the "nan" iovar on
 * @cfg:    cfg80211 driver context (allocator handle)
 * @flag:   bitmask to set or clear in the control word
 * @status: out - firmware status of the last executed batch command
 * @set:    true to set @flag, false to clear it
 *
 * Returns BCME_OK on success; on failure returns the ioctl error, with the
 * firmware status (possibly nonzero) reported through @status.
 */
static int wl_cfgnan_config_control_flag(struct net_device *ndev,
        struct bcm_cfg80211 *cfg, uint32 flag,
        uint32 *status, bool set)
{
    bcm_iov_batch_buf_t *nan_buf = NULL;
    s32 ret = BCME_OK;
    uint16 nan_iov_start, nan_iov_end;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
    wl_nan_iov_t *nan_iov_data = NULL;
    uint32 cfg_ctrl;    /* current control word read back from firmware */
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

    NAN_DBG_ENTER();
    WL_INFORM_MEM(
        ("%s: Modifying nan ctrl flag %x val %d", __FUNCTION__, flag, set));
    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
    if (!nan_iov_data) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    /* Set up the batch header and position the first (only) sub-command
     * just past it.
     */
    nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
    nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
    sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len, sizeof(cfg_ctrl),
            &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }

    sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_CONFIG);
    sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cfg_ctrl);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

    /* First pass: GET the current control word */
    nan_buf->is_set = false;
    nan_buf->count++;

    /* Reduce the iov_len size by subcmd_len */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_end = nan_iov_data->nan_iov_len;
    nan_buf_size = (nan_iov_start - nan_iov_end);

    memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
            (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(*status)) {
        WL_ERR(("get nan cfg ctrl failed ret %d status %d \n", ret, *status));
        goto fail;
    }
    sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];

    /* check the response buff: first 4 bytes of the sub-command payload
     * carry the control word
     */
    cfg_ctrl = (*(uint32 *)&sub_cmd_resp->data[0]);
    if (set) {
        cfg_ctrl |= flag;
    } else {
        cfg_ctrl &= ~flag;
    }
    /* Write the modified word back into the request buffer for the SET */
    ret =
        memcpy_s(sub_cmd->data, sizeof(cfg_ctrl), &cfg_ctrl, sizeof(cfg_ctrl));
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy cfg ctrl\n"));
        goto fail;
    }

    /* Second pass: SET, reusing the same header/sizes as the GET */
    nan_buf->is_set = true;
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
            (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(*status)) {
        WL_ERR(("set nan cfg ctrl failed ret %d status %d \n", ret, *status));
        goto fail;
    }
    WL_DBG(("set nan cfg ctrl successfull\n"));
fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }
    if (nan_iov_data) {
        MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
    }

    NAN_DBG_EXIT();
    return ret;
}
1296
/*
 * wl_cfgnan_get_iovars_status() - XTLV unpack callback that extracts the
 * per-sub-command status from a batched "nan" iovar response.
 *
 * Invoked once per sub-command TLV by bcm_unpack_xtlv_buf(); @ctx is the
 * response batch buffer whose 'count' field tracks how many sub-commands
 * remain. The first 4 bytes of each TLV payload carry the status code.
 *
 * Returns the decoded status, BCME_IOV_LAST_CMD once the final sub-command
 * is consumed (which stops the unpack loop), or a BCME_* error.
 *
 * Fix vs. original: 'status' was returned uninitialized when memcpy_s
 * failed; it now defaults to BCME_ERROR so every exit path is defined.
 */
static int wl_cfgnan_get_iovars_status(void *ctx, const uint8 *data,
        uint16 type, uint16 len)
{
    bcm_iov_batch_buf_t *b_resp = (bcm_iov_batch_buf_t *)ctx;
    uint32 status = BCME_ERROR;

    /* if all tlvs are parsed, we should not be here */
    if (b_resp->count == 0) {
        return BCME_BADLEN;
    }

    /* cbfn params may be used in f/w */
    if (len < sizeof(status)) {
        return BCME_BUFTOOSHORT;
    }

    /* first 4 bytes consists status */
    if (memcpy_s(&status, sizeof(status), data, sizeof(uint32)) != BCME_OK) {
        WL_ERR(("Failed to copy status\n"));
        status = BCME_ERROR;
        goto exit;
    }

    status = dtoh32(status);
    /* If status is non zero */
    if (status != BCME_OK) {
        printf("cmd type %d failed, status: %04x\n", type, status);
        goto exit;
    }

    /* count > 0 is guaranteed by the early return above */
    b_resp->count--;

    if (!b_resp->count) {
        status = BCME_IOV_LAST_CMD;
    }
exit:
    return status;
}
1335
/*
 * wl_cfgnan_execute_ioctl() - send a batched "nan" iovar to the firmware
 * and decode the per-sub-command statuses from the response.
 *
 * Uses setbuf or getbuf depending on nan_buf->is_set. For a SET, the
 * response buffer is prefixed with the iovar name ("nan\0"), so the batch
 * response starts after strlen(iov)+1 bytes; for a GET it starts at
 * offset 0. The TLVs are then walked with wl_cfgnan_get_iovars_status(),
 * which folds each sub-command's status into @status.
 *
 * @ndev:          net device to issue the iovar on
 * @nan_buf:       prepared batch request (is_set selects GET vs SET)
 * @nan_buf_size:  number of valid bytes in @nan_buf
 * @status:        out - aggregated firmware status (BCME_OK when every
 *                 sub-command succeeded)
 * @resp_buf:      response buffer, at least @resp_buf_size bytes
 *
 * Returns the wldev ioctl result; firmware-level failures are reported
 * through @status, not the return value.
 */
static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
        struct bcm_cfg80211 *cfg,
        bcm_iov_batch_buf_t *nan_buf,
        uint16 nan_buf_size, uint32 *status,
        uint8 *resp_buf, uint16 resp_buf_size)
{
    int ret = BCME_OK;
    uint16 tlvs_len;
    int res = BCME_OK;
    bcm_iov_batch_buf_t *p_resp = NULL;
    char *iov = "nan";
    int max_resp_len = WLC_IOCTL_MAXLEN;

    WL_DBG(("Enter:\n"));
    if (nan_buf->is_set) {
        ret = wldev_iovar_setbuf(ndev, "nan", nan_buf, nan_buf_size, resp_buf,
                resp_buf_size, NULL);
        /* setbuf echoes the iovar name before the payload */
        p_resp = (bcm_iov_batch_buf_t *)(resp_buf + strlen(iov) + 1);
    } else {
        ret = wldev_iovar_getbuf(ndev, "nan", nan_buf, nan_buf_size, resp_buf,
                resp_buf_size, NULL);
        p_resp = (bcm_iov_batch_buf_t *)(resp_buf);
    }
    if (unlikely(ret)) {
        WL_ERR((" nan execute ioctl failed, error = %d \n", ret));
        goto fail;
    }

    /* mirror the request direction so the status callback knows context */
    p_resp->is_set = nan_buf->is_set;
    /* NOTE(review): tlvs_len is derived from WLC_IOCTL_MAXLEN, not from
     * resp_buf_size - assumes callers always pass buffers of at least
     * WLC_IOCTL_MAXLEN (all in-file callers pass NAN_IOCTL_BUF_SIZE);
     * confirm the two sizes agree before reusing with smaller buffers.
     */
    tlvs_len = max_resp_len - OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    /* Extract the tlvs and print their resp in cb fn */
    res = bcm_unpack_xtlv_buf((void *)p_resp, (const uint8 *)&p_resp->cmds[0],
            tlvs_len, BCM_IOV_CMD_OPT_ALIGN32,
            wl_cfgnan_get_iovars_status);
    if (res == BCME_IOV_LAST_CMD) {
        /* the callback returns this sentinel after the final sub-command;
         * it means complete success, not an error
         */
        res = BCME_OK;
    }
fail:
    *status = res;
    WL_DBG((" nan ioctl ret %d status %d \n", ret, *status));
    return ret;
}
1379
/*
 * wl_cfgnan_if_addr_handler() - append the WL_NAN_CMD_CFG_IF_ADDR
 * sub-command carrying the NAN interface (NMI) MAC address to a batch
 * buffer, and shrink *nan_buf_size by the space consumed.
 */
static int wl_cfgnan_if_addr_handler(void *p_buf, uint16 *nan_buf_size,
        struct ether_addr *if_addr)
{
    /* nan enable */
    s32 ret = BCME_OK;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *addr_cmd;

    NAN_DBG_ENTER();

    /* guard clause: nothing to fill without a destination buffer */
    if (p_buf == NULL) {
        WL_ERR(("nan_iov_buf is NULL\n"));
        ret = BCME_ERROR;
        goto fail;
    }

    addr_cmd = (bcm_iov_batch_subcmd_t *)p_buf;
    ret = wl_cfg_nan_check_cmd_len(*nan_buf_size, sizeof(*if_addr),
            &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }

    /* Fill the sub_command block */
    addr_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
    addr_cmd->len = sizeof(addr_cmd->u.options) + sizeof(*if_addr);
    addr_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    ret = memcpy_s(addr_cmd->data, sizeof(*if_addr), (uint8 *)if_addr,
            sizeof(*if_addr));
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy if addr\n"));
        goto fail;
    }

    /* account for the slot just written */
    *nan_buf_size -= subcmd_len;

fail:
    NAN_DBG_EXIT();
    return ret;
}
1421
wl_cfgnan_get_ver(struct net_device * ndev,struct bcm_cfg80211 * cfg)1422 static int wl_cfgnan_get_ver(struct net_device *ndev, struct bcm_cfg80211 *cfg)
1423 {
1424 bcm_iov_batch_buf_t *nan_buf = NULL;
1425 s32 ret = BCME_OK;
1426 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
1427 wl_nan_ver_t *nan_ver = NULL;
1428 uint16 subcmd_len;
1429 uint32 status;
1430 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1431 bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
1432 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
1433
1434 NAN_DBG_ENTER();
1435 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
1436 if (!nan_buf) {
1437 WL_ERR(("%s: memory allocation failed\n", __func__));
1438 ret = BCME_NOMEM;
1439 goto fail;
1440 }
1441
1442 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
1443 nan_buf->count = 0;
1444 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1445 sub_cmd = (bcm_iov_batch_subcmd_t *)(uint8 *)(&nan_buf->cmds[0]);
1446
1447 ret = wl_cfg_nan_check_cmd_len(nan_buf_size, sizeof(*nan_ver), &subcmd_len);
1448 if (unlikely(ret)) {
1449 WL_ERR(("nan_sub_cmd check failed\n"));
1450 goto fail;
1451 }
1452
1453 nan_ver = (wl_nan_ver_t *)sub_cmd->data;
1454 sub_cmd->id = htod16(WL_NAN_CMD_GLB_NAN_VER);
1455 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nan_ver);
1456 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1457 nan_buf_size -= subcmd_len;
1458 nan_buf->count = 1;
1459
1460 nan_buf->is_set = false;
1461 bzero(resp_buf, sizeof(resp_buf));
1462 nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
1463
1464 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
1465 (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
1466 if (unlikely(ret) || unlikely(status)) {
1467 WL_ERR(("get nan ver failed ret %d status %d \n", ret, status));
1468 goto fail;
1469 }
1470
1471 sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
1472 nan_ver = ((wl_nan_ver_t *)&sub_cmd_resp->data[0]);
1473 if (!nan_ver) {
1474 ret = BCME_NOTFOUND;
1475 WL_ERR(("nan_ver not found: err = %d\n", ret));
1476 goto fail;
1477 }
1478 cfg->nancfg.version = *nan_ver;
1479 WL_INFORM_MEM(("Nan Version is %d\n", cfg->nancfg.version));
1480
1481 fail:
1482 if (nan_buf) {
1483 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
1484 }
1485 NAN_DBG_EXIT();
1486 return ret;
1487 }
1488
wl_cfgnan_set_if_addr(struct bcm_cfg80211 * cfg)1489 static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
1490 {
1491 s32 ret = BCME_OK;
1492 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
1493 uint32 status;
1494 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
1495 struct ether_addr if_addr;
1496 uint8 buf[NAN_IOCTL_BUF_SIZE];
1497 bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t *)buf;
1498 bool rand_mac = cfg->nancfg.mac_rand;
1499
1500 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
1501 nan_buf->count = 0;
1502 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1503 if (rand_mac) {
1504 RANDOM_BYTES(if_addr.octet, 0x6);
1505 /* restore mcast and local admin bits to 0 and 1 */
1506 ETHER_SET_UNICAST(if_addr.octet);
1507 ETHER_SET_LOCALADDR(if_addr.octet);
1508 } else {
1509 /* Use primary MAC with the locally administered bit for the
1510 * NAN NMI I/F
1511 */
1512 if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI, if_addr.octet) !=
1513 BCME_OK) {
1514 ret = -EINVAL;
1515 WL_ERR(("Failed to get mac addr for NMI\n"));
1516 goto fail;
1517 }
1518 }
1519 WL_INFORM_MEM(
1520 ("%s: NMI " MACDBG "\n", __FUNCTION__, MAC2STRDBG(if_addr.octet)));
1521 ret = wl_cfgnan_if_addr_handler(&nan_buf->cmds[0], &nan_buf_size, &if_addr);
1522 if (unlikely(ret)) {
1523 WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
1524 goto fail;
1525 }
1526 nan_buf->count++;
1527 nan_buf->is_set = true;
1528 nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
1529 memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
1530 ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg, nan_buf,
1531 nan_buf_size, &status, (void *)resp_buf,
1532 NAN_IOCTL_BUF_SIZE);
1533 if (unlikely(ret) || unlikely(status)) {
1534 WL_ERR(("nan if addr handler failed ret %d status %d\n", ret, status));
1535 goto fail;
1536 }
1537 ret = memcpy_s(cfg->nan_nmi_mac, ETH_ALEN, if_addr.octet, ETH_ALEN);
1538 if (ret != BCME_OK) {
1539 WL_ERR(("Failed to copy nmi addr\n"));
1540 goto fail;
1541 }
1542 return ret;
1543 fail:
1544 if (!rand_mac) {
1545 wl_release_vif_macaddr(cfg, if_addr.octet, WL_IF_TYPE_NAN_NMI);
1546 }
1547
1548 return ret;
1549 }
1550
/*
 * wl_cfgnan_init_handler() - append the WL_NAN_CMD_CFG_NAN_INIT
 * sub-command (init/deinit flag, one byte on the wire) to a batch buffer
 * and shrink *nan_buf_size by the space consumed.
 */
static int wl_cfgnan_init_handler(void *p_buf, uint16 *nan_buf_size, bool val)
{
    /* nan enable */
    s32 ret = BCME_OK;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *init_cmd;

    NAN_DBG_ENTER();

    /* guard clause: nothing to fill without a destination buffer */
    if (p_buf == NULL) {
        WL_ERR(("nan_iov_buf is NULL\n"));
        ret = BCME_ERROR;
        goto fail;
    }

    init_cmd = (bcm_iov_batch_subcmd_t *)p_buf;
    ret = wl_cfg_nan_check_cmd_len(*nan_buf_size, sizeof(val), &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }

    /* Fill the sub_command block; the flag travels as a single byte */
    init_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
    init_cmd->len = sizeof(init_cmd->u.options) + sizeof(uint8);
    init_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    ret = memcpy_s(init_cmd->data, sizeof(uint8), (uint8 *)&val,
            sizeof(uint8));
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy init value\n"));
        goto fail;
    }

    /* account for the slot just written */
    *nan_buf_size -= subcmd_len;

fail:
    NAN_DBG_EXIT();
    return ret;
}
1590
/*
 * wl_cfgnan_enable_handler() - append the WL_NAN_CMD_CFG_NAN_ENAB
 * sub-command (enable/disable flag, one byte on the wire) at the current
 * position of the iov tracking buffer and advance it.
 */
static int wl_cfgnan_enable_handler(wl_nan_iov_t *nan_iov_data, bool val)
{
    /* nan enable */
    s32 ret;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *enab_cmd =
        (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

    NAN_DBG_ENTER();

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len, sizeof(val),
            &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        return ret;
    }

    /* Fill the sub_command block */
    enab_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
    enab_cmd->len = sizeof(enab_cmd->u.options) + sizeof(uint8);
    enab_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    ret = memcpy_s(enab_cmd->data, sizeof(uint8), (uint8 *)&val,
            sizeof(uint8));
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy enab value\n"));
        return ret;
    }

    /* consume the slot just written and advance the tracking pointer */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_data->nan_iov_buf += subcmd_len;
    NAN_DBG_EXIT();
    return ret;
}
1624
/*
 * wl_cfgnan_warmup_time_handler() - append the WL_NAN_CMD_CFG_WARMUP_TIME
 * sub-command carrying the warm-up tick count from @cmd_data at the
 * current position of the iov tracking buffer and advance it.
 */
static int wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t *cmd_data,
        wl_nan_iov_t *nan_iov_data)
{
    /* wl nan warm_up_time */
    s32 ret;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *wup_cmd =
        (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
    wl_nan_warmup_time_ticks_t *ticks =
        (wl_nan_warmup_time_ticks_t *)wup_cmd->data;

    NAN_DBG_ENTER();

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
            sizeof(*ticks), &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        return ret;
    }

    /* Fill the sub_command block */
    wup_cmd->id = htod16(WL_NAN_CMD_CFG_WARMUP_TIME);
    wup_cmd->len = sizeof(wup_cmd->u.options) + sizeof(*ticks);
    wup_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    *ticks = cmd_data->warmup_time;

    /* consume the slot just written and advance the tracking pointer */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_data->nan_iov_buf += subcmd_len;

    NAN_DBG_EXIT();
    return ret;
}
1656
/*
 * wl_cfgnan_set_election_metric() - append the
 * WL_NAN_CMD_ELECTION_METRICS_CONFIG sub-command (master preference and
 * random factor) and advance the iov tracking buffer.
 *
 * An out-of-range or zero master preference is replaced by a random value
 * in the mobile-device range per the NAN spec.
 */
static int wl_cfgnan_set_election_metric(nan_config_cmd_data_t *cmd_data,
        wl_nan_iov_t *nan_iov_data,
        uint32 nan_attr_mask)
{
    s32 ret = BCME_OK;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd =
        (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
    wl_nan_election_metric_config_t *metrics;

    NAN_DBG_ENTER();

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len, sizeof(*metrics),
            &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }

    metrics = (wl_nan_election_metric_config_t *)sub_cmd->data;

    if (nan_attr_mask & NAN_ATTR_RAND_FACTOR_CONFIG) {
        metrics->random_factor = (uint8)cmd_data->metrics.random_factor;
    }

    if (cmd_data->metrics.master_pref &&
            (cmd_data->metrics.master_pref <= NAN_MAXIMUM_MASTER_PREFERENCE)) {
        metrics->master_pref = (uint8)cmd_data->metrics.master_pref;
    } else {
        WL_TRACE(("Master Pref is 0 or greater than 254, hence sending random "
                "value\n"));
        /* Master pref for mobile devices can be from 1 - 127 as per Spec
         * AppendixC */
        metrics->master_pref =
            (RANDOM32() % (NAN_MAXIMUM_MASTER_PREFERENCE / 2)) + 1;
    }
    sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG);
    sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*metrics);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

    /* consume the slot just written and advance the tracking pointer */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_data->nan_iov_buf += subcmd_len;

fail:
    NAN_DBG_EXIT();
    return ret;
}
1703
/*
 * wl_cfgnan_set_rssi_proximity() - append the
 * WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD sub-command with 2G/5G beacon
 * RSSI notification thresholds and advance the iov tracking buffer.
 *
 * Each band takes the caller's value only when its config bit is present
 * in @nan_attr_mask; otherwise the -70dBm default is used.
 */
static int wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t *cmd_data,
        wl_nan_iov_t *nan_iov_data,
        uint32 nan_attr_mask)
{
    s32 ret = BCME_OK;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd =
        (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
    wl_nan_rssi_notif_thld_t *thld =
        (wl_nan_rssi_notif_thld_t *)sub_cmd->data;

    NAN_DBG_ENTER();

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
            sizeof(*thld), &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        return ret;
    }

    /* Keeping RSSI threshold value to be -70dBm unless overridden */
    thld->bcn_rssi_2g =
        (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG) ?
        cmd_data->rssi_attr.rssi_proximity_2dot4g_val :
        NAN_DEF_RSSI_NOTIF_THRESH;

    thld->bcn_rssi_5g =
        (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG) ?
        cmd_data->rssi_attr.rssi_proximity_5g_val :
        NAN_DEF_RSSI_NOTIF_THRESH;

    sub_cmd->id = htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD);
    sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*thld));
    sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);

    /* consume the slot just written and advance the tracking pointer */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_data->nan_iov_buf += subcmd_len;

    NAN_DBG_EXIT();
    return ret;
}
1751
/*
 * wl_cfgnan_set_rssi_mid_or_close() - append the
 * WL_NAN_CMD_ELECTION_RSSI_THRESHOLD sub-command with the mid/close RSSI
 * thresholds for 2G and 5G and advance the iov tracking buffer.
 *
 * Defaults: mid -75dBm, close -60dBm (both bands); each value is
 * overridden only when the corresponding bit is set in @nan_attr_mask.
 */
static int wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t *cmd_data,
        wl_nan_iov_t *nan_iov_data,
        uint32 nan_attr_mask)
{
    s32 ret = BCME_OK;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd =
        (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
    wl_nan_rssi_thld_t *thld = (wl_nan_rssi_thld_t *)sub_cmd->data;

    NAN_DBG_ENTER();

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
            sizeof(*thld), &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        return ret;
    }

    /*
     * Keeping RSSI mid value -75dBm for both 2G and 5G
     * Keeping RSSI close value -60dBm for both 2G and 5G
     */
    thld->rssi_mid_2g =
        (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_2G_CONFIG) ?
        cmd_data->rssi_attr.rssi_middle_2dot4g_val : NAN_DEF_RSSI_MID;

    thld->rssi_mid_5g =
        (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_5G_CONFIG) ?
        cmd_data->rssi_attr.rssi_middle_5g_val : NAN_DEF_RSSI_MID;

    thld->rssi_close_2g =
        (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_CONFIG) ?
        cmd_data->rssi_attr.rssi_close_2dot4g_val : NAN_DEF_RSSI_CLOSE;

    thld->rssi_close_5g =
        (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_5G_CONFIG) ?
        cmd_data->rssi_attr.rssi_close_5g_val : NAN_DEF_RSSI_CLOSE;

    sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD);
    sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*thld));
    sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);

    /* consume the slot just written and advance the tracking pointer */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_data->nan_iov_buf += subcmd_len;

    NAN_DBG_EXIT();
    return ret;
}
1810
/*
 * check_for_valid_5gchan() - ask the firmware ("per_chan_info") whether
 * @chan is usable: valid in hardware and permitted by the current locale.
 *
 * Returns BCME_OK when usable, BCME_BADCHAN when not, or the iovar error.
 */
static int check_for_valid_5gchan(struct net_device *ndev, uint8 chan)
{
    s32 ret;
    uint bitmap;
    uint32 chanspec_arg;
    u8 ioctl_buf[WLC_IOCTL_SMLEN];

    NAN_DBG_ENTER();

    /* query as a 20MHz chanspec, in driver byte order */
    chanspec_arg = wl_chspec_host_to_driver(CH20MHZ_CHSPEC(chan));
    memset_s(ioctl_buf, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
    ret = wldev_iovar_getbuf(ndev, "per_chan_info", (void *)&chanspec_arg,
            sizeof(chanspec_arg), ioctl_buf, WLC_IOCTL_SMLEN,
            NULL);
    if (ret != BCME_OK) {
        WL_ERR(("Chaninfo for channel = %d, error %d\n", chan, ret));
        goto exit;
    }

    /* response is a capability bitmap for the channel */
    bitmap = dtoh32(*(uint *)ioctl_buf);
    if (!(bitmap & WL_CHAN_VALID_HW)) {
        WL_ERR(("Invalid channel\n"));
        ret = BCME_BADCHAN;
    } else if (!(bitmap & WL_CHAN_VALID_SW)) {
        WL_ERR(("Not supported in current locale\n"));
        ret = BCME_BADCHAN;
    }
exit:
    NAN_DBG_EXIT();
    return ret;
}
1846
/*
 * wl_cfgnan_set_nan_soc_chans() - append the WL_NAN_CMD_SYNC_SOCIAL_CHAN
 * sub-command selecting the 2G/5G NAN social channels and advance the
 * iov tracking buffer.
 *
 * The 5G channel is validated against the current locale; if both the
 * requested/default and the secondary default are unusable, 5G is
 * disabled (channel 0) and operation continues on 2G only.
 */
static int wl_cfgnan_set_nan_soc_chans(struct net_device *ndev,
        nan_config_cmd_data_t *cmd_data,
        wl_nan_iov_t *nan_iov_data,
        uint32 nan_attr_mask)
{
    s32 ret = BCME_OK;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd =
        (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
    wl_nan_social_channels_t *chans =
        (wl_nan_social_channels_t *)sub_cmd->data;

    NAN_DBG_ENTER();

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
            sizeof(*chans), &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        return ret;
    }

    sub_cmd->id = htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN);
    sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*chans);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

    /* 2G: caller override via chanspec[1], else channel 6 default */
    chans->soc_chan_2g = (nan_attr_mask & NAN_ATTR_2G_CHAN_CONFIG) ?
        cmd_data->chanspec[1] : NAN_DEF_SOCIAL_CHAN_2G;

    if (cmd_data->support_5g) {
        /* 5G: caller override via chanspec[2], else default; then verify
         * against hardware/locale, falling back to the secondary default
         */
        chans->soc_chan_5g = (nan_attr_mask & NAN_ATTR_5G_CHAN_CONFIG) ?
            cmd_data->chanspec[2] : NAN_DEF_SOCIAL_CHAN_5G;
        if (check_for_valid_5gchan(ndev, chans->soc_chan_5g) != BCME_OK) {
            if (check_for_valid_5gchan(ndev, NAN_DEF_SEC_SOCIAL_CHAN_5G) ==
                    BCME_OK) {
                chans->soc_chan_5g = NAN_DEF_SEC_SOCIAL_CHAN_5G;
            } else {
                chans->soc_chan_5g = 0;
                WL_ERR(("Current locale doesn't support 5G op"
                        "continuing with 2G only operation\n"));
            }
        }
    } else {
        WL_DBG(("5G support is disabled\n"));
    }

    /* consume the slot just written and advance the tracking pointer */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_data->nan_iov_buf += subcmd_len;

    NAN_DBG_EXIT();
    return ret;
}
1905
/*
 * Build and fire a WL_NAN_CMD_CFG_SCAN_PARAMS batch iovar for one band.
 * band_index 0 selects the 2G entries of cmd_data->dwell_time[]/scan_period[],
 * any non-zero value selects the 5G entries. Dwell times are padded by
 * NAN_SCAN_DWELL_TIME_DELTA_MS before being sent to firmware. Unlike the
 * other sub-command builders, this one executes its own ioctl instead of
 * appending to a caller-owned batch. Returns BCME_OK or a BCME_* error.
 */
static int wl_cfgnan_set_nan_scan_params(struct net_device *ndev,
                                         struct bcm_cfg80211 *cfg,
                                         nan_config_cmd_data_t *cmd_data,
                                         uint8 band_index, uint32 nan_attr_mask)
{
  bcm_iov_batch_buf_t *batch_buf = NULL;
  wl_nan_iov_t *iov_ctx = NULL;
  bcm_iov_batch_subcmd_t *scmd = NULL;
  wl_nan_scan_params_t *scan_params = NULL;
  uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  uint16 iov_len_start, iov_len_left;
  uint16 payload_len = NAN_IOCTL_BUF_SIZE;
  uint16 subcmd_len;
  uint32 status;
  uint32 dwell_mask, period_mask;
  uint8 idx;
  s32 ret = BCME_OK;

  NAN_DBG_ENTER();

  batch_buf = MALLOCZ(cfg->osh, payload_len);
  if (!batch_buf) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    ret = BCME_NOMEM;
    goto fail;
  }

  iov_ctx = MALLOCZ(cfg->osh, sizeof(*iov_ctx));
  if (!iov_ctx) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    ret = BCME_NOMEM;
    goto fail;
  }

  /* Initialize the batch header and position the cursor at the first cmd. */
  iov_ctx->nan_iov_len = iov_len_start = NAN_IOCTL_BUF_SIZE;
  batch_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  batch_buf->count = 0;
  iov_ctx->nan_iov_buf = (uint8 *)(&batch_buf->cmds[0]);
  iov_ctx->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  scmd = (bcm_iov_batch_subcmd_t *)(iov_ctx->nan_iov_buf);

  ret = wl_cfg_nan_check_cmd_len(iov_ctx->nan_iov_len, sizeof(*scan_params),
                                 &subcmd_len);
  if (unlikely(ret)) {
    WL_ERR(("nan_sub_cmd check failed\n"));
    goto fail;
  }
  scan_params = (wl_nan_scan_params_t *)scmd->data;

  scmd->id = htod16(WL_NAN_CMD_CFG_SCAN_PARAMS);
  scmd->len = sizeof(scmd->u.options) + sizeof(*scan_params);
  scmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

  /* Pick the attribute masks and the array slot that match the band.
   * Fw defaults: 2G dwell time 210, 2G scan period 10.
   */
  if (band_index == 0) {
    dwell_mask = NAN_ATTR_2G_DWELL_TIME_CONFIG;
    period_mask = NAN_ATTR_2G_SCAN_PERIOD_CONFIG;
    idx = 0;
  } else {
    dwell_mask = NAN_ATTR_5G_DWELL_TIME_CONFIG;
    period_mask = NAN_ATTR_5G_SCAN_PERIOD_CONFIG;
    idx = 1;
  }

  if ((nan_attr_mask & dwell_mask) && cmd_data->dwell_time[idx]) {
    scan_params->dwell_time =
        cmd_data->dwell_time[idx] + NAN_SCAN_DWELL_TIME_DELTA_MS;
  }
  if (nan_attr_mask & period_mask) {
    scan_params->scan_period = cmd_data->scan_period[idx];
  }
  scan_params->band_index = band_index;
  batch_buf->is_set = true;
  batch_buf->count++;

  /* Reduce the iov_len size by subcmd_len */
  iov_ctx->nan_iov_len -= subcmd_len;
  iov_len_left = iov_ctx->nan_iov_len;
  /* Bytes actually consumed = start length minus what remains. */
  payload_len = (iov_len_start - iov_len_left);

  memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
  ret = wl_cfgnan_execute_ioctl(ndev, cfg, batch_buf, payload_len, &status,
                                (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
  if (unlikely(ret) || unlikely(status)) {
    WL_ERR(("set nan scan params failed ret %d status %d \n", ret, status));
    goto fail;
  }
  WL_DBG(("set nan scan params successfull\n"));
fail:
  if (batch_buf) {
    MFREE(cfg->osh, batch_buf, NAN_IOCTL_BUF_SIZE);
  }
  if (iov_ctx) {
    MFREE(cfg->osh, iov_ctx, sizeof(*iov_ctx));
  }

  NAN_DBG_EXIT();
  return ret;
}
2006
/*
 * Append a WL_NAN_CMD_CFG_CID sub-command carrying the cluster ID to the
 * caller's batch iov buffer. The first four octets are forced to the NAN
 * cluster OUI prefix (50:6F:9A:01); octets 4 and 5 (cluster low/high) are
 * left as supplied by the caller in cmd_data->clus_id.
 * NOTE(review): the length check uses sizeof(clus_id) - sizeof(uint8) while
 * the copy uses the full sizeof(clus_id) — kept as-is to match the original
 * wire layout; confirm against wl_cfg_nan_check_cmd_len() padding rules.
 */
static int wl_cfgnan_set_cluster_id(nan_config_cmd_data_t *cmd_data,
                                    wl_nan_iov_t *nan_iov_data)
{
  /* NAN cluster OUI prefix written into the first four octets. */
  const uint8 cid_prefix[] = {0x50, 0x6F, 0x9A, 0x01};
  bcm_iov_batch_subcmd_t *iov_cmd;
  uint16 subcmd_len;
  uint32 i;
  s32 ret;

  NAN_DBG_ENTER();

  iov_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

  ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
                                 (sizeof(cmd_data->clus_id) - sizeof(uint8)),
                                 &subcmd_len);
  if (unlikely(ret)) {
    WL_ERR(("nan_sub_cmd check failed\n"));
    return ret;
  }

  for (i = 0; i < ARRAYSIZE(cid_prefix); i++) {
    cmd_data->clus_id.octet[i] = cid_prefix[i];
  }
  WL_TRACE(
      ("cluster_id = " MACDBG "\n", MAC2STRDBG(cmd_data->clus_id.octet)));

  iov_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
  iov_cmd->len = sizeof(iov_cmd->u.options) + sizeof(cmd_data->clus_id);
  iov_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  ret = memcpy_s(iov_cmd->data, sizeof(cmd_data->clus_id),
                 (uint8 *)&cmd_data->clus_id, sizeof(cmd_data->clus_id));
  if (ret != BCME_OK) {
    WL_ERR(("Failed to copy clus id\n"));
    return ret;
  }

  /* Advance the batch cursor past this sub-command. */
  nan_iov_data->nan_iov_len -= subcmd_len;
  nan_iov_data->nan_iov_buf += subcmd_len;

  NAN_DBG_EXIT();
  return ret;
}
2049
/*
 * Append a WL_NAN_CMD_CFG_HOP_LIMIT sub-command (anchor-master hop count
 * limit) to the caller's batch iov buffer and advance the cursor.
 * Returns BCME_OK, or an error if the remaining buffer is too small.
 */
static int wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t *cmd_data,
                                         wl_nan_iov_t *nan_iov_data)
{
  bcm_iov_batch_subcmd_t *iov_cmd;
  wl_nan_hop_count_t *hop_cnt;
  uint16 subcmd_len;
  s32 ret;

  NAN_DBG_ENTER();

  iov_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

  /* Verify there is room for the payload before touching it. */
  ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len, sizeof(*hop_cnt),
                                 &subcmd_len);
  if (unlikely(ret)) {
    WL_ERR(("nan_sub_cmd check failed\n"));
    return ret;
  }

  hop_cnt = (wl_nan_hop_count_t *)iov_cmd->data;
  *hop_cnt = cmd_data->hop_count_limit;

  iov_cmd->id = htod16(WL_NAN_CMD_CFG_HOP_LIMIT);
  iov_cmd->len = sizeof(iov_cmd->u.options) + sizeof(*hop_cnt);
  iov_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

  /* Advance the batch cursor past this sub-command. */
  nan_iov_data->nan_iov_len -= subcmd_len;
  nan_iov_data->nan_iov_buf += subcmd_len;

  NAN_DBG_EXIT();
  return ret;
}
2081
/*
 * Append a WL_NAN_CMD_CFG_SID_BEACON sub-command (service-ID inclusion in
 * beacons) to the caller's batch iov buffer. sid_count / sub_sid_count are
 * only copied when the corresponding attribute bit is set in nan_attr_mask;
 * otherwise firmware defaults apply.
 */
static int wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t *cmd_data,
                                        wl_nan_iov_t *nan_iov_data,
                                        uint32 nan_attr_mask)
{
  bcm_iov_batch_subcmd_t *iov_cmd;
  wl_nan_sid_beacon_control_t *sidb;
  uint16 subcmd_len;
  s32 ret;

  NAN_DBG_ENTER();

  iov_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

  ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len, sizeof(*sidb),
                                 &subcmd_len);
  if (unlikely(ret)) {
    WL_ERR(("nan_sub_cmd check failed\n"));
    return ret;
  }

  iov_cmd->id = htod16(WL_NAN_CMD_CFG_SID_BEACON);
  iov_cmd->len = sizeof(iov_cmd->u.options) + sizeof(*sidb);
  iov_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

  sidb = (wl_nan_sid_beacon_control_t *)iov_cmd->data;
  sidb->sid_enable = cmd_data->sid_beacon.sid_enable;
  /* Need to have separate flag for sub beacons
   * sidb->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
   */
  if (nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) {
    /* Limit for number of publish SIDs to be included in Beacons */
    sidb->sid_count = cmd_data->sid_beacon.sid_count;
  }
  if (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG) {
    /* Limit for number of subscribe SIDs to be included in Beacons */
    sidb->sub_sid_count = cmd_data->sid_beacon.sub_sid_count;
  }

  /* Advance the batch cursor past this sub-command. */
  nan_iov_data->nan_iov_len -= subcmd_len;
  nan_iov_data->nan_iov_buf += subcmd_len;
  NAN_DBG_EXIT();
  return ret;
}
2124
/*
 * Append a WL_NAN_CMD_CFG_OUI sub-command carrying the vendor OUI from
 * cmd_data->nan_oui to the caller's batch iov buffer and advance the
 * cursor. Returns BCME_OK or an error from the length check / copy.
 */
static int wl_cfgnan_set_nan_oui(nan_config_cmd_data_t *cmd_data,
                                 wl_nan_iov_t *nan_iov_data)
{
  bcm_iov_batch_subcmd_t *iov_cmd;
  uint16 subcmd_len;
  s32 ret;

  NAN_DBG_ENTER();

  iov_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

  ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
                                 sizeof(cmd_data->nan_oui), &subcmd_len);
  if (unlikely(ret)) {
    WL_ERR(("nan_sub_cmd check failed\n"));
    return ret;
  }

  ret = memcpy_s(iov_cmd->data, sizeof(cmd_data->nan_oui),
                 (uint32 *)&cmd_data->nan_oui, sizeof(cmd_data->nan_oui));
  if (ret != BCME_OK) {
    WL_ERR(("Failed to copy nan oui\n"));
    return ret;
  }

  iov_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
  iov_cmd->len = sizeof(iov_cmd->u.options) + sizeof(cmd_data->nan_oui);
  iov_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

  /* Advance the batch cursor past this sub-command. */
  nan_iov_data->nan_iov_len -= subcmd_len;
  nan_iov_data->nan_iov_buf += subcmd_len;
  NAN_DBG_EXIT();
  return ret;
}
2158
/*
 * Append a WL_NAN_CMD_SYNC_AWAKE_DWS sub-command (awake discovery-window
 * intervals for 2G/5G) to the caller's batch iov buffer.
 *
 * Side effects beyond the batch buffer: depending on the 5G DW interval it
 * also issues immediate "nan control flag" iovars to enable or disable the
 * 5G sync/discovery beacon transmission in firmware. A 5G DW interval of 0
 * means "no 5G discovery windows", so 5G beaconing is switched off; if the
 * host did not configure 5G DWs at all, the fw default interval is used and
 * 5G beaconing is switched on. Callers must check both the return value and
 * cmd_data->status (firmware status of the control-flag calls).
 *
 * On a control-flag failure the function returns without appending the
 * sub-command or advancing the iov cursor.
 */
static int wl_cfgnan_set_awake_dws(struct net_device *ndev,
                                   nan_config_cmd_data_t *cmd_data,
                                   wl_nan_iov_t *nan_iov_data,
                                   struct bcm_cfg80211 *cfg,
                                   uint32 nan_attr_mask)
{
  s32 ret = BCME_OK;
  bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  wl_nan_awake_dws_t *awake_dws = NULL;
  uint16 subcmd_len;
  NAN_DBG_ENTER();

  sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
  /* Verify there is room in the batch buffer for this payload. */
  ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
                                 sizeof(*awake_dws), &subcmd_len);
  if (unlikely(ret)) {
    WL_ERR(("nan_sub_cmd check failed\n"));
    return ret;
  }

  awake_dws = (wl_nan_awake_dws_t *)sub_cmd->data;

  if (nan_attr_mask & NAN_ATTR_2G_DW_CONFIG) {
    awake_dws->dw_interval_2g = cmd_data->awake_dws.dw_interval_2g;
    if (!awake_dws->dw_interval_2g) {
      /* Set 2G awake dw value to fw default value 1 */
      awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
    }
  } else {
    /* Set 2G awake dw value to fw default value 1 */
    awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
  }

  /* cfg->support_5g is captured earlier from the soc-chans setup. */
  if (cfg->support_5g) {
    if (nan_attr_mask & NAN_ATTR_5G_DW_CONFIG) {
      awake_dws->dw_interval_5g = cmd_data->awake_dws.dw_interval_5g;
      if (!awake_dws->dw_interval_5g) {
        /* disable 5g beacon ctrls */
        ret = wl_cfgnan_config_control_flag(
            ndev, cfg, WL_NAN_CTRL_DISC_BEACON_TX_5G,
            &(cmd_data->status), 0);
        if (unlikely(ret) || unlikely(cmd_data->status)) {
          WL_ERR((" nan control set config handler,"
                  " ret = %d status = %d \n",
                  ret, cmd_data->status));
          goto fail;
        }
        ret = wl_cfgnan_config_control_flag(
            ndev, cfg, WL_NAN_CTRL_SYNC_BEACON_TX_5G,
            &(cmd_data->status), 0);
        if (unlikely(ret) || unlikely(cmd_data->status)) {
          WL_ERR((" nan control set config handler,"
                  " ret = %d status = %d \n",
                  ret, cmd_data->status));
          goto fail;
        }
      }
    } else {
      /* Set 5G awake dw value to fw default value 1 */
      awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
      /* 5G DWs active with the default interval: turn 5G beaconing on. */
      ret = wl_cfgnan_config_control_flag(
          ndev, cfg,
          WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G,
          &(cmd_data->status), TRUE);
      if (unlikely(ret) || unlikely(cmd_data->status)) {
        WL_ERR((" nan control set config handler, ret = %d"
                " status = %d \n",
                ret, cmd_data->status));
        goto fail;
      }
    }
  }

  sub_cmd->id = htod16(WL_NAN_CMD_SYNC_AWAKE_DWS);
  sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*awake_dws);
  sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

  /* Advance the batch cursor past this sub-command. */
  nan_iov_data->nan_iov_len -= subcmd_len;
  nan_iov_data->nan_iov_buf += subcmd_len;

fail:
  NAN_DBG_EXIT();
  return ret;
}
2243
/*
 * Enable NAN in firmware and bring up the host-side NAN state.
 *
 * Sequence: initialize NAN (NMI address, IOV version) under if_sync +
 * nan_mutex, create any user-requested NDIs, then build a single batched
 * iovar (warm-up time, election metrics, social channels, optional cluster
 * ID / hop limit / SID beacon / OUI, awake DWs, enable) and execute it.
 * After the firmware confirms with WL_NAN_EVENT_START, auto-dpconf and
 * proprietary rates are enabled, and the NDP peer table is allocated based
 * on firmware capabilities.
 *
 * On any failure (ret != BCME_OK or a non-zero firmware status in
 * cmd_data->status) the fail path deletes any NAN interfaces that were
 * created and leaves cfg->nan_enable false.
 *
 * Returns BCME_OK on success; callers must also check cmd_data->status.
 */
int wl_cfgnan_start_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
                            nan_config_cmd_data_t *cmd_data,
                            uint32 nan_attr_mask)
{
  s32 ret = BCME_OK;
  uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  bcm_iov_batch_buf_t *nan_buf = NULL;
  wl_nan_iov_t *nan_iov_data = NULL;
  dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
  uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  int i;
  s32 timeout = 0;
  nan_hal_capabilities_t capabilities;

  NAN_DBG_ENTER();

  /* Protect discovery creation. Ensure proper mutex precedence.
   * If if_sync & nan_mutex comes together in same context, nan_mutex
   * should follow if_sync.
   */
  mutex_lock(&cfg->if_sync);
  NAN_MUTEX_LOCK();

  if (!dhdp->up) {
    WL_ERR(("bus is already down, hence blocking nan start\n"));
    ret = BCME_ERROR;
    NAN_MUTEX_UNLOCK();
    mutex_unlock(&cfg->if_sync);
    goto fail;
  }

#ifdef WL_IFACE_MGMT
  if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN_NMI)) !=
      BCME_OK) {
    WL_ERR(("Conflicting iface is present, cant support nan\n"));
    NAN_MUTEX_UNLOCK();
    mutex_unlock(&cfg->if_sync);
    goto fail;
  }
#endif /* WL_IFACE_MGMT */

  WL_INFORM_MEM(("Initializing NAN\n"));
  ret = wl_cfgnan_init(cfg);
  if (ret != BCME_OK) {
    WL_ERR(("failed to initialize NAN[%d]\n", ret));
    NAN_MUTEX_UNLOCK();
    mutex_unlock(&cfg->if_sync);
    goto fail;
  }

  ret = wl_cfgnan_get_ver(ndev, cfg);
  if (ret != BCME_OK) {
    WL_ERR(("failed to Nan IOV version[%d]\n", ret));
    NAN_MUTEX_UNLOCK();
    mutex_unlock(&cfg->if_sync);
    goto fail;
  }

  /* set nmi addr */
  ret = wl_cfgnan_set_if_addr(cfg);
  if (ret != BCME_OK) {
    WL_ERR(("Failed to set nmi address \n"));
    NAN_MUTEX_UNLOCK();
    mutex_unlock(&cfg->if_sync);
    goto fail;
  }
  cfg->nancfg.nan_event_recvd = false;
  NAN_MUTEX_UNLOCK();
  mutex_unlock(&cfg->if_sync);

  for (i = 0; i < NAN_MAX_NDI; i++) {
    /* Create NDI using the information provided by user space */
    if (cfg->nancfg.ndi[i].in_use && !cfg->nancfg.ndi[i].created) {
      ret = wl_cfgnan_data_path_iface_create_delete_handler(
          ndev, cfg, cfg->nancfg.ndi[i].ifname,
          NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
      if (ret) {
        WL_ERR(("failed to create ndp interface [%d]\n", ret));
        goto fail;
      }
      cfg->nancfg.ndi[i].created = true;
    }
  }

  nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  if (!nan_buf) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    ret = BCME_NOMEM;
    goto fail;
  }

  nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
  if (!nan_iov_data) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    ret = BCME_NOMEM;
    goto fail;
  }

  nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
  nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  nan_buf->count = 0;
  nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
  nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

  if (nan_attr_mask & NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG) {
    /* config sync/discovery beacons on 2G band */
    /* 2g is mandatory */
    if (!cmd_data->beacon_2g_val) {
      WL_ERR(("Invalid NAN config...2G is mandatory\n"));
      ret = BCME_BADARG;
      /* Bug fix: previously fell through and the next call
       * overwrote ret, silently accepting the invalid config.
       */
      goto fail;
    }
    ret = wl_cfgnan_config_control_flag(ndev, cfg,
                                        WL_NAN_CTRL_DISC_BEACON_TX_2G |
                                            WL_NAN_CTRL_SYNC_BEACON_TX_2G,
                                        &(cmd_data->status), TRUE);
    if (unlikely(ret) || unlikely(cmd_data->status)) {
      WL_ERR((" nan control set config handler, ret = %d status = %d \n",
              ret, cmd_data->status));
      goto fail;
    }
  }
  if (nan_attr_mask & NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG) {
    /* config sync/discovery beacons on 5G band */
    ret = wl_cfgnan_config_control_flag(
        ndev, cfg,
        WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G,
        &(cmd_data->status), cmd_data->beacon_5g_val);
    if (unlikely(ret) || unlikely(cmd_data->status)) {
      WL_ERR((" nan control set config handler, ret = %d status = %d \n",
              ret, cmd_data->status));
      goto fail;
    }
  }
  /* Setting warm up time */
  cmd_data->warmup_time = 1;
  if (cmd_data->warmup_time) {
    ret = wl_cfgnan_warmup_time_handler(cmd_data, nan_iov_data);
    if (unlikely(ret)) {
      WL_ERR(("warm up time handler sub_cmd set failed\n"));
      goto fail;
    }
    nan_buf->count++;
  }
  /* setting master preference and random factor */
  ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data, nan_attr_mask);
  if (unlikely(ret)) {
    WL_ERR(("election_metric sub_cmd set failed\n"));
    goto fail;
  } else {
    nan_buf->count++;
  }

  /* setting nan social channels */
  ret = wl_cfgnan_set_nan_soc_chans(ndev, cmd_data, nan_iov_data,
                                    nan_attr_mask);
  if (unlikely(ret)) {
    WL_ERR(("nan social channels set failed\n"));
    goto fail;
  } else {
    /* Storing 5g capability which is reqd for avail chan config. */
    cfg->support_5g = cmd_data->support_5g;
    nan_buf->count++;
  }

  if ((cmd_data->support_2g) &&
      ((cmd_data->dwell_time[0]) || (cmd_data->scan_period[0]))) {
    /* setting scan params */
    ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0,
                                        nan_attr_mask);
    if (unlikely(ret)) {
      WL_ERR(("scan params set failed for 2g\n"));
      goto fail;
    }
  }

  if ((cmd_data->support_5g) &&
      ((cmd_data->dwell_time[1]) || (cmd_data->scan_period[1]))) {
    /* setting scan params; support_5g is non-zero here so it selects
     * the 5G band index.
     */
    ret = wl_cfgnan_set_nan_scan_params(
        ndev, cfg, cmd_data, cmd_data->support_5g, nan_attr_mask);
    if (unlikely(ret)) {
      WL_ERR(("scan params set failed for 5g\n"));
      goto fail;
    }
  }

  /*
   * A cluster_low value matching cluster_high indicates a request
   * to join a cluster with that value.
   * If the requested cluster is not found the
   * device will start its own cluster
   */
  /* For Debug purpose, using clust id compulsion */
  if (!ETHER_ISNULLADDR(&cmd_data->clus_id.octet)) {
    if (cmd_data->clus_id.octet[0x4] == cmd_data->clus_id.octet[0x5]) {
      /* device will merge to configured CID only */
      ret = wl_cfgnan_config_control_flag(ndev, cfg,
                                          WL_NAN_CTRL_MERGE_CONF_CID_ONLY,
                                          &(cmd_data->status), true);
      if (unlikely(ret) || unlikely(cmd_data->status)) {
        WL_ERR(
            (" nan control set config handler, ret = %d status = %d \n",
             ret, cmd_data->status));
        goto fail;
      }
    }
    /* setting cluster ID */
    ret = wl_cfgnan_set_cluster_id(cmd_data, nan_iov_data);
    if (unlikely(ret)) {
      WL_ERR(("cluster_id sub_cmd set failed\n"));
      goto fail;
    }
    nan_buf->count++;
  }

  /* setting rssi proximaty values for 2.4GHz and 5GHz */
  ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
  if (unlikely(ret)) {
    WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
    goto fail;
  } else {
    nan_buf->count++;
  }

  /* setting rssi middle/close values for 2.4GHz and 5GHz */
  ret =
      wl_cfgnan_set_rssi_mid_or_close(cmd_data, nan_iov_data, nan_attr_mask);
  if (unlikely(ret)) {
    WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
    goto fail;
  } else {
    nan_buf->count++;
  }

  /* setting hop count limit or threshold */
  if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
    ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
    if (unlikely(ret)) {
      WL_ERR(("hop_count_limit sub_cmd set failed\n"));
      goto fail;
    }
    nan_buf->count++;
  }

  /* setting sid beacon val */
  if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
      (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
    ret =
        wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
    if (unlikely(ret)) {
      WL_ERR(("sid_beacon sub_cmd set failed\n"));
      goto fail;
    }
    nan_buf->count++;
  }

  /* setting nan oui */
  if (nan_attr_mask & NAN_ATTR_OUI_CONFIG) {
    ret = wl_cfgnan_set_nan_oui(cmd_data, nan_iov_data);
    if (unlikely(ret)) {
      WL_ERR(("nan_oui sub_cmd set failed\n"));
      goto fail;
    }
    nan_buf->count++;
  }

  /* setting nan awake dws */
  ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data, cfg,
                                nan_attr_mask);
  if (unlikely(ret)) {
    WL_ERR(("nan awake dws set failed\n"));
    goto fail;
  } else {
    nan_buf->count++;
  }

  /* enable events */
  ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
  if (unlikely(ret)) {
    WL_ERR(
        ("Failed to config disc ind flag in event_mask, ret = %d\n", ret));
    goto fail;
  }

  /* setting nan enable sub_cmd */
  ret = wl_cfgnan_enable_handler(nan_iov_data, true);
  if (unlikely(ret)) {
    WL_ERR(("enable handler sub_cmd set failed\n"));
    goto fail;
  }
  nan_buf->count++;
  nan_buf->is_set = true;

  nan_buf_size -= nan_iov_data->nan_iov_len;
  /* Use memset_s for consistency with the rest of this file. */
  memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
  /* Reset conditon variable */
  ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
                                &(cmd_data->status), (void *)resp_buf,
                                NAN_IOCTL_BUF_SIZE);
  if (unlikely(ret) || unlikely(cmd_data->status)) {
    WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
            ret, cmd_data->status));
    goto fail;
  }

  /* Wait for firmware to acknowledge the enable with an event. */
  timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
                               cfg->nancfg.nan_event_recvd,
                               msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
  if (!timeout) {
    WL_ERR(("Timed out while Waiting for WL_NAN_EVENT_START event !!!\n"));
    ret = BCME_ERROR;
    goto fail;
  }

  /* If set, auto datapath confirms will be sent by FW */
  ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_AUTO_DPCONF,
                                      &(cmd_data->status), true);
  if (unlikely(ret) || unlikely(cmd_data->status)) {
    WL_ERR((" nan control set config handler, ret = %d status = %d \n", ret,
            cmd_data->status));
    goto fail;
  }

  /* By default set NAN proprietary rates */
  ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_PROP_RATE,
                                      &(cmd_data->status), true);
  if (unlikely(ret) || unlikely(cmd_data->status)) {
    WL_ERR((" nan proprietary rate set failed, ret = %d status = %d \n",
            ret, cmd_data->status));
    goto fail;
  }

  /* malloc for ndp peer list */
  if ((ret = wl_cfgnan_get_capablities_handler(ndev, cfg, &capabilities)) ==
      BCME_OK) {
    cfg->nancfg.max_ndp_count = capabilities.max_ndp_sessions;
    cfg->nancfg.nan_ndp_peer_info = MALLOCZ(
        cfg->osh, cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
    if (!cfg->nancfg.nan_ndp_peer_info) {
      WL_ERR(("%s: memory allocation failed\n", __func__));
      ret = BCME_NOMEM;
      goto fail;
    }
  } else {
    WL_ERR(("wl_cfgnan_get_capablities_handler failed, ret = %d\n", ret));
    goto fail;
  }

#ifdef RTT_SUPPORT
  /* Initialize geofence cfg */
  dhd_rtt_initialize_geofence_cfg(cfg->pub);
#endif /* RTT_SUPPORT */

  cfg->nan_enable = true;
  WL_INFORM_MEM(("[NAN] Enable successfull \n"));
  /* disable TDLS on NAN NMI IF create */
  wl_cfg80211_tdls_config(cfg, TDLS_STATE_NMI_CREATE, false);

fail:
  /* reset conditon variable */
  cfg->nancfg.nan_event_recvd = false;
  if (unlikely(ret) || unlikely(cmd_data->status)) {
    /* Undo partial bring-up: drop any NAN ifaces created above. */
    cfg->nan_enable = false;
    mutex_lock(&cfg->if_sync);
    ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
    if (ret != BCME_OK) {
      WL_ERR(("failed to delete NDI[%d]\n", ret));
    }
    mutex_unlock(&cfg->if_sync);
  }
  if (nan_buf) {
    MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  }
  if (nan_iov_data) {
    MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
  }

  NAN_DBG_EXIT();
  return ret;
}
2624
wl_cfgnan_disable(struct bcm_cfg80211 * cfg)2625 int wl_cfgnan_disable(struct bcm_cfg80211 *cfg)
2626 {
2627 s32 ret = BCME_OK;
2628 dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
2629
2630 NAN_DBG_ENTER();
2631 if ((cfg->nan_init_state == TRUE) && (cfg->nan_enable == TRUE)) {
2632 struct net_device *ndev;
2633 ndev = bcmcfg_to_prmry_ndev(cfg);
2634
2635 /* We have to remove NDIs so that P2P/Softap can work */
2636 ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
2637 if (ret != BCME_OK) {
2638 WL_ERR(("failed to delete NDI[%d]\n", ret));
2639 }
2640
2641 WL_INFORM_MEM(
2642 ("Nan Disable Req, reason = %d\n", cfg->nancfg.disable_reason));
2643 ret = wl_cfgnan_stop_handler(ndev, cfg);
2644 if (ret == -ENODEV) {
2645 WL_ERR(("Bus is down, no need to proceed\n"));
2646 } else if (ret != BCME_OK) {
2647 WL_ERR(("failed to stop nan, error[%d]\n", ret));
2648 }
2649 ret = wl_cfgnan_deinit(cfg, dhdp->up);
2650 if (ret != BCME_OK) {
2651 WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
2652 if (!dhd_query_bus_erros(dhdp)) {
2653 ASSERT(0);
2654 }
2655 }
2656 wl_cfgnan_disable_cleanup(cfg);
2657 }
2658 NAN_DBG_EXIT();
2659 return ret;
2660 }
2661
wl_cfgnan_send_stop_event(struct bcm_cfg80211 * cfg)2662 static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg)
2663 {
2664 s32 ret = BCME_OK;
2665 nan_event_data_t *nan_event_data = NULL;
2666
2667 NAN_DBG_ENTER();
2668
2669 if (cfg->nancfg.disable_reason == NAN_USER_INITIATED) {
2670 /* do not event to host if command is from host */
2671 goto exit;
2672 }
2673 nan_event_data = MALLOCZ(cfg->osh, sizeof(nan_event_data_t));
2674 if (!nan_event_data) {
2675 WL_ERR(("%s: memory allocation failed\n", __func__));
2676 ret = BCME_NOMEM;
2677 goto exit;
2678 }
2679 bzero(nan_event_data, sizeof(nan_event_data_t));
2680
2681 if (cfg->nancfg.disable_reason == NAN_CONCURRENCY_CONFLICT) {
2682 nan_event_data->status =
2683 NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED;
2684 } else {
2685 nan_event_data->status = NAN_STATUS_SUCCESS;
2686 }
2687
2688 nan_event_data->status = NAN_STATUS_SUCCESS;
2689 ret = memcpy_s(nan_event_data->nan_reason, NAN_ERROR_STR_LEN,
2690 "NAN_STATUS_SUCCESS", strlen("NAN_STATUS_SUCCESS"));
2691 if (ret != BCME_OK) {
2692 WL_ERR(("Failed to copy nan reason string, ret = %d\n", ret));
2693 goto exit;
2694 }
2695 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || \
2696 defined(WL_VENDOR_EXT_SUPPORT)
2697 ret =
2698 wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
2699 GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
2700 if (ret != BCME_OK) {
2701 WL_ERR(("Failed to send event to nan hal, (%d)\n",
2702 GOOGLE_NAN_EVENT_DISABLED));
2703 }
2704 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || \
2705 defined(WL_VENDOR_EXT_SUPPORT) */
2706 exit:
2707 if (nan_event_data) {
2708 MFREE(cfg->osh, nan_event_data, sizeof(nan_event_data_t));
2709 }
2710 NAN_DBG_EXIT();
2711 return;
2712 }
2713
/*
 * Post-disable cleanup of host-side NAN state: tears down NAN-related RTT
 * (geofence targets, pending retry/proxd timers, directed NAN RTT
 * sessions), clears the NDP ID table and datapath count, and frees the NDP
 * peer-info array allocated in wl_cfgnan_start_handler().
 */
void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg)
{
  int i = 0;
#ifdef RTT_SUPPORT
  dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
  rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
  rtt_target_info_t *target_info = NULL;

  /* Delete the geofence rtt target list */
  dhd_rtt_delete_geofence_target_list(dhdp);
  /* Cancel pending retry timer if any */
  if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
    cancel_delayed_work_sync(&rtt_status->rtt_retry_timer);
  }
  /* Remove if any pending proxd timeout for nan-rtt */
  /* NOTE(review): target_info is the address of an array element, so the
   * NULL check below can never fail; the effective guard is the peer type.
   */
  target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
  if (target_info && target_info->peer == RTT_PEER_NAN) {
    /* Cancel pending proxd timeout work if any */
    if (delayed_work_pending(&rtt_status->proxd_timeout)) {
      cancel_delayed_work_sync(&rtt_status->proxd_timeout);
    }
  }
  /* Delete if any directed nan rtt session */
  dhd_rtt_delete_nan_session(dhdp);
#endif /* RTT_SUPPORT */
  /* Clear the NDP ID array and dp count */
  for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
    cfg->nancfg.ndp_id[i] = 0;
  }
  cfg->nan_dp_count = 0;
  /* Free the peer table sized by max_ndp_count at NAN start. */
  if (cfg->nancfg.nan_ndp_peer_info) {
    MFREE(cfg->osh, cfg->nancfg.nan_ndp_peer_info,
          cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
    cfg->nancfg.nan_ndp_peer_info = NULL;
  }
  return;
}
2751
2752 /*
2753 * Deferred nan disable work,
2754 * scheduled with 3sec delay in order to remove any active nan dps
2755 */
/*
 * Work handler for the deferred NAN disable (cfg->nan_disable delayed
 * work). Recovers the bcm_cfg80211 container from the work item and runs
 * the full disable under the RTNL lock, matching the locking context of
 * the cfg80211 callers of wl_cfgnan_disable().
 */
void wl_cfgnan_delayed_disable(struct work_struct *work)
{
  struct bcm_cfg80211 *cfg = NULL;

  /* Map the embedded work_struct back to its owning cfg object. */
  BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, nan_disable.work);

  rtnl_lock();
  wl_cfgnan_disable(cfg);
  rtnl_unlock();
}
2766
/*
 * Stop NAN in firmware. Under the NAN mutex: if NAN is enabled and the bus
 * is still up, disable the NAN event mask (so iface-delete events are not
 * replayed to a framework that is already cleaning up), send a batched
 * "enable=false" iovar, and re-enable TDLS. The fail path unconditionally
 * resets host-side bookkeeping (instance-ID mask, service info,
 * nan_enable) so host state never stays stale even when the firmware
 * exchange fails. wl_cfgnan_send_stop_event() decides internally whether
 * the HAL needs a disabled event.
 *
 * Returns BCME_OK if NAN was not enabled or the stop succeeded, a BCME_*
 * error otherwise.
 */
int wl_cfgnan_stop_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg)
{
  bcm_iov_batch_buf_t *nan_buf = NULL;
  s32 ret = BCME_OK;
  uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  wl_nan_iov_t *nan_iov_data = NULL;
  uint32 status;
  uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

  NAN_DBG_ENTER();
  NAN_MUTEX_LOCK();

  if (!cfg->nan_enable) {
    WL_INFORM(("Nan is not enabled\n"));
    ret = BCME_OK;
    goto fail;
  }

  /* When the bus is down, skip all firmware I/O and only reset host
   * state / emit the stop event.
   */
  if (cfg->nancfg.disable_reason != NAN_BUS_IS_DOWN) {
    /*
     * Framework doing cleanup(iface remove) on disable command,
     * so avoiding event to prevent iface delete calls again
     */
    WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
    wl_cfgnan_config_eventmask(ndev, cfg, 0, true);

    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
      WL_ERR(("%s: memory allocation failed\n", __func__));
      ret = BCME_NOMEM;
      goto fail;
    }

    nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
    if (!nan_iov_data) {
      WL_ERR(("%s: memory allocation failed\n", __func__));
      ret = BCME_NOMEM;
      goto fail;
    }

    /* Initialize the batch header and position the cursor. */
    nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
    nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    /* Append the "nan enable = false" sub-command. */
    ret = wl_cfgnan_enable_handler(nan_iov_data, false);
    if (unlikely(ret)) {
      WL_ERR(("nan disable handler failed\n"));
      goto fail;
    }
    nan_buf->count++;
    nan_buf->is_set = true;
    nan_buf_size -= nan_iov_data->nan_iov_len;
    memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
                                  (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(status)) {
      WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
      goto fail;
    }
    /* Enable back TDLS if connected interface is <= 1 */
    wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
  }

  wl_cfgnan_send_stop_event(cfg);

fail:
  /* Resetting instance ID mask */
  cfg->nancfg.inst_id_start = 0;
  memset(cfg->nancfg.svc_inst_id_mask, 0,
         sizeof(cfg->nancfg.svc_inst_id_mask));
  memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
  cfg->nan_enable = false;

  if (nan_buf) {
    MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  }
  if (nan_iov_data) {
    MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
  }

  NAN_MUTEX_UNLOCK();
  NAN_DBG_EXIT();
  return ret;
}
2853
/*
 * wl_cfgnan_config_handler() - apply run-time NAN configuration updates.
 *
 * Packs every configuration item selected by @nan_attr_mask / populated in
 * @cmd_data (sid beacon, election metrics, hop count limit, RSSI proximity,
 * awake DWs, scan params) into one batched IOVAR buffer and sends it to
 * firmware, then optionally programs NAN availability (local and NDC).
 * NAN must already be enabled; otherwise the function logs and returns OK.
 *
 * Returns BCME_OK on success or a BCME_* error; the firmware status of the
 * batched set is reported through cmd_data->status.
 */
int wl_cfgnan_config_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
			     nan_config_cmd_data_t *cmd_data,
			     uint32 nan_attr_mask)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();

	/* Nan need to be enabled before configuring/updating params */
	if (cfg->nan_enable) {
		nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
		if (!nan_buf) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}

		nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
		if (!nan_iov_data) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}

		/* Place the sub-command cursor just past the batch header;
		 * nan_iov_len tracks the space remaining in the buffer.
		 */
		nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
		nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

		/* setting sid beacon val */
		if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
		    (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
			ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data,
							   nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("sid_beacon sub_cmd set failed\n"));
				goto fail;
			}
			nan_buf->count++;
		}

		/* setting master preference and random factor */
		if (cmd_data->metrics.random_factor || cmd_data->metrics.master_pref) {
			ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data,
							    nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("election_metric sub_cmd set failed\n"));
				goto fail;
			} else {
				nan_buf->count++;
			}
		}

		/* setting hop count limit or threshold */
		if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
			ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
			if (unlikely(ret)) {
				WL_ERR(("hop_count_limit sub_cmd set failed\n"));
				goto fail;
			}
			nan_buf->count++;
		}

		/* setting rssi proximity values for 2.4GHz and 5GHz */
		ret =
		    wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
			goto fail;
		} else {
			nan_buf->count++;
		}

		/* setting nan awake dws */
		ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data, cfg,
					      nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("nan awake dws set failed\n"));
			goto fail;
		} else {
			nan_buf->count++;
		}

		/* Discovery-indication event mask is set via a separate ioctl,
		 * not as part of the batch buffer.
		 */
		if (cmd_data->disc_ind_cfg) {
			/* Disable events */
			WL_TRACE(("Disable events based on flag\n"));
			ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg,
							 false);
			if (unlikely(ret)) {
				WL_ERR(
				    ("Failed to config disc ind flag in event_mask, ret = %d\n",
				     ret));
				goto fail;
			}
		}

		/* Scan params: 5GHz first (index 1), then 2.4GHz (index 0). */
		if ((cfg->support_5g) &&
		    ((cmd_data->dwell_time[1]) || (cmd_data->scan_period[1]))) {
			/* setting scan params */
			ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data,
							    cfg->support_5g, nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("scan params set failed for 5g\n"));
				goto fail;
			}
		}
		if ((cmd_data->dwell_time[0]) || (cmd_data->scan_period[0])) {
			ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0,
							    nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("scan params set failed for 2g\n"));
				goto fail;
			}
		}
		nan_buf->is_set = true;
		/* Actual batch size = space consumed by packed sub-commands. */
		nan_buf_size -= nan_iov_data->nan_iov_len;

		if (nan_buf->count) {
			memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
			ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
						      &(cmd_data->status), (void *)resp_buf,
						      NAN_IOCTL_BUF_SIZE);
			if (unlikely(ret) || unlikely(cmd_data->status)) {
				WL_ERR((" nan config handler failed ret = %d status = %d\n",
					ret, cmd_data->status));
				goto fail;
			}
		} else {
			WL_DBG(("No commands to send\n"));
		}

		/* Availability needs bitmap, a valid duration and a chanspec;
		 * if any is missing this is not an error, just skip.
		 */
		if ((!cmd_data->bmap) ||
		    (cmd_data->avail_params.duration == NAN_BAND_INVALID) ||
		    (!cmd_data->chanspec[0])) {
			WL_TRACE(("mandatory arguments are not present to set avail\n"));
			ret = BCME_OK;
		} else {
			cmd_data->avail_params.chanspec[0] = cmd_data->chanspec[0];
			cmd_data->avail_params.bmap = cmd_data->bmap;
			/* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
			ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg), cfg,
						      &cmd_data->avail_params,
						      WL_AVAIL_LOCAL);
			if (unlikely(ret)) {
				WL_ERR(("Failed to set avail value with type local\n"));
				goto fail;
			}

			ret =
			    wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg), cfg,
						    &cmd_data->avail_params, WL_AVAIL_NDC);
			if (unlikely(ret)) {
				WL_ERR(("Failed to set avail value with type ndc\n"));
				goto fail;
			}
		}
	} else {
		WL_INFORM(("nan is not enabled\n"));
	}

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_DBG_EXIT();
	return ret;
}
3030
/* NAN support query: no driver-side action required; always succeeds. */
int wl_cfgnan_support_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
			      nan_config_cmd_data_t *cmd_data)
{
	(void)ndev;
	(void)cfg;
	(void)cmd_data;
	return BCME_OK;
}
3037
/* NAN status query: no driver-side action required; always succeeds. */
int wl_cfgnan_status_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
			     nan_config_cmd_data_t *cmd_data)
{
	(void)ndev;
	(void)cfg;
	(void)cmd_data;
	return BCME_OK;
}
3044
3045 #ifdef WL_NAN_DISC_CACHE
wl_cfgnan_get_svc_inst(struct bcm_cfg80211 * cfg,wl_nan_instance_id svc_inst_id,uint8 ndp_id)3046 static nan_svc_info_t *wl_cfgnan_get_svc_inst(struct bcm_cfg80211 *cfg,
3047 wl_nan_instance_id svc_inst_id,
3048 uint8 ndp_id)
3049 {
3050 uint8 i, j;
3051 if (ndp_id) {
3052 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3053 for (j = 0; j < NAN_MAX_SVC_INST; j++) {
3054 if (cfg->svc_info[i].ndp_id[j] == ndp_id) {
3055 return &cfg->svc_info[i];
3056 }
3057 }
3058 }
3059 } else if (svc_inst_id) {
3060 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3061 if (cfg->svc_info[i].svc_id == svc_inst_id) {
3062 return &cfg->svc_info[i];
3063 }
3064 }
3065 }
3066 return NULL;
3067 }
3068
wl_cfgnan_check_for_ranging(struct bcm_cfg80211 * cfg,struct ether_addr * peer)3069 nan_ranging_inst_t *wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg,
3070 struct ether_addr *peer)
3071 {
3072 uint8 i;
3073 if (peer) {
3074 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3075 if (!memcmp(peer, &cfg->nan_ranging_info[i].peer_addr,
3076 ETHER_ADDR_LEN)) {
3077 return &(cfg->nan_ranging_info[i]);
3078 }
3079 }
3080 }
3081 return NULL;
3082 }
3083
wl_cfgnan_get_rng_inst_by_id(struct bcm_cfg80211 * cfg,uint8 rng_id)3084 nan_ranging_inst_t *wl_cfgnan_get_rng_inst_by_id(struct bcm_cfg80211 *cfg,
3085 uint8 rng_id)
3086 {
3087 uint8 i;
3088 if (rng_id) {
3089 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3090 if (cfg->nan_ranging_info[i].range_id == rng_id) {
3091 return &(cfg->nan_ranging_info[i]);
3092 }
3093 }
3094 }
3095 WL_ERR(("Couldn't find the ranging instance for rng_id %d\n", rng_id));
3096 return NULL;
3097 }
3098
/*
 * Find the ranging instance for the given peer;
 * if none exists, create one with the given
 * range role.
 */
wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 * cfg,struct ether_addr * peer,nan_range_role_t range_role)3104 nan_ranging_inst_t *wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg,
3105 struct ether_addr *peer,
3106 nan_range_role_t range_role)
3107 {
3108 nan_ranging_inst_t *ranging_inst = NULL;
3109 uint8 i;
3110
3111 if (!peer) {
3112 WL_ERR(("Peer address is NULL"));
3113 goto done;
3114 }
3115
3116 ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
3117 if (ranging_inst) {
3118 goto done;
3119 }
3120 WL_TRACE(("Creating Ranging instance \n"));
3121
3122 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3123 if (cfg->nan_ranging_info[i].in_use == FALSE) {
3124 break;
3125 }
3126 }
3127
3128 if (i == NAN_MAX_RANGING_INST) {
3129 WL_ERR(("No buffer available for the ranging instance"));
3130 goto done;
3131 }
3132 ranging_inst = &cfg->nan_ranging_info[i];
3133 memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
3134 ranging_inst->range_status = NAN_RANGING_REQUIRED;
3135 ranging_inst->prev_distance_mm = INVALID_DISTANCE;
3136 ranging_inst->range_role = range_role;
3137 ranging_inst->in_use = TRUE;
3138
3139 done:
3140 return ranging_inst;
3141 }
3142 #endif /* WL_NAN_DISC_CACHE */
3143
/*
 * Extract the instance id from a firmware response buffer, based on which
 * sub-command produced it. Always returns BCME_OK.
 */
static int process_resp_buf(void *iov_resp, uint8 *instance_id,
			    uint16 sub_cmd_id)
{
	NAN_DBG_ENTER();

	switch (sub_cmd_id) {
	case WL_NAN_CMD_DATA_DATAREQ: {
		wl_nan_dp_req_ret_t *dpreq_ret = (wl_nan_dp_req_ret_t *)iov_resp;
		*instance_id = dpreq_ret->ndp_id;
		WL_TRACE(("%s: Initiator NDI: " MACDBG "\n", __FUNCTION__,
			  MAC2STRDBG(dpreq_ret->indi.octet)));
		break;
	}
	case WL_NAN_CMD_RANGE_REQUEST: {
		wl_nan_range_id *range_id = (wl_nan_range_id *)iov_resp;
		*instance_id = *range_id;
		WL_TRACE(("Range id: %d\n", *range_id));
		break;
	}
	default:
		/* Other sub-commands carry no instance id; leave output as-is. */
		break;
	}
	WL_DBG(("instance_id: %d\n", *instance_id));
	NAN_DBG_EXIT();
	return BCME_OK;
}
3166
/*
 * wl_cfgnan_cancel_ranging() - ask firmware to cancel a ranging session.
 *
 * Sends WL_NAN_CMD_RANGE_CANCEL for @range_id. When firmware supports the
 * extended cancel (version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) the payload
 * is wl_nan_range_cancel_ext_t (id + @flags); otherwise only the raw id is
 * sent and @flags is ignored. Firmware status is returned through @status.
 *
 * Returns BCME_OK on success or a BCME_* error.
 */
int wl_cfgnan_cancel_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
			     uint8 range_id, uint8 flags, uint32 *status)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	wl_nan_range_cancel_ext_t rng_cncl;
	uint8 size_of_iov;

	NAN_DBG_ENTER();

	/* Payload size depends on whether fw supports extended cancel. */
	if (cfg->nancfg.version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) {
		size_of_iov = sizeof(rng_cncl);
	} else {
		size_of_iov = sizeof(range_id);
	}

	memset_s(&rng_cncl, sizeof(rng_cncl), 0, sizeof(rng_cncl));
	rng_cncl.range_id = range_id;
	rng_cncl.flags = flags;

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	/* Build the batch header; sub_cmd points just past it. */
	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len, size_of_iov,
				       &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
	sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	/* Total bytes actually consumed = header + this one sub-command. */
	nan_buf_size = (nan_iov_start - nan_iov_end);

	/* Copy whichever payload form the firmware version expects. */
	if (size_of_iov >= sizeof(rng_cncl)) {
		(void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len, &rng_cncl,
			       size_of_iov);
	} else {
		(void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len, &range_id,
			       size_of_iov);
	}

	nan_buf->is_set = true;
	nan_buf->count++;
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
				      (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("Range ID %d cancel failed ret %d status %d \n", range_id, ret,
			*status));
		goto fail;
	}
	WL_MEM(("Range cancel with Range ID [%d] successfull\n", range_id));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}
	NAN_DBG_EXIT();
	return ret;
}
3259
3260 #ifdef WL_NAN_DISC_CACHE
/*
 * wl_cfgnan_cache_svc_info() - cache (or update) a service's ranging config.
 *
 * Picks the cache slot: on @update the slot whose svc_id matches, otherwise
 * the first empty slot (svc_id == 0). Stores ranging parameters depending
 * on NAN_SDE_CF_RANGING_REQUIRED, the subscribe tx match filter (active
 * subscribes only) and the service hash.
 *
 * Returns BCME_OK, BCME_NORESOURCE when no slot is available, or a memcpy_s
 * error code.
 */
static int wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
				    nan_discover_cmd_data_t *cmd_data,
				    uint16 cmd_id, bool update)
{
	int ret = BCME_OK;
	int i;
	nan_svc_info_t *svc_info;
	/* Subscribe commands carry the id in sub_id, publish in pub_id. */
	uint8 svc_id = (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) ? cmd_data->sub_id
							   : cmd_data->pub_id;

	/* Select the slot; loop exits with i == NAN_MAX_SVC_INST when none
	 * matched/was free, in which case svc_info is never dereferenced.
	 */
	for (i = 0; i < NAN_MAX_SVC_INST; i++) {
		if (update) {
			if (cfg->svc_info[i].svc_id == svc_id) {
				svc_info = &cfg->svc_info[i];
				break;
			} else {
				continue;
			}
		}
		if (!cfg->svc_info[i].svc_id) {
			svc_info = &cfg->svc_info[i];
			break;
		}
	}
	if (i == NAN_MAX_SVC_INST) {
		WL_ERR(("%s:cannot accomodate ranging session\n", __FUNCTION__));
		ret = BCME_NORESOURCE;
		goto fail;
	}
	if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
		WL_TRACE(("%s: updating ranging info, enabling", __FUNCTION__));
		svc_info->status = 1;
		svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
		svc_info->ranging_ind = cmd_data->ranging_indication;
		svc_info->ingress_limit = cmd_data->ingress_limit;
		svc_info->egress_limit = cmd_data->egress_limit;
		svc_info->ranging_required = 1;
	} else {
		/* Ranging not requested: clear all ranging-related fields. */
		WL_TRACE(("%s: updating ranging info, disabling", __FUNCTION__));
		svc_info->status = 0;
		svc_info->ranging_interval = 0;
		svc_info->ranging_ind = 0;
		svc_info->ingress_limit = 0;
		svc_info->egress_limit = 0;
		svc_info->ranging_required = 0;
	}

	/* Reset Range status flags on svc creation/update */
	svc_info->svc_range_status = 0;
	svc_info->flags = cmd_data->flags;

	if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
		svc_info->svc_id = cmd_data->sub_id;
		/* Active subscribes carry a tx match filter to cache. */
		if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
		    (cmd_data->tx_match.dlen)) {
			ret = memcpy_s(svc_info->tx_match_filter,
				       sizeof(svc_info->tx_match_filter),
				       cmd_data->tx_match.data, cmd_data->tx_match.dlen);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy tx match filter data\n"));
				goto fail;
			}
			svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
		}
	} else {
		svc_info->svc_id = cmd_data->pub_id;
	}
	/* NOTE(review): copies a fixed WL_NAN_SVC_HASH_LEN regardless of
	 * cmd_data->svc_hash.dlen — assumes caller always supplies a full
	 * hash; confirm against callers.
	 */
	ret = memcpy_s(svc_info->svc_hash, sizeof(svc_info->svc_hash),
		       cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy svc hash\n"));
	}
fail:
	return ret;
}
3336
3337 static bool
wl_cfgnan_clear_svc_from_ranging_inst(struct bcm_cfg80211 * cfg,nan_ranging_inst_t * ranging_inst,nan_svc_info_t * svc)3338 wl_cfgnan_clear_svc_from_ranging_inst(struct bcm_cfg80211 *cfg,
3339 nan_ranging_inst_t *ranging_inst,
3340 nan_svc_info_t *svc)
3341 {
3342 int i = 0;
3343 bool cleared = FALSE;
3344
3345 if (svc && ranging_inst->in_use) {
3346 for (i = 0; i < MAX_SUBSCRIBES; i++) {
3347 if (svc == ranging_inst->svc_idx[i]) {
3348 ranging_inst->num_svc_ctx--;
3349 ranging_inst->svc_idx[i] = NULL;
3350 cleared = TRUE;
3351 /*
3352 * This list is maintained dupes free,
3353 * hence can break
3354 */
3355 break;
3356 }
3357 }
3358 }
3359 return cleared;
3360 }
3361
/*
 * Detach the service identified by @svc_id from every ranging instance.
 * Returns BCME_NOTFOUND when the service is not cached, BCME_OK otherwise.
 */
static int wl_cfgnan_clear_svc_from_all_ranging_inst(struct bcm_cfg80211 *cfg,
						     uint8 svc_id)
{
	int idx;
	nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);

	if (svc == NULL) {
		WL_ERR(("\n svc not found \n"));
		return BCME_NOTFOUND;
	}
	for (idx = 0; idx < NAN_MAX_RANGING_INST; idx++) {
		(void)wl_cfgnan_clear_svc_from_ranging_inst(
			cfg, &(cfg->nan_ranging_info[idx]), svc);
	}
	return BCME_OK;
}
3383
/*
 * On a publish update, drop the publish service from the peer's ranging
 * instance and terminate the session if it no longer has any service refs.
 * Returns BCME_NOTFOUND when nothing needed clearing, BCME_OK otherwise.
 */
static int wl_cfgnan_ranging_clear_publish(struct bcm_cfg80211 *cfg,
					   struct ether_addr *peer,
					   uint8 svc_id)
{
	nan_ranging_inst_t *ranging_inst;
	nan_svc_info_t *svc;

	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
	if (!ranging_inst || !ranging_inst->in_use) {
		return BCME_OK;
	}

	WL_INFORM_MEM(("Check clear Ranging for pub update, sub id = %d,"
		       " range_id = %d, peer addr = " MACDBG " \n",
		       svc_id, ranging_inst->range_id, MAC2STRDBG(peer)));
	svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
	if (!svc) {
		WL_ERR(("\n svc not found, svc_id = %d\n", svc_id));
		return BCME_NOTFOUND;
	}

	if (!wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc)) {
		/* Only if this svc was cleared, any update needed */
		return BCME_NOTFOUND;
	}

	/* Service removed: tear the session down if nothing references it. */
	wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
	return BCME_OK;
}
3420
3421 #ifdef RTT_SUPPORT
3422 /* API to terminate/clear all directed nan-rtt sessions.
3423 * Can be called from framework RTT stop context
3424 */
/*
 * wl_cfgnan_terminate_directed_rtt_sessions() - stop all directed NAN-RTT.
 *
 * For every ranging instance of type RTT_TYPE_NAN_DIRECTED: cancel it in
 * firmware if currently in progress, then reset its geofence ranging state.
 * Returns the last cancel result (failures are logged, not propagated per
 * instance).
 */
int wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev,
					      struct bcm_cfg80211 *cfg)
{
	nan_ranging_inst_t *ranging_inst;
	int i, ret = BCME_OK;
	uint32 status;

	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
		ranging_inst = &cfg->nan_ranging_info[i];
		if (ranging_inst->range_id &&
		    ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
			if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
				ret = wl_cfgnan_cancel_ranging(
				    ndev, cfg, ranging_inst->range_id,
				    NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
				if (unlikely(ret) || unlikely(status)) {
					WL_ERR(("nan range cancel failed ret = %d status = %d\n",
						ret, status));
				}
			}
			/* Reset scheduling state even if cancel failed. */
			wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
							 RTT_SHCED_HOST_DIRECTED_TERM);
		}
	}
	return ret;
}
3451 #endif /* RTT_SUPPORT */
3452
/*
 * Suspend the ongoing geofence ranging session with a peer
 * if the in-progress ranging is with the given peer.
 * If peer is NULL, suspend the on-going ranging unconditionally.
 * Does nothing when:
 *  - ranging is not in progress, or
 *  - ranging is in progress but not with the given peer.
 */
/*
 * wl_cfgnan_suspend_geofence_rng_session() - suspend geofence ranging.
 *
 * Cancels the in-progress geofence ranging session if it is with @peer
 * (or unconditionally when @peer is NULL), marks the instance as
 * NAN_RANGING_REQUIRED so it can be rescheduled, and clears the RTT
 * in-progress flag. For NDP-trigger suspend reasons, pending discovery
 * results are posted on exit.
 */
int wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
					   struct ether_addr *peer,
					   int suspend_reason, u8 cancel_flags)
{
	int ret = BCME_OK;
	uint32 status;
	nan_ranging_inst_t *ranging_inst = NULL;
	struct ether_addr *peer_addr = NULL;
	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
#ifdef RTT_SUPPORT
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	rtt_geofence_target_info_t *geofence_target_info;

	/* The current geofence target identifies the session to suspend. */
	geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
	if (!geofence_target_info) {
		WL_DBG(("No Geofencing Targets, suspend req dropped\n"));
		goto exit;
	}
	peer_addr = &geofence_target_info->peer_addr;

	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
	if (dhd_rtt_get_geofence_rtt_state(dhd) == FALSE) {
		WL_DBG(("Geofencing Ranging not in progress, suspend req dropped\n"));
		goto exit;
	}

	/* Current target differs from the requested peer. */
	if (peer && memcmp(peer_addr, peer, ETHER_ADDR_LEN)) {
		if (suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER ||
		    suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER) {
			/* NDP and Ranging can coexist with different Peers */
			WL_DBG(("Geofencing Ranging not in progress with given peer,"
				" suspend req dropped\n"));
			goto exit;
		}
	}
#endif /* RTT_SUPPORT */

	/* Without RTT_SUPPORT ranging_inst stays NULL and this is a no-op. */
	ASSERT((ranging_inst != NULL));
	if (ranging_inst) {
		if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
			WL_DBG(("Ranging Inst with peer not in progress, "
				" suspend req dropped\n"));
			goto exit;
		}
		cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
		ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
					       cancel_flags, &status);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(("Geofence Range suspended failed, err = %d, status = %d,"
				" range_id = %d, suspend_reason = %d, " MACDBG " \n",
				ret, status, ranging_inst->range_id, suspend_reason,
				MAC2STRDBG(peer_addr)));
		}
		/* Mark ready-to-range again regardless of cancel outcome. */
		ranging_inst->range_status = NAN_RANGING_REQUIRED;
		WL_INFORM_MEM(("Geofence Range suspended, range_id = %d,"
			       " suspend_reason = %d, " MACDBG " \n",
			       ranging_inst->range_id, suspend_reason,
			       MAC2STRDBG(peer_addr)));
#ifdef RTT_SUPPORT
		/* Set geofence RTT in progress state to false */
		dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
#endif /* RTT_SUPPORT */
	}

exit:
	/* Post pending discovery results */
	if (ranging_inst && ((suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER) ||
			     (suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER))) {
		wl_cfgnan_disc_result_on_geofence_cancel(cfg, ranging_inst);
	}

	return ret;
}
3535
/* Wipe the cached service entry for @svc_id, if one exists. */
static void wl_cfgnan_clear_svc_cache(struct bcm_cfg80211 *cfg,
				      wl_nan_instance_id svc_id)
{
	nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);

	if (svc != NULL) {
		WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
		memset(svc, 0, sizeof(*svc));
	}
}
3546
3547 /*
3548 * Terminate given ranging instance
3549 * if no pending ranging sub service
3550 */
/*
 * wl_cfgnan_terminate_ranging_session() - tear down one ranging instance.
 *
 * No-op unless the instance has a valid range_id and zero remaining service
 * contexts. Cancels in-firmware ranging if in progress, then removes the
 * peer from the geofence target list and zeroes the instance.
 */
static void
wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
				    nan_ranging_inst_t *ranging_inst)
{
	int ret = BCME_OK;
	uint32 status;
#ifdef RTT_SUPPORT
	rtt_geofence_target_info_t *geofence_target = NULL;
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	int8 index;
#endif /* RTT_SUPPORT */

	if (ranging_inst->range_id == 0) {
		/* Make sure, range inst is valid in caller */
		return;
	}

	if (ranging_inst->num_svc_ctx != 0) {
		/*
		 * Make sure to remove all svc_insts for range_inst
		 * in order to cancel ranging and remove target in caller
		 */
		return;
	}

	/* Cancel Ranging if in progress for rang_inst */
	if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
		ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
					       ranging_inst->range_id,
					       NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
				__FUNCTION__, ret, status));
		} else {
			WL_DBG(("Range cancelled \n"));
			/* Set geofence RTT in progress state to false */
#ifdef RTT_SUPPORT
			dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
#endif /* RTT_SUPPORT */
		}
	}

#ifdef RTT_SUPPORT
	/* Only clear the instance once its geofence target is removed. */
	geofence_target =
	    dhd_rtt_get_geofence_target(dhd, &ranging_inst->peer_addr, &index);
	if (geofence_target) {
		dhd_rtt_remove_geofence_target(dhd, &geofence_target->peer_addr);
		WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
			       MAC2STRDBG(&(ranging_inst->peer_addr))));
		bzero(ranging_inst, sizeof(nan_ranging_inst_t));
	}
#endif /* RTT_SUPPORT */
}
3604
3605 /*
3606 * Terminate all ranging sessions
3607 * with no pending ranging sub service
3608 */
3609 static void
wl_cfgnan_terminate_all_obsolete_ranging_sessions(struct bcm_cfg80211 * cfg)3610 wl_cfgnan_terminate_all_obsolete_ranging_sessions(struct bcm_cfg80211 *cfg)
3611 {
3612 /* cancel all related ranging instances */
3613 uint8 i = 0;
3614 nan_ranging_inst_t *ranging_inst = NULL;
3615
3616 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3617 ranging_inst = &cfg->nan_ranging_info[i];
3618 if (ranging_inst->in_use) {
3619 wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
3620 }
3621 }
3622
3623 return;
3624 }
3625
3626 /*
3627 * Store svc_ctx for processing during RNG_RPT
3628 * Return BCME_OK only when svc is added
3629 */
/*
 * wl_cfgnan_update_ranging_svc_inst() - attach a svc ctx to a ranging inst.
 *
 * Rejects duplicates (the list is kept dupe-free) and stores @svc in the
 * first free slot. Returns BCME_OK only when svc is added,
 * BCME_UNSUPPORTED on duplicate, BCME_NORESOURCE when the table is full.
 *
 * Note: the original code re-scanned the table and then tested
 * `i == MAX_SUBSCRIBES`, a condition that was always true at that point;
 * both scans are merged into a single pass here.
 */
static int wl_cfgnan_update_ranging_svc_inst(nan_ranging_inst_t *ranging_inst,
					     nan_svc_info_t *svc)
{
	int i;
	int free_idx = -1;

	/* One pass: reject duplicates, remember the first free slot. */
	for (i = 0; i < MAX_SUBSCRIBES; i++) {
		if (ranging_inst->svc_idx[i] == svc) {
			WL_DBG(("SVC Ctx for ranging already present, "
				" Duplication not supported: sub_id: %d\n",
				svc->svc_id));
			return BCME_UNSUPPORTED;
		}
		if (free_idx < 0 && !ranging_inst->svc_idx[i]) {
			free_idx = i;
		}
	}
	if (free_idx < 0) {
		WL_ERR(("wl_cfgnan_update_ranging_svc_inst: "
			"No resource to hold Ref SVC ctx..svc_id %d\n",
			svc->svc_id));
		return BCME_NORESOURCE;
	}
	WL_DBG(("Adding SVC Ctx for ranging..svc_id %d\n", svc->svc_id));
	ranging_inst->svc_idx[free_idx] = svc;
	ranging_inst->num_svc_ctx++;
	return BCME_OK;
}
3666
3667 #ifdef RTT_SUPPORT
/*
 * wl_cfgnan_trigger_geofencing_ranging() - kick off geofence ranging.
 *
 * Issues a range request for the ranging instance matching @peer_addr,
 * using the first cached svc in the instance's list. On unrecoverable
 * failure the geofence target and ranging instance are cleaned up;
 * BCME_BUSY is treated as retriable (caller re-attempts over a timer).
 * err_at encodes the failure point for the exit log.
 */
int wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
					 struct ether_addr *peer_addr)
{
	int ret = BCME_OK;
	int err_at = 0;
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
	int8 index = -1;
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	rtt_geofence_target_info_t *geofence_target;
	nan_ranging_inst_t *ranging_inst;
	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
	if (!ranging_inst) {
		WL_INFORM_MEM(("Ranging Entry for peer:" MACDBG ", not found\n",
			       MAC2STRDBG(peer_addr)));
		ASSERT(0);
		/* Ranging inst should have been added before adding target */
		dhd_rtt_remove_geofence_target(dhd, peer_addr);
		ret = BCME_ERROR;
		err_at = 1;
		goto exit;
	}

	ASSERT(ranging_inst->range_status != NAN_RANGING_IN_PROGRESS);

	if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
		WL_DBG(("Trigger range request with first svc in svc list of range "
			"inst\n"));
		ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
						ranging_inst, ranging_inst->svc_idx[0],
						NAN_RANGE_REQ_CMD, TRUE);
		if (ret != BCME_OK) {
			/* Unsupported is for already ranging session for peer */
			if (ret == BCME_BUSY) {
				/* Attempt again over a timer */
				err_at = 0x2;
			} else {
				/* Remove target and clean ranging inst */
				geofence_target = dhd_rtt_get_geofence_target(
				    dhd, &ranging_inst->peer_addr, &index);
				if (geofence_target) {
					dhd_rtt_remove_geofence_target(dhd,
								       &geofence_target->peer_addr);
				}
				bzero(ranging_inst, sizeof(nan_ranging_inst_t));
				err_at = 0x3;
				goto exit;
			}
		}
	} else {
		/* already in progress..This should not happen */
		ASSERT(0);
		ret = BCME_ERROR;
		err_at = 0x4;
		goto exit;
	}

exit:
	if (ret) {
		WL_ERR(("wl_cfgnan_trigger_geofencing_ranging: Failed to "
			"trigger ranging, peer: " MACDBG " ret"
			" = (%d), err_at = %d\n",
			MAC2STRDBG(peer_addr), ret, err_at));
	}
	return ret;
}
3733 #endif /* RTT_SUPPORT */
3734
3735 static int
/*
 * wl_cfgnan_check_disc_result_for_ranging() - start ranging on discovery.
 *
 * On a discovery result for a subscribe whose cached svc requires ranging:
 * gets/creates a ranging instance for the remote NMI (initiator role),
 * adds the peer to the geofence target list when appropriate, attaches the
 * svc ctx to the instance, and triggers/resets geofence RTT scheduling.
 * Returns BCME_UNSUPPORTED when the svc does not require ranging.
 */
static int
wl_cfgnan_check_disc_result_for_ranging(struct bcm_cfg80211 *cfg,
					nan_event_data_t *nan_event_data)
{
	nan_svc_info_t *svc;
	int ret = BCME_OK;
#ifdef RTT_SUPPORT
	rtt_geofence_target_info_t geofence_target;
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	uint8 index;
#endif /* RTT_SUPPORT */
	bool add_target;

	svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
	if (svc && svc->ranging_required) {
		nan_ranging_inst_t *ranging_inst;
		ranging_inst = wl_cfgnan_get_ranging_inst(
		    cfg, &nan_event_data->remote_nmi, NAN_RANGING_ROLE_INITIATOR);
		if (!ranging_inst) {
			ret = BCME_NORESOURCE;
			goto exit;
		}
		ASSERT(ranging_inst->range_role != NAN_RANGING_ROLE_INVALID);

		/* For responder role, range state should be in progress only */
		ASSERT(ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR ||
		       ranging_inst->range_status == NAN_RANGING_IN_PROGRESS);

		/*
		 * On rec disc result with ranging required, add target, if
		 * ranging role is responder (range state has to be in prog always)
		 * Or ranging role is initiator and ranging is not already in prog
		 */
		add_target =
		    ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
		     ((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) &&
		      (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS)));
		if (add_target) {
			WL_DBG(("Add Range request to geofence target list\n"));
#ifdef RTT_SUPPORT
			memcpy(&geofence_target.peer_addr, &nan_event_data->remote_nmi,
			       ETHER_ADDR_LEN);
			/* check if target is already added */
			if (!dhd_rtt_get_geofence_target(dhd, &nan_event_data->remote_nmi,
							 &index)) {
				ret = dhd_rtt_add_geofence_target(dhd, &geofence_target);
				if (unlikely(ret)) {
					WL_ERR(("Failed to add geofence Tgt, ret = (%d)\n", ret));
					bzero(ranging_inst, sizeof(*ranging_inst));
					goto exit;
				} else {
					WL_INFORM_MEM(("Geofence Tgt Added:" MACDBG " sub_id:%d\n",
						       MAC2STRDBG(&geofence_target.peer_addr),
						       svc->svc_id));
				}
				ranging_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
			}
#endif /* RTT_SUPPORT */
			if (wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc) !=
			    BCME_OK) {
				goto exit;
			}
#ifdef RTT_SUPPORT
			if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
				/* Adding RTT target while responder, leads to role concurrency
				 */
				dhd_rtt_set_role_concurrency_state(dhd, TRUE);
			} else {
				/* Trigger/Reset geofence RTT */
				wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
								 RTT_SCHED_SUB_MATCH);
			}
#endif /* RTT_SUPPORT */
		} else {
			/* Target already added, check & add svc_inst ref to rang_inst */
			wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc);
		}
		/* Disc event will be given on receving range_rpt event */
		WL_TRACE(("Disc event will given when Range RPT event is recvd"));
	} else {
		ret = BCME_UNSUPPORTED;
	}

exit:
	return ret;
}
3821
wl_cfgnan_ranging_allowed(struct bcm_cfg80211 * cfg)3822 bool wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg)
3823 {
3824 int i = 0;
3825 uint8 rng_progress_count = 0;
3826 nan_ranging_inst_t *ranging_inst = NULL;
3827
3828 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3829 ranging_inst = &cfg->nan_ranging_info[i];
3830 if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
3831 rng_progress_count++;
3832 }
3833 }
3834
3835 ASSERT(rng_progress_count <= NAN_MAX_RANGING_SSN_ALLOWED);
3836 if (rng_progress_count == NAN_MAX_RANGING_SSN_ALLOWED) {
3837 return FALSE;
3838 }
3839 return TRUE;
3840 }
3841
wl_cfgnan_cancel_rng_responders(struct net_device * ndev,struct bcm_cfg80211 * cfg)3842 uint8 wl_cfgnan_cancel_rng_responders(struct net_device *ndev,
3843 struct bcm_cfg80211 *cfg)
3844 {
3845 int i = 0;
3846 uint8 num_resp_cancelled = 0;
3847 int status, ret;
3848 nan_ranging_inst_t *ranging_inst = NULL;
3849
3850 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3851 ranging_inst = &cfg->nan_ranging_info[i];
3852 if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS &&
3853 ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
3854 num_resp_cancelled++;
3855 WL_ERR((" Cancelling responder\n"));
3856 ret = wl_cfgnan_cancel_ranging(
3857 bcmcfg_to_prmry_ndev(cfg), cfg, ranging_inst->range_id,
3858 NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
3859 if (unlikely(ret) || unlikely(status)) {
3860 WL_ERR(("wl_cfgnan_cancel_rng_responders: Failed to cancel"
3861 " existing ranging, ret = (%d)\n",
3862 ret));
3863 }
3864 WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
3865 MAC2STRDBG(&(ranging_inst->peer_addr))));
3866 bzero(ranging_inst, sizeof(*ranging_inst));
3867 }
3868 }
3869 return num_resp_cancelled;
3870 }
3871
3872 #ifdef RTT_SUPPORT
/* ranging request event handler */
/*
 * Handle a ranging request indication received from a peer.
 *
 * Policy when a ranging instance for this peer already exists:
 *  - Geofence session or responder role: silently tear down the current
 *    session (suspend it instead if we are a geofence initiator) and
 *    accept the new request, reusing the instance as a responder.
 *  - Direct RTT initiator role: reject with BCME_BUSY.
 * If no instance exists, a new responder instance is created, subject to
 * the session cap enforced by wl_cfgnan_ranging_allowed().
 *
 * On any failure a zeroed temporary instance carrying the peer address is
 * used so that a reject response can still be sent to the peer.
 *
 * Returns BCME_OK on success or a BCME_ error code.
 */
static int wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
                                        wl_nan_ev_rng_req_ind_t *rng_ind)
{
  int ret = BCME_OK;
  nan_ranging_inst_t *ranging_inst = NULL;
  uint32 status;
  uint8 cancel_flags = 0;
  bool accept = TRUE;
  nan_ranging_inst_t tmp_rng_inst;
  struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);

  WL_DBG(("Trigger range response\n"));

  /* check if we are already having any ranging session with peer.
   * If so below are the policies
   * If we are already a Geofence Initiator or responder w.r.t the peer
   * then silently teardown the current session and accept the REQ.
   * If we are in direct rtt initiator role then reject.
   */
  ranging_inst = wl_cfgnan_check_for_ranging(cfg, &(rng_ind->peer_m_addr));
  if (ranging_inst) {
    if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE ||
        ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
      WL_INFORM_MEM(
          ("Already responder/geofence for the Peer, cancel current"
           " ssn and accept new one, range_type = %d, role = %d\n",
           ranging_inst->range_type, ranging_inst->range_role));
      cancel_flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
                     NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN;

      if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE &&
          ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
        /* Geofence initiator sessions are suspended rather than
         * cancelled so they can be resumed later.
         */
        wl_cfgnan_suspend_geofence_rng_session(
            ndev, &(rng_ind->peer_m_addr),
            RTT_GEO_SUSPN_PEER_RTT_TRIGGER, cancel_flags);
      } else {
        ret = wl_cfgnan_cancel_ranging(
            ndev, cfg, ranging_inst->range_id, cancel_flags, &status);
        if (unlikely(ret)) {
          WL_ERR(("wl_cfgnan_handle_ranging_ind: Failed to cancel"
                  " existing ranging, ret = (%d)\n",
                  ret));
          goto done;
        }
      }
      /* Reuse the existing instance as a fresh responder */
      ranging_inst->range_status = NAN_RANGING_REQUIRED;
      ranging_inst->range_role = NAN_RANGING_ROLE_RESPONDER;
      ranging_inst->range_type = 0;
    } else {
      WL_ERR(("Reject the RNG_REQ_IND in direct rtt initiator role\n"));
      ret = BCME_BUSY;
      goto done;
    }
  } else {
    /* Check if new Ranging session is allowed */
    if (!wl_cfgnan_ranging_allowed(cfg)) {
      WL_ERR(("Cannot allow more ranging sessions \n"));
      ret = BCME_NORESOURCE;
      goto done;
    }

    ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
                                              NAN_RANGING_ROLE_RESPONDER);
    if (!ranging_inst) {
      WL_ERR(("Failed to create ranging instance \n"));
      ASSERT(0);
      ret = BCME_NORESOURCE;
      goto done;
    }
  }

done:
  if (ret != BCME_OK) {
    /* reject the REQ using temp ranging instance */
    bzero(&tmp_rng_inst, sizeof(tmp_rng_inst));
    ranging_inst = &tmp_rng_inst;
    (void)memcpy_s(&tmp_rng_inst.peer_addr, ETHER_ADDR_LEN,
                   &rng_ind->peer_m_addr, ETHER_ADDR_LEN);
    accept = FALSE;
  }

  /* Always echo the peer's range id back in the response */
  ranging_inst->range_id = rng_ind->rng_id;

  WL_DBG(("Trigger Ranging at Responder\n"));
  ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst, NULL,
                                  NAN_RANGE_REQ_EVNT, accept);
  if (unlikely(ret) || !accept) {
    WL_ERR(("Failed to handle range request, ret = (%d) accept %d\n", ret,
            accept));
    bzero(ranging_inst, sizeof(*ranging_inst));
  }

  return ret;
}
3968 #endif /* RTT_SUPPORT */
/* ranging request and response iovar handler */
/*
 * Build and issue the NAN range request / range response iovar.
 *
 * range_cmd selects the path: NAN_RANGE_REQ_CMD builds a
 * WL_NAN_CMD_RANGE_REQUEST (initiator), anything else builds a
 * WL_NAN_CMD_RANGE_RESPONSE (responder). accept_req is only meaningful on
 * the response path and maps to NAN_RNG_REQ_ACCEPTED/REJECTED_BY_HOST.
 *
 * Before issuing the command, local and ranging availability windows are
 * programmed via wl_cfgnan_set_nan_avail(). On the request path the
 * firmware-assigned range id is parsed out of the response buffer into
 * ranging_inst->range_id, and range_status is marked IN_PROGRESS to
 * suppress duplicate requests.
 *
 * Returns BCME_OK on success; on ioctl failure the firmware status is
 * folded into the return value.
 */
int wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
                              void *ranging_ctxt, nan_svc_info_t *svc,
                              uint8 range_cmd, bool accept_req)
{
  s32 ret = BCME_OK;
  bcm_iov_batch_buf_t *nan_buf = NULL;
  wl_nan_range_req_t *range_req = NULL;
  wl_nan_range_resp_t *range_resp = NULL;
  bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  uint32 status;
  /* NOTE(review): resp_buf holds NAN_IOCTL_BUF_SIZE_MED bytes but the
   * ioctl below is told NAN_IOCTL_BUF_SIZE — assumes MED >= that size;
   * confirm against the macro definitions.
   */
  uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
  nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
  nan_avail_cmd_data cmd_data;

  NAN_DBG_ENTER();

  memset_s(&cmd_data, sizeof(cmd_data), 0, sizeof(cmd_data));
  ret = memcpy_s(&cmd_data.peer_nmi, ETHER_ADDR_LEN, &ranging_inst->peer_addr,
                 ETHER_ADDR_LEN);
  if (ret != BCME_OK) {
    WL_ERR(("Failed to copy ranging peer addr\n"));
    goto fail;
  }

  /* Program local and ranging availability windows for this peer */
  cmd_data.avail_period = NAN_RANGING_PERIOD;
  ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg), cfg, &cmd_data,
                                WL_AVAIL_LOCAL);
  if (ret != BCME_OK) {
    WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
    goto fail;
  }

  ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg), cfg, &cmd_data,
                                WL_AVAIL_RANGING);
  if (unlikely(ret)) {
    WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
    goto fail;
  }

  /* nan_buf_size still equals NAN_IOCTL_BUF_SIZE here; it is only
   * decremented after this allocation (see matching MFREE below).
   */
  nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  if (!nan_buf) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    ret = BCME_NOMEM;
    goto fail;
  }

  nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  nan_buf->count = 0;
  nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

  sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
  sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  if (range_cmd == NAN_RANGE_REQ_CMD) {
    sub_cmd->id = htod16(WL_NAN_CMD_RANGE_REQUEST);
    sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_req_t);
    range_req = (wl_nan_range_req_t *)(sub_cmd->data);
    /* ranging config */
    range_req->peer = ranging_inst->peer_addr;
    if (svc) {
      range_req->interval = svc->ranging_interval;
      /* Limits are in cm from host */
      range_req->ingress = svc->ingress_limit;
      range_req->egress = svc->egress_limit;
    }
    range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
  } else {
    /* range response config */
    sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
    sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_resp_t);
    range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
    range_resp->range_id = ranging_inst->range_id;
    range_resp->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
    if (accept_req) {
      range_resp->status = NAN_RNG_REQ_ACCEPTED_BY_HOST;
    } else {
      range_resp->status = NAN_RNG_REQ_REJECTED_BY_HOST;
    }
    nan_buf->is_set = true;
  }

  nan_buf_size -=
      (sub_cmd->len + OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
  nan_buf->count++;

  memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
  ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
                                (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
  if (unlikely(ret) || unlikely(status)) {
    WL_ERR(("nan ranging failed ret = %d status = %d\n", ret, status));
    ret = (ret == BCME_OK) ? status : ret;
    goto fail;
  }
  WL_TRACE(("nan ranging trigger successful\n"));
  if (range_cmd == NAN_RANGE_REQ_CMD) {
    WL_MEM(("Ranging Req Triggered"
            " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
            MAC2STRDBG(&ranging_inst->peer_addr), range_req->indication,
            range_req->ingress, range_req->egress));
  } else {
    WL_MEM(("Ranging Resp Triggered"
            " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
            MAC2STRDBG(&ranging_inst->peer_addr), range_resp->indication,
            range_resp->ingress, range_resp->egress));
  }

  /* check the response buff for request */
  if (range_cmd == NAN_RANGE_REQ_CMD) {
    /* Extract the firmware-assigned range id from the response */
    ret =
        process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
                         &ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
    WL_INFORM_MEM(
        ("ranging instance returned %d\n", ranging_inst->range_id));
  }
  /* Preventing continuous range requests */
  ranging_inst->range_status = NAN_RANGING_IN_PROGRESS;

fail:
  if (nan_buf) {
    /* Allocation was made before nan_buf_size was decremented, so the
     * original constant is the correct free size.
     */
    MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  }

  NAN_DBG_EXIT();
  return ret;
}
4095 #endif /* WL_NAN_DISC_CACHE */
4096
/*
 * Allocation callback handed to the bcmbloom library.
 * ctx is unused; returns NULL on allocation failure.
 */
static void *wl_nan_bloom_alloc(void *ctx, uint size)
{
  uint8 *buf;
  BCM_REFERENCE(ctx);

  buf = kmalloc(size, GFP_KERNEL);
  if (!buf) {
    /* kmalloc() already returned NULL; the previous explicit
     * "buf = NULL" re-assignment was redundant and has been dropped.
     */
    WL_ERR(("%s: memory allocation failed\n", __func__));
  }
  return buf;
}
4109
/*
 * Free callback handed to the bcmbloom library.
 * ctx and size are unused (kfree does not need the size).
 */
static void wl_nan_bloom_free(void *ctx, void *buf, uint size)
{
  BCM_REFERENCE(ctx);
  BCM_REFERENCE(size);
  /* kfree(NULL) is a no-op, so no NULL guard is needed */
  kfree(buf);
}
4118
wl_nan_hash(void * ctx,uint index,const uint8 * input,uint input_len)4119 static uint wl_nan_hash(void *ctx, uint index, const uint8 *input,
4120 uint input_len)
4121 {
4122 uint8 *filter_idx = (uint8 *)ctx;
4123 uint8 i = (*filter_idx * WL_NAN_HASHES_PER_BLOOM) + (uint8)index;
4124 uint b = 0;
4125
4126 /* Steps 1 and 2 as explained in Section 6.2 */
4127 /* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
4128 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
4129 b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
4130 b = hndcrc32((uint8 *)input, input_len, b);
4131 GCC_DIAGNOSTIC_POP();
4132 /* Obtain the last 2 bytes of the CRC32 output */
4133 b &= NAN_BLOOM_CRC32_MASK;
4134
4135 /* Step 3 is completed by bcmbloom functions */
4136 return b;
4137 }
4138
/*
 * Create a NAN bloom filter of the given size and register the
 * WL_NAN_HASHES_PER_BLOOM hash callbacks with it.
 *
 * Returns BCME_OK on success or the bcmbloom error code on failure.
 */
static int wl_nan_bloom_create(bcm_bloom_filter_t **bp, uint *idx, uint size)
{
  int err;
  uint hash_num;

  err = bcm_bloom_create(wl_nan_bloom_alloc, wl_nan_bloom_free, idx,
                         WL_NAN_HASHES_PER_BLOOM, size, bp);
  if (err != BCME_OK) {
    goto exit;
  }

  /* Populate bloom filter with hash functions */
  for (hash_num = 0; hash_num < WL_NAN_HASHES_PER_BLOOM; hash_num++) {
    err = bcm_bloom_add_hash(*bp, wl_nan_hash, &hash_num);
    if (err) {
      WL_ERR(("bcm_bloom_add_hash failed\n"));
      goto exit;
    }
  }
exit:
  return err;
}
4161
/*
 * Build the service discovery (publish/subscribe) parameter block for a
 * batched NAN iovar.
 *
 * Fills the fixed wl_nan_sd_params_t fields from cmd_data, then packs the
 * optional XTLVs (service info, match filters, SRF, security material,
 * SDE control) into sub_cmd->data. On success sub_cmd->len covers the
 * packed data and *nan_buf_size is reduced by the space consumed.
 *
 * Fixes vs. previous revision:
 *  - The "Invalid SRF Type" and "Invalid MAC Addr/Too many" error paths
 *    jumped to fail without setting ret, returning BCME_OK on failure;
 *    they now return BCME_BADARG.
 *  - The bloom filter allocated via wl_nan_bloom_create() was never
 *    destroyed, leaking on both success and failure; it is now released
 *    in the common exit path.
 *
 * Returns BCME_OK on success or a BCME_/-errno error code on failure.
 */
static int wl_cfgnan_sd_params_handler(struct net_device *ndev,
                                       nan_discover_cmd_data_t *cmd_data,
                                       uint16 cmd_id, void *p_buf,
                                       uint16 *nan_buf_size)
{
  s32 ret = BCME_OK;
  uint8 *pxtlv, *srf = NULL, *srf_mac = NULL, *srftmp = NULL;
  uint16 buflen_avail;
  bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t *)(p_buf);
  wl_nan_sd_params_t *sd_params = (wl_nan_sd_params_t *)sub_cmd->data;
  uint16 srf_size = 0;
  uint bloom_size, a;
  bcm_bloom_filter_t *bp = NULL;
  /* Bloom filter index default, indicates it has not been set */
  uint bloom_idx = 0xFFFFFFFF;
  uint16 bloom_len = NAN_BLOOM_LENGTH_DEFAULT;
  /* srf_ctrl_size = bloom_len + srf_control field */
  uint16 srf_ctrl_size = bloom_len + 1;

  dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
  struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
  BCM_REFERENCE(cfg);

  NAN_DBG_ENTER();

  if (cmd_data->period) {
    sd_params->awake_dw = cmd_data->period;
  }
  sd_params->period = 1;

  if (cmd_data->ttl) {
    sd_params->ttl = cmd_data->ttl;
  } else {
    sd_params->ttl = WL_NAN_TTL_UNTIL_CANCEL;
  }

  sd_params->flags = 0;
  sd_params->flags = cmd_data->flags;

  /* Nan Service Based event suppression Flags */
  if (cmd_data->recv_ind_flag) {
    /* BIT0 - If set, host wont rec event "terminated" */
    if (CHECK_BIT(cmd_data->recv_ind_flag,
                  WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT)) {
      sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED;
    }

    /* BIT1 - If set, host wont receive match expiry evt */
    /* Exp not yet supported */
    if (CHECK_BIT(cmd_data->recv_ind_flag,
                  WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT)) {
      WL_DBG(("Need to add match expiry event\n"));
    }
    /* BIT2 - If set, host wont rec event "receive" */
    if (CHECK_BIT(cmd_data->recv_ind_flag,
                  WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT)) {
      sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE;
    }
    /* BIT3 - If set, host wont rec event "replied" */
    if (CHECK_BIT(cmd_data->recv_ind_flag,
                  WL_NAN_EVENT_SUPPRESS_REPLIED_BIT)) {
      sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
    }
  }
  if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
    sd_params->instance_id = cmd_data->pub_id;
    if (cmd_data->service_responder_policy) {
      /* Do not disturb avail if dam is supported */
      if (FW_SUPPORTED(dhdp, autodam)) {
        /* Nan Accept policy: Per service basis policy
         * Based on this policy(ALL/NONE), responder side
         * will send ACCEPT/REJECT
         * If set, auto datapath responder will be sent by FW
         */
        sd_params->flags |= WL_NAN_SVC_CTRL_AUTO_DPRESP;
      } else {
        WL_ERR(("svc specifiv auto dp resp is not"
                " supported in non-auto dam fw\n"));
      }
    }
  } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
    sd_params->instance_id = cmd_data->sub_id;
  } else {
    ret = BCME_USAGE_ERROR;
    WL_ERR(("wrong command id = %d \n", cmd_id));
    goto fail;
  }

  if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
      (cmd_data->svc_hash.data)) {
    ret =
        memcpy_s((uint8 *)sd_params->svc_hash, sizeof(sd_params->svc_hash),
                 cmd_data->svc_hash.data, cmd_data->svc_hash.dlen);
    if (ret != BCME_OK) {
      WL_ERR(("Failed to copy svc hash\n"));
      goto fail;
    }
#ifdef WL_NAN_DEBUG
    prhex("hashed svc name", cmd_data->svc_hash.data,
          cmd_data->svc_hash.dlen);
#endif /* WL_NAN_DEBUG */
  } else {
    ret = BCME_ERROR;
    WL_ERR(("invalid svc hash data or length = %d\n",
            cmd_data->svc_hash.dlen));
    goto fail;
  }

  /* check if ranging support is present in firmware */
  if ((cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) &&
      !FW_SUPPORTED(dhdp, nanrange)) {
    WL_ERR(("Service requires ranging but fw doesnt support it\n"));
    ret = BCME_UNSUPPORTED;
    goto fail;
  }

  /* Optional parameters: fill the sub_command block with service descriptor
   * attr */
  sub_cmd->id = htod16(cmd_id);
  sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  sub_cmd->len =
      sizeof(sub_cmd->u.options) + OFFSETOF(wl_nan_sd_params_t, optional[0]);
  pxtlv = (uint8 *)&sd_params->optional[0];

  *nan_buf_size -= sub_cmd->len;
  buflen_avail = *nan_buf_size;

  if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
    WL_TRACE(("optional svc_info present, pack it\n"));
    ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_SD_SVC_INFO,
                              cmd_data->svc_info.dlen,
                              cmd_data->svc_info.data,
                              BCM_XTLV_OPTION_ALIGN32);
    if (unlikely(ret)) {
      WL_ERR(
          ("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__));
      goto fail;
    }
  }

  if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
    WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
              cmd_data->sde_svc_info.dlen));
    ret = bcm_pack_xtlv_entry(
        &pxtlv, nan_buf_size, WL_NAN_XTLV_SD_SDE_SVC_INFO,
        cmd_data->sde_svc_info.dlen, cmd_data->sde_svc_info.data,
        BCM_XTLV_OPTION_ALIGN32);
    if (unlikely(ret)) {
      WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
      goto fail;
    }
  }

  if (cmd_data->tx_match.dlen) {
    WL_TRACE(("optional tx match filter presnet (len=%d)\n",
              cmd_data->tx_match.dlen));
    ret = bcm_pack_xtlv_entry(
        &pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_MATCH_TX,
        cmd_data->tx_match.dlen, cmd_data->tx_match.data,
        BCM_XTLV_OPTION_ALIGN32);
    if (unlikely(ret)) {
      WL_ERR(("%s: failed on xtlv_pack for tx match filter\n",
              __FUNCTION__));
      goto fail;
    }
  }

  if (cmd_data->life_count) {
    WL_TRACE(("optional life count is present, pack it\n"));
    ret = bcm_pack_xtlv_entry(
        &pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT,
        sizeof(cmd_data->life_count), &cmd_data->life_count,
        BCM_XTLV_OPTION_ALIGN32);
    if (unlikely(ret)) {
      WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n",
              __FUNCTION__));
      goto fail;
    }
  }

  if (cmd_data->use_srf) {
    uint8 srf_control = 0;
    /* set include bit */
    if (cmd_data->srf_include == true) {
      srf_control |= 0x2;
    }

    if (!ETHER_ISNULLADDR(&cmd_data->mac_list.list) &&
        (cmd_data->mac_list.num_mac_addr < NAN_SRF_MAX_MAC)) {
      if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
        /* mac list */
        srf_size = (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN) +
                   NAN_SRF_CTRL_FIELD_LEN;
        WL_TRACE(("srf size = %d\n", srf_size));

        srf_mac = MALLOCZ(cfg->osh, srf_size);
        if (srf_mac == NULL) {
          WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
          ret = -ENOMEM;
          goto fail;
        }
        ret = memcpy_s(srf_mac, NAN_SRF_CTRL_FIELD_LEN, &srf_control,
                       NAN_SRF_CTRL_FIELD_LEN);
        if (ret != BCME_OK) {
          WL_ERR(("Failed to copy srf control\n"));
          goto fail;
        }
        ret = memcpy_s(srf_mac + 1, (srf_size - NAN_SRF_CTRL_FIELD_LEN),
                       cmd_data->mac_list.list,
                       (srf_size - NAN_SRF_CTRL_FIELD_LEN));
        if (ret != BCME_OK) {
          WL_ERR(("Failed to copy srf control mac list\n"));
          goto fail;
        }
        ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
                                  WL_NAN_XTLV_CFG_SR_FILTER, srf_size,
                                  srf_mac, BCM_XTLV_OPTION_ALIGN32);
        if (unlikely(ret)) {
          WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SR_FILTER\n",
                  __FUNCTION__));
          goto fail;
        }
      } else if (cmd_data->srf_type == SRF_TYPE_BLOOM_FILTER) {
        /* Create bloom filter */
        srf = MALLOCZ(cfg->osh, srf_ctrl_size);
        if (srf == NULL) {
          WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
          ret = -ENOMEM;
          goto fail;
        }
        /* Bloom filter */
        srf_control |= 0x1;
        /* Instance id must be from 1 to 255, 0 is Reserved */
        if (sd_params->instance_id == NAN_ID_RESERVED) {
          WL_ERR(
              ("Invalid instance id: %d\n", sd_params->instance_id));
          ret = BCME_BADARG;
          goto fail;
        }
        if (bloom_idx == 0xFFFFFFFF) {
          bloom_idx = sd_params->instance_id % 0x4;
        } else {
          WL_ERR(("Invalid bloom_idx\n"));
          ret = BCME_BADARG;
          goto fail;
        }
        srf_control |= bloom_idx << 0x2;

        ret = wl_nan_bloom_create(&bp, &bloom_idx, bloom_len);
        if (unlikely(ret)) {
          WL_ERR(("%s: Bloom create failed\n", __FUNCTION__));
          goto fail;
        }

        srftmp = cmd_data->mac_list.list;
        for (a = 0; a < cmd_data->mac_list.num_mac_addr; a++) {
          ret = bcm_bloom_add_member(bp, srftmp, ETHER_ADDR_LEN);
          if (unlikely(ret)) {
            WL_ERR(
                ("%s: Cannot add to bloom filter\n", __FUNCTION__));
            goto fail;
          }
          srftmp += ETHER_ADDR_LEN;
        }

        ret = memcpy_s(srf, NAN_SRF_CTRL_FIELD_LEN, &srf_control,
                       NAN_SRF_CTRL_FIELD_LEN);
        if (ret != BCME_OK) {
          WL_ERR(("Failed to copy srf control\n"));
          goto fail;
        }
        ret = bcm_bloom_get_filter_data(
            bp, bloom_len, (srf + NAN_SRF_CTRL_FIELD_LEN), &bloom_size);
        if (unlikely(ret)) {
          WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__));
          goto fail;
        }
        ret = bcm_pack_xtlv_entry(
            &pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SR_FILTER,
            srf_ctrl_size, srf, BCM_XTLV_OPTION_ALIGN32);
        if (ret != BCME_OK) {
          WL_ERR(("Failed to pack SR FILTER data, ret = %d\n", ret));
          goto fail;
        }
      } else {
        WL_ERR(("Invalid SRF Type = %d !!!\n", cmd_data->srf_type));
        /* Fix: previously fell through with ret == BCME_OK */
        ret = BCME_BADARG;
        goto fail;
      }
    } else {
      WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
              cmd_data->mac_list.num_mac_addr));
      /* Fix: previously fell through with ret == BCME_OK */
      ret = BCME_BADARG;
      goto fail;
    }
  }

  if (cmd_data->rx_match.dlen) {
    WL_TRACE(("optional rx match filter is present, pack it\n"));
    ret = bcm_pack_xtlv_entry(
        &pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_MATCH_RX,
        cmd_data->rx_match.dlen, cmd_data->rx_match.data,
        BCM_XTLV_OPTION_ALIGN32);
    if (unlikely(ret)) {
      WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__));
      goto fail;
    }
  }

  /* Security elements */
  if (cmd_data->csid) {
    WL_TRACE(("Cipher suite type is present, pack it\n"));
    ret = bcm_pack_xtlv_entry(
        &pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_CSID,
        sizeof(nan_sec_csid_e), (uint8 *)&cmd_data->csid,
        BCM_XTLV_OPTION_ALIGN32);
    if (unlikely(ret)) {
      WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
      goto fail;
    }
  }

  if (cmd_data->ndp_cfg.security_cfg) {
    if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
        (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
      if (cmd_data->key.data && cmd_data->key.dlen) {
        WL_TRACE(("optional pmk present, pack it\n"));
        ret = bcm_pack_xtlv_entry(
            &pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_PMK,
            cmd_data->key.dlen, cmd_data->key.data,
            BCM_XTLV_OPTION_ALIGN32);
        if (unlikely(ret)) {
          WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
                  __FUNCTION__));
          goto fail;
        }
      }
    } else {
      WL_ERR(("Invalid security key type\n"));
      ret = BCME_BADARG;
      goto fail;
    }
  }

  if (cmd_data->scid.data && cmd_data->scid.dlen) {
    WL_TRACE(("optional scid present, pack it\n"));
    ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
                              WL_NAN_XTLV_CFG_SEC_SCID, cmd_data->scid.dlen,
                              cmd_data->scid.data, BCM_XTLV_OPTION_ALIGN32);
    if (unlikely(ret)) {
      WL_ERR(
          ("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__));
      goto fail;
    }
  }

  if (cmd_data->sde_control_config) {
    ret = bcm_pack_xtlv_entry(
        &pxtlv, nan_buf_size, WL_NAN_XTLV_SD_SDE_CONTROL, sizeof(uint16),
        (uint8 *)&cmd_data->sde_control_flag, BCM_XTLV_OPTION_ALIGN32);
    if (ret != BCME_OK) {
      WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n",
              __FUNCTION__));
      goto fail;
    }
  }

  sub_cmd->len += (buflen_avail - *nan_buf_size);

fail:
  /* Fix: release the bloom filter (previously leaked on every path) */
  if (bp) {
    bcm_bloom_destroy(&bp, wl_nan_bloom_free);
  }

  if (srf) {
    MFREE(cfg->osh, srf, srf_ctrl_size);
  }

  if (srf_mac) {
    MFREE(cfg->osh, srf_mac, srf_size);
  }
  NAN_DBG_EXIT();
  return ret;
}
4540
/*
 * Accumulate into *data_size the 32-bit aligned space required by every
 * optional discovery XTLV that cmd_data carries (service info, match
 * filters, SRF, security material, SDE control, life count).
 *
 * Always returns BCME_OK.
 */
static int wl_cfgnan_aligned_data_size_of_opt_disc_params(
    uint16 *data_size, nan_discover_cmd_data_t *cmd_data)
{
  uint16 size = *data_size;

  if (cmd_data->svc_info.dlen) {
    size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->sde_svc_info.dlen) {
    size +=
        ALIGN_SIZE(cmd_data->sde_svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->tx_match.dlen) {
    size += ALIGN_SIZE(cmd_data->tx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->rx_match.dlen) {
    size += ALIGN_SIZE(cmd_data->rx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->use_srf) {
    if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
      size += (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN) +
              NAN_SRF_CTRL_FIELD_LEN;
    } else { /* Bloom filter type */
      size += NAN_BLOOM_LENGTH_DEFAULT + 1;
    }
    size += ALIGN_SIZE(size + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->csid) {
    size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->key.dlen) {
    size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->scid.dlen) {
    size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->sde_control_config) {
    size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }
  if (cmd_data->life_count) {
    size +=
        ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 0x4);
  }

  *data_size = size;
  return BCME_OK;
}
4589
4590 static int
wl_cfgnan_aligned_data_size_of_opt_dp_params(uint16 * data_size,nan_datapath_cmd_data_t * cmd_data)4591 wl_cfgnan_aligned_data_size_of_opt_dp_params(uint16 *data_size,
4592 nan_datapath_cmd_data_t *cmd_data)
4593 {
4594 s32 ret = BCME_OK;
4595 if (cmd_data->svc_info.dlen) {
4596 *data_size +=
4597 ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 0x4);
4598 }
4599 if (cmd_data->key.dlen) {
4600 *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 0x4);
4601 }
4602 if (cmd_data->csid) {
4603 *data_size +=
4604 ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 0x4);
4605 }
4606
4607 *data_size += ALIGN_SIZE(WL_NAN_SVC_HASH_LEN + NAN_XTLV_ID_LEN_SIZE, 0x4);
4608 return ret;
4609 }
/*
 * Query firmware for an existing publish/subscribe service instance.
 *
 * Issues a single-sub-command "get" batch carrying the instance id taken
 * from cmd_data (pub_id or sub_id, selected by cmd_id). The firmware
 * status is reported through cmd_data->status.
 *
 * Returns BCME_OK when the query succeeded, otherwise an error code.
 */
int wl_cfgnan_svc_get_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
                              uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
{
  s32 err = BCME_OK;
  uint32 inst_id;
  bcm_iov_batch_buf_t *iov_buf = NULL;
  bcm_iov_batch_subcmd_t *sub_cmd;
  uint8 *out_buf = NULL;
  uint16 buf_size = WL_NAN_OBUF_DATA_OFFSET + sizeof(inst_id);

  NAN_DBG_ENTER();

  iov_buf = MALLOCZ(cfg->osh, buf_size);
  if (iov_buf == NULL) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    err = BCME_NOMEM;
    goto fail;
  }

  out_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
  if (out_buf == NULL) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    err = BCME_NOMEM;
    goto fail;
  }

  /* Pick the instance id matching the command */
  if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
    inst_id = cmd_data->pub_id;
  } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
    inst_id = cmd_data->sub_id;
  } else {
    err = BCME_USAGE_ERROR;
    WL_ERR(("wrong command id = %u\n", cmd_id));
    goto fail;
  }

  /* "get" batch (is_set = false) with one sub-command */
  iov_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  iov_buf->count = 1;
  iov_buf->is_set = false;
  sub_cmd = (bcm_iov_batch_subcmd_t *)(&iov_buf->cmds[0]);
  sub_cmd->id = htod16(cmd_id);
  sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(inst_id);
  sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

  err = memcpy_s(sub_cmd->data, (buf_size - WL_NAN_OBUF_DATA_OFFSET),
                 &inst_id, sizeof(inst_id));
  if (err != BCME_OK) {
    WL_ERR(("Failed to copy instance id, ret = %d\n", err));
    goto fail;
  }

  err = wl_cfgnan_execute_ioctl(ndev, cfg, iov_buf, buf_size,
                                &(cmd_data->status), out_buf,
                                NAN_IOCTL_BUF_SIZE_LARGE);
  if (unlikely(err) || unlikely(cmd_data->status)) {
    WL_ERR(("nan svc check failed ret = %d status = %d\n", err,
            cmd_data->status));
    goto fail;
  } else {
    WL_DBG(("nan svc check successful..proceed to update\n"));
  }

fail:
  if (iov_buf) {
    MFREE(cfg->osh, iov_buf, buf_size);
  }

  if (out_buf) {
    MFREE(cfg->osh, out_buf, NAN_IOCTL_BUF_SIZE_LARGE);
  }
  NAN_DBG_EXIT();
  return err;
}
4684
/*
 * Create or update a NAN publish/subscribe service in firmware.
 *
 * For updates (cmd_data->svc_update set) the service is first looked up
 * via wl_cfgnan_svc_get_handler(); WL_NAN_E_BAD_INSTANCE aborts the
 * update. The iovar buffer is sized from the fixed sd params plus the
 * aligned optional XTLVs, populated by wl_cfgnan_sd_params_handler(),
 * and fired as a "set" batch. On success the service is cached when
 * WL_NAN_DISC_CACHE is enabled.
 *
 * Returns BCME_OK on success or a BCME_ error code on failure.
 */
int wl_cfgnan_svc_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
                          uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
{
  s32 ret = BCME_OK;
  bcm_iov_batch_buf_t *nan_buf = NULL;
  uint16 nan_buf_size;
  uint8 *resp_buf = NULL;
  /* Considering fixed params */
  uint16 data_size =
      WL_NAN_OBUF_DATA_OFFSET + OFFSETOF(wl_nan_sd_params_t, optional[0]);

  if (cmd_data->svc_update) {
    ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
    if (ret != BCME_OK) {
      WL_ERR(("Failed to update svc handler, ret = %d\n", ret));
      goto fail;
    } else {
      /* Ignoring any other svc get error */
      if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
        WL_ERR(("Bad instance status, failed to update svc handler\n"));
        goto fail;
      }
    }
  }

  /* Grow data_size by the aligned size of every optional XTLV */
  ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
  if (unlikely(ret)) {
    WL_ERR(("Failed to get alligned size of optional params\n"));
    goto fail;
  }
  /* nan_buf_size is consumed (decremented) by the params handler below;
   * data_size keeps the original allocation size for the MFREE calls.
   */
  nan_buf_size = data_size;
  NAN_DBG_ENTER();

  nan_buf = MALLOCZ(cfg->osh, data_size);
  if (!nan_buf) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    ret = BCME_NOMEM;
    goto fail;
  }

  resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
  if (!resp_buf) {
    WL_ERR(("%s: memory allocation failed\n", __func__));
    ret = BCME_NOMEM;
    goto fail;
  }
  nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  nan_buf->count = 0;
  nan_buf->is_set = true;

  ret = wl_cfgnan_sd_params_handler(ndev, cmd_data, cmd_id, &nan_buf->cmds[0],
                                    &nan_buf_size);
  if (unlikely(ret)) {
    WL_ERR((" Service discovery params handler failed, ret = %d\n", ret));
    goto fail;
  }

  nan_buf->count++;
  ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
                                &(cmd_data->status), resp_buf,
                                data_size + NAN_IOVAR_NAME_SIZE);
  if (cmd_data->svc_update && (cmd_data->status == BCME_DATA_NOTFOUND)) {
    /* return OK if update tlv data is not present
     * which means nothing to update
     */
    cmd_data->status = BCME_OK;
  }
  if (unlikely(ret) || unlikely(cmd_data->status)) {
    WL_ERR(
        ("nan svc failed ret = %d status = %d\n", ret, cmd_data->status));
    goto fail;
  } else {
    WL_DBG(("nan svc successful\n"));
#ifdef WL_NAN_DISC_CACHE
    ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id,
                                   cmd_data->svc_update);
    if (ret < 0) {
      WL_ERR(("%s: fail to cache svc info, ret=%d\n", __FUNCTION__, ret));
      goto fail;
    }
#endif /* WL_NAN_DISC_CACHE */
  }

fail:
  if (nan_buf) {
    MFREE(cfg->osh, nan_buf, data_size);
  }

  if (resp_buf) {
    MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
  }
  NAN_DBG_EXIT();
  return ret;
}
4779
/*
 * wl_cfgnan_publish_handler() - start (or update) a NAN publish service.
 *
 * Validates the mandatory arguments and forwards the request to
 * wl_cfgnan_svc_handler() with WL_NAN_CMD_SD_PUBLISH. Serialized against
 * other NAN operations via NAN_MUTEX_LOCK().
 *
 * Returns BCME_OK on success or a BCME_* error code.
 */
int wl_cfgnan_publish_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	nan_discover_cmd_data_t *cmd_data)
{
	int ret = BCME_OK;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();
	/*
	 * proceed only if mandatory arguments are present - publisher id,
	 * service hash
	 */
	if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
		(!cmd_data->svc_hash.dlen)) {
		WL_ERR(("mandatory arguments are not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}

	ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_PUBLISH, cmd_data);
	if (ret < 0) {
		WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__, ret));
		goto fail;
	}
	WL_INFORM_MEM(
		("[NAN] Service published for instance id:%d\n", cmd_data->pub_id));

fail:
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
4811
/*
 * wl_cfgnan_subscribe_handler() - start (or update) a NAN subscribe service.
 *
 * Validates mandatory arguments (subscriber id, service hash). On a service
 * update, tears down ranging sessions tied to this service and, if the
 * ranging requirement or ingress/egress limits changed, clears both the
 * firmware and local discovery caches. Optionally forces a continuous
 * ranging indication for geofence RTT, then issues WL_NAN_CMD_SD_SUBSCRIBE
 * through wl_cfgnan_svc_handler().
 *
 * Returns BCME_OK on success or a BCME_* error code.
 */
int wl_cfgnan_subscribe_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg,
	nan_discover_cmd_data_t *cmd_data)
{
	int ret = BCME_OK;
#ifdef WL_NAN_DISC_CACHE
	nan_svc_info_t *svc_info;
	uint8 upd_ranging_required;
#endif /* WL_NAN_DISC_CACHE */
#ifdef RTT_GEOFENCE_CONT
#ifdef RTT_SUPPORT
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
#endif /* RTT_SUPPORT */
#endif /* RTT_GEOFENCE_CONT */

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	/*
	 * proceed only if mandatory arguments are present - subscriber id,
	 * service hash
	 */
	if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
		(!cmd_data->svc_hash.dlen)) {
		WL_ERR(("mandatory arguments are not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}

	/* Check for ranging sessions if any */
	if (cmd_data->svc_update) {
#ifdef WL_NAN_DISC_CACHE
		svc_info = wl_cfgnan_get_svc_inst(cfg, cmd_data->sub_id, 0);
		if (svc_info) {
			wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
			/* terminate ranging sessions for this svc, avoid clearing svc cache
			 */
			wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
			WL_DBG(("Ranging sessions handled for svc update\n"));
			upd_ranging_required =
				!!(cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED);
			/* Any change in the ranging requirement or in the RSSI
			 * ingress/egress limits invalidates previously cached
			 * discovery results, both in firmware and locally.
			 */
			if ((svc_info->ranging_required ^ upd_ranging_required) ||
				(svc_info->ingress_limit != cmd_data->ingress_limit) ||
				(svc_info->egress_limit != cmd_data->egress_limit)) {
				/* Clear cache info in Firmware */
				ret = wl_cfgnan_clear_disc_cache(cfg, cmd_data->sub_id);
				if (ret != BCME_OK) {
					WL_ERR(("couldn't send clear cache to FW \n"));
					goto fail;
				}
				/* Invalidate local cache info */
				wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
			}
		}
#endif /* WL_NAN_DISC_CACHE */
	}

#ifdef RTT_GEOFENCE_CONT
#ifdef RTT_SUPPORT
	/* Override ranging Indication */
	if (rtt_status->geofence_cfg.geofence_cont) {
		if (cmd_data->ranging_indication != NAN_RANGE_INDICATION_NONE) {
			cmd_data->ranging_indication = NAN_RANGE_INDICATION_CONT;
		}
	}
#endif /* RTT_SUPPORT */
#endif /* RTT_GEOFENCE_CONT */
	ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
	if (ret < 0) {
		WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
		goto fail;
	}
	WL_INFORM_MEM(
		("[NAN] Service subscribed for instance id:%d\n", cmd_data->sub_id));

fail:
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
4893
/*
 * wl_cfgnan_cancel_handler() - pack a cancel publish/subscribe sub-command.
 *
 * Fills the batch sub-command at p_buf with the instance id matching
 * cmd_id (publish or subscribe) and shrinks *nan_buf_size to account for
 * the bytes consumed.
 *
 * Returns BCME_OK on success or a BCME_* error code.
 */
static int wl_cfgnan_cancel_handler(nan_discover_cmd_data_t *cmd_data,
	uint16 cmd_id, void *p_buf,
	uint16 *nan_buf_size)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd;
	wl_nan_instance_id_t instance_id;

	NAN_DBG_ENTER();

	if (p_buf == NULL) {
		WL_ERR(("nan_iov_buf is NULL\n"));
		ret = BCME_ERROR;
		goto fail;
	}
	sub_cmd = (bcm_iov_batch_subcmd_t *)(p_buf);

	/* Pick the instance id that matches the requested cancel command */
	switch (cmd_id) {
	case WL_NAN_CMD_SD_CANCEL_PUBLISH:
		instance_id = cmd_data->pub_id;
		break;
	case WL_NAN_CMD_SD_CANCEL_SUBSCRIBE:
		instance_id = cmd_data->sub_id;
		break;
	default:
		ret = BCME_USAGE_ERROR;
		WL_ERR(("wrong command id = %u\n", cmd_id));
		goto fail;
	}

	/* Fill the sub_command block */
	sub_cmd->id = htod16(cmd_id);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	ret = memcpy_s(sub_cmd->data, *nan_buf_size, &instance_id,
		sizeof(instance_id));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
		goto fail;
	}
	/* adjust iov data len to the end of last data record */
	*nan_buf_size -=
		(sub_cmd->len + OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
	WL_INFORM_MEM(
		("[NAN] Service with instance id:%d cancelled\n", instance_id));

fail:
	NAN_DBG_EXIT();
	return ret;
}
4941
/*
 * wl_cfgnan_cancel_pub_handler() - cancel an active NAN publish instance.
 *
 * Builds a WL_NAN_CMD_SD_CANCEL_PUBLISH batch iovar for cmd_data->pub_id,
 * clears the local service cache for it, sends the command to firmware and,
 * on success, releases the instance id. The firmware status is reported
 * separately through cmd_data->status.
 *
 * Returns BCME_OK on success or a BCME_* error code.
 */
int wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg,
	nan_discover_cmd_data_t *cmd_data)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	/* Remaining space available for sub-commands */
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	/* proceed only if mandatory argument is present - publisher id */
	if (!cmd_data->pub_id) {
		WL_ERR(("mandatory argument is not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}

#ifdef WL_NAN_DISC_CACHE
	wl_cfgnan_clear_svc_cache(cfg, cmd_data->pub_id);
#endif /* WL_NAN_DISC_CACHE */
	ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
		&nan_buf->cmds[0], &nan_buf_size);
	if (unlikely(ret)) {
		WL_ERR(("cancel publish failed\n"));
		goto fail;
	}
	nan_buf->is_set = true;
	nan_buf->count++;

	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
		&(cmd_data->status), (void *)resp_buf,
		NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan cancel publish failed ret = %d status = %d\n", ret,
			cmd_data->status));
		goto fail;
	}
	WL_DBG(("nan cancel publish successfull\n"));
	/* Release the instance id only after firmware confirms the cancel */
	wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}

	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
5004
/*
 * wl_cfgnan_cancel_sub_handler() - cancel an active NAN subscribe instance.
 *
 * Tears down ranging sessions and discovery/service caches tied to
 * cmd_data->sub_id, builds a WL_NAN_CMD_SD_CANCEL_SUBSCRIBE batch iovar,
 * sends it to firmware and, on success, releases the instance id. The
 * firmware status is reported separately through cmd_data->status.
 *
 * Returns BCME_OK on success or a BCME_* error code.
 */
int wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg,
	nan_discover_cmd_data_t *cmd_data)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	/* Remaining space available for sub-commands */
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	/* proceed only if mandatory argument is present - subscriber id */
	if (!cmd_data->sub_id) {
		WL_ERR(("mandatory argument is not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}

#ifdef WL_NAN_DISC_CACHE
	/* terminate ranging sessions for this svc */
	wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
	wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
	/* clear svc cache for the service */
	wl_cfgnan_clear_svc_cache(cfg, cmd_data->sub_id);
	wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
#endif /* WL_NAN_DISC_CACHE */

	ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE,
		&nan_buf->cmds[0], &nan_buf_size);
	if (unlikely(ret)) {
		WL_ERR(("cancel subscribe failed\n"));
		goto fail;
	}
	nan_buf->is_set = true;
	nan_buf->count++;

	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
		&(cmd_data->status), (void *)resp_buf,
		NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n", ret,
			cmd_data->status));
		goto fail;
	}
	WL_DBG(("subscribe cancel successfull\n"));
	/* Release the instance id only after firmware confirms the cancel */
	wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}

	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
5073
wl_cfgnan_transmit_handler(struct net_device * ndev,struct bcm_cfg80211 * cfg,nan_discover_cmd_data_t * cmd_data)5074 int wl_cfgnan_transmit_handler(struct net_device *ndev,
5075 struct bcm_cfg80211 *cfg,
5076 nan_discover_cmd_data_t *cmd_data)
5077 {
5078 s32 ret = BCME_OK;
5079 bcm_iov_batch_buf_t *nan_buf = NULL;
5080 wl_nan_sd_transmit_t *sd_xmit = NULL;
5081 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
5082 bool is_lcl_id = FALSE;
5083 bool is_dest_id = FALSE;
5084 bool is_dest_mac = FALSE;
5085 uint16 buflen_avail;
5086 uint8 *pxtlv;
5087 uint16 nan_buf_size;
5088 uint8 *resp_buf = NULL;
5089 /* Considering fixed params */
5090 uint16 data_size =
5091 WL_NAN_OBUF_DATA_OFFSET + OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
5092 data_size = ALIGN_SIZE(data_size, 0x4);
5093 ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
5094 if (unlikely(ret)) {
5095 WL_ERR(("Failed to get alligned size of optional params\n"));
5096 goto fail;
5097 }
5098 NAN_DBG_ENTER();
5099 NAN_MUTEX_LOCK();
5100 nan_buf_size = data_size;
5101 nan_buf = MALLOCZ(cfg->osh, data_size);
5102 if (!nan_buf) {
5103 WL_ERR(("%s: memory allocation failed\n", __func__));
5104 ret = BCME_NOMEM;
5105 goto fail;
5106 }
5107
5108 resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
5109 if (!resp_buf) {
5110 WL_ERR(("%s: memory allocation failed\n", __func__));
5111 ret = BCME_NOMEM;
5112 goto fail;
5113 }
5114
5115 /* nan transmit */
5116 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5117 nan_buf->count = 0;
5118 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
5119 /*
5120 * proceed only if mandatory arguments are present - subscriber id,
5121 * publisher id, mac address
5122 */
5123 if ((!cmd_data->local_id) || (!cmd_data->remote_id) ||
5124 ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
5125 WL_ERR(("mandatory arguments are not present\n"));
5126 ret = -EINVAL;
5127 goto fail;
5128 }
5129
5130 sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
5131 sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);
5132
5133 /* local instance id must be from 1 to 255, 0 is reserved */
5134 if (cmd_data->local_id == NAN_ID_RESERVED) {
5135 WL_ERR(("Invalid local instance id: %d\n", cmd_data->local_id));
5136 ret = BCME_BADARG;
5137 goto fail;
5138 }
5139 sd_xmit->local_service_id = cmd_data->local_id;
5140 is_lcl_id = TRUE;
5141
5142 /* remote instance id must be from 1 to 255, 0 is reserved */
5143 if (cmd_data->remote_id == NAN_ID_RESERVED) {
5144 WL_ERR(("Invalid remote instance id: %d\n", cmd_data->remote_id));
5145 ret = BCME_BADARG;
5146 goto fail;
5147 }
5148
5149 sd_xmit->requestor_service_id = cmd_data->remote_id;
5150 is_dest_id = TRUE;
5151
5152 if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
5153 ret = memcpy_s(&sd_xmit->destination_addr, ETHER_ADDR_LEN,
5154 &cmd_data->mac_addr, ETHER_ADDR_LEN);
5155 if (ret != BCME_OK) {
5156 WL_ERR(("Failed to copy dest mac address\n"));
5157 goto fail;
5158 }
5159 } else {
5160 WL_ERR(("Invalid ether addr provided\n"));
5161 ret = BCME_BADARG;
5162 goto fail;
5163 }
5164 is_dest_mac = TRUE;
5165
5166 if (cmd_data->priority) {
5167 sd_xmit->priority = cmd_data->priority;
5168 }
5169 sd_xmit->token = cmd_data->token;
5170
5171 if (cmd_data->recv_ind_flag) {
5172 /* BIT0 - If set, host wont rec event "txs" */
5173 if (CHECK_BIT(cmd_data->recv_ind_flag,
5174 WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT)) {
5175 sd_xmit->flags = WL_NAN_FUP_SUPR_EVT_TXS;
5176 }
5177 }
5178 /* Optional parameters: fill the sub_command block with service descriptor
5179 * attr */
5180 sub_cmd->id = htod16(WL_NAN_CMD_SD_TRANSMIT);
5181 sub_cmd->len =
5182 sizeof(sub_cmd->u.options) + OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
5183 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
5184 pxtlv = (uint8 *)&sd_xmit->opt_tlv;
5185
5186 nan_buf_size -=
5187 (sub_cmd->len + OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
5188
5189 buflen_avail = nan_buf_size;
5190
5191 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
5192 bcm_xtlv_t *pxtlv_svc_info = (bcm_xtlv_t *)pxtlv;
5193 ret = bcm_pack_xtlv_entry(
5194 &pxtlv, &nan_buf_size, WL_NAN_XTLV_SD_SVC_INFO,
5195 cmd_data->svc_info.dlen, cmd_data->svc_info.data,
5196 BCM_XTLV_OPTION_ALIGN32);
5197 if (unlikely(ret)) {
5198 WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
5199 __FUNCTION__, ret));
5200 goto fail;
5201 }
5202
5203 /* 0xFF is max length for svc_info */
5204 if (pxtlv_svc_info->len > 0xFF) {
5205 WL_ERR(("Invalid service info length %d\n", (pxtlv_svc_info->len)));
5206 ret = BCME_USAGE_ERROR;
5207 goto fail;
5208 }
5209 sd_xmit->opt_len = (uint8)(pxtlv_svc_info->len);
5210 }
5211 if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
5212 WL_TRACE(("optional sdea svc_info present, pack it\n"));
5213 ret = bcm_pack_xtlv_entry(
5214 &pxtlv, &nan_buf_size, WL_NAN_XTLV_SD_SDE_SVC_INFO,
5215 cmd_data->sde_svc_info.dlen, cmd_data->sde_svc_info.data,
5216 BCM_XTLV_OPTION_ALIGN32);
5217 if (unlikely(ret)) {
5218 WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
5219 goto fail;
5220 }
5221 }
5222
5223 /* Check if all mandatory params are provided */
5224 if (is_lcl_id && is_dest_id && is_dest_mac) {
5225 nan_buf->count++;
5226 sub_cmd->len += (buflen_avail - nan_buf_size);
5227 } else {
5228 WL_ERR(("Missing parameters\n"));
5229 ret = BCME_USAGE_ERROR;
5230 }
5231 nan_buf->is_set = TRUE;
5232 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
5233 &(cmd_data->status), resp_buf,
5234 data_size + NAN_IOVAR_NAME_SIZE);
5235 if (unlikely(ret) || unlikely(cmd_data->status)) {
5236 WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
5237 sd_xmit->token, ret, cmd_data->status));
5238 goto fail;
5239 }
5240 WL_INFORM_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
5241 fail:
5242 if (nan_buf) {
5243 MFREE(cfg->osh, nan_buf, data_size);
5244 }
5245 if (resp_buf) {
5246 MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
5247 }
5248 NAN_MUTEX_UNLOCK();
5249 NAN_DBG_EXIT();
5250 return ret;
5251 }
5252
/*
 * wl_cfgnan_get_capability() - query firmware NAN capabilities.
 *
 * Issues a WL_NAN_CMD_GEN_FW_CAP "get" batch iovar, unpacks the
 * WL_NAN_XTLV_GEN_FW_CAP XTLV from the response and translates the
 * firmware capability structure into the HAL's nan_hal_capabilities_t.
 *
 * Returns BCME_OK on success or a BCME_* error code.
 */
static int wl_cfgnan_get_capability(struct net_device *ndev,
	struct bcm_cfg80211 *cfg,
	nan_hal_capabilities_t *capabilities)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_fw_cap_t *fw_cap = NULL;
	uint16 subcmd_len;
	uint32 status;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	const bcm_xtlv_t *xtlv;
	uint16 type = 0;
	int len = 0;

	NAN_DBG_ENTER();
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size, sizeof(*fw_cap), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	fw_cap = (wl_nan_fw_cap_t *)sub_cmd->data;
	sub_cmd->id = htod16(WL_NAN_CMD_GEN_FW_CAP);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*fw_cap);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;

	/* "get" request: is_set stays false */
	nan_buf->is_set = false;
	memset(resp_buf, 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("get nan fw cap failed ret %d status %d \n", ret, status));
		goto fail;
	}

	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];

	/* check the response buff */
	xtlv = ((const bcm_xtlv_t *)&sub_cmd_resp->data[0]);
	if (!xtlv) {
		ret = BCME_NOTFOUND;
		WL_ERR(("xtlv not found: err = %d\n", ret));
		goto fail;
	}
	/* NOTE(review): len is an int but is unpacked through a uint16
	 * pointer here; only the low 16 bits are written. len is
	 * zero-initialized above so this works on little-endian targets -
	 * confirm if this driver ever builds for big-endian hosts.
	 */
	bcm_xtlv_unpack_xtlv(xtlv, &type, (uint16 *)&len, NULL,
		BCM_XTLV_OPTION_ALIGN32);
	do {
		switch (type) {
		case WL_NAN_XTLV_GEN_FW_CAP:
			/* Bound check before aliasing the response payload */
			if (len > sizeof(wl_nan_fw_cap_t)) {
				ret = BCME_BADARG;
				goto fail;
			}
			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
			fw_cap = (wl_nan_fw_cap_t *)xtlv->data;
			GCC_DIAGNOSTIC_POP();
			break;
		default:
			WL_ERR(("Unknown xtlv: id %u\n", type));
			ret = BCME_ERROR;
			break;
		}
		if (ret != BCME_OK) {
			goto fail;
		}
	} while ((xtlv = bcm_next_xtlv(xtlv, &len, BCM_XTLV_OPTION_ALIGN32)));

	/* Translate firmware capabilities into the HAL structure */
	memset(capabilities, 0, sizeof(nan_hal_capabilities_t));
	capabilities->max_publishes = fw_cap->max_svc_publishes;
	capabilities->max_subscribes = fw_cap->max_svc_subscribes;
	capabilities->max_ndi_interfaces = fw_cap->max_lcl_ndi_interfaces;
	capabilities->max_ndp_sessions = fw_cap->max_ndp_sessions;
	capabilities->max_concurrent_nan_clusters =
		fw_cap->max_concurrent_nan_clusters;
	capabilities->max_service_name_len = fw_cap->max_service_name_len;
	capabilities->max_match_filter_len = fw_cap->max_match_filter_len;
	capabilities->max_total_match_filter_len =
		fw_cap->max_total_match_filter_len;
	capabilities->max_service_specific_info_len =
		fw_cap->max_service_specific_info_len;
	capabilities->max_app_info_len = fw_cap->max_app_info_len;
	capabilities->max_sdea_service_specific_info_len =
		fw_cap->max_sdea_svc_specific_info_len;
	capabilities->max_queued_transmit_followup_msgs =
		fw_cap->max_queued_tx_followup_msgs;
	capabilities->max_subscribe_address = fw_cap->max_subscribe_address;
	capabilities->is_ndp_security_supported = fw_cap->is_ndp_security_supported;
	capabilities->ndp_supported_bands = fw_cap->ndp_supported_bands;
	capabilities->cipher_suites_supported =
		fw_cap->cipher_suites_supported_mask;
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}
5367
/*
 * wl_cfgnan_get_capablities_handler() - report NAN capabilities to HAL.
 *
 * If NAN is already initialized, queries firmware directly. Otherwise it
 * temporarily initializes NAN, queries, then de-initializes again.
 *
 * Two exit labels:
 *   fail: propagate a hard init/deinit error to the caller.
 *   exit: the capability query itself failed - fall back to compile-time
 *         default capabilities and return BCME_OK for backward
 *         compatibility with older HALs.
 */
int wl_cfgnan_get_capablities_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg,
	nan_hal_capabilities_t *capabilities)
{
	s32 ret = BCME_OK;
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

	NAN_DBG_ENTER();

	/* Do not query fw about nan if feature is not supported */
	if (!FW_SUPPORTED(dhdp, nan)) {
		WL_DBG(("NAN is not supported\n"));
		return ret;
	}

	if (cfg->nan_init_state) {
		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
		if (ret != BCME_OK) {
			WL_ERR(
				("NAN init state: %d, failed to get capability from FW[%d]\n",
				cfg->nan_init_state, ret));
			goto exit;
		}
	} else {
		/* Initialize NAN before sending iovar */
		WL_ERR(("Initializing NAN\n"));
		ret = wl_cfgnan_init(cfg);
		if (ret != BCME_OK) {
			WL_ERR(("failed to initialize NAN[%d]\n", ret));
			goto fail;
		}

		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
		if (ret != BCME_OK) {
			WL_ERR(
				("NAN init state: %d, failed to get capability from FW[%d]\n",
				cfg->nan_init_state, ret));
			goto exit;
		}
		WL_ERR(("De-Initializing NAN\n"));
		ret = wl_cfgnan_deinit(cfg, dhdp->up);
		if (ret != BCME_OK) {
			WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
			goto fail;
		}
	}
fail:
	NAN_DBG_EXIT();
	return ret;
exit:
	/* Keeping backward compatibility: fall back to default capabilities */
	capabilities->max_concurrent_nan_clusters = MAX_CONCURRENT_NAN_CLUSTERS;
	capabilities->max_publishes = MAX_PUBLISHES;
	capabilities->max_subscribes = MAX_SUBSCRIBES;
	capabilities->max_service_name_len = MAX_SVC_NAME_LEN;
	capabilities->max_match_filter_len = MAX_MATCH_FILTER_LEN;
	capabilities->max_total_match_filter_len = MAX_TOTAL_MATCH_FILTER_LEN;
	capabilities->max_service_specific_info_len =
		NAN_MAX_SERVICE_SPECIFIC_INFO_LEN;
	capabilities->max_ndi_interfaces = MAX_NDI_INTERFACES;
	capabilities->max_ndp_sessions = MAX_NDP_SESSIONS;
	capabilities->max_app_info_len = MAX_APP_INFO_LEN;
	capabilities->max_queued_transmit_followup_msgs =
		MAX_QUEUED_TX_FOLLOUP_MSGS;
	capabilities->max_sdea_service_specific_info_len = MAX_SDEA_SVC_INFO_LEN;
	capabilities->max_subscribe_address = MAX_SUBSCRIBE_ADDRESS;
	capabilities->cipher_suites_supported =
		WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK;
	capabilities->max_scid_len = MAX_SCID_LEN;
	capabilities->is_ndp_security_supported = true;
	capabilities->ndp_supported_bands = NDP_SUPPORTED_BANDS;
	ret = BCME_OK;
	NAN_DBG_EXIT();
	return ret;
}
5443
/* Returns TRUE when NAN is currently enabled on this cfg instance */
bool wl_cfgnan_check_state(struct bcm_cfg80211 *cfg)
{
	return cfg->nan_enable;
}
5448
wl_cfgnan_init(struct bcm_cfg80211 * cfg)5449 int wl_cfgnan_init(struct bcm_cfg80211 *cfg)
5450 {
5451 s32 ret = BCME_OK;
5452 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
5453 uint32 status;
5454 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
5455 uint8 buf[NAN_IOCTL_BUF_SIZE];
5456 bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t *)buf;
5457
5458 NAN_DBG_ENTER();
5459 if (cfg->nan_init_state) {
5460 WL_ERR(("nan initialized/nmi exists\n"));
5461 return BCME_OK;
5462 }
5463 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5464 nan_buf->count = 0;
5465 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
5466 ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, true);
5467 if (unlikely(ret)) {
5468 WL_ERR(("init handler sub_cmd set failed\n"));
5469 goto fail;
5470 }
5471 nan_buf->count++;
5472 nan_buf->is_set = true;
5473
5474 memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
5475 ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg, nan_buf,
5476 nan_buf_size, &status, (void *)resp_buf,
5477 NAN_IOCTL_BUF_SIZE);
5478 if (unlikely(ret) || unlikely(status)) {
5479 WL_ERR(("nan init handler failed ret %d status %d\n", ret, status));
5480 goto fail;
5481 }
5482
5483 #ifdef WL_NAN_DISC_CACHE
5484 /* malloc for disc result */
5485 cfg->nan_disc_cache = MALLOCZ(cfg->osh, NAN_MAX_CACHE_DISC_RESULT *
5486 sizeof(nan_disc_result_cache));
5487 if (!cfg->nan_disc_cache) {
5488 WL_ERR(("%s: memory allocation failed\n", __func__));
5489 ret = BCME_NOMEM;
5490 goto fail;
5491 }
5492 #endif /* WL_NAN_DISC_CACHE */
5493 cfg->nan_init_state = true;
5494 return ret;
5495 fail:
5496 NAN_DBG_EXIT();
5497 return ret;
5498 }
5499
wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 * cfg)5500 void wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg)
5501 {
5502 uint8 i = 0;
5503 cfg->nan_dp_count = 0;
5504 cfg->nan_init_state = false;
5505 #ifdef WL_NAN_DISC_CACHE
5506 if (cfg->nan_disc_cache) {
5507 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
5508 if (cfg->nan_disc_cache[i].tx_match_filter.data) {
5509 MFREE(cfg->osh, cfg->nan_disc_cache[i].tx_match_filter.data,
5510 cfg->nan_disc_cache[i].tx_match_filter.dlen);
5511 }
5512 if (cfg->nan_disc_cache[i].svc_info.data) {
5513 MFREE(cfg->osh, cfg->nan_disc_cache[i].svc_info.data,
5514 cfg->nan_disc_cache[i].svc_info.dlen);
5515 }
5516 }
5517 MFREE(cfg->osh, cfg->nan_disc_cache,
5518 NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
5519 cfg->nan_disc_cache = NULL;
5520 }
5521 cfg->nan_disc_count = 0;
5522 memset_s(cfg->svc_info, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t), 0,
5523 NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
5524 memset_s(cfg->nan_ranging_info,
5525 NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t), 0,
5526 NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
5527 #endif /* WL_NAN_DISC_CACHE */
5528 return;
5529 }
5530
/*
 * wl_cfgnan_deinit() - de-initialize NAN in firmware and on the host.
 *
 * When the bus is still up, sends the NAN deinit batch iovar to firmware;
 * host-side state is cleaned up unconditionally. On exit, the NMI MAC is
 * released back to the VIF MAC pool unless MAC randomization is enabled.
 *
 * Fix: the failure log after the deinit ioctl previously said
 * "nan init handler failed" (copy-paste from wl_cfgnan_init()).
 *
 * Returns BCME_OK on success or a BCME_* error code.
 */
int wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t *)buf;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	if (!cfg->nan_init_state) {
		WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
		ret = BCME_OK;
		goto fail;
	}

	if (busstate != DHD_BUS_DOWN) {
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

		WL_DBG(("nan deinit\n"));
		ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, false);
		if (unlikely(ret)) {
			WL_ERR(("deinit handler sub_cmd set failed\n"));
		} else {
			nan_buf->count++;
			nan_buf->is_set = true;
			memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
			ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg, nan_buf,
				nan_buf_size, &status,
				(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
			if (unlikely(ret) || unlikely(status)) {
				WL_ERR(("nan deinit handler failed ret %d status %d\n", ret,
					status));
			}
		}
	}
	wl_cfgnan_deinit_cleanup(cfg);

fail:
	/* Return the NMI MAC to the VIF pool when it was not randomized */
	if (!cfg->nancfg.mac_rand && !ETHER_ISNULLADDR(cfg->nan_nmi_mac)) {
		wl_release_vif_macaddr(cfg, cfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
	}
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
5581
wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 * cfg,u8 * mac_addr)5582 static int wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr)
5583 {
5584 int i = 0;
5585 int ret = BCME_OK;
5586 bool rand_mac = cfg->nancfg.mac_rand;
5587 BCM_REFERENCE(i);
5588
5589 if (rand_mac) {
5590 /* ensure nmi != ndi */
5591 do {
5592 RANDOM_BYTES(mac_addr, ETHER_ADDR_LEN);
5593 /* restore mcast and local admin bits to 0 and 1 */
5594 ETHER_SET_UNICAST(mac_addr);
5595 ETHER_SET_LOCALADDR(mac_addr);
5596 i++;
5597 if (i == NAN_RAND_MAC_RETRIES) {
5598 break;
5599 }
5600 } while (eacmp(cfg->nan_nmi_mac, mac_addr) == 0);
5601
5602 if (i == NAN_RAND_MAC_RETRIES) {
5603 if (eacmp(cfg->nan_nmi_mac, mac_addr) == 0) {
5604 WL_ERR(("\nCouldn't generate rand NDI which != NMI\n"));
5605 ret = BCME_NORESOURCE;
5606 goto fail;
5607 }
5608 }
5609 } else {
5610 if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN, mac_addr) != BCME_OK) {
5611 ret = -EINVAL;
5612 WL_ERR(("Failed to get mac addr for NDI\n"));
5613 goto fail;
5614 }
5615 }
5616
5617 fail:
5618 return ret;
5619 }
5620
/*
 * wl_cfgnan_data_path_iface_create_delete_handler() - create or delete
 * a NAN data-path interface (NDI).
 *
 * Create: reserves an NDI slot, picks a MAC address, registers the iface
 * via wl_cfg80211_add_if() and records the name/netdev for later NAN
 * enable. Delete: removes the iface and its bookkeeping; an already
 * missing iface (-ENODEV) is treated as success.
 *
 * Returns BCME_OK on success or a BCME_*/-errno error code.
 */
int wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg,
	char *ifname, uint16 type,
	uint8 busstate)
{
	u8 mac_addr[ETH_ALEN];
	s32 ret = BCME_OK;
	s32 idx;
	struct wireless_dev *wdev;
	NAN_DBG_ENTER();

	if (busstate != DHD_BUS_DOWN) {
		if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
			if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
				WL_ERR(("No free idx for NAN NDI\n"));
				ret = BCME_NORESOURCE;
				goto fail;
			}

			ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
			if (ret != BCME_OK) {
				WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
				goto fail;
			}
			wdev =
				wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN, ifname, mac_addr);
			if (!wdev) {
				ret = -ENODEV;
				WL_ERR(("Failed to create NDI iface = %s, wdev is NULL\n",
					ifname));
				goto fail;
			}
			/* Store the iface name to pub data so that it can be used
			 * during NAN enable
			 */
			wl_cfgnan_add_ndi_data(cfg, idx, ifname);
			cfg->nancfg.ndi[idx].created = true;
			/* Store nan ndev */
			cfg->nancfg.ndi[idx].nan_ndev = wdev_to_ndev(wdev);
		} else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
			ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
			if (ret == BCME_OK) {
				if (wl_cfgnan_del_ndi_data(cfg, ifname) < 0) {
					WL_ERR(
						("Failed to find matching data for ndi:%s\n", ifname));
				}
			} else if (ret == -ENODEV) {
				/* Treat an already-removed iface as success */
				WL_INFORM(("Already deleted: %s\n", ifname));
				ret = BCME_OK;
			} else if (ret != BCME_OK) {
				/* NOTE(review): this condition is always true when
				 * reached (prior branches covered OK and -ENODEV),
				 * so it is effectively a plain "else".
				 */
				WL_ERR(("failed to delete NDI[%d]\n", ret));
			}
		}
	} else {
		ret = -ENODEV;
		WL_ERR(
			("Bus is already down, no dev found to remove, ret = %d\n", ret));
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}
5683
5684 /*
5685 * Return data peer from peer list
5686 * for peer_addr
5687 * NULL if not found
5688 */
wl_cfgnan_data_get_peer(struct bcm_cfg80211 * cfg,struct ether_addr * peer_addr)5689 nan_ndp_peer_t *wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
5690 struct ether_addr *peer_addr)
5691 {
5692 uint8 i;
5693 nan_ndp_peer_t *peer = cfg->nancfg.nan_ndp_peer_info;
5694
5695 if (!peer) {
5696 WL_ERR(("wl_cfgnan_data_get_peer: nan_ndp_peer_info is NULL\n"));
5697 goto exit;
5698 }
5699 for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
5700 if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED &&
5701 (!memcmp(peer_addr, &peer[i].peer_addr, ETHER_ADDR_LEN))) {
5702 return &peer[i];
5703 }
5704 }
5705
5706 exit:
5707 return NULL;
5708 }
5709
5710 /*
5711 * Returns True if
5712 * datapath exists for nan cfg
5713 * for any peer
5714 */
wl_cfgnan_data_dp_exists(struct bcm_cfg80211 * cfg)5715 bool wl_cfgnan_data_dp_exists(struct bcm_cfg80211 *cfg)
5716 {
5717 bool ret = FALSE;
5718 uint8 i;
5719 nan_ndp_peer_t *peer = NULL;
5720
5721 if ((cfg->nan_init_state == FALSE) || (cfg->nan_enable == FALSE)) {
5722 goto exit;
5723 }
5724
5725 if (!cfg->nancfg.nan_ndp_peer_info) {
5726 goto exit;
5727 }
5728
5729 peer = cfg->nancfg.nan_ndp_peer_info;
5730 for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
5731 if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED) {
5732 ret = TRUE;
5733 break;
5734 }
5735 }
5736
5737 exit:
5738 return ret;
5739 }
5740
5741 /*
5742 * Returns True if
5743 * datapath exists for nan cfg
5744 * for given peer
5745 */
wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 * cfg,struct ether_addr * peer_addr)5746 bool wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
5747 struct ether_addr *peer_addr)
5748 {
5749 bool ret = FALSE;
5750 nan_ndp_peer_t *peer = NULL;
5751
5752 if ((cfg->nan_init_state == FALSE) || (cfg->nan_enable == FALSE)) {
5753 goto exit;
5754 }
5755
5756 /* check for peer exist */
5757 peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
5758 if (peer) {
5759 ret = TRUE;
5760 }
5761
5762 exit:
5763 return ret;
5764 }
5765
5766 /*
5767 * As of now API only available
5768 * for setting state to CONNECTED
5769 * if applicable
5770 */
wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 * cfg,struct ether_addr * peer_addr,nan_peer_dp_state_t state)5771 void wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
5772 struct ether_addr *peer_addr,
5773 nan_peer_dp_state_t state)
5774 {
5775 nan_ndp_peer_t *peer = NULL;
5776 /* check for peer exist */
5777 peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
5778 if (!peer) {
5779 goto end;
5780 }
5781 peer->peer_dp_state = state;
5782 end:
5783 return;
5784 }
5785
5786 /* Adds peer to nan data peer list */
wl_cfgnan_data_add_peer(struct bcm_cfg80211 * cfg,struct ether_addr * peer_addr)5787 void wl_cfgnan_data_add_peer(struct bcm_cfg80211 *cfg,
5788 struct ether_addr *peer_addr)
5789 {
5790 uint8 i;
5791 nan_ndp_peer_t *peer = NULL;
5792 /* check for peer exist */
5793 peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
5794 if (peer) {
5795 peer->dp_count++;
5796 goto end;
5797 }
5798 peer = cfg->nancfg.nan_ndp_peer_info;
5799 for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
5800 if (peer[i].peer_dp_state == NAN_PEER_DP_NOT_CONNECTED) {
5801 break;
5802 }
5803 }
5804 if (i == NAN_MAX_NDP_PEER) {
5805 WL_DBG(("DP Peer list full, Droopping add peer req\n"));
5806 goto end;
5807 }
5808 /* Add peer to list */
5809 memcpy(&peer[i].peer_addr, peer_addr, ETHER_ADDR_LEN);
5810 peer[i].dp_count = 1;
5811 peer[i].peer_dp_state = NAN_PEER_DP_CONNECTING;
5812
5813 end:
5814 return;
5815 }
5816
5817 /* Removes nan data peer from peer list */
wl_cfgnan_data_remove_peer(struct bcm_cfg80211 * cfg,struct ether_addr * peer_addr)5818 void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
5819 struct ether_addr *peer_addr)
5820 {
5821 nan_ndp_peer_t *peer = NULL;
5822 /* check for peer exist */
5823 peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
5824 if (!peer) {
5825 WL_DBG(("DP Peer not present in list, "
5826 "Droopping remove peer req\n"));
5827 goto end;
5828 }
5829 peer->dp_count--;
5830 if (peer->dp_count == 0) {
5831 /* No more NDPs, delete entry */
5832 memset(peer, 0, sizeof(nan_ndp_peer_t));
5833 } else {
5834 /* Set peer dp state to connected if any ndp still exits */
5835 peer->peer_dp_state = NAN_PEER_DP_CONNECTED;
5836 }
5837 end:
5838 return;
5839 }
5840
/*
 * Initiate a NAN datapath request towards the peer in cmd_data.
 * Programs local and NDC availability, packs a
 * WL_NAN_CMD_DATA_DATAREQ iov batch (peer NMI, local NDI, optional
 * service info and security TLVs) and issues it to firmware.
 * On success the firmware-assigned NDP instance id is returned via
 * ndp_instance_id and the peer is added to the data peer list.
 * Returns BCME_OK on success, a BCME_/-errno code otherwise; the
 * firmware status is reported through cmd_data->status.
 */
int wl_cfgnan_data_path_request_handler(struct net_device *ndev,
                                        struct bcm_cfg80211 *cfg,
                                        nan_datapath_cmd_data_t *cmd_data,
                                        uint8 *ndp_instance_id)
{
    s32 ret = BCME_OK;
    bcm_iov_batch_buf_t *nan_buf = NULL;
    wl_nan_dp_req_t *datareq = NULL;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    uint16 buflen_avail;
    uint8 *pxtlv;
    struct wireless_dev *wdev;
    uint16 nan_buf_size;
    uint8 *resp_buf = NULL;
    /* Considering fixed params */
    uint16 data_size =
        WL_NAN_OBUF_DATA_OFFSET + OFFSETOF(wl_nan_dp_req_t, tlv_params);
    data_size = ALIGN_SIZE(data_size, 0x4);

    ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
    if (unlikely(ret)) {
        WL_ERR(("Failed to get alligned size of optional params\n"));
        /* Return directly instead of "goto fail": the fail label
         * releases the NAN mutex and cfg->if_sync, which are not yet
         * held at this point -- unlocking an unheld mutex is
         * undefined behavior. It also runs NAN_DBG_EXIT without a
         * matching NAN_DBG_ENTER.
         */
        return ret;
    }

    nan_buf_size = data_size;
    NAN_DBG_ENTER();

    mutex_lock(&cfg->if_sync);
    NAN_MUTEX_LOCK();
#ifdef WL_IFACE_MGMT
    if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
        WL_ERR(("Conflicting iface found to be active\n"));
        ret = BCME_UNSUPPORTED;
        goto fail;
    }
#endif /* WL_IFACE_MGMT */

#ifdef RTT_SUPPORT
    /* cancel any ongoing RTT session with peer
     * as we donot support DP and RNG to same peer
     */
    wl_cfgnan_clear_peer_ranging(cfg, &cmd_data->mac_addr,
                                 RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
#endif /* RTT_SUPPORT */

    nan_buf = MALLOCZ(cfg->osh, data_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
    if (!resp_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    /* Program local and NDC availability before firing the request */
    ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg), cfg,
                                  &cmd_data->avail_params, WL_AVAIL_LOCAL);
    if (unlikely(ret)) {
        WL_ERR(("Failed to set avail value with type local\n"));
        goto fail;
    }

    ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg), cfg,
                                  &cmd_data->avail_params, WL_AVAIL_NDC);
    if (unlikely(ret)) {
        WL_ERR(("Failed to set avail value with type ndc\n"));
        goto fail;
    }

    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
    datareq = (wl_nan_dp_req_t *)(sub_cmd->data);

    /* setting default data path type to unicast */
    datareq->type = WL_NAN_DP_TYPE_UNICAST;

    if (cmd_data->pub_id) {
        datareq->pub_id = cmd_data->pub_id;
    }

    if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
        ret = memcpy_s(&datareq->peer_mac, ETHER_ADDR_LEN, &cmd_data->mac_addr,
                       ETHER_ADDR_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy ether addr provided\n"));
            goto fail;
        }
    } else {
        WL_ERR(("Invalid ether addr provided\n"));
        ret = BCME_BADARG;
        goto fail;
    }

    /* Retrieve mac from given iface name */
    wdev = wl_cfg80211_get_wdev_from_ifname(cfg, (char *)cmd_data->ndp_iface);
    if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
        ret = -EINVAL;
        WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
                (char *)cmd_data->ndp_iface));
        goto fail;
    }

    if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
        ret = memcpy_s(&datareq->ndi, ETHER_ADDR_LEN, wdev->netdev->dev_addr,
                       ETHER_ADDR_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy ether addr provided\n"));
            goto fail;
        }
        WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n", __FUNCTION__,
                  MAC2STRDBG(datareq->ndi.octet)));
    } else {
        WL_ERR(("Invalid NDI addr retrieved\n"));
        ret = BCME_BADARG;
        goto fail;
    }

    datareq->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
    datareq->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;

    /* Fill the sub_command block */
    sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAREQ);
    sub_cmd->len =
        sizeof(sub_cmd->u.options) + OFFSETOF(wl_nan_dp_req_t, tlv_params);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    pxtlv = (uint8 *)&datareq->tlv_params;

    nan_buf_size -=
        (sub_cmd->len + OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
    buflen_avail = nan_buf_size;

    if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
        ret = bcm_pack_xtlv_entry(
            &pxtlv, &nan_buf_size, WL_NAN_XTLV_SD_SVC_INFO,
            cmd_data->svc_info.dlen, cmd_data->svc_info.data,
            BCM_XTLV_OPTION_ALIGN32);
        if (ret != BCME_OK) {
            WL_ERR(("unable to process svc_spec_info: %d\n", ret));
            goto fail;
        }
        datareq->flags |= WL_NAN_DP_FLAG_SVC_INFO;
    }

    /* Security elements */

    if (cmd_data->csid) {
        WL_TRACE(("Cipher suite type is present, pack it\n"));
        ret = bcm_pack_xtlv_entry(
            &pxtlv, &nan_buf_size, WL_NAN_XTLV_CFG_SEC_CSID,
            sizeof(nan_sec_csid_e), (uint8 *)&cmd_data->csid,
            BCM_XTLV_OPTION_ALIGN32);
        if (unlikely(ret)) {
            WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
            goto fail;
        }
    }

    if (cmd_data->ndp_cfg.security_cfg) {
        if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
            (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
            if (cmd_data->key.data && cmd_data->key.dlen) {
                WL_TRACE(("optional pmk present, pack it\n"));
                ret = bcm_pack_xtlv_entry(
                    &pxtlv, &nan_buf_size, WL_NAN_XTLV_CFG_SEC_PMK,
                    cmd_data->key.dlen, cmd_data->key.data,
                    BCM_XTLV_OPTION_ALIGN32);
                if (unlikely(ret)) {
                    WL_ERR(("%s: fail to pack on WL_NAN_XTLV_CFG_SEC_PMK\n",
                            __FUNCTION__));
                    goto fail;
                }
            }
        } else {
            WL_ERR(("Invalid security key type\n"));
            ret = BCME_BADARG;
            goto fail;
        }

        if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
            (cmd_data->svc_hash.data)) {
            WL_TRACE(("svc hash present, pack it\n"));
            ret = bcm_pack_xtlv_entry(
                &pxtlv, &nan_buf_size, WL_NAN_XTLV_CFG_SVC_HASH,
                WL_NAN_SVC_HASH_LEN, cmd_data->svc_hash.data,
                BCM_XTLV_OPTION_ALIGN32);
            if (ret != BCME_OK) {
                WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
                        __FUNCTION__));
                goto fail;
            }
        } else {
#ifdef WL_NAN_DISC_CACHE
            /* no hash from caller: fall back to the discovery cache */
            nan_disc_result_cache *cache;
            cache = wl_cfgnan_get_disc_result(cfg, datareq->pub_id,
                                              &datareq->peer_mac);
            if (!cache) {
                ret = BCME_ERROR;
                WL_ERR(("invalid svc hash data or length = %d\n",
                        cmd_data->svc_hash.dlen));
                goto fail;
            }
            WL_TRACE(("svc hash present, pack it\n"));
            ret = bcm_pack_xtlv_entry(
                &pxtlv, &nan_buf_size, WL_NAN_XTLV_CFG_SVC_HASH,
                WL_NAN_SVC_HASH_LEN, cache->svc_hash, BCM_XTLV_OPTION_ALIGN32);
            if (ret != BCME_OK) {
                WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
                        __FUNCTION__));
                goto fail;
            }
#else
            ret = BCME_ERROR;
            WL_ERR(("invalid svc hash data or length = %d\n",
                    cmd_data->svc_hash.dlen));
            goto fail;
#endif /* WL_NAN_DISC_CACHE */
        }
        /* If the Data req is for secure data connection */
        datareq->flags |= WL_NAN_DP_FLAG_SECURITY;
    }

    sub_cmd->len += (buflen_avail - nan_buf_size);
    nan_buf->is_set = false;
    nan_buf->count++;

    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
                                  &(cmd_data->status), resp_buf,
                                  data_size + NAN_IOVAR_NAME_SIZE);
    if (unlikely(ret) || unlikely(cmd_data->status)) {
        WL_ERR(("nan data path request handler failed, ret = %d status %d\n",
                ret, cmd_data->status));
        goto fail;
    }

    /* check the response buff */
    if (ret == BCME_OK) {
        ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
                               ndp_instance_id, WL_NAN_CMD_DATA_DATAREQ);
        cmd_data->ndp_instance_id = *ndp_instance_id;
    }
    WL_INFORM_MEM(("[NAN] DP request successfull (ndp_id:%d)\n",
                   cmd_data->ndp_instance_id));
    /* Add peer to data ndp peer list */
    wl_cfgnan_data_add_peer(cfg, &datareq->peer_mac);

fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, data_size);
    }

    if (resp_buf) {
        MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
    }
    NAN_MUTEX_UNLOCK();
    mutex_unlock(&cfg->if_sync);
    NAN_DBG_EXIT();
    return ret;
}
6108
/*
 * Respond to an incoming NAN datapath request.
 * Programs local and NDC availability, packs a
 * WL_NAN_CMD_DATA_DATARESP iov batch (accept/reject status,
 * initiator NDI recorded from the request indication, local NDI
 * when accepting, optional service info and security TLVs) and
 * issues it to firmware. Returns BCME_OK on success, a BCME_/-errno
 * code otherwise; firmware status is reported via cmd_data->status.
 */
int wl_cfgnan_data_path_response_handler(struct net_device *ndev,
                                         struct bcm_cfg80211 *cfg,
                                         nan_datapath_cmd_data_t *cmd_data)
{
    s32 ret = BCME_OK;
    bcm_iov_batch_buf_t *nan_buf = NULL;
    wl_nan_dp_resp_t *dataresp = NULL;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    uint16 buflen_avail;
    uint8 *pxtlv;
    struct wireless_dev *wdev;
    uint16 nan_buf_size;
    uint8 *resp_buf = NULL;

    /* Considering fixed params */
    uint16 data_size =
        WL_NAN_OBUF_DATA_OFFSET + OFFSETOF(wl_nan_dp_resp_t, tlv_params);
    data_size = ALIGN_SIZE(data_size, 0x4);
    ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
    if (unlikely(ret)) {
        WL_ERR(("Failed to get alligned size of optional params\n"));
        /* Return directly instead of "goto fail": the fail label
         * releases the NAN mutex and cfg->if_sync, which are not yet
         * held at this point -- unlocking an unheld mutex is
         * undefined behavior. It also runs NAN_DBG_EXIT without a
         * matching NAN_DBG_ENTER.
         */
        return ret;
    }
    nan_buf_size = data_size;

    NAN_DBG_ENTER();

    mutex_lock(&cfg->if_sync);
    NAN_MUTEX_LOCK();
#ifdef WL_IFACE_MGMT
    if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
        WL_ERR(("Conflicting iface found to be active\n"));
        ret = BCME_UNSUPPORTED;
        goto fail;
    }
#endif /* WL_IFACE_MGMT */

    nan_buf = MALLOCZ(cfg->osh, data_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
    if (!resp_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg), cfg,
                                  &cmd_data->avail_params, WL_AVAIL_LOCAL);
    if (unlikely(ret)) {
        WL_ERR(("Failed to set avail value with type local\n"));
        goto fail;
    }

    ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg), cfg,
                                  &cmd_data->avail_params, WL_AVAIL_NDC);
    if (unlikely(ret)) {
        WL_ERR(("Failed to set avail value with type ndc\n"));
        goto fail;
    }

    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
    dataresp = (wl_nan_dp_resp_t *)(sub_cmd->data);

    /* Setting default data path type to unicast */
    dataresp->type = WL_NAN_DP_TYPE_UNICAST;
    /* Changing status value as per fw convention */
    dataresp->status = cmd_data->rsp_code ^= 1;
    dataresp->reason_code = 0;

    /* ndp instance id must be from 1 to 255, 0 is reserved */
    if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
        cmd_data->ndp_instance_id > NAN_ID_MAX) {
        WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
        ret = BCME_BADARG;
        goto fail;
    }
    dataresp->ndp_id = cmd_data->ndp_instance_id;

    /* Retrieved initiator ndi from NanDataPathRequestInd */
    if (!ETHER_ISNULLADDR(&cfg->initiator_ndi.octet)) {
        ret = memcpy_s(&dataresp->mac_addr, ETHER_ADDR_LEN, &cfg->initiator_ndi,
                       ETHER_ADDR_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy initiator ndi\n"));
            goto fail;
        }
    } else {
        WL_ERR(("Invalid ether addr retrieved\n"));
        ret = BCME_BADARG;
        goto fail;
    }

    /* Interface is not mandatory, when it is a reject from framework */
    if (dataresp->status != WL_NAN_DP_STATUS_REJECTED) {
        /* Retrieve mac from given iface name */
        wdev =
            wl_cfg80211_get_wdev_from_ifname(cfg, (char *)cmd_data->ndp_iface);
        if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
            ret = -EINVAL;
            WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
                    (char *)cmd_data->ndp_iface));
            goto fail;
        }

        if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
            ret = memcpy_s(&dataresp->ndi, ETHER_ADDR_LEN,
                           wdev->netdev->dev_addr, ETHER_ADDR_LEN);
            if (ret != BCME_OK) {
                WL_ERR(("Failed to copy responder ndi\n"));
                goto fail;
            }
            WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n", __FUNCTION__,
                      MAC2STRDBG(dataresp->ndi.octet)));
        } else {
            WL_ERR(("Invalid NDI addr retrieved\n"));
            ret = BCME_BADARG;
            goto fail;
        }
    }

    dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
    dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;

    /* Fill the sub_command block */
    sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATARESP);
    sub_cmd->len =
        sizeof(sub_cmd->u.options) + OFFSETOF(wl_nan_dp_resp_t, tlv_params);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    pxtlv = (uint8 *)&dataresp->tlv_params;

    nan_buf_size -=
        (sub_cmd->len + OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
    buflen_avail = nan_buf_size;

    if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
        ret = bcm_pack_xtlv_entry(
            &pxtlv, &nan_buf_size, WL_NAN_XTLV_SD_SVC_INFO,
            cmd_data->svc_info.dlen, cmd_data->svc_info.data,
            BCM_XTLV_OPTION_ALIGN32);
        if (ret != BCME_OK) {
            WL_ERR(("unable to process svc_spec_info: %d\n", ret));
            goto fail;
        }
        dataresp->flags |= WL_NAN_DP_FLAG_SVC_INFO;
    }

    /* Security elements */
    if (cmd_data->csid) {
        WL_TRACE(("Cipher suite type is present, pack it\n"));
        ret = bcm_pack_xtlv_entry(
            &pxtlv, &nan_buf_size, WL_NAN_XTLV_CFG_SEC_CSID,
            sizeof(nan_sec_csid_e), (uint8 *)&cmd_data->csid,
            BCM_XTLV_OPTION_ALIGN32);
        if (unlikely(ret)) {
            WL_ERR(("%s: fail to pack csid\n", __FUNCTION__));
            goto fail;
        }
    }

    if (cmd_data->ndp_cfg.security_cfg) {
        if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
            (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
            if (cmd_data->key.data && cmd_data->key.dlen) {
                WL_TRACE(("optional pmk present, pack it\n"));
                ret = bcm_pack_xtlv_entry(
                    &pxtlv, &nan_buf_size, WL_NAN_XTLV_CFG_SEC_PMK,
                    cmd_data->key.dlen, cmd_data->key.data,
                    BCM_XTLV_OPTION_ALIGN32);
                if (unlikely(ret)) {
                    WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
                            __FUNCTION__));
                    goto fail;
                }
            }
        } else {
            WL_ERR(("Invalid security key type\n"));
            ret = BCME_BADARG;
            goto fail;
        }

        if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
            (cmd_data->svc_hash.data)) {
            WL_TRACE(("svc hash present, pack it\n"));
            ret = bcm_pack_xtlv_entry(
                &pxtlv, &nan_buf_size, WL_NAN_XTLV_CFG_SVC_HASH,
                WL_NAN_SVC_HASH_LEN, cmd_data->svc_hash.data,
                BCM_XTLV_OPTION_ALIGN32);
            if (ret != BCME_OK) {
                WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
                        __FUNCTION__));
                goto fail;
            }
        }
        /* If the Data resp is for secure data connection */
        dataresp->flags |= WL_NAN_DP_FLAG_SECURITY;
    }

    sub_cmd->len += (buflen_avail - nan_buf_size);

    nan_buf->is_set = false;
    nan_buf->count++;
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
                                  &(cmd_data->status), resp_buf,
                                  data_size + NAN_IOVAR_NAME_SIZE);
    if (unlikely(ret) || unlikely(cmd_data->status)) {
        WL_ERR(
            ("nan data path response handler failed, error = %d, status %d\n",
             ret, cmd_data->status));
        goto fail;
    }

    WL_INFORM_MEM(
        ("[NAN] DP response successfull (ndp_id:%d)\n", dataresp->ndp_id));

fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, data_size);
    }

    if (resp_buf) {
        MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
    }
    NAN_MUTEX_UNLOCK();
    mutex_unlock(&cfg->if_sync);

    NAN_DBG_EXIT();
    return ret;
}
6346
/*
 * Terminate the NAN datapath identified by ndp_instance_id.
 * Packs a WL_NAN_CMD_DATA_DATAEND subcommand and issues it to the
 * firmware; the firmware status is returned through *status.
 * Returns BCME_OK without doing anything when the bus is down or
 * NAN is disabled (dp end is then meaningless), BCME_BADARG for an
 * out-of-range instance id, and the ioctl result otherwise.
 */
int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
                                    struct bcm_cfg80211 *cfg,
                                    nan_data_path_id ndp_instance_id,
                                    int *status)
{
    bcm_iov_batch_buf_t *nan_buf = NULL;
    wl_nan_dp_end_t *dataend = NULL;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    s32 ret = BCME_OK;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

    dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

    NAN_DBG_ENTER();
    NAN_MUTEX_LOCK();

    /* Treat "bus down" as success: there is nothing left to end */
    if (!dhdp->up) {
        WL_ERR(("bus is already down, hence blocking nan dp end\n"));
        ret = BCME_OK;
        goto fail;
    }

    if (!cfg->nan_enable) {
        WL_ERR(("nan is not enabled, nan dp end blocked\n"));
        ret = BCME_OK;
        goto fail;
    }

    /* ndp instance id must be from 1 to 255, 0 is reserved */
    if (ndp_instance_id < NAN_ID_MIN || ndp_instance_id > NAN_ID_MAX) {
        WL_ERR(("Invalid ndp instance id: %d\n", ndp_instance_id));
        ret = BCME_BADARG;
        goto fail;
    }

    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    /* account for the batch-buffer header before the first subcmd */
    nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
    dataend = (wl_nan_dp_end_t *)(sub_cmd->data);

    /* Fill sub_cmd block */
    sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAEND);
    sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*dataend);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

    dataend->lndp_id = ndp_instance_id;

    /*
     * Currently fw requires ndp_id and reason to end the data path
     * But wifi_nan.h takes ndp_instances_count and ndp_id.
     * Will keep reason = accept always.
     */

    dataend->status = 1;

    nan_buf->is_set = true;
    nan_buf->count++;

    /* remaining space after the packed subcommand */
    nan_buf_size -=
        (sub_cmd->len + OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
    memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
                                  (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(*status)) {
        WL_ERR(("nan data path end handler failed, error = %d status %d\n", ret,
                *status));
        goto fail;
    }
    WL_INFORM_MEM(("[NAN] DP end successfull (ndp_id:%d)\n", dataend->lndp_id));
fail:
    /* nan_buf was allocated with NAN_IOCTL_BUF_SIZE (before the
     * nan_buf_size decrements), so free with the same size
     */
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }

    NAN_MUTEX_UNLOCK();
    NAN_DBG_EXIT();
    return ret;
}
6435
6436 #ifdef WL_NAN_DISC_CACHE
/*
 * Supply datapath security context to the HAL: the service hash and
 * the publisher NMI for a pending NAN datapath.
 * Two lookup contexts are handled:
 *  - datapath request (pub_id + peer mac): service hash is taken
 *    from the discovery result cache;
 *  - datapath response (ndp_instance_id): service hash is taken
 *    from the service instance, and the local NMI is always sent
 *    (svc_info may legitimately be absent for OOB datapaths).
 * Returns BCME_OK when something was filled into nan_req_resp,
 * BCME_NOTENABLED when NAN is not initialized, BCME_NOTFOUND when
 * neither context produced data.
 */
int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
                               nan_datapath_sec_info_cmd_data_t *cmd_data,
                               nan_hal_resp_t *nan_req_resp)
{
    s32 ret = BCME_NOTFOUND;
    /* check in cache */
    nan_disc_result_cache *disc_cache = NULL;
    nan_svc_info_t *svc_info = NULL;

    NAN_DBG_ENTER();
    NAN_MUTEX_LOCK();

    if (!cfg->nan_init_state) {
        WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
        ret = BCME_NOTENABLED;
        goto fail;
    }

    /* datapath request context */
    if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
        disc_cache = wl_cfgnan_get_disc_result(cfg, cmd_data->pub_id,
                                               &cmd_data->mac_addr);
        WL_DBG(("datapath request: PUB ID: = %d\n", cmd_data->pub_id));
        if (disc_cache) {
            (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
                           disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
            ret = BCME_OK;
        } else {
            WL_ERR(("disc_cache is NULL\n"));
            goto fail;
        }
    }

    /* datapath response context */
    if (cmd_data->ndp_instance_id) {
        WL_DBG(
            ("datapath response: NDP ID: = %d\n", cmd_data->ndp_instance_id));
        svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
        /* Note: svc_info will not be present in OOB cases
         * In such case send NMI alone and let HAL handle if
         * svc_hash is mandatory
         */
        if (svc_info) {
            WL_DBG(("svc hash present, pack it\n"));
            (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
                           svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
        } else {
            WL_INFORM_MEM(("svc_info not present..assuming OOB DP\n"));
        }
        /* Always send NMI */
        (void)memcpy_s(nan_req_resp->pub_nmi, ETHER_ADDR_LEN, cfg->nan_nmi_mac,
                       ETHER_ADDR_LEN);
        ret = BCME_OK;
    }
fail:
    NAN_MUTEX_UNLOCK();
    NAN_DBG_EXIT();
    return ret;
}
6496
/*
 * Copy a cached discovery result into a nan_event_data_t for event
 * delivery. Scalar fields (ids, rssi, cipher suite) and the peer NMI
 * are copied directly; svc_info and tx_match_filter payloads are
 * deep-copied into freshly MALLOCZ'd buffers of the cached lengths.
 * Returns BCME_OK, -ENOMEM on allocation failure, or a memcpy_s
 * error code.
 * NOTE(review): on a failure after svc_info.data was allocated, that
 * buffer is not freed here -- presumably the caller releases
 * nan_event_data on error; confirm against the call sites.
 */
static s32 wl_nan_cache_to_event_data(nan_disc_result_cache *cache,
                                      nan_event_data_t *nan_event_data,
                                      osl_t *osh)
{
    s32 ret = BCME_OK;
    NAN_DBG_ENTER();

    /* scalar discovery attributes */
    nan_event_data->pub_id = cache->pub_id;
    nan_event_data->sub_id = cache->sub_id;
    nan_event_data->publish_rssi = cache->publish_rssi;
    nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
    ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN, &cache->peer,
                   ETHER_ADDR_LEN);
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy cached peer nan nmi\n"));
        goto fail;
    }

    /* deep-copy service info payload, if cached */
    if (cache->svc_info.dlen && cache->svc_info.data) {
        nan_event_data->svc_info.dlen = cache->svc_info.dlen;
        nan_event_data->svc_info.data =
            MALLOCZ(osh, nan_event_data->svc_info.dlen);
        if (!nan_event_data->svc_info.data) {
            WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
            /* keep dlen consistent with the NULL data pointer */
            nan_event_data->svc_info.dlen = 0;
            ret = -ENOMEM;
            goto fail;
        }
        ret = memcpy_s(nan_event_data->svc_info.data,
                       nan_event_data->svc_info.dlen, cache->svc_info.data,
                       cache->svc_info.dlen);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy cached svc info data\n"));
            goto fail;
        }
    }
    /* deep-copy tx match filter payload, if cached */
    if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
        nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
        nan_event_data->tx_match_filter.data =
            MALLOCZ(osh, nan_event_data->tx_match_filter.dlen);
        if (!nan_event_data->tx_match_filter.data) {
            WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
            /* keep dlen consistent with the NULL data pointer */
            nan_event_data->tx_match_filter.dlen = 0;
            ret = -ENOMEM;
            goto fail;
        }
        ret =
            memcpy_s(nan_event_data->tx_match_filter.data,
                     nan_event_data->tx_match_filter.dlen,
                     cache->tx_match_filter.data, cache->tx_match_filter.dlen);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy cached tx match filter data\n"));
            goto fail;
        }
    }
fail:
    NAN_DBG_EXIT();
    return ret;
}
6556 #endif /* WL_NAN_DISC_CACHE */
6557
6558 /* API to cancel the ranging with peer
6559 * For geofence initiator, suspend ranging.
6560 * for directed RTT initiator , report fail result, cancel ranging
6561 * and clear ranging instance
6562 * For responder, cancel ranging and clear ranging instance
6563 */
6564 #ifdef RTT_SUPPORT
wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 * cfg,struct ether_addr * peer,int reason)6565 static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
6566 struct ether_addr *peer, int reason)
6567 {
6568 uint32 status = 0;
6569 nan_ranging_inst_t *rng_inst = NULL;
6570 int err = BCME_OK;
6571 struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
6572 dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
6573
6574 rng_inst = wl_cfgnan_check_for_ranging(cfg, peer);
6575 if (rng_inst) {
6576 if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
6577 err = wl_cfgnan_suspend_geofence_rng_session(ndev, peer, reason, 0);
6578 } else {
6579 if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
6580 dhd_rtt_handle_nan_rtt_session_end(dhdp, peer);
6581 }
6582 /* responder */
6583 err =
6584 wl_cfgnan_cancel_ranging(ndev, cfg, rng_inst->range_id,
6585 NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
6586 bzero(rng_inst, sizeof(*rng_inst));
6587 }
6588 }
6589
6590 if (err) {
6591 WL_ERR(("Failed to stop ranging with peer %d\n", err));
6592 }
6593
6594 return err;
6595 }
6596 #endif /* RTT_SUPPORT */
6597
wl_nan_dp_cmn_event_data(struct bcm_cfg80211 * cfg,void * event_data,uint16 data_len,uint16 * tlvs_offset,uint16 * nan_opts_len,uint32 event_num,int * hal_event_id,nan_event_data_t * nan_event_data)6598 static s32 wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
6599 uint16 data_len, uint16 *tlvs_offset,
6600 uint16 *nan_opts_len, uint32 event_num,
6601 int *hal_event_id,
6602 nan_event_data_t *nan_event_data)
6603 {
6604 s32 ret = BCME_OK;
6605 uint8 i;
6606 wl_nan_ev_datapath_cmn_t *ev_dp;
6607 nan_svc_info_t *svc_info;
6608 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
6609 #ifdef RTT_SUPPORT
6610 nan_ranging_inst_t *rng_inst = NULL;
6611 #endif /* RTT_SUPPORT */
6612
6613 if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
6614 ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
6615 NAN_DBG_ENTER();
6616
6617 BCM_REFERENCE(svc_info);
6618 BCM_REFERENCE(i);
6619 /* Mapping to common struct between DHD and HAL */
6620 WL_TRACE(("Event type: %d\n", ev_dp->type));
6621 nan_event_data->type = ev_dp->type;
6622 WL_TRACE(("pub_id: %d\n", ev_dp->pub_id));
6623 nan_event_data->pub_id = ev_dp->pub_id;
6624 WL_TRACE(("security: %d\n", ev_dp->security));
6625 nan_event_data->security = ev_dp->security;
6626
6627 /* Store initiator_ndi, required for data_path_response_request */
6628 ret = memcpy_s(&cfg->initiator_ndi, ETHER_ADDR_LEN,
6629 &ev_dp->initiator_ndi, ETHER_ADDR_LEN);
6630 if (ret != BCME_OK) {
6631 WL_ERR(("Failed to copy event's initiator addr\n"));
6632 goto fail;
6633 }
6634 if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
6635 WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
6636 nan_event_data->ndp_id = ev_dp->ndp_id;
6637 WL_TRACE(("INITIATOR_NDI: " MACDBG "\n",
6638 MAC2STRDBG(ev_dp->initiator_ndi.octet)));
6639 WL_TRACE(("RESPONDOR_NDI: " MACDBG "\n",
6640 MAC2STRDBG(ev_dp->responder_ndi.octet)));
6641 WL_TRACE(
6642 ("PEER NMI: " MACDBG "\n", MAC2STRDBG(ev_dp->peer_nmi.octet)));
6643 ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
6644 &ev_dp->peer_nmi, ETHER_ADDR_LEN);
6645 if (ret != BCME_OK) {
6646 WL_ERR(("Failed to copy event's peer nmi\n"));
6647 goto fail;
6648 }
6649 } else {
6650 /* type is multicast */
6651 WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
6652 nan_event_data->ndp_id = ev_dp->mc_id;
6653 WL_TRACE(
6654 ("PEER NMI: " MACDBG "\n", MAC2STRDBG(ev_dp->peer_nmi.octet)));
6655 ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
6656 &ev_dp->peer_nmi, ETHER_ADDR_LEN);
6657 if (ret != BCME_OK) {
6658 WL_ERR(("Failed to copy event's peer nmi\n"));
6659 goto fail;
6660 }
6661 }
6662 *tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
6663 OFFSETOF(bcm_xtlv_t, data);
6664 *nan_opts_len = data_len - *tlvs_offset;
6665 if (event_num == WL_NAN_EVENT_PEER_DATAPATH_IND) {
6666 *hal_event_id = GOOGLE_NAN_EVENT_DATA_REQUEST;
6667 #ifdef WL_NAN_DISC_CACHE
6668 svc_info = wl_cfgnan_get_svc_inst(cfg, nan_event_data->pub_id, 0);
6669 if (svc_info) {
6670 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
6671 if (!svc_info->ndp_id[i]) {
6672 WL_TRACE(("Found empty field\n"));
6673 break;
6674 }
6675 }
6676 if (i == NAN_MAX_SVC_INST) {
6677 WL_ERR(("%s:cannot accommadate ndp id\n", __FUNCTION__));
6678 ret = BCME_NORESOURCE;
6679 goto fail;
6680 }
6681 svc_info->ndp_id[i] = nan_event_data->ndp_id;
6682 /* Add peer to data ndp peer list */
6683 wl_cfgnan_data_add_peer(cfg, &ev_dp->peer_nmi);
6684 #ifdef RTT_SUPPORT
6685 /* cancel any ongoing RTT session with peer
6686 * as we donot support DP and RNG to same peer
6687 */
6688 wl_cfgnan_clear_peer_ranging(cfg, &ev_dp->peer_nmi,
6689 RTT_GEO_SUSPN_PEER_NDP_TRIGGER);
6690 #endif /* RTT_SUPPORT */
6691 ret = BCME_OK;
6692 }
6693 #endif /* WL_NAN_DISC_CACHE */
6694 } else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
6695 *hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
6696 if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
6697 ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
6698 &ev_dp->responder_ndi, ETHER_ADDR_LEN);
6699 if (ret != BCME_OK) {
6700 WL_ERR(("Failed to copy event's responder ndi\n"));
6701 goto fail;
6702 }
6703 WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
6704 MAC2STRDBG(ev_dp->responder_ndi.octet)));
6705 WL_TRACE(("Initiator status %d\n", nan_event_data->status));
6706 } else {
6707 ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
6708 &ev_dp->initiator_ndi, ETHER_ADDR_LEN);
6709 if (ret != BCME_OK) {
6710 WL_ERR(("Failed to copy event's responder ndi\n"));
6711 goto fail;
6712 }
6713 WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
6714 MAC2STRDBG(ev_dp->initiator_ndi.octet)));
6715 }
6716 if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
6717 nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
6718 wl_cfgnan_data_set_peer_dp_state(cfg, &ev_dp->peer_nmi,
6719 NAN_PEER_DP_CONNECTED);
6720 wl_cfgnan_update_dp_info(cfg, true, nan_event_data->ndp_id);
6721 } else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
6722 nan_event_data->status = NAN_DP_REQUEST_REJECT;
6723 /* Remove peer from data ndp peer list */
6724 wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
6725 #ifdef RTT_SUPPORT
6726 rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
6727 if (rng_inst) {
6728 /* Trigger/Reset geofence RTT */
6729 wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
6730 RTT_SCHED_DP_REJECTED);
6731 }
6732 #endif /* RTT_SUPPORT */
6733 } else {
6734 WL_ERR(("%s:Status code = %x not expected\n", __FUNCTION__,
6735 ev_dp->status));
6736 ret = BCME_ERROR;
6737 goto fail;
6738 }
6739 WL_TRACE(("Responder status %d\n", nan_event_data->status));
6740 } else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
6741 /* Mapping to common struct between DHD and HAL */
6742 *hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
6743 #ifdef WL_NAN_DISC_CACHE
6744 if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
6745 /* Only at Responder side,
6746 * If dp is ended,
6747 * clear the resp ndp id from the svc info cache
6748 */
6749 svc_info =
6750 wl_cfgnan_get_svc_inst(cfg, 0, nan_event_data->ndp_id);
6751 if (svc_info) {
6752 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
6753 if (svc_info->ndp_id[i] == nan_event_data->ndp_id) {
6754 svc_info->ndp_id[i] = 0;
6755 }
6756 }
6757 } else {
6758 WL_DBG(("couldn't find entry for ndp id = %d\n",
6759 nan_event_data->ndp_id));
6760 }
6761 }
6762 #endif /* WL_NAN_DISC_CACHE */
6763 /* Remove peer from data ndp peer list */
6764 wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
6765 wl_cfgnan_update_dp_info(cfg, false, nan_event_data->ndp_id);
6766 #ifdef RTT_SUPPORT
6767 WL_INFORM_MEM(("DP_END for REMOTE_NMI: " MACDBG "\n",
6768 MAC2STRDBG(&ev_dp->peer_nmi)));
6769 rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
6770 if (rng_inst) {
6771 /* Trigger/Reset geofence RTT */
6772 WL_INFORM_MEM(("sched geofence rtt from DP_END ctx: " MACDBG
6773 "\n",
6774 MAC2STRDBG(&rng_inst->peer_addr)));
6775 wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
6776 RTT_SCHED_DP_END);
6777 }
6778 #endif /* RTT_SUPPORT */
6779 }
6780 } else {
6781 /* Follow though, not handling other IDs as of now */
6782 WL_DBG(("%s:ID = 0x%02x not supported\n", __FUNCTION__, xtlv->id));
6783 }
6784 fail:
6785 NAN_DBG_EXIT();
6786 return ret;
6787 }
/* TRUE when 'distance' lies inside the geofence band [egress, ingress].
 * Note: ingress is the upper bound and egress the lower bound here.
 */
#define IN_GEOFENCE(ingress, egress, distance) \
	(((distance) <= (ingress)) && ((distance) >= (egress)))
/* TRUE when 'distance' has moved strictly inside the ingress limit */
#define IS_INGRESS_VAL(ingress, distance) ((distance) < (ingress))
/* TRUE when 'distance' has moved strictly outside the egress limit */
#define IS_EGRESS_VAL(egress, distance) ((distance) > (egress))
6792
wl_cfgnan_check_ranging_cond(nan_svc_info_t * svc_info,uint32 distance,uint8 * ranging_ind,uint32 prev_distance)6793 static bool wl_cfgnan_check_ranging_cond(nan_svc_info_t *svc_info,
6794 uint32 distance, uint8 *ranging_ind,
6795 uint32 prev_distance)
6796 {
6797 uint8 svc_ind = svc_info->ranging_ind;
6798 bool notify = FALSE;
6799 bool range_rep_ev_once =
6800 !!(svc_info->svc_range_status & SVC_RANGE_REP_EVENT_ONCE);
6801 uint32 ingress_limit = svc_info->ingress_limit;
6802 uint32 egress_limit = svc_info->egress_limit;
6803
6804 WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the svc ranging cnd %d"
6805 " distance %d prev_distance %d, range_rep_ev_once %d\n",
6806 svc_ind, distance, prev_distance, range_rep_ev_once));
6807 WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the SVC ingress and"
6808 " egress limits %d %d\n",
6809 ingress_limit, egress_limit));
6810 if (svc_ind & NAN_RANGE_INDICATION_CONT) {
6811 *ranging_ind = NAN_RANGE_INDICATION_CONT;
6812 notify = TRUE;
6813 WL_ERR(("\n%s :Svc has continous Ind %d\n", __FUNCTION__, __LINE__));
6814 goto done;
6815 }
6816 if (svc_ind ==
6817 (NAN_RANGE_INDICATION_INGRESS | NAN_RANGE_INDICATION_EGRESS)) {
6818 if (IN_GEOFENCE(ingress_limit, egress_limit, distance)) {
6819 /* if not already in geofence */
6820 if ((range_rep_ev_once == FALSE) ||
6821 (!IN_GEOFENCE(ingress_limit, egress_limit, prev_distance))) {
6822 notify = TRUE;
6823 if (distance < ingress_limit) {
6824 *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
6825 } else {
6826 *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
6827 }
6828 WL_ERR(("\n%s :Svc has geofence Ind %d res_ind %d\n",
6829 __FUNCTION__, __LINE__, *ranging_ind));
6830 }
6831 }
6832 goto done;
6833 }
6834
6835 if (svc_ind == NAN_RANGE_INDICATION_INGRESS) {
6836 if (IS_INGRESS_VAL(ingress_limit, distance)) {
6837 if ((range_rep_ev_once == FALSE) ||
6838 (prev_distance == INVALID_DISTANCE) ||
6839 !IS_INGRESS_VAL(ingress_limit, prev_distance)) {
6840 notify = TRUE;
6841 *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
6842 WL_ERR(
6843 ("\n%s :Svc has ingress Ind %d\n", __FUNCTION__, __LINE__));
6844 }
6845 }
6846 goto done;
6847 }
6848 if (svc_ind == NAN_RANGE_INDICATION_EGRESS) {
6849 if (IS_EGRESS_VAL(egress_limit, distance)) {
6850 if ((range_rep_ev_once == FALSE) ||
6851 (prev_distance == INVALID_DISTANCE) ||
6852 !IS_EGRESS_VAL(egress_limit, prev_distance)) {
6853 notify = TRUE;
6854 *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
6855 WL_ERR(
6856 ("\n%s :Svc has egress Ind %d\n", __FUNCTION__, __LINE__));
6857 }
6858 }
6859 goto done;
6860 }
6861 done:
6862 svc_info->svc_range_status |= SVC_RANGE_REP_EVENT_ONCE;
6863 return notify;
6864 }
6865
wl_cfgnan_event_disc_result(struct bcm_cfg80211 * cfg,nan_event_data_t * nan_event_data)6866 static int wl_cfgnan_event_disc_result(struct bcm_cfg80211 *cfg,
6867 nan_event_data_t *nan_event_data)
6868 {
6869 int ret = BCME_OK;
6870 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || \
6871 defined(WL_VENDOR_EXT_SUPPORT)
6872 ret = wl_cfgvendor_send_nan_event(
6873 cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
6874 GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH, nan_event_data);
6875 if (ret != BCME_OK) {
6876 WL_ERR(("Failed to send event to nan hal\n"));
6877 }
6878 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || \
6879 defined(WL_VENDOR_EXT_SUPPORT) */
6880 return ret;
6881 }
6882
wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 * cfg,nan_ranging_inst_t * rng_inst,nan_event_data_t * nan_event_data,uint32 distance)6883 static int32 wl_cfgnan_notify_disc_with_ranging(
6884 struct bcm_cfg80211 *cfg, nan_ranging_inst_t *rng_inst,
6885 nan_event_data_t *nan_event_data, uint32 distance)
6886 {
6887 nan_svc_info_t *svc_info;
6888 bool notify_svc = FALSE;
6889 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
6890 uint8 ranging_ind = 0;
6891 int ret = BCME_OK;
6892 int i = 0, j = 0;
6893
6894 for (i = 0; i < MAX_SUBSCRIBES; i++) {
6895 svc_info = rng_inst->svc_idx[i];
6896 if (svc_info) {
6897 if (nan_event_data->ranging_result_present) {
6898 notify_svc = wl_cfgnan_check_ranging_cond(
6899 svc_info, distance, &ranging_ind,
6900 rng_inst->prev_distance_mm);
6901 nan_event_data->ranging_ind = ranging_ind;
6902 } else {
6903 /* Report only if ranging was needed */
6904 notify_svc = svc_info->ranging_required;
6905 }
6906 WL_DBG(("wl_cfgnan_notify_disc_with_ranging: Ranging notify for"
6907 " svc_id %d, notify %d and ind %d\n",
6908 svc_info->svc_id, notify_svc, ranging_ind));
6909 } else {
6910 continue;
6911 }
6912 if (notify_svc) {
6913 for (j = 0; j < NAN_MAX_CACHE_DISC_RESULT; j++) {
6914 if (!memcmp(&disc_res[j].peer, &(rng_inst->peer_addr),
6915 ETHER_ADDR_LEN) &&
6916 (svc_info->svc_id == disc_res[j].sub_id)) {
6917 ret = wl_nan_cache_to_event_data(&disc_res[j],
6918 nan_event_data, cfg->osh);
6919 ret = wl_cfgnan_event_disc_result(cfg, nan_event_data);
6920 /* If its not match once, clear it as the FW indicates
6921 * again.
6922 */
6923 if (!(svc_info->flags & WL_NAN_MATCH_ONCE)) {
6924 wl_cfgnan_remove_disc_result(cfg, svc_info->svc_id);
6925 }
6926 }
6927 }
6928 }
6929 }
6930 WL_DBG(("notify_disc_with_ranging done ret %d\n", ret));
6931 return ret;
6932 }
6933
6934 #ifdef RTT_SUPPORT
wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 * cfg,nan_ranging_inst_t * rng_inst,uint8 rng_id)6935 static int32 wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 *cfg,
6936 nan_ranging_inst_t *rng_inst,
6937 uint8 rng_id)
6938 {
6939 int ret = BCME_OK;
6940 uint32 status;
6941 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
6942
6943 ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg, rng_id,
6944 NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
6945 if (unlikely(ret) || unlikely(status)) {
6946 WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
6947 __FUNCTION__, ret, status));
6948 }
6949 dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
6950
6951 wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, RTT_SCHED_RNG_RPT_DIRECTED);
6952
6953 WL_DBG(("Ongoing ranging session is cancelled \n"));
6954 return ret;
6955 }
6956 #endif /* RTT_SUPPORT */
6957
6958 static void
wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 * cfg,nan_ranging_inst_t * rng_inst)6959 wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
6960 nan_ranging_inst_t *rng_inst)
6961 {
6962 nan_event_data_t *nan_event_data = NULL;
6963
6964 nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
6965 if (!nan_event_data) {
6966 WL_ERR(("%s: memory allocation failed\n", __func__));
6967 goto exit;
6968 }
6969
6970 wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, nan_event_data, 0);
6971
6972 exit:
6973 wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
6974
6975 return;
6976 }
6977
6978 #ifdef RTT_SUPPORT
/*
 * Handle a ranging-report (RNG RPT) indication from FW for a NAN peer.
 * Copies the measured distance and peer NMI into a local event struct;
 * for geofence sessions, notifies matching subscribed services and
 * rotates/reschedules the geofence target list; for directed sessions,
 * cancels the FW session and reports session end via the RTT path.
 */
void wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
                                    wl_nan_ev_rng_rpt_ind_t *range_res)
{
	nan_ranging_inst_t *rng_inst = NULL;
	nan_event_data_t nan_event_data;
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);

	UNUSED_PARAMETER(nan_event_data);
	/* A report is only meaningful if we track a session with this peer */
	rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res->peer_m_addr);
	if (!rng_inst) {
		WL_ERR(("wl_cfgnan_process_range_report: No ranging instance "
		        "but received RNG RPT event..check \n"));
		goto exit;
	}
#ifdef NAN_RTT_DBG
	DUMP_NAN_RTT_INST(rng_inst);
	DUMP_NAN_RTT_RPT(range_res);
#endif // endif
	range_res->rng_id = rng_inst->range_id;
	bzero(&nan_event_data, sizeof(nan_event_data));
	nan_event_data.ranging_result_present = 1;
	/* NOTE(review): dist_mm (millimetres) is stored into a field named
	 * range_measurement_cm -- confirm the intended units with the HAL.
	 */
	nan_event_data.range_measurement_cm = range_res->dist_mm;
	(void)memcpy_s(&nan_event_data.remote_nmi, ETHER_ADDR_LEN,
	               &range_res->peer_m_addr, ETHER_ADDR_LEN);
	nan_event_data.ranging_ind = range_res->indication;
	if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
		/* check in cache and event match to host */
		wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, &nan_event_data,
		                                   range_res->dist_mm);
		rng_inst->prev_distance_mm = range_res->dist_mm;
		/* Reset resp reject count on valid measurement */
		rng_inst->geof_retry_count = 0;
#ifdef RTT_GEOFENCE_INTERVAL
		/* Negative interval disables periodic geofence rescheduling */
		if (rtt_status->geofence_cfg.geofence_rtt_interval < 0) {
			; /* Do Nothing */
		} else
#endif /* RTT_GEOFENCE_INTERVAL */
		{
			/* Suspend the session with this peer, advance to the
			 * next geofence target and re-arm ranging.
			 */
			wl_cfgnan_suspend_geofence_rng_session(
			    bcmcfg_to_prmry_ndev(cfg), &rng_inst->peer_addr,
			    RTT_GEO_SUSPN_RANGE_RES_REPORTED, 0);
			GEOFENCE_RTT_LOCK(rtt_status);
			dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
			GEOFENCE_RTT_UNLOCK(rtt_status);
			wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
			                                 RTT_SCHED_RNG_RPT_GEOFENCE);
		}
	} else if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
		/* Directed RTT: cancel FW session and report session end */
		wl_cfgnan_handle_directed_rtt_report(cfg, rng_inst, range_res->rng_id);
	}

exit:
	return;
}
7034 #endif /* RTT_SUPPORT */
7035
/* Dump the current NAN configuration/status to the console. */
static void wl_nan_print_status(wl_nan_conf_status_t *nstatus)
{
	const char *role_name;

	printf("> enabled: %d\n", nstatus->enabled);
	printf("> Current NMI: " MACDBG "\n", MAC2STRDBG(nstatus->nmi.octet));
	printf("> Current cluster_id: " MACDBG "\n",
	       MAC2STRDBG(nstatus->cid.octet));

	/* Resolve the role id to its printable name once, then print once */
	switch (nstatus->role) {
	case WL_NAN_ROLE_AUTO:
		role_name = "auto";
		break;
	case WL_NAN_ROLE_NON_MASTER_NON_SYNC:
		role_name = "non-master-non-sync";
		break;
	case WL_NAN_ROLE_NON_MASTER_SYNC:
		role_name = "non-master-sync";
		break;
	case WL_NAN_ROLE_MASTER:
		role_name = "master";
		break;
	case WL_NAN_ROLE_ANCHOR_MASTER:
		role_name = "anchor-master";
		break;
	default:
		role_name = "undefined";
		break;
	}
	printf("> role: %s (%d)\n", role_name, nstatus->role);

	printf("> social channels: %d, %d\n", nstatus->social_chans[0],
	       nstatus->social_chans[1]);
	printf("> master_rank: " NMRSTR "\n", NMR2STR(nstatus->mr));
	printf("> amr : " NMRSTR "\n", NMR2STR(nstatus->amr));
	printf("> hop_count: %d\n", nstatus->hop_count);
	printf("> ambtt: %d\n", nstatus->ambtt);
}
7071
/*
 * Free every heap buffer hanging off a nan_event_data_t (tx/rx match
 * filters, svc info, SDE svc info) and then the struct itself.
 * Safe to call with nan_event_data == NULL (no-op).
 */
static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
                                           nan_event_data_t *nan_event_data)
{
	if (nan_event_data) {
		if (nan_event_data->tx_match_filter.data) {
			MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
			      nan_event_data->tx_match_filter.dlen);
			/* NULL out to guard against double free / stale use */
			nan_event_data->tx_match_filter.data = NULL;
		}
		if (nan_event_data->rx_match_filter.data) {
			MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
			      nan_event_data->rx_match_filter.dlen);
			nan_event_data->rx_match_filter.data = NULL;
		}
		if (nan_event_data->svc_info.data) {
			MFREE(cfg->osh, nan_event_data->svc_info.data,
			      nan_event_data->svc_info.dlen);
			nan_event_data->svc_info.data = NULL;
		}
		if (nan_event_data->sde_svc_info.data) {
			MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
			      nan_event_data->sde_svc_info.dlen);
			nan_event_data->sde_svc_info.data = NULL;
		}
		/* Finally release the container itself */
		MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
	}
}
7099
7100 #ifdef RTT_SUPPORT
/*
 * Reset geofence ranging state for a ranging instance and reschedule
 * RTT as needed.
 *
 * If no geofence target remains (current target idx == -1) the ranging
 * instance is cleared and any pending retry timer is cancelled.
 * Otherwise the instance is re-armed as a geofence initiator (or
 * cleared when its peer is no longer a geofence target), and the RTT
 * work thread is kicked -- either immediately or via the retry timer,
 * depending on sched_reason and the configured geofence interval.
 */
void wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
                                      nan_ranging_inst_t *rng_inst,
                                      int sched_reason)
{
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	u8 rtt_invalid_reason = RTT_STATE_VALID;
	rtt_geofence_target_info_t *geofence_target = NULL;
	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
	int8 cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
	int8 index = DHD_RTT_INVALID_TARGET_INDEX;
	bool geofence_state = dhd_rtt_get_geofence_rtt_state(dhd);
	bool retry = FALSE;

	WL_INFORM_MEM(
	    ("wl_cfgnan_reset_geofence_ranging, sched_reason = %d, cur_idx = %d, "
	     "geofence_interval = %d\n",
	     sched_reason, rtt_status->geofence_cfg.cur_target_idx,
	     rtt_status->geofence_cfg.geofence_rtt_interval));
	cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
	if (cur_idx == -1) {
		/* No geofence targets left: drop this ranging instance and
		 * stop any pending retry timer.
		 */
		WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, "
		               "Removing Ranging Instance " MACDBG "\n",
		               MAC2STRDBG(&(rng_inst->peer_addr))));
		bzero(rng_inst, sizeof(*rng_inst));
		/* Cancel pending retry timer if any */
		if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
			cancel_delayed_work(&rtt_status->rtt_retry_timer);
		}
		goto exit;
	}

	/* Get current geofencing target */
	geofence_target = dhd_rtt_get_geofence_current_target(dhd);

	/* get target index for cur ranging inst */
	dhd_rtt_get_geofence_target(dhd, &rng_inst->peer_addr, &index);
	if ((sched_reason == RTT_SCHED_RTT_RETRY_GEOFENCE) &&
	    (rng_inst->range_status == NAN_RANGING_IN_PROGRESS)) {
		/* if we are already in progress with peer
		 * (responder or directed RTT initiator)
		 * retry later when sched_reason = timeout
		 */
		retry = TRUE;
	} else if (cur_idx == index) {
		/* This instance's peer is the current geofence target:
		 * Reset incoming Ranging instance
		 */
		rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
		rng_inst->range_status = NAN_RANGING_REQUIRED;
		rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
		if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
		    (sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
			/* Fresh cycle: forget the previously measured distance */
			rng_inst->prev_distance_mm = INVALID_DISTANCE;
		}
	} else {
		if (index == DHD_RTT_INVALID_TARGET_INDEX) {
			/* Peer is no longer a geofence target:
			 * Remove incoming Ranging instance
			 */
			WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
			               MAC2STRDBG(&(rng_inst->peer_addr))));
			bzero(rng_inst, sizeof(*rng_inst));
		} else {
			/* Reset incoming Ranging instance */
			rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
			rng_inst->range_status = NAN_RANGING_REQUIRED;
			rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
			if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
			    (sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
				rng_inst->prev_distance_mm = INVALID_DISTANCE;
			}
		}
		/* Create range inst if not present and reset explicitly;
		 * from here on rng_inst refers to the current target's instance
		 */
		rng_inst = wl_cfgnan_get_ranging_inst(cfg, &geofence_target->peer_addr,
		                                      NAN_RANGING_ROLE_INITIATOR);
	}

	/* Avoid schedule if
	 * already geofence running
	 * or Directed RTT in progress
	 * or Invalid RTT state like
	 * NDP with Peer
	 */
	if ((geofence_state == TRUE) || (!RTT_IS_STOPPED(rtt_status)) ||
	    (rtt_invalid_reason != RTT_STATE_VALID)) {
		/* Not in valid RTT state, avoid schedule */
		goto exit;
	}

	if ((cur_idx == 0) && ((sched_reason == RTT_SCHED_RNG_RPT_GEOFENCE) ||
	                       (sched_reason == RTT_SCHED_RNG_TERM))) {
		/* First Target again after all done, retry over a timer */
		retry = TRUE;
	}

	if (retry && (rtt_status->geofence_cfg.geofence_rtt_interval >= 0)) {
		/* Move to first target and retry over a timer */
		WL_DBG(("Retry over a timer, cur_idx = %d\n",
		        rtt_status->geofence_cfg.cur_target_idx));
		/* schedule proxd retry timer */
		schedule_delayed_work(
		    &rtt_status->rtt_retry_timer,
		    msecs_to_jiffies(rtt_status->geofence_cfg.geofence_rtt_interval));
		goto exit;
	}

	/* schedule RTT */
	dhd_rtt_schedule_rtt_work_thread(dhd, sched_reason);

exit:
	return;
}
7215
wl_check_range_role_concurrency(dhd_pub_t * dhd,nan_ranging_inst_t * rng_inst)7216 static bool wl_check_range_role_concurrency(dhd_pub_t *dhd,
7217 nan_ranging_inst_t *rng_inst)
7218 {
7219 ASSERT(rng_inst);
7220 if ((dhd_rtt_get_role_concurrency_state(dhd) == TRUE) &&
7221 (rng_inst->num_svc_ctx > 0)) {
7222 return TRUE;
7223 } else {
7224 return FALSE;
7225 }
7226 }
7227
7228 static void
wl_cfgnan_resolve_ranging_role_concurrecny(dhd_pub_t * dhd,nan_ranging_inst_t * rng_inst)7229 wl_cfgnan_resolve_ranging_role_concurrecny(dhd_pub_t *dhd,
7230 nan_ranging_inst_t *rng_inst)
7231 {
7232 /* Update rang_inst to initiator and resolve role concurrency */
7233 rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
7234 dhd_rtt_set_role_concurrency_state(dhd, FALSE);
7235 }
7236 #endif /* RTT_SUPPORT */
7237
wl_cfgnan_geofence_retry_check(nan_ranging_inst_t * rng_inst,uint8 reason_code)7238 static bool wl_cfgnan_geofence_retry_check(nan_ranging_inst_t *rng_inst,
7239 uint8 reason_code)
7240 {
7241 bool geof_retry = FALSE;
7242
7243 switch (reason_code) {
7244 case NAN_RNG_TERM_IDLE_TIMEOUT:
7245 /* Fallthrough: Keep adding more reason code if needed */
7246 case NAN_RNG_TERM_RNG_RESP_TIMEOUT:
7247 case NAN_RNG_TERM_RNG_RESP_REJ:
7248 case NAN_RNG_TERM_RNG_TXS_FAIL:
7249 if (rng_inst->geof_retry_count < NAN_RNG_GEOFENCE_MAX_RETRY_CNT) {
7250 rng_inst->geof_retry_count++;
7251 geof_retry = TRUE;
7252 }
7253 break;
7254 default:
7255 /* FALSE for any other case */
7256 break;
7257 }
7258
7259 return geof_retry;
7260 }
7261
wl_cfgnan_notify_nan_status(struct bcm_cfg80211 * cfg,bcm_struct_cfgdev * cfgdev,const wl_event_msg_t * event,void * event_data)7262 s32 wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
7263 bcm_struct_cfgdev *cfgdev,
7264 const wl_event_msg_t *event, void *event_data)
7265 {
7266 uint16 data_len;
7267 uint32 event_num;
7268 s32 event_type;
7269 int hal_event_id = 0;
7270 nan_event_data_t *nan_event_data = NULL;
7271 nan_parse_event_ctx_t nan_event_ctx;
7272 uint16 tlvs_offset = 0;
7273 uint16 nan_opts_len = 0;
7274 uint8 *tlv_buf;
7275 s32 ret = BCME_OK;
7276 bcm_xtlv_opts_t xtlv_opt = BCM_IOV_CMD_OPT_ALIGN32;
7277 uint32 status;
7278 nan_svc_info_t *svc;
7279
7280 UNUSED_PARAMETER(wl_nan_print_status);
7281 UNUSED_PARAMETER(status);
7282 NAN_DBG_ENTER();
7283 NAN_MUTEX_LOCK();
7284
7285 if (!event || !event_data) {
7286 WL_ERR(("event data is NULL\n"));
7287 ret = -EINVAL;
7288 goto exit;
7289 }
7290
7291 event_type = ntoh32(event->event_type);
7292 event_num = ntoh32(event->reason);
7293 data_len = ntoh32(event->datalen);
7294
7295 if (NAN_INVALID_EVENT(event_num)) {
7296 WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num,
7297 event_type));
7298 ret = -EINVAL;
7299 goto exit;
7300 }
7301 WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
7302 nan_event_to_str(event_num), event_num, data_len));
7303
7304 #ifdef WL_NAN_DEBUG
7305 prhex("nan_event_data:", event_data, data_len);
7306 #endif /* WL_NAN_DEBUG */
7307
7308 if (!cfg->nan_init_state) {
7309 WL_ERR(
7310 ("nan is not in initialized state, dropping nan related events\n"));
7311 ret = BCME_OK;
7312 goto exit;
7313 }
7314
7315 nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
7316 if (!nan_event_data) {
7317 WL_ERR(("%s: memory allocation failed\n", __func__));
7318 goto exit;
7319 }
7320
7321 nan_event_ctx.cfg = cfg;
7322 nan_event_ctx.nan_evt_data = nan_event_data;
7323 /*
7324 * send as preformatted hex string
7325 * EVENT_NAN <event_type> <tlv_hex_string>
7326 */
7327 switch (event_num) {
7328 case WL_NAN_EVENT_START:
7329 case WL_NAN_EVENT_MERGE:
7330 case WL_NAN_EVENT_ROLE: {
7331 /* get nan status info as-is */
7332 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7333 wl_nan_conf_status_t *nstatus = (wl_nan_conf_status_t *)xtlv->data;
7334 WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
7335 nan_event_to_str(event_num), event_num, data_len));
7336 WL_INFORM_MEM(
7337 ("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
7338 /* Mapping to common struct between DHD and HAL */
7339 nan_event_data->enabled = nstatus->enabled;
7340 ret = memcpy_s(&nan_event_data->local_nmi, ETHER_ADDR_LEN,
7341 &nstatus->nmi, ETHER_ADDR_LEN);
7342 if (ret != BCME_OK) {
7343 WL_ERR(("Failed to copy nmi\n"));
7344 goto exit;
7345 }
7346 ret = memcpy_s(&nan_event_data->clus_id, ETHER_ADDR_LEN,
7347 &nstatus->cid, ETHER_ADDR_LEN);
7348 if (ret != BCME_OK) {
7349 WL_ERR(("Failed to copy cluster id\n"));
7350 goto exit;
7351 }
7352 nan_event_data->nan_de_evt_type = event_num;
7353 #ifdef WL_NAN_DEBUG
7354 wl_nan_print_status(nstatus);
7355 #endif /* WL_NAN_DEBUG */
7356 if (event_num == WL_NAN_EVENT_START) {
7357 OSL_SMP_WMB();
7358 cfg->nancfg.nan_event_recvd = true;
7359 OSL_SMP_WMB();
7360 wake_up(&cfg->nancfg.nan_event_wait);
7361 }
7362 hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
7363 break;
7364 }
7365 case WL_NAN_EVENT_TERMINATED: {
7366 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7367 wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
7368
7369 /* Mapping to common struct between DHD and HAL */
7370 WL_TRACE(("Instance ID: %d\n", pev->instance_id));
7371 nan_event_data->local_inst_id = pev->instance_id;
7372 WL_TRACE(("Service Type: %d\n", pev->svctype));
7373
7374 #ifdef WL_NAN_DISC_CACHE
7375 if (pev->svctype == NAN_SC_SUBSCRIBE) {
7376 wl_cfgnan_remove_disc_result(cfg, pev->instance_id);
7377 }
7378 #endif /* WL_NAN_DISC_CACHE */
7379 /* Mapping reason code of FW to status code of framework */
7380 if (pev->reason == NAN_TERM_REASON_TIMEOUT ||
7381 pev->reason == NAN_TERM_REASON_USER_REQ ||
7382 pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
7383 nan_event_data->status = NAN_STATUS_SUCCESS;
7384 ret = memcpy_s(nan_event_data->nan_reason,
7385 sizeof(nan_event_data->nan_reason),
7386 "NAN_STATUS_SUCCESS",
7387 strlen("NAN_STATUS_SUCCESS"));
7388 if (ret != BCME_OK) {
7389 WL_ERR(("Failed to copy nan_reason\n"));
7390 goto exit;
7391 }
7392 } else {
7393 nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
7394 ret = memcpy_s(nan_event_data->nan_reason,
7395 sizeof(nan_event_data->nan_reason),
7396 "NAN_STATUS_INTERNAL_FAILURE",
7397 strlen("NAN_STATUS_INTERNAL_FAILURE"));
7398 if (ret != BCME_OK) {
7399 WL_ERR(("Failed to copy nan_reason\n"));
7400 goto exit;
7401 }
7402 }
7403
7404 if (pev->svctype == NAN_SC_SUBSCRIBE) {
7405 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED;
7406 } else {
7407 hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
7408 }
7409 #ifdef WL_NAN_DISC_CACHE
7410 if (pev->reason != NAN_TERM_REASON_USER_REQ) {
7411 wl_cfgnan_clear_svc_from_all_ranging_inst(cfg,
7412 pev->instance_id);
7413 /* terminate ranging sessions */
7414 wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
7415 }
7416 #endif /* WL_NAN_DISC_CACHE */
7417 break;
7418 }
7419
7420 case WL_NAN_EVENT_RECEIVE: {
7421 nan_opts_len = data_len;
7422 hal_event_id = GOOGLE_NAN_EVENT_FOLLOWUP;
7423 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
7424 break;
7425 }
7426
7427 case WL_NAN_EVENT_TXS: {
7428 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7429 wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
7430 wl_nan_event_sd_txs_t *txs_sd = NULL;
7431 if (txs->status == WL_NAN_TXS_SUCCESS) {
7432 WL_INFORM_MEM(("TXS success for type %d token %d\n", txs->type,
7433 txs->host_seq));
7434 nan_event_data->status = NAN_STATUS_SUCCESS;
7435 ret = memcpy_s(nan_event_data->nan_reason,
7436 sizeof(nan_event_data->nan_reason),
7437 "NAN_STATUS_SUCCESS",
7438 strlen("NAN_STATUS_SUCCESS"));
7439 if (ret != BCME_OK) {
7440 WL_ERR(("Failed to copy nan_reason\n"));
7441 goto exit;
7442 }
7443 } else {
7444 /* populate status based on reason codes
7445 For now adding it as no ACK, so that app/framework can retry
7446 */
7447 WL_INFORM_MEM(("TXS failed for type %d status %d token %d\n",
7448 txs->type, txs->status, txs->host_seq));
7449 nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
7450 ret = memcpy_s(nan_event_data->nan_reason,
7451 sizeof(nan_event_data->nan_reason),
7452 "NAN_STATUS_NO_OTA_ACK",
7453 strlen("NAN_STATUS_NO_OTA_ACK"));
7454 if (ret != BCME_OK) {
7455 WL_ERR(("Failed to copy nan_reason\n"));
7456 goto exit;
7457 }
7458 }
7459 nan_event_data->reason = txs->reason_code;
7460 nan_event_data->token = txs->host_seq;
7461 if (txs->type == WL_NAN_FRM_TYPE_FOLLOWUP) {
7462 hal_event_id = GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND;
7463 xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
7464 if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_SD_TXS) {
7465 txs_sd = (wl_nan_event_sd_txs_t *)xtlv->data;
7466 nan_event_data->local_inst_id = txs_sd->inst_id;
7467 } else {
7468 WL_ERR(
7469 ("Invalid params in TX status for trasnmit followup"));
7470 ret = -EINVAL;
7471 goto exit;
7472 }
7473 } else { /* add for other frame types if required */
7474 ret = -EINVAL;
7475 goto exit;
7476 }
7477 break;
7478 }
7479
7480 case WL_NAN_EVENT_DISCOVERY_RESULT: {
7481 nan_opts_len = data_len;
7482 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
7483 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
7484 break;
7485 }
7486 #ifdef WL_NAN_DISC_CACHE
7487 case WL_NAN_EVENT_DISC_CACHE_TIMEOUT: {
7488 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7489 wl_nan_ev_disc_cache_timeout_t *cache_data =
7490 (wl_nan_ev_disc_cache_timeout_t *)xtlv->data;
7491 wl_nan_disc_expired_cache_entry_t *cache_entry = NULL;
7492 uint16 xtlv_len = xtlv->len;
7493 uint8 entry_idx = 0;
7494
7495 if (xtlv->id == WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT) {
7496 xtlv_len = xtlv_len - OFFSETOF(wl_nan_ev_disc_cache_timeout_t,
7497 cache_exp_list);
7498 while ((entry_idx < cache_data->count) &&
7499 (xtlv_len >= sizeof(*cache_entry))) {
7500 cache_entry = &cache_data->cache_exp_list[entry_idx];
7501 /* Handle ranging cases for cache timeout */
7502 wl_cfgnan_ranging_clear_publish(
7503 cfg, &cache_entry->r_nmi_addr, cache_entry->l_sub_id);
7504 /* Invalidate local cache info */
7505 wl_cfgnan_remove_disc_result(cfg, cache_entry->l_sub_id);
7506 xtlv_len = xtlv_len - sizeof(*cache_entry);
7507 entry_idx++;
7508 }
7509 }
7510 break;
7511 }
7512 case WL_NAN_EVENT_RNG_REQ_IND: {
7513 wl_nan_ev_rng_req_ind_t *rng_ind;
7514 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7515
7516 nan_opts_len = data_len;
7517 rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
7518 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
7519 WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d"
7520 " peer:" MACDBG "\n",
7521 rng_ind->rng_id, MAC2STRDBG(&rng_ind->peer_m_addr)));
7522 #ifdef RTT_SUPPORT
7523 ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
7524 #endif /* RTT_SUPPORT */
7525 /* no need to event to HAL */
7526 goto exit;
7527 }
7528
7529 case WL_NAN_EVENT_RNG_TERM_IND: {
7530 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7531 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7532 nan_ranging_inst_t *rng_inst;
7533 wl_nan_ev_rng_term_ind_t *range_term =
7534 (wl_nan_ev_rng_term_ind_t *)xtlv->data;
7535 #ifdef RTT_SUPPORT
7536 int8 index = -1;
7537 rtt_geofence_target_info_t *geofence_target;
7538 rtt_status_info_t *rtt_status;
7539 int rng_sched_reason = 0;
7540 #endif /* RTT_SUPPORT */
7541 BCM_REFERENCE(dhd);
7542 WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_TERM_IND peer: " MACDBG
7543 ", "
7544 " Range ID:%d Reason Code:%d\n",
7545 MAC2STRDBG(&range_term->peer_m_addr),
7546 range_term->rng_id, range_term->reason_code));
7547 rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_term->rng_id);
7548 if (rng_inst) {
7549 #ifdef RTT_SUPPORT
7550 rng_sched_reason = RTT_SCHED_RNG_TERM;
7551 if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
7552 dhd_rtt_handle_nan_rtt_session_end(dhd,
7553 &rng_inst->peer_addr);
7554 } else if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
7555 if (wl_cfgnan_geofence_retry_check(rng_inst, range_term->reason_code)) {
7556 rtt_status = GET_RTTSTATE(dhd);
7557 GEOFENCE_RTT_LOCK(rtt_status);
7558 dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
7559 GEOFENCE_RTT_UNLOCK(rtt_status);
7560 } else {
7561 /* Report on ranging failure */
7562 wl_cfgnan_disc_result_on_geofence_cancel(cfg, rng_inst);
7563 WL_TRACE(("Reset the state on terminate\n"));
7564 geofence_target = dhd_rtt_get_geofence_target(
7565 dhd, &rng_inst->peer_addr, &index);
7566 if (geofence_target) {
7567 dhd_rtt_remove_geofence_target(
7568 dhd, &geofence_target->peer_addr);
7569 }
7570 }
7571 /* Set geofence RTT in progress state to false */
7572 dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
7573 }
7574 if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER &&
7575 wl_check_range_role_concurrency(dhd, rng_inst)) {
7576 /* Resolve role concurrency */
7577 wl_cfgnan_resolve_ranging_role_concurrecny(dhd, rng_inst);
7578 /* Override sched reason if role concurrency just resolved
7579 */
7580 rng_sched_reason = RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE;
7581 }
7582 /* Reset Ranging Instance and trigger ranging if applicable */
7583 wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
7584 rng_sched_reason);
7585 #endif /* RTT_SUPPORT */
7586 }
7587 break;
7588 }
7589 #endif /* WL_NAN_DISC_CACHE */
7590 /*
7591 * Data path events data are received in common event struct,
7592 * Handling all the events as part of one case, hence fall through is
7593 * intentional
7594 */
7595 case WL_NAN_EVENT_PEER_DATAPATH_IND:
7596 case WL_NAN_EVENT_DATAPATH_ESTB:
7597 case WL_NAN_EVENT_DATAPATH_END: {
7598 ret = wl_nan_dp_cmn_event_data(
7599 cfg, event_data, data_len, &tlvs_offset, &nan_opts_len,
7600 event_num, &hal_event_id, nan_event_data);
7601 /* Avoiding optional param parsing for DP END Event */
7602 if (event_num == WL_NAN_EVENT_DATAPATH_END) {
7603 nan_opts_len = 0;
7604 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
7605 }
7606 if (unlikely(ret)) {
7607 WL_ERR(("nan dp common event data parse failed\n"));
7608 goto exit;
7609 }
7610 break;
7611 }
7612 default:
7613 WL_ERR_RLMT(
7614 ("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
7615 ret = BCME_ERROR;
7616 goto exit;
7617 }
7618
7619 if (nan_opts_len) {
7620 tlv_buf = (uint8 *)event_data + tlvs_offset;
7621 /* Extract event data tlvs and pass their resp to cb fn */
7622 ret = bcm_unpack_xtlv_buf((void *)&nan_event_ctx,
7623 (const uint8 *)tlv_buf, nan_opts_len,
7624 xtlv_opt, wl_cfgnan_set_vars_cbfn);
7625 if (ret != BCME_OK) {
7626 WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret));
7627 }
7628 }
7629
7630 #ifdef WL_NAN_DISC_CACHE
7631 if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
7632 #ifdef RTT_SUPPORT
7633 u8 rtt_invalid_reason = RTT_STATE_VALID;
7634 bool role_concur_state = 0;
7635 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7636 #endif /* RTT_SUPPORT */
7637 u16 update_flags = 0;
7638 WL_TRACE(("Cache disc res\n"));
7639 ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data, &update_flags);
7640 if (ret) {
7641 WL_ERR(("Failed to cache disc result ret %d\n", ret));
7642 }
7643 if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
7644 ret = wl_cfgnan_check_disc_result_for_ranging(cfg, nan_event_data);
7645 if (ret == BCME_OK) {
7646 #ifdef RTT_SUPPORT
7647 rtt_invalid_reason = dhd_rtt_invalid_states(
7648 bcmcfg_to_prmry_ndev(cfg), &nan_event_data->remote_nmi);
7649 role_concur_state = dhd_rtt_get_role_concurrency_state(dhd);
7650 /*
7651 * If instant RTT not possible,
7652 * send discovery result instantly like
7653 * incase of invalid rtt state as
7654 * NDP connected/connecting or role_concurrency
7655 * on, otherwise, disc result will be posted
7656 * on ranging report event
7657 */
7658 if (rtt_invalid_reason == RTT_STATE_VALID &&
7659 role_concur_state == FALSE) {
7660 /* Avoid sending disc result instantly */
7661 goto exit;
7662 }
7663 #endif /* RTT_SUPPORT */
7664 } else {
7665 /* should we terminate service if ranging fails ? */
7666 WL_INFORM_MEM(("Ranging failed or not required, " MACDBG
7667 " sub_id:%d , pub_id:%d\n",
7668 MAC2STRDBG(&nan_event_data->remote_nmi),
7669 nan_event_data->sub_id, nan_event_data->pub_id));
7670 }
7671 } else {
7672 nan_svc_info_t *svc_info =
7673 wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
7674 if (svc_info && svc_info->ranging_required &&
7675 (update_flags & NAN_DISC_CACHE_PARAM_SDE_CONTROL)) {
7676 wl_cfgnan_ranging_clear_publish(
7677 cfg, &nan_event_data->remote_nmi, nan_event_data->sub_id);
7678 }
7679 }
7680
7681 /*
7682 * If tx match filter is present as part of active subscribe, keep same
7683 * filter values in discovery results also.
7684 */
7685 if (nan_event_data->sub_id == nan_event_data->requestor_id) {
7686 svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
7687 if (svc && svc->tx_match_filter_len) {
7688 nan_event_data->tx_match_filter.dlen = svc->tx_match_filter_len;
7689 nan_event_data->tx_match_filter.data =
7690 MALLOCZ(cfg->osh, svc->tx_match_filter_len);
7691 if (!nan_event_data->tx_match_filter.data) {
7692 WL_ERR(("%s: tx_match_filter_data alloc failed\n",
7693 __FUNCTION__));
7694 nan_event_data->tx_match_filter.dlen = 0;
7695 ret = -ENOMEM;
7696 goto exit;
7697 }
7698 ret = memcpy_s(nan_event_data->tx_match_filter.data,
7699 nan_event_data->tx_match_filter.dlen,
7700 svc->tx_match_filter, svc->tx_match_filter_len);
7701 if (ret != BCME_OK) {
7702 WL_ERR(("Failed to copy tx match filter data\n"));
7703 goto exit;
7704 }
7705 }
7706 }
7707 }
7708 #endif /* WL_NAN_DISC_CACHE */
7709
7710 WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
7711 nan_event_to_str(event_num), event_num, hal_event_id));
7712 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || \
7713 defined(WL_VENDOR_EXT_SUPPORT)
7714 ret =
7715 wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
7716 hal_event_id, nan_event_data);
7717 if (ret != BCME_OK) {
7718 WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
7719 nan_event_to_str(event_num), event_num));
7720 }
7721 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || \
7722 defined(WL_VENDOR_EXT_SUPPORT) */
7723
7724 exit:
7725 wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
7726
7727 NAN_MUTEX_UNLOCK();
7728 NAN_DBG_EXIT();
7729 return ret;
7730 }
7731
7732 #ifdef WL_NAN_DISC_CACHE
/*
 * Cache a NAN discovery (subscribe match) result in the host-side
 * discovery cache.
 *
 * data: nan_event_data_t describing the discovery result.
 * disc_cache_update_flags: out param; set to
 *	NAN_DISC_CACHE_PARAM_SDE_CONTROL when an existing entry's SDE
 *	control flags were refreshed.
 *
 * Returns BCME_OK on success (including "entry already cached"),
 * BCME_NOTENABLED / BCME_NORESOURCE / BCME_NOMEM or a memcpy_s error
 * otherwise. On failure the partially filled slot is rolled back so
 * the valid flags stay consistent with nan_disc_count.
 */
static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void *data,
	u16 *disc_cache_update_flags)
{
	nan_event_data_t *disc = (nan_event_data_t *)data;
	int i, add_index = 0;
	int ret = BCME_OK;
	nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
	*disc_cache_update_flags = 0;

	if (!cfg->nan_enable) {
		WL_DBG(("nan not enabled"));
		return BCME_NOTENABLED;
	}
	if (cfg->nan_disc_count == NAN_MAX_CACHE_DISC_RESULT) {
		WL_DBG(("cache full"));
		ret = BCME_NORESOURCE;
		goto done;
	}

	for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
		if (!disc_res[i].valid) {
			/* remember a free slot in case this is a new entry */
			add_index = i;
			continue;
		}
		/* duplicate detection: same peer NMI and same service hash */
		if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
			!memcmp(disc_res[i].svc_hash, disc->svc_name,
			WL_NAN_SVC_HASH_LEN)) {
			WL_DBG(("cache entry already present, i = %d", i));
			/* Update needed parameters here */
			if (disc_res[i].sde_control_flag != disc->sde_control_flag) {
				disc_res[i].sde_control_flag = disc->sde_control_flag;
				*disc_cache_update_flags |= NAN_DISC_CACHE_PARAM_SDE_CONTROL;
			}
			ret = BCME_OK; /* entry already present */
			goto done;
		}
	}
	WL_DBG(("adding cache entry: add_index = %d\n", add_index));
	disc_res[add_index].valid = 1;
	disc_res[add_index].pub_id = disc->pub_id;
	disc_res[add_index].sub_id = disc->sub_id;
	disc_res[add_index].publish_rssi = disc->publish_rssi;
	disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
	disc_res[add_index].sde_control_flag = disc->sde_control_flag;
	ret = memcpy_s(&disc_res[add_index].peer, ETHER_ADDR_LEN, &disc->remote_nmi,
		ETHER_ADDR_LEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy remote nmi\n"));
		goto cleanup;
	}
	ret = memcpy_s(disc_res[add_index].svc_hash, WL_NAN_SVC_HASH_LEN,
		disc->svc_name, WL_NAN_SVC_HASH_LEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy svc hash\n"));
		goto cleanup;
	}

	if (disc->svc_info.dlen && disc->svc_info.data) {
		disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
		disc_res[add_index].svc_info.data =
			MALLOCZ(cfg->osh, disc_res[add_index].svc_info.dlen);
		if (!disc_res[add_index].svc_info.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			disc_res[add_index].svc_info.dlen = 0;
			ret = BCME_NOMEM;
			goto cleanup;
		}
		ret = memcpy_s(disc_res[add_index].svc_info.data,
			disc_res[add_index].svc_info.dlen, disc->svc_info.data,
			disc->svc_info.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy svc info\n"));
			goto cleanup;
		}
	}
	if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
		disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
		disc_res[add_index].tx_match_filter.data =
			MALLOCZ(cfg->osh, disc_res[add_index].tx_match_filter.dlen);
		if (!disc_res[add_index].tx_match_filter.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			disc_res[add_index].tx_match_filter.dlen = 0;
			ret = BCME_NOMEM;
			goto cleanup;
		}
		ret = memcpy_s(disc_res[add_index].tx_match_filter.data,
			disc_res[add_index].tx_match_filter.dlen,
			disc->tx_match_filter.data, disc->tx_match_filter.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy tx match filter\n"));
			goto cleanup;
		}
	}
	cfg->nan_disc_count++;
	WL_DBG(("cfg->nan_disc_count = %d\n", cfg->nan_disc_count));
	goto done;

cleanup:
	/* Roll back the partially populated slot. Previously a failure here
	 * left the entry marked valid without incrementing nan_disc_count
	 * (desynchronizing count vs. valid flags) and leaked the svc_info
	 * allocation when a later copy failed.
	 */
	if (disc_res[add_index].svc_info.data) {
		MFREE(cfg->osh, disc_res[add_index].svc_info.data,
			disc_res[add_index].svc_info.dlen);
	}
	if (disc_res[add_index].tx_match_filter.data) {
		MFREE(cfg->osh, disc_res[add_index].tx_match_filter.data,
			disc_res[add_index].tx_match_filter.dlen);
	}
	memset_s(&disc_res[add_index], sizeof(disc_res[add_index]), 0,
		sizeof(disc_res[add_index]));

done:
	return ret;
}
7832
7833 /* Sending command to FW for clearing discovery cache info in FW */
/* Sending command to FW for clearing discovery cache info in FW */
static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg,
	wl_nan_instance_id_t sub_id)
{
	s32 ret = BCME_OK;
	uint16 nan_iov_len = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	uint8 cmd_buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *batch;
	bcm_iov_batch_subcmd_t *subcmd;
	uint16 subcmd_len;

	/* Same src and dest len here */
	memset_s(cmd_buf, sizeof(cmd_buf), 0, sizeof(cmd_buf));

	/* Build a single-subcommand batch iovar on the stack buffer */
	batch = (bcm_iov_batch_buf_t *)cmd_buf;
	batch->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	batch->count = 0;
	nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	subcmd = (bcm_iov_batch_subcmd_t *)(&batch->cmds[0]);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_len, sizeof(sub_id), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	/* Fill the sub_command block */
	subcmd->id = htod16(WL_NAN_CMD_SD_DISC_CACHE_CLEAR);
	subcmd->len = sizeof(subcmd->u.options) + sizeof(sub_id);
	subcmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	/* Data size len vs buffer len check is already done above.
	 * So, short buffer error is impossible.
	 */
	(void)memcpy_s(subcmd->data,
		(nan_iov_len - OFFSETOF(bcm_iov_batch_subcmd_t, data)),
		&sub_id, sizeof(sub_id));
	/* adjust iov data len to the end of last data record */
	nan_iov_len -= (subcmd_len);

	batch->count++;
	batch->is_set = true;
	/* convert remaining space into the total bytes actually used */
	nan_iov_len = NAN_IOCTL_BUF_SIZE - nan_iov_len;
	/* Same src and dest len here */
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg, batch,
		nan_iov_len, &status, (void *)resp_buf,
		NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("Disc cache clear handler failed ret %d status %d\n", ret,
			status));
		goto fail;
	}

fail:
	return ret;
}
7892
/*
 * Invalidate every cached discovery result belonging to the local
 * subscriber id 'local_subid', freeing any per-entry allocations.
 * Returns BCME_OK if at least one entry was removed, BCME_NOTFOUND if
 * none matched, BCME_NOTENABLED if NAN is disabled.
 */
static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
	uint8 local_subid)
{
	int i;
	int ret = BCME_NOTFOUND;
	nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
	if (!cfg->nan_enable) {
		WL_DBG(("nan not enabled\n"));
		ret = BCME_NOTENABLED;
		goto done;
	}
	for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
		if ((disc_res[i].valid) && (disc_res[i].sub_id == local_subid)) {
			WL_TRACE(("make cache entry invalid\n"));
			if (disc_res[i].tx_match_filter.data) {
				MFREE(cfg->osh, disc_res[i].tx_match_filter.data,
					disc_res[i].tx_match_filter.dlen);
			}
			if (disc_res[i].svc_info.data) {
				MFREE(cfg->osh, disc_res[i].svc_info.data,
					disc_res[i].svc_info.dlen);
			}
			memset_s(&disc_res[i], sizeof(disc_res[i]), 0, sizeof(disc_res[i]));
			cfg->nan_disc_count--;
			ret = BCME_OK;
		}
	}
	/* Only report a miss when nothing matched; the old code logged
	 * "couldn't find entry" even after successful removals.
	 */
	if (ret == BCME_NOTFOUND) {
		WL_DBG(("couldn't find entry\n"));
	}
done:
	return ret;
}
7924
7925 static nan_disc_result_cache *
wl_cfgnan_get_disc_result(struct bcm_cfg80211 * cfg,uint8 remote_pubid,struct ether_addr * peer)7926 wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
7927 struct ether_addr *peer)
7928 {
7929 int i;
7930 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
7931 if (remote_pubid) {
7932 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
7933 if ((disc_res[i].pub_id == remote_pubid) &&
7934 !memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
7935 WL_DBG(("Found entry: i = %d\n", i));
7936 return &disc_res[i];
7937 }
7938 }
7939 } else {
7940 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
7941 if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
7942 WL_DBG(("Found entry: %d\n", i));
7943 return &disc_res[i];
7944 }
7945 }
7946 }
7947 return NULL;
7948 }
7949 #endif /* WL_NAN_DISC_CACHE */
7950
/*
 * Track active NAN data paths (NDPs): add/remove 'ndp_id' from the
 * per-cfg ndp_id table and maintain nan_dp_count. ARP offload is
 * disabled while any NDP is up and re-enabled when the count drops to
 * zero (when ARP_OFFLOAD_SUPPORT is built in).
 */
void wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
	nan_data_path_id ndp_id)
{
	uint8 i;
	bool match_found = false;
#ifdef ARP_OFFLOAD_SUPPORT
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);

	/* 'dhd' is only declared under ARP_OFFLOAD_SUPPORT; the previous
	 * unconditional "if (!dhd)" reference broke the build when the
	 * feature was disabled, so the guard lives inside the ifdef now.
	 */
	if (!dhd) {
		WL_ERR(("dhd pub null!\n"));
		return;
	}
#endif /* ARP_OFFLOAD_SUPPORT */
	/* As of now, we don't see a need to know which ndp is active.
	 * so just keep tracking of ndp via count. If we need to know
	 * the status of each ndp based on ndp id, we need to change
	 * this implementation to use a bit mask.
	 */

	if (add) {
		/* On first NAN DP establishment, disable ARP. */
#ifdef ARP_OFFLOAD_SUPPORT
		if (!cfg->nan_dp_count) {
			dhd_arp_offload_set(dhd, 0);
			dhd_arp_offload_enable(dhd, false);
		}
#endif /* ARP_OFFLOAD_SUPPORT */
		/* find the first free slot in the ndp_id table */
		for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
			if (!cfg->nancfg.ndp_id[i]) {
				WL_TRACE(("Found empty field\n"));
				break;
			}
		}

		if (i == NAN_MAX_NDP_PEER) {
			WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
			return;
		}
		if (ndp_id) {
			cfg->nan_dp_count++;
			cfg->nancfg.ndp_id[i] = ndp_id;
			WL_DBG(("%s:Added ndp id = [%d] at i = %d\n", __FUNCTION__,
				cfg->nancfg.ndp_id[i], i));
		}
	} else {
		ASSERT(cfg->nan_dp_count);
		if (ndp_id) {
			for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
				if (cfg->nancfg.ndp_id[i] == ndp_id) {
					cfg->nancfg.ndp_id[i] = 0;
					WL_DBG(("%s:Removed ndp id = [%d] from i = %d\n",
						__FUNCTION__, ndp_id, i));
					match_found = true;
					if (cfg->nan_dp_count) {
						cfg->nan_dp_count--;
					}
					break;
				}
			}
			/* Report a miss once after the scan; the old code
			 * logged "couldn't find entry" on every slot that
			 * simply wasn't the matching one.
			 */
			if (match_found == false) {
				WL_ERR(("Received unsaved NDP Id = %d !!\n", ndp_id));
			}
		}

#ifdef ARP_OFFLOAD_SUPPORT
		if (!cfg->nan_dp_count) {
			/* If NAN DP count becomes zero and if there
			 * are no conflicts, enable back ARP offload.
			 * As of now, the conflicting interfaces are AP
			 * and P2P. But NAN + P2P/AP concurrency is not
			 * supported.
			 */
			dhd_arp_offload_set(dhd, dhd_arp_mode);
			dhd_arp_offload_enable(dhd, true);
		}
#endif /* ARP_OFFLOAD_SUPPORT */
	}
	WL_INFORM_MEM(("NAN_DP_COUNT: %d\n", cfg->nan_dp_count));
}
8031
wl_cfgnan_is_dp_active(struct net_device * ndev)8032 bool wl_cfgnan_is_dp_active(struct net_device *ndev)
8033 {
8034 struct bcm_cfg80211 *cfg;
8035 bool nan_dp;
8036
8037 if (!ndev || !ndev->ieee80211_ptr) {
8038 WL_ERR(("ndev/wdev null\n"));
8039 return false;
8040 }
8041
8042 cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
8043 nan_dp = cfg->nan_dp_count ? true : false;
8044
8045 WL_DBG(("NAN DP status:%d\n", nan_dp));
8046 return nan_dp;
8047 }
8048
wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 * cfg)8049 s32 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg)
8050 {
8051 int i;
8052 for (i = 0; i < NAN_MAX_NDI; i++) {
8053 if (!cfg->nancfg.ndi[i].in_use) {
8054 /* Free interface, use it */
8055 return i;
8056 }
8057 }
8058 /* Don't have a free interface */
8059 return WL_INVALID;
8060 }
8061
/*
 * Record a NAN data interface name in slot 'idx' of the NDI table and
 * mark the slot in use (but not yet created).
 *
 * Returns -EINVAL on bad arguments.
 * NOTE(review): on success this returns WL_INVALID, and the trailing
 * comment ("Don't have a free interface") looks copy-pasted from
 * wl_cfgnan_get_ndi_idx(). Callers may be ignoring the return value —
 * confirm before changing it to a success code.
 */
s32 wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name)
{
	u16 len;
	if (!name || (idx < 0) || (idx >= NAN_MAX_NDI)) {
		return -EINVAL;
	}

	/* Ensure ifname string size <= IFNAMSIZ including null termination */
	len = MIN(strlen(name), (IFNAMSIZ - 1));
	strncpy(cfg->nancfg.ndi[idx].ifname, name, len);
	/* strncpy does not terminate for truncated copies; do it explicitly */
	cfg->nancfg.ndi[idx].ifname[len] = '\0';
	cfg->nancfg.ndi[idx].in_use = true;
	/* 'created' flips to true only after the netdev itself exists */
	cfg->nancfg.ndi[idx].created = false;

	/* Don't have a free interface */
	return WL_INVALID;
}
8079
wl_cfgnan_del_ndi_data(struct bcm_cfg80211 * cfg,char * name)8080 s32 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name)
8081 {
8082 u16 len;
8083 int i;
8084 if (!name) {
8085 return -EINVAL;
8086 }
8087
8088 len = MIN(strlen(name), IFNAMSIZ);
8089 for (i = 0; i < NAN_MAX_NDI; i++) {
8090 if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
8091 memset_s(&cfg->nancfg.ndi[i].ifname, IFNAMSIZ, 0x0, IFNAMSIZ);
8092 cfg->nancfg.ndi[i].in_use = false;
8093 cfg->nancfg.ndi[i].created = false;
8094 cfg->nancfg.ndi[i].nan_ndev = NULL;
8095 return i;
8096 }
8097 }
8098 return -EINVAL;
8099 }
8100
wl_cfgnan_get_ndi_data(struct bcm_cfg80211 * cfg,char * name)8101 struct wl_ndi_data *wl_cfgnan_get_ndi_data(struct bcm_cfg80211 *cfg, char *name)
8102 {
8103 u16 len;
8104 int i;
8105 if (!name) {
8106 return NULL;
8107 }
8108
8109 len = MIN(strlen(name), IFNAMSIZ);
8110 for (i = 0; i < NAN_MAX_NDI; i++) {
8111 if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
8112 return &cfg->nancfg.ndi[i];
8113 }
8114 }
8115 return NULL;
8116 }
8117
wl_cfgnan_delete_ndp(struct bcm_cfg80211 * cfg,struct net_device * nan_ndev)8118 s32 wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg, struct net_device *nan_ndev)
8119 {
8120 s32 ret = BCME_OK;
8121 uint8 i = 0;
8122 for (i = 0; i < NAN_MAX_NDI; i++) {
8123 if (cfg->nancfg.ndi[i].in_use && cfg->nancfg.ndi[i].created &&
8124 (cfg->nancfg.ndi[i].nan_ndev == nan_ndev)) {
8125 WL_INFORM_MEM(("iface name: %s, cfg->nancfg.ndi[i].nan_ndev = %p"
8126 " and nan_ndev = %p\n",
8127 (char *)cfg->nancfg.ndi[i].ifname,
8128 cfg->nancfg.ndi[i].nan_ndev, nan_ndev));
8129 ret = _wl_cfg80211_del_if(cfg, nan_ndev, NULL,
8130 (char *)cfg->nancfg.ndi[i].ifname);
8131 if (ret) {
8132 WL_ERR(("failed to del ndi [%d]\n", ret));
8133 goto exit;
8134 }
8135 /* After successful delete of interface,
8136 * clear up the ndi data
8137 */
8138 if (wl_cfgnan_del_ndi_data(cfg, (char *)cfg->nancfg.ndi[i].ifname) <
8139 0) {
8140 WL_ERR(("Failed to find matching data for ndi:%s\n",
8141 (char *)cfg->nancfg.ndi[i].ifname));
8142 }
8143 }
8144 }
8145 exit:
8146 return ret;
8147 }
8148
/*
 * Query the NAN configuration status (WL_NAN_CMD_CFG_STATUS) from the
 * firmware and copy the result into caller-supplied 'nan_status'.
 * Returns BCME_OK on success, an iovar/allocation/copy error otherwise.
 */
int wl_cfgnan_get_status(struct net_device *ndev,
	wl_nan_conf_status_t *nan_status)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	wl_nan_conf_status_t *nstatus = NULL;
	uint32 status;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
	NAN_DBG_ENTER();

	nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size, sizeof(*nstatus), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	nstatus = (wl_nan_conf_status_t *)sub_cmd->data;
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_STATUS);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nstatus);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;
	nan_buf->is_set = false;	/* GET operation */

	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("get nan status failed ret %d status %d \n", ret, status));
		goto fail;
	}
	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
	/* WL_NAN_CMD_CFG_STATUS return value doesn't use xtlv package */
	nstatus = ((wl_nan_conf_status_t *)&sub_cmd_resp->data[0]);
	ret = memcpy_s(nan_status, sizeof(wl_nan_conf_status_t), nstatus,
		sizeof(wl_nan_conf_status_t));
	if (ret != BCME_OK) {
		/* message fixed: old text said "tx match filter" (copy-paste) */
		WL_ERR(("Failed to copy nan status\n"));
		goto fail;
	}

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}
8214 #endif /* WL_NAN */
8215