/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Neighbor Awareness Networking
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: wl_cfgnan.c 825970 2019-06-18 05:28:31Z $
 */

#ifdef WL_NAN
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmwifi_channels.h>
#include <nan.h>
#include <bcmiov.h>
#include <net/rtnetlink.h>

#include <wl_cfg80211.h>
#include <wl_cfgscan.h>
#include <wl_android.h>
#include <wl_cfgnan.h>

#include <dngl_stats.h>
#include <dhd.h>
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif /* RTT_SUPPORT */
#include <wl_cfgvendor.h>
#include <bcmbloom.h>
#include <wl_cfgp2p.h>
#include <bcmstdlib_s.h>

#define NAN_RANGE_REQ_EVNT 1
#define NAN_RAND_MAC_RETRIES 10
#define NAN_SCAN_DWELL_TIME_DELTA_MS 10

#ifdef WL_NAN_DISC_CACHE
/* Disc Cache Parameters update Flags */
#define NAN_DISC_CACHE_PARAM_SDE_CONTROL 0x0001

static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void *data,
	u16 *disc_cache_update_flags);
static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg, uint8 local_subid);
static nan_disc_result_cache *wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg,
	uint8 remote_pubid, struct ether_addr *peer);
#endif /* WL_NAN_DISC_CACHE */
static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id);
static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);

static int wl_cfgnan_get_capability(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);

static int32 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance);

static void wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t *rng_inst);

static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
	nan_event_data_t *nan_event_data);

void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr);

static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg);

static void wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t *ranging_inst);

#ifdef RTT_SUPPORT
static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer, int reason);
#endif /* RTT_SUPPORT */

static const char *nan_role_to_str(u8 role)
{
	switch (role) {
		C2S(WL_NAN_ROLE_AUTO)
		C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC)
		C2S(WL_NAN_ROLE_NON_MASTER_SYNC)
		C2S(WL_NAN_ROLE_MASTER)
		C2S(WL_NAN_ROLE_ANCHOR_MASTER)
		default:
			return "WL_NAN_ROLE_UNKNOWN";
	}
}

static const char *nan_event_to_str(u16 cmd)
{
	switch (cmd) {
		C2S(WL_NAN_EVENT_START)
		C2S(WL_NAN_EVENT_DISCOVERY_RESULT)
		C2S(WL_NAN_EVENT_TERMINATED)
		C2S(WL_NAN_EVENT_RECEIVE)
		C2S(WL_NAN_EVENT_MERGE)
		C2S(WL_NAN_EVENT_STOP)
		C2S(WL_NAN_EVENT_PEER_DATAPATH_IND)
		C2S(WL_NAN_EVENT_DATAPATH_ESTB)
		C2S(WL_NAN_EVENT_SDF_RX)
		C2S(WL_NAN_EVENT_DATAPATH_END)
		C2S(WL_NAN_EVENT_RNG_REQ_IND)
		C2S(WL_NAN_EVENT_RNG_RPT_IND)
		C2S(WL_NAN_EVENT_RNG_TERM_IND)
		C2S(WL_NAN_EVENT_TXS)
		C2S(WL_NAN_EVENT_INVALID)

		default:
			return "WL_NAN_EVENT_UNKNOWN";
	}
}

static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, bcm_iov_batch_buf_t *nan_buf,
	uint16 nan_buf_size, uint32 *status, uint8 *resp_buf,
	uint16 resp_buf_len);
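
/*
 * Service instance IDs are allocated out of the svc_inst_id_mask bitmask:
 * bit i set means ID (i + 1) is in use. inst_id_start records the last ID
 * handed out so the next search resumes from there, wrapping to 0 once
 * NAN_ID_MAX IDs have been consumed.
 */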
int
wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
{
	s32 ret = BCME_OK;
	uint8 i = 0;
	if (p_inst_id == NULL) {
		WL_ERR(("Invalid arguments\n"));
		ret = -EINVAL;
		goto exit;
	}

	if (cfg->nancfg.inst_id_start == NAN_ID_MAX) {
		WL_ERR(("Consumed all IDs, resetting the counter\n"));
		cfg->nancfg.inst_id_start = 0;
	}

	for (i = cfg->nancfg.inst_id_start; i < NAN_ID_MAX; i++) {
		if (isclr(cfg->nancfg.svc_inst_id_mask, i)) {
			setbit(cfg->nancfg.svc_inst_id_mask, i);
			*p_inst_id = i + 1;
			cfg->nancfg.inst_id_start = *p_inst_id;
			WL_DBG(("Instance ID=%d\n", *p_inst_id));
			goto exit;
		}
	}
	WL_ERR(("Allocated maximum IDs\n"));
	ret = BCME_NORESOURCE;
exit:
	return ret;
}

int
wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id)
{
	s32 ret = BCME_OK;
	WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__, inst_id));
	clrbit(cfg->nancfg.svc_inst_id_mask, inst_id - 1);
	return ret;
}
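
/*
 * Parse the Service Descriptor Extension Attribute (SDEA) out of a raw
 * attribute buffer. The SDE control flags and, when present, the SDEA
 * service-specific info are copied into tlv_data for the matching
 * publish instance.
 */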
s32
wl_cfgnan_parse_sdea_data(osl_t *osh, const uint8 *p_attr,
	uint16 len, nan_event_data_t *tlv_data)
{
	const wifi_nan_svc_desc_ext_attr_t *nan_svc_desc_ext_attr = NULL;
	uint8 offset;
	s32 ret = BCME_OK;

	/* service descriptor ext attributes */
	nan_svc_desc_ext_attr = (const wifi_nan_svc_desc_ext_attr_t *)p_attr;

	/* attribute ID */
	WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr->id));

	/* attribute length */
	WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
	if (nan_svc_desc_ext_attr->instance_id == tlv_data->pub_id) {
		tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
	}
	offset = sizeof(*nan_svc_desc_ext_attr);
	if (offset > len) {
		WL_ERR(("Invalid event buffer len\n"));
		ret = BCME_BUFTOOSHORT;
		goto fail;
	}
	p_attr += offset;
	len -= offset;

	if (tlv_data->sde_control_flag & NAN_SC_RANGE_LIMITED) {
		WL_TRACE(("> svc_control: range limited present\n"));
	}
	if (tlv_data->sde_control_flag & NAN_SDE_CF_SVC_UPD_IND_PRESENT) {
		WL_TRACE(("> svc_control: sdea svc specific info present\n"));
		tlv_data->sde_svc_info.dlen = (p_attr[1] | (p_attr[2] << 8));
		WL_TRACE(("> sdea svc info len: 0x%02x\n", tlv_data->sde_svc_info.dlen));
		if (!tlv_data->sde_svc_info.dlen ||
			tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
			/* must be able to handle a null msg, which is not an error */
			tlv_data->sde_svc_info.dlen = 0;
			WL_ERR(("sde data length is invalid\n"));
			ret = BCME_BADLEN;
			goto fail;
		}

		if (tlv_data->sde_svc_info.dlen > 0) {
			tlv_data->sde_svc_info.data = MALLOCZ(osh, tlv_data->sde_svc_info.dlen);
			if (!tlv_data->sde_svc_info.data) {
				WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
				tlv_data->sde_svc_info.dlen = 0;
				ret = BCME_NOMEM;
				goto fail;
			}
			/* advance the read pointer past the Service Update Indicator */
			offset = sizeof(tlv_data->sde_svc_info.dlen) - 1;
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			ret = memcpy_s(tlv_data->sde_svc_info.data, tlv_data->sde_svc_info.dlen,
				p_attr, tlv_data->sde_svc_info.dlen);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy sde_svc_info\n"));
				goto fail;
			}
		} else {
			/* must be able to handle a null msg, which is not an error */
			tlv_data->sde_svc_info.dlen = 0;
			WL_DBG(("%s: sdea svc info length is zero, null info data\n",
				__FUNCTION__));
		}
	}
	return ret;
fail:
	if (tlv_data->sde_svc_info.data) {
		MFREE(osh, tlv_data->sde_svc_info.data,
			tlv_data->sde_svc_info.dlen);
		tlv_data->sde_svc_info.data = NULL;
	}

	WL_DBG(("Parse SDEA event data, status = %d\n", ret));
	return ret;
}

/*
 * This attribute contains some mandatory fields and some optional fields
 * depending on the content of the service discovery request.
 */
s32
wl_cfgnan_parse_sda_data(osl_t *osh, const uint8 *p_attr,
	uint16 len, nan_event_data_t *tlv_data)
{
	uint8 svc_control = 0, offset = 0;
	s32 ret = BCME_OK;
	const wifi_nan_svc_descriptor_attr_t *nan_svc_desc_attr = NULL;

	/* service descriptor attributes */
	nan_svc_desc_attr = (const wifi_nan_svc_descriptor_attr_t *)p_attr;
	/* attribute ID */
	WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr->id));

	/* attribute length */
	WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));

	/* service ID */
	ret = memcpy_s(tlv_data->svc_name, sizeof(tlv_data->svc_name),
		nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy svc_hash_name\n"));
		return ret;
	}
	WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));

	/* local instance ID */
	tlv_data->local_inst_id = nan_svc_desc_attr->instance_id;
	WL_TRACE(("> local instance id: 0x%02x\n", tlv_data->local_inst_id));

	/* requestor instance ID */
	tlv_data->requestor_id = nan_svc_desc_attr->requestor_id;
	WL_TRACE(("> requestor id: 0x%02x\n", tlv_data->requestor_id));

	/* service control */
	svc_control = nan_svc_desc_attr->svc_control;
	if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) {
		WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
	} else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE) {
		WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
	} else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_FOLLOWUP) {
		WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
	}
	offset = sizeof(*nan_svc_desc_attr);
	if (offset > len) {
		WL_ERR(("Invalid event buffer len\n"));
		ret = BCME_BUFTOOSHORT;
		goto fail;
	}
	p_attr += offset;
	len -= offset;

	/*
	 * optional fields:
	 * must be in order following by service descriptor attribute format
	 */

	/* binding bitmap */
	if (svc_control & NAN_SC_BINDING_BITMAP_PRESENT) {
		uint16 bitmap = 0;
		WL_TRACE(("> svc_control: binding bitmap present\n"));

		/* Copy binding bitmap */
		ret = memcpy_s(&bitmap, sizeof(bitmap),
			p_attr, NAN_BINDING_BITMAP_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy bit map\n"));
			return ret;
		}
		WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));

		if (NAN_BINDING_BITMAP_LEN > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += NAN_BINDING_BITMAP_LEN;
		len -= NAN_BINDING_BITMAP_LEN;
	}

	/* matching filter */
	if (svc_control & NAN_SC_MATCHING_FILTER_PRESENT) {
		WL_TRACE(("> svc_control: matching filter present\n"));

		tlv_data->tx_match_filter.dlen = *p_attr++;
		WL_TRACE(("> matching filter len: 0x%02x\n",
			tlv_data->tx_match_filter.dlen));

		if (!tlv_data->tx_match_filter.dlen ||
			tlv_data->tx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
			tlv_data->tx_match_filter.dlen = 0;
			WL_ERR(("tx match filter length is invalid\n"));
			ret = -EINVAL;
			goto fail;
		}
		tlv_data->tx_match_filter.data =
			MALLOCZ(osh, tlv_data->tx_match_filter.dlen);
		if (!tlv_data->tx_match_filter.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			tlv_data->tx_match_filter.dlen = 0;
			ret = -ENOMEM;
			goto fail;
		}
		ret = memcpy_s(tlv_data->tx_match_filter.data, tlv_data->tx_match_filter.dlen,
			p_attr, tlv_data->tx_match_filter.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy tx match filter data\n"));
			goto fail;
		}
		/* advance read pointer */
		offset = tlv_data->tx_match_filter.dlen;
		if (offset > len) {
			WL_ERR(("Invalid event buffer\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
	}

	/* service response filter */
	if (svc_control & NAN_SC_SR_FILTER_PRESENT) {
		WL_TRACE(("> svc_control: service response filter present\n"));

		tlv_data->rx_match_filter.dlen = *p_attr++;
		WL_TRACE(("> sr match filter len: 0x%02x\n",
			tlv_data->rx_match_filter.dlen));

		if (!tlv_data->rx_match_filter.dlen ||
			tlv_data->rx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
			tlv_data->rx_match_filter.dlen = 0;
			WL_ERR(("%s: sr matching filter length is invalid\n",
				__FUNCTION__));
			ret = BCME_BADLEN;
			goto fail;
		}
		tlv_data->rx_match_filter.data =
			MALLOCZ(osh, tlv_data->rx_match_filter.dlen);
		if (!tlv_data->rx_match_filter.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			tlv_data->rx_match_filter.dlen = 0;
			ret = BCME_NOMEM;
			goto fail;
		}

		ret = memcpy_s(tlv_data->rx_match_filter.data, tlv_data->rx_match_filter.dlen,
			p_attr, tlv_data->rx_match_filter.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy rx match filter data\n"));
			goto fail;
		}

		/* advance read pointer */
		offset = tlv_data->rx_match_filter.dlen;
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
	}

	/* service specific info */
	if (svc_control & NAN_SC_SVC_INFO_PRESENT) {
		WL_TRACE(("> svc_control: svc specific info present\n"));

		tlv_data->svc_info.dlen = *p_attr++;
		WL_TRACE(("> svc info len: 0x%02x\n", tlv_data->svc_info.dlen));

		if (!tlv_data->svc_info.dlen ||
			tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
			/* must be able to handle a null msg, which is not an error */
			tlv_data->svc_info.dlen = 0;
			WL_ERR(("svc info length is invalid\n"));
			ret = BCME_BADLEN;
			goto fail;
		}

		if (tlv_data->svc_info.dlen > 0) {
			tlv_data->svc_info.data =
				MALLOCZ(osh, tlv_data->svc_info.dlen);
			if (!tlv_data->svc_info.data) {
				WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
				tlv_data->svc_info.dlen = 0;
				ret = BCME_NOMEM;
				goto fail;
			}
			ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
				p_attr, tlv_data->svc_info.dlen);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy svc info\n"));
				goto fail;
			}

			/* advance read pointer */
			offset = tlv_data->svc_info.dlen;
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
		} else {
			/* must be able to handle a null msg, which is not an error */
			tlv_data->svc_info.dlen = 0;
			WL_TRACE(("%s: svc info length is zero, null info data\n",
				__FUNCTION__));
		}
	}

	/*
	 * discovery range limited:
	 * If set to 1, the pub/sub msg is limited in range to close proximity.
	 * If set to 0, the pub/sub msg is not limited in range.
	 * Valid only when the message is either a publish or a subscribe.
	 */
	if (svc_control & NAN_SC_RANGE_LIMITED) {
		if (((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) ||
			((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE)) {
			WL_TRACE(("> svc_control: range limited present\n"));
		} else {
			WL_TRACE(("range limited is only valid on pub or sub\n"));
		}

		/* TODO: send up */

		/* advance read pointer */
		p_attr++;
	}
	return ret;
fail:
	if (tlv_data->tx_match_filter.data) {
		MFREE(osh, tlv_data->tx_match_filter.data,
			tlv_data->tx_match_filter.dlen);
		tlv_data->tx_match_filter.data = NULL;
	}
	if (tlv_data->rx_match_filter.data) {
		MFREE(osh, tlv_data->rx_match_filter.data,
			tlv_data->rx_match_filter.dlen);
		tlv_data->rx_match_filter.data = NULL;
	}
	if (tlv_data->svc_info.data) {
		MFREE(osh, tlv_data->svc_info.data,
			tlv_data->svc_info.dlen);
		tlv_data->svc_info.data = NULL;
	}

	WL_DBG(("Parse SDA event data, status = %d\n", ret));
	return ret;
}

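/*
 * Walk the attribute list of a discovery result / follow-up / replied
 * event. Each attribute is laid out as a 1-byte ID followed by a 2-byte
 * little-endian length and the body, which is why the iteration below
 * steps by (1 + 2 + (p_attr[1] | (p_attr[2] << 8))) bytes per attribute.
 */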
static s32
wl_cfgnan_parse_sd_attr_data(osl_t *osh, uint16 len, const uint8 *data,
	nan_event_data_t *tlv_data, uint16 type)
{
	const uint8 *p_attr = data;
	uint16 offset = 0;
	s32 ret = BCME_OK;
	const wl_nan_event_disc_result_t *ev_disc = NULL;
	const wl_nan_event_replied_t *ev_replied = NULL;
	const wl_nan_ev_receive_t *ev_fup = NULL;

	/*
	 * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
	 */
	if (type == WL_NAN_XTLV_SD_DISC_RESULTS) {
		u8 iter;
		ev_disc = (const wl_nan_event_disc_result_t *)p_attr;

		WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));

		tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
		tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
		tlv_data->publish_rssi = ev_disc->publish_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
			&ev_disc->pub_mac, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
		WL_TRACE(("subscribe id: %d\n", ev_disc->sub_id));
		WL_TRACE(("publish mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_disc->pub_mac.octet)));
		WL_TRACE(("publish rssi: %d\n", (int8)ev_disc->publish_rssi));
		WL_TRACE(("attribute no: %d\n", ev_disc->attr_num));
		WL_TRACE(("attribute len: %d\n", (uint16)ev_disc->attr_list_len));

		/* advance to the service descriptor */
		offset = OFFSETOF(wl_nan_event_disc_result_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;

		iter = ev_disc->attr_num;
		while (iter) {
			if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sda_data failed,"
						"error = %d \n", ret));
					goto fail;
				}
			}

			if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
						"error = %d \n", ret));
					goto fail;
				}
			}
			offset = (sizeof(*p_attr) +
				sizeof(ev_disc->attr_list_len) +
				(p_attr[1] | (p_attr[2] << 8)));
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			iter--;
		}
	} else if (type == WL_NAN_XTLV_SD_FUP_RECEIVED) {
		uint8 iter;
		ev_fup = (const wl_nan_ev_receive_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));

		tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
		tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
		tlv_data->fup_rssi = ev_fup->fup_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
			&ev_fup->remote_addr, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("local id: %d\n", ev_fup->local_id));
		WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
		WL_TRACE(("peer mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_fup->remote_addr.octet)));
		WL_TRACE(("peer rssi: %d\n", (int8)ev_fup->fup_rssi));
		WL_TRACE(("attribute no: %d\n", ev_fup->attr_num));
		WL_TRACE(("attribute len: %d\n", ev_fup->attr_list_len));

		/* advance to the service descriptor which is attr_list[0] */
		offset = OFFSETOF(wl_nan_ev_receive_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;

		iter = ev_fup->attr_num;
		while (iter) {
			if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sda_data failed,"
						"error = %d \n", ret));
					goto fail;
				}
			}

			if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
						"error = %d \n", ret));
					goto fail;
				}
			}
			offset = (sizeof(*p_attr) +
				sizeof(ev_fup->attr_list_len) +
				(p_attr[1] | (p_attr[2] << 8)));
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			iter--;
		}
	} else if (type == WL_NAN_XTLV_SD_SDF_RX) {
		/*
		 * SDF followed by nan2_pub_act_frame_t and wifi_nan_svc_descriptor_attr_t,
		 * and svc controls are optional.
		 */
		const nan2_pub_act_frame_t *nan_pub_af =
			(const nan2_pub_act_frame_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));

		/* nan2_pub_act_frame_t */
		WL_TRACE(("pub category: 0x%02x\n", nan_pub_af->category_id));
		WL_TRACE(("pub action: 0x%02x\n", nan_pub_af->action_field));
		WL_TRACE(("nan oui: %2x-%2x-%2x\n",
			nan_pub_af->oui[0], nan_pub_af->oui[1], nan_pub_af->oui[2]));
		WL_TRACE(("oui type: 0x%02x\n", nan_pub_af->oui_type));
		WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af->oui_sub_type));

		offset = sizeof(*nan_pub_af);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
	} else if (type == WL_NAN_XTLV_SD_REPLIED) {
		ev_replied = (const wl_nan_event_replied_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));

		tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
		tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
		tlv_data->sub_rssi = ev_replied->sub_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
			&ev_replied->sub_mac, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
		WL_TRACE(("subscribe id: %d\n", ev_replied->sub_id));
		WL_TRACE(("Subscriber mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_replied->sub_mac.octet)));
		WL_TRACE(("subscribe rssi: %d\n", (int8)ev_replied->sub_rssi));
		WL_TRACE(("attribute no: %d\n", ev_replied->attr_num));
		WL_TRACE(("attribute len: %d\n", (uint16)ev_replied->attr_list_len));

		/* advance to the service descriptor which is attr_list[0] */
		offset = OFFSETOF(wl_nan_event_replied_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
		ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
		if (unlikely(ret)) {
			WL_ERR(("wl_cfgnan_parse_sda_data failed,"
				"error = %d \n", ret));
		}
	}

fail:
	return ret;
}

/* Fill tlv_data according to the TLV type id */
int
wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
{
	nan_parse_event_ctx_t *ctx_tlv_data = ((nan_parse_event_ctx_t *)(ctx));
	nan_event_data_t *tlv_data = ((nan_event_data_t *)(ctx_tlv_data->nan_evt_data));
	int ret = BCME_OK;

	NAN_DBG_ENTER();
	if (!data || !len) {
		WL_ERR(("data length is invalid\n"));
		ret = BCME_ERROR;
		goto fail;
	}

	switch (type) {
	/*
	 * Need to parse service descriptor attributes, including service
	 * control, when a follow-up or discovery result arrives.
	 */
	case WL_NAN_XTLV_SD_FUP_RECEIVED:
	case WL_NAN_XTLV_SD_DISC_RESULTS: {
		ret = wl_cfgnan_parse_sd_attr_data(ctx_tlv_data->cfg->osh,
			len, data, tlv_data, type);
		break;
	}
	case WL_NAN_XTLV_SD_SVC_INFO: {
		tlv_data->svc_info.data =
			MALLOCZ(ctx_tlv_data->cfg->osh, len);
		if (!tlv_data->svc_info.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			tlv_data->svc_info.dlen = 0;
			ret = BCME_NOMEM;
			goto fail;
		}
		tlv_data->svc_info.dlen = len;
		ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
			data, tlv_data->svc_info.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy svc info data\n"));
			goto fail;
		}
		break;
	}
	default:
		WL_ERR(("Not available for tlv type = 0x%x\n", type));
		ret = BCME_ERROR;
		break;
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}

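/*
 * Validate that a batch sub-command of the given payload size fits in the
 * remaining iov buffer. The returned subcmd_len is the sub-command header
 * (up to the data[] member) plus the payload rounded up to a 4-byte
 * boundary, which callers subtract from their running buffer budget.
 */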
int
wl_cfg_nan_check_cmd_len(uint16 nan_iov_len, uint16 data_size,
	uint16 *subcmd_len)
{
	s32 ret = BCME_OK;

	if (subcmd_len != NULL) {
		*subcmd_len = OFFSETOF(bcm_iov_batch_subcmd_t, data) +
			ALIGN_SIZE(data_size, 4);
		if (*subcmd_len > nan_iov_len) {
			WL_ERR(("%s: Buf short, requested:%d, available:%d\n",
				__FUNCTION__, *subcmd_len, nan_iov_len));
			ret = BCME_NOMEM;
		}
	} else {
		WL_ERR(("Invalid subcmd_len\n"));
		ret = BCME_ERROR;
	}
	return ret;
}

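/*
 * Configure the firmware NAN event mask. Unless all events are being
 * disabled, the current mask is first read back from firmware
 * (is_set = false), the events the host needs are OR'd in (and a few
 * noisy ones cleared), and the merged mask is written back (is_set = true).
 */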
int
wl_cfgnan_config_eventmask(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	uint8 event_ind_flag, bool disable_events)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	uint32 status;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 event_mask[WL_NAN_EVMASK_EXTN_LEN];
	wl_nan_evmask_extn_t *evmask;
	uint16 evmask_cmd_len;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();

	/* same src and dest len here */
	(void)memset_s(event_mask, WL_NAN_EVMASK_EXTN_LEN, 0, WL_NAN_EVMASK_EXTN_LEN);
	evmask_cmd_len = OFFSETOF(wl_nan_evmask_extn_t, evmask) +
		WL_NAN_EVMASK_EXTN_LEN;
	ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
	if (unlikely(ret)) {
		WL_ERR((" nan event enable failed, error = %d \n", ret));
		goto fail;
	}

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
		evmask_cmd_len, &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
	sub_cmd->len = sizeof(sub_cmd->u.options) + evmask_cmd_len;
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
	evmask->ver = WL_NAN_EVMASK_EXTN_VER;
	evmask->len = WL_NAN_EVMASK_EXTN_LEN;
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;

	if (disable_events) {
		WL_DBG(("Disabling all nan events, except the stop event\n"));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
	} else {
		/*
		 * Android framework event mask configuration.
		 */
		nan_buf->is_set = false;
		memset(resp_buf, 0, sizeof(resp_buf));
		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
			(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(("get nan event mask failed ret %d status %d \n",
				ret, status));
			goto fail;
		}
		sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
		evmask = (wl_nan_evmask_extn_t *)sub_cmd_resp->data;

		/* check the response buff */
		/* same src and dest len here */
		(void)memcpy_s(&event_mask, WL_NAN_EVMASK_EXTN_LEN,
			(uint8 *)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN);

		if (event_ind_flag) {
			if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
				WL_DBG(("Need to add disc mac addr change event\n"));
			}
			/* BIT2 - Disable nan cluster join indication (OTA). */
			if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
				clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_MERGE));
			}
		}

		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISCOVERY_RESULT));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RECEIVE));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TERMINATED));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TXS));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_DATAPATH_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_ESTB));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_END));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_REQ_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_TERM_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISC_CACHE_TIMEOUT));
		/* Disable below events by default */
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF));
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_RPT_IND));
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DW_END));
	}

	nan_buf->is_set = true;
	evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
	/* same src and dest len here */
	(void)memcpy_s((uint8 *)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN,
		&event_mask, WL_NAN_EVMASK_EXTN_LEN);

	nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("set nan event mask failed ret %d status %d \n", ret, status));
		goto fail;
	}
	WL_DBG(("set nan event mask successful\n"));

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}

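/*
 * Program a NAN availability window. A wl_avail_t carries one or more
 * wl_avail_entry_t records; each entry's bitmap marks the availability
 * slots within the repeating period set by entry->period, on the
 * advertised channel. The bitmap is seeded either from the caller's bmap
 * or from the compile-time default map (WL_AVAIL_BIT_MAP, or
 * WL_5G_AVAIL_BIT_MAP when 5G is supported).
 */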
static int
wl_cfgnan_set_nan_avail(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_avail_cmd_data *cmd_data, uint8 avail_type)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	wl_avail_t *avail = NULL;
	wl_avail_entry_t *entry;	/* used for filling entry structure */
	uint8 *p;	/* tracking pointer */
	uint8 i;
	u32 status;
	int c;
	char ndc_id[ETHER_ADDR_LEN] = { 0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0 };
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
	char *a = WL_AVAIL_BIT_MAP;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();

	/* Do not disturb avail if dam is supported */
	if (FW_SUPPORTED(dhdp, autodam)) {
		WL_DBG(("DAM is supported, avail modification not allowed\n"));
		return ret;
	}

	if (avail_type < WL_AVAIL_LOCAL || avail_type > WL_AVAIL_TYPE_MAX) {
		WL_ERR(("Invalid availability type\n"));
		ret = BCME_USAGE_ERROR;
		goto fail;
	}

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*avail), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}
	avail = (wl_avail_t *)sub_cmd->data;

	/* populate wl_avail_type */
	avail->flags = avail_type;
	if (avail_type == WL_AVAIL_RANGING) {
		ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
			&cmd_data->peer_nmi, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy peer nmi\n"));
			goto fail;
		}
	}

	sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	nan_buf->is_set = false;
	nan_buf->count++;
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);

	WL_TRACE(("Read wl nan avail status\n"));

	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret)) {
		WL_ERR(("\n Get nan avail failed ret %d, status %d \n", ret, status));
		goto fail;
	}

	if (status == BCME_NOTFOUND) {
		nan_buf->count = 0;
		nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
		nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

		sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

		avail = (wl_avail_t *)sub_cmd->data;
		p = avail->entry;

		/* populate wl_avail fields */
		avail->length = OFFSETOF(wl_avail_t, entry);
		avail->flags = avail_type;
		avail->num_entries = 0;
		avail->id = 0;
		entry = (wl_avail_entry_t *)p;
		entry->flags = WL_AVAIL_ENTRY_COM;

		/* set default values for optional parameters */
		entry->start_offset = 0;
		entry->u.band = 0;

		if (cmd_data->avail_period) {
			entry->period = cmd_data->avail_period;
		} else {
			entry->period = WL_AVAIL_PERIOD_1024;
		}

		if (cmd_data->duration != NAN_BAND_INVALID) {
			entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
				(cmd_data->duration << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
		} else {
			entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
				(WL_AVAIL_BIT_DUR_16 << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
		}
		entry->bitmap_len = 0;

		if (avail_type == WL_AVAIL_LOCAL) {
			entry->flags |= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT;
			/* Check for 5g support, based on that choose 5g channel */
			if (cfg->support_5g) {
				entry->u.channel_info =
					htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_5G,
						WL_AVAIL_BANDWIDTH_5G));
			} else {
				entry->u.channel_info =
					htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_2G,
						WL_AVAIL_BANDWIDTH_2G));
			}
		}

		if (cfg->support_5g) {
			a = WL_5G_AVAIL_BIT_MAP;
		}

		/* point to bitmap value for processing */
		if (cmd_data->bmap) {
			for (c = (WL_NAN_EVENT_CLEAR_BIT - 1); c >= 0; c--) {
				i = cmd_data->bmap >> c;
				if (i & 1) {
					setbit(entry->bitmap, (WL_NAN_EVENT_CLEAR_BIT - c - 1));
				}
			}
		} else {
			for (i = 0; i < strlen(WL_AVAIL_BIT_MAP); i++) {
				if (*a == '1') {
					setbit(entry->bitmap, i);
				}
				a++;
			}
		}

		/* account for partially filled most significant byte */
		entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
		if (avail_type == WL_AVAIL_NDC) {
			ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
				ndc_id, ETHER_ADDR_LEN);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy ndc id\n"));
				goto fail;
			}
		} else if (avail_type == WL_AVAIL_RANGING) {
			ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
				&cmd_data->peer_nmi, ETHER_ADDR_LEN);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy peer nmi\n"));
				goto fail;
			}
		}

		/* update wl_avail and populate wl_avail_entry */
		entry->length = OFFSETOF(wl_avail_entry_t, bitmap) + entry->bitmap_len;
		avail->num_entries++;
		avail->length += entry->length;
		/* advance pointer for next entry */
		p += entry->length;

		/* convert entry to dongle endianness (each field exactly once) */
		entry->length = htod16(entry->length);
		entry->start_offset = htod16(entry->start_offset);
		entry->flags = htod16(entry->flags);

		sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
		sub_cmd->len = sizeof(sub_cmd->u.options) + avail->length;
		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

		/* update avail_len only if
		 * there are avail entries
		 */
		if (avail->num_entries) {
			nan_iov_data->nan_iov_len -= avail->length;
			avail->length = htod16(avail->length);
			avail->flags = htod16(avail->flags);
		}

		nan_buf->is_set = true;
		nan_buf->count++;

		/* Reduce the iov_len size by subcmd_len */
		nan_iov_data->nan_iov_len -= subcmd_len;
		nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);

		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
			(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(("\n set nan avail failed ret %d status %d \n", ret, status));
			ret = status;
			goto fail;
		}
	} else if (status == BCME_OK) {
		WL_DBG(("Avail type [%d] found to be configured\n", avail_type));
	} else {
		WL_ERR(("set nan avail failed ret %d status %d \n", ret, status));
	}

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_DBG_EXIT();
	return ret;
}

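/*
 * Set or clear a bit in the firmware's NAN config control word using
 * read-modify-write: fetch the current cfg_ctrl, flip the requested flag,
 * and push the merged value back.
 */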
static int
wl_cfgnan_config_control_flag(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	uint32 flag, uint32 *status, bool set)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint32 cfg_ctrl;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();
	WL_INFORM_MEM(("%s: Modifying nan ctrl flag %x val %d\n",
		__FUNCTION__, flag, set));
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(cfg_ctrl), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_CONFIG);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cfg_ctrl);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	nan_buf->is_set = false;
	nan_buf->count++;

	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	nan_buf_size = (nan_iov_start - nan_iov_end);

	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("get nan cfg ctrl failed ret %d status %d \n", ret, *status));
		goto fail;
	}
	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];

	/* check the response buff */
	cfg_ctrl = (*(uint32 *)&sub_cmd_resp->data[0]);
	if (set) {
		cfg_ctrl |= flag;
	} else {
		cfg_ctrl &= ~flag;
	}
	ret = memcpy_s(sub_cmd->data, sizeof(cfg_ctrl),
		&cfg_ctrl, sizeof(cfg_ctrl));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy cfg ctrl\n"));
		goto fail;
	}

	nan_buf->is_set = true;
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("set nan cfg ctrl failed ret %d status %d \n", ret, *status));
		goto fail;
	}
	WL_DBG(("set nan cfg ctrl successful\n"));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_get_iovars_status(void *ctx, const uint8 *data, uint16 type, uint16 len)
{
	bcm_iov_batch_buf_t *b_resp = (bcm_iov_batch_buf_t *)ctx;
	uint32 status;
	/* if all tlvs are parsed, we should not be here */
	if (b_resp->count == 0) {
		return BCME_BADLEN;
	}

	/* cbfn params may be used in f/w */
	if (len < sizeof(status)) {
		return BCME_BUFTOOSHORT;
	}

	/* the first 4 bytes hold the status code */
	if (memcpy_s(&status, sizeof(status),
		data, sizeof(uint32)) != BCME_OK) {
		WL_ERR(("Failed to copy status\n"));
		goto exit;
	}

	status = dtoh32(status);

	/* If status is non zero */
	if (status != BCME_OK) {
		printf("cmd type %d failed, status: %04x\n", type, status);
		goto exit;
	}

	if (b_resp->count > 0) {
		b_resp->count--;
	}

	if (!b_resp->count) {
		status = BCME_IOV_LAST_CMD;
	}
exit:
	return status;
}

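/*
 * Send a batched NAN iovar to the firmware. For a set, the response in
 * resp_buf is prefixed with the iovar name, hence the strlen(iov) + 1
 * offset when locating the batch response; a get returns the batch buffer
 * directly. The per-subcommand status words are then unpacked via
 * wl_cfgnan_get_iovars_status().
 */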
static int
wl_cfgnan_execute_ioctl(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	bcm_iov_batch_buf_t *nan_buf, uint16 nan_buf_size, uint32 *status,
	uint8 *resp_buf, uint16 resp_buf_size)
{
	int ret = BCME_OK;
	uint16 tlvs_len;
	int res = BCME_OK;
	bcm_iov_batch_buf_t *p_resp = NULL;
	char *iov = "nan";
	int max_resp_len = WLC_IOCTL_MAXLEN;

	WL_DBG(("Enter:\n"));
	if (nan_buf->is_set) {
		ret = wldev_iovar_setbuf(ndev, "nan", nan_buf, nan_buf_size,
			resp_buf, resp_buf_size, NULL);
		p_resp = (bcm_iov_batch_buf_t *)(resp_buf + strlen(iov) + 1);
	} else {
		ret = wldev_iovar_getbuf(ndev, "nan", nan_buf, nan_buf_size,
			resp_buf, resp_buf_size, NULL);
		p_resp = (bcm_iov_batch_buf_t *)(resp_buf);
	}
	if (unlikely(ret)) {
		WL_ERR((" nan execute ioctl failed, error = %d \n", ret));
		goto fail;
	}

	p_resp->is_set = nan_buf->is_set;
	tlvs_len = max_resp_len - OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	/* Extract the tlvs and print their resp in cb fn */
	res = bcm_unpack_xtlv_buf((void *)p_resp, (const uint8 *)&p_resp->cmds[0],
		tlvs_len, BCM_IOV_CMD_OPT_ALIGN32, wl_cfgnan_get_iovars_status);

	if (res == BCME_IOV_LAST_CMD) {
		res = BCME_OK;
	}
fail:
	*status = res;
	WL_DBG((" nan ioctl ret %d status %d \n", ret, *status));
	return ret;
}

static int
wl_cfgnan_if_addr_handler(void *p_buf, uint16 *nan_buf_size,
	struct ether_addr *if_addr)
{
	s32 ret = BCME_OK;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	if (p_buf != NULL) {
		bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t *)(p_buf);

		ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
			sizeof(*if_addr), &subcmd_len);
		if (unlikely(ret)) {
			WL_ERR(("nan_sub_cmd check failed\n"));
			goto fail;
		}

		/* Fill the sub_command block */
		sub_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*if_addr);
		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
		ret = memcpy_s(sub_cmd->data, sizeof(*if_addr),
			(uint8 *)if_addr, sizeof(*if_addr));
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy if addr\n"));
			goto fail;
		}

		*nan_buf_size -= subcmd_len;
	} else {
		WL_ERR(("nan_iov_buf is NULL\n"));
		ret = BCME_ERROR;
		goto fail;
	}

fail:
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_get_ver(struct net_device *ndev, struct bcm_cfg80211 *cfg)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_ver_t *nan_ver = NULL;
	uint16 subcmd_len;
	uint32 status;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
		sizeof(*nan_ver), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	nan_ver = (wl_nan_ver_t *)sub_cmd->data;
	sub_cmd->id = htod16(WL_NAN_CMD_GLB_NAN_VER);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nan_ver);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;

	nan_buf->is_set = false;
	bzero(resp_buf, sizeof(resp_buf));
	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;

	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("get nan ver failed ret %d status %d \n",
			ret, status));
		goto fail;
	}

	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
	nan_ver = ((wl_nan_ver_t *)&sub_cmd_resp->data[0]);
	if (!nan_ver) {
		ret = BCME_NOTFOUND;
		WL_ERR(("nan_ver not found: err = %d\n", ret));
		goto fail;
	}
	cfg->nancfg.version = *nan_ver;
	WL_INFORM_MEM(("Nan Version is %d\n", cfg->nancfg.version));

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}

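/*
 * Select and program the NAN Management Interface (NMI) address. With MAC
 * randomization enabled, a fresh random address is generated and forced to
 * unicast + locally administered; otherwise the address is derived from
 * the primary interface MAC via wl_get_vif_macaddr().
 */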
static int
wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	struct ether_addr if_addr;
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t *)buf;
	bool rand_mac = cfg->nancfg.mac_rand;

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	if (rand_mac) {
		RANDOM_BYTES(if_addr.octet, 6);
		/* restore mcast and local admin bits to 0 and 1 */
		ETHER_SET_UNICAST(if_addr.octet);
		ETHER_SET_LOCALADDR(if_addr.octet);
	} else {
		/* Use primary MAC with the locally administered bit for the
		 * NAN NMI I/F
		 */
		if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI,
			if_addr.octet) != BCME_OK) {
			ret = -EINVAL;
			WL_ERR(("Failed to get mac addr for NMI\n"));
			goto fail;
		}
	}
	WL_INFORM_MEM(("%s: NMI " MACDBG "\n",
		__FUNCTION__, MAC2STRDBG(if_addr.octet)));
	ret = wl_cfgnan_if_addr_handler(&nan_buf->cmds[0],
		&nan_buf_size, &if_addr);
	if (unlikely(ret)) {
		WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
		goto fail;
	}
	nan_buf->count++;
	nan_buf->is_set = true;
	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
		nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("nan if addr handler failed ret %d status %d\n",
			ret, status));
		goto fail;
	}
	ret = memcpy_s(cfg->nan_nmi_mac, ETH_ALEN,
		if_addr.octet, ETH_ALEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy nmi addr\n"));
		goto fail;
	}
	return ret;
fail:
	if (!rand_mac) {
		wl_release_vif_macaddr(cfg, if_addr.octet, WL_IF_TYPE_NAN_NMI);
	}

	return ret;
}

static int
wl_cfgnan_init_handler(void *p_buf, uint16 *nan_buf_size, bool val)
{
	/* nan init */
	s32 ret = BCME_OK;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	if (p_buf != NULL) {
		bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t *)(p_buf);

		ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
			sizeof(val), &subcmd_len);
		if (unlikely(ret)) {
			WL_ERR(("nan_sub_cmd check failed\n"));
			goto fail;
		}

		/* Fill the sub_command block */
		sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
		ret = memcpy_s(sub_cmd->data, sizeof(uint8),
			(uint8 *)&val, sizeof(uint8));
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy init value\n"));
			goto fail;
		}

		*nan_buf_size -= subcmd_len;
	} else {
		WL_ERR(("nan_iov_buf is NULL\n"));
		ret = BCME_ERROR;
		goto fail;
	}

fail:
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_enable_handler(wl_nan_iov_t *nan_iov_data, bool val)
{
	/* nan enable */
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(val), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	/* Fill the sub_command block */
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	ret = memcpy_s(sub_cmd->data, sizeof(uint8),
		(uint8 *)&val, sizeof(uint8));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy enab value\n"));
		return ret;
	}

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t *cmd_data,
	wl_nan_iov_t *nan_iov_data)
{
	/* wl nan warm_up_time */
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_warmup_time_ticks_t *wup_ticks = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
	wup_ticks = (wl_nan_warmup_time_ticks_t *)sub_cmd->data;

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*wup_ticks), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}
	/* Fill the sub_command block */
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_WARMUP_TIME);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		sizeof(*wup_ticks);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	*wup_ticks = cmd_data->warmup_time;

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_election_metric(nan_config_cmd_data_t *cmd_data,
	wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_election_metric_config_t *metrics = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();

	sub_cmd =
		(bcm_iov_batch_subcmd_t *)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*metrics), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	metrics = (wl_nan_election_metric_config_t *)sub_cmd->data;

	if (nan_attr_mask & NAN_ATTR_RAND_FACTOR_CONFIG) {
		metrics->random_factor = (uint8)cmd_data->metrics.random_factor;
	}

	if ((!cmd_data->metrics.master_pref) ||
		(cmd_data->metrics.master_pref > NAN_MAXIMUM_MASTER_PREFERENCE)) {
		WL_TRACE(("Master pref is 0 or greater than 254, hence sending a random value\n"));
		/* Master pref for mobile devices can be from 1 - 127 as per Spec Appendix C */
		metrics->master_pref = (RANDOM32() % (NAN_MAXIMUM_MASTER_PREFERENCE / 2)) + 1;
	} else {
		metrics->master_pref = (uint8)cmd_data->metrics.master_pref;
	}
	sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		sizeof(*metrics);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

fail:
	NAN_DBG_EXIT();
	return ret;
}

1709 static int
wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t * cmd_data,wl_nan_iov_t * nan_iov_data,uint32 nan_attr_mask)1710 wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t *cmd_data,
1711 wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1712 {
1713 s32 ret = BCME_OK;
1714 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1715 wl_nan_rssi_notif_thld_t *rssi_notif_thld = NULL;
1716 uint16 subcmd_len;
1717
1718 NAN_DBG_ENTER();
1719 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1720
1721 rssi_notif_thld = (wl_nan_rssi_notif_thld_t *)sub_cmd->data;
1722
1723 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1724 sizeof(*rssi_notif_thld), &subcmd_len);
1725 if (unlikely(ret)) {
1726 WL_ERR(("nan_sub_cmd check failed\n"));
1727 return ret;
1728 }
1729 if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG) {
1730 rssi_notif_thld->bcn_rssi_2g =
1731 cmd_data->rssi_attr.rssi_proximity_2dot4g_val;
1732 } else {
1733 /* Default the 2G RSSI threshold to -70 dBm */
1734 rssi_notif_thld->bcn_rssi_2g = NAN_DEF_RSSI_NOTIF_THRESH;
1735 }
1736
1737 if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG) {
1738 rssi_notif_thld->bcn_rssi_5g =
1739 cmd_data->rssi_attr.rssi_proximity_5g_val;
1740 } else {
1741 /* Default the 5G RSSI threshold to -70 dBm */
1742 rssi_notif_thld->bcn_rssi_5g = NAN_DEF_RSSI_NOTIF_THRESH;
1743 }
1744
1745 sub_cmd->id = htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD);
1746 sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_notif_thld));
1747 sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
1748
1749 nan_iov_data->nan_iov_len -= subcmd_len;
1750 nan_iov_data->nan_iov_buf += subcmd_len;
1751
1752 NAN_DBG_EXIT();
1753 return ret;
1754 }
1755
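/*
 * Pack WL_NAN_CMD_ELECTION_RSSI_THRESHOLD with the per-band mid and close
 * RSSI thresholds, defaulting to NAN_DEF_RSSI_MID (-75 dBm) and
 * NAN_DEF_RSSI_CLOSE (-60 dBm) when the corresponding attribute bits are
 * not set.
 */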
1756 static int
1757 wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t *cmd_data,
1758 wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1759 {
1760 s32 ret = BCME_OK;
1761 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1762 wl_nan_rssi_thld_t *rssi_thld = NULL;
1763 uint16 subcmd_len;
1764
1765 NAN_DBG_ENTER();
1766 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1767 rssi_thld = (wl_nan_rssi_thld_t *)sub_cmd->data;
1768
1769 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1770 sizeof(*rssi_thld), &subcmd_len);
1771 if (unlikely(ret)) {
1772 WL_ERR(("nan_sub_cmd check failed\n"));
1773 return ret;
1774 }
1775
1776 /*
1777 * Default RSSI mid value is -75 dBm for both 2G and 5G
1778 * Default RSSI close value is -60 dBm for both 2G and 5G
1779 */
1780 if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_2G_CONFIG) {
1781 rssi_thld->rssi_mid_2g =
1782 cmd_data->rssi_attr.rssi_middle_2dot4g_val;
1783 } else {
1784 rssi_thld->rssi_mid_2g = NAN_DEF_RSSI_MID;
1785 }
1786
1787 if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_5G_CONFIG) {
1788 rssi_thld->rssi_mid_5g =
1789 cmd_data->rssi_attr.rssi_middle_5g_val;
1790 } else {
1791 rssi_thld->rssi_mid_5g = NAN_DEF_RSSI_MID;
1792 }
1793
1794 if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_CONFIG) {
1795 rssi_thld->rssi_close_2g =
1796 cmd_data->rssi_attr.rssi_close_2dot4g_val;
1797 } else {
1798 rssi_thld->rssi_close_2g = NAN_DEF_RSSI_CLOSE;
1799 }
1800
1801 if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_5G_CONFIG) {
1802 rssi_thld->rssi_close_5g =
1803 cmd_data->rssi_attr.rssi_close_5g_val;
1804 } else {
1805 rssi_thld->rssi_close_5g = NAN_DEF_RSSI_CLOSE;
1806 }
1807
1808 sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD);
1809 sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_thld));
1810 sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
1811
1812 nan_iov_data->nan_iov_len -= subcmd_len;
1813 nan_iov_data->nan_iov_buf += subcmd_len;
1814
1815 NAN_DBG_EXIT();
1816 return ret;
1817 }
1818
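/*
 * Validate a 5 GHz channel by querying the "per_chan_info" iovar and
 * checking the WL_CHAN_VALID_HW (hardware support) and WL_CHAN_VALID_SW
 * (current locale support) bits.
 */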
1819 static int
1820 check_for_valid_5gchan(struct net_device *ndev, uint8 chan)
1821 {
1822 s32 ret = BCME_OK;
1823 uint bitmap;
1824 u8 ioctl_buf[WLC_IOCTL_SMLEN];
1825 uint32 chanspec_arg;
1826 NAN_DBG_ENTER();
1827
1828 chanspec_arg = CH20MHZ_CHSPEC(chan);
1829 chanspec_arg = wl_chspec_host_to_driver(chanspec_arg);
1830 memset_s(ioctl_buf, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
1831 ret = wldev_iovar_getbuf(ndev, "per_chan_info",
1832 (void *)&chanspec_arg, sizeof(chanspec_arg),
1833 ioctl_buf, WLC_IOCTL_SMLEN, NULL);
1834 if (ret != BCME_OK) {
1835 WL_ERR(("Chaninfo for channel = %d, error %d\n", chan, ret));
1836 goto exit;
1837 }
1838
1839 bitmap = dtoh32(*(uint *)ioctl_buf);
1840 if (!(bitmap & WL_CHAN_VALID_HW)) {
1841 WL_ERR(("Invalid channel\n"));
1842 ret = BCME_BADCHAN;
1843 goto exit;
1844 }
1845
1846 if (!(bitmap & WL_CHAN_VALID_SW)) {
1847 WL_ERR(("Not supported in current locale\n"));
1848 ret = BCME_BADCHAN;
1849 goto exit;
1850 }
1851 exit:
1852 NAN_DBG_EXIT();
1853 return ret;
1854 }
1855
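/*
 * Pack WL_NAN_CMD_SYNC_SOCIAL_CHAN. The 2G social channel is mandatory;
 * for 5G the configured (or default) channel is validated against the
 * locale and, if rejected, the secondary default is tried before falling
 * back to 2G-only operation.
 */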
1856 static int
1857 wl_cfgnan_set_nan_soc_chans(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
1858 wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1859 {
1860 s32 ret = BCME_OK;
1861 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1862 wl_nan_social_channels_t *soc_chans = NULL;
1863 uint16 subcmd_len;
1864
1865 NAN_DBG_ENTER();
1866
1867 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1868 soc_chans =
1869 (wl_nan_social_channels_t *)sub_cmd->data;
1870
1871 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1872 sizeof(*soc_chans), &subcmd_len);
1873 if (unlikely(ret)) {
1874 WL_ERR(("nan_sub_cmd check failed\n"));
1875 return ret;
1876 }
1877
1878 sub_cmd->id = htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN);
1879 sub_cmd->len = sizeof(sub_cmd->u.options) +
1880 sizeof(*soc_chans);
1881 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1882 if (nan_attr_mask & NAN_ATTR_2G_CHAN_CONFIG) {
1883 soc_chans->soc_chan_2g = cmd_data->chanspec[1];
1884 } else {
1885 soc_chans->soc_chan_2g = NAN_DEF_SOCIAL_CHAN_2G;
1886 }
1887
1888 if (cmd_data->support_5g) {
1889 if (nan_attr_mask & NAN_ATTR_5G_CHAN_CONFIG) {
1890 soc_chans->soc_chan_5g = cmd_data->chanspec[2];
1891 } else {
1892 soc_chans->soc_chan_5g = NAN_DEF_SOCIAL_CHAN_5G;
1893 }
1894 ret = check_for_valid_5gchan(ndev, soc_chans->soc_chan_5g);
1895 if (ret != BCME_OK) {
1896 ret = check_for_valid_5gchan(ndev, NAN_DEF_SEC_SOCIAL_CHAN_5G);
1897 if (ret == BCME_OK) {
1898 soc_chans->soc_chan_5g = NAN_DEF_SEC_SOCIAL_CHAN_5G;
1899 } else {
1900 soc_chans->soc_chan_5g = 0;
1901 ret = BCME_OK;
1902 WL_ERR(("Current locale doesn't support 5G op"
1903 "continuing with 2G only operation\n"));
1904 }
1905 }
1906 } else {
1907 WL_DBG(("5G support is disabled\n"));
1908 }
1909 nan_iov_data->nan_iov_len -= subcmd_len;
1910 nan_iov_data->nan_iov_buf += subcmd_len;
1911
1912 NAN_DBG_EXIT();
1913 return ret;
1914 }
1915
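/*
 * Configure WL_NAN_CMD_CFG_SCAN_PARAMS (dwell time, scan period) for the
 * given band. Unlike the packing helpers above, this builds its own batch
 * buffer and issues the ioctl immediately, since it is invoked once per
 * band. NAN_SCAN_DWELL_TIME_DELTA_MS is added on top of the host-supplied
 * dwell time.
 */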
1916 static int
1917 wl_cfgnan_set_nan_scan_params(struct net_device *ndev, struct bcm_cfg80211 *cfg,
1918 nan_config_cmd_data_t *cmd_data, uint8 band_index, uint32 nan_attr_mask)
1919 {
1920 bcm_iov_batch_buf_t *nan_buf = NULL;
1921 s32 ret = BCME_OK;
1922 uint16 nan_iov_start, nan_iov_end;
1923 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
1924 uint16 subcmd_len;
1925 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1926 wl_nan_iov_t *nan_iov_data = NULL;
1927 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
1928 wl_nan_scan_params_t *scan_params = NULL;
1929 uint32 status;
1930
1931 NAN_DBG_ENTER();
1932
1933 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
1934 if (!nan_buf) {
1935 WL_ERR(("%s: memory allocation failed\n", __func__));
1936 ret = BCME_NOMEM;
1937 goto fail;
1938 }
1939
1940 nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
1941 if (!nan_iov_data) {
1942 WL_ERR(("%s: memory allocation failed\n", __func__));
1943 ret = BCME_NOMEM;
1944 goto fail;
1945 }
1946
1947 nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
1948 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
1949 nan_buf->count = 0;
1950 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
1951 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1952 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1953
1954 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1955 sizeof(*scan_params), &subcmd_len);
1956 if (unlikely(ret)) {
1957 WL_ERR(("nan_sub_cmd check failed\n"));
1958 goto fail;
1959 }
1960 scan_params = (wl_nan_scan_params_t *)sub_cmd->data;
1961
1962 sub_cmd->id = htod16(WL_NAN_CMD_CFG_SCAN_PARAMS);
1963 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*scan_params);
1964 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1965
1966 if (!band_index) {
1967 /* Fw default: Dwell time for 2G is 210 */
1968 if ((nan_attr_mask & NAN_ATTR_2G_DWELL_TIME_CONFIG) &&
1969 cmd_data->dwell_time[0]) {
1970 scan_params->dwell_time = cmd_data->dwell_time[0] +
1971 NAN_SCAN_DWELL_TIME_DELTA_MS;
1972 }
1973 /* Fw default: Scan period for 2G is 10 */
1974 if (nan_attr_mask & NAN_ATTR_2G_SCAN_PERIOD_CONFIG) {
1975 scan_params->scan_period = cmd_data->scan_period[0];
1976 }
1977 } else {
1978 if ((nan_attr_mask & NAN_ATTR_5G_DWELL_TIME_CONFIG) &&
1979 cmd_data->dwell_time[1]) {
1980 scan_params->dwell_time = cmd_data->dwell_time[1] +
1981 NAN_SCAN_DWELL_TIME_DELTA_MS;
1982 }
1983 if (nan_attr_mask & NAN_ATTR_5G_SCAN_PERIOD_CONFIG) {
1984 scan_params->scan_period = cmd_data->scan_period[1];
1985 }
1986 }
1987 scan_params->band_index = band_index;
1988 nan_buf->is_set = true;
1989 nan_buf->count++;
1990
1991 /* Reduce the iov_len size by subcmd_len */
1992 nan_iov_data->nan_iov_len -= subcmd_len;
1993 nan_iov_end = nan_iov_data->nan_iov_len;
1994 nan_buf_size = (nan_iov_start - nan_iov_end);
1995
1996 memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
1997 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
1998 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
1999 if (unlikely(ret) || unlikely(status)) {
2000 WL_ERR(("set nan scan params failed ret %d status %d \n", ret, status));
2001 goto fail;
2002 }
2003 WL_DBG(("set nan scan params successfull\n"));
2004 fail:
2005 if (nan_buf) {
2006 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
2007 }
2008 if (nan_iov_data) {
2009 MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
2010 }
2011
2012 NAN_DBG_EXIT();
2013 return ret;
2014 }
2015
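/*
 * Pack WL_NAN_CMD_CFG_CID. The first four octets are forced to the fixed
 * NAN cluster prefix 50:6F:9A:01; only the last two octets
 * (cluster_low/cluster_high) come from the host request.
 */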
2016 static int
2017 wl_cfgnan_set_cluster_id(nan_config_cmd_data_t *cmd_data,
2018 wl_nan_iov_t *nan_iov_data)
2019 {
2020 s32 ret = BCME_OK;
2021 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2022 uint16 subcmd_len;
2023
2024 NAN_DBG_ENTER();
2025
2026 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2027
2028 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2029 (sizeof(cmd_data->clus_id) - sizeof(uint8)), &subcmd_len);
2030 if (unlikely(ret)) {
2031 WL_ERR(("nan_sub_cmd check failed\n"));
2032 return ret;
2033 }
2034
2035 cmd_data->clus_id.octet[0] = 0x50;
2036 cmd_data->clus_id.octet[1] = 0x6F;
2037 cmd_data->clus_id.octet[2] = 0x9A;
2038 cmd_data->clus_id.octet[3] = 0x01;
2039 WL_TRACE(("cluster_id = " MACDBG "\n", MAC2STRDBG(cmd_data->clus_id.octet)));
2040
2041 sub_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
2042 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->clus_id);
2043 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2044 ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->clus_id),
2045 (uint8 *)&cmd_data->clus_id,
2046 sizeof(cmd_data->clus_id));
2047 if (ret != BCME_OK) {
2048 WL_ERR(("Failed to copy clus id\n"));
2049 return ret;
2050 }
2051
2052 nan_iov_data->nan_iov_len -= subcmd_len;
2053 nan_iov_data->nan_iov_buf += subcmd_len;
2054
2055 NAN_DBG_EXIT();
2056 return ret;
2057 }
2058
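/* Pack WL_NAN_CMD_CFG_HOP_LIMIT with the host-supplied hop count limit. */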
2059 static int
2060 wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t *cmd_data,
2061 wl_nan_iov_t *nan_iov_data)
2062 {
2063 s32 ret = BCME_OK;
2064 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2065 wl_nan_hop_count_t *hop_limit = NULL;
2066 uint16 subcmd_len;
2067
2068 NAN_DBG_ENTER();
2069
2070 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2071 hop_limit = (wl_nan_hop_count_t *)sub_cmd->data;
2072
2073 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2074 sizeof(*hop_limit), &subcmd_len);
2075 if (unlikely(ret)) {
2076 WL_ERR(("nan_sub_cmd check failed\n"));
2077 return ret;
2078 }
2079
2080 *hop_limit = cmd_data->hop_count_limit;
2081 sub_cmd->id = htod16(WL_NAN_CMD_CFG_HOP_LIMIT);
2082 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*hop_limit);
2083 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2084
2085 nan_iov_data->nan_iov_len -= subcmd_len;
2086 nan_iov_data->nan_iov_buf += subcmd_len;
2087
2088 NAN_DBG_EXIT();
2089 return ret;
2090 }
2091
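/*
 * Pack WL_NAN_CMD_CFG_SID_BEACON: enable service ID inclusion in beacons
 * and cap the number of publish/subscribe SIDs carried.
 */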
2092 static int
2093 wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t *cmd_data,
2094 wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
2095 {
2096 s32 ret = BCME_OK;
2097 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2098 wl_nan_sid_beacon_control_t *sid_beacon = NULL;
2099 uint16 subcmd_len;
2100
2101 NAN_DBG_ENTER();
2102
2103 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2104
2105 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2106 sizeof(*sid_beacon), &subcmd_len);
2107 if (unlikely(ret)) {
2108 WL_ERR(("nan_sub_cmd check failed\n"));
2109 return ret;
2110 }
2111
2112 sid_beacon = (wl_nan_sid_beacon_control_t *)sub_cmd->data;
2113 sid_beacon->sid_enable = cmd_data->sid_beacon.sid_enable;
2114 /* Need to have separate flag for sub beacons
2115 * sid_beacon->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
2116 */
2117 if (nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) {
2118 /* Limit for number of publish SIDs to be included in Beacons */
2119 sid_beacon->sid_count = cmd_data->sid_beacon.sid_count;
2120 }
2121 if (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG) {
2122 /* Limit for number of subscribe SIDs to be included in Beacons */
2123 sid_beacon->sub_sid_count = cmd_data->sid_beacon.sub_sid_count;
2124 }
2125 sub_cmd->id = htod16(WL_NAN_CMD_CFG_SID_BEACON);
2126 sub_cmd->len = sizeof(sub_cmd->u.options) +
2127 sizeof(*sid_beacon);
2128 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2129
2130 nan_iov_data->nan_iov_len -= subcmd_len;
2131 nan_iov_data->nan_iov_buf += subcmd_len;
2132 NAN_DBG_EXIT();
2133 return ret;
2134 }
2135
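/* Pack WL_NAN_CMD_CFG_OUI with the host-supplied NAN OUI override. */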
2136 static int
2137 wl_cfgnan_set_nan_oui(nan_config_cmd_data_t *cmd_data,
2138 wl_nan_iov_t *nan_iov_data)
2139 {
2140 s32 ret = BCME_OK;
2141 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2142 uint16 subcmd_len;
2143
2144 NAN_DBG_ENTER();
2145
2146 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2147
2148 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2149 sizeof(cmd_data->nan_oui), &subcmd_len);
2150 if (unlikely(ret)) {
2151 WL_ERR(("nan_sub_cmd check failed\n"));
2152 return ret;
2153 }
2154
2155 sub_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
2156 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->nan_oui);
2157 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2158 ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->nan_oui),
2159 (uint32 *)&cmd_data->nan_oui,
2160 sizeof(cmd_data->nan_oui));
2161 if (ret != BCME_OK) {
2162 WL_ERR(("Failed to copy nan oui\n"));
2163 return ret;
2164 }
2165
2166 nan_iov_data->nan_iov_len -= subcmd_len;
2167 nan_iov_data->nan_iov_buf += subcmd_len;
2168 NAN_DBG_EXIT();
2169 return ret;
2170 }
2171
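/*
 * Pack WL_NAN_CMD_SYNC_AWAKE_DWS with the 2G/5G awake DW intervals
 * (firmware default NAN_SYNC_DEF_AWAKE_DW when unset). A 5G interval of 0
 * additionally clears the 5G discovery/sync beacon TX control flags via
 * wl_cfgnan_config_control_flag().
 */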
2172 static int
2173 wl_cfgnan_set_awake_dws(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
2174 wl_nan_iov_t *nan_iov_data, struct bcm_cfg80211 *cfg, uint32 nan_attr_mask)
2175 {
2176 s32 ret = BCME_OK;
2177 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2178 wl_nan_awake_dws_t *awake_dws = NULL;
2179 uint16 subcmd_len;
2180 NAN_DBG_ENTER();
2181
2182 sub_cmd =
2183 (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2184 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2185 sizeof(*awake_dws), &subcmd_len);
2186 if (unlikely(ret)) {
2187 WL_ERR(("nan_sub_cmd check failed\n"));
2188 return ret;
2189 }
2190
2191 awake_dws = (wl_nan_awake_dws_t *)sub_cmd->data;
2192
2193 if (nan_attr_mask & NAN_ATTR_2G_DW_CONFIG) {
2194 awake_dws->dw_interval_2g = cmd_data->awake_dws.dw_interval_2g;
2195 if (!awake_dws->dw_interval_2g) {
2196 /* Set 2G awake dw value to fw default value 1 */
2197 awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
2198 }
2199 } else {
2200 /* Set 2G awake dw value to fw default value 1 */
2201 awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
2202 }
2203
2204 if (cfg->support_5g) {
2205 if (nan_attr_mask & NAN_ATTR_5G_DW_CONFIG) {
2206 awake_dws->dw_interval_5g = cmd_data->awake_dws.dw_interval_5g;
2207 if (!awake_dws->dw_interval_5g) {
2208 /* disable 5g beacon ctrls */
2209 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2210 WL_NAN_CTRL_DISC_BEACON_TX_5G,
2211 &(cmd_data->status), 0);
2212 if (unlikely(ret) || unlikely(cmd_data->status)) {
2213 WL_ERR((" nan control set config handler,"
2214 " ret = %d status = %d \n",
2215 ret, cmd_data->status));
2216 goto fail;
2217 }
2218 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2219 WL_NAN_CTRL_SYNC_BEACON_TX_5G,
2220 &(cmd_data->status), 0);
2221 if (unlikely(ret) || unlikely(cmd_data->status)) {
2222 WL_ERR((" nan control set config handler,"
2223 " ret = %d status = %d \n",
2224 ret, cmd_data->status));
2225 goto fail;
2226 }
2227 }
2228 } else {
2229 /* Set 5G awake dw value to fw default value 1 */
2230 awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
2231 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2232 WL_NAN_CTRL_DISC_BEACON_TX_5G |
2233 WL_NAN_CTRL_SYNC_BEACON_TX_5G,
2234 &(cmd_data->status), TRUE);
2235 if (unlikely(ret) || unlikely(cmd_data->status)) {
2236 WL_ERR((" nan control set config handler, ret = %d"
2237 " status = %d \n", ret, cmd_data->status));
2238 goto fail;
2239 }
2240 }
2241 }
2242
2243 sub_cmd->id = htod16(WL_NAN_CMD_SYNC_AWAKE_DWS);
2244 sub_cmd->len = sizeof(sub_cmd->u.options) +
2245 sizeof(*awake_dws);
2246 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2247
2248 nan_iov_data->nan_iov_len -= subcmd_len;
2249 nan_iov_data->nan_iov_buf += subcmd_len;
2250
2251 fail:
2252 NAN_DBG_EXIT();
2253 return ret;
2254 }
2255
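/*
 * NAN enable entry point: initializes NAN, sets the NMI address and
 * creates any user-requested NDIs, then batches the configuration
 * subcommands (warm-up time, election metrics, social channels, cluster
 * id, RSSI thresholds, hop limit, SID beacon, OUI, awake DWs and the
 * enable subcommand itself) into one bcm_iov_batch_buf_t and fires a
 * single ioctl. It then waits for WL_NAN_EVENT_START before applying the
 * post-enable control flags. On failure the fail path deletes the NDIs.
 */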
2256 int
2257 wl_cfgnan_start_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
2258 nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
2259 {
2260 s32 ret = BCME_OK;
2261 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
2262 bcm_iov_batch_buf_t *nan_buf = NULL;
2263 wl_nan_iov_t *nan_iov_data = NULL;
2264 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
2265 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
2266 int i;
2267 s32 timeout = 0;
2268 nan_hal_capabilities_t capabilities;
2269
2270 NAN_DBG_ENTER();
2271
2272 /* Protect discovery creation. Ensure proper mutex precedence:
2273 * if if_sync and nan_mutex are taken in the same context, nan_mutex
2274 * must be acquired after if_sync.
2275 */
2276 mutex_lock(&cfg->if_sync);
2277 NAN_MUTEX_LOCK();
2278
2279 if (!dhdp->up) {
2280 WL_ERR(("bus is already down, hence blocking nan start\n"));
2281 ret = BCME_ERROR;
2282 NAN_MUTEX_UNLOCK();
2283 mutex_unlock(&cfg->if_sync);
2284 goto fail;
2285 }
2286
2287 #ifdef WL_IFACE_MGMT
2288 if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN_NMI)) != BCME_OK) {
2289 WL_ERR(("Conflicting iface is present, cant support nan\n"));
2290 NAN_MUTEX_UNLOCK();
2291 mutex_unlock(&cfg->if_sync);
2292 goto fail;
2293 }
2294 #endif /* WL_IFACE_MGMT */
2295
2296 WL_INFORM_MEM(("Initializing NAN\n"));
2297 ret = wl_cfgnan_init(cfg);
2298 if (ret != BCME_OK) {
2299 WL_ERR(("failed to initialize NAN[%d]\n", ret));
2300 NAN_MUTEX_UNLOCK();
2301 mutex_unlock(&cfg->if_sync);
2302 goto fail;
2303 }
2304
2305 ret = wl_cfgnan_get_ver(ndev, cfg);
2306 if (ret != BCME_OK) {
2307 WL_ERR(("failed to Nan IOV version[%d]\n", ret));
2308 NAN_MUTEX_UNLOCK();
2309 mutex_unlock(&cfg->if_sync);
2310 goto fail;
2311 }
2312
2313 /* set nmi addr */
2314 ret = wl_cfgnan_set_if_addr(cfg);
2315 if (ret != BCME_OK) {
2316 WL_ERR(("Failed to set nmi address \n"));
2317 NAN_MUTEX_UNLOCK();
2318 mutex_unlock(&cfg->if_sync);
2319 goto fail;
2320 }
2321 cfg->nancfg.nan_event_recvd = false;
2322 NAN_MUTEX_UNLOCK();
2323 mutex_unlock(&cfg->if_sync);
2324
2325 for (i = 0; i < NAN_MAX_NDI; i++) {
2326 /* Create NDI using the information provided by user space */
2327 if (cfg->nancfg.ndi[i].in_use && !cfg->nancfg.ndi[i].created) {
2328 ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
2329 cfg->nancfg.ndi[i].ifname,
2330 NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
2331 if (ret) {
2332 WL_ERR(("failed to create ndp interface [%d]\n", ret));
2333 goto fail;
2334 }
2335 cfg->nancfg.ndi[i].created = true;
2336 }
2337 }
2338
2339 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
2340 if (!nan_buf) {
2341 WL_ERR(("%s: memory allocation failed\n", __func__));
2342 ret = BCME_NOMEM;
2343 goto fail;
2344 }
2345
2346 nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
2347 if (!nan_iov_data) {
2348 WL_ERR(("%s: memory allocation failed\n", __func__));
2349 ret = BCME_NOMEM;
2350 goto fail;
2351 }
2352
2353 nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
2354 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
2355 nan_buf->count = 0;
2356 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
2357 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
2358
2359 if (nan_attr_mask & NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG) {
2360 /* config sync/discovery beacons on 2G band */
2361 /* 2g is mandatory */
2362 if (!cmd_data->beacon_2g_val) {
2363 WL_ERR(("Invalid NAN config...2G is mandatory\n"));
2364 ret = BCME_BADARG;
goto fail;
2365 }
2366 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2367 WL_NAN_CTRL_DISC_BEACON_TX_2G | WL_NAN_CTRL_SYNC_BEACON_TX_2G,
2368 &(cmd_data->status), TRUE);
2369 if (unlikely(ret) || unlikely(cmd_data->status)) {
2370 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2371 ret, cmd_data->status));
2372 goto fail;
2373 }
2374 }
2375 if (nan_attr_mask & NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG) {
2376 /* config sync/discovery beacons on 5G band */
2377 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2378 WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G,
2379 &(cmd_data->status), cmd_data->beacon_5g_val);
2380 if (unlikely(ret) || unlikely(cmd_data->status)) {
2381 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2382 ret, cmd_data->status));
2383 goto fail;
2384 }
2385 }
2386 /* Setting warm up time */
2387 cmd_data->warmup_time = 1;
2388 if (cmd_data->warmup_time) {
2389 ret = wl_cfgnan_warmup_time_handler(cmd_data, nan_iov_data);
2390 if (unlikely(ret)) {
2391 WL_ERR(("warm up time handler sub_cmd set failed\n"));
2392 goto fail;
2393 }
2394 nan_buf->count++;
2395 }
2396 /* setting master preference and random factor */
2397 ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data, nan_attr_mask);
2398 if (unlikely(ret)) {
2399 WL_ERR(("election_metric sub_cmd set failed\n"));
2400 goto fail;
2401 } else {
2402 nan_buf->count++;
2403 }
2404
2405 /* setting nan social channels */
2406 ret = wl_cfgnan_set_nan_soc_chans(ndev, cmd_data, nan_iov_data, nan_attr_mask);
2407 if (unlikely(ret)) {
2408 WL_ERR(("nan social channels set failed\n"));
2409 goto fail;
2410 } else {
2411 /* Store the 5G capability, which is required for avail chan config. */
2412 cfg->support_5g = cmd_data->support_5g;
2413 nan_buf->count++;
2414 }
2415
2416 if ((cmd_data->support_2g) && ((cmd_data->dwell_time[0]) ||
2417 (cmd_data->scan_period[0]))) {
2418 /* setting scan params */
2419 ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
2420 if (unlikely(ret)) {
2421 WL_ERR(("scan params set failed for 2g\n"));
2422 goto fail;
2423 }
2424 }
2425
2426 if ((cmd_data->support_5g) && ((cmd_data->dwell_time[1]) ||
2427 (cmd_data->scan_period[1]))) {
2428 /* setting scan params */
2429 ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data,
2430 cmd_data->support_5g, nan_attr_mask);
2431 if (unlikely(ret)) {
2432 WL_ERR(("scan params set failed for 5g\n"));
2433 goto fail;
2434 }
2435 }
2436
2437 /*
2438 * A cluster_low value matching cluster_high indicates a request
2439 * to join a cluster with that value.
2440 * If the requested cluster is not found the
2441 * device will start its own cluster
2442 */
2443 /* For debug purposes, force merging to the configured cluster ID */
2444 if (!ETHER_ISNULLADDR(&cmd_data->clus_id.octet)) {
2445 if (cmd_data->clus_id.octet[4] == cmd_data->clus_id.octet[5]) {
2446 /* device will merge to configured CID only */
2447 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2448 WL_NAN_CTRL_MERGE_CONF_CID_ONLY, &(cmd_data->status), true);
2449 if (unlikely(ret) || unlikely(cmd_data->status)) {
2450 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2451 ret, cmd_data->status));
2452 goto fail;
2453 }
2454 }
2455 /* setting cluster ID */
2456 ret = wl_cfgnan_set_cluster_id(cmd_data, nan_iov_data);
2457 if (unlikely(ret)) {
2458 WL_ERR(("cluster_id sub_cmd set failed\n"));
2459 goto fail;
2460 }
2461 nan_buf->count++;
2462 }
2463
2464 /* setting RSSI proximity values for 2.4GHz and 5GHz */
2465 ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
2466 if (unlikely(ret)) {
2467 WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
2468 goto fail;
2469 } else {
2470 nan_buf->count++;
2471 }
2472
2473 /* setting rssi middle/close values for 2.4GHz and 5GHz */
2474 ret = wl_cfgnan_set_rssi_mid_or_close(cmd_data, nan_iov_data, nan_attr_mask);
2475 if (unlikely(ret)) {
2476 WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
2477 goto fail;
2478 } else {
2479 nan_buf->count++;
2480 }
2481
2482 /* setting hop count limit or threshold */
2483 if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
2484 ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
2485 if (unlikely(ret)) {
2486 WL_ERR(("hop_count_limit sub_cmd set failed\n"));
2487 goto fail;
2488 }
2489 nan_buf->count++;
2490 }
2491
2492 /* setting sid beacon val */
2493 if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
2494 (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
2495 ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
2496 if (unlikely(ret)) {
2497 WL_ERR(("sid_beacon sub_cmd set failed\n"));
2498 goto fail;
2499 }
2500 nan_buf->count++;
2501 }
2502
2503 /* setting nan oui */
2504 if (nan_attr_mask & NAN_ATTR_OUI_CONFIG) {
2505 ret = wl_cfgnan_set_nan_oui(cmd_data, nan_iov_data);
2506 if (unlikely(ret)) {
2507 WL_ERR(("nan_oui sub_cmd set failed\n"));
2508 goto fail;
2509 }
2510 nan_buf->count++;
2511 }
2512
2513 /* setting nan awake dws */
2514 ret = wl_cfgnan_set_awake_dws(ndev, cmd_data,
2515 nan_iov_data, cfg, nan_attr_mask);
2516 if (unlikely(ret)) {
2517 WL_ERR(("nan awake dws set failed\n"));
2518 goto fail;
2519 } else {
2520 nan_buf->count++;
2521 }
2522
2523 /* enable events */
2524 ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
2525 if (unlikely(ret)) {
2526 WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n", ret));
2527 goto fail;
2528 }
2529
2530 /* setting nan enable sub_cmd */
2531 ret = wl_cfgnan_enable_handler(nan_iov_data, true);
2532 if (unlikely(ret)) {
2533 WL_ERR(("enable handler sub_cmd set failed\n"));
2534 goto fail;
2535 }
2536 nan_buf->count++;
2537 nan_buf->is_set = true;
2538
2539 nan_buf_size -= nan_iov_data->nan_iov_len;
2540 memset(resp_buf, 0, sizeof(resp_buf));
2541 /* Reset condition variable */
2542 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
2543 &(cmd_data->status), (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
2544 if (unlikely(ret) || unlikely(cmd_data->status)) {
2545 WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
2546 ret, cmd_data->status));
2547 goto fail;
2548 }
2549
2550 timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
2551 cfg->nancfg.nan_event_recvd, msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
2552 if (!timeout) {
2553 WL_ERR(("Timed out while Waiting for WL_NAN_EVENT_START event !!!\n"));
2554 ret = BCME_ERROR;
2555 goto fail;
2556 }
2557
2558 /* If set, auto datapath confirms will be sent by FW */
2559 ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_AUTO_DPCONF,
2560 &(cmd_data->status), true);
2561 if (unlikely(ret) || unlikely(cmd_data->status)) {
2562 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2563 ret, cmd_data->status));
2564 goto fail;
2565 }
2566
2567 /* By default set NAN proprietary rates */
2568 ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_PROP_RATE,
2569 &(cmd_data->status), true);
2570 if (unlikely(ret) || unlikely(cmd_data->status)) {
2571 WL_ERR((" nan proprietary rate set failed, ret = %d status = %d \n",
2572 ret, cmd_data->status));
2573 goto fail;
2574 }
2575
2576 /* malloc for ndp peer list */
2577 if ((ret = wl_cfgnan_get_capablities_handler(ndev, cfg, &capabilities))
2578 == BCME_OK) {
2579 cfg->nancfg.max_ndp_count = capabilities.max_ndp_sessions;
2580 cfg->nancfg.nan_ndp_peer_info = MALLOCZ(cfg->osh,
2581 cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
2582 if (!cfg->nancfg.nan_ndp_peer_info) {
2583 WL_ERR(("%s: memory allocation failed\n", __func__));
2584 ret = BCME_NOMEM;
2585 goto fail;
2586 }
2587
2588 } else {
2589 WL_ERR(("wl_cfgnan_get_capablities_handler failed, ret = %d\n", ret));
2590 goto fail;
2591 }
2592
2593 #ifdef RTT_SUPPORT
2594 /* Initialize geofence cfg */
2595 dhd_rtt_initialize_geofence_cfg(cfg->pub);
2596 #endif /* RTT_SUPPORT */
2597
2598 cfg->nan_enable = true;
2599 WL_INFORM_MEM(("[NAN] Enable successfull \n"));
2600 /* disable TDLS on NAN NMI IF create */
2601 wl_cfg80211_tdls_config(cfg, TDLS_STATE_NMI_CREATE, false);
2602
2603 fail:
2604 /* reset condition variable */
2605 cfg->nancfg.nan_event_recvd = false;
2606 if (unlikely(ret) || unlikely(cmd_data->status)) {
2607 cfg->nan_enable = false;
2608 mutex_lock(&cfg->if_sync);
2609 ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
2610 if (ret != BCME_OK) {
2611 WL_ERR(("failed to delete NDI[%d]\n", ret));
2612 }
2613 mutex_unlock(&cfg->if_sync);
2614 }
2615 if (nan_buf) {
2616 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
2617 }
2618 if (nan_iov_data) {
2619 MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
2620 }
2621
2622 NAN_DBG_EXIT();
2623 return ret;
2624 }
2625
2626 int
2627 wl_cfgnan_disable(struct bcm_cfg80211 *cfg)
2628 {
2629 s32 ret = BCME_OK;
2630 dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
2631
2632 NAN_DBG_ENTER();
2633 if ((cfg->nan_init_state == TRUE) &&
2634 (cfg->nan_enable == TRUE)) {
2635 struct net_device *ndev;
2636 ndev = bcmcfg_to_prmry_ndev(cfg);
2637
2638 /* We have to remove NDIs so that P2P/Softap can work */
2639 ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
2640 if (ret != BCME_OK) {
2641 WL_ERR(("failed to delete NDI[%d]\n", ret));
2642 }
2643
2644 WL_INFORM_MEM(("Nan Disable Req, reason = %d\n", cfg->nancfg.disable_reason));
2645 ret = wl_cfgnan_stop_handler(ndev, cfg);
2646 if (ret == -ENODEV) {
2647 WL_ERR(("Bus is down, no need to proceed\n"));
2648 } else if (ret != BCME_OK) {
2649 WL_ERR(("failed to stop nan, error[%d]\n", ret));
2650 }
2651 ret = wl_cfgnan_deinit(cfg, dhdp->up);
2652 if (ret != BCME_OK) {
2653 WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
2654 if (!dhd_query_bus_erros(dhdp)) {
2655 ASSERT(0);
2656 }
2657 }
2658 wl_cfgnan_disable_cleanup(cfg);
2659 }
2660 NAN_DBG_EXIT();
2661 return ret;
2662 }
2663
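/*
 * Notify the NAN HAL (GOOGLE_NAN_EVENT_DISABLED) about a driver/firmware
 * initiated disable. User-initiated disables are filtered out, since the
 * framework already knows about those.
 */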
2664 static void
2665 wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg)
2666 {
2667 s32 ret = BCME_OK;
2668 nan_event_data_t *nan_event_data = NULL;
2669
2670 NAN_DBG_ENTER();
2671
2672 if (cfg->nancfg.disable_reason == NAN_USER_INITIATED) {
2673 /* do not send the event to host if the command came from host */
2674 goto exit;
2675 }
2676 nan_event_data = MALLOCZ(cfg->osh, sizeof(nan_event_data_t));
2677 if (!nan_event_data) {
2678 WL_ERR(("%s: memory allocation failed\n", __func__));
2679 ret = BCME_NOMEM;
2680 goto exit;
2681 }
2682 bzero(nan_event_data, sizeof(nan_event_data_t));
2683
2684 if (cfg->nancfg.disable_reason == NAN_CONCURRENCY_CONFLICT) {
2685 nan_event_data->status = NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED;
2686 } else {
2687 nan_event_data->status = NAN_STATUS_SUCCESS;
2688 }
2689
2691 ret = memcpy_s(nan_event_data->nan_reason, NAN_ERROR_STR_LEN,
2692 "NAN_STATUS_SUCCESS", strlen("NAN_STATUS_SUCCESS"));
2693 if (ret != BCME_OK) {
2694 WL_ERR(("Failed to copy nan reason string, ret = %d\n", ret));
2695 goto exit;
2696 }
2697 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
2698 ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
2699 GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
2700 if (ret != BCME_OK) {
2701 WL_ERR(("Failed to send event to nan hal, (%d)\n",
2702 GOOGLE_NAN_EVENT_DISABLED));
2703 }
2704 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
2705 exit:
2706 if (nan_event_data) {
2707 MFREE(cfg->osh, nan_event_data, sizeof(nan_event_data_t));
2708 }
2709 NAN_DBG_EXIT();
2710 return;
2711 }
2712
2713 void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg)
2714 {
2715 int i = 0;
2716 #ifdef RTT_SUPPORT
2717 dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
2718 rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
2719 rtt_target_info_t *target_info = NULL;
2720
2721 /* Delete the geofence rtt target list */
2722 dhd_rtt_delete_geofence_target_list(dhdp);
2723 /* Cancel pending retry timer if any */
2724 if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
2725 cancel_delayed_work_sync(&rtt_status->rtt_retry_timer);
2726 }
2727 /* Remove if any pending proxd timeout for nan-rtt */
2728 target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
2729 if (target_info && target_info->peer == RTT_PEER_NAN) {
2730 /* Cancel pending proxd timeout work if any */
2731 if (delayed_work_pending(&rtt_status->proxd_timeout)) {
2732 cancel_delayed_work_sync(&rtt_status->proxd_timeout);
2733 }
2734 }
2735 /* Delete if any directed nan rtt session */
2736 dhd_rtt_delete_nan_session(dhdp);
2737 #endif /* RTT_SUPPORT */
2738 /* Clear the NDP ID array and dp count */
2739 for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
2740 cfg->nancfg.ndp_id[i] = 0;
2741 }
2742 cfg->nan_dp_count = 0;
2743 if (cfg->nancfg.nan_ndp_peer_info) {
2744 MFREE(cfg->osh, cfg->nancfg.nan_ndp_peer_info,
2745 cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
2746 cfg->nancfg.nan_ndp_peer_info = NULL;
2747 }
2748 return;
2749 }
2750
2751 /*
2752 * Deferred nan disable work,
2753 * scheduled with a 3 sec delay in order to remove any active nan dps
2754 */
2755 void
2756 wl_cfgnan_delayed_disable(struct work_struct *work)
2757 {
2758 struct bcm_cfg80211 *cfg = NULL;
2759
2760 BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, nan_disable.work);
2761
2762 rtnl_lock();
2763 wl_cfgnan_disable(cfg);
2764 rtnl_unlock();
2765 }
2766
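/*
 * NAN disable path: unless the bus is already down (NAN_BUS_IS_DOWN),
 * mask NAN events, send the disable subcommand in a single-entry batch
 * and re-enable TDLS. The stop event and the instance-id/service-info
 * reset in the tail are done in all cases.
 */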
2767 int
2768 wl_cfgnan_stop_handler(struct net_device *ndev,
2769 struct bcm_cfg80211 *cfg)
2770 {
2771 bcm_iov_batch_buf_t *nan_buf = NULL;
2772 s32 ret = BCME_OK;
2773 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
2774 wl_nan_iov_t *nan_iov_data = NULL;
2775 uint32 status;
2776 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
2777
2778 NAN_DBG_ENTER();
2779 NAN_MUTEX_LOCK();
2780
2781 if (!cfg->nan_enable) {
2782 WL_INFORM(("Nan is not enabled\n"));
2783 ret = BCME_OK;
2784 goto fail;
2785 }
2786
2787 if (cfg->nancfg.disable_reason != NAN_BUS_IS_DOWN) {
2788 /*
2789 * The framework does cleanup (iface remove) on the disable command,
2790 * so avoid sending the event to prevent repeated iface delete calls
2791 */
2792 WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
2793 wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
2794
2795 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
2796 if (!nan_buf) {
2797 WL_ERR(("%s: memory allocation failed\n", __func__));
2798 ret = BCME_NOMEM;
2799 goto fail;
2800 }
2801
2802 nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
2803 if (!nan_iov_data) {
2804 WL_ERR(("%s: memory allocation failed\n", __func__));
2805 ret = BCME_NOMEM;
2806 goto fail;
2807 }
2808
2809 nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
2810 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
2811 nan_buf->count = 0;
2812 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
2813 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
2814
2815 ret = wl_cfgnan_enable_handler(nan_iov_data, false);
2816 if (unlikely(ret)) {
2817 WL_ERR(("nan disable handler failed\n"));
2818 goto fail;
2819 }
2820 nan_buf->count++;
2821 nan_buf->is_set = true;
2822 nan_buf_size -= nan_iov_data->nan_iov_len;
2823 memset_s(resp_buf, sizeof(resp_buf),
2824 0, sizeof(resp_buf));
2825 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
2826 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
2827 if (unlikely(ret) || unlikely(status)) {
2828 WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
2829 goto fail;
2830 }
2831 /* Enable back TDLS if connected interface is <= 1 */
2832 wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
2833 }
2834
2835 wl_cfgnan_send_stop_event(cfg);
2836
2837 fail:
2838 /* Resetting instance ID mask */
2839 cfg->nancfg.inst_id_start = 0;
2840 memset(cfg->nancfg.svc_inst_id_mask, 0, sizeof(cfg->nancfg.svc_inst_id_mask));
2841 memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
2842 cfg->nan_enable = false;
2843
2844 if (nan_buf) {
2845 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
2846 }
2847 if (nan_iov_data) {
2848 MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
2849 }
2850
2851 NAN_MUTEX_UNLOCK();
2852 NAN_DBG_EXIT();
2853 return ret;
2854 }
2855
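/*
 * Runtime (post-enable) config update: batch only the subcommands whose
 * attributes are present in nan_attr_mask / cmd_data, then optionally
 * push local and NDC availability windows when bmap, duration and
 * chanspec[0] are all supplied.
 */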
2856 int
2857 wl_cfgnan_config_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
2858 nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
2859 {
2860 bcm_iov_batch_buf_t *nan_buf = NULL;
2861 s32 ret = BCME_OK;
2862 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
2863 wl_nan_iov_t *nan_iov_data = NULL;
2864 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
2865
2866 NAN_DBG_ENTER();
2867
2868 /* NAN needs to be enabled before configuring/updating params */
2869 if (cfg->nan_enable) {
2870 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
2871 if (!nan_buf) {
2872 WL_ERR(("%s: memory allocation failed\n", __func__));
2873 ret = BCME_NOMEM;
2874 goto fail;
2875 }
2876
2877 nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
2878 if (!nan_iov_data) {
2879 WL_ERR(("%s: memory allocation failed\n", __func__));
2880 ret = BCME_NOMEM;
2881 goto fail;
2882 }
2883
2884 nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
2885 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
2886 nan_buf->count = 0;
2887 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
2888 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
2889
2890 /* setting sid beacon val */
2891 if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
2892 (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
2893 ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
2894 if (unlikely(ret)) {
2895 WL_ERR(("sid_beacon sub_cmd set failed\n"));
2896 goto fail;
2897 }
2898 nan_buf->count++;
2899 }
2900
2901 /* setting master preference and random factor */
2902 if (cmd_data->metrics.random_factor ||
2903 cmd_data->metrics.master_pref) {
2904 ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data,
2905 nan_attr_mask);
2906 if (unlikely(ret)) {
2907 WL_ERR(("election_metric sub_cmd set failed\n"));
2908 goto fail;
2909 } else {
2910 nan_buf->count++;
2911 }
2912 }
2913
2914 /* setting hop count limit or threshold */
2915 if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
2916 ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
2917 if (unlikely(ret)) {
2918 WL_ERR(("hop_count_limit sub_cmd set failed\n"));
2919 goto fail;
2920 }
2921 nan_buf->count++;
2922 }
2923
2924 /* setting RSSI proximity values for 2.4GHz and 5GHz */
2925 ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data,
2926 nan_attr_mask);
2927 if (unlikely(ret)) {
2928 WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
2929 goto fail;
2930 } else {
2931 nan_buf->count++;
2932 }
2933
2934 /* setting nan awake dws */
2935 ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data,
2936 cfg, nan_attr_mask);
2937 if (unlikely(ret)) {
2938 WL_ERR(("nan awake dws set failed\n"));
2939 goto fail;
2940 } else {
2941 nan_buf->count++;
2942 }
2943
2944 if (cmd_data->disc_ind_cfg) {
2945 /* Disable events */
2946 WL_TRACE(("Disable events based on flag\n"));
2947 ret = wl_cfgnan_config_eventmask(ndev, cfg,
2948 cmd_data->disc_ind_cfg, false);
2949 if (unlikely(ret)) {
2950 WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n",
2951 ret));
2952 goto fail;
2953 }
2954 }
2955
2956 if ((cfg->support_5g) && ((cmd_data->dwell_time[1]) ||
2957 (cmd_data->scan_period[1]))) {
2958 /* setting scan params */
2959 ret = wl_cfgnan_set_nan_scan_params(ndev, cfg,
2960 cmd_data, cfg->support_5g, nan_attr_mask);
2961 if (unlikely(ret)) {
2962 WL_ERR(("scan params set failed for 5g\n"));
2963 goto fail;
2964 }
2965 }
2966 if ((cmd_data->dwell_time[0]) ||
2967 (cmd_data->scan_period[0])) {
2968 ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
2969 if (unlikely(ret)) {
2970 WL_ERR(("scan params set failed for 2g\n"));
2971 goto fail;
2972 }
2973 }
2974 nan_buf->is_set = true;
2975 nan_buf_size -= nan_iov_data->nan_iov_len;
2976
2977 if (nan_buf->count) {
2978 memset_s(resp_buf, sizeof(resp_buf),
2979 0, sizeof(resp_buf));
2980 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
2981 &(cmd_data->status),
2982 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
2983 if (unlikely(ret) || unlikely(cmd_data->status)) {
2984 WL_ERR((" nan config handler failed ret = %d status = %d\n",
2985 ret, cmd_data->status));
2986 goto fail;
2987 }
2988 } else {
2989 WL_DBG(("No commands to send\n"));
2990 }
2991
2992 if ((!cmd_data->bmap) || (cmd_data->avail_params.duration == NAN_BAND_INVALID) ||
2993 (!cmd_data->chanspec[0])) {
2994 WL_TRACE(("mandatory arguments are not present to set avail\n"));
2995 ret = BCME_OK;
2996 } else {
2997 cmd_data->avail_params.chanspec[0] = cmd_data->chanspec[0];
2998 cmd_data->avail_params.bmap = cmd_data->bmap;
2999 /* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
3000 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
3001 cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
3002 if (unlikely(ret)) {
3003 WL_ERR(("Failed to set avail value with type local\n"));
3004 goto fail;
3005 }
3006
3007 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
3008 cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
3009 if (unlikely(ret)) {
3010 WL_ERR(("Failed to set avail value with type ndc\n"));
3011 goto fail;
3012 }
3013 }
3014 } else {
3015 WL_INFORM(("nan is not enabled\n"));
3016 }
3017
3018 fail:
3019 if (nan_buf) {
3020 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3021 }
3022 if (nan_iov_data) {
3023 MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
3024 }
3025
3026 NAN_DBG_EXIT();
3027 return ret;
3028 }
3029
3030 int
3031 wl_cfgnan_support_handler(struct net_device *ndev,
3032 struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
3033 {
3034 /* TODO: */
3035 return BCME_OK;
3036 }
3037
3038 int
3039 wl_cfgnan_status_handler(struct net_device *ndev,
3040 struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
3041 {
3042 /* TODO: */
3043 return BCME_OK;
3044 }
3045
3046 #ifdef WL_NAN_DISC_CACHE
3047 static
3048 nan_svc_info_t *
3049 wl_cfgnan_get_svc_inst(struct bcm_cfg80211 *cfg,
3050 wl_nan_instance_id svc_inst_id, uint8 ndp_id)
3051 {
3052 uint8 i, j;
3053 if (ndp_id) {
3054 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3055 for (j = 0; j < NAN_MAX_SVC_INST; j++) {
3056 if (cfg->svc_info[i].ndp_id[j] == ndp_id) {
3057 return &cfg->svc_info[i];
3058 }
3059 }
3060 }
3061 } else if (svc_inst_id) {
3062 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3063 if (cfg->svc_info[i].svc_id == svc_inst_id) {
3064 return &cfg->svc_info[i];
3065 }
3066 }
3067
3068 }
3069 return NULL;
3070 }
3071
3072 nan_ranging_inst_t *
3073 wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg, struct ether_addr *peer)
3074 {
3075 uint8 i;
3076 if (peer) {
3077 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3078 if (!memcmp(peer, &cfg->nan_ranging_info[i].peer_addr,
3079 ETHER_ADDR_LEN)) {
3080 return &(cfg->nan_ranging_info[i]);
3081 }
3082 }
3083 }
3084 return NULL;
3085 }
3086
3087 nan_ranging_inst_t *
3088 wl_cfgnan_get_rng_inst_by_id(struct bcm_cfg80211 *cfg, uint8 rng_id)
3089 {
3090 uint8 i;
3091 if (rng_id) {
3092 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3093 if (cfg->nan_ranging_info[i].range_id == rng_id)
3094 {
3095 return &(cfg->nan_ranging_info[i]);
3096 }
3097 }
3098 }
3099 WL_ERR(("Couldn't find the ranging instance for rng_id %d\n", rng_id));
3100 return NULL;
3101 }
3102
3103 /*
3104 * Find the ranging instance for the given peer;
3105 * if none is found, create one
3106 * with the given range role
3107 */
3108 nan_ranging_inst_t *
3109 wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg, struct ether_addr *peer,
3110 nan_range_role_t range_role)
3111 {
3112 nan_ranging_inst_t *ranging_inst = NULL;
3113 uint8 i;
3114
3115 if (!peer) {
3116 WL_ERR(("Peer address is NULL"));
3117 goto done;
3118 }
3119
3120 ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
3121 if (ranging_inst) {
3122 goto done;
3123 }
3124 WL_TRACE(("Creating Ranging instance \n"));
3125
3126 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3127 if (cfg->nan_ranging_info[i].in_use == FALSE) {
3128 break;
3129 }
3130 }
3131
3132 if (i == NAN_MAX_RANGING_INST) {
3133 WL_ERR(("No buffer available for the ranging instance"));
3134 goto done;
3135 }
3136 ranging_inst = &cfg->nan_ranging_info[i];
3137 memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
3138 ranging_inst->range_status = NAN_RANGING_REQUIRED;
3139 ranging_inst->prev_distance_mm = INVALID_DISTANCE;
3140 ranging_inst->range_role = range_role;
3141 ranging_inst->in_use = TRUE;
3142
3143 done:
3144 return ranging_inst;
3145 }
3146 #endif /* WL_NAN_DISC_CACHE */
3147
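/*
 * Extract the firmware-assigned instance id from an ioctl response:
 * the NDP id for WL_NAN_CMD_DATA_DATAREQ, the range id for
 * WL_NAN_CMD_RANGE_REQUEST.
 */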
3148 static int
3149 process_resp_buf(void *iov_resp,
3150 uint8 *instance_id, uint16 sub_cmd_id)
3151 {
3152 int res = BCME_OK;
3153 NAN_DBG_ENTER();
3154
3155 if (sub_cmd_id == WL_NAN_CMD_DATA_DATAREQ) {
3156 wl_nan_dp_req_ret_t *dpreq_ret = NULL;
3157 dpreq_ret = (wl_nan_dp_req_ret_t *)(iov_resp);
3158 *instance_id = dpreq_ret->ndp_id;
3159 WL_TRACE(("%s: Initiator NDI: " MACDBG "\n",
3160 __FUNCTION__, MAC2STRDBG(dpreq_ret->indi.octet)));
3161 } else if (sub_cmd_id == WL_NAN_CMD_RANGE_REQUEST) {
3162 wl_nan_range_id *range_id = NULL;
3163 range_id = (wl_nan_range_id *)(iov_resp);
3164 *instance_id = *range_id;
3165 WL_TRACE(("Range id: %d\n", *range_id));
3166 }
3167 WL_DBG(("instance_id: %d\n", *instance_id));
3168 NAN_DBG_EXIT();
3169 return res;
3170 }
3171
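/*
 * Cancel an on-going ranging session. Firmware versions at or above
 * NAN_RANGE_EXT_CANCEL_SUPPORT_VER take the extended cancel struct
 * (range id + flags); older versions take the bare range id.
 */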
3172 int
3173 wl_cfgnan_cancel_ranging(struct net_device *ndev,
3174 struct bcm_cfg80211 *cfg, uint8 range_id, uint8 flags, uint32 *status)
3175 {
3176 bcm_iov_batch_buf_t *nan_buf = NULL;
3177 s32 ret = BCME_OK;
3178 uint16 nan_iov_start, nan_iov_end;
3179 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3180 uint16 subcmd_len;
3181 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
3182 wl_nan_iov_t *nan_iov_data = NULL;
3183 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
3184 wl_nan_range_cancel_ext_t rng_cncl;
3185 uint8 size_of_iov;
3186
3187 NAN_DBG_ENTER();
3188
3189 if (cfg->nancfg.version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) {
3190 size_of_iov = sizeof(rng_cncl);
3191 } else {
3192 size_of_iov = sizeof(range_id);
3193 }
3194
3195 memset_s(&rng_cncl, sizeof(rng_cncl), 0, sizeof(rng_cncl));
3196 rng_cncl.range_id = range_id;
3197 rng_cncl.flags = flags;
3198
3199 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
3200 if (!nan_buf) {
3201 WL_ERR(("%s: memory allocation failed\n", __func__));
3202 ret = BCME_NOMEM;
3203 goto fail;
3204 }
3205
3206 nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
3207 if (!nan_iov_data) {
3208 WL_ERR(("%s: memory allocation failed\n", __func__));
3209 ret = BCME_NOMEM;
3210 goto fail;
3211 }
3212
3213 nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
3214 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3215 nan_buf->count = 0;
3216 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
3217 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
3218 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
3219
3220 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
3221 size_of_iov, &subcmd_len);
3222 if (unlikely(ret)) {
3223 WL_ERR(("nan_sub_cmd check failed\n"));
3224 goto fail;
3225 }
3226
3227 sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
3228 sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
3229 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
3230
3231 /* Reduce the iov_len size by subcmd_len */
3232 nan_iov_data->nan_iov_len -= subcmd_len;
3233 nan_iov_end = nan_iov_data->nan_iov_len;
3234 nan_buf_size = (nan_iov_start - nan_iov_end);
3235
3236 if (size_of_iov >= sizeof(rng_cncl)) {
3237 (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
3238 &rng_cncl, size_of_iov);
3239 } else {
3240 (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
3241 &range_id, size_of_iov);
3242 }
3243
3244 nan_buf->is_set = true;
3245 nan_buf->count++;
3246 memset_s(resp_buf, sizeof(resp_buf),
3247 0, sizeof(resp_buf));
3248 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
3249 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
3250 if (unlikely(ret) || unlikely(*status)) {
3251 WL_ERR(("Range ID %d cancel failed ret %d status %d \n", range_id, ret, *status));
3252 goto fail;
3253 }
3254 WL_MEM(("Range cancel with Range ID [%d] successfull\n", range_id));
3255 fail:
3256 if (nan_buf) {
3257 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3258 }
3259 if (nan_iov_data) {
3260 MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
3261 }
3262 NAN_DBG_EXIT();
3263 return ret;
3264 }
3265
3266 #ifdef WL_NAN_DISC_CACHE
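/*
 * Cache (or update) the service instance for a publish/subscribe:
 * ranging parameters when NAN_SDE_CF_RANGING_REQUIRED is set, the tx
 * match filter for active subscribes, and the service hash.
 */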
3267 static int
3268 wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
3269 nan_discover_cmd_data_t *cmd_data, uint16 cmd_id, bool update)
3270 {
3271 int ret = BCME_OK;
3272 int i;
3273 nan_svc_info_t *svc_info;
3274 uint8 svc_id = (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) ? cmd_data->sub_id :
3275 cmd_data->pub_id;
3276
3277 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3278 if (update) {
3279 if (cfg->svc_info[i].svc_id == svc_id) {
3280 svc_info = &cfg->svc_info[i];
3281 break;
3282 } else {
3283 continue;
3284 }
3285 }
3286 if (!cfg->svc_info[i].svc_id) {
3287 svc_info = &cfg->svc_info[i];
3288 break;
3289 }
3290 }
3291 if (i == NAN_MAX_SVC_INST) {
3292 WL_ERR(("%s:cannot accomodate ranging session\n", __FUNCTION__));
3293 ret = BCME_NORESOURCE;
3294 goto fail;
3295 }
3296 if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
3297 WL_TRACE(("%s: updating ranging info, enabling", __FUNCTION__));
3298 svc_info->status = 1;
3299 svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
3300 svc_info->ranging_ind = cmd_data->ranging_indication;
3301 svc_info->ingress_limit = cmd_data->ingress_limit;
3302 svc_info->egress_limit = cmd_data->egress_limit;
3303 svc_info->ranging_required = 1;
3304 } else {
3305 WL_TRACE(("%s: updating ranging info, disabling", __FUNCTION__));
3306 svc_info->status = 0;
3307 svc_info->ranging_interval = 0;
3308 svc_info->ranging_ind = 0;
3309 svc_info->ingress_limit = 0;
3310 svc_info->egress_limit = 0;
3311 svc_info->ranging_required = 0;
3312 }
3313
3314 /* Reset Range status flags on svc creation/update */
3315 svc_info->svc_range_status = 0;
3316 svc_info->flags = cmd_data->flags;
3317
3318 if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
3319 svc_info->svc_id = cmd_data->sub_id;
3320 if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
3321 (cmd_data->tx_match.dlen)) {
3322 ret = memcpy_s(svc_info->tx_match_filter, sizeof(svc_info->tx_match_filter),
3323 cmd_data->tx_match.data, cmd_data->tx_match.dlen);
3324 if (ret != BCME_OK) {
3325 WL_ERR(("Failed to copy tx match filter data\n"));
3326 goto fail;
3327 }
3328 svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
3329 }
3330 } else {
3331 svc_info->svc_id = cmd_data->pub_id;
3332 }
3333 ret = memcpy_s(svc_info->svc_hash, sizeof(svc_info->svc_hash),
3334 cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
3335 if (ret != BCME_OK) {
3336 WL_ERR(("Failed to copy svc hash\n"));
3337 }
3338 fail:
3339 return ret;
3340
3341 }
3342
3343 static bool
3344 wl_cfgnan_clear_svc_from_ranging_inst(struct bcm_cfg80211 *cfg,
3345 nan_ranging_inst_t *ranging_inst, nan_svc_info_t *svc)
3346 {
3347 int i = 0;
3348 bool cleared = FALSE;
3349
3350 if (svc && ranging_inst->in_use) {
3351 for (i = 0; i < MAX_SUBSCRIBES; i++) {
3352 if (svc == ranging_inst->svc_idx[i]) {
3353 ranging_inst->num_svc_ctx--;
3354 ranging_inst->svc_idx[i] = NULL;
3355 cleared = TRUE;
3356 /*
3357 * This list is maintained duplicate-free,
3358 * hence we can break here
3359 */
3360 break;
3361 }
3362 }
3363 }
3364 return cleared;
3365 }
3366
3367 static int
3368 wl_cfgnan_clear_svc_from_all_ranging_inst(struct bcm_cfg80211 *cfg, uint8 svc_id)
3369 {
3370 nan_ranging_inst_t *ranging_inst;
3371 int i = 0;
3372 int ret = BCME_OK;
3373
3374 nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
3375 if (!svc) {
3376 WL_ERR(("\n svc not found \n"));
3377 ret = BCME_NOTFOUND;
3378 goto done;
3379 }
3380 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3381 ranging_inst = &(cfg->nan_ranging_info[i]);
3382 wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
3383 }
3384
3385 done:
3386 return ret;
3387 }
3388
3389 static int
3390 wl_cfgnan_ranging_clear_publish(struct bcm_cfg80211 *cfg,
3391 struct ether_addr *peer, uint8 svc_id)
3392 {
3393 nan_ranging_inst_t *ranging_inst = NULL;
3394 nan_svc_info_t *svc = NULL;
3395 bool cleared = FALSE;
3396 int ret = BCME_OK;
3397
3398 ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
3399 if (!ranging_inst || !ranging_inst->in_use) {
3400 goto done;
3401 }
3402
3403 WL_INFORM_MEM(("Check clear Ranging for pub update, sub id = %d,"
3404 " range_id = %d, peer addr = " MACDBG " \n", svc_id,
3405 ranging_inst->range_id, MAC2STRDBG(peer)));
3406 svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
3407 if (!svc) {
3408 WL_ERR(("\n svc not found, svc_id = %d\n", svc_id));
3409 ret = BCME_NOTFOUND;
3410 goto done;
3411 }
3412
3413 cleared = wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
3414 if (!cleared) {
3415 /* An update is needed only if this svc was actually cleared */
3416 ret = BCME_NOTFOUND;
3417 goto done;
3418 }
3419
3420 wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
3421
3422 done:
3423 return ret;
3424 }
3425
3426 #ifdef RTT_SUPPORT
3427 /* API to terminate/clear all directed nan-rtt sessions.
3428 * Can be called from framework RTT stop context
3429 */
3430 int
3431 wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev,
3432 struct bcm_cfg80211 *cfg)
3433 {
3434 nan_ranging_inst_t *ranging_inst;
3435 int i, ret = BCME_OK;
3436 uint32 status;
3437
3438 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3439 ranging_inst = &cfg->nan_ranging_info[i];
3440 if (ranging_inst->range_id && ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
3441 if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
3442 ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
3443 NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
3444 if (unlikely(ret) || unlikely(status)) {
3445 WL_ERR(("nan range cancel failed ret = %d status = %d\n",
3446 ret, status));
3447 }
3448 }
3449 wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
3450 RTT_SHCED_HOST_DIRECTED_TERM);
3451 }
3452 }
3453 return ret;
3454 }
3455 #endif /* RTT_SUPPORT */
3456
3457 /*
3458 * Suspend the ongoing geofence ranging session with a peer,
3459 * if the ongoing ranging is with the given peer.
3460 * If peer is NULL,
3461 * suspend the ongoing ranging unconditionally.
3462 * Do nothing if:
3463 * ranging is not in progress, or
3464 * ranging is in progress but not with the given peer
3465 */
3466 int
3467 wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
3468 struct ether_addr *peer, int suspend_reason, u8 cancel_flags)
3469 {
3470 int ret = BCME_OK;
3471 uint32 status;
3472 nan_ranging_inst_t *ranging_inst = NULL;
3473 struct ether_addr* peer_addr = NULL;
3474 struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
3475 #ifdef RTT_SUPPORT
3476 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
3477 rtt_geofence_target_info_t *geofence_target_info;
3478
3479 geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
3480 if (!geofence_target_info) {
3481 WL_DBG(("No Geofencing Targets, suspend req dropped\n"));
3482 goto exit;
3483 }
3484 peer_addr = &geofence_target_info->peer_addr;
3485
3486 ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
3487 if (dhd_rtt_get_geofence_rtt_state(dhd) == FALSE) {
3488 WL_DBG(("Geofencing Ranging not in progress, suspend req dropped\n"));
3489 goto exit;
3490 }
3491
3492 if (peer && memcmp(peer_addr, peer, ETHER_ADDR_LEN)) {
3493 if (suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER ||
3494 suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER) {
3495 /* NDP and Ranging can coexist with different Peers */
3496 WL_DBG(("Geofencing Ranging not in progress with given peer,"
3497 " suspend req dropped\n"));
3498 goto exit;
3499 }
3500 }
3501 #endif /* RTT_SUPPORT */
3502
3503 ASSERT((ranging_inst != NULL));
3504 if (ranging_inst) {
3505 if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
3506 WL_DBG(("Ranging Inst with peer not in progress, "
3507 " suspend req dropped\n"));
3508 goto exit;
3509 }
3510 cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
3511 ret = wl_cfgnan_cancel_ranging(ndev, cfg,
3512 ranging_inst->range_id, cancel_flags, &status);
3513 if (unlikely(ret) || unlikely(status)) {
3514 WL_ERR(("Geofence Range suspended failed, err = %d, status = %d,"
3515 " range_id = %d, suspend_reason = %d, " MACDBG " \n",
3516 ret, status, ranging_inst->range_id,
3517 suspend_reason, MAC2STRDBG(peer_addr)));
3518 }
3519 ranging_inst->range_status = NAN_RANGING_REQUIRED;
3520 WL_INFORM_MEM(("Geofence Range suspended, range_id = %d,"
3521 " suspend_reason = %d, " MACDBG " \n", ranging_inst->range_id,
3522 suspend_reason, MAC2STRDBG(peer_addr)));
3523 #ifdef RTT_SUPPORT
3524 /* Set geofence RTT in progress state to false */
3525 dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
3526 #endif /* RTT_SUPPORT */
3527 }
3528
3529 exit:
3530 /* Post pending discovery results */
3531 if (ranging_inst &&
3532 ((suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER) ||
3533 (suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER))) {
3534 wl_cfgnan_disc_result_on_geofence_cancel(cfg, ranging_inst);
3535 }
3536
3537 return ret;
3538 }
3539
3540 static void
3541 wl_cfgnan_clear_svc_cache(struct bcm_cfg80211 *cfg,
3542 wl_nan_instance_id svc_id)
3543 {
3544 nan_svc_info_t *svc;
3545 svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
3546 if (svc) {
3547 WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
3548 memset(svc, 0, sizeof(*svc));
3549 }
3550 }
3551
3552 /*
3553 * Terminate given ranging instance
3554 * if no pending ranging sub service
3555 */
3556 static void
3557 wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
3558 nan_ranging_inst_t *ranging_inst)
3559 {
3560 int ret = BCME_OK;
3561 uint32 status;
3562 #ifdef RTT_SUPPORT
3563 rtt_geofence_target_info_t* geofence_target = NULL;
3564 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
3565 int8 index;
3566 #endif /* RTT_SUPPORT */
3567
3568 if (ranging_inst->range_id == 0) {
3569 /* Caller must ensure the ranging instance is valid */
3570 return;
3571 }
3572
3573 if (ranging_inst->num_svc_ctx != 0) {
3574 /*
3575 * Make sure to remove all svc_insts for range_inst
3576 * in order to cancel ranging and remove target in caller
3577 */
3578 return;
3579 }
3580
3581 /* Cancel Ranging if in progress for rang_inst */
3582 if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
3583 ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
3584 cfg, ranging_inst->range_id,
3585 NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
3586 if (unlikely(ret) || unlikely(status)) {
3587 WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
3588 __FUNCTION__, ret, status));
3589 } else {
3590 WL_DBG(("Range cancelled \n"));
3591 /* Set geofence RTT in progress state to false */
3592 #ifdef RTT_SUPPORT
3593 dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
3594 #endif /* RTT_SUPPORT */
3595 }
3596 }
3597
3598 #ifdef RTT_SUPPORT
3599 geofence_target = dhd_rtt_get_geofence_target(dhd,
3600 &ranging_inst->peer_addr, &index);
3601 if (geofence_target) {
3602 dhd_rtt_remove_geofence_target(dhd, &geofence_target->peer_addr);
3603 WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
3604 MAC2STRDBG(&(ranging_inst->peer_addr))));
3605 bzero(ranging_inst, sizeof(nan_ranging_inst_t));
3606 }
3607 #endif /* RTT_SUPPORT */
3608 }
3609
3610 /*
3611 * Terminate all ranging sessions
3612 * with no pending ranging sub service
3613 */
3614 static void
3615 wl_cfgnan_terminate_all_obsolete_ranging_sessions(
3616 struct bcm_cfg80211 *cfg)
3617 {
3618 /* cancel all related ranging instances */
3619 uint8 i = 0;
3620 nan_ranging_inst_t *ranging_inst = NULL;
3621
3622 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3623 ranging_inst = &cfg->nan_ranging_info[i];
3624 if (ranging_inst->in_use) {
3625 wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
3626 }
3627 }
3628
3629 return;
3630 }
3631
3632 /*
3633 * Store svc_ctx for processing during RNG_RPT
3634 * Return BCME_OK only when svc is added
3635 */
3636 static int
3637 wl_cfgnan_update_ranging_svc_inst(nan_ranging_inst_t *ranging_inst,
3638 nan_svc_info_t *svc)
3639 {
3640 int ret = BCME_OK;
3641 int i = 0;
3642
3643 for (i = 0; i < MAX_SUBSCRIBES; i++) {
3644 if (ranging_inst->svc_idx[i] == svc) {
3645 WL_DBG(("SVC Ctx for ranging already present, "
3646 " Duplication not supported: sub_id: %d\n", svc->svc_id));
3647 ret = BCME_UNSUPPORTED;
3648 goto done;
3649 }
3650 }
3651 for (i = 0; i < MAX_SUBSCRIBES; i++) {
3652 if (ranging_inst->svc_idx[i]) {
3653 continue;
3654 } else {
3655 WL_DBG(("Adding SVC Ctx for ranging..svc_id %d\n", svc->svc_id));
3656 ranging_inst->svc_idx[i] = svc;
3657 ranging_inst->num_svc_ctx++;
3658 ret = BCME_OK;
3659 goto done;
3660 }
3661 }
3662 if (i == MAX_SUBSCRIBES) {
3663 WL_ERR(("wl_cfgnan_update_ranging_svc_inst: "
3664 "No resource to hold Ref SVC ctx..svc_id %d\n", svc->svc_id));
3665 ret = BCME_NORESOURCE;
3666 goto done;
3667 }
3668 done:
3669 return ret;
3670 }
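/*
 * Illustrative sketch (not driver code): the bookkeeping above is a generic
 * "reject duplicates, then take the first free slot" pattern over a
 * fixed-size pointer array. A minimal standalone version:
 *
 *	static int slot_add(void *slots[], int nslots, int *count, void *item)
 *	{
 *		int i;
 *		for (i = 0; i < nslots; i++)
 *			if (slots[i] == item)
 *				return BCME_UNSUPPORTED;	// duplicate
 *		for (i = 0; i < nslots; i++)
 *			if (slots[i] == NULL) {
 *				slots[i] = item;	// first free slot
 *				(*count)++;
 *				return BCME_OK;
 *			}
 *		return BCME_NORESOURCE;		// array is full
 *	}
 */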
3671
3672 #ifdef RTT_SUPPORT
3673 int
3674 wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
3675 struct ether_addr *peer_addr)
3676 {
3677 int ret = BCME_OK;
3678 int err_at = 0;
3679 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
3680 int8 index = -1;
3681 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
3682 rtt_geofence_target_info_t* geofence_target;
3683 nan_ranging_inst_t *ranging_inst;
3684 ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
3685
3686 if (!ranging_inst) {
3687 WL_INFORM_MEM(("Ranging Entry for peer:" MACDBG ", not found\n",
3688 MAC2STRDBG(peer_addr)));
3689 ASSERT(0);
3690 /* Ranging inst should have been added before adding target */
3691 dhd_rtt_remove_geofence_target(dhd, peer_addr);
3692 ret = BCME_ERROR;
3693 err_at = 1;
3694 goto exit;
3695 }
3696
3697 ASSERT(ranging_inst->range_status !=
3698 NAN_RANGING_IN_PROGRESS);
3699
3700 if (ranging_inst->range_status !=
3701 NAN_RANGING_IN_PROGRESS) {
3702 WL_DBG(("Trigger range request with first svc in svc list of range inst\n"));
3703 ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
3704 cfg, ranging_inst, ranging_inst->svc_idx[0],
3705 NAN_RANGE_REQ_CMD, TRUE);
3706 if (ret != BCME_OK) {
3707 /* BCME_BUSY indicates a ranging session is already in progress with this peer */
3708 if (ret == BCME_BUSY) {
3709 /* TODO: Attempt again over a timer */
3710 err_at = 2;
3711 } else {
3712 /* Remove target and clean ranging inst */
3713 geofence_target = dhd_rtt_get_geofence_target(dhd,
3714 &ranging_inst->peer_addr, &index);
3715 if (geofence_target) {
3716 dhd_rtt_remove_geofence_target(dhd,
3717 &geofence_target->peer_addr);
3718 }
3719 bzero(ranging_inst, sizeof(nan_ranging_inst_t));
3720 err_at = 3;
3721 goto exit;
3722 }
3723 }
3724 } else {
3725 /* Already in progress. This should not happen */
3726 ASSERT(0);
3727 ret = BCME_ERROR;
3728 err_at = 4;
3729 goto exit;
3730 }
3731
3732 exit:
3733 if (ret) {
3734 WL_ERR(("wl_cfgnan_trigger_geofencing_ranging: Failed to "
3735 "trigger ranging, peer: " MACDBG " ret"
3736 " = (%d), err_at = %d\n", MAC2STRDBG(peer_addr),
3737 ret, err_at));
3738 }
3739 return ret;
3740 }
3741 #endif /* RTT_SUPPORT */
3742
3743 static int
3744 wl_cfgnan_check_disc_result_for_ranging(struct bcm_cfg80211 *cfg,
3745 nan_event_data_t* nan_event_data)
3746 {
3747 nan_svc_info_t *svc;
3748 int ret = BCME_OK;
3749 #ifdef RTT_SUPPORT
3750 rtt_geofence_target_info_t geofence_target;
3751 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
3752 uint8 index;
3753 #endif /* RTT_SUPPORT */
3754 bool add_target;
3755
3756 svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
3757
3758 if (svc && svc->ranging_required) {
3759 nan_ranging_inst_t *ranging_inst;
3760 ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
3761 &nan_event_data->remote_nmi,
3762 NAN_RANGING_ROLE_INITIATOR);
3763 if (!ranging_inst) {
3764 ret = BCME_NORESOURCE;
3765 goto exit;
3766 }
3767 ASSERT(ranging_inst->range_role != NAN_RANGING_ROLE_INVALID);
3768
3769 /* For responder role, range state should be in progress only */
3770 ASSERT(ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR ||
3771 ranging_inst->range_status == NAN_RANGING_IN_PROGRESS);
3772
3773 /*
3774 * On rec disc result with ranging required, add target, if
3775 * ranging role is responder (range state has to be in prog always)
3776 * Or ranging role is initiator and ranging is not already in prog
3777 */
3778 add_target = ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
3779 ((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) &&
3780 (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS)));
3781 if (add_target) {
3782 WL_DBG(("Add Range request to geofence target list\n"));
3783 #ifdef RTT_SUPPORT
3784 memcpy(&geofence_target.peer_addr, &nan_event_data->remote_nmi,
3785 ETHER_ADDR_LEN);
3786 /* check if target is already added */
3787 if (!dhd_rtt_get_geofence_target(dhd, &nan_event_data->remote_nmi, &index))
3788 {
3789 ret = dhd_rtt_add_geofence_target(dhd, &geofence_target);
3790 if (unlikely(ret)) {
3791 WL_ERR(("Failed to add geofence Tgt, ret = (%d)\n", ret));
3792 bzero(ranging_inst, sizeof(*ranging_inst));
3793 goto exit;
3794 } else {
3795 WL_INFORM_MEM(("Geofence Tgt Added:" MACDBG " sub_id:%d\n",
3796 MAC2STRDBG(&geofence_target.peer_addr),
3797 svc->svc_id));
3798 }
3799 ranging_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
3800 }
3801 #endif /* RTT_SUPPORT */
3802 if (wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc)
3803 != BCME_OK) {
3804 goto exit;
3805 }
3806 #ifdef RTT_SUPPORT
3807 if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
3808 /* Adding RTT target while responder, leads to role concurrency */
3809 dhd_rtt_set_role_concurrency_state(dhd, TRUE);
3810 }
3811 else {
3812 /* Trigger/Reset geofence RTT */
3813 wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
3814 RTT_SCHED_SUB_MATCH);
3815 }
3816 #endif /* RTT_SUPPORT */
3817 } else {
3818 /* Target already added, check & add svc_inst ref to rang_inst */
3819 wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc);
3820 }
3821 /* Disc event will be given on receiving the range_rpt event */
3822 WL_TRACE(("Disc event will be given when Range RPT event is received"));
3823 } else {
3824 ret = BCME_UNSUPPORTED;
3825 }
3826
3827 exit:
3828 return ret;
3829 }
3830
3831 bool
3832 wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg)
3833 {
3834 int i = 0;
3835 uint8 rng_progress_count = 0;
3836 nan_ranging_inst_t *ranging_inst = NULL;
3837
3838 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3839 ranging_inst = &cfg->nan_ranging_info[i];
3840 if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
3841 rng_progress_count++;
3842 }
3843 }
3844
3845 ASSERT(rng_progress_count <= NAN_MAX_RANGING_SSN_ALLOWED);
3846 if (rng_progress_count == NAN_MAX_RANGING_SSN_ALLOWED) {
3847 return FALSE;
3848 }
3849 return TRUE;
3850 }
3851
3852 uint8
3853 wl_cfgnan_cancel_rng_responders(struct net_device *ndev,
3854 struct bcm_cfg80211 *cfg)
3855 {
3856 int i = 0;
3857 uint8 num_resp_cancelled = 0;
3858 int status, ret;
3859 nan_ranging_inst_t *ranging_inst = NULL;
3860
3861 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3862 ranging_inst = &cfg->nan_ranging_info[i];
3863 if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS &&
3864 ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
3865 num_resp_cancelled++;
3866 WL_ERR((" Cancelling responder\n"));
3867 ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
3868 ranging_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
3869 if (unlikely(ret) || unlikely(status)) {
3870 WL_ERR(("wl_cfgnan_cancel_rng_responders: Failed to cancel"
3871 " existing ranging, ret = (%d)\n", ret));
3872 }
3873 WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
3874 MAC2STRDBG(&(ranging_inst->peer_addr))));
3875 bzero(ranging_inst, sizeof(*ranging_inst));
3876 }
3877 }
3878 return num_resp_cancelled;
3879 }
3880
3881 #ifdef RTT_SUPPORT
3882 /* ranging request event handler */
3883 static int
3884 wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
3885 wl_nan_ev_rng_req_ind_t *rng_ind)
3886 {
3887 int ret = BCME_OK;
3888 nan_ranging_inst_t *ranging_inst = NULL;
3889 uint32 status;
3890 uint8 cancel_flags = 0;
3891 bool accept = TRUE;
3892 nan_ranging_inst_t tmp_rng_inst;
3893 struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
3894
3895 WL_DBG(("Trigger range response\n"));
3896
3897 /* Check if we already have a ranging session with the peer.
3898 * If so, the following policies apply:
3899 * If we are already a geofence initiator or responder w.r.t. the peer,
3900 * then silently tear down the current session and accept the REQ.
3901 * If we are in the direct RTT initiator role, then reject.
3902 */
3903 ranging_inst = wl_cfgnan_check_for_ranging(cfg, &(rng_ind->peer_m_addr));
3904 if (ranging_inst) {
3905 if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE ||
3906 ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
3907 WL_INFORM_MEM(("Already responder/geofence for the Peer, cancel current"
3908 " ssn and accept new one, range_type = %d, role = %d\n",
3909 ranging_inst->range_type, ranging_inst->range_role));
3910 cancel_flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
3911 NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN;
3912
3913 if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE &&
3914 ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
3915 wl_cfgnan_suspend_geofence_rng_session(ndev,
3916 &(rng_ind->peer_m_addr), RTT_GEO_SUSPN_PEER_RTT_TRIGGER,
3917 cancel_flags);
3918 } else {
3919 ret = wl_cfgnan_cancel_ranging(ndev, cfg,
3920 ranging_inst->range_id, cancel_flags, &status);
3921 if (unlikely(ret)) {
3922 WL_ERR(("wl_cfgnan_handle_ranging_ind: Failed to cancel"
3923 " existing ranging, ret = (%d)\n", ret));
3924 goto done;
3925 }
3926 }
3927 ranging_inst->range_status = NAN_RANGING_REQUIRED;
3928 ranging_inst->range_role = NAN_RANGING_ROLE_RESPONDER;
3929 ranging_inst->range_type = 0;
3930 } else {
3931 WL_ERR(("Reject the RNG_REQ_IND in direct rtt initiator role\n"));
3932 ret = BCME_BUSY;
3933 goto done;
3934 }
3935 } else {
3936 /* Check if new Ranging session is allowed */
3937 if (!wl_cfgnan_ranging_allowed(cfg)) {
3938 WL_ERR(("Cannot allow more ranging sessions \n"));
3939 ret = BCME_NORESOURCE;
3940 goto done;
3941 }
3942
3943 ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
3944 NAN_RANGING_ROLE_RESPONDER);
3945 if (!ranging_inst) {
3946 WL_ERR(("Failed to create ranging instance \n"));
3947 ASSERT(0);
3948 ret = BCME_NORESOURCE;
3949 goto done;
3950 }
3951 }
3952
3953 done:
3954 if (ret != BCME_OK) {
3955 /* reject the REQ using temp ranging instance */
3956 bzero(&tmp_rng_inst, sizeof(tmp_rng_inst));
3957 ranging_inst = &tmp_rng_inst;
3958 (void)memcpy_s(&tmp_rng_inst.peer_addr, ETHER_ADDR_LEN,
3959 &rng_ind->peer_m_addr, ETHER_ADDR_LEN);
3960 accept = FALSE;
3961 }
3962
3963 ranging_inst->range_id = rng_ind->rng_id;
3964
3965 WL_DBG(("Trigger Ranging at Responder\n"));
3966 ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
3967 NULL, NAN_RANGE_REQ_EVNT, accept);
3968 if (unlikely(ret) || !accept) {
3969 WL_ERR(("Failed to handle range request, ret = (%d) accept %d\n",
3970 ret, accept));
3971 bzero(ranging_inst, sizeof(*ranging_inst));
3972 }
3973
3974 return ret;
3975 }
3976 #endif /* RTT_SUPPORT */
3977 /* ranging request and response iovar handler */
3978 int
3979 wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
3980 void *ranging_ctxt, nan_svc_info_t *svc,
3981 uint8 range_cmd, bool accept_req)
3982 {
3983 s32 ret = BCME_OK;
3984 bcm_iov_batch_buf_t *nan_buf = NULL;
3985 wl_nan_range_req_t *range_req = NULL;
3986 wl_nan_range_resp_t *range_resp = NULL;
3987 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
3988 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3989 uint32 status;
3990 uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
3991 nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
3992 nan_avail_cmd_data cmd_data;
3993
3994 NAN_DBG_ENTER();
3995
3996 memset_s(&cmd_data, sizeof(cmd_data),
3997 0, sizeof(cmd_data));
3998 ret = memcpy_s(&cmd_data.peer_nmi, ETHER_ADDR_LEN,
3999 &ranging_inst->peer_addr, ETHER_ADDR_LEN);
4000 if (ret != BCME_OK) {
4001 WL_ERR(("Failed to copy ranging peer addr\n"));
4002 goto fail;
4003 }
4004
4005 cmd_data.avail_period = NAN_RANGING_PERIOD;
4006 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
4007 cfg, &cmd_data, WL_AVAIL_LOCAL);
4008 if (ret != BCME_OK) {
4009 WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
4010 goto fail;
4011 }
4012
4013 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
4014 cfg, &cmd_data, WL_AVAIL_RANGING);
4015 if (unlikely(ret)) {
4016 WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
4017 goto fail;
4018 }
4019
4020 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
4021 if (!nan_buf) {
4022 WL_ERR(("%s: memory allocation failed\n", __func__));
4023 ret = BCME_NOMEM;
4024 goto fail;
4025 }
4026
4027 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4028 nan_buf->count = 0;
4029 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
4030
4031 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
4032 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4033 if (range_cmd == NAN_RANGE_REQ_CMD) {
4034 sub_cmd->id = htod16(WL_NAN_CMD_RANGE_REQUEST);
4035 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_req_t);
4036 range_req = (wl_nan_range_req_t *)(sub_cmd->data);
4037 /* ranging config */
4038 range_req->peer = ranging_inst->peer_addr;
4039 if (svc) {
4040 range_req->interval = svc->ranging_interval;
4041 /* Limits are in cm from host */
4042 range_req->ingress = svc->ingress_limit;
4043 range_req->egress = svc->egress_limit;
4044 }
4045 range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
4046 } else {
4047 /* range response config */
4048 sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
4049 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_resp_t);
4050 range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
4051 range_resp->range_id = ranging_inst->range_id;
4052 range_resp->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
4053 if (accept_req) {
4054 range_resp->status = NAN_RNG_REQ_ACCEPTED_BY_HOST;
4055 } else {
4056 range_resp->status = NAN_RNG_REQ_REJECTED_BY_HOST;
4057 }
4058 nan_buf->is_set = true;
4059 }
4060
4061 nan_buf_size -= (sub_cmd->len +
4062 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
4063 nan_buf->count++;
4064
4065 memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
4066 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
4067 &status,
4068 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
4069 if (unlikely(ret) || unlikely(status)) {
4070 WL_ERR(("nan ranging failed ret = %d status = %d\n",
4071 ret, status));
4072 ret = (ret == BCME_OK) ? status : ret;
4073 goto fail;
4074 }
4075 WL_TRACE(("nan ranging trigger successful\n"));
4076 if (range_cmd == NAN_RANGE_REQ_CMD) {
4077 WL_MEM(("Ranging Req Triggered"
4078 " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
4079 MAC2STRDBG(&ranging_inst->peer_addr), range_req->indication,
4080 range_req->ingress, range_req->egress));
4081 } else {
4082 WL_MEM(("Ranging Resp Triggered"
4083 " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
4084 MAC2STRDBG(&ranging_inst->peer_addr), range_resp->indication,
4085 range_resp->ingress, range_resp->egress));
4086 }
4087
4088 /* check the response buff for request */
4089 if (range_cmd == NAN_RANGE_REQ_CMD) {
4090 ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
4091 &ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
4092 WL_INFORM_MEM(("ranging instance returned %d\n", ranging_inst->range_id));
4093 }
4094 /* Preventing continuous range requests */
4095 ranging_inst->range_status = NAN_RANGING_IN_PROGRESS;
4096
4097 fail:
4098 if (nan_buf) {
4099 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
4100 }
4101
4102 NAN_DBG_EXIT();
4103 return ret;
4104 }
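/*
 * Illustrative sketch (not driver code): layout of the batched iovar buffer
 * packed above. One subcommand is placed at nan_buf->cmds[0]; sub_cmd->len
 * covers the options word plus the payload, and the remaining buffer budget
 * shrinks by the payload plus the subcommand header:
 *
 *	bcm_iov_batch_buf_t:    | version | count | is_set | cmds[] ...
 *	bcm_iov_batch_subcmd_t: | id | len | u.options | data ...
 *
 *	sub_cmd->len  = sizeof(sub_cmd->u.options) + sizeof(payload);
 *	nan_buf_size -= sub_cmd->len +
 *	        OFFSETOF(bcm_iov_batch_subcmd_t, u.options);
 *	nan_buf->count++;
 */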
4105 #endif /* WL_NAN_DISC_CACHE */
4106
4107 static void *wl_nan_bloom_alloc(void *ctx, uint size)
4108 {
4109 uint8 *buf;
4110 BCM_REFERENCE(ctx);
4111
4112 buf = kmalloc(size, GFP_KERNEL);
4113 if (!buf) {
4114 WL_ERR(("%s: memory allocation failed\n", __func__));
4115 buf = NULL;
4116 }
4117 return buf;
4118 }
4119
4120 static void wl_nan_bloom_free(void *ctx, void *buf, uint size)
4121 {
4122 BCM_REFERENCE(ctx);
4123 BCM_REFERENCE(size);
4124 if (buf) {
4125 kfree(buf);
4126 }
4127 }
4128
4129 static uint wl_nan_hash(void *ctx, uint index, const uint8 *input, uint input_len)
4130 {
4131 uint8* filter_idx = (uint8*)ctx;
4132 uint8 i = (*filter_idx * WL_NAN_HASHES_PER_BLOOM) + (uint8)index;
4133 uint b = 0;
4134
4135 /* Steps 1 and 2 as explained in Section 6.2 */
4136 /* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
4137 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
4138 b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
4139 b = hndcrc32((uint8*)input, input_len, b);
4140 GCC_DIAGNOSTIC_POP();
4141 /* Obtain the last 2 bytes of the CRC32 output */
4142 b &= NAN_BLOOM_CRC32_MASK;
4143
4144 /* Step 3 is completed by bcmbloom functions */
4145 return b;
4146 }
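/*
 * Illustrative sketch (not driver code): the hash above effectively computes
 * CRC32 over the concatenation (index || input) without building a combined
 * buffer, by feeding the first CRC result back in as the init value of the
 * second pass (mac is a placeholder input):
 *
 *	uint8 idx = (filter_idx * WL_NAN_HASHES_PER_BLOOM) + hash_num;
 *	uint32 crc = hndcrc32(&idx, sizeof(idx), CRC32_INIT_VALUE);
 *	crc = hndcrc32(mac, ETHER_ADDR_LEN, crc);	// continue over input
 *	uint16 bucket = crc & NAN_BLOOM_CRC32_MASK;	// keep last 2 bytes
 */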
4147
4148 static int wl_nan_bloom_create(bcm_bloom_filter_t **bp, uint *idx, uint size)
4149 {
4150 uint i;
4151 int err;
4152
4153 err = bcm_bloom_create(wl_nan_bloom_alloc, wl_nan_bloom_free,
4154 idx, WL_NAN_HASHES_PER_BLOOM, size, bp);
4155 if (err != BCME_OK) {
4156 goto exit;
4157 }
4158
4159 /* Populate bloom filter with hash functions */
4160 for (i = 0; i < WL_NAN_HASHES_PER_BLOOM; i++) {
4161 err = bcm_bloom_add_hash(*bp, wl_nan_hash, &i);
4162 if (err) {
4163 WL_ERR(("bcm_bloom_add_hash failed\n"));
4164 goto exit;
4165 }
4166 }
4167 exit:
4168 return err;
4169 }
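/*
 * Illustrative usage sketch (not driver code): end-to-end life cycle of the
 * bloom filter built here, as consumed by the SRF packing below. peer_mac
 * and filter_buf are placeholder locals, and bcm_bloom_destroy() is assumed
 * to be the matching bcmbloom teardown call:
 *
 *	bcm_bloom_filter_t *bp = NULL;
 *	uint idx = instance_id % 4;	// filter index, 0..3
 *	uint filter_size = 0;
 *
 *	if (wl_nan_bloom_create(&bp, &idx, NAN_BLOOM_LENGTH_DEFAULT) == BCME_OK) {
 *		bcm_bloom_add_member(bp, (uint8 *)&peer_mac, ETHER_ADDR_LEN);
 *		bcm_bloom_get_filter_data(bp, NAN_BLOOM_LENGTH_DEFAULT,
 *			filter_buf, &filter_size);
 *		bcm_bloom_destroy(&bp, wl_nan_bloom_free);
 *	}
 */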
4170
4171 static int
4172 wl_cfgnan_sd_params_handler(struct net_device *ndev,
4173 nan_discover_cmd_data_t *cmd_data, uint16 cmd_id,
4174 void *p_buf, uint16 *nan_buf_size)
4175 {
4176 s32 ret = BCME_OK;
4177 uint8 *pxtlv, *srf = NULL, *srf_mac = NULL, *srftmp = NULL;
4178 uint16 buflen_avail;
4179 bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
4180 wl_nan_sd_params_t *sd_params = (wl_nan_sd_params_t *)sub_cmd->data;
4181 uint16 srf_size = 0;
4182 uint bloom_size, a;
4183 bcm_bloom_filter_t *bp = NULL;
4184 /* Bloom filter index default, indicates it has not been set */
4185 uint bloom_idx = 0xFFFFFFFF;
4186 uint16 bloom_len = NAN_BLOOM_LENGTH_DEFAULT;
4187 /* srf_ctrl_size = bloom_len + srf_control field */
4188 uint16 srf_ctrl_size = bloom_len + 1;
4189
4190 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
4191 struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
4192 BCM_REFERENCE(cfg);
4193
4194 NAN_DBG_ENTER();
4195
4196 if (cmd_data->period) {
4197 sd_params->awake_dw = cmd_data->period;
4198 }
4199 sd_params->period = 1;
4200
4201 if (cmd_data->ttl) {
4202 sd_params->ttl = cmd_data->ttl;
4203 } else {
4204 sd_params->ttl = WL_NAN_TTL_UNTIL_CANCEL;
4205 }
4206
4207 sd_params->flags = 0;
4208 sd_params->flags = cmd_data->flags;
4209
4210 /* Nan Service Based event suppression Flags */
4211 if (cmd_data->recv_ind_flag) {
4212 /* BIT0 - If set, host won't receive the "terminated" event */
4213 if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT)) {
4214 sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED;
4215 }
4216
4217 /* BIT1 - If set, host won't receive the match expiry event */
4218 /* TODO: Exp not yet supported */
4219 if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT)) {
4220 WL_DBG(("Need to add match expiry event\n"));
4221 }
4222 /* BIT2 - If set, host won't receive the "receive" event */
4223 if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT)) {
4224 sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE;
4225 }
4226 /* BIT3 - If set, host won't receive the "replied" event */
4227 if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_REPLIED_BIT)) {
4228 sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
4229 }
4230 }
4231 if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
4232 sd_params->instance_id = cmd_data->pub_id;
4233 if (cmd_data->service_responder_policy) {
4234 /* Do not disturb avail if dam is supported */
4235 if (FW_SUPPORTED(dhdp, autodam)) {
4236 /* Nan Accept policy: Per service basis policy
4237 * Based on this policy(ALL/NONE), responder side
4238 * will send ACCEPT/REJECT
4239 * If set, auto datapath responder will be sent by FW
4240 */
4241 sd_params->flags |= WL_NAN_SVC_CTRL_AUTO_DPRESP;
4242 } else {
4243 WL_ERR(("svc specifiv auto dp resp is not"
4244 " supported in non-auto dam fw\n"));
4245 }
4246 }
4247 } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
4248 sd_params->instance_id = cmd_data->sub_id;
4249 } else {
4250 ret = BCME_USAGE_ERROR;
4251 WL_ERR(("wrong command id = %d \n", cmd_id));
4252 goto fail;
4253 }
4254
4255 if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
4256 (cmd_data->svc_hash.data)) {
4257 ret = memcpy_s((uint8*)sd_params->svc_hash,
4258 sizeof(sd_params->svc_hash),
4259 cmd_data->svc_hash.data,
4260 cmd_data->svc_hash.dlen);
4261 if (ret != BCME_OK) {
4262 WL_ERR(("Failed to copy svc hash\n"));
4263 goto fail;
4264 }
4265 #ifdef WL_NAN_DEBUG
4266 prhex("hashed svc name", cmd_data->svc_hash.data,
4267 cmd_data->svc_hash.dlen);
4268 #endif /* WL_NAN_DEBUG */
4269 } else {
4270 ret = BCME_ERROR;
4271 WL_ERR(("invalid svc hash data or length = %d\n",
4272 cmd_data->svc_hash.dlen));
4273 goto fail;
4274 }
4275
4276 /* check if ranging support is present in firmware */
4277 if ((cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) &&
4278 !FW_SUPPORTED(dhdp, nanrange)) {
4279 WL_ERR(("Service requires ranging but fw doesnt support it\n"));
4280 ret = BCME_UNSUPPORTED;
4281 goto fail;
4282 }
4283
4284 /* Optional parameters: fill the sub_command block with service descriptor attr */
4285 sub_cmd->id = htod16(cmd_id);
4286 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4287 sub_cmd->len = sizeof(sub_cmd->u.options) +
4288 OFFSETOF(wl_nan_sd_params_t, optional[0]);
4289 pxtlv = (uint8*)&sd_params->optional[0];
4290
4291 *nan_buf_size -= sub_cmd->len;
4292 buflen_avail = *nan_buf_size;
4293
4294 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
4295 WL_TRACE(("optional svc_info present, pack it\n"));
4296 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4297 WL_NAN_XTLV_SD_SVC_INFO,
4298 cmd_data->svc_info.dlen,
4299 cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
4300 if (unlikely(ret)) {
4301 WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__));
4302 goto fail;
4303 }
4304 }
4305
4306 if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
4307 WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
4308 cmd_data->sde_svc_info.dlen));
4309 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4310 WL_NAN_XTLV_SD_SDE_SVC_INFO,
4311 cmd_data->sde_svc_info.dlen,
4312 cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
4313 if (unlikely(ret)) {
4314 WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
4315 goto fail;
4316 }
4317 }
4318
4319 if (cmd_data->tx_match.dlen) {
4320 WL_TRACE(("optional tx match filter presnet (len=%d)\n",
4321 cmd_data->tx_match.dlen));
4322 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4323 WL_NAN_XTLV_CFG_MATCH_TX, cmd_data->tx_match.dlen,
4324 cmd_data->tx_match.data, BCM_XTLV_OPTION_ALIGN32);
4325 if (unlikely(ret)) {
4326 WL_ERR(("%s: failed on xtlv_pack for tx match filter\n", __FUNCTION__));
4327 goto fail;
4328 }
4329 }
4330
4331 if (cmd_data->life_count) {
4332 WL_TRACE(("optional life count is present, pack it\n"));
4333 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT,
4334 sizeof(cmd_data->life_count), &cmd_data->life_count,
4335 BCM_XTLV_OPTION_ALIGN32);
4336 if (unlikely(ret)) {
4337 WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n", __FUNCTION__));
4338 goto fail;
4339 }
4340 }
4341
4342 if (cmd_data->use_srf) {
4343 uint8 srf_control = 0;
4344 /* set include bit */
4345 if (cmd_data->srf_include == true) {
4346 srf_control |= 0x2;
4347 }
4348
4349 if (!ETHER_ISNULLADDR(&cmd_data->mac_list.list) &&
4350 (cmd_data->mac_list.num_mac_addr
4351 < NAN_SRF_MAX_MAC)) {
4352 if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
4353 /* mac list */
4354 srf_size = (cmd_data->mac_list.num_mac_addr
4355 * ETHER_ADDR_LEN) + NAN_SRF_CTRL_FIELD_LEN;
4356 WL_TRACE(("srf size = %d\n", srf_size));
4357
4358 srf_mac = MALLOCZ(cfg->osh, srf_size);
4359 if (srf_mac == NULL) {
4360 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
4361 ret = -ENOMEM;
4362 goto fail;
4363 }
4364 ret = memcpy_s(srf_mac, NAN_SRF_CTRL_FIELD_LEN,
4365 &srf_control, NAN_SRF_CTRL_FIELD_LEN);
4366 if (ret != BCME_OK) {
4367 WL_ERR(("Failed to copy srf control\n"));
4368 goto fail;
4369 }
4370 ret = memcpy_s(srf_mac+1, (srf_size - NAN_SRF_CTRL_FIELD_LEN),
4371 cmd_data->mac_list.list,
4372 (srf_size - NAN_SRF_CTRL_FIELD_LEN));
4373 if (ret != BCME_OK) {
4374 WL_ERR(("Failed to copy srf control mac list\n"));
4375 goto fail;
4376 }
4377 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4378 WL_NAN_XTLV_CFG_SR_FILTER, srf_size, srf_mac,
4379 BCM_XTLV_OPTION_ALIGN32);
4380 if (unlikely(ret)) {
4381 WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SR_FILTER\n",
4382 __FUNCTION__));
4383 goto fail;
4384 }
4385 } else if (cmd_data->srf_type == SRF_TYPE_BLOOM_FILTER) {
4386 /* Create bloom filter */
4387 srf = MALLOCZ(cfg->osh, srf_ctrl_size);
4388 if (srf == NULL) {
4389 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
4390 ret = -ENOMEM;
4391 goto fail;
4392 }
4393 /* Bloom filter */
4394 srf_control |= 0x1;
4395 /* Instance id must be from 1 to 255, 0 is Reserved */
4396 if (sd_params->instance_id == NAN_ID_RESERVED) {
4397 WL_ERR(("Invalid instance id: %d\n",
4398 sd_params->instance_id));
4399 ret = BCME_BADARG;
4400 goto fail;
4401 }
4402 if (bloom_idx == 0xFFFFFFFF) {
4403 bloom_idx = sd_params->instance_id % 4;
4404 } else {
4405 WL_ERR(("Invalid bloom_idx\n"));
4406 ret = BCME_BADARG;
4407 goto fail;
4408
4409 }
4410 srf_control |= bloom_idx << 2;
4411
4412 ret = wl_nan_bloom_create(&bp, &bloom_idx, bloom_len);
4413 if (unlikely(ret)) {
4414 WL_ERR(("%s: Bloom create failed\n", __FUNCTION__));
4415 goto fail;
4416 }
4417
4418 srftmp = cmd_data->mac_list.list;
4419 for (a = 0;
4420 a < cmd_data->mac_list.num_mac_addr; a++) {
4421 ret = bcm_bloom_add_member(bp, srftmp, ETHER_ADDR_LEN);
4422 if (unlikely(ret)) {
4423 WL_ERR(("%s: Cannot add to bloom filter\n",
4424 __FUNCTION__));
4425 goto fail;
4426 }
4427 srftmp += ETHER_ADDR_LEN;
4428 }
4429
4430 ret = memcpy_s(srf, NAN_SRF_CTRL_FIELD_LEN,
4431 &srf_control, NAN_SRF_CTRL_FIELD_LEN);
4432 if (ret != BCME_OK) {
4433 WL_ERR(("Failed to copy srf control\n"));
4434 goto fail;
4435 }
4436 ret = bcm_bloom_get_filter_data(bp, bloom_len,
4437 (srf + NAN_SRF_CTRL_FIELD_LEN),
4438 &bloom_size);
4439 if (unlikely(ret)) {
4440 WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__));
4441 goto fail;
4442 }
4443 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4444 WL_NAN_XTLV_CFG_SR_FILTER, srf_ctrl_size,
4445 srf, BCM_XTLV_OPTION_ALIGN32);
4446 if (ret != BCME_OK) {
4447 WL_ERR(("Failed to pack SR FILTER data, ret = %d\n", ret));
4448 goto fail;
4449 }
4450 } else {
4451 WL_ERR(("Invalid SRF Type = %d !!!\n",
4452 cmd_data->srf_type));
4453 ret = BCME_BADARG; goto fail;
4454 }
4455 } else {
4456 WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
4457 cmd_data->mac_list.num_mac_addr));
4458 goto fail;
4459 }
4460 }
4461
4462 if (cmd_data->rx_match.dlen) {
4463 WL_TRACE(("optional rx match filter is present, pack it\n"));
4464 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4465 WL_NAN_XTLV_CFG_MATCH_RX, cmd_data->rx_match.dlen,
4466 cmd_data->rx_match.data, BCM_XTLV_OPTION_ALIGN32);
4467 if (unlikely(ret)) {
4468 WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__));
4469 goto fail;
4470 }
4471 }
4472
4473 /* Security elements */
4474 if (cmd_data->csid) {
4475 WL_TRACE(("Cipher suite type is present, pack it\n"));
4476 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4477 WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
4478 (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
4479 if (unlikely(ret)) {
4480 WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
4481 goto fail;
4482 }
4483 }
4484
4485 if (cmd_data->ndp_cfg.security_cfg) {
4486 if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
4487 (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
4488 if (cmd_data->key.data && cmd_data->key.dlen) {
4489 WL_TRACE(("optional pmk present, pack it\n"));
4490 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4491 WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
4492 cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
4493 if (unlikely(ret)) {
4494 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
4495 __FUNCTION__));
4496 goto fail;
4497 }
4498 }
4499 } else {
4500 WL_ERR(("Invalid security key type\n"));
4501 ret = BCME_BADARG;
4502 goto fail;
4503 }
4504 }
4505
4506 if (cmd_data->scid.data && cmd_data->scid.dlen) {
4507 WL_TRACE(("optional scid present, pack it\n"));
4508 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_SCID,
4509 cmd_data->scid.dlen, cmd_data->scid.data, BCM_XTLV_OPTION_ALIGN32);
4510 if (unlikely(ret)) {
4511 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__));
4512 goto fail;
4513 }
4514 }
4515
4516 if (cmd_data->sde_control_config) {
4517 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
4518 WL_NAN_XTLV_SD_SDE_CONTROL,
4519 sizeof(uint16), (uint8*)&cmd_data->sde_control_flag,
4520 BCM_XTLV_OPTION_ALIGN32);
4521 if (ret != BCME_OK) {
4522 WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n", __FUNCTION__));
4523 goto fail;
4524 }
4525 }
4526
4527 sub_cmd->len += (buflen_avail - *nan_buf_size);
4528
4529 fail:
4530 if (srf) {
4531 MFREE(cfg->osh, srf, srf_ctrl_size);
4532 }
4533
4534 if (srf_mac) {
4535 MFREE(cfg->osh, srf_mac, srf_size);
4536 }
4537 NAN_DBG_EXIT();
4538 return ret;
4539 }
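/*
 * Illustrative sketch (not driver code): the SRF control byte composed in
 * the function above, from the srf_control bit operations:
 *
 *	bit 0   : SRF type (0 = MAC address list, 1 = bloom filter)
 *	bit 1   : include (1 = include matching peers, 0 = exclude)
 *	bits 2-3: bloom filter index (instance_id % 4)
 *
 *	uint8 srf_control = 0;
 *	srf_control |= 0x1;				// bloom filter type
 *	srf_control |= 0x2;				// include bit
 *	srf_control |= (instance_id % 4) << 2;		// bloom index
 */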
4540
4541 static int
4542 wl_cfgnan_aligned_data_size_of_opt_disc_params(uint16 *data_size, nan_discover_cmd_data_t *cmd_data)
4543 {
4544 s32 ret = BCME_OK;
4545 if (cmd_data->svc_info.dlen)
4546 *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
4547 if (cmd_data->sde_svc_info.dlen)
4548 *data_size += ALIGN_SIZE(cmd_data->sde_svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
4549 if (cmd_data->tx_match.dlen)
4550 *data_size += ALIGN_SIZE(cmd_data->tx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
4551 if (cmd_data->rx_match.dlen)
4552 *data_size += ALIGN_SIZE(cmd_data->rx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
4553 if (cmd_data->use_srf) {
4554 if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
4555 *data_size += (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)
4556 + NAN_SRF_CTRL_FIELD_LEN;
4557 } else { /* Bloom filter type */
4558 *data_size += NAN_BLOOM_LENGTH_DEFAULT + 1;
4559 }
4560 *data_size += ALIGN_SIZE(*data_size + NAN_XTLV_ID_LEN_SIZE, 4);
4561 }
4562 if (cmd_data->csid)
4563 *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
4564 if (cmd_data->key.dlen)
4565 *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
4566 if (cmd_data->scid.dlen)
4567 *data_size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
4568 if (cmd_data->sde_control_config)
4569 *data_size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 4);
4570 if (cmd_data->life_count)
4571 *data_size += ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 4);
4572 return ret;
4573 }
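/*
 * Worked example (not driver code) of the XTLV size accounting above: each
 * optional parameter costs its payload plus the XTLV id/len header, rounded
 * up to a 4-byte boundary. For a 9-byte svc_info payload, and assuming
 * NAN_XTLV_ID_LEN_SIZE is 4 (16-bit id + 16-bit len):
 *
 *	ALIGN_SIZE(9 + 4, 4) = ALIGN_SIZE(13, 4) = 16 bytes reserved
 */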
4574
4575 static int
4576 wl_cfgnan_aligned_data_size_of_opt_dp_params(uint16 *data_size, nan_datapath_cmd_data_t *cmd_data)
4577 {
4578 s32 ret = BCME_OK;
4579 if (cmd_data->svc_info.dlen)
4580 *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
4581 if (cmd_data->key.dlen)
4582 *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
4583 if (cmd_data->csid)
4584 *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
4585
4586 *data_size += ALIGN_SIZE(WL_NAN_SVC_HASH_LEN + NAN_XTLV_ID_LEN_SIZE, 4);
4587 return ret;
4588 }
4589 int
4590 wl_cfgnan_svc_get_handler(struct net_device *ndev,
4591 struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
4592 {
4593 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
4594 uint32 instance_id;
4595 s32 ret = BCME_OK;
4596 bcm_iov_batch_buf_t *nan_buf = NULL;
4597
4598 uint8 *resp_buf = NULL;
4599 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET + sizeof(instance_id);
4600
4601 NAN_DBG_ENTER();
4602
4603 nan_buf = MALLOCZ(cfg->osh, data_size);
4604 if (!nan_buf) {
4605 WL_ERR(("%s: memory allocation failed\n", __func__));
4606 ret = BCME_NOMEM;
4607 goto fail;
4608 }
4609
4610 resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
4611 if (!resp_buf) {
4612 WL_ERR(("%s: memory allocation failed\n", __func__));
4613 ret = BCME_NOMEM;
4614 goto fail;
4615 }
4616 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4617 nan_buf->count = 1;
4618 /* check if service is present */
4619 nan_buf->is_set = false;
4620 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
4621 if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
4622 instance_id = cmd_data->pub_id;
4623 } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
4624 instance_id = cmd_data->sub_id;
4625 } else {
4626 ret = BCME_USAGE_ERROR;
4627 WL_ERR(("wrong command id = %u\n", cmd_id));
4628 goto fail;
4629 }
4630 /* Fill the sub_command block */
4631 sub_cmd->id = htod16(cmd_id);
4632 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
4633 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4634
4635 ret = memcpy_s(sub_cmd->data, (data_size - WL_NAN_OBUF_DATA_OFFSET),
4636 &instance_id, sizeof(instance_id));
4637 if (ret != BCME_OK) {
4638 WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
4639 goto fail;
4640 }
4641
4642 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
4643 &(cmd_data->status), resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
4644
4645 if (unlikely(ret) || unlikely(cmd_data->status)) {
4646 WL_ERR(("nan svc check failed ret = %d status = %d\n", ret, cmd_data->status));
4647 goto fail;
4648 } else {
4649 WL_DBG(("nan svc check successful..proceed to update\n"));
4650 }
4651
4652 fail:
4653 if (nan_buf) {
4654 MFREE(cfg->osh, nan_buf, data_size);
4655 }
4656
4657 if (resp_buf) {
4658 MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
4659 }
4660 NAN_DBG_EXIT();
4661 return ret;
4662
4663 }
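/*
 * Illustrative sketch (not driver code): the GET/SET convention used by the
 * batch iovar helpers in this file. A GET batch (as above) is sent with
 * is_set = false and count fixed to 1, while a SET batch (see
 * wl_cfgnan_svc_handler() below) uses is_set = true and bumps count once
 * per packed subcommand:
 *
 *	nan_buf->is_set = false;	// GET: query a single subcommand
 *	nan_buf->count = 1;
 *	...
 *	nan_buf->is_set = true;		// SET: apply packed subcommands
 *	nan_buf->count++;
 */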
4664
4665 int
4666 wl_cfgnan_svc_handler(struct net_device *ndev,
4667 struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
4668 {
4669 s32 ret = BCME_OK;
4670 bcm_iov_batch_buf_t *nan_buf = NULL;
4671 uint16 nan_buf_size;
4672 uint8 *resp_buf = NULL;
4673 /* Considering fixed params */
4674 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
4675 OFFSETOF(wl_nan_sd_params_t, optional[0]);
4676
4677 if (cmd_data->svc_update) {
4678 ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
4679 if (ret != BCME_OK) {
4680 WL_ERR(("Failed to update svc handler, ret = %d\n", ret));
4681 goto fail;
4682 } else {
4683 /* Ignoring any other svc get error */
4684 if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
4685 WL_ERR(("Bad instance status, failed to update svc handler\n"));
4686 goto fail;
4687 }
4688 }
4689 }
4690
4691 ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
4692 if (unlikely(ret)) {
4693 WL_ERR(("Failed to get alligned size of optional params\n"));
4694 goto fail;
4695 }
4696 nan_buf_size = data_size;
4697 NAN_DBG_ENTER();
4698
4699 nan_buf = MALLOCZ(cfg->osh, data_size);
4700 if (!nan_buf) {
4701 WL_ERR(("%s: memory allocation failed\n", __func__));
4702 ret = BCME_NOMEM;
4703 goto fail;
4704 }
4705
4706 resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
4707 if (!resp_buf) {
4708 WL_ERR(("%s: memory allocation failed\n", __func__));
4709 ret = BCME_NOMEM;
4710 goto fail;
4711 }
4712 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4713 nan_buf->count = 0;
4714 nan_buf->is_set = true;
4715
4716 ret = wl_cfgnan_sd_params_handler(ndev, cmd_data, cmd_id,
4717 &nan_buf->cmds[0], &nan_buf_size);
4718 if (unlikely(ret)) {
4719 WL_ERR((" Service discovery params handler failed, ret = %d\n", ret));
4720 goto fail;
4721 }
4722
4723 nan_buf->count++;
4724 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
4725 &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
4726 if (cmd_data->svc_update && (cmd_data->status == BCME_DATA_NOTFOUND)) {
4727 /* return OK if update tlv data is not present
4728 * which means nothing to update
4729 */
4730 cmd_data->status = BCME_OK;
4731 }
4732 if (unlikely(ret) || unlikely(cmd_data->status)) {
4733 WL_ERR(("nan svc failed ret = %d status = %d\n", ret, cmd_data->status));
4734 goto fail;
4735 } else {
4736 WL_DBG(("nan svc successful\n"));
4737 #ifdef WL_NAN_DISC_CACHE
4738 ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id, cmd_data->svc_update);
4739 if (ret < 0) {
4740 WL_ERR(("%s: fail to cache svc info, ret=%d\n",
4741 __FUNCTION__, ret));
4742 goto fail;
4743 }
4744 #endif /* WL_NAN_DISC_CACHE */
4745 }
4746
4747 fail:
4748 if (nan_buf) {
4749 MFREE(cfg->osh, nan_buf, data_size);
4750 }
4751
4752 if (resp_buf) {
4753 MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
4754 }
4755 NAN_DBG_EXIT();
4756 return ret;
4757 }
4758
4759 int
4760 wl_cfgnan_publish_handler(struct net_device *ndev,
4761 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
4762 {
4763 int ret = BCME_OK;
4764
4765 NAN_DBG_ENTER();
4766 NAN_MUTEX_LOCK();
4767 /*
4768 * proceed only if mandatory arguments are present - publisher id,
4769 * service hash
4770 */
4771 if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
4772 (!cmd_data->svc_hash.dlen)) {
4773 WL_ERR(("mandatory arguments are not present\n"));
4774 ret = BCME_BADARG;
4775 goto fail;
4776 }
4777
4778 ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_PUBLISH, cmd_data);
4779 if (ret < 0) {
4780 WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__, ret));
4781 goto fail;
4782 }
4783 WL_INFORM_MEM(("[NAN] Service published for instance id:%d\n", cmd_data->pub_id));
4784
4785 fail:
4786 NAN_MUTEX_UNLOCK();
4787 NAN_DBG_EXIT();
4788 return ret;
4789 }
4790
4791 int
4792 wl_cfgnan_subscribe_handler(struct net_device *ndev,
4793 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
4794 {
4795 int ret = BCME_OK;
4796 #ifdef WL_NAN_DISC_CACHE
4797 nan_svc_info_t *svc_info;
4798 uint8 upd_ranging_required;
4799 #endif /* WL_NAN_DISC_CACHE */
4800 #ifdef RTT_GEOFENCE_CONT
4801 #ifdef RTT_SUPPORT
4802 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4803 rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
4804 #endif /* RTT_SUPPORT */
4805 #endif /* RTT_GEOFENCE_CONT */
4806
4807 NAN_DBG_ENTER();
4808 NAN_MUTEX_LOCK();
4809
4810 /*
4811 * proceed only if mandatory arguments are present - subscriber id,
4812 * service hash
4813 */
4814 if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
4815 (!cmd_data->svc_hash.dlen)) {
4816 WL_ERR(("mandatory arguments are not present\n"));
4817 ret = BCME_BADARG;
4818 goto fail;
4819 }
4820
4821 /* Check for ranging sessions if any */
4822 if (cmd_data->svc_update) {
4823 #ifdef WL_NAN_DISC_CACHE
4824 svc_info = wl_cfgnan_get_svc_inst(cfg, cmd_data->sub_id, 0);
4825 if (svc_info) {
4826 wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
4827 /* terminate ranging sessions for this svc, avoid clearing svc cache */
4828 wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
4829 WL_DBG(("Ranging sessions handled for svc update\n"));
4830 upd_ranging_required = !!(cmd_data->sde_control_flag &
4831 NAN_SDE_CF_RANGING_REQUIRED);
4832 if ((svc_info->ranging_required ^ upd_ranging_required) ||
4833 (svc_info->ingress_limit != cmd_data->ingress_limit) ||
4834 (svc_info->egress_limit != cmd_data->egress_limit)) {
4835 /* Clear cache info in Firmware */
4836 ret = wl_cfgnan_clear_disc_cache(cfg, cmd_data->sub_id);
4837 if (ret != BCME_OK) {
4838 WL_ERR(("couldn't send clear cache to FW \n"));
4839 goto fail;
4840 }
4841 /* Invalidate local cache info */
4842 wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
4843 }
4844 }
4845 #endif /* WL_NAN_DISC_CACHE */
4846 }
4847
4848 #ifdef RTT_GEOFENCE_CONT
4849 #ifdef RTT_SUPPORT
4850 /* Override ranging Indication */
4851 if (rtt_status->geofence_cfg.geofence_cont) {
4852 if (cmd_data->ranging_indication !=
4853 NAN_RANGE_INDICATION_NONE) {
4854 cmd_data->ranging_indication = NAN_RANGE_INDICATION_CONT;
4855 }
4856 }
4857 #endif /* RTT_SUPPORT */
4858 #endif /* RTT_GEOFENCE_CONT */
4859 ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
4860 if (ret < 0) {
4861 WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
4862 goto fail;
4863 }
4864 WL_INFORM_MEM(("[NAN] Service subscribed for instance id:%d\n", cmd_data->sub_id));
4865
4866 fail:
4867 NAN_MUTEX_UNLOCK();
4868 NAN_DBG_EXIT();
4869 return ret;
4870 }
4871
4872 static int
4873 wl_cfgnan_cancel_handler(nan_discover_cmd_data_t *cmd_data,
4874 uint16 cmd_id, void *p_buf, uint16 *nan_buf_size)
4875 {
4876 s32 ret = BCME_OK;
4877
4878 NAN_DBG_ENTER();
4879
4880 if (p_buf != NULL) {
4881 bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
4882 wl_nan_instance_id_t instance_id;
4883
4884 if (cmd_id == WL_NAN_CMD_SD_CANCEL_PUBLISH) {
4885 instance_id = cmd_data->pub_id;
4886 } else if (cmd_id == WL_NAN_CMD_SD_CANCEL_SUBSCRIBE) {
4887 instance_id = cmd_data->sub_id;
4888 } else {
4889 ret = BCME_USAGE_ERROR;
4890 WL_ERR(("wrong command id = %u\n", cmd_id));
4891 goto fail;
4892 }
4893
4894 /* Fill the sub_command block */
4895 sub_cmd->id = htod16(cmd_id);
4896 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
4897 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4898 ret = memcpy_s(sub_cmd->data, *nan_buf_size,
4899 &instance_id, sizeof(instance_id));
4900 if (ret != BCME_OK) {
4901 WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
4902 goto fail;
4903 }
4904 /* adjust iov data len to the end of last data record */
4905 *nan_buf_size -= (sub_cmd->len +
4906 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
4907 WL_INFORM_MEM(("[NAN] Service with instance id:%d cancelled\n", instance_id));
4908 } else {
4909 WL_ERR(("nan_iov_buf is NULL\n"));
4910 ret = BCME_ERROR;
4911 goto fail;
4912 }
4913
4914 fail:
4915 NAN_DBG_EXIT();
4916 return ret;
4917 }
4918
4919 int
4920 wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
4921 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
4922 {
4923 bcm_iov_batch_buf_t *nan_buf = NULL;
4924 s32 ret = BCME_OK;
4925 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
4926 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
4927
4928 NAN_DBG_ENTER();
4929 NAN_MUTEX_LOCK();
4930
4931 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
4932 if (!nan_buf) {
4933 WL_ERR(("%s: memory allocation failed\n", __func__));
4934 ret = BCME_NOMEM;
4935 goto fail;
4936 }
4937
4938 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4939 nan_buf->count = 0;
4940 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
4941
4942 /* proceed only if mandatory argument is present - publisher id */
4943 if (!cmd_data->pub_id) {
4944 WL_ERR(("mandatory argument is not present\n"));
4945 ret = BCME_BADARG;
4946 goto fail;
4947 }
4948
4949 #ifdef WL_NAN_DISC_CACHE
4950 wl_cfgnan_clear_svc_cache(cfg, cmd_data->pub_id);
4951 #endif /* WL_NAN_DISC_CACHE */
4952 ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
4953 &nan_buf->cmds[0], &nan_buf_size);
4954 if (unlikely(ret)) {
4955 WL_ERR(("cancel publish failed\n"));
4956 goto fail;
4957 }
4958 nan_buf->is_set = true;
4959 nan_buf->count++;
4960
4961 memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
4962 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
4963 &(cmd_data->status),
4964 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
4965 if (unlikely(ret) || unlikely(cmd_data->status)) {
4966 WL_ERR(("nan cancel publish failed ret = %d status = %d\n",
4967 ret, cmd_data->status));
4968 goto fail;
4969 }
4970 WL_DBG(("nan cancel publish successfull\n"));
4971 wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
4972 fail:
4973 if (nan_buf) {
4974 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
4975 }
4976
4977 NAN_MUTEX_UNLOCK();
4978 NAN_DBG_EXIT();
4979 return ret;
4980 }
4981
4982 int
4983 wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
4984 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
4985 {
4986 bcm_iov_batch_buf_t *nan_buf = NULL;
4987 s32 ret = BCME_OK;
4988 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
4989 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
4990
4991 NAN_DBG_ENTER();
4992 NAN_MUTEX_LOCK();
4993
4994 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
4995 if (!nan_buf) {
4996 WL_ERR(("%s: memory allocation failed\n", __func__));
4997 ret = BCME_NOMEM;
4998 goto fail;
4999 }
5000
5001 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5002 nan_buf->count = 0;
5003 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
5004
5005 /* proceed only if mandatory argument is present - subscriber id */
5006 if (!cmd_data->sub_id) {
5007 WL_ERR(("mandatory argument is not present\n"));
5008 ret = BCME_BADARG;
5009 goto fail;
5010 }
5011
5012 #ifdef WL_NAN_DISC_CACHE
5013 /* terminate ranging sessions for this svc */
5014 wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
5015 wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
5016 /* clear svc cache for the service */
5017 wl_cfgnan_clear_svc_cache(cfg, cmd_data->sub_id);
5018 wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
5019 #endif /* WL_NAN_DISC_CACHE */
5020
5021 ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE,
5022 &nan_buf->cmds[0], &nan_buf_size);
5023 if (unlikely(ret)) {
5024 WL_ERR(("cancel subscribe failed\n"));
5025 goto fail;
5026 }
5027 nan_buf->is_set = true;
5028 nan_buf->count++;
5029
5030 memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
5031 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
5032 &(cmd_data->status),
5033 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
5034 if (unlikely(ret) || unlikely(cmd_data->status)) {
5035 WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n",
5036 ret, cmd_data->status));
5037 goto fail;
5038 }
5039 WL_DBG(("subscribe cancel successfull\n"));
5040 wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
5041 fail:
5042 if (nan_buf) {
5043 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
5044 }
5045
5046 NAN_MUTEX_UNLOCK();
5047 NAN_DBG_EXIT();
5048 return ret;
5049 }
5050
5051 int
5052 wl_cfgnan_transmit_handler(struct net_device *ndev,
5053 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
5054 {
5055 s32 ret = BCME_OK;
5056 bcm_iov_batch_buf_t *nan_buf = NULL;
5057 wl_nan_sd_transmit_t *sd_xmit = NULL;
5058 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
5059 bool is_lcl_id = FALSE;
5060 bool is_dest_id = FALSE;
5061 bool is_dest_mac = FALSE;
5062 uint16 buflen_avail;
5063 uint8 *pxtlv;
5064 uint16 nan_buf_size;
5065 uint8 *resp_buf = NULL;
5066 /* Considering fixed params */
5067 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
5068 OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
5069 data_size = ALIGN_SIZE(data_size, 4);
5070 ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
5071 if (unlikely(ret)) {
5072 WL_ERR(("Failed to get alligned size of optional params\n"));
5073 goto fail;
5074 }
5075 NAN_DBG_ENTER();
5076 NAN_MUTEX_LOCK();
5077 nan_buf_size = data_size;
5078 nan_buf = MALLOCZ(cfg->osh, data_size);
5079 if (!nan_buf) {
5080 WL_ERR(("%s: memory allocation failed\n", __func__));
5081 ret = BCME_NOMEM;
5082 goto fail;
5083 }
5084
5085 resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
5086 if (!resp_buf) {
5087 WL_ERR(("%s: memory allocation failed\n", __func__));
5088 ret = BCME_NOMEM;
5089 goto fail;
5090 }
5091
5092 /* nan transmit */
5093 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5094 nan_buf->count = 0;
5095 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
5096 /*
5097 * proceed only if mandatory arguments are present - subscriber id,
5098 * publisher id, mac address
5099 */
5100 if ((!cmd_data->local_id) || (!cmd_data->remote_id) ||
5101 ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
5102 WL_ERR(("mandatory arguments are not present\n"));
5103 ret = -EINVAL;
5104 goto fail;
5105 }
5106
5107 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
5108 sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);
5109
5110 /* local instance id must be from 1 to 255, 0 is reserved */
5111 if (cmd_data->local_id == NAN_ID_RESERVED) {
5112 WL_ERR(("Invalid local instance id: %d\n", cmd_data->local_id));
5113 ret = BCME_BADARG;
5114 goto fail;
5115 }
5116 sd_xmit->local_service_id = cmd_data->local_id;
5117 is_lcl_id = TRUE;
5118
5119 /* remote instance id must be from 1 to 255, 0 is reserved */
5120 if (cmd_data->remote_id == NAN_ID_RESERVED) {
5121 WL_ERR(("Invalid remote instance id: %d\n", cmd_data->remote_id));
5122 ret = BCME_BADARG;
5123 goto fail;
5124 }
5125
5126 sd_xmit->requestor_service_id = cmd_data->remote_id;
5127 is_dest_id = TRUE;
5128
5129 if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
5130 ret = memcpy_s(&sd_xmit->destination_addr, ETHER_ADDR_LEN,
5131 &cmd_data->mac_addr, ETHER_ADDR_LEN);
5132 if (ret != BCME_OK) {
5133 WL_ERR(("Failed to copy dest mac address\n"));
5134 goto fail;
5135 }
5136 } else {
5137 WL_ERR(("Invalid ether addr provided\n"));
5138 ret = BCME_BADARG;
5139 goto fail;
5140 }
5141 is_dest_mac = TRUE;
5142
5143 if (cmd_data->priority) {
5144 sd_xmit->priority = cmd_data->priority;
5145 }
5146 sd_xmit->token = cmd_data->token;
5147
5148 if (cmd_data->recv_ind_flag) {
5149 /* BIT0 - If set, host wont rec event "txs" */
5150 if (CHECK_BIT(cmd_data->recv_ind_flag,
5151 WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT)) {
5152 sd_xmit->flags = WL_NAN_FUP_SUPR_EVT_TXS;
5153 }
5154 }
5155 /* Optional parameters: fill the sub_command block with service descriptor attr */
5156 sub_cmd->id = htod16(WL_NAN_CMD_SD_TRANSMIT);
5157 sub_cmd->len = sizeof(sub_cmd->u.options) +
5158 OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
5159 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
5160 pxtlv = (uint8 *)&sd_xmit->opt_tlv;
5161
5162 nan_buf_size -= (sub_cmd->len +
5163 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
5164
5165 buflen_avail = nan_buf_size;
5166
5167 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
5168 bcm_xtlv_t *pxtlv_svc_info = (bcm_xtlv_t *)pxtlv;
5169 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
5170 WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
5171 cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
5172 if (unlikely(ret)) {
5173 WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
5174 __FUNCTION__, ret));
5175 goto fail;
5176 }
5177
5178 /* 0xFF is max length for svc_info */
5179 if (pxtlv_svc_info->len > 0xFF) {
5180 WL_ERR(("Invalid service info length %d\n",
5181 (pxtlv_svc_info->len)));
5182 ret = BCME_USAGE_ERROR;
5183 goto fail;
5184 }
5185 sd_xmit->opt_len = (uint8)(pxtlv_svc_info->len);
5186 }
5187 if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
5188 WL_TRACE(("optional sdea svc_info present, pack it\n"));
5189 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
5190 WL_NAN_XTLV_SD_SDE_SVC_INFO, cmd_data->sde_svc_info.dlen,
5191 cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
5192 if (unlikely(ret)) {
5193 WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
5194 goto fail;
5195 }
5196 }
5197
5198 /* Check if all mandatory params are provided */
5199 if (is_lcl_id && is_dest_id && is_dest_mac) {
5200 nan_buf->count++;
5201 sub_cmd->len += (buflen_avail - nan_buf_size);
5202 } else {
5203 WL_ERR(("Missing parameters\n"));
5204 ret = BCME_USAGE_ERROR;
5205 }
5206 nan_buf->is_set = TRUE;
5207 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
5208 &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
5209 if (unlikely(ret) || unlikely(cmd_data->status)) {
5210 WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
5211 sd_xmit->token, ret, cmd_data->status));
5212 goto fail;
5213 }
5214 WL_INFORM_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
5215 fail:
5216 if (nan_buf) {
5217 MFREE(cfg->osh, nan_buf, data_size);
5218 }
5219 if (resp_buf) {
5220 MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
5221 }
5222 NAN_MUTEX_UNLOCK();
5223 NAN_DBG_EXIT();
5224 return ret;
5225 }
5226
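/*
 * The transmit handler above sizes its buffer in two steps: a fixed part
 * computed from the struct layout, and optional TLVs which
 * bcm_pack_xtlv_entry() appends while decrementing the remaining-space
 * counter. The bytes actually consumed by the TLVs are then recovered by
 * differencing. Illustrative sketch of that bookkeeping:
 *
 *	uint16 avail = nan_buf_size;		// before packing TLVs
 *	// bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size, ...) per TLV,
 *	// which advances pxtlv and shrinks nan_buf_size
 *	sub_cmd->len += (avail - nan_buf_size);	// add what was consumed
 */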
static int
wl_cfgnan_get_capability(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_fw_cap_t *fw_cap = NULL;
	uint16 subcmd_len;
	uint32 status;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	const bcm_xtlv_t *xtlv;
	uint16 type = 0;
	int len = 0;

	NAN_DBG_ENTER();
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
		sizeof(*fw_cap), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	fw_cap = (wl_nan_fw_cap_t *)sub_cmd->data;
	sub_cmd->id = htod16(WL_NAN_CMD_GEN_FW_CAP);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*fw_cap);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;

	nan_buf->is_set = false;
	memset(resp_buf, 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("get nan fw cap failed ret %d status %d\n",
			ret, status));
		goto fail;
	}

	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];

	/* check the response buff */
	xtlv = ((const bcm_xtlv_t *)&sub_cmd_resp->data[0]);
	if (!xtlv) {
		ret = BCME_NOTFOUND;
		WL_ERR(("xtlv not found: err = %d\n", ret));
		goto fail;
	}
	bcm_xtlv_unpack_xtlv(xtlv, &type, (uint16 *)&len, NULL, BCM_XTLV_OPTION_ALIGN32);
	do {
		switch (type) {
		case WL_NAN_XTLV_GEN_FW_CAP:
			if (len > sizeof(wl_nan_fw_cap_t)) {
				ret = BCME_BADARG;
				goto fail;
			}
			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
			fw_cap = (wl_nan_fw_cap_t *)xtlv->data;
			GCC_DIAGNOSTIC_POP();
			break;
		default:
			WL_ERR(("Unknown xtlv: id %u\n", type));
			ret = BCME_ERROR;
			break;
		}
		if (ret != BCME_OK) {
			goto fail;
		}
	} while ((xtlv = bcm_next_xtlv(xtlv, &len, BCM_XTLV_OPTION_ALIGN32)));

	memset(capabilities, 0, sizeof(nan_hal_capabilities_t));
	capabilities->max_publishes = fw_cap->max_svc_publishes;
	capabilities->max_subscribes = fw_cap->max_svc_subscribes;
	capabilities->max_ndi_interfaces = fw_cap->max_lcl_ndi_interfaces;
	capabilities->max_ndp_sessions = fw_cap->max_ndp_sessions;
	capabilities->max_concurrent_nan_clusters = fw_cap->max_concurrent_nan_clusters;
	capabilities->max_service_name_len = fw_cap->max_service_name_len;
	capabilities->max_match_filter_len = fw_cap->max_match_filter_len;
	capabilities->max_total_match_filter_len = fw_cap->max_total_match_filter_len;
	capabilities->max_service_specific_info_len = fw_cap->max_service_specific_info_len;
	capabilities->max_app_info_len = fw_cap->max_app_info_len;
	capabilities->max_sdea_service_specific_info_len = fw_cap->max_sdea_svc_specific_info_len;
	capabilities->max_queued_transmit_followup_msgs = fw_cap->max_queued_tx_followup_msgs;
	capabilities->max_subscribe_address = fw_cap->max_subscribe_address;
	capabilities->is_ndp_security_supported = fw_cap->is_ndp_security_supported;
	capabilities->ndp_supported_bands = fw_cap->ndp_supported_bands;
	capabilities->cipher_suites_supported = fw_cap->cipher_suites_supported_mask;
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}

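/*
 * Responses to "get" batches come back as an XTLV chain inside the first
 * subcommand; the capability query above walks it with
 * bcm_xtlv_unpack_xtlv()/bcm_next_xtlv(). A minimal sketch of that walk,
 * assuming a response laid out as in this file:
 *
 *	const bcm_xtlv_t *x = (const bcm_xtlv_t *)&sub_cmd_resp->data[0];
 *	uint16 type;
 *	int len;
 *
 *	bcm_xtlv_unpack_xtlv(x, &type, (uint16 *)&len, NULL,
 *		BCM_XTLV_OPTION_ALIGN32);
 *	do {
 *		// dispatch on 'type'; validate 'len' against the dest struct
 *	} while ((x = bcm_next_xtlv(x, &len, BCM_XTLV_OPTION_ALIGN32)));
 */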
int
wl_cfgnan_get_capablities_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
{
	s32 ret = BCME_OK;
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

	NAN_DBG_ENTER();

	/* Do not query fw about nan if the feature is not supported */
	if (!FW_SUPPORTED(dhdp, nan)) {
		WL_DBG(("NAN is not supported\n"));
		return ret;
	}

	if (cfg->nan_init_state) {
		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
		if (ret != BCME_OK) {
			WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
				cfg->nan_init_state, ret));
			goto exit;
		}
	} else {
		/* Initialize NAN before sending the iovar */
		WL_ERR(("Initializing NAN\n"));
		ret = wl_cfgnan_init(cfg);
		if (ret != BCME_OK) {
			WL_ERR(("failed to initialize NAN[%d]\n", ret));
			goto fail;
		}

		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
		if (ret != BCME_OK) {
			WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
				cfg->nan_init_state, ret));
			goto exit;
		}
		WL_ERR(("De-Initializing NAN\n"));
		ret = wl_cfgnan_deinit(cfg, dhdp->up);
		if (ret != BCME_OK) {
			WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
			goto fail;
		}
	}
fail:
	NAN_DBG_EXIT();
	return ret;
exit:
	/* Keeping backward compatibility */
	capabilities->max_concurrent_nan_clusters = MAX_CONCURRENT_NAN_CLUSTERS;
	capabilities->max_publishes = MAX_PUBLISHES;
	capabilities->max_subscribes = MAX_SUBSCRIBES;
	capabilities->max_service_name_len = MAX_SVC_NAME_LEN;
	capabilities->max_match_filter_len = MAX_MATCH_FILTER_LEN;
	capabilities->max_total_match_filter_len = MAX_TOTAL_MATCH_FILTER_LEN;
	capabilities->max_service_specific_info_len = NAN_MAX_SERVICE_SPECIFIC_INFO_LEN;
	capabilities->max_ndi_interfaces = MAX_NDI_INTERFACES;
	capabilities->max_ndp_sessions = MAX_NDP_SESSIONS;
	capabilities->max_app_info_len = MAX_APP_INFO_LEN;
	capabilities->max_queued_transmit_followup_msgs = MAX_QUEUED_TX_FOLLOUP_MSGS;
	capabilities->max_sdea_service_specific_info_len = MAX_SDEA_SVC_INFO_LEN;
	capabilities->max_subscribe_address = MAX_SUBSCRIBE_ADDRESS;
	capabilities->cipher_suites_supported = WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK;
	capabilities->max_scid_len = MAX_SCID_LEN;
	capabilities->is_ndp_security_supported = true;
	capabilities->ndp_supported_bands = NDP_SUPPORTED_BANDS;
	ret = BCME_OK;
	NAN_DBG_EXIT();
	return ret;
}

bool wl_cfgnan_check_state(struct bcm_cfg80211 *cfg)
{
	return cfg->nan_enable;
}

int
wl_cfgnan_init(struct bcm_cfg80211 *cfg)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t *)buf;

	NAN_DBG_ENTER();
	if (cfg->nan_init_state) {
		WL_ERR(("nan already initialized/nmi exists\n"));
		return BCME_OK;
	}
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, true);
	if (unlikely(ret)) {
		WL_ERR(("init handler sub_cmd set failed\n"));
		goto fail;
	}
	nan_buf->count++;
	nan_buf->is_set = true;

	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
		nan_buf, nan_buf_size, &status,
		(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("nan init handler failed ret %d status %d\n",
			ret, status));
		goto fail;
	}

#ifdef WL_NAN_DISC_CACHE
	/* allocate the disc result cache */
	cfg->nan_disc_cache = MALLOCZ(cfg->osh,
		NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
	if (!cfg->nan_disc_cache) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
#endif /* WL_NAN_DISC_CACHE */
	cfg->nan_init_state = true;
	return ret;
fail:
	NAN_DBG_EXIT();
	return ret;
}

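/*
 * wl_cfgnan_init() and wl_cfgnan_deinit() bracket the firmware-side NAN
 * state. A sketch of the expected pairing, assuming the call pattern used
 * by wl_cfgnan_get_capablities_handler() above when NAN is queried before
 * it has been brought up:
 *
 *	if (!cfg->nan_init_state) {
 *		wl_cfgnan_init(cfg);			// bring up NMI/fw state
 *		// ... issue NAN iovars ...
 *		wl_cfgnan_deinit(cfg, dhdp->up);	// tear down again
 *	}
 */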
void
wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg)
{
	uint8 i = 0;
	cfg->nan_dp_count = 0;
	cfg->nan_init_state = false;
#ifdef WL_NAN_DISC_CACHE
	if (cfg->nan_disc_cache) {
		for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
			if (cfg->nan_disc_cache[i].tx_match_filter.data) {
				MFREE(cfg->osh, cfg->nan_disc_cache[i].tx_match_filter.data,
					cfg->nan_disc_cache[i].tx_match_filter.dlen);
			}
			if (cfg->nan_disc_cache[i].svc_info.data) {
				MFREE(cfg->osh, cfg->nan_disc_cache[i].svc_info.data,
					cfg->nan_disc_cache[i].svc_info.dlen);
			}
		}
		MFREE(cfg->osh, cfg->nan_disc_cache,
			NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
		cfg->nan_disc_cache = NULL;
	}
	cfg->nan_disc_count = 0;
	memset_s(cfg->svc_info, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t),
		0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
	memset_s(cfg->nan_ranging_info, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t),
		0, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
#endif /* WL_NAN_DISC_CACHE */
	return;
}

int
wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t *)buf;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	if (!cfg->nan_init_state) {
		WL_ERR(("nan is not initialized/nmi doesn't exist\n"));
		ret = BCME_OK;
		goto fail;
	}

	if (busstate != DHD_BUS_DOWN) {
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

		WL_DBG(("nan deinit\n"));
		ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, false);
		if (unlikely(ret)) {
			WL_ERR(("deinit handler sub_cmd set failed\n"));
		} else {
			nan_buf->count++;
			nan_buf->is_set = true;
			memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
			ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg,
				nan_buf, nan_buf_size, &status,
				(void *)resp_buf, NAN_IOCTL_BUF_SIZE);
			if (unlikely(ret) || unlikely(status)) {
				WL_ERR(("nan deinit handler failed ret %d status %d\n",
					ret, status));
			}
		}
	}
	wl_cfgnan_deinit_cleanup(cfg);

fail:
	if (!cfg->nancfg.mac_rand && !ETHER_ISNULLADDR(cfg->nan_nmi_mac)) {
		wl_release_vif_macaddr(cfg, cfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
	}
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr)
{
	int i = 0;
	int ret = BCME_OK;
	bool rand_mac = cfg->nancfg.mac_rand;
	BCM_REFERENCE(i);

	if (rand_mac) {
		/* ensure nmi != ndi */
		do {
			RANDOM_BYTES(mac_addr, ETHER_ADDR_LEN);
			/* restore mcast and local admin bits to 0 and 1 */
			ETHER_SET_UNICAST(mac_addr);
			ETHER_SET_LOCALADDR(mac_addr);
			i++;
			if (i == NAN_RAND_MAC_RETRIES) {
				break;
			}
		} while (eacmp(cfg->nan_nmi_mac, mac_addr) == 0);

		if (i == NAN_RAND_MAC_RETRIES) {
			if (eacmp(cfg->nan_nmi_mac, mac_addr) == 0) {
				WL_ERR(("Couldn't generate a random NDI that differs from the NMI\n"));
				ret = BCME_NORESOURCE;
				goto fail;
			}
		}
	} else {
		if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN,
			mac_addr) != BCME_OK) {
			ret = -EINVAL;
			WL_ERR(("Failed to get mac addr for NDI\n"));
			goto fail;
		}
	}

fail:
	return ret;
}

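/*
 * A randomized NDI address must be unicast and locally administered,
 * which is what ETHER_SET_UNICAST() and ETHER_SET_LOCALADDR() enforce on
 * the first octet. Conceptually (illustrative, not a literal excerpt):
 *
 *	mac[0] &= ~0x01;	// clear I/G bit -> unicast
 *	mac[0] |=  0x02;	// set U/L bit  -> locally administered
 *
 * The retry loop above additionally guarantees that the generated NDI
 * differs from the NMI already in use.
 */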
int
wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate)
{
	u8 mac_addr[ETH_ALEN];
	s32 ret = BCME_OK;
	s32 idx;
	struct wireless_dev *wdev;
	NAN_DBG_ENTER();

	if (busstate != DHD_BUS_DOWN) {
		if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
			if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
				WL_ERR(("No free idx for NAN NDI\n"));
				ret = BCME_NORESOURCE;
				goto fail;
			}

			ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
			if (ret != BCME_OK) {
				WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
				goto fail;
			}
			wdev = wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
				ifname, mac_addr);
			if (!wdev) {
				ret = -ENODEV;
				WL_ERR(("Failed to create NDI iface = %s, wdev is NULL\n", ifname));
				goto fail;
			}
			/* Store the iface name in the pub data so that it can be
			 * used during NAN enable
			 */
			wl_cfgnan_add_ndi_data(cfg, idx, ifname);
			cfg->nancfg.ndi[idx].created = true;
			/* Store the nan ndev */
			cfg->nancfg.ndi[idx].nan_ndev = wdev_to_ndev(wdev);

		} else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
			ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
			if (ret == BCME_OK) {
				if (wl_cfgnan_del_ndi_data(cfg, ifname) < 0) {
					WL_ERR(("Failed to find matching data for ndi:%s\n",
						ifname));
				}
			} else if (ret == -ENODEV) {
				WL_INFORM(("Already deleted: %s\n", ifname));
				ret = BCME_OK;
			} else if (ret != BCME_OK) {
				WL_ERR(("failed to delete NDI[%d]\n", ret));
			}
		}
	} else {
		ret = -ENODEV;
		WL_ERR(("Bus is already down, no dev found to remove, ret = %d\n", ret));
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}

/*
 * Returns the data peer entry matching peer_addr
 * from the peer list, or NULL if not found.
 */
nan_ndp_peer_t *
wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr)
{
	uint8 i;
	nan_ndp_peer_t *peer = cfg->nancfg.nan_ndp_peer_info;

	if (!peer) {
		WL_ERR(("wl_cfgnan_data_get_peer: nan_ndp_peer_info is NULL\n"));
		goto exit;
	}
	for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
		if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED &&
			(!memcmp(peer_addr, &peer[i].peer_addr, ETHER_ADDR_LEN))) {
			return &peer[i];
		}
	}

exit:
	return NULL;
}

/*
 * Returns TRUE if a datapath exists on this nan cfg
 * with any peer.
 */
bool
wl_cfgnan_data_dp_exists(struct bcm_cfg80211 *cfg)
{
	bool ret = FALSE;
	uint8 i;
	nan_ndp_peer_t *peer = NULL;

	if ((cfg->nan_init_state == FALSE) ||
		(cfg->nan_enable == FALSE)) {
		goto exit;
	}

	if (!cfg->nancfg.nan_ndp_peer_info) {
		goto exit;
	}

	peer = cfg->nancfg.nan_ndp_peer_info;
	for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
		if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED) {
			ret = TRUE;
			break;
		}
	}

exit:
	return ret;
}

/*
 * Returns TRUE if a datapath exists on this nan cfg
 * with the given peer.
 */
bool
wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr)
{
	bool ret = FALSE;
	nan_ndp_peer_t *peer = NULL;

	if ((cfg->nan_init_state == FALSE) ||
		(cfg->nan_enable == FALSE)) {
		goto exit;
	}

	/* check if the peer exists */
	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
	if (peer) {
		ret = TRUE;
	}

exit:
	return ret;
}

/*
 * As of now this API is only used to set the state
 * to CONNECTED, where applicable.
 */
void
wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr, nan_peer_dp_state_t state)
{
	nan_ndp_peer_t *peer = NULL;
	/* check if the peer exists */
	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
	if (!peer) {
		goto end;
	}
	peer->peer_dp_state = state;
end:
	return;
}

/* Adds a peer to the nan data peer list */
void
wl_cfgnan_data_add_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr)
{
	uint8 i;
	nan_ndp_peer_t *peer = NULL;
	/* check if the peer already exists */
	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
	if (peer) {
		peer->dp_count++;
		goto end;
	}
	peer = cfg->nancfg.nan_ndp_peer_info;
	for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
		if (peer[i].peer_dp_state == NAN_PEER_DP_NOT_CONNECTED) {
			break;
		}
	}
	if (i == cfg->nancfg.max_ndp_count) {
		WL_DBG(("DP peer list full, dropping add peer req\n"));
		goto end;
	}
	/* Add the peer to the list */
	memcpy(&peer[i].peer_addr, peer_addr, ETHER_ADDR_LEN);
	peer[i].dp_count = 1;
	peer[i].peer_dp_state = NAN_PEER_DP_CONNECTING;

end:
	return;
}

/* Removes a nan data peer from the peer list */
void
wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr)
{
	nan_ndp_peer_t *peer = NULL;
	/* check if the peer exists */
	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
	if (!peer) {
		WL_DBG(("DP peer not present in list, "
			"dropping remove peer req\n"));
		goto end;
	}
	peer->dp_count--;
	if (peer->dp_count == 0) {
		/* No more NDPs, delete the entry */
		memset(peer, 0, sizeof(nan_ndp_peer_t));
	} else {
		/* Set peer dp state to connected if any ndp still exists */
		peer->peer_dp_state = NAN_PEER_DP_CONNECTED;
	}
end:
	return;
}

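/*
 * The peer list above is a small table keyed by MAC address, with
 * dp_count acting as a reference count of NDPs to that peer. A typical
 * lifecycle, illustrated in terms of the helpers above:
 *
 *	wl_cfgnan_data_add_peer(cfg, &peer);	// dp_count = 1, CONNECTING
 *	wl_cfgnan_data_set_peer_dp_state(cfg, &peer, NAN_PEER_DP_CONNECTED);
 *	wl_cfgnan_data_add_peer(cfg, &peer);	// second NDP, dp_count = 2
 *	wl_cfgnan_data_remove_peer(cfg, &peer);	// dp_count = 1, stays CONNECTED
 *	wl_cfgnan_data_remove_peer(cfg, &peer);	// dp_count = 0, entry cleared
 */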
int
wl_cfgnan_data_path_request_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
	uint8 *ndp_instance_id)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	wl_nan_dp_req_t *datareq = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 buflen_avail;
	uint8 *pxtlv;
	struct wireless_dev *wdev;
	uint16 nan_buf_size;
	uint8 *resp_buf = NULL;
	/* Considering fixed params */
	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
		OFFSETOF(wl_nan_dp_req_t, tlv_params);
	data_size = ALIGN_SIZE(data_size, 4);

	ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
	if (unlikely(ret)) {
		WL_ERR(("Failed to get aligned size of optional params\n"));
		goto fail;
	}

	nan_buf_size = data_size;
	NAN_DBG_ENTER();

	mutex_lock(&cfg->if_sync);
	NAN_MUTEX_LOCK();
#ifdef WL_IFACE_MGMT
	if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
		WL_ERR(("Conflicting iface found to be active\n"));
		ret = BCME_UNSUPPORTED;
		goto fail;
	}
#endif /* WL_IFACE_MGMT */

#ifdef RTT_SUPPORT
	/* cancel any ongoing RTT session with the peer,
	 * as we do not support DP and RNG to the same peer
	 */
	wl_cfgnan_clear_peer_ranging(cfg, &cmd_data->mac_addr,
		RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
#endif /* RTT_SUPPORT */

	nan_buf = MALLOCZ(cfg->osh, data_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
	if (!resp_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
		cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
	if (unlikely(ret)) {
		WL_ERR(("Failed to set avail value with type local\n"));
		goto fail;
	}

	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
		cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
	if (unlikely(ret)) {
		WL_ERR(("Failed to set avail value with type ndc\n"));
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
	datareq = (wl_nan_dp_req_t *)(sub_cmd->data);

	/* setting default data path type to unicast */
	datareq->type = WL_NAN_DP_TYPE_UNICAST;

	if (cmd_data->pub_id) {
		datareq->pub_id = cmd_data->pub_id;
	}

	if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
		ret = memcpy_s(&datareq->peer_mac, ETHER_ADDR_LEN,
			&cmd_data->mac_addr, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy ether addr provided\n"));
			goto fail;
		}
	} else {
		WL_ERR(("Invalid ether addr provided\n"));
		ret = BCME_BADARG;
		goto fail;
	}

	/* Retrieve mac from the given iface name */
	wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
		(char *)cmd_data->ndp_iface);
	if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
		ret = -EINVAL;
		WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
			(char *)cmd_data->ndp_iface));
		goto fail;
	}

	if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
		ret = memcpy_s(&datareq->ndi, ETHER_ADDR_LEN,
			wdev->netdev->dev_addr, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy ether addr provided\n"));
			goto fail;
		}
		WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
			__FUNCTION__, MAC2STRDBG(datareq->ndi.octet)));
	} else {
		WL_ERR(("Invalid NDI addr retrieved\n"));
		ret = BCME_BADARG;
		goto fail;
	}

	datareq->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
	datareq->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;

	/* Fill the sub_command block */
	sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAREQ);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		OFFSETOF(wl_nan_dp_req_t, tlv_params);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	pxtlv = (uint8 *)&datareq->tlv_params;

	nan_buf_size -= (sub_cmd->len +
		OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
	buflen_avail = nan_buf_size;

	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
			WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
			cmd_data->svc_info.data,
			BCM_XTLV_OPTION_ALIGN32);
		if (ret != BCME_OK) {
			WL_ERR(("unable to process svc_spec_info: %d\n", ret));
			goto fail;
		}
		datareq->flags |= WL_NAN_DP_FLAG_SVC_INFO;
	}

	/* Security elements */

	if (cmd_data->csid) {
		WL_TRACE(("Cipher suite type is present, pack it\n"));
		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
			WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
			(uint8 *)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
		if (unlikely(ret)) {
			WL_ERR(("%s: failed to pack csid\n", __FUNCTION__));
			goto fail;
		}
	}

	if (cmd_data->ndp_cfg.security_cfg) {
		if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
			(cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
			if (cmd_data->key.data && cmd_data->key.dlen) {
				WL_TRACE(("optional pmk present, pack it\n"));
				ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
					WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
					cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
				if (unlikely(ret)) {
					WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
						__FUNCTION__));
					goto fail;
				}
			}
		} else {
			WL_ERR(("Invalid security key type\n"));
			ret = BCME_BADARG;
			goto fail;
		}

		if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
			(cmd_data->svc_hash.data)) {
			WL_TRACE(("svc hash present, pack it\n"));
			ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
				WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
				cmd_data->svc_hash.data, BCM_XTLV_OPTION_ALIGN32);
			if (ret != BCME_OK) {
				WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
					__FUNCTION__));
				goto fail;
			}
		} else {
#ifdef WL_NAN_DISC_CACHE
			/* check in the cache */
			nan_disc_result_cache *cache;
			cache = wl_cfgnan_get_disc_result(cfg,
				datareq->pub_id, &datareq->peer_mac);
			if (!cache) {
				ret = BCME_ERROR;
				WL_ERR(("invalid svc hash data or length = %d\n",
					cmd_data->svc_hash.dlen));
				goto fail;
			}
			WL_TRACE(("svc hash present, pack it\n"));
			ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
				WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
				cache->svc_hash, BCM_XTLV_OPTION_ALIGN32);
			if (ret != BCME_OK) {
				WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
					__FUNCTION__));
				goto fail;
			}
#else
			ret = BCME_ERROR;
			WL_ERR(("invalid svc hash data or length = %d\n",
				cmd_data->svc_hash.dlen));
			goto fail;
#endif /* WL_NAN_DISC_CACHE */
		}
		/* If the data req is for a secure data connection */
		datareq->flags |= WL_NAN_DP_FLAG_SECURITY;
	}

	sub_cmd->len += (buflen_avail - nan_buf_size);
	nan_buf->is_set = false;
	nan_buf->count++;

	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
		&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan data path request handler failed, ret = %d status %d\n",
			ret, cmd_data->status));
		goto fail;
	}

	/* check the response buff */
	if (ret == BCME_OK) {
		ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
			ndp_instance_id, WL_NAN_CMD_DATA_DATAREQ);
		cmd_data->ndp_instance_id = *ndp_instance_id;
	}
	WL_INFORM_MEM(("[NAN] DP request successful (ndp_id:%d)\n",
		cmd_data->ndp_instance_id));
	/* Add the peer to the data ndp peer list */
	wl_cfgnan_data_add_peer(cfg, &datareq->peer_mac);

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, data_size);
	}

	if (resp_buf) {
		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	}
	NAN_MUTEX_UNLOCK();
	mutex_unlock(&cfg->if_sync);
	NAN_DBG_EXIT();
	return ret;
}

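/*
 * The request handler above packs, in order, the fixed wl_nan_dp_req_t
 * fields and then the optional TLVs (service info, CSID, PMK/passphrase,
 * service hash). Per the checks above, the minimum a caller must supply
 * for a secured datapath is roughly the following sketch; the interface
 * name shown is a hypothetical example, not a name this driver mandates:
 *
 *	cmd_data->mac_addr = <peer NMI>;		// mandatory
 *	cmd_data->ndp_iface = "aware_data0";		// hypothetical NDI name
 *	cmd_data->ndp_cfg.security_cfg = 1;
 *	cmd_data->key_type = NAN_SECURITY_KEY_INPUT_PMK;
 *	// cmd_data->key.data/dlen and cmd_data->csid carry the key material
 *	// and cipher suite; svc_hash may come from the caller or be looked
 *	// up in the discovery cache
 */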
int
wl_cfgnan_data_path_response_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	wl_nan_dp_resp_t *dataresp = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 buflen_avail;
	uint8 *pxtlv;
	struct wireless_dev *wdev;
	uint16 nan_buf_size;
	uint8 *resp_buf = NULL;

	/* Considering fixed params */
	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
		OFFSETOF(wl_nan_dp_resp_t, tlv_params);
	data_size = ALIGN_SIZE(data_size, 4);
	ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
	if (unlikely(ret)) {
		WL_ERR(("Failed to get aligned size of optional params\n"));
		goto fail;
	}
	nan_buf_size = data_size;

	NAN_DBG_ENTER();

	mutex_lock(&cfg->if_sync);
	NAN_MUTEX_LOCK();
#ifdef WL_IFACE_MGMT
	if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
		WL_ERR(("Conflicting iface found to be active\n"));
		ret = BCME_UNSUPPORTED;
		goto fail;
	}
#endif /* WL_IFACE_MGMT */

	nan_buf = MALLOCZ(cfg->osh, data_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
	if (!resp_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
		cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
	if (unlikely(ret)) {
		WL_ERR(("Failed to set avail value with type local\n"));
		goto fail;
	}

	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
		cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
	if (unlikely(ret)) {
		WL_ERR(("Failed to set avail value with type ndc\n"));
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
	dataresp = (wl_nan_dp_resp_t *)(sub_cmd->data);

	/* Setting default data path type to unicast */
	dataresp->type = WL_NAN_DP_TYPE_UNICAST;
	/* Changing the status value as per the fw convention */
	dataresp->status = cmd_data->rsp_code ^= 1;
	dataresp->reason_code = 0;

	/* ndp instance id must be from 1 to 255, 0 is reserved */
	if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
		cmd_data->ndp_instance_id > NAN_ID_MAX) {
		WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
		ret = BCME_BADARG;
		goto fail;
	}
	dataresp->ndp_id = cmd_data->ndp_instance_id;

	/* Retrieved initiator ndi from NanDataPathRequestInd */
	if (!ETHER_ISNULLADDR(&cfg->initiator_ndi.octet)) {
		ret = memcpy_s(&dataresp->mac_addr, ETHER_ADDR_LEN,
			&cfg->initiator_ndi, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy initiator ndi\n"));
			goto fail;
		}
	} else {
		WL_ERR(("Invalid ether addr retrieved\n"));
		ret = BCME_BADARG;
		goto fail;
	}

	/* The interface is not mandatory when it is a reject from the framework */
	if (dataresp->status != WL_NAN_DP_STATUS_REJECTED) {
		/* Retrieve mac from the given iface name */
		wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
			(char *)cmd_data->ndp_iface);
		if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
			ret = -EINVAL;
			WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
				(char *)cmd_data->ndp_iface));
			goto fail;
		}

		if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
			ret = memcpy_s(&dataresp->ndi, ETHER_ADDR_LEN,
				wdev->netdev->dev_addr, ETHER_ADDR_LEN);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy responder ndi\n"));
				goto fail;
			}
			WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
				__FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
		} else {
			WL_ERR(("Invalid NDI addr retrieved\n"));
			ret = BCME_BADARG;
			goto fail;
		}
	}

	dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
	dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;

	/* Fill the sub_command block */
	sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATARESP);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		OFFSETOF(wl_nan_dp_resp_t, tlv_params);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	pxtlv = (uint8 *)&dataresp->tlv_params;

	nan_buf_size -= (sub_cmd->len +
		OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
	buflen_avail = nan_buf_size;

	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
			WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
			cmd_data->svc_info.data,
			BCM_XTLV_OPTION_ALIGN32);
		if (ret != BCME_OK) {
			WL_ERR(("unable to process svc_spec_info: %d\n", ret));
			goto fail;
		}
		dataresp->flags |= WL_NAN_DP_FLAG_SVC_INFO;
	}

	/* Security elements */
	if (cmd_data->csid) {
		WL_TRACE(("Cipher suite type is present, pack it\n"));
		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
			WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
			(uint8 *)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
		if (unlikely(ret)) {
			WL_ERR(("%s: failed to pack csid\n", __FUNCTION__));
			goto fail;
		}
	}

	if (cmd_data->ndp_cfg.security_cfg) {
		if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
			(cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
			if (cmd_data->key.data && cmd_data->key.dlen) {
				WL_TRACE(("optional pmk present, pack it\n"));
				ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
					WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
					cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
				if (unlikely(ret)) {
					WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
						__FUNCTION__));
					goto fail;
				}
			}
		} else {
			WL_ERR(("Invalid security key type\n"));
			ret = BCME_BADARG;
			goto fail;
		}

		if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
			(cmd_data->svc_hash.data)) {
			WL_TRACE(("svc hash present, pack it\n"));
			ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
				WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
				cmd_data->svc_hash.data,
				BCM_XTLV_OPTION_ALIGN32);
			if (ret != BCME_OK) {
				WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
					__FUNCTION__));
				goto fail;
			}
		}
		/* If the data resp is for a secure data connection */
		dataresp->flags |= WL_NAN_DP_FLAG_SECURITY;
	}

	sub_cmd->len += (buflen_avail - nan_buf_size);

	nan_buf->is_set = false;
	nan_buf->count++;
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
		&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan data path response handler failed, error = %d, status %d\n",
			ret, cmd_data->status));
		goto fail;
	}

	WL_INFORM_MEM(("[NAN] DP response successful (ndp_id:%d)\n", dataresp->ndp_id));

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, data_size);
	}

	if (resp_buf) {
		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	}
	NAN_MUTEX_UNLOCK();
	mutex_unlock(&cfg->if_sync);

	NAN_DBG_EXIT();
	return ret;
}

int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
	int *status)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	wl_nan_dp_end_t *dataend = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	if (!dhdp->up) {
		WL_ERR(("bus is already down, hence blocking nan dp end\n"));
		ret = BCME_OK;
		goto fail;
	}

	if (!cfg->nan_enable) {
		WL_ERR(("nan is not enabled, nan dp end blocked\n"));
		ret = BCME_OK;
		goto fail;
	}

	/* ndp instance id must be from 1 to 255, 0 is reserved */
	if (ndp_instance_id < NAN_ID_MIN ||
		ndp_instance_id > NAN_ID_MAX) {
		WL_ERR(("Invalid ndp instance id: %d\n", ndp_instance_id));
		ret = BCME_BADARG;
		goto fail;
	}

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
	dataend = (wl_nan_dp_end_t *)(sub_cmd->data);

	/* Fill the sub_cmd block */
	sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAEND);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		sizeof(*dataend);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	dataend->lndp_id = ndp_instance_id;

	/*
	 * Currently fw requires ndp_id and a reason code to end the data path,
	 * but wifi_nan.h takes ndp_instances_count and ndp_id.
	 * Keep reason = accept always.
	 */

	dataend->status = 1;

	nan_buf->is_set = true;
	nan_buf->count++;

	nan_buf_size -= (sub_cmd->len +
		OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
		status, (void *)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("nan data path end handler failed, error = %d status %d\n",
			ret, *status));
		goto fail;
	}
	WL_INFORM_MEM(("[NAN] DP end successful (ndp_id:%d)\n",
		dataend->lndp_id));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}

	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

#ifdef WL_NAN_DISC_CACHE
int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
	nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp)
{
	s32 ret = BCME_NOTFOUND;
	/* check in the cache */
	nan_disc_result_cache *disc_cache = NULL;
	nan_svc_info_t *svc_info = NULL;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	if (!cfg->nan_init_state) {
		WL_ERR(("nan is not initialized/nmi doesn't exist\n"));
		ret = BCME_NOTENABLED;
		goto fail;
	}

	/* datapath request context */
	if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
		disc_cache = wl_cfgnan_get_disc_result(cfg,
			cmd_data->pub_id, &cmd_data->mac_addr);
		WL_DBG(("datapath request: PUB ID: = %d\n",
			cmd_data->pub_id));
		if (disc_cache) {
			(void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
				disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
			ret = BCME_OK;
		} else {
			WL_ERR(("disc_cache is NULL\n"));
			goto fail;
		}
	}

	/* datapath response context */
	if (cmd_data->ndp_instance_id) {
		WL_DBG(("datapath response: NDP ID: = %d\n",
			cmd_data->ndp_instance_id));
		svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
		/* Note: svc_info will not be present in OOB cases.
		 * In such a case send the NMI alone and let HAL handle it if
		 * svc_hash is mandatory.
		 */
		if (svc_info) {
			WL_DBG(("svc hash present, pack it\n"));
			(void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
				svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
		} else {
			WL_INFORM_MEM(("svc_info not present..assuming OOB DP\n"));
		}
		/* Always send the NMI */
		(void)memcpy_s(nan_req_resp->pub_nmi, ETHER_ADDR_LEN,
			cfg->nan_nmi_mac, ETHER_ADDR_LEN);
		ret = BCME_OK;
	}
fail:
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

static s32 wl_nan_cache_to_event_data(nan_disc_result_cache *cache,
	nan_event_data_t *nan_event_data, osl_t *osh)
{
	s32 ret = BCME_OK;
	NAN_DBG_ENTER();

	nan_event_data->pub_id = cache->pub_id;
	nan_event_data->sub_id = cache->sub_id;
	nan_event_data->publish_rssi = cache->publish_rssi;
	nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
	ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
		&cache->peer, ETHER_ADDR_LEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy cached peer nan nmi\n"));
		goto fail;
	}

	if (cache->svc_info.dlen && cache->svc_info.data) {
		nan_event_data->svc_info.dlen = cache->svc_info.dlen;
		nan_event_data->svc_info.data =
			MALLOCZ(osh, nan_event_data->svc_info.dlen);
		if (!nan_event_data->svc_info.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			nan_event_data->svc_info.dlen = 0;
			ret = -ENOMEM;
			goto fail;
		}
		ret = memcpy_s(nan_event_data->svc_info.data, nan_event_data->svc_info.dlen,
			cache->svc_info.data, cache->svc_info.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy cached svc info data\n"));
			goto fail;
		}
	}
	if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
		nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
		nan_event_data->tx_match_filter.data =
			MALLOCZ(osh, nan_event_data->tx_match_filter.dlen);
		if (!nan_event_data->tx_match_filter.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			nan_event_data->tx_match_filter.dlen = 0;
			ret = -ENOMEM;
			goto fail;
		}
		ret = memcpy_s(nan_event_data->tx_match_filter.data,
			nan_event_data->tx_match_filter.dlen,
			cache->tx_match_filter.data, cache->tx_match_filter.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy cached tx match filter data\n"));
			goto fail;
		}
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}
#endif /* WL_NAN_DISC_CACHE */

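/*
 * Note on ownership: wl_nan_cache_to_event_data() above deep-copies the
 * cached svc_info and tx_match_filter buffers into nan_event_data, so the
 * event path owns its copies (presumably released by the event-data
 * cleanup path). On an allocation failure the corresponding dlen field is
 * zeroed before bailing out, so a stale length can never pair with a NULL
 * data pointer.
 */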
/* API to cancel ranging with a peer.
 * For a geofence initiator, suspend ranging.
 * For a directed RTT initiator, report a fail result, cancel ranging
 * and clear the ranging instance.
 * For a responder, cancel ranging and clear the ranging instance.
 */
#ifdef RTT_SUPPORT
static s32
wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer, int reason)
{
	uint32 status = 0;
	nan_ranging_inst_t *rng_inst = NULL;
	int err = BCME_OK;
	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);

	rng_inst = wl_cfgnan_check_for_ranging(cfg, peer);
	if (rng_inst) {
		if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
			err = wl_cfgnan_suspend_geofence_rng_session(ndev,
				peer, reason, 0);
		} else {
			if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
				dhd_rtt_handle_nan_rtt_session_end(dhdp,
					peer);
			}
			/* responder */
			err = wl_cfgnan_cancel_ranging(ndev, cfg,
				rng_inst->range_id,
				NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
			bzero(rng_inst, sizeof(*rng_inst));
		}
	}

	if (err) {
		WL_ERR(("Failed to stop ranging with peer %d\n", err));
	}

	return err;
}
#endif /* RTT_SUPPORT */

static s32
wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
	uint16 data_len, uint16 *tlvs_offset,
	uint16 *nan_opts_len, uint32 event_num,
	int *hal_event_id, nan_event_data_t *nan_event_data)
{
	s32 ret = BCME_OK;
	uint8 i;
	wl_nan_ev_datapath_cmn_t *ev_dp;
	nan_svc_info_t *svc_info;
	bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
#ifdef RTT_SUPPORT
	nan_ranging_inst_t *rng_inst = NULL;
#endif /* RTT_SUPPORT */

	if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
		ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
		NAN_DBG_ENTER();

		BCM_REFERENCE(svc_info);
		BCM_REFERENCE(i);
		/* Mapping to the common struct between DHD and HAL */
		WL_TRACE(("Event type: %d\n", ev_dp->type));
		nan_event_data->type = ev_dp->type;
		WL_TRACE(("pub_id: %d\n", ev_dp->pub_id));
		nan_event_data->pub_id = ev_dp->pub_id;
		WL_TRACE(("security: %d\n", ev_dp->security));
		nan_event_data->security = ev_dp->security;

		/* Store initiator_ndi, required for data_path_response_request */
		ret = memcpy_s(&cfg->initiator_ndi, ETHER_ADDR_LEN,
			&ev_dp->initiator_ndi, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy event's initiator addr\n"));
			goto fail;
		}
		if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
			WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
			nan_event_data->ndp_id = ev_dp->ndp_id;
			WL_TRACE(("INITIATOR_NDI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->initiator_ndi.octet)));
			WL_TRACE(("RESPONDOR_NDI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->responder_ndi.octet)));
			WL_TRACE(("PEER NMI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->peer_nmi.octet)));
			ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
				&ev_dp->peer_nmi, ETHER_ADDR_LEN);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy event's peer nmi\n"));
				goto fail;
			}
		} else {
			/* type is multicast */
			WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
			nan_event_data->ndp_id = ev_dp->mc_id;
			WL_TRACE(("PEER NMI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->peer_nmi.octet)));
			ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
				&ev_dp->peer_nmi,
				ETHER_ADDR_LEN);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy event's peer nmi\n"));
				goto fail;
			}
		}
		*tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
			OFFSETOF(bcm_xtlv_t, data);
		*nan_opts_len = data_len - *tlvs_offset;
		if (event_num == WL_NAN_EVENT_PEER_DATAPATH_IND) {
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_REQUEST;
#ifdef WL_NAN_DISC_CACHE
			svc_info = wl_cfgnan_get_svc_inst(cfg, nan_event_data->pub_id, 0);
			if (svc_info) {
				for (i = 0; i < NAN_MAX_SVC_INST; i++) {
					if (!svc_info->ndp_id[i]) {
						WL_TRACE(("Found empty field\n"));
						break;
					}
				}
				if (i == NAN_MAX_SVC_INST) {
					WL_ERR(("%s: cannot accommodate ndp id\n", __FUNCTION__));
					ret = BCME_NORESOURCE;
					goto fail;
				}
				svc_info->ndp_id[i] = nan_event_data->ndp_id;
				/* Add the peer to the data ndp peer list */
				wl_cfgnan_data_add_peer(cfg, &ev_dp->peer_nmi);
#ifdef RTT_SUPPORT
				/* cancel any ongoing RTT session with the peer,
				 * as we do not support DP and RNG to the same peer
				 */
				wl_cfgnan_clear_peer_ranging(cfg, &ev_dp->peer_nmi,
					RTT_GEO_SUSPN_PEER_NDP_TRIGGER);
#endif /* RTT_SUPPORT */
				ret = BCME_OK;
			}
#endif /* WL_NAN_DISC_CACHE */
		} else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
			if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
				ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
					&ev_dp->responder_ndi,
					ETHER_ADDR_LEN);
				if (ret != BCME_OK) {
					WL_ERR(("Failed to copy event's responder ndi\n"));
					goto fail;
				}
				WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
					MAC2STRDBG(ev_dp->responder_ndi.octet)));
				WL_TRACE(("Initiator status %d\n", nan_event_data->status));
			} else {
				ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
					&ev_dp->initiator_ndi,
					ETHER_ADDR_LEN);
				if (ret != BCME_OK) {
					WL_ERR(("Failed to copy event's responder ndi\n"));
					goto fail;
				}
				WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
					MAC2STRDBG(ev_dp->initiator_ndi.octet)));
			}
			if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
				nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
				wl_cfgnan_data_set_peer_dp_state(cfg, &ev_dp->peer_nmi,
					NAN_PEER_DP_CONNECTED);
				wl_cfgnan_update_dp_info(cfg, true, nan_event_data->ndp_id);
			} else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
				nan_event_data->status = NAN_DP_REQUEST_REJECT;
				/* Remove the peer from the data ndp peer list */
				wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
#ifdef RTT_SUPPORT
				rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
				if (rng_inst) {
					/* Trigger/Reset geofence RTT */
					wl_cfgnan_reset_geofence_ranging(cfg,
						rng_inst, RTT_SCHED_DP_REJECTED);
				}
#endif /* RTT_SUPPORT */
			} else {
				WL_ERR(("%s: Status code = %x not expected\n",
					__FUNCTION__, ev_dp->status));
				ret = BCME_ERROR;
				goto fail;
			}
			WL_TRACE(("Responder status %d\n", nan_event_data->status));
		} else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
			/* Mapping to the common struct between DHD and HAL */
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
#ifdef WL_NAN_DISC_CACHE
			if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
				/* Only at the responder side:
				 * if the dp is ended,
				 * clear the resp ndp id from the svc info cache
				 */
				svc_info = wl_cfgnan_get_svc_inst(cfg, 0, nan_event_data->ndp_id);
				if (svc_info) {
					for (i = 0; i < NAN_MAX_SVC_INST; i++) {
						if (svc_info->ndp_id[i] == nan_event_data->ndp_id) {
							svc_info->ndp_id[i] = 0;
						}
					}
				} else {
					WL_DBG(("couldn't find entry for ndp id = %d\n",
						nan_event_data->ndp_id));
				}
			}
#endif /* WL_NAN_DISC_CACHE */
			/* Remove the peer from the data ndp peer list */
			wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
			wl_cfgnan_update_dp_info(cfg, false, nan_event_data->ndp_id);
#ifdef RTT_SUPPORT
			WL_INFORM_MEM(("DP_END for REMOTE_NMI: " MACDBG "\n",
				MAC2STRDBG(&ev_dp->peer_nmi)));
			rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
			if (rng_inst) {
				/* Trigger/Reset geofence RTT */
				WL_INFORM_MEM(("sched geofence rtt from DP_END ctx: " MACDBG "\n",
					MAC2STRDBG(&rng_inst->peer_addr)));
				wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
					RTT_SCHED_DP_END);
			}
#endif /* RTT_SUPPORT */
		}
	} else {
		/* Fall through; not handling other IDs as of now */
		WL_DBG(("%s: ID = 0x%02x not supported\n", __FUNCTION__, xtlv->id));
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}
#define IN_GEOFENCE(ingress, egress, distance) (((distance) <= (ingress)) && \
	((distance) >= (egress)))
#define IS_INGRESS_VAL(ingress, distance) ((distance) < (ingress))
#define IS_EGRESS_VAL(egress, distance) ((distance) > (egress))

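/*
 * With the macros above, ingress_limit >= egress_limit and a reading is
 * "in geofence" when it falls inside that band (units are whatever the
 * caller's distance values use). For example, with ingress 100 and
 * egress 50:
 *
 *	IN_GEOFENCE(100, 50, 75);	// true: 50 <= 75 <= 100
 *	IS_INGRESS_VAL(100, 40);	// true: moved inside the ingress bound
 *	IS_EGRESS_VAL(50, 120);		// true: moved outside the egress bound
 */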
6763 static bool
wl_cfgnan_check_ranging_cond(nan_svc_info_t * svc_info,uint32 distance,uint8 * ranging_ind,uint32 prev_distance)6764 wl_cfgnan_check_ranging_cond(nan_svc_info_t *svc_info, uint32 distance,
6765 uint8 *ranging_ind, uint32 prev_distance)
6766 {
6767 uint8 svc_ind = svc_info->ranging_ind;
6768 bool notify = FALSE;
6769 bool range_rep_ev_once =
6770 !!(svc_info->svc_range_status & SVC_RANGE_REP_EVENT_ONCE);
6771 uint32 ingress_limit = svc_info->ingress_limit;
6772 uint32 egress_limit = svc_info->egress_limit;
6773
6774 WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the svc ranging cond %d"
6775 " distance %d prev_distance %d, range_rep_ev_once %d\n",
6776 svc_ind, distance, prev_distance, range_rep_ev_once));
6777 WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the SVC ingress and"
6778 " egress limits %d %d\n", ingress_limit, egress_limit));
6779 if (svc_ind & NAN_RANGE_INDICATION_CONT) {
6780 *ranging_ind = NAN_RANGE_INDICATION_CONT;
6781 notify = TRUE;
6782 WL_ERR(("\n%s: svc has continuous ind (line %d)\n",
6783 __FUNCTION__, __LINE__));
6784 goto done;
6785 }
6786 if (svc_ind == (NAN_RANGE_INDICATION_INGRESS |
6787 NAN_RANGE_INDICATION_EGRESS)) {
6788 if (IN_GEOFENCE(ingress_limit, egress_limit, distance)) {
6789 /* if not already in geofence */
6790 if ((range_rep_ev_once == FALSE) ||
6791 (!IN_GEOFENCE(ingress_limit, egress_limit,
6792 prev_distance))) {
6793 notify = TRUE;
6794 if (distance < ingress_limit) {
6795 *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
6796 } else {
6797 *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
6798 }
6799 WL_ERR(("\n%s: svc has geofence ind (line %d) res_ind %d\n",
6800 __FUNCTION__, __LINE__, *ranging_ind));
6801 }
6802 }
6803 goto done;
6804 }
6805
6806 if (svc_ind == NAN_RANGE_INDICATION_INGRESS) {
6807 if (IS_INGRESS_VAL(ingress_limit, distance)) {
6808 if ((range_rep_ev_once == FALSE) ||
6809 (prev_distance == INVALID_DISTANCE) ||
6810 !IS_INGRESS_VAL(ingress_limit, prev_distance)) {
6811 notify = TRUE;
6812 *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
6813 WL_ERR(("\n%s: svc has ingress ind (line %d)\n",
6814 __FUNCTION__, __LINE__));
6815 }
6816 }
6817 goto done;
6818 }
6819 if (svc_ind == NAN_RANGE_INDICATION_EGRESS) {
6820 if (IS_EGRESS_VAL(egress_limit, distance)) {
6821 if ((range_rep_ev_once == FALSE) ||
6822 (prev_distance == INVALID_DISTANCE) ||
6823 !IS_EGRESS_VAL(egress_limit, prev_distance)) {
6824 notify = TRUE;
6825 *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
6826 WL_ERR(("\n%s: svc has egress ind (line %d)\n",
6827 __FUNCTION__, __LINE__));
6828 }
6829 }
6830 goto done;
6831 }
6832 done:
6833 svc_info->svc_range_status |= SVC_RANGE_REP_EVENT_ONCE;
6834 return notify;
6835 }
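/*
 * Notification rules implemented above, in brief (derived from the code):
 *  - CONT: every valid measurement is notified.
 *  - INGRESS|EGRESS: notified on entering the [egress, ingress] band, or
 *    on any report before SVC_RANGE_REP_EVENT_ONCE has been set.
 *  - INGRESS only / EGRESS only: notified on crossing the respective
 *    limit relative to prev_distance.
 * svc_range_status is always stamped with SVC_RANGE_REP_EVENT_ONCE on
 * exit, so later reports notify only on a state change.
 */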
6836
6837 static int
6838 wl_cfgnan_event_disc_result(struct bcm_cfg80211 *cfg,
6839 nan_event_data_t *nan_event_data)
6840 {
6841 int ret = BCME_OK;
6842 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
6843 ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
6844 GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH, nan_event_data);
6845 if (ret != BCME_OK) {
6846 WL_ERR(("Failed to send event to nan hal\n"));
6847 }
6848 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
6849 return ret;
6850 }
6851
6852 static int32
6853 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
6854 nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance)
6855 {
6856 nan_svc_info_t *svc_info;
6857 bool notify_svc = FALSE;
6858 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
6859 uint8 ranging_ind = 0;
6860 int ret = BCME_OK;
6861 int i = 0, j = 0;
6862
6863 for (i = 0; i < MAX_SUBSCRIBES; i++) {
6864 svc_info = rng_inst->svc_idx[i];
6865 if (svc_info) {
6866 if (nan_event_data->ranging_result_present) {
6867 notify_svc = wl_cfgnan_check_ranging_cond(svc_info, distance,
6868 &ranging_ind, rng_inst->prev_distance_mm);
6869 nan_event_data->ranging_ind = ranging_ind;
6870 } else {
6871 /* Report only if ranging was needed */
6872 notify_svc = svc_info->ranging_required;
6873 }
6874 WL_DBG(("wl_cfgnan_notify_disc_with_ranging: Ranging notify for"
6875 " svc_id %d, notify %d and ind %d\n",
6876 svc_info->svc_id, notify_svc, ranging_ind));
6877 } else {
6878 continue;
6879 }
6880 if (notify_svc) {
6881 for (j = 0; j < NAN_MAX_CACHE_DISC_RESULT; j++) {
6882 if (!memcmp(&disc_res[j].peer,
6883 &(rng_inst->peer_addr), ETHER_ADDR_LEN) &&
6884 (svc_info->svc_id == disc_res[j].sub_id)) {
6885 ret = wl_nan_cache_to_event_data(&disc_res[j],
6886 nan_event_data, cfg->osh);
6887 ret = wl_cfgnan_event_disc_result(cfg, nan_event_data);
6888 /* If it's not 'match once', clear the cache entry,
6889 * as the FW will indicate again.
6890 */
6891 if (!(svc_info->flags & WL_NAN_MATCH_ONCE)) {
6892 wl_cfgnan_remove_disc_result(cfg, svc_info->svc_id);
6893 }
6894 }
6895 }
6896 }
6897 }
6898 WL_DBG(("notify_disc_with_ranging done ret %d\n", ret));
6899 return ret;
6900 }
6901
6902 #ifdef RTT_SUPPORT
6903 static int32
6904 wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 *cfg,
6905 nan_ranging_inst_t *rng_inst, uint8 rng_id)
6906 {
6907 int ret = BCME_OK;
6908 uint32 status;
6909 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
6910
6911 ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
6912 rng_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
6913 if (unlikely(ret) || unlikely(status)) {
6914 WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
6915 __FUNCTION__, ret, status));
6916 }
6917 dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
6918
6919 wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, RTT_SCHED_RNG_RPT_DIRECTED);
6920
6921 WL_DBG(("Ongoing ranging session is cancelled \n"));
6922 return ret;
6923 }
6924 #endif /* RTT_SUPPORT */
6925
6926 static void
6927 wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
6928 nan_ranging_inst_t *rng_inst)
6929 {
6930 nan_event_data_t *nan_event_data = NULL;
6931
6932 nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
6933 if (!nan_event_data) {
6934 WL_ERR(("%s: memory allocation failed\n", __func__));
6935 goto exit;
6936 }
6937
6938 wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, nan_event_data, 0);
6939
6940 exit:
6941 wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
6942
6943 return;
6944 }
6945
6946 #ifdef RTT_SUPPORT
6947 void
6948 wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
6949 wl_nan_ev_rng_rpt_ind_t *range_res)
6950 {
6951 nan_ranging_inst_t *rng_inst = NULL;
6952 nan_event_data_t nan_event_data;
6953 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
6954 rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
6955
6956 UNUSED_PARAMETER(nan_event_data);
6957 rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res->peer_m_addr);
6958 if (!rng_inst) {
6959 WL_ERR(("wl_cfgnan_process_range_report: no ranging instance, "
6960 "but received RNG RPT event; check\n"));
6961 goto exit;
6962 }
6963 #ifdef NAN_RTT_DBG
6964 DUMP_NAN_RTT_INST(rng_inst);
6965 DUMP_NAN_RTT_RPT(range_res);
6966 #endif /* NAN_RTT_DBG */
6967 range_res->rng_id = rng_inst->range_id;
6968 bzero(&nan_event_data, sizeof(nan_event_data));
6969 nan_event_data.ranging_result_present = 1;
6970 nan_event_data.range_measurement_cm = range_res->dist_mm;
6971 (void)memcpy_s(&nan_event_data.remote_nmi, ETHER_ADDR_LEN,
6972 &range_res->peer_m_addr, ETHER_ADDR_LEN);
6973 nan_event_data.ranging_ind = range_res->indication;
6974 if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
6975 /* check in cache and event match to host */
6976 wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, &nan_event_data,
6977 range_res->dist_mm);
6978 rng_inst->prev_distance_mm = range_res->dist_mm;
6979 /* Reset resp reject count on valid measurement */
6980 rng_inst->geof_retry_count = 0;
6981 #ifdef RTT_GEOFENCE_INTERVAL
6982 if (rtt_status->geofence_cfg.geofence_rtt_interval < 0) {
6983 ; /* Do Nothing */
6984 } else
6985 #endif /* RTT_GEOFENCE_INTERVAL */
6986 {
6987 wl_cfgnan_suspend_geofence_rng_session(bcmcfg_to_prmry_ndev(cfg),
6988 &rng_inst->peer_addr, RTT_GEO_SUSPN_RANGE_RES_REPORTED, 0);
6989 GEOFENCE_RTT_LOCK(rtt_status);
6990 dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
6991 GEOFENCE_RTT_UNLOCK(rtt_status);
6992 wl_cfgnan_reset_geofence_ranging(cfg,
6993 rng_inst, RTT_SCHED_RNG_RPT_GEOFENCE);
6994 }
6995 } else if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
6996 wl_cfgnan_handle_directed_rtt_report(cfg, rng_inst, range_res->rng_id);
6997 }
6998
6999 exit:
7000 return;
7001 }
7002 #endif /* RTT_SUPPORT */
7003
7004 static void
7005 wl_nan_print_status(wl_nan_conf_status_t *nstatus)
7006 {
7007 printf("> enabled: %d\n", nstatus->enabled);
7008 printf("> Current NMI: " MACDBG "\n", MAC2STRDBG(nstatus->nmi.octet));
7009 printf("> Current cluster_id: " MACDBG "\n", MAC2STRDBG(nstatus->cid.octet));
7010
7011 switch (nstatus->role) {
7012 case WL_NAN_ROLE_AUTO:
7013 printf("> role: %s (%d)\n", "auto", nstatus->role);
7014 break;
7015 case WL_NAN_ROLE_NON_MASTER_NON_SYNC:
7016 printf("> role: %s (%d)\n", "non-master-non-sync", nstatus->role);
7017 break;
7018 case WL_NAN_ROLE_NON_MASTER_SYNC:
7019 printf("> role: %s (%d)\n", "non-master-sync", nstatus->role);
7020 break;
7021 case WL_NAN_ROLE_MASTER:
7022 printf("> role: %s (%d)\n", "master", nstatus->role);
7023 break;
7024 case WL_NAN_ROLE_ANCHOR_MASTER:
7025 printf("> role: %s (%d)\n", "anchor-master", nstatus->role);
7026 break;
7027 default:
7028 printf("> role: %s (%d)\n", "undefined", nstatus->role);
7029 break;
7030 }
7031
7032 printf("> social channels: %d, %d\n",
7033 nstatus->social_chans[0], nstatus->social_chans[1]);
7034 printf("> master_rank: " NMRSTR "\n", NMR2STR(nstatus->mr));
7035 printf("> amr : " NMRSTR "\n", NMR2STR(nstatus->amr));
7036 printf("> hop_count: %d\n", nstatus->hop_count);
7037 printf("> ambtt: %d\n", nstatus->ambtt);
7038 }
7039
7040 static void
7041 wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
7042 nan_event_data_t *nan_event_data)
7043 {
7044 if (nan_event_data) {
7045 if (nan_event_data->tx_match_filter.data) {
7046 MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
7047 nan_event_data->tx_match_filter.dlen);
7048 nan_event_data->tx_match_filter.data = NULL;
7049 }
7050 if (nan_event_data->rx_match_filter.data) {
7051 MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
7052 nan_event_data->rx_match_filter.dlen);
7053 nan_event_data->rx_match_filter.data = NULL;
7054 }
7055 if (nan_event_data->svc_info.data) {
7056 MFREE(cfg->osh, nan_event_data->svc_info.data,
7057 nan_event_data->svc_info.dlen);
7058 nan_event_data->svc_info.data = NULL;
7059 }
7060 if (nan_event_data->sde_svc_info.data) {
7061 MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
7062 nan_event_data->sde_svc_info.dlen);
7063 nan_event_data->sde_svc_info.data = NULL;
7064 }
7065 MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
7066 }
7067
7068 }
7069
7070 #ifdef RTT_SUPPORT
7071 /*
7072 * Schedules the RTT work thread if a geofence RTT target is
7073 * still pending; otherwise clears the ranging instance.
7074 */
7077 void
7078 wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
7079 nan_ranging_inst_t *rng_inst, int sched_reason)
7080 {
7081 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7082 u8 rtt_invalid_reason = RTT_STATE_VALID;
7083 rtt_geofence_target_info_t *geofence_target = NULL;
7084 rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
7085 int8 cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
7086 int8 index = DHD_RTT_INVALID_TARGET_INDEX;
7087 bool geofence_state = dhd_rtt_get_geofence_rtt_state(dhd);
7088 bool retry = FALSE;
7089
7090 WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, sched_reason = %d, cur_idx = %d, "
7091 "geofence_interval = %d\n", sched_reason, rtt_status->geofence_cfg.cur_target_idx,
7092 rtt_status->geofence_cfg.geofence_rtt_interval));
7093 cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
7094 if (cur_idx == -1) {
7095 WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, "
7096 "Removing Ranging Instance " MACDBG "\n",
7097 MAC2STRDBG(&(rng_inst->peer_addr))));
7098 bzero(rng_inst, sizeof(*rng_inst));
7099 /* Cancel pending retry timer if any */
7100 if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
7101 cancel_delayed_work(&rtt_status->rtt_retry_timer);
7102 }
7103 goto exit;
7104 }
7105
7106 /* Get current geofencing target */
7107 geofence_target = dhd_rtt_get_geofence_current_target(dhd);
7108
7109 /* get target index for cur ranging inst */
7110 dhd_rtt_get_geofence_target(dhd,
7111 &rng_inst->peer_addr, &index);
7112 if ((sched_reason == RTT_SCHED_RTT_RETRY_GEOFENCE) &&
7113 (rng_inst->range_status == NAN_RANGING_IN_PROGRESS)) {
7114 /* If ranging is already in progress with this peer
7115 * (as responder or directed RTT initiator),
7116 * retry later on the geofence retry timer.
7117 */
7118 retry = TRUE;
7119 } else if (cur_idx == index) {
7120 /* Reset incoming Ranging instance */
7121 rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
7122 rng_inst->range_status = NAN_RANGING_REQUIRED;
7123 rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
7124 if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
7125 (sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
7126 rng_inst->prev_distance_mm = INVALID_DISTANCE;
7127 }
7128 } else {
7129 if (index == DHD_RTT_INVALID_TARGET_INDEX) {
7130 /* Remove incoming Ranging instance */
7131 WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
7132 MAC2STRDBG(&(rng_inst->peer_addr))));
7133 bzero(rng_inst, sizeof(*rng_inst));
7134 } else {
7135 /* Reset incoming Ranging instance */
7136 rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
7137 rng_inst->range_status = NAN_RANGING_REQUIRED;
7138 rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
7139 if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
7140 (sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
7141 rng_inst->prev_distance_mm = INVALID_DISTANCE;
7142 }
7143 }
7144 /* Create a ranging instance for the current target if not already present */
7145 rng_inst = wl_cfgnan_get_ranging_inst(cfg,
7146 &geofence_target->peer_addr, NAN_RANGING_ROLE_INITIATOR);
7147 }
7148
7149 /* Avoid scheduling if geofence ranging is already running,
7150 * a directed RTT is in progress, or the RTT state is invalid
7151 * (e.g. an NDP with the peer).
7152 */
7155 if ((geofence_state == TRUE) ||
7156 (!RTT_IS_STOPPED(rtt_status)) ||
7157 (rtt_invalid_reason != RTT_STATE_VALID)) {
7158 /* Not in valid RTT state, avoid schedule */
7159 goto exit;
7160 }
7161
7162 if ((cur_idx == 0) && ((sched_reason == RTT_SCHED_RNG_RPT_GEOFENCE) ||
7163 (sched_reason == RTT_SCHED_RNG_TERM))) {
7164 /* Back at the first target after a full pass; retry over a timer */
7165 retry = TRUE;
7166 }
7167
7168 if (retry && (rtt_status->geofence_cfg.geofence_rtt_interval >= 0)) {
7169 /* Move to first target and retry over a timer */
7170 WL_DBG(("Retry over a timer, cur_idx = %d\n",
7171 rtt_status->geofence_cfg.cur_target_idx));
7172 /* schedule proxd retry timer */
7173 schedule_delayed_work(&rtt_status->rtt_retry_timer,
7174 msecs_to_jiffies(rtt_status->geofence_cfg.geofence_rtt_interval));
7175 goto exit;
7176
7177 }
7178
7179 /* schedule RTT */
7180 dhd_rtt_schedule_rtt_work_thread(dhd, sched_reason);
7181
7182 exit:
7183 return;
7184 }
7185
7186 static bool
7187 wl_check_range_role_concurrency(dhd_pub_t *dhd, nan_ranging_inst_t *rng_inst)
7188 {
7189 ASSERT(rng_inst);
7190 if ((dhd_rtt_get_role_concurrency_state(dhd) == TRUE) &&
7191 (rng_inst->num_svc_ctx > 0)) {
7192 return TRUE;
7193 } else {
7194 return FALSE;
7195 }
7196 }
7197
7198 static void
7199 wl_cfgnan_resolve_ranging_role_concurrecny(dhd_pub_t *dhd,
7200 nan_ranging_inst_t *rng_inst)
7201 {
7202 /* Update rng_inst to initiator role and resolve role concurrency */
7203 rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
7204 dhd_rtt_set_role_concurrency_state(dhd, FALSE);
7205 }
7206 #endif /* RTT_SUPPORT */
7207
7208 static bool
7209 wl_cfgnan_geofence_retry_check(nan_ranging_inst_t *rng_inst, uint8 reason_code)
7210 {
7211 bool geof_retry = FALSE;
7212
7213 switch (reason_code) {
7214 case NAN_RNG_TERM_IDLE_TIMEOUT:
7215 /* Fall through: add more reason codes here if needed */
7216 case NAN_RNG_TERM_RNG_RESP_TIMEOUT:
7217 case NAN_RNG_TERM_RNG_RESP_REJ:
7218 case NAN_RNG_TERM_RNG_TXS_FAIL:
7219 if (rng_inst->geof_retry_count <
7220 NAN_RNG_GEOFENCE_MAX_RETRY_CNT) {
7221 rng_inst->geof_retry_count++;
7222 geof_retry = TRUE;
7223 }
7224 break;
7225 default:
7226 /* FALSE for any other case */
7227 break;
7228 }
7229
7230 return geof_retry;
7231 }
7232
7233 s32
7234 wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
7235 bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *event_data)
7236 {
7237 uint16 data_len;
7238 uint32 event_num;
7239 s32 event_type;
7240 int hal_event_id = 0;
7241 nan_event_data_t *nan_event_data = NULL;
7242 nan_parse_event_ctx_t nan_event_ctx;
7243 uint16 tlvs_offset = 0;
7244 uint16 nan_opts_len = 0;
7245 uint8 *tlv_buf;
7246 s32 ret = BCME_OK;
7247 bcm_xtlv_opts_t xtlv_opt = BCM_IOV_CMD_OPT_ALIGN32;
7248 uint32 status;
7249 nan_svc_info_t *svc;
7250
7251 UNUSED_PARAMETER(wl_nan_print_status);
7252 UNUSED_PARAMETER(status);
7253 NAN_DBG_ENTER();
7254 NAN_MUTEX_LOCK();
7255
7256 if (!event || !event_data) {
7257 WL_ERR(("event data is NULL\n"));
7258 ret = -EINVAL;
7259 goto exit;
7260 }
7261
7262 event_type = ntoh32(event->event_type);
7263 event_num = ntoh32(event->reason);
7264 data_len = ntoh32(event->datalen);
7265
7266 if (NAN_INVALID_EVENT(event_num)) {
7267 WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num, event_type));
7268 ret = -EINVAL;
7269 goto exit;
7270 }
7271 WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
7272 nan_event_to_str(event_num), event_num, data_len));
7273
7274 #ifdef WL_NAN_DEBUG
7275 prhex("nan_event_data:", event_data, data_len);
7276 #endif /* WL_NAN_DEBUG */
7277
7278 if (!cfg->nan_init_state) {
7279 WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
7280 ret = BCME_OK;
7281 goto exit;
7282 }
7283
7284 nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
7285 if (!nan_event_data) {
7286 WL_ERR(("%s: memory allocation failed\n", __func__));
7287 goto exit;
7288 }
7289
7290 nan_event_ctx.cfg = cfg;
7291 nan_event_ctx.nan_evt_data = nan_event_data;
7292 /*
7293 * send as preformatted hex string
7294 * EVENT_NAN <event_type> <tlv_hex_string>
7295 */
7296 switch (event_num) {
7297 case WL_NAN_EVENT_START:
7298 case WL_NAN_EVENT_MERGE:
7299 case WL_NAN_EVENT_ROLE: {
7300 /* get nan status info as-is */
7301 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7302 wl_nan_conf_status_t *nstatus = (wl_nan_conf_status_t *)xtlv->data;
7303 WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
7304 nan_event_to_str(event_num), event_num, data_len));
7305 WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
7306 /* Mapping to common struct between DHD and HAL */
7307 nan_event_data->enabled = nstatus->enabled;
7308 ret = memcpy_s(&nan_event_data->local_nmi, ETHER_ADDR_LEN,
7309 &nstatus->nmi, ETHER_ADDR_LEN);
7310 if (ret != BCME_OK) {
7311 WL_ERR(("Failed to copy nmi\n"));
7312 goto exit;
7313 }
7314 ret = memcpy_s(&nan_event_data->clus_id, ETHER_ADDR_LEN,
7315 &nstatus->cid, ETHER_ADDR_LEN);
7316 if (ret != BCME_OK) {
7317 WL_ERR(("Failed to copy cluster id\n"));
7318 goto exit;
7319 }
7320 nan_event_data->nan_de_evt_type = event_num;
7321 #ifdef WL_NAN_DEBUG
7322 wl_nan_print_status(nstatus);
7323 #endif /* WL_NAN_DEBUG */
7324 if (event_num == WL_NAN_EVENT_START) {
7325 OSL_SMP_WMB();
7326 cfg->nancfg.nan_event_recvd = true;
7327 OSL_SMP_WMB();
7328 wake_up(&cfg->nancfg.nan_event_wait);
7329 }
7330 hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
7331 break;
7332 }
7333 case WL_NAN_EVENT_TERMINATED: {
7334 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7335 wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
7336
7337 /* Mapping to common struct between DHD and HAL */
7338 WL_TRACE(("Instance ID: %d\n", pev->instance_id));
7339 nan_event_data->local_inst_id = pev->instance_id;
7340 WL_TRACE(("Service Type: %d\n", pev->svctype));
7341
7342 #ifdef WL_NAN_DISC_CACHE
7343 if (pev->svctype == NAN_SC_SUBSCRIBE) {
7344 wl_cfgnan_remove_disc_result(cfg, pev->instance_id);
7345 }
7346 #endif /* WL_NAN_DISC_CACHE */
7347 /* Mapping reason code of FW to status code of framework */
7348 if (pev->reason == NAN_TERM_REASON_TIMEOUT ||
7349 pev->reason == NAN_TERM_REASON_USER_REQ ||
7350 pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
7351 nan_event_data->status = NAN_STATUS_SUCCESS;
7352 ret = memcpy_s(nan_event_data->nan_reason,
7353 sizeof(nan_event_data->nan_reason),
7354 "NAN_STATUS_SUCCESS",
7355 strlen("NAN_STATUS_SUCCESS"));
7356 if (ret != BCME_OK) {
7357 WL_ERR(("Failed to copy nan_reason\n"));
7358 goto exit;
7359 }
7360 } else {
7361 nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
7362 ret = memcpy_s(nan_event_data->nan_reason,
7363 sizeof(nan_event_data->nan_reason),
7364 "NAN_STATUS_INTERNAL_FAILURE",
7365 strlen("NAN_STATUS_INTERNAL_FAILURE"));
7366 if (ret != BCME_OK) {
7367 WL_ERR(("Failed to copy nan_reason\n"));
7368 goto exit;
7369 }
7370 }
7371
7372 if (pev->svctype == NAN_SC_SUBSCRIBE) {
7373 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED;
7374 } else {
7375 hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
7376 }
7377 #ifdef WL_NAN_DISC_CACHE
7378 if (pev->reason != NAN_TERM_REASON_USER_REQ) {
7379 wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, pev->instance_id);
7380 /* terminate ranging sessions */
7381 wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
7382 }
7383 #endif /* WL_NAN_DISC_CACHE */
7384 break;
7385 }
7386
7387 case WL_NAN_EVENT_RECEIVE: {
7388 nan_opts_len = data_len;
7389 hal_event_id = GOOGLE_NAN_EVENT_FOLLOWUP;
7390 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
7391 break;
7392 }
7393
7394 case WL_NAN_EVENT_TXS: {
7395 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7396 wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
7397 wl_nan_event_sd_txs_t *txs_sd = NULL;
7398 if (txs->status == WL_NAN_TXS_SUCCESS) {
7399 WL_INFORM_MEM(("TXS success for type %d token %d\n",
7400 txs->type, txs->host_seq));
7401 nan_event_data->status = NAN_STATUS_SUCCESS;
7402 ret = memcpy_s(nan_event_data->nan_reason,
7403 sizeof(nan_event_data->nan_reason),
7404 "NAN_STATUS_SUCCESS",
7405 strlen("NAN_STATUS_SUCCESS"));
7406 if (ret != BCME_OK) {
7407 WL_ERR(("Failed to copy nan_reason\n"));
7408 goto exit;
7409 }
7410 } else {
7411 /* TODO: populate status based on reason codes.
7412 * For now report it as no ACK, so that the app/framework can retry.
7413 */
7414 WL_INFORM_MEM(("TXS failed for type %d status %d token %d\n",
7415 txs->type, txs->status, txs->host_seq));
7416 nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
7417 ret = memcpy_s(nan_event_data->nan_reason,
7418 sizeof(nan_event_data->nan_reason),
7419 "NAN_STATUS_NO_OTA_ACK",
7420 strlen("NAN_STATUS_NO_OTA_ACK"));
7421 if (ret != BCME_OK) {
7422 WL_ERR(("Failed to copy nan_reason\n"));
7423 goto exit;
7424 }
7425 }
7426 nan_event_data->reason = txs->reason_code;
7427 nan_event_data->token = txs->host_seq;
7428 if (txs->type == WL_NAN_FRM_TYPE_FOLLOWUP) {
7429 hal_event_id = GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND;
7430 xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
7431 if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_SD_TXS) {
7432 txs_sd = (wl_nan_event_sd_txs_t*)xtlv->data;
7433 nan_event_data->local_inst_id = txs_sd->inst_id;
7434 } else {
7435 WL_ERR(("Invalid params in TX status for trasnmit followup"));
7436 ret = -EINVAL;
7437 goto exit;
7438 }
7439 } else { /* TODO: add for other frame types if required */
7440 ret = -EINVAL;
7441 goto exit;
7442 }
7443 break;
7444 }
7445
7446 case WL_NAN_EVENT_DISCOVERY_RESULT: {
7447 nan_opts_len = data_len;
7448 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
7449 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
7450 break;
7451 }
7452 #ifdef WL_NAN_DISC_CACHE
7453 case WL_NAN_EVENT_DISC_CACHE_TIMEOUT: {
7454 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7455 wl_nan_ev_disc_cache_timeout_t *cache_data =
7456 (wl_nan_ev_disc_cache_timeout_t *)xtlv->data;
7457 wl_nan_disc_expired_cache_entry_t *cache_entry = NULL;
7458 uint16 xtlv_len = xtlv->len;
7459 uint8 entry_idx = 0;
7460
7461 if (xtlv->id == WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT) {
7462 xtlv_len = xtlv_len -
7463 OFFSETOF(wl_nan_ev_disc_cache_timeout_t, cache_exp_list);
7464 while ((entry_idx < cache_data->count) &&
7465 (xtlv_len >= sizeof(*cache_entry))) {
7466 cache_entry = &cache_data->cache_exp_list[entry_idx];
7467 /* Handle ranging cases for cache timeout */
7468 wl_cfgnan_ranging_clear_publish(cfg, &cache_entry->r_nmi_addr,
7469 cache_entry->l_sub_id);
7470 /* Invalidate local cache info */
7471 wl_cfgnan_remove_disc_result(cfg, cache_entry->l_sub_id);
7472 xtlv_len = xtlv_len - sizeof(*cache_entry);
7473 entry_idx++;
7474 }
7475 }
7476 break;
7477 }
7478 case WL_NAN_EVENT_RNG_REQ_IND: {
7479 wl_nan_ev_rng_req_ind_t *rng_ind;
7480 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7481
7482 nan_opts_len = data_len;
7483 rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
7484 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
7485 WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d"
7486 " peer:" MACDBG "\n", rng_ind->rng_id,
7487 MAC2STRDBG(&rng_ind->peer_m_addr)));
7488 #ifdef RTT_SUPPORT
7489 ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
7490 #endif /* RTT_SUPPORT */
7491 /* no need to send an event to HAL */
7492 goto exit;
7493 }
7494
7495 case WL_NAN_EVENT_RNG_TERM_IND: {
7496 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7497 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7498 nan_ranging_inst_t *rng_inst;
7499 wl_nan_ev_rng_term_ind_t *range_term = (wl_nan_ev_rng_term_ind_t *)xtlv->data;
7500 #ifdef RTT_SUPPORT
7501 int8 index = -1;
7502 rtt_geofence_target_info_t* geofence_target;
7503 rtt_status_info_t *rtt_status;
7504 int rng_sched_reason = 0;
7505 #endif /* RTT_SUPPORT */
7506 BCM_REFERENCE(dhd);
7507 WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_TERM_IND peer: " MACDBG ", "
7508 " Range ID:%d Reason Code:%d\n", MAC2STRDBG(&range_term->peer_m_addr),
7509 range_term->rng_id, range_term->reason_code));
7510 rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_term->rng_id);
7511 if (rng_inst) {
7512 #ifdef RTT_SUPPORT
7513 rng_sched_reason = RTT_SCHED_RNG_TERM;
7514 if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
7515 dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
7516 } else if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
7517 if (wl_cfgnan_geofence_retry_check(rng_inst,
7518 range_term->reason_code)) {
7519 rtt_status = GET_RTTSTATE(dhd);
7520 GEOFENCE_RTT_LOCK(rtt_status);
7521 dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
7522 GEOFENCE_RTT_UNLOCK(rtt_status);
7523 } else {
7524 /* Report on ranging failure */
7525 wl_cfgnan_disc_result_on_geofence_cancel(cfg,
7526 rng_inst);
7527 WL_TRACE(("Reset the state on terminate\n"));
7528 geofence_target = dhd_rtt_get_geofence_target(dhd,
7529 &rng_inst->peer_addr, &index);
7530 if (geofence_target) {
7531 dhd_rtt_remove_geofence_target(dhd,
7532 &geofence_target->peer_addr);
7533 }
7534 }
7535 /* Set geofence RTT in progress state to false */
7536 dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
7537 }
7538 if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER &&
7539 wl_check_range_role_concurrency(dhd, rng_inst)) {
7540 /* Resolve role concurrency */
7541 wl_cfgnan_resolve_ranging_role_concurrecny(dhd, rng_inst);
7542 /* Override sched reason if role concurrency just resolved */
7543 rng_sched_reason = RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE;
7544 }
7545 /* Reset Ranging Instance and trigger ranging if applicable */
7546 wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, rng_sched_reason);
7547 #endif /* RTT_SUPPORT */
7548 }
7549 break;
7550 }
7551 #endif /* WL_NAN_DISC_CACHE */
7552 /*
7553 * Data path event data arrives in a common event struct; all such
7554 * events are handled in one case, so the fall-through is intentional.
7555 */
7556 case WL_NAN_EVENT_PEER_DATAPATH_IND:
7557 case WL_NAN_EVENT_DATAPATH_ESTB:
7558 case WL_NAN_EVENT_DATAPATH_END: {
7559 ret = wl_nan_dp_cmn_event_data(cfg, event_data, data_len,
7560 &tlvs_offset, &nan_opts_len,
7561 event_num, &hal_event_id, nan_event_data);
7562 /* Avoiding optional param parsing for DP END Event */
7563 if (event_num == WL_NAN_EVENT_DATAPATH_END) {
7564 nan_opts_len = 0;
7565 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
7566 }
7567 if (unlikely(ret)) {
7568 WL_ERR(("nan dp common event data parse failed\n"));
7569 goto exit;
7570 }
7571 break;
7572 }
7573 default:
7574 WL_ERR_RLMT(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
7575 ret = BCME_ERROR;
7576 goto exit;
7577 }
7578
7579 if (nan_opts_len) {
7580 tlv_buf = (uint8 *)event_data + tlvs_offset;
7581 /* Extract event data tlvs and pass them to the callback fn */
7582 ret = bcm_unpack_xtlv_buf((void *)&nan_event_ctx, (const uint8*)tlv_buf,
7583 nan_opts_len, xtlv_opt, wl_cfgnan_set_vars_cbfn);
7584 if (ret != BCME_OK) {
7585 WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret));
7586 }
7587 }
7588
7589 #ifdef WL_NAN_DISC_CACHE
7590 if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
7591 #ifdef RTT_SUPPORT
7592 u8 rtt_invalid_reason = RTT_STATE_VALID;
7593 bool role_concur_state = 0;
7594 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7595 #endif /* RTT_SUPPORT */
7596 u16 update_flags = 0;
7597 WL_TRACE(("Cache disc res\n"));
7598 ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data, &update_flags);
7599 if (ret) {
7600 WL_ERR(("Failed to cache disc result ret %d\n", ret));
7601 }
7602 if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
7603 ret = wl_cfgnan_check_disc_result_for_ranging(cfg, nan_event_data);
7604 if (ret == BCME_OK) {
7605 #ifdef RTT_SUPPORT
7606 rtt_invalid_reason = dhd_rtt_invalid_states
7607 (bcmcfg_to_prmry_ndev(cfg), &nan_event_data->remote_nmi);
7608 role_concur_state = dhd_rtt_get_role_concurrency_state(dhd);
7609 /*
7610 * If an immediate RTT is not possible (invalid RTT state such
7611 * as an NDP connected/connecting, or role concurrency active),
7612 * send the discovery result right away; otherwise the result
7613 * is posted on the ranging report event.
7614 */
7617 if (rtt_invalid_reason == RTT_STATE_VALID &&
7618 role_concur_state == FALSE) {
7619 /* Avoid sending disc result instantly */
7620 goto exit;
7621 }
7622 #endif /* RTT_SUPPORT */
7623 } else {
7624 /* TODO: should we terminate service if ranging fails ? */
7625 WL_INFORM_MEM(("Ranging failed or not required, " MACDBG
7626 " sub_id:%d , pub_id:%d\n",
7627 MAC2STRDBG(&nan_event_data->remote_nmi),
7628 nan_event_data->sub_id, nan_event_data->pub_id));
7629 }
7630 } else {
7631 nan_svc_info_t *svc_info = wl_cfgnan_get_svc_inst(cfg,
7632 nan_event_data->sub_id, 0);
7633 if (svc_info && svc_info->ranging_required &&
7634 (update_flags & NAN_DISC_CACHE_PARAM_SDE_CONTROL)) {
7635 wl_cfgnan_ranging_clear_publish(cfg,
7636 &nan_event_data->remote_nmi, nan_event_data->sub_id);
7637 }
7638 }
7639
7640 /*
7641 * If tx match filter is present as part of active subscribe, keep same filter
7642 * values in discovery results also.
7643 */
7644 if (nan_event_data->sub_id == nan_event_data->requestor_id) {
7645 svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
7646 if (svc && svc->tx_match_filter_len) {
7647 nan_event_data->tx_match_filter.dlen = svc->tx_match_filter_len;
7648 nan_event_data->tx_match_filter.data =
7649 MALLOCZ(cfg->osh, svc->tx_match_filter_len);
7650 if (!nan_event_data->tx_match_filter.data) {
7651 WL_ERR(("%s: tx_match_filter_data alloc failed\n",
7652 __FUNCTION__));
7653 nan_event_data->tx_match_filter.dlen = 0;
7654 ret = -ENOMEM;
7655 goto exit;
7656 }
7657 ret = memcpy_s(nan_event_data->tx_match_filter.data,
7658 nan_event_data->tx_match_filter.dlen,
7659 svc->tx_match_filter, svc->tx_match_filter_len);
7660 if (ret != BCME_OK) {
7661 WL_ERR(("Failed to copy tx match filter data\n"));
7662 goto exit;
7663 }
7664 }
7665 }
7666 }
7667 #endif /* WL_NAN_DISC_CACHE */
7668
7669 WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
7670 nan_event_to_str(event_num), event_num, hal_event_id));
7671 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
7672 ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
7673 hal_event_id, nan_event_data);
7674 if (ret != BCME_OK) {
7675 WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
7676 nan_event_to_str(event_num), event_num));
7677 }
7678 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
7679
7680 exit:
7681 wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
7682
7683 NAN_MUTEX_UNLOCK();
7684 NAN_DBG_EXIT();
7685 return ret;
7686 }
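/*
 * Quick reference for the event mapping handled above (derived from the
 * switch cases; not exhaustive):
 *   WL_NAN_EVENT_START/MERGE/ROLE  -> GOOGLE_NAN_EVENT_DE_EVENT
 *   WL_NAN_EVENT_TERMINATED        -> GOOGLE_NAN_EVENT_{SUBSCRIBE|PUBLISH}_TERMINATED
 *   WL_NAN_EVENT_RECEIVE           -> GOOGLE_NAN_EVENT_FOLLOWUP
 *   WL_NAN_EVENT_TXS (followup)    -> GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND
 *   WL_NAN_EVENT_DISCOVERY_RESULT  -> GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH
 *   WL_NAN_EVENT_DATAPATH_*        -> mapped via wl_nan_dp_cmn_event_data()
 * DISC_CACHE_TIMEOUT and the RNG_REQ/RNG_TERM indications are consumed
 * in the driver and not forwarded to the HAL.
 */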
7687
7688 #ifdef WL_NAN_DISC_CACHE
7689 static int
7690 wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void *data,
7691 u16 *disc_cache_update_flags)
7692 {
7693 nan_event_data_t* disc = (nan_event_data_t*)data;
7694 int i, add_index = 0;
7695 int ret = BCME_OK;
7696 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
7697 *disc_cache_update_flags = 0;
7698
7699 if (!cfg->nan_enable) {
7700 WL_DBG(("nan not enabled"));
7701 return BCME_NOTENABLED;
7702 }
7703 if (cfg->nan_disc_count == NAN_MAX_CACHE_DISC_RESULT) {
7704 WL_DBG(("cache full"));
7705 ret = BCME_NORESOURCE;
7706 goto done;
7707 }
7708
7709 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
7710 if (!disc_res[i].valid) {
7711 add_index = i;
7712 continue;
7713 }
7714 if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
7715 !memcmp(disc_res[i].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN)) {
7716 WL_DBG(("cache entry already present, i = %d", i));
7717 /* Update needed parameters here */
7718 if (disc_res[i].sde_control_flag != disc->sde_control_flag) {
7719 disc_res[i].sde_control_flag = disc->sde_control_flag;
7720 *disc_cache_update_flags |= NAN_DISC_CACHE_PARAM_SDE_CONTROL;
7721 }
7722 ret = BCME_OK; /* entry already present */
7723 goto done;
7724 }
7725 }
7726 WL_DBG(("adding cache entry: add_index = %d\n", add_index));
7727 disc_res[add_index].valid = 1;
7728 disc_res[add_index].pub_id = disc->pub_id;
7729 disc_res[add_index].sub_id = disc->sub_id;
7730 disc_res[add_index].publish_rssi = disc->publish_rssi;
7731 disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
7732 disc_res[add_index].sde_control_flag = disc->sde_control_flag;
7733 ret = memcpy_s(&disc_res[add_index].peer, ETHER_ADDR_LEN,
7734 &disc->remote_nmi, ETHER_ADDR_LEN);
7735 if (ret != BCME_OK) {
7736 WL_ERR(("Failed to copy remote nmi\n"));
7737 goto done;
7738 }
7739 ret = memcpy_s(disc_res[add_index].svc_hash, WL_NAN_SVC_HASH_LEN,
7740 disc->svc_name, WL_NAN_SVC_HASH_LEN);
7741 if (ret != BCME_OK) {
7742 WL_ERR(("Failed to copy svc hash\n"));
7743 goto done;
7744 }
7745
7746 if (disc->svc_info.dlen && disc->svc_info.data) {
7747 disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
7748 disc_res[add_index].svc_info.data =
7749 MALLOCZ(cfg->osh, disc_res[add_index].svc_info.dlen);
7750 if (!disc_res[add_index].svc_info.data) {
7751 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
7752 disc_res[add_index].svc_info.dlen = 0;
7753 ret = BCME_NOMEM;
7754 goto done;
7755 }
7756 ret = memcpy_s(disc_res[add_index].svc_info.data, disc_res[add_index].svc_info.dlen,
7757 disc->svc_info.data, disc->svc_info.dlen);
7758 if (ret != BCME_OK) {
7759 WL_ERR(("Failed to copy svc info\n"));
7760 goto done;
7761 }
7762 }
7763 if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
7764 disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
7765 disc_res[add_index].tx_match_filter.data =
7766 MALLOCZ(cfg->osh, disc_res[add_index].tx_match_filter.dlen);
7767 if (!disc_res[add_index].tx_match_filter.data) {
7768 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
7769 disc_res[add_index].tx_match_filter.dlen = 0;
7770 ret = BCME_NOMEM;
7771 goto done;
7772 }
7773 ret = memcpy_s(disc_res[add_index].tx_match_filter.data,
7774 disc_res[add_index].tx_match_filter.dlen,
7775 disc->tx_match_filter.data, disc->tx_match_filter.dlen);
7776 if (ret != BCME_OK) {
7777 WL_ERR(("Failed to copy tx match filter\n"));
7778 goto done;
7779 }
7780 }
7781 cfg->nan_disc_count++;
7782 WL_DBG(("cfg->nan_disc_count = %d\n", cfg->nan_disc_count));
7783
7784 done:
7785 return ret;
7786 }
7787
7788 /* Send a command to the FW to clear its discovery cache info */
7789 static int
7790 wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id)
7791 {
7792 s32 ret = BCME_OK;
7793 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
7794 uint32 status;
7795 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
7796 uint8 buf[NAN_IOCTL_BUF_SIZE];
7797 bcm_iov_batch_buf_t *nan_buf;
7798 bcm_iov_batch_subcmd_t *sub_cmd;
7799 uint16 subcmd_len;
7800
7801 /* Same src and dest len here */
7802 memset_s(buf, sizeof(buf), 0, sizeof(buf));
7803
7804 nan_buf = (bcm_iov_batch_buf_t*)buf;
7805
7806 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
7807 nan_buf->count = 0;
7808 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
7809
7810 sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
7811 ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
7812 sizeof(sub_id), &subcmd_len);
7813 if (unlikely(ret)) {
7814 WL_ERR(("nan_sub_cmd check failed\n"));
7815 goto fail;
7816 }
7817
7818 /* Fill the sub_command block */
7819 sub_cmd->id = htod16(WL_NAN_CMD_SD_DISC_CACHE_CLEAR);
7820 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(sub_id);
7821 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
7822 /* Data size len vs buffer len check is already done above.
7823 * So, short buffer error is impossible.
7824 */
7825 (void)memcpy_s(sub_cmd->data, (nan_buf_size - OFFSETOF(bcm_iov_batch_subcmd_t, data)),
7826 &sub_id, sizeof(sub_id));
7827 /* adjust iov data len to the end of last data record */
7828 nan_buf_size -= (subcmd_len);
7829
7830 nan_buf->count++;
7831 nan_buf->is_set = true;
7832 nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
7833 /* Same src and dest len here */
7834 memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
7835 ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
7836 nan_buf, nan_buf_size, &status,
7837 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
7838 if (unlikely(ret) || unlikely(status)) {
7839 WL_ERR(("Disc cache clear handler failed ret %d status %d\n",
7840 ret, status));
7841 goto fail;
7842 }
7843
7844 fail:
7845 return ret;
7846 }
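/*
 * Layout sketch of the single-subcommand batch built by
 * wl_cfgnan_clear_disc_cache() above (field names from
 * bcm_iov_batch_buf_t / bcm_iov_batch_subcmd_t; widths illustrative):
 *
 *   +---------+-------+--------+-----------------------------------------+
 *   | version | count | is_set | cmds[0]:                                |
 *   |         |  (1)  | (true) |   id  = WL_NAN_CMD_SD_DISC_CACHE_CLEAR  |
 *   |         |       |        |   len = sizeof(u.options) + payload     |
 *   |         |       |        |   data = sub_id                         |
 *   +---------+-------+--------+-----------------------------------------+
 *
 * The nan_buf_size finally passed to wl_cfgnan_execute_ioctl() is the
 * number of bytes actually used, i.e. NAN_IOCTL_BUF_SIZE minus the
 * remaining space.
 */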
7847
7848 static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
7849 uint8 local_subid)
7850 {
7851 int i;
7852 int ret = BCME_NOTFOUND;
7853 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
7854 if (!cfg->nan_enable) {
7855 WL_DBG(("nan not enabled\n"));
7856 ret = BCME_NOTENABLED;
7857 goto done;
7858 }
7859 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
7860 if ((disc_res[i].valid) && (disc_res[i].sub_id == local_subid)) {
7861 WL_TRACE(("make cache entry invalid\n"));
7862 if (disc_res[i].tx_match_filter.data) {
7863 MFREE(cfg->osh, disc_res[i].tx_match_filter.data,
7864 disc_res[i].tx_match_filter.dlen);
7865 }
7866 if (disc_res[i].svc_info.data) {
7867 MFREE(cfg->osh, disc_res[i].svc_info.data,
7868 disc_res[i].svc_info.dlen);
7869 }
7870 memset_s(&disc_res[i], sizeof(disc_res[i]), 0, sizeof(disc_res[i]));
7871 cfg->nan_disc_count--;
7872 ret = BCME_OK;
7873 }
7874 }
7875 if (ret == BCME_NOTFOUND) { WL_DBG(("couldn't find entry\n")); }
7876 done:
7877 return ret;
7878 }
7879
7880 static nan_disc_result_cache *
7881 wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
7882 struct ether_addr *peer)
7883 {
7884 int i;
7885 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
7886 if (remote_pubid) {
7887 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
7888 if ((disc_res[i].pub_id == remote_pubid) &&
7889 !memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
7890 WL_DBG(("Found entry: i = %d\n", i));
7891 return &disc_res[i];
7892 }
7893 }
7894 } else {
7895 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
7896 if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
7897 WL_DBG(("Found entry: %d\n", i));
7898 return &disc_res[i];
7899 }
7900 }
7901 }
7902 return NULL;
7903 }
7904 #endif /* WL_NAN_DISC_CACHE */
7905
7906 void
7907 wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
7908 nan_data_path_id ndp_id)
7909 {
7910 uint8 i;
7911 bool match_found = false;
7912 #ifdef ARP_OFFLOAD_SUPPORT
7913 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7914 #endif /* ARP_OFFLOAD_SUPPORT */
7915 /* As of now, we don't need to know which NDP is active,
7916 * so we just track NDPs via a count. If per-NDP status by
7917 * ndp id is ever needed, change this implementation to use
7918 * a bit mask (see the sketch after this function).
7919 */
7920 if (!dhd) {
7921 WL_ERR(("dhd pub null!\n"));
7922 return;
7923 }
7924
7925 if (add) {
7926 /* On first NAN DP establishment, disable ARP. */
7927 #ifdef ARP_OFFLOAD_SUPPORT
7928 if (!cfg->nan_dp_count) {
7929 dhd_arp_offload_set(dhd, 0);
7930 dhd_arp_offload_enable(dhd, false);
7931 }
7932 #endif /* ARP_OFFLOAD_SUPPORT */
7933 for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
7934 if (!cfg->nancfg.ndp_id[i]) {
7935 WL_TRACE(("Found empty field\n"));
7936 break;
7937 }
7938 }
7939
7940 if (i == NAN_MAX_NDP_PEER) {
7941 WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
7942 return;
7943 }
7944 if (ndp_id) {
7945 cfg->nan_dp_count++;
7946 cfg->nancfg.ndp_id[i] = ndp_id;
7947 WL_DBG(("%s:Added ndp id = [%d] at i = %d\n",
7948 __FUNCTION__, cfg->nancfg.ndp_id[i], i));
7949 }
7950 } else {
7951 ASSERT(cfg->nan_dp_count);
7952 if (ndp_id) {
7953 for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
7954 if (cfg->nancfg.ndp_id[i] == ndp_id) {
7955 cfg->nancfg.ndp_id[i] = 0;
7956 WL_DBG(("%s:Removed ndp id = [%d] from i = %d\n",
7957 __FUNCTION__, ndp_id, i));
7958 match_found = true;
7959 if (cfg->nan_dp_count) {
7960 cfg->nan_dp_count--;
7961 }
7962 break;
7963 }
7967 }
7968 if (match_found == false) {
7969 WL_ERR(("Received unsaved NDP Id = %d !!\n", ndp_id));
7970 }
7971 }
7972
7973 #ifdef ARP_OFFLOAD_SUPPORT
7974 if (!cfg->nan_dp_count) {
7975 /* If NAN DP count becomes zero and if there
7976 * are no conflicts, enable back ARP offload.
7977 * As of now, the conflicting interfaces are AP
7978 * and P2P. But NAN + P2P/AP concurrency is not
7979 * supported.
7980 */
7981 dhd_arp_offload_set(dhd, dhd_arp_mode);
7982 dhd_arp_offload_enable(dhd, true);
7983 }
7984 #endif /* ARP_OFFLOAD_SUPPORT */
7985 }
7986 WL_INFORM_MEM(("NAN_DP_COUNT: %d\n", cfg->nan_dp_count));
7987 }
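/*
 * Hypothetical bit-mask alternative mentioned in the comment inside
 * wl_cfgnan_update_dp_info() above; a sketch only, kept out of the
 * build, and it assumes ndp ids fall in the range 1..32:
 */
#if 0
static uint32 ndp_active_mask;

static void ndp_mask_update(nan_data_path_id ndp_id, bool add)
{
	if ((ndp_id == 0) || (ndp_id > 32)) {
		return;
	}
	if (add) {
		ndp_active_mask |= (1u << (ndp_id - 1));
	} else {
		ndp_active_mask &= ~(1u << (ndp_id - 1));
	}
}

/* Per-NDP status lookup, which the count-based scheme cannot provide */
#define NDP_IS_ACTIVE(id) ((ndp_active_mask & (1u << ((id) - 1))) != 0)
#endif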
7988
7989 bool
7990 wl_cfgnan_is_dp_active(struct net_device *ndev)
7991 {
7992 struct bcm_cfg80211 *cfg;
7993 bool nan_dp;
7994
7995 if (!ndev || !ndev->ieee80211_ptr) {
7996 WL_ERR(("ndev/wdev null\n"));
7997 return false;
7998 }
7999
8000 cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
8001 nan_dp = cfg->nan_dp_count ? true : false;
8002
8003 WL_DBG(("NAN DP status:%d\n", nan_dp));
8004 return nan_dp;
8005 }
8006
8007 s32
8008 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg)
8009 {
8010 int i;
8011 for (i = 0; i < NAN_MAX_NDI; i++) {
8012 if (!cfg->nancfg.ndi[i].in_use) {
8013 /* Free interface, use it */
8014 return i;
8015 }
8016 }
8017 /* Don't have a free interface */
8018 return WL_INVALID;
8019 }
8020
8021 s32
8022 wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name)
8023 {
8024 u16 len;
8025 if (!name || (idx < 0) || (idx >= NAN_MAX_NDI)) {
8026 return -EINVAL;
8027 }
8028
8029 /* Ensure ifname string size <= IFNAMSIZ including null termination */
8030 len = MIN(strlen(name), (IFNAMSIZ - 1));
8031 strncpy(cfg->nancfg.ndi[idx].ifname, name, len);
8032 cfg->nancfg.ndi[idx].ifname[len] = '\0';
8033 cfg->nancfg.ndi[idx].in_use = true;
8034 cfg->nancfg.ndi[idx].created = false;
8035
8036 /* NDI data populated; interface creation is tracked via .created */
8037 return WL_INVALID;
8038 }
8039
8040 s32
8041 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name)
8042 {
8043 u16 len;
8044 int i;
8045 if (!name) {
8046 return -EINVAL;
8047 }
8048
8049 len = MIN(strlen(name), IFNAMSIZ);
8050 for (i = 0; i < NAN_MAX_NDI; i++) {
8051 if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
8052 memset_s(&cfg->nancfg.ndi[i].ifname, IFNAMSIZ,
8053 0x0, IFNAMSIZ);
8054 cfg->nancfg.ndi[i].in_use = false;
8055 cfg->nancfg.ndi[i].created = false;
8056 cfg->nancfg.ndi[i].nan_ndev = NULL;
8057 return i;
8058 }
8059 }
8060 return -EINVAL;
8061 }
8062
8063 struct wl_ndi_data *
8064 wl_cfgnan_get_ndi_data(struct bcm_cfg80211 *cfg, char *name)
8065 {
8066 u16 len;
8067 int i;
8068 if (!name) {
8069 return NULL;
8070 }
8071
8072 len = MIN(strlen(name), IFNAMSIZ);
8073 for (i = 0; i < NAN_MAX_NDI; i++) {
8074 if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
8075 return &cfg->nancfg.ndi[i];
8076 }
8077 }
8078 return NULL;
8079 }
8080
8081 s32
8082 wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg,
8083 struct net_device *nan_ndev)
8084 {
8085 s32 ret = BCME_OK;
8086 uint8 i = 0;
8087 for (i = 0; i < NAN_MAX_NDI; i++) {
8088 if (cfg->nancfg.ndi[i].in_use &&
8089 cfg->nancfg.ndi[i].created &&
8090 (cfg->nancfg.ndi[i].nan_ndev == nan_ndev)) {
8091 WL_INFORM_MEM(("iface name: %s, cfg->nancfg.ndi[i].nan_ndev = %p"
8092 " and nan_ndev = %p\n",
8093 (char*)cfg->nancfg.ndi[i].ifname,
8094 cfg->nancfg.ndi[i].nan_ndev, nan_ndev));
8095 ret = _wl_cfg80211_del_if(cfg, nan_ndev, NULL,
8096 (char*)cfg->nancfg.ndi[i].ifname);
8097 if (ret) {
8098 WL_ERR(("failed to del ndi [%d]\n", ret));
8099 goto exit;
8100 }
8101 /* After successful delete of interface,
8102 * clear up the ndi data
8103 */
8104 if (wl_cfgnan_del_ndi_data(cfg,
8105 (char*)cfg->nancfg.ndi[i].ifname) < 0) {
8106 WL_ERR(("Failed to find matching data for ndi:%s\n",
8107 (char*)cfg->nancfg.ndi[i].ifname));
8108 }
8109 }
8110 }
8111 exit:
8112 return ret;
8113 }
8114
8115 int
8116 wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status)
8117 {
8118 bcm_iov_batch_buf_t *nan_buf = NULL;
8119 uint16 subcmd_len;
8120 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
8121 bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
8122 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
8123 wl_nan_conf_status_t *nstatus = NULL;
8124 uint32 status;
8125 s32 ret = BCME_OK;
8126 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
8127 struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
8128 NAN_DBG_ENTER();
8129
8130 nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
8131 if (!nan_buf) {
8132 WL_ERR(("%s: memory allocation failed\n", __func__));
8133 ret = BCME_NOMEM;
8134 goto fail;
8135 }
8136
8137 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
8138 nan_buf->count = 0;
8139 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
8140 sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
8141
8142 ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
8143 sizeof(*nstatus), &subcmd_len);
8144 if (unlikely(ret)) {
8145 WL_ERR(("nan_sub_cmd check failed\n"));
8146 goto fail;
8147 }
8148
8149 nstatus = (wl_nan_conf_status_t *)sub_cmd->data;
8150 sub_cmd->id = htod16(WL_NAN_CMD_CFG_STATUS);
8151 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nstatus);
8152 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
8153 nan_buf_size -= subcmd_len;
8154 nan_buf->count = 1;
8155 nan_buf->is_set = false;
8156
8157 memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
8158 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
8159 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
8160 if (unlikely(ret) || unlikely(status)) {
8161 WL_ERR(("get nan status failed ret %d status %d\n",
8162 ret, status));
8163 goto fail;
8164 }
8165 sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
8166 /* WL_NAN_CMD_CFG_STATUS return value doesn't use xtlv package */
8167 nstatus = ((wl_nan_conf_status_t *)&sub_cmd_resp->data[0]);
8168 ret = memcpy_s(nan_status, sizeof(wl_nan_conf_status_t),
8169 nstatus, sizeof(wl_nan_conf_status_t));
8170 if (ret != BCME_OK) {
8171 WL_ERR(("Failed to copy nan status\n"));
8172 goto fail;
8173 }
8174
8175 fail:
8176 if (nan_buf) {
8177 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
8178 }
8179 NAN_DBG_EXIT();
8180 return ret;
8181 }
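/*
 * Minimal usage sketch for the status query above; illustrative only
 * (assumes a valid NAN-capable ndev) and kept out of the build:
 */
#if 0
static void wl_cfgnan_status_example(struct net_device *ndev)
{
	wl_nan_conf_status_t nstatus;

	bzero(&nstatus, sizeof(nstatus));
	if (wl_cfgnan_get_status(ndev, &nstatus) == BCME_OK) {
		/* Dump role, NMI, cluster id, master rank etc. to the console */
		wl_nan_print_status(&nstatus);
	}
}
#endif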
8182 #endif /* WL_NAN */
8183