/*
 * Neighbor Awareness Networking
 *
 * Copyright (C) 2020, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#ifdef WL_NAN
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmwifi_channels.h>
#include <nan.h>
#include <bcmiov.h>
#include <net/rtnetlink.h>

#include <wl_cfg80211.h>
#include <wl_cfgscan.h>
#include <wl_android.h>
#include <wl_cfgnan.h>

#if defined(BCMDONGLEHOST)
#include <dngl_stats.h>
#include <dhd.h>
#endif /* BCMDONGLEHOST */
#include <wl_cfgvendor.h>
#include <bcmbloom.h>
#include <wl_cfgp2p.h>
#include <wl_cfgvif.h>
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif /* RTT_SUPPORT */
#include <bcmstdlib_s.h>

#define NAN_RANGE_REQ_EVNT 1
#define NAN_RAND_MAC_RETRIES 10
#define NAN_SCAN_DWELL_TIME_DELTA_MS 10

#ifdef WL_NAN_DISC_CACHE
/* Disc Cache Parameters update Flags */
#define NAN_DISC_CACHE_PARAM_SDE_CONTROL 0x0001
static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void *data,
    u16 *disc_cache_update_flags);
static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg, uint8 local_subid);
static nan_disc_result_cache * wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg,
    uint8 remote_pubid, struct ether_addr *peer);
#endif /* WL_NAN_DISC_CACHE */

static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);
static int wl_cfgnan_get_capability(struct net_device *ndev,
    struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
    nan_event_data_t *nan_event_data);
void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
    struct ether_addr *peer_addr);
static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg);
static void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg);
static s32 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg);
static int wl_cfgnan_init(struct bcm_cfg80211 *cfg);
static int wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate);
static void wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
    nan_data_path_id ndp_id);
static void wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
    struct ether_addr *peer_addr, nan_peer_dp_state_t state);
static nan_ndp_peer_t* wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
    struct ether_addr *peer_addr);
static int wl_cfgnan_disable(struct bcm_cfg80211 *cfg);
static s32 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name);
static s32 wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name);

#ifdef RTT_SUPPORT
static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id);
static int32 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance);
static void wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *rng_inst);
static void wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *ranging_inst);
static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *rng_inst, int reason);
static s32 wl_cfgnan_handle_dp_ranging_concurrency(struct bcm_cfg80211 *cfg,
    struct ether_addr *peer, int reason);
static void wl_cfgnan_terminate_all_obsolete_ranging_sessions(struct bcm_cfg80211 *cfg);
static bool wl_ranging_geofence_session_with_peer(struct bcm_cfg80211 *cfg,
    struct ether_addr *peer_addr);
static void wl_cfgnan_reset_remove_ranging_instance(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *ranging_inst);
static void wl_cfgnan_remove_ranging_instance(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *ranging_inst);
#endif /* RTT_SUPPORT */

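/*
 * Note (added for clarity, not in the original source): the C2S() macro used
 * by the *_to_str() helpers below is defined elsewhere in this driver
 * (wl_cfg80211.h). It presumably expands roughly as
 *
 *     #define C2S(x)  case x: id2str = #x
 *
 * turning an enum value into its own name via the preprocessor stringizer,
 * so "C2S(WL_NAN_ROLE_MASTER); break;" becomes
 * "case WL_NAN_ROLE_MASTER: id2str = "WL_NAN_ROLE_MASTER"; break;".
 */
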
static const char *
nan_role_to_str(u8 role)
{
    const char *id2str;

    switch (role) {
        C2S(WL_NAN_ROLE_AUTO);
            break;
        C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC);
            break;
        C2S(WL_NAN_ROLE_NON_MASTER_SYNC);
            break;
        C2S(WL_NAN_ROLE_MASTER);
            break;
        C2S(WL_NAN_ROLE_ANCHOR_MASTER);
            break;
        default:
            id2str = "WL_NAN_ROLE_UNKNOWN";
    }

    return id2str;
}

const char *
nan_event_to_str(u16 cmd)
{
    const char *id2str;

    switch (cmd) {
        C2S(WL_NAN_EVENT_START);
            break;
        C2S(WL_NAN_EVENT_JOIN);
            break;
        C2S(WL_NAN_EVENT_ROLE);
            break;
        C2S(WL_NAN_EVENT_SCAN_COMPLETE);
            break;
        C2S(WL_NAN_EVENT_DISCOVERY_RESULT);
            break;
        C2S(WL_NAN_EVENT_REPLIED);
            break;
        C2S(WL_NAN_EVENT_TERMINATED);
            break;
        C2S(WL_NAN_EVENT_RECEIVE);
            break;
        C2S(WL_NAN_EVENT_STATUS_CHG);
            break;
        C2S(WL_NAN_EVENT_MERGE);
            break;
        C2S(WL_NAN_EVENT_STOP);
            break;
        C2S(WL_NAN_EVENT_P2P);
            break;
        C2S(WL_NAN_EVENT_WINDOW_BEGIN_P2P);
            break;
        C2S(WL_NAN_EVENT_WINDOW_BEGIN_MESH);
            break;
        C2S(WL_NAN_EVENT_WINDOW_BEGIN_IBSS);
            break;
        C2S(WL_NAN_EVENT_WINDOW_BEGIN_RANGING);
            break;
        C2S(WL_NAN_EVENT_POST_DISC);
            break;
        C2S(WL_NAN_EVENT_DATA_IF_ADD);
            break;
        C2S(WL_NAN_EVENT_DATA_PEER_ADD);
            break;
        C2S(WL_NAN_EVENT_PEER_DATAPATH_IND);
            break;
        C2S(WL_NAN_EVENT_DATAPATH_ESTB);
            break;
        C2S(WL_NAN_EVENT_SDF_RX);
            break;
        C2S(WL_NAN_EVENT_DATAPATH_END);
            break;
        C2S(WL_NAN_EVENT_BCN_RX);
            break;
        C2S(WL_NAN_EVENT_PEER_DATAPATH_RESP);
            break;
        C2S(WL_NAN_EVENT_PEER_DATAPATH_CONF);
            break;
        C2S(WL_NAN_EVENT_RNG_REQ_IND);
            break;
        C2S(WL_NAN_EVENT_RNG_RPT_IND);
            break;
        C2S(WL_NAN_EVENT_RNG_TERM_IND);
            break;
        C2S(WL_NAN_EVENT_PEER_DATAPATH_SEC_INST);
            break;
        C2S(WL_NAN_EVENT_TXS);
            break;
        C2S(WL_NAN_EVENT_DW_START);
            break;
        C2S(WL_NAN_EVENT_DW_END);
            break;
        C2S(WL_NAN_EVENT_CHAN_BOUNDARY);
            break;
        C2S(WL_NAN_EVENT_MR_CHANGED);
            break;
        C2S(WL_NAN_EVENT_RNG_RESP_IND);
            break;
        C2S(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF);
            break;
        C2S(WL_NAN_EVENT_PEER_SCHED_REQ);
            break;
        C2S(WL_NAN_EVENT_PEER_SCHED_RESP);
            break;
        C2S(WL_NAN_EVENT_PEER_SCHED_CONF);
            break;
        C2S(WL_NAN_EVENT_SENT_DATAPATH_END);
            break;
        C2S(WL_NAN_EVENT_SLOT_START);
            break;
        C2S(WL_NAN_EVENT_SLOT_END);
            break;
        C2S(WL_NAN_EVENT_HOST_ASSIST_REQ);
            break;
        C2S(WL_NAN_EVENT_RX_MGMT_FRM);
            break;
        C2S(WL_NAN_EVENT_DISC_CACHE_TIMEOUT);
            break;
        C2S(WL_NAN_EVENT_OOB_AF_TXS);
            break;
        C2S(WL_NAN_EVENT_OOB_AF_RX);
            break;
        C2S(WL_NAN_EVENT_INVALID);
            break;

        default:
            id2str = "WL_NAN_EVENT_UNKNOWN";
    }

    return id2str;
}

static const char *
nan_frm_type_to_str(u16 frm_type)
{
    const char *id2str;

    switch (frm_type) {
        C2S(WL_NAN_FRM_TYPE_PUBLISH);
            break;
        C2S(WL_NAN_FRM_TYPE_SUBSCRIBE);
            break;
        C2S(WL_NAN_FRM_TYPE_FOLLOWUP);
            break;

        C2S(WL_NAN_FRM_TYPE_DP_REQ);
            break;
        C2S(WL_NAN_FRM_TYPE_DP_RESP);
            break;
        C2S(WL_NAN_FRM_TYPE_DP_CONF);
            break;
        C2S(WL_NAN_FRM_TYPE_DP_INSTALL);
            break;
        C2S(WL_NAN_FRM_TYPE_DP_END);
            break;

        C2S(WL_NAN_FRM_TYPE_SCHED_REQ);
            break;
        C2S(WL_NAN_FRM_TYPE_SCHED_RESP);
            break;
        C2S(WL_NAN_FRM_TYPE_SCHED_CONF);
            break;
        C2S(WL_NAN_FRM_TYPE_SCHED_UPD);
            break;

        C2S(WL_NAN_FRM_TYPE_RNG_REQ);
            break;
        C2S(WL_NAN_FRM_TYPE_RNG_RESP);
            break;
        C2S(WL_NAN_FRM_TYPE_RNG_TERM);
            break;
        C2S(WL_NAN_FRM_TYPE_RNG_REPORT);
            break;

        default:
            id2str = "WL_NAN_FRM_TYPE_UNKNOWN";
    }

    return id2str;
}

static const char *
nan_event_cause_to_str(u8 cause)
{
    const char *id2str;

    switch (cause) {
        C2S(WL_NAN_DP_TERM_WITH_INACTIVITY);
            break;
        C2S(WL_NAN_DP_TERM_WITH_FSM_DESTROY);
            break;
        C2S(WL_NAN_DP_TERM_WITH_PEER_DP_END);
            break;
        C2S(WL_NAN_DP_TERM_WITH_STALE_NDP);
            break;
        C2S(WL_NAN_DP_TERM_WITH_DISABLE);
            break;
        C2S(WL_NAN_DP_TERM_WITH_NDI_DEL);
            break;
        C2S(WL_NAN_DP_TERM_WITH_PEER_HB_FAIL);
            break;
        C2S(WL_NAN_DP_TERM_WITH_HOST_IOVAR);
            break;
        C2S(WL_NAN_DP_TERM_WITH_ESTB_FAIL);
            break;
        C2S(WL_NAN_DP_TERM_WITH_SCHED_REJECT);
            break;

        default:
            id2str = "WL_NAN_EVENT_CAUSE_UNKNOWN";
    }

    return id2str;
}

static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
    struct bcm_cfg80211 *cfg, bcm_iov_batch_buf_t *nan_buf,
    uint16 nan_buf_size, uint32 *status, uint8 *resp_buf,
    uint16 resp_buf_len);
int
wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
{
    s32 ret = BCME_OK;
    uint8 i = 0;
    wl_nancfg_t *nancfg = cfg->nancfg;

    if (p_inst_id == NULL) {
        WL_ERR(("Invalid arguments\n"));
        ret = -EINVAL;
        goto exit;
    }

    if (nancfg->inst_id_start == NAN_ID_MAX) {
        WL_ERR(("Consumed all IDs, resetting the counter\n"));
        nancfg->inst_id_start = 0;
    }

    for (i = nancfg->inst_id_start; i < NAN_ID_MAX; i++) {
        if (isclr(nancfg->svc_inst_id_mask, i)) {
            setbit(nancfg->svc_inst_id_mask, i);
            *p_inst_id = i + 1;
            nancfg->inst_id_start = *p_inst_id;
            WL_DBG(("Instance ID=%d\n", *p_inst_id));
            goto exit;
        }
    }
    WL_ERR(("Allocated maximum IDs\n"));
    ret = BCME_NORESOURCE;
exit:
    return ret;
}

int
wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id)
{
    s32 ret = BCME_OK;
    WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__, inst_id));
    clrbit(cfg->nancfg->svc_inst_id_mask, inst_id - 1);
    return ret;
}
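
/*
 * Usage sketch (added for illustration, not part of the original source):
 * instance IDs handed out by wl_cfgnan_generate_inst_id() are 1-based, while
 * svc_inst_id_mask is indexed 0-based, so allocation and release must stay
 * paired:
 *
 *     uint8 inst_id;
 *     if (wl_cfgnan_generate_inst_id(cfg, &inst_id) == BCME_OK) {
 *         // ... use inst_id for the service instance ...
 *         wl_cfgnan_remove_inst_id(cfg, inst_id); // clears bit (inst_id - 1)
 *     }
 */
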
s32 wl_cfgnan_parse_sdea_data(osl_t *osh, const uint8 *p_attr,
    uint16 len, nan_event_data_t *tlv_data)
{
    const wifi_nan_svc_desc_ext_attr_t *nan_svc_desc_ext_attr = NULL;
    uint8 offset;
    s32 ret = BCME_OK;

    /* service descriptor ext attributes */
    nan_svc_desc_ext_attr = (const wifi_nan_svc_desc_ext_attr_t *)p_attr;

    /* attribute ID */
    WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr->id));

    /* attribute length */
    WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
    if (nan_svc_desc_ext_attr->instance_id == tlv_data->pub_id) {
        tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
    }
    offset = sizeof(*nan_svc_desc_ext_attr);
    if (offset > len) {
        WL_ERR(("Invalid event buffer len\n"));
        ret = BCME_BUFTOOSHORT;
        goto fail;
    }
    p_attr += offset;
    len -= offset;

    if (tlv_data->sde_control_flag & NAN_SC_RANGE_LIMITED) {
        WL_TRACE(("> svc_control: range limited present\n"));
    }
    if (tlv_data->sde_control_flag & NAN_SDE_CF_SVC_UPD_IND_PRESENT) {
        WL_TRACE(("> svc_control: sdea svc specific info present\n"));
        tlv_data->sde_svc_info.dlen = (p_attr[1] | (p_attr[2] << 8));
        WL_TRACE(("> sdea svc info len: 0x%02x\n", tlv_data->sde_svc_info.dlen));
        if (!tlv_data->sde_svc_info.dlen ||
            tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
            /* must be able to handle null msg which is not error */
            tlv_data->sde_svc_info.dlen = 0;
            WL_ERR(("sde data length is invalid\n"));
            ret = BCME_BADLEN;
            goto fail;
        }

        if (tlv_data->sde_svc_info.dlen > 0) {
            tlv_data->sde_svc_info.data = MALLOCZ(osh, tlv_data->sde_svc_info.dlen);
            if (!tlv_data->sde_svc_info.data) {
                WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
                tlv_data->sde_svc_info.dlen = 0;
                ret = BCME_NOMEM;
                goto fail;
            }
            /* advance read pointer, accounting for the size of the
             * Service Update Indicator
             */
            offset = sizeof(tlv_data->sde_svc_info.dlen) - 1;
            if (offset > len) {
                WL_ERR(("Invalid event buffer len\n"));
                ret = BCME_BUFTOOSHORT;
                goto fail;
            }
            p_attr += offset;
            len -= offset;
            ret = memcpy_s(tlv_data->sde_svc_info.data, tlv_data->sde_svc_info.dlen,
                p_attr, tlv_data->sde_svc_info.dlen);
            if (ret != BCME_OK) {
                WL_ERR(("Failed to copy sde_svc_info\n"));
                goto fail;
            }
        } else {
            /* must be able to handle null msg which is not error */
            tlv_data->sde_svc_info.dlen = 0;
            WL_DBG(("%s: sdea svc info length is zero, null info data\n",
                __FUNCTION__));
        }
    }
    return ret;
fail:
    if (tlv_data->sde_svc_info.data) {
        MFREE(osh, tlv_data->sde_svc_info.data,
            tlv_data->sde_svc_info.dlen);
        tlv_data->sde_svc_info.data = NULL;
    }

    WL_DBG(("Parse SDEA event data, status = %d\n", ret));
    return ret;
}

/*
 * This attribute contains some mandatory fields and some optional fields
 * depending on the content of the service discovery request.
 */
s32
wl_cfgnan_parse_sda_data(osl_t *osh, const uint8 *p_attr,
    uint16 len, nan_event_data_t *tlv_data)
{
    uint8 svc_control = 0, offset = 0;
    s32 ret = BCME_OK;
    const wifi_nan_svc_descriptor_attr_t *nan_svc_desc_attr = NULL;

    /* service descriptor attributes */
    nan_svc_desc_attr = (const wifi_nan_svc_descriptor_attr_t *)p_attr;
    /* attribute ID */
    WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr->id));

    /* attribute length */
    WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));

    /* service ID */
    ret = memcpy_s(tlv_data->svc_name, sizeof(tlv_data->svc_name),
        nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy svc_hash_name\n"));
        return ret;
    }
    WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));

    /* local instance ID */
    tlv_data->local_inst_id = nan_svc_desc_attr->instance_id;
    WL_TRACE(("> local instance id: 0x%02x\n", tlv_data->local_inst_id));

    /* requestor instance ID */
    tlv_data->requestor_id = nan_svc_desc_attr->requestor_id;
    WL_TRACE(("> requestor id: 0x%02x\n", tlv_data->requestor_id));

    /* service control */
    svc_control = nan_svc_desc_attr->svc_control;
    if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) {
        WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
    } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE) {
        WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
    } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_FOLLOWUP) {
        WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
    }
    offset = sizeof(*nan_svc_desc_attr);
    if (offset > len) {
        WL_ERR(("Invalid event buffer len\n"));
        ret = BCME_BUFTOOSHORT;
        goto fail;
    }
    p_attr += offset;
    len -= offset;

    /*
     * optional fields:
     * must appear in order, following the service descriptor attribute format
     */

    /* binding bitmap */
    if (svc_control & NAN_SC_BINDING_BITMAP_PRESENT) {
        uint16 bitmap = 0;
        WL_TRACE(("> svc_control: binding bitmap present\n"));

        /* Copy binding bitmap */
        ret = memcpy_s(&bitmap, sizeof(bitmap),
            p_attr, NAN_BINDING_BITMAP_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy bit map\n"));
            return ret;
        }
        WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));

        if (NAN_BINDING_BITMAP_LEN > len) {
            WL_ERR(("Invalid event buffer len\n"));
            ret = BCME_BUFTOOSHORT;
            goto fail;
        }
        p_attr += NAN_BINDING_BITMAP_LEN;
        len -= NAN_BINDING_BITMAP_LEN;
    }

    /* matching filter */
    if (svc_control & NAN_SC_MATCHING_FILTER_PRESENT) {
        WL_TRACE(("> svc_control: matching filter present\n"));

        tlv_data->tx_match_filter.dlen = *p_attr++;
        WL_TRACE(("> matching filter len: 0x%02x\n",
            tlv_data->tx_match_filter.dlen));

        if (!tlv_data->tx_match_filter.dlen ||
            tlv_data->tx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
            tlv_data->tx_match_filter.dlen = 0;
            WL_ERR(("tx match filter length is invalid\n"));
            ret = -EINVAL;
            goto fail;
        }
        tlv_data->tx_match_filter.data =
            MALLOCZ(osh, tlv_data->tx_match_filter.dlen);
        if (!tlv_data->tx_match_filter.data) {
            WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
            tlv_data->tx_match_filter.dlen = 0;
            ret = -ENOMEM;
            goto fail;
        }
        ret = memcpy_s(tlv_data->tx_match_filter.data, tlv_data->tx_match_filter.dlen,
            p_attr, tlv_data->tx_match_filter.dlen);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy tx match filter data\n"));
            goto fail;
        }
        /* advance read pointer */
        offset = tlv_data->tx_match_filter.dlen;
        if (offset > len) {
            WL_ERR(("Invalid event buffer\n"));
            ret = BCME_BUFTOOSHORT;
            goto fail;
        }
        p_attr += offset;
        len -= offset;
    }

    /* service response filter */
    if (svc_control & NAN_SC_SR_FILTER_PRESENT) {
        WL_TRACE(("> svc_control: service response filter present\n"));

        tlv_data->rx_match_filter.dlen = *p_attr++;
        WL_TRACE(("> sr match filter len: 0x%02x\n",
            tlv_data->rx_match_filter.dlen));

        if (!tlv_data->rx_match_filter.dlen ||
            tlv_data->rx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
            tlv_data->rx_match_filter.dlen = 0;
            WL_ERR(("%s: sr matching filter length is invalid\n",
                __FUNCTION__));
            ret = BCME_BADLEN;
            goto fail;
        }
        tlv_data->rx_match_filter.data =
            MALLOCZ(osh, tlv_data->rx_match_filter.dlen);
        if (!tlv_data->rx_match_filter.data) {
            WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
            tlv_data->rx_match_filter.dlen = 0;
            ret = BCME_NOMEM;
            goto fail;
        }

        ret = memcpy_s(tlv_data->rx_match_filter.data, tlv_data->rx_match_filter.dlen,
            p_attr, tlv_data->rx_match_filter.dlen);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy rx match filter data\n"));
            goto fail;
        }

        /* advance read pointer */
        offset = tlv_data->rx_match_filter.dlen;
        if (offset > len) {
            WL_ERR(("Invalid event buffer len\n"));
            ret = BCME_BUFTOOSHORT;
            goto fail;
        }
        p_attr += offset;
        len -= offset;
    }

    /* service specific info */
    if (svc_control & NAN_SC_SVC_INFO_PRESENT) {
        WL_TRACE(("> svc_control: svc specific info present\n"));

        tlv_data->svc_info.dlen = *p_attr++;
        WL_TRACE(("> svc info len: 0x%02x\n", tlv_data->svc_info.dlen));

        if (!tlv_data->svc_info.dlen ||
            tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
            /* must be able to handle null msg which is not error */
            tlv_data->svc_info.dlen = 0;
            WL_ERR(("svc info data length is invalid\n"));
            ret = BCME_BADLEN;
            goto fail;
        }

        if (tlv_data->svc_info.dlen > 0) {
            tlv_data->svc_info.data =
                MALLOCZ(osh, tlv_data->svc_info.dlen);
            if (!tlv_data->svc_info.data) {
                WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
                tlv_data->svc_info.dlen = 0;
                ret = BCME_NOMEM;
                goto fail;
            }
            ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
                p_attr, tlv_data->svc_info.dlen);
            if (ret != BCME_OK) {
                WL_ERR(("Failed to copy svc info\n"));
                goto fail;
            }

            /* advance read pointer */
            offset = tlv_data->svc_info.dlen;
            if (offset > len) {
                WL_ERR(("Invalid event buffer len\n"));
                ret = BCME_BUFTOOSHORT;
                goto fail;
            }
            p_attr += offset;
            len -= offset;
        } else {
            /* must be able to handle null msg which is not error */
            tlv_data->svc_info.dlen = 0;
            WL_TRACE(("%s: svc info length is zero, null info data\n",
                __FUNCTION__));
        }
    }

    /*
     * discovery range limited:
     * If set to 1, the pub/sub msg is limited in range to close proximity.
     * If set to 0, the pub/sub msg is not limited in range.
     * Valid only when the message is either a publish or a subscribe.
     */
    if (svc_control & NAN_SC_RANGE_LIMITED) {
        if (((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) ||
            ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE)) {
            WL_TRACE(("> svc_control: range limited present\n"));
        } else {
            WL_TRACE(("range limited is only valid on pub or sub\n"));
        }

        /* TODO: send up */

        /* advance read pointer */
        p_attr++;
    }
    return ret;
fail:
    if (tlv_data->tx_match_filter.data) {
        MFREE(osh, tlv_data->tx_match_filter.data,
            tlv_data->tx_match_filter.dlen);
        tlv_data->tx_match_filter.data = NULL;
    }
    if (tlv_data->rx_match_filter.data) {
        MFREE(osh, tlv_data->rx_match_filter.data,
            tlv_data->rx_match_filter.dlen);
        tlv_data->rx_match_filter.data = NULL;
    }
    if (tlv_data->svc_info.data) {
        MFREE(osh, tlv_data->svc_info.data,
            tlv_data->svc_info.dlen);
        tlv_data->svc_info.data = NULL;
    }

    WL_DBG(("Parse SDA event data, status = %d\n", ret));
    return ret;
}
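
/*
 * Layout sketch (added for reference; derived from the parsing code above,
 * not a normative description of the spec): a service descriptor attribute
 * as consumed by wl_cfgnan_parse_sda_data() looks like
 *
 *     [ id | len | svc_hash(6) | instance_id | requestor_id | svc_control ]
 *
 * followed by optional fields, in this order, each gated by a svc_control
 * bit:
 *
 *     binding bitmap (2 bytes)             NAN_SC_BINDING_BITMAP_PRESENT
 *     matching filter (1-byte len + data)  NAN_SC_MATCHING_FILTER_PRESENT
 *     service response filter (same form)  NAN_SC_SR_FILTER_PRESENT
 *     service specific info (same form)    NAN_SC_SVC_INFO_PRESENT
 */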

static s32
wl_cfgnan_parse_sd_attr_data(osl_t *osh, uint16 len, const uint8 *data,
    nan_event_data_t *tlv_data, uint16 type)
{
    const uint8 *p_attr = data;
    uint16 offset = 0;
    s32 ret = BCME_OK;
    const wl_nan_event_disc_result_t *ev_disc = NULL;
    const wl_nan_event_replied_t *ev_replied = NULL;
    const wl_nan_ev_receive_t *ev_fup = NULL;

    /*
     * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
     */
    if (type == WL_NAN_XTLV_SD_DISC_RESULTS) {
        u8 iter;
        ev_disc = (const wl_nan_event_disc_result_t *)p_attr;

        WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));

        tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
        tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
        tlv_data->publish_rssi = ev_disc->publish_rssi;
        ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
            &ev_disc->pub_mac, ETHER_ADDR_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy remote nmi\n"));
            goto fail;
        }

        WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
        WL_TRACE(("subscribe id: %d\n", ev_disc->sub_id));
        WL_TRACE(("publish mac addr: " MACDBG "\n",
            MAC2STRDBG(ev_disc->pub_mac.octet)));
        WL_TRACE(("publish rssi: %d\n", (int8)ev_disc->publish_rssi));
        WL_TRACE(("attribute no: %d\n", ev_disc->attr_num));
        WL_TRACE(("attribute len: %d\n", (uint16)ev_disc->attr_list_len));

        /* advance to the service descriptor */
        offset = OFFSETOF(wl_nan_event_disc_result_t, attr_list[0]);
        if (offset > len) {
            WL_ERR(("Invalid event buffer len\n"));
            ret = BCME_BUFTOOSHORT;
            goto fail;
        }
        p_attr += offset;
        len -= offset;

        iter = ev_disc->attr_num;
        while (iter) {
            if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
                WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
                ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
                if (unlikely(ret)) {
                    WL_ERR(("wl_cfgnan_parse_sda_data failed, "
                        "error = %d\n", ret));
                    goto fail;
                }
            }

            if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
                WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
                ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
                if (unlikely(ret)) {
                    WL_ERR(("wl_cfgnan_parse_sdea_data failed, "
                        "error = %d\n", ret));
                    goto fail;
                }
            }
            offset = (sizeof(*p_attr) +
                sizeof(ev_disc->attr_list_len) +
                (p_attr[1] | (p_attr[2] << 8)));
            if (offset > len) {
                WL_ERR(("Invalid event buffer len\n"));
                ret = BCME_BUFTOOSHORT;
                goto fail;
            }
            p_attr += offset;
            len -= offset;
            iter--;
        }
    } else if (type == WL_NAN_XTLV_SD_FUP_RECEIVED) {
        uint8 iter;
        ev_fup = (const wl_nan_ev_receive_t *)p_attr;

        WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));

        tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
        tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
        tlv_data->fup_rssi = ev_fup->fup_rssi;
        ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
            &ev_fup->remote_addr, ETHER_ADDR_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy remote nmi\n"));
            goto fail;
        }

        WL_TRACE(("local id: %d\n", ev_fup->local_id));
        WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
        WL_TRACE(("peer mac addr: " MACDBG "\n",
            MAC2STRDBG(ev_fup->remote_addr.octet)));
        WL_TRACE(("peer rssi: %d\n", (int8)ev_fup->fup_rssi));
        WL_TRACE(("attribute no: %d\n", ev_fup->attr_num));
        WL_TRACE(("attribute len: %d\n", ev_fup->attr_list_len));

        /* advance to the service descriptor which is attr_list[0] */
        offset = OFFSETOF(wl_nan_ev_receive_t, attr_list[0]);
        if (offset > len) {
            WL_ERR(("Invalid event buffer len\n"));
            ret = BCME_BUFTOOSHORT;
            goto fail;
        }
        p_attr += offset;
        len -= offset;

        iter = ev_fup->attr_num;
        while (iter) {
            if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
                WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
                ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
                if (unlikely(ret)) {
                    WL_ERR(("wl_cfgnan_parse_sda_data failed, "
                        "error = %d\n", ret));
                    goto fail;
                }
            }

            if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
                WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
                ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
                if (unlikely(ret)) {
                    WL_ERR(("wl_cfgnan_parse_sdea_data failed, "
                        "error = %d\n", ret));
                    goto fail;
                }
            }
            offset = (sizeof(*p_attr) +
                sizeof(ev_fup->attr_list_len) +
                (p_attr[1] | (p_attr[2] << 8)));
            if (offset > len) {
                WL_ERR(("Invalid event buffer len\n"));
                ret = BCME_BUFTOOSHORT;
                goto fail;
            }
            p_attr += offset;
            len -= offset;
            iter--;
        }
    } else if (type == WL_NAN_XTLV_SD_SDF_RX) {
        /*
         * SDF followed by nan2_pub_act_frame_t and wifi_nan_svc_descriptor_attr_t,
         * and svc controls are optional.
         */
        const nan2_pub_act_frame_t *nan_pub_af =
            (const nan2_pub_act_frame_t *)p_attr;

        WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));

        /* nan2_pub_act_frame_t */
        WL_TRACE(("pub category: 0x%02x\n", nan_pub_af->category_id));
        WL_TRACE(("pub action: 0x%02x\n", nan_pub_af->action_field));
        WL_TRACE(("nan oui: %2x-%2x-%2x\n",
            nan_pub_af->oui[0], nan_pub_af->oui[1], nan_pub_af->oui[2]));
        WL_TRACE(("oui type: 0x%02x\n", nan_pub_af->oui_type));
        WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af->oui_sub_type));

        offset = sizeof(*nan_pub_af);
        if (offset > len) {
            WL_ERR(("Invalid event buffer len\n"));
            ret = BCME_BUFTOOSHORT;
            goto fail;
        }
        p_attr += offset;
        len -= offset;
    } else if (type == WL_NAN_XTLV_SD_REPLIED) {
        ev_replied = (const wl_nan_event_replied_t *)p_attr;

        WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));

        tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
        tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
        tlv_data->sub_rssi = ev_replied->sub_rssi;
        ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
            &ev_replied->sub_mac, ETHER_ADDR_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy remote nmi\n"));
            goto fail;
        }

        WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
        WL_TRACE(("subscribe id: %d\n", ev_replied->sub_id));
        WL_TRACE(("Subscriber mac addr: " MACDBG "\n",
            MAC2STRDBG(ev_replied->sub_mac.octet)));
        WL_TRACE(("subscribe rssi: %d\n", (int8)ev_replied->sub_rssi));
        WL_TRACE(("attribute no: %d\n", ev_replied->attr_num));
        WL_TRACE(("attribute len: %d\n", (uint16)ev_replied->attr_list_len));

        /* advance to the service descriptor which is attr_list[0] */
        offset = OFFSETOF(wl_nan_event_replied_t, attr_list[0]);
        if (offset > len) {
            WL_ERR(("Invalid event buffer len\n"));
            ret = BCME_BUFTOOSHORT;
            goto fail;
        }
        p_attr += offset;
        len -= offset;
        ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
        if (unlikely(ret)) {
            WL_ERR(("wl_cfgnan_parse_sda_data failed, "
                "error = %d\n", ret));
        }
    }

fail:
    return ret;
}
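
/*
 * Note (added for clarity): each attribute in attr_list[] is laid out as a
 * 1-byte ID followed by a 2-byte little-endian length and then the body,
 * which is why the iteration loops above advance by
 *
 *     sizeof(*p_attr) + sizeof(attr_list_len) + (p_attr[1] | (p_attr[2] << 8))
 *
 * i.e. 1 (ID) + 2 (length field) + body length.
 */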

/* Based on each case of tlv type id, fill into tlv data */
static int
wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
{
    nan_parse_event_ctx_t *ctx_tlv_data = ((nan_parse_event_ctx_t *)(ctx));
    nan_event_data_t *tlv_data = ((nan_event_data_t *)(ctx_tlv_data->nan_evt_data));
    int ret = BCME_OK;

    NAN_DBG_ENTER();
    if (!data || !len) {
        WL_ERR(("data length is invalid\n"));
        ret = BCME_ERROR;
        goto fail;
    }

    switch (type) {
    /*
     * Need to parse service descriptor attributes, including service
     * control, when a Follow up or a Discovery result comes in.
     */
    case WL_NAN_XTLV_SD_FUP_RECEIVED:
    case WL_NAN_XTLV_SD_DISC_RESULTS: {
        ret = wl_cfgnan_parse_sd_attr_data(ctx_tlv_data->cfg->osh,
            len, data, tlv_data, type);
        break;
    }
    case WL_NAN_XTLV_SD_NDPE_TLV_LIST:
        /* Intentional fall-through: the NDPE TLV list and SVC INFO are
         * sent to upper layers in the same container.
         */
    case WL_NAN_XTLV_SD_SVC_INFO: {
        tlv_data->svc_info.data =
            MALLOCZ(ctx_tlv_data->cfg->osh, len);
        if (!tlv_data->svc_info.data) {
            WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
            tlv_data->svc_info.dlen = 0;
            ret = BCME_NOMEM;
            goto fail;
        }
        tlv_data->svc_info.dlen = len;
        ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
            data, tlv_data->svc_info.dlen);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy svc info data\n"));
            goto fail;
        }
        break;
    }
    case WL_NAN_XTLV_SD_NAN_AF:
    case WL_NAN_XTLV_DAM_NA_ATTR:
        /* No action - intentionally ignored to avoid prints when these
         * events are received.
         */
        break;
    default:
        WL_ERR(("Not available for tlv type = 0x%x\n", type));
        ret = BCME_ERROR;
        break;
    }
fail:
    NAN_DBG_EXIT();
    return ret;
}

int
wl_cfg_nan_check_cmd_len(uint16 nan_iov_len, uint16 data_size,
    uint16 *subcmd_len)
{
    s32 ret = BCME_OK;

    if (subcmd_len != NULL) {
        *subcmd_len = OFFSETOF(bcm_iov_batch_subcmd_t, data) +
            ALIGN_SIZE(data_size, 4);
        if (*subcmd_len > nan_iov_len) {
            WL_ERR(("%s: Buf short, requested:%d, available:%d\n",
                __FUNCTION__, *subcmd_len, nan_iov_len));
            ret = BCME_NOMEM;
        }
    } else {
        WL_ERR(("Invalid subcmd_len\n"));
        ret = BCME_ERROR;
    }
    return ret;
}
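
/*
 * Worked example (added for illustration; the values are hypothetical): for
 * a 6-byte payload such as an ether address, wl_cfg_nan_check_cmd_len()
 * computes
 *
 *     *subcmd_len = OFFSETOF(bcm_iov_batch_subcmd_t, data) + ALIGN_SIZE(6, 4)
 *
 * i.e. the fixed sub-command header plus the payload rounded up to 8 bytes,
 * and only then checks that the remaining batch buffer has room for it.
 */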

int
wl_cfgnan_config_eventmask(struct net_device *ndev, struct bcm_cfg80211 *cfg,
    uint8 event_ind_flag, bool disable_events)
{
    bcm_iov_batch_buf_t *nan_buf = NULL;
    s32 ret = BCME_OK;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint16 subcmd_len;
    uint32 status;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
    uint8 event_mask[WL_NAN_EVMASK_EXTN_LEN];
    wl_nan_evmask_extn_t *evmask;
    uint16 evmask_cmd_len;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

    NAN_DBG_ENTER();

    /* same src and dest len here */
    bzero(event_mask, sizeof(event_mask));
    evmask_cmd_len = OFFSETOF(wl_nan_evmask_extn_t, evmask) +
        sizeof(event_mask);
    ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
    if (unlikely(ret)) {
        WL_ERR(("nan event enable failed, error = %d\n", ret));
        goto fail;
    }

    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
    sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);

    ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
        evmask_cmd_len, &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }

    sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
    sub_cmd->len = sizeof(sub_cmd->u.options) + evmask_cmd_len;
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
    evmask->ver = WL_NAN_EVMASK_EXTN_VER;
    evmask->len = WL_NAN_EVMASK_EXTN_LEN;
    nan_buf_size -= subcmd_len;
    nan_buf->count = 1;

    if (disable_events) {
        WL_DBG(("Disabling all nan events, except stop event\n"));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
    } else {
        /*
         * Android framework event mask configuration.
         */
        nan_buf->is_set = false;
        memset(resp_buf, 0, sizeof(resp_buf));
        ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
            (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
        if (unlikely(ret) || unlikely(status)) {
            WL_ERR(("get nan event mask failed ret %d status %d\n",
                ret, status));
            goto fail;
        }
        sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
        evmask = (wl_nan_evmask_extn_t *)sub_cmd_resp->data;

        /* check the response buff */
        /* same src and dest len here */
        (void)memcpy_s(&event_mask, WL_NAN_EVMASK_EXTN_LEN,
            (uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN);

        if (event_ind_flag) {
            /* FIXME: BIT0 - Disable disc mac addr change event indication */
            if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
                WL_DBG(("Need to add disc mac addr change event\n"));
            }
            /* BIT2 - Disable nan cluster join indication (OTA). */
            if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
                clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_MERGE));
            }
        }

        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISCOVERY_RESULT));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RECEIVE));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TERMINATED));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TXS));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_DATAPATH_IND));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_ESTB));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_END));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_REQ_IND));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_TERM_IND));
        setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISC_CACHE_TIMEOUT));
        /* Disable below events by default */
        clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF));
        clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_RPT_IND));
        clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DW_END));
    }

    nan_buf->is_set = true;
    evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
    /* same src and dest len here */
    (void)memcpy_s((uint8*)&evmask->evmask, sizeof(event_mask),
        &event_mask, sizeof(event_mask));

    nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
        (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(status)) {
        WL_ERR(("set nan event mask failed ret %d status %d\n", ret, status));
        goto fail;
    }
    WL_DBG(("set nan event mask successful\n"));

fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }
    NAN_DBG_EXIT();
    return ret;
}

static int
wl_cfgnan_set_nan_avail(struct net_device *ndev,
    struct bcm_cfg80211 *cfg, nan_avail_cmd_data *cmd_data, uint8 avail_type)
{
    bcm_iov_batch_buf_t *nan_buf = NULL;
    s32 ret = BCME_OK;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    wl_nan_iov_t *nan_iov_data = NULL;
    wl_avail_t *avail = NULL;
    wl_avail_entry_t *entry; /* used for filling entry structure */
    uint8 *p; /* tracking pointer */
    uint8 i;
    u32 status;
    int c;
    char ndc_id[ETHER_ADDR_LEN] = { 0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0 };
    dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
    char *a = WL_AVAIL_BIT_MAP;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

    NAN_DBG_ENTER();

    /* Do not disturb avail if dam is supported */
    if (FW_SUPPORTED(dhdp, autodam)) {
        WL_DBG(("DAM is supported, avail modification not allowed\n"));
        return ret;
    }

    if (avail_type < WL_AVAIL_LOCAL || avail_type > WL_AVAIL_TYPE_MAX) {
        WL_ERR(("Invalid availability type\n"));
        ret = BCME_USAGE_ERROR;
        goto fail;
    }

    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
    if (!nan_iov_data) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
    nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
        sizeof(*avail), &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }
    avail = (wl_avail_t *)sub_cmd->data;

    /* populate wl_avail_type */
    avail->flags = avail_type;
    if (avail_type == WL_AVAIL_RANGING) {
        ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
            &cmd_data->peer_nmi, ETHER_ADDR_LEN);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy peer nmi\n"));
            goto fail;
        }
    }

    sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
    sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

    nan_buf->is_set = false;
    nan_buf->count++;
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);

    WL_TRACE(("Read wl nan avail status\n"));

    bzero(resp_buf, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
        (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret)) {
        WL_ERR(("Get nan avail failed ret %d, status %d\n", ret, status));
        goto fail;
    }

    if (status == BCME_NOTFOUND) {
        nan_buf->count = 0;
        nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
        nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

        sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

        avail = (wl_avail_t *)sub_cmd->data;
        p = avail->entry;

        /* populate wl_avail fields */
        avail->length = OFFSETOF(wl_avail_t, entry);
        avail->flags = avail_type;
        avail->num_entries = 0;
        avail->id = 0;
        entry = (wl_avail_entry_t*)p;
        entry->flags = WL_AVAIL_ENTRY_COM;

        /* set default values for optional parameters */
        entry->start_offset = 0;
        entry->u.band = 0;

        if (cmd_data->avail_period) {
            entry->period = cmd_data->avail_period;
        } else {
            entry->period = WL_AVAIL_PERIOD_1024;
        }

        if (cmd_data->duration != NAN_BAND_INVALID) {
            entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
                (cmd_data->duration << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
        } else {
            entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
                (WL_AVAIL_BIT_DUR_16 << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
        }
        entry->bitmap_len = 0;

        if (avail_type == WL_AVAIL_LOCAL) {
            entry->flags |= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT;
            /* Check for 5g support, based on that choose 5g channel */
            if (cfg->nancfg->support_5g) {
                entry->u.channel_info =
                    htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_5G,
                        WL_AVAIL_BANDWIDTH_5G));
            } else {
                entry->u.channel_info =
                    htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_2G,
                        WL_AVAIL_BANDWIDTH_2G));
            }
            entry->flags = htod16(entry->flags);
        }

        if (cfg->nancfg->support_5g) {
            a = WL_5G_AVAIL_BIT_MAP;
        }

        /* point to bitmap value for processing */
        if (cmd_data->bmap) {
            for (c = (WL_NAN_EVENT_CLEAR_BIT - 1); c >= 0; c--) {
                i = cmd_data->bmap >> c;
                if (i & 1) {
                    setbit(entry->bitmap, (WL_NAN_EVENT_CLEAR_BIT - c - 1));
                }
            }
        } else {
            for (i = 0; i < strlen(WL_AVAIL_BIT_MAP); i++) {
                if (*a == '1') {
                    setbit(entry->bitmap, i);
                }
                a++;
            }
        }

        /* account for partially filled most significant byte */
        entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
        if (avail_type == WL_AVAIL_NDC) {
            ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
                ndc_id, ETHER_ADDR_LEN);
            if (ret != BCME_OK) {
                WL_ERR(("Failed to copy ndc id\n"));
                goto fail;
            }
        } else if (avail_type == WL_AVAIL_RANGING) {
            ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
                &cmd_data->peer_nmi, ETHER_ADDR_LEN);
            if (ret != BCME_OK) {
                WL_ERR(("Failed to copy peer nmi\n"));
                goto fail;
            }
        }

        /* update wl_avail and populate wl_avail_entry */
        entry->length = OFFSETOF(wl_avail_entry_t, bitmap) + entry->bitmap_len;
        avail->num_entries++;
        avail->length += entry->length;
        /* advance pointer for next entry */
        p += entry->length;

        /* convert to dongle endianness */
        entry->length = htod16(entry->length);
        entry->start_offset = htod16(entry->start_offset);
        entry->u.channel_info = htod32(entry->u.channel_info);
        entry->flags = htod16(entry->flags);
        /* update avail_len only if
         * there are avail entries
         */
        if (avail->num_entries) {
            nan_iov_data->nan_iov_len -= avail->length;
            avail->length = htod16(avail->length);
            avail->flags = htod16(avail->flags);
        }
        avail->length = htod16(avail->length);

        sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
        sub_cmd->len = sizeof(sub_cmd->u.options) + avail->length;
        sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

        nan_buf->is_set = true;
        nan_buf->count++;

        /* Reduce the iov_len size by subcmd_len */
        nan_iov_data->nan_iov_len -= subcmd_len;
        nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);

        ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
            (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
        if (unlikely(ret) || unlikely(status)) {
            WL_ERR(("set nan avail failed ret %d status %d\n", ret, status));
            ret = status;
            goto fail;
        }
    } else if (status == BCME_OK) {
        WL_DBG(("Avail type [%d] found to be configured\n", avail_type));
    } else {
        WL_ERR(("set nan avail failed ret %d status %d\n", ret, status));
    }

fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }
    if (nan_iov_data) {
        MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
    }

    NAN_DBG_EXIT();
    return ret;
}

/* API to configure nan ctrl and nan ctrl2 commands */
static int
wl_cfgnan_config_control_flag(struct net_device *ndev, struct bcm_cfg80211 *cfg,
    uint32 flag1, uint32 flag2, uint16 cmd_id, uint32 *status, bool set)
{
    bcm_iov_batch_buf_t *nan_buf = NULL;
    s32 ret = BCME_OK;
    uint16 nan_iov_start, nan_iov_end;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
    wl_nan_iov_t *nan_iov_data = NULL;
    uint32 *cfg_ctrl;
    uint16 cfg_ctrl_size;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

    NAN_DBG_ENTER();
    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
    if (!nan_iov_data) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG) {
        cfg_ctrl_size = sizeof(wl_nan_cfg_ctrl_t);
    } else if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG2) {
        cfg_ctrl_size = sizeof(wl_nan_cfg_ctrl2_t);
    } else {
        ret = BCME_BADARG;
        goto fail;
    }

    nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
    nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
    sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
        cfg_ctrl_size, &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }

    sub_cmd->id = htod16(cmd_id);
    sub_cmd->len = sizeof(sub_cmd->u.options) + cfg_ctrl_size;
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

    nan_buf->is_set = false;
    nan_buf->count++;

    /* Reduce the iov_len size by subcmd_len */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_end = nan_iov_data->nan_iov_len;
    nan_buf_size = (nan_iov_start - nan_iov_end);

    bzero(resp_buf, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
        (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(*status)) {
        WL_ERR(("get nan cfg ctrl failed ret %d status %d\n", ret, *status));
        goto fail;
    }
    sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];

    /* check the response buff */
    if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG) {
        wl_nan_cfg_ctrl_t *cfg_ctrl1;
        cfg_ctrl1 = ((uint32 *)&sub_cmd_resp->data[0]);
        if (set) {
            *cfg_ctrl1 |= flag1;
        } else {
            *cfg_ctrl1 &= ~flag1;
        }
        cfg_ctrl = cfg_ctrl1;
        WL_INFORM_MEM(("%s: Modifying nan ctrl flag %x val %d\n",
            __FUNCTION__, flag1, set));
    } else {
        wl_nan_cfg_ctrl2_t *cfg_ctrl2;
        cfg_ctrl2 = ((wl_nan_cfg_ctrl2_t *)&sub_cmd_resp->data[0]);
        if (set) {
            cfg_ctrl2->flags1 |= flag1;
            cfg_ctrl2->flags2 |= flag2;
        } else {
            cfg_ctrl2->flags1 &= ~flag1;
            cfg_ctrl2->flags2 &= ~flag2;
        }
        cfg_ctrl = (uint32 *)cfg_ctrl2;
        WL_INFORM_MEM(("%s: Modifying nan ctrl2 flag1 %x flag2 %x val %d\n",
            __FUNCTION__, flag1, flag2, set));
    }
    ret = memcpy_s(sub_cmd->data, cfg_ctrl_size, cfg_ctrl, cfg_ctrl_size);
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy cfg ctrl\n"));
        goto fail;
    }

    nan_buf->is_set = true;
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
        (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(*status)) {
        WL_ERR(("set nan cfg ctrl failed ret %d status %d\n", ret, *status));
        goto fail;
    }
    WL_DBG(("set nan cfg ctrl successful\n"));
fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }
    if (nan_iov_data) {
        MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
    }

    NAN_DBG_EXIT();
    return ret;
}
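
/*
 * Usage note (added for clarity): wl_cfgnan_config_control_flag() is a
 * read-modify-write helper; it first issues a GET of the current ctrl word,
 * ORs in (or clears) the requested bits, then issues the SET with the merged
 * value. A hypothetical caller that wants to set a bit in the ctrl2 words
 * without disturbing the others would do something like:
 *
 *     uint32 status;
 *     ret = wl_cfgnan_config_control_flag(ndev, cfg, flag1_bit, flag2_bit,
 *             WL_NAN_CMD_CFG_NAN_CONFIG2, &status, true);
 */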

static int
wl_cfgnan_get_iovars_status(void *ctx, const uint8 *data, uint16 type, uint16 len)
{
    bcm_iov_batch_buf_t *b_resp = (bcm_iov_batch_buf_t *)ctx;
    uint32 status;
    /* if all tlvs are parsed, we should not be here */
    if (b_resp->count == 0) {
        return BCME_BADLEN;
    }

    /* cbfn params may be used in f/w */
    if (len < sizeof(status)) {
        return BCME_BUFTOOSHORT;
    }

    /* the first 4 bytes contain the status */
    if (memcpy_s(&status, sizeof(status),
            data, sizeof(uint32)) != BCME_OK) {
        WL_ERR(("Failed to copy status\n"));
        goto exit;
    }

    status = dtoh32(status);

    /* If status is non zero */
    if (status != BCME_OK) {
        printf("cmd type %d failed, status: %04x\n", type, status);
        goto exit;
    }

    if (b_resp->count > 0) {
        b_resp->count--;
    }

    if (!b_resp->count) {
        status = BCME_IOV_LAST_CMD;
    }
exit:
    return status;
}

static int
wl_cfgnan_execute_ioctl(struct net_device *ndev, struct bcm_cfg80211 *cfg,
    bcm_iov_batch_buf_t *nan_buf, uint16 nan_buf_size, uint32 *status,
    uint8 *resp_buf, uint16 resp_buf_size)
{
    int ret = BCME_OK;
    uint16 tlvs_len;
    int res = BCME_OK;
    bcm_iov_batch_buf_t *p_resp = NULL;
    char *iov = "nan";
    int max_resp_len = WLC_IOCTL_MAXLEN;

    WL_DBG(("Enter:\n"));
    if (nan_buf->is_set) {
        ret = wldev_iovar_setbuf(ndev, "nan", nan_buf, nan_buf_size,
            resp_buf, resp_buf_size, NULL);
        p_resp = (bcm_iov_batch_buf_t *)(resp_buf + strlen(iov) + 1);
    } else {
        ret = wldev_iovar_getbuf(ndev, "nan", nan_buf, nan_buf_size,
            resp_buf, resp_buf_size, NULL);
        p_resp = (bcm_iov_batch_buf_t *)(resp_buf);
    }
    if (unlikely(ret)) {
        WL_ERR(("nan execute ioctl failed, error = %d\n", ret));
        goto fail;
    }

    p_resp->is_set = nan_buf->is_set;
    tlvs_len = max_resp_len - OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    /* Extract the tlvs and print their resp in cb fn */
    res = bcm_unpack_xtlv_buf((void *)p_resp, (const uint8 *)&p_resp->cmds[0],
        tlvs_len, BCM_IOV_CMD_OPT_ALIGN32, wl_cfgnan_get_iovars_status);

    if (res == BCME_IOV_LAST_CMD) {
        res = BCME_OK;
    }
fail:
    *status = res;
    WL_DBG(("nan ioctl ret %d status %d\n", ret, *status));
    return ret;
}
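
/*
 * Note (added for clarity): on the set path the response buffer returned by
 * wldev_iovar_setbuf() is prefixed with the iovar name and its NUL
 * terminator, which is why the batch response is read at
 * resp_buf + strlen("nan") + 1 above, while on the get path the response
 * starts at resp_buf directly.
 */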

static int
wl_cfgnan_if_addr_handler(void *p_buf, uint16 *nan_buf_size,
    struct ether_addr *if_addr)
{
    /* nan enable */
    s32 ret = BCME_OK;
    uint16 subcmd_len;

    NAN_DBG_ENTER();

    if (p_buf != NULL) {
        bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);

        ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
            sizeof(*if_addr), &subcmd_len);
        if (unlikely(ret)) {
            WL_ERR(("nan_sub_cmd check failed\n"));
            goto fail;
        }

        /* Fill the sub_command block */
        sub_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
        sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*if_addr);
        sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
        ret = memcpy_s(sub_cmd->data, sizeof(*if_addr),
            (uint8 *)if_addr, sizeof(*if_addr));
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy if addr\n"));
            goto fail;
        }

        *nan_buf_size -= subcmd_len;
    } else {
        WL_ERR(("nan_iov_buf is NULL\n"));
        ret = BCME_ERROR;
        goto fail;
    }

fail:
    NAN_DBG_EXIT();
    return ret;
}

static int
wl_cfgnan_get_ver(struct net_device *ndev, struct bcm_cfg80211 *cfg)
{
    bcm_iov_batch_buf_t *nan_buf = NULL;
    s32 ret = BCME_OK;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    wl_nan_ver_t *nan_ver = NULL;
    uint16 subcmd_len;
    uint32 status;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

    NAN_DBG_ENTER();
    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
    sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);

    ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
        sizeof(*nan_ver), &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }

    nan_ver = (wl_nan_ver_t *)sub_cmd->data;
    sub_cmd->id = htod16(WL_NAN_CMD_GLB_NAN_VER);
    sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nan_ver);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    nan_buf_size -= subcmd_len;
    nan_buf->count = 1;

    nan_buf->is_set = false;
    bzero(resp_buf, sizeof(resp_buf));
    nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;

    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
        (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(status)) {
        WL_ERR(("get nan ver failed ret %d status %d\n",
            ret, status));
        goto fail;
    }

    sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
    nan_ver = ((wl_nan_ver_t *)&sub_cmd_resp->data[0]);
    if (!nan_ver) {
        ret = BCME_NOTFOUND;
        WL_ERR(("nan_ver not found: err = %d\n", ret));
        goto fail;
    }
    cfg->nancfg->version = *nan_ver;
    WL_INFORM_MEM(("Nan Version is %d\n", cfg->nancfg->version));

fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }
    NAN_DBG_EXIT();
    return ret;
}

static int
wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
{
    s32 ret = BCME_OK;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint32 status;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
    struct ether_addr if_addr;
    uint8 buf[NAN_IOCTL_BUF_SIZE];
    bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
    bool rand_mac = cfg->nancfg->mac_rand;

    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
    if (rand_mac) {
        RANDOM_BYTES(if_addr.octet, 6);
        /* restore mcast and local admin bits to 0 and 1 */
        ETHER_SET_UNICAST(if_addr.octet);
        ETHER_SET_LOCALADDR(if_addr.octet);
    } else {
        /* Use primary MAC with the locally administered bit for the
         * NAN NMI I/F
         */
        if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI,
                if_addr.octet) != BCME_OK) {
            ret = -EINVAL;
            WL_ERR(("Failed to get mac addr for NMI\n"));
            goto fail;
        }
    }
    WL_INFORM_MEM(("%s: NMI " MACDBG "\n",
        __FUNCTION__, MAC2STRDBG(if_addr.octet)));
    ret = wl_cfgnan_if_addr_handler(&nan_buf->cmds[0],
        &nan_buf_size, &if_addr);
    if (unlikely(ret)) {
        WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
        goto fail;
    }
    nan_buf->count++;
    nan_buf->is_set = true;
    nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
    bzero(resp_buf, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
        nan_buf, nan_buf_size, &status,
        (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(status)) {
        WL_ERR(("nan if addr handler failed ret %d status %d\n",
            ret, status));
        goto fail;
    }
    ret = memcpy_s(cfg->nancfg->nan_nmi_mac, ETH_ALEN,
        if_addr.octet, ETH_ALEN);
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy nmi addr\n"));
        goto fail;
    }
    return ret;
fail:
    if (!rand_mac) {
        wl_release_vif_macaddr(cfg, if_addr.octet, WL_IF_TYPE_NAN_NMI);
    }

    return ret;
}
1777
1778 static int
wl_cfgnan_init_handler(void * p_buf,uint16 * nan_buf_size,bool val)1779 wl_cfgnan_init_handler(void *p_buf, uint16 *nan_buf_size, bool val)
1780 {
1781 /* nan enable */
1782 s32 ret = BCME_OK;
1783 uint16 subcmd_len;
1784
1785 NAN_DBG_ENTER();
1786
1787 if (p_buf != NULL) {
1788 bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
1789
1790 ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
1791 sizeof(val), &subcmd_len);
1792 if (unlikely(ret)) {
1793 WL_ERR(("nan_sub_cmd check failed\n"));
1794 goto fail;
1795 }
1796
1797 /* Fill the sub_command block */
1798 sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
1799 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
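		/* 'len' covers the options word plus the payload only; the
		 * id and len header fields themselves are not counted.
		 */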
		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
		ret = memcpy_s(sub_cmd->data, sizeof(uint8),
				(uint8*)&val, sizeof(uint8));
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy init value\n"));
			goto fail;
		}

		*nan_buf_size -= subcmd_len;
	} else {
		WL_ERR(("nan_iov_buf is NULL\n"));
		ret = BCME_ERROR;
		goto fail;
	}

fail:
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_enable_handler(wl_nan_iov_t *nan_iov_data, bool val)
{
	/* nan enable */
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(val), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	/* Fill the sub_command block */
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	ret = memcpy_s(sub_cmd->data, sizeof(uint8),
			(uint8*)&val, sizeof(uint8));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy enab value\n"));
		return ret;
	}

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data)
{
	/* wl nan warm_up_time */
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_warmup_time_ticks_t *wup_ticks = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	wup_ticks = (wl_nan_warmup_time_ticks_t *)sub_cmd->data;

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*wup_ticks), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}
	/* Fill the sub_command block */
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_WARMUP_TIME);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
			sizeof(*wup_ticks);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	*wup_ticks = cmd_data->warmup_time;

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_election_metric(nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_election_metric_config_t *metrics = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();

	sub_cmd =
		(bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*metrics), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	metrics = (wl_nan_election_metric_config_t *)sub_cmd->data;

	if (nan_attr_mask & NAN_ATTR_RAND_FACTOR_CONFIG) {
		metrics->random_factor = (uint8)cmd_data->metrics.random_factor;
	}

	if ((!cmd_data->metrics.master_pref) ||
			(cmd_data->metrics.master_pref > NAN_MAXIMUM_MASTER_PREFERENCE)) {
		WL_TRACE(("Master Pref is 0 or greater than 254, hence sending random value\n"));
		/* Master pref for mobile devices can be from 1 - 127 as per Spec Appendix C */
		metrics->master_pref = (RANDOM32()%(NAN_MAXIMUM_MASTER_PREFERENCE/2)) + 1;
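		/* With NAN_MAXIMUM_MASTER_PREFERENCE at 254 this draws a value
		 * in 1..127, i.e. RANDOM32() % 127 + 1.
		 */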
	} else {
		metrics->master_pref = (uint8)cmd_data->metrics.master_pref;
	}
	sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
			sizeof(*metrics);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

fail:
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_rssi_notif_thld_t *rssi_notif_thld = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	rssi_notif_thld = (wl_nan_rssi_notif_thld_t *)sub_cmd->data;

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*rssi_notif_thld), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}
	if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG) {
		rssi_notif_thld->bcn_rssi_2g =
			cmd_data->rssi_attr.rssi_proximity_2dot4g_val;
	} else {
		/* Keep the default RSSI notification threshold at -70 dBm */
		rssi_notif_thld->bcn_rssi_2g = NAN_DEF_RSSI_NOTIF_THRESH;
	}

	if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG) {
		rssi_notif_thld->bcn_rssi_5g =
			cmd_data->rssi_attr.rssi_proximity_5g_val;
	} else {
		/* Keep the default RSSI notification threshold at -70 dBm */
		rssi_notif_thld->bcn_rssi_5g = NAN_DEF_RSSI_NOTIF_THRESH;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD);
	sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_notif_thld));
	sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_rssi_thld_t *rssi_thld = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	rssi_thld = (wl_nan_rssi_thld_t *)sub_cmd->data;

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*rssi_thld), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	/*
	 * Default RSSI mid value is -75 dBm for both 2G and 5G;
	 * default RSSI close value is -60 dBm for both 2G and 5G.
	 */
	if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_2G_CONFIG) {
		rssi_thld->rssi_mid_2g =
			cmd_data->rssi_attr.rssi_middle_2dot4g_val;
	} else {
		rssi_thld->rssi_mid_2g = NAN_DEF_RSSI_MID;
	}

	if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_5G_CONFIG) {
		rssi_thld->rssi_mid_5g =
			cmd_data->rssi_attr.rssi_middle_5g_val;
	} else {
		rssi_thld->rssi_mid_5g = NAN_DEF_RSSI_MID;
	}

	if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_CONFIG) {
		rssi_thld->rssi_close_2g =
			cmd_data->rssi_attr.rssi_close_2dot4g_val;
	} else {
		rssi_thld->rssi_close_2g = NAN_DEF_RSSI_CLOSE;
	}

	if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_5G_CONFIG) {
		rssi_thld->rssi_close_5g =
			cmd_data->rssi_attr.rssi_close_5g_val;
	} else {
		rssi_thld->rssi_close_5g = NAN_DEF_RSSI_CLOSE;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD);
	sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_thld));
	sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}

static int
check_for_valid_5gchan(struct net_device *ndev, uint8 chan)
{
	s32 ret = BCME_OK;
	uint bitmap;
	u8 ioctl_buf[WLC_IOCTL_SMLEN];
	uint32 chanspec_arg;
	NAN_DBG_ENTER();

	chanspec_arg = CH20MHZ_CHSPEC(chan);
	chanspec_arg = wl_chspec_host_to_driver(chanspec_arg);
	bzero(ioctl_buf, WLC_IOCTL_SMLEN);
	ret = wldev_iovar_getbuf(ndev, "per_chan_info",
			(void *)&chanspec_arg, sizeof(chanspec_arg),
			ioctl_buf, WLC_IOCTL_SMLEN, NULL);
	if (ret != BCME_OK) {
		WL_ERR(("Chaninfo for channel = %d, error %d\n", chan, ret));
		goto exit;
	}

	bitmap = dtoh32(*(uint *)ioctl_buf);
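	/* per_chan_info returns a bitmap of channel attributes; only
	 * WL_CHAN_VALID_HW (radio-capable) and WL_CHAN_VALID_SW (allowed in
	 * the current locale) are checked here.
	 */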
	if (!(bitmap & WL_CHAN_VALID_HW)) {
		WL_ERR(("Invalid channel\n"));
		ret = BCME_BADCHAN;
		goto exit;
	}

	if (!(bitmap & WL_CHAN_VALID_SW)) {
		WL_ERR(("Not supported in current locale\n"));
		ret = BCME_BADCHAN;
		goto exit;
	}
exit:
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_nan_soc_chans(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_social_channels_t *soc_chans = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	soc_chans =
		(wl_nan_social_channels_t *)sub_cmd->data;

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*soc_chans), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
			sizeof(*soc_chans);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	if (nan_attr_mask & NAN_ATTR_2G_CHAN_CONFIG) {
		soc_chans->soc_chan_2g = cmd_data->chanspec[1];
	} else {
		soc_chans->soc_chan_2g = NAN_DEF_SOCIAL_CHAN_2G;
	}

	if (cmd_data->support_5g) {
		if (nan_attr_mask & NAN_ATTR_5G_CHAN_CONFIG) {
			soc_chans->soc_chan_5g = cmd_data->chanspec[2];
		} else {
			soc_chans->soc_chan_5g = NAN_DEF_SOCIAL_CHAN_5G;
		}
		ret = check_for_valid_5gchan(ndev, soc_chans->soc_chan_5g);
		if (ret != BCME_OK) {
			ret = check_for_valid_5gchan(ndev, NAN_DEF_SEC_SOCIAL_CHAN_5G);
			if (ret == BCME_OK) {
				soc_chans->soc_chan_5g = NAN_DEF_SEC_SOCIAL_CHAN_5G;
			} else {
				soc_chans->soc_chan_5g = 0;
				ret = BCME_OK;
				WL_ERR(("Current locale doesn't support 5G op, "
						"continuing with 2G only operation\n"));
			}
		}
	} else {
		WL_DBG(("5G support is disabled\n"));
	}
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_nan_scan_params(struct net_device *ndev, struct bcm_cfg80211 *cfg,
		nan_config_cmd_data_t *cmd_data, uint8 band_index, uint32 nan_attr_mask)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	wl_nan_scan_params_t *scan_params = NULL;
	uint32 status;

	NAN_DBG_ENTER();

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*scan_params), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}
	scan_params = (wl_nan_scan_params_t *)sub_cmd->data;

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_SCAN_PARAMS);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*scan_params);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	if (!band_index) {
		/* Fw default: Dwell time for 2G is 210 */
		if ((nan_attr_mask & NAN_ATTR_2G_DWELL_TIME_CONFIG) &&
				cmd_data->dwell_time[0]) {
			scan_params->dwell_time = cmd_data->dwell_time[0] +
					NAN_SCAN_DWELL_TIME_DELTA_MS;
		}
		/* Fw default: Scan period for 2G is 10 */
		if (nan_attr_mask & NAN_ATTR_2G_SCAN_PERIOD_CONFIG) {
			scan_params->scan_period = cmd_data->scan_period[0];
		}
	} else {
		if ((nan_attr_mask & NAN_ATTR_5G_DWELL_TIME_CONFIG) &&
				cmd_data->dwell_time[1]) {
			scan_params->dwell_time = cmd_data->dwell_time[1] +
					NAN_SCAN_DWELL_TIME_DELTA_MS;
		}
		if (nan_attr_mask & NAN_ATTR_5G_SCAN_PERIOD_CONFIG) {
			scan_params->scan_period = cmd_data->scan_period[1];
		}
	}
	scan_params->band_index = band_index;
	nan_buf->is_set = true;
	nan_buf->count++;

	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	nan_buf_size = (nan_iov_start - nan_iov_end);
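	/* Used length = start marker minus remaining space, i.e. the
	 * cmds[0] header offset plus every packed sub-command.
	 */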

	bzero(resp_buf, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("set nan scan params failed ret %d status %d \n", ret, status));
		goto fail;
	}
	WL_DBG(("set nan scan params successful\n"));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_DBG_EXIT();
	return ret;
}

static uint16
wl_cfgnan_gen_rand_cluster_id(uint16 low_val, uint16 high_val)
{
	uint16 random_id;
	ulong random_seed;

	/* For the degenerate case (low >= high), fall back to the
	 * cluster_high value.
	 */
	if (low_val >= high_val)
	{
		random_id = high_val;
	} else {
		RANDOM_BYTES(&random_seed, sizeof(random_seed));
		random_id = (uint16)((random_seed % ((high_val + 1) -
				low_val)) + low_val);
	}
	return random_id;
}

static int
wl_cfgnan_set_cluster_id(nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			(sizeof(cmd_data->clus_id) - sizeof(uint8)), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	cmd_data->clus_id.octet[0] = 0x50;
	cmd_data->clus_id.octet[1] = 0x6F;
	cmd_data->clus_id.octet[2] = 0x9A;
	cmd_data->clus_id.octet[3] = 0x01;
	hton16_ua_store(wl_cfgnan_gen_rand_cluster_id(cmd_data->cluster_low,
			cmd_data->cluster_high), &cmd_data->clus_id.octet[4]);
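	/* Resulting cluster ID: 50:6F:9A:01:xx:xx, i.e. the Wi-Fi Alliance
	 * OUI plus NAN type 0x01, with the random (or requested) 16-bit
	 * value stored big-endian in the last two octets.
	 */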

	WL_TRACE(("cluster_id = " MACDBG "\n", MAC2STRDBG(cmd_data->clus_id.octet)));

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->clus_id);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->clus_id),
			(uint8 *)&cmd_data->clus_id,
			sizeof(cmd_data->clus_id));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy clus id\n"));
		return ret;
	}

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_hop_count_t *hop_limit = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	hop_limit = (wl_nan_hop_count_t *)sub_cmd->data;

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*hop_limit), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	*hop_limit = cmd_data->hop_count_limit;
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_HOP_LIMIT);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*hop_limit);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_sid_beacon_control_t *sid_beacon = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*sid_beacon), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	sid_beacon = (wl_nan_sid_beacon_control_t *)sub_cmd->data;
	sid_beacon->sid_enable = cmd_data->sid_beacon.sid_enable;
	/* Need to have separate flag for sub beacons
	 * sid_beacon->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
	 */
	if (nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) {
		/* Limit for number of publish SIDs to be included in Beacons */
		sid_beacon->sid_count = cmd_data->sid_beacon.sid_count;
	}
	if (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG) {
		/* Limit for number of subscribe SIDs to be included in Beacons */
		sid_beacon->sub_sid_count = cmd_data->sid_beacon.sub_sid_count;
	}
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_SID_BEACON);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
			sizeof(*sid_beacon);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_nan_oui(nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(cmd_data->nan_oui), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->nan_oui);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->nan_oui),
			(uint32 *)&cmd_data->nan_oui,
			sizeof(cmd_data->nan_oui));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy nan oui\n"));
		return ret;
	}

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_awake_dws(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
		wl_nan_iov_t *nan_iov_data, struct bcm_cfg80211 *cfg, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_awake_dws_t *awake_dws = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();

	sub_cmd =
		(bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			sizeof(*awake_dws), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	awake_dws = (wl_nan_awake_dws_t *)sub_cmd->data;

	if (nan_attr_mask & NAN_ATTR_2G_DW_CONFIG) {
		awake_dws->dw_interval_2g = cmd_data->awake_dws.dw_interval_2g;
		if (!awake_dws->dw_interval_2g) {
			/* Set 2G awake dw value to fw default value 1 */
			awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
		}
	} else {
		/* Set 2G awake dw value to fw default value 1 */
		awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
	}

	if (cfg->nancfg->support_5g) {
		if (nan_attr_mask & NAN_ATTR_5G_DW_CONFIG) {
			awake_dws->dw_interval_5g = cmd_data->awake_dws.dw_interval_5g;
			/* config sync/discovery beacons on 5G band */
			ret = wl_cfgnan_config_control_flag(ndev, cfg,
					WL_NAN_CTRL_DISC_BEACON_TX_5G |
					WL_NAN_CTRL_SYNC_BEACON_TX_5G,
					0, WL_NAN_CMD_CFG_NAN_CONFIG,
					&(cmd_data->status),
					awake_dws->dw_interval_5g);
			if (unlikely(ret) || unlikely(cmd_data->status)) {
				WL_ERR((" nan control set config handler, ret = %d"
						" status = %d \n", ret, cmd_data->status));
				goto fail;
			}
		} else {
			/* Set 5G awake dw value to fw default value 1 */
			awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
		}
	}

	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_AWAKE_DWS);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
			sizeof(*awake_dws);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

fail:
	NAN_DBG_EXIT();
	return ret;
}

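/*
 * Caller-side sketch (illustrative only, not part of the driver): enabling
 * cluster merge with the primary ndev and checking both the ioctl result
 * and the firmware status word.
 *
 *	uint32 status = 0;
 *	if (wl_cfgnan_set_enable_merge(ndev, cfg, 1, &status) != BCME_OK ||
 *			status) {
 *		WL_ERR(("merge enable failed\n"));
 *	}
 */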
int
wl_cfgnan_set_enable_merge(struct net_device *ndev,
		struct bcm_cfg80211 *cfg, uint8 enable, uint32 *status)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	wl_nan_merge_enable_t merge_enable;
	uint8 size_of_iov;

	NAN_DBG_ENTER();

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	merge_enable = (wl_nan_merge_enable_t)enable;
	size_of_iov = sizeof(wl_nan_merge_enable_t);

	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			size_of_iov, &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_MERGE);
	sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	nan_buf_size = (nan_iov_start - nan_iov_end);

	(void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
			&merge_enable, size_of_iov);
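	/* memcpy_s result deliberately ignored here: the remaining IOV space
	 * was already validated against size_of_iov above.
	 */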

	nan_buf->is_set = true;
	nan_buf->count++;
	bzero(resp_buf, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("Merge enable %d failed ret %d status %d \n", merge_enable, ret, *status));
		goto fail;
	}
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}
	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_set_disc_beacon_interval_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
		wl_nan_disc_bcn_interval_t disc_beacon_interval)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 subcmd_len;
	uint8 size_of_iov;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	size_of_iov = sizeof(wl_nan_disc_bcn_interval_t);
	nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
			size_of_iov, &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	/* Use the default discovery beacon interval if the value is zero */
	if (!disc_beacon_interval) {
		disc_beacon_interval = cfg->nancfg->support_5g ? NAN_DISC_BCN_INTERVAL_5G_DEF:
				NAN_DISC_BCN_INTERVAL_2G_DEF;
	}

	/* Fill the sub_command block */
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_BCN_INTERVAL);
	sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	ret = memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
			&disc_beacon_interval, size_of_iov);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy disc_beacon_interval\n"));
		goto fail;
	}

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	nan_buf->count++;
	nan_buf->is_set = true;
	nan_buf_size -= nan_iov_data->nan_iov_len;
	bzero(resp_buf, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("Failed to set disc beacon interval, ret = %d status = %d\n",
				ret, status));
		goto fail;
	}

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

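/*
 * If a deferred NAN disable is already queued, pull it in: cancel the
 * pending delayed work and, only if the cancel succeeded, requeue it with
 * zero delay so the disable runs at the next workqueue opportunity.
 */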
void
wl_cfgnan_immediate_nan_disable_pending(struct bcm_cfg80211 *cfg)
{
	if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
		WL_DBG(("Do immediate nan_disable work\n"));
		DHD_NAN_WAKE_UNLOCK(cfg->pub);
		if (cancel_delayed_work(&cfg->nancfg->nan_disable)) {
			schedule_delayed_work(&cfg->nancfg->nan_disable, 0);
		}
	}
}

int
wl_cfgnan_check_nan_disable_pending(struct bcm_cfg80211 *cfg,
		bool force_disable, bool is_sync_reqd)
{
	int ret = BCME_OK;
	struct net_device *ndev = NULL;

	if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
		WL_DBG(("Cancel nan_disable work\n"));
		/*
		 * NAN gets disabled from dhd_stop (dev_close) and other framework
		 * contexts. cancel_work_sync can't be used from the dhd_stop
		 * context for wl_cfgnan_delayed_disable since both contexts use
		 * rtnl_lock, which would deadlock. If dhd_stop gets invoked,
		 * rely on the dhd_stop context to do the NAN cleanup work and
		 * just return from the delayed WQ based on a state check.
		 */

		DHD_NAN_WAKE_UNLOCK(cfg->pub);

		if (is_sync_reqd == true) {
			cancel_delayed_work_sync(&cfg->nancfg->nan_disable);
		} else {
			cancel_delayed_work(&cfg->nancfg->nan_disable);
		}
		force_disable = true;
	}
	if ((force_disable == true) && (cfg->nancfg->nan_enable == true)) {
		ret = wl_cfgnan_disable(cfg);
		if (ret != BCME_OK) {
			WL_ERR(("failed to disable nan, error[%d]\n", ret));
		}
		/* Intentional fall through to cleanup framework */
		if (cfg->nancfg->notify_user == true) {
			ndev = bcmcfg_to_prmry_ndev(cfg);
			wl_cfgvendor_nan_send_async_disable_resp(ndev->ieee80211_ptr);
		}
	}
	return ret;
}

int
wl_cfgnan_start_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
		nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	int i;
	s32 timeout = 0;
	nan_hal_capabilities_t capabilities;
	uint32 cfg_ctrl1_flags = 0;
	uint32 cfg_ctrl2_flags1 = 0;
	wl_nancfg_t *nancfg = cfg->nancfg;

	NAN_DBG_ENTER();

	if (!dhdp->up) {
		WL_ERR(("bus is already down, hence blocking nan start\n"));
		return BCME_ERROR;
	}

	/* Protect discovery creation. Ensure proper mutex precedence.
	 * If if_sync & nan_mutex come together in the same context, nan_mutex
	 * should follow if_sync.
	 */
	mutex_lock(&cfg->if_sync);
	NAN_MUTEX_LOCK();

#ifdef WL_IFACE_MGMT
	if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN_NMI)) != BCME_OK) {
		WL_ERR(("Conflicting iface is present, can't support nan\n"));
		NAN_MUTEX_UNLOCK();
		mutex_unlock(&cfg->if_sync);
		goto fail;
	}
#endif /* WL_IFACE_MGMT */

	/* disable TDLS on NAN init */
	wl_cfg80211_tdls_config(cfg, TDLS_STATE_NMI_CREATE, false);

	WL_INFORM_MEM(("Initializing NAN\n"));
	ret = wl_cfgnan_init(cfg);
	if (ret != BCME_OK) {
		WL_ERR(("failed to initialize NAN[%d]\n", ret));
		NAN_MUTEX_UNLOCK();
		mutex_unlock(&cfg->if_sync);
		goto fail;
	}

	ret = wl_cfgnan_get_ver(ndev, cfg);
	if (ret != BCME_OK) {
		WL_ERR(("failed to get NAN IOV version[%d]\n", ret));
		NAN_MUTEX_UNLOCK();
		mutex_unlock(&cfg->if_sync);
		goto fail;
	}

	/* set nmi addr */
	ret = wl_cfgnan_set_if_addr(cfg);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to set nmi address \n"));
		NAN_MUTEX_UNLOCK();
		mutex_unlock(&cfg->if_sync);
		goto fail;
	}
	nancfg->nan_event_recvd = false;
	NAN_MUTEX_UNLOCK();
	mutex_unlock(&cfg->if_sync);

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	if (nan_attr_mask & NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG) {
		/* config sync/discovery beacons on 2G band */
		/* 2g is mandatory */
		if (!cmd_data->beacon_2g_val) {
			WL_ERR(("Invalid NAN config...2G is mandatory\n"));
			ret = BCME_BADARG;
		}
		cfg_ctrl1_flags |= (WL_NAN_CTRL_DISC_BEACON_TX_2G | WL_NAN_CTRL_SYNC_BEACON_TX_2G);
	}
	if (nan_attr_mask & NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG) {
		/* config sync/discovery beacons on 5G band */
		cfg_ctrl1_flags |= (WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G);
	}

	if (cmd_data->warmup_time) {
		ret = wl_cfgnan_warmup_time_handler(cmd_data, nan_iov_data);
		if (unlikely(ret)) {
			WL_ERR(("warm up time handler sub_cmd set failed\n"));
			goto fail;
		}
		nan_buf->count++;
	}
	/* setting master preference and random factor */
	ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data, nan_attr_mask);
	if (unlikely(ret)) {
		WL_ERR(("election_metric sub_cmd set failed\n"));
		goto fail;
	} else {
		nan_buf->count++;
	}

	/* setting nan social channels */
	ret = wl_cfgnan_set_nan_soc_chans(ndev, cmd_data, nan_iov_data, nan_attr_mask);
	if (unlikely(ret)) {
		WL_ERR(("nan social channels set failed\n"));
		goto fail;
	} else {
		/* Store the 5G capability, which is required for avail chan config. */
		nancfg->support_5g = cmd_data->support_5g;
		nan_buf->count++;
	}

	if ((cmd_data->support_2g) && ((cmd_data->dwell_time[0]) ||
			(cmd_data->scan_period[0]))) {
		/* setting scan params */
		ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("scan params set failed for 2g\n"));
			goto fail;
		}
	}

	if ((cmd_data->support_5g) && ((cmd_data->dwell_time[1]) ||
			(cmd_data->scan_period[1]))) {
		/* setting scan params */
		ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data,
				cmd_data->support_5g, nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("scan params set failed for 5g\n"));
			goto fail;
		}
	}

	/*
	 * A cluster_low value matching cluster_high indicates a request
	 * to join a cluster with that value.
	 * If the requested cluster is not found, the
	 * device will start its own cluster.
	 */
	/* For debug purposes, force merging to the configured cluster ID */
	if (cmd_data->cluster_low == cmd_data->cluster_high) {
		/* device will merge to configured CID only */
		cfg_ctrl1_flags |= (WL_NAN_CTRL_MERGE_CONF_CID_ONLY);
	}
	/* setting cluster ID */
	ret = wl_cfgnan_set_cluster_id(cmd_data, nan_iov_data);
	if (unlikely(ret)) {
		WL_ERR(("cluster_id sub_cmd set failed\n"));
		goto fail;
	}
	nan_buf->count++;

	/* setting rssi proximity values for 2.4GHz and 5GHz */
	ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
	if (unlikely(ret)) {
		WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
		goto fail;
	} else {
		nan_buf->count++;
	}

	/* setting rssi middle/close values for 2.4GHz and 5GHz */
	ret = wl_cfgnan_set_rssi_mid_or_close(cmd_data, nan_iov_data, nan_attr_mask);
	if (unlikely(ret)) {
		WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
		goto fail;
	} else {
		nan_buf->count++;
	}

	/* setting hop count limit or threshold */
	if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
		ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
		if (unlikely(ret)) {
			WL_ERR(("hop_count_limit sub_cmd set failed\n"));
			goto fail;
		}
		nan_buf->count++;
	}

	/* setting sid beacon val */
	if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
			(nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
		ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("sid_beacon sub_cmd set failed\n"));
			goto fail;
		}
		nan_buf->count++;
	}

	/* setting nan oui */
	if (nan_attr_mask & NAN_ATTR_OUI_CONFIG) {
		ret = wl_cfgnan_set_nan_oui(cmd_data, nan_iov_data);
		if (unlikely(ret)) {
			WL_ERR(("nan_oui sub_cmd set failed\n"));
			goto fail;
		}
		nan_buf->count++;
	}

	/* setting nan awake dws */
	ret = wl_cfgnan_set_awake_dws(ndev, cmd_data,
			nan_iov_data, cfg, nan_attr_mask);
	if (unlikely(ret)) {
		WL_ERR(("nan awake dws set failed\n"));
		goto fail;
	} else {
		nan_buf->count++;
	}

	/* enable events */
	ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
	if (unlikely(ret)) {
		WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n", ret));
		goto fail;
	}

	/* setting nan enable sub_cmd */
	ret = wl_cfgnan_enable_handler(nan_iov_data, true);
	if (unlikely(ret)) {
		WL_ERR(("enable handler sub_cmd set failed\n"));
		goto fail;
	}
	nan_buf->count++;
	nan_buf->is_set = true;

	nan_buf_size -= nan_iov_data->nan_iov_len;
	memset(resp_buf, 0, sizeof(resp_buf));
	/* Reset condition variable */
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
			&(cmd_data->status), (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
				ret, cmd_data->status));
		goto fail;
	}

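	/* Wait for the firmware WL_NAN_EVENT_START indication; nan_event_recvd
	 * is expected to be set from the NAN event handler path, which wakes
	 * nan_event_wait.
	 */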
	timeout = wait_event_timeout(nancfg->nan_event_wait,
			nancfg->nan_event_recvd, msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
	if (!timeout) {
		WL_ERR(("Timed out while waiting for WL_NAN_EVENT_START event !!!\n"));
		ret = BCME_ERROR;
		goto fail;
	}

	/* Default flags: set NAN proprietary rates and auto datapath confirm.
	 * If auto datapath confirm is set, then DPCONF will be sent by FW.
	 */
	cfg_ctrl1_flags |= (WL_NAN_CTRL_AUTO_DPCONF | WL_NAN_CTRL_PROP_RATE);

	/* set CFG CTRL flags */
	ret = wl_cfgnan_config_control_flag(ndev, cfg, cfg_ctrl1_flags,
			0, WL_NAN_CMD_CFG_NAN_CONFIG,
			&(cmd_data->status), true);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR((" nan ctrl1 config flags setting failed, ret = %d status = %d \n",
				ret, cmd_data->status));
		goto fail;
	}

	/* malloc for ndp peer list */
	if ((ret = wl_cfgnan_get_capablities_handler(ndev, cfg, &capabilities))
			== BCME_OK) {
		nancfg->max_ndp_count = capabilities.max_ndp_sessions;
		nancfg->max_ndi_supported = capabilities.max_ndi_interfaces;
		nancfg->nan_ndp_peer_info = MALLOCZ(cfg->osh,
				nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
		if (!nancfg->nan_ndp_peer_info) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}

		if (!nancfg->ndi) {
			nancfg->ndi = MALLOCZ(cfg->osh,
					nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
			if (!nancfg->ndi) {
				WL_ERR(("%s: memory allocation failed\n", __func__));
				ret = BCME_NOMEM;
				goto fail;
			}
		}
	} else {
		WL_ERR(("wl_cfgnan_get_capablities_handler failed, ret = %d\n", ret));
		goto fail;
	}

	BCM_REFERENCE(i);
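	/* 'i' is only referenced when NAN_IFACE_CREATE_ON_UP is defined;
	 * BCM_REFERENCE() silences the unused-variable warning otherwise.
	 */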
#ifdef NAN_IFACE_CREATE_ON_UP
	for (i = 0; i < nancfg->max_ndi_supported; i++) {
		/* Create NDI using the information provided by user space */
		if (nancfg->ndi[i].in_use && !nancfg->ndi[i].created) {
			ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
					nancfg->ndi[i].ifname,
					NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
			if (ret) {
				WL_ERR(("failed to create ndp interface [%d]\n", ret));
				goto fail;
			}
			nancfg->ndi[i].created = true;
		}
	}
#endif /* NAN_IFACE_CREATE_ON_UP */

	/* Check if NDPE is capable and use_ndpe_attr is set by framework */
	/* TODO: For now enabling NDPE by default, as the framework is not setting
	 * use_ndpe_attr. When (cmd_data->use_ndpe_attr) is set by the framework,
	 * add an additional check for (cmd_data->use_ndpe_attr) as below:
	 * if (capabilities.ndpe_attr_supported && cmd_data->use_ndpe_attr)
	 */
	if (capabilities.ndpe_attr_supported)
	{
		cfg_ctrl2_flags1 |= WL_NAN_CTRL2_FLAG1_NDPE_CAP;
		nancfg->ndpe_enabled = true;
	} else {
		/* reset NDPE capability in FW */
		ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL2_FLAG1_NDPE_CAP,
				0, WL_NAN_CMD_CFG_NAN_CONFIG2,
				&(cmd_data->status), false);
		if (unlikely(ret) || unlikely(cmd_data->status)) {
			WL_ERR((" nan ctrl2 config flags resetting failed, ret = %d status = %d \n",
					ret, cmd_data->status));
			goto fail;
		}
		nancfg->ndpe_enabled = false;
	}

	/* set CFG CTRL2 flags1 and flags2 */
	ret = wl_cfgnan_config_control_flag(ndev, cfg, cfg_ctrl2_flags1,
			0, WL_NAN_CMD_CFG_NAN_CONFIG2,
			&(cmd_data->status), true);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR((" nan ctrl2 config flags setting failed, ret = %d status = %d \n",
				ret, cmd_data->status));
		goto fail;
	}

#ifdef RTT_SUPPORT
	/* Initialize geofence cfg */
	dhd_rtt_initialize_geofence_cfg(cfg->pub);
#endif /* RTT_SUPPORT */

	if (cmd_data->dw_early_termination > 0) {
		WL_ERR(("dw early termination is not supported, ignoring for now\n"));
	}

	if (nan_attr_mask & NAN_ATTR_DISC_BEACON_INTERVAL) {
		ret = wl_cfgnan_set_disc_beacon_interval_handler(ndev, cfg,
				cmd_data->disc_bcn_interval);
		if (unlikely(ret)) {
			WL_ERR(("Failed to set beacon interval\n"));
			goto fail;
		}
	}

	nancfg->nan_enable = true;
	WL_INFORM_MEM(("[NAN] Enable successful \n"));

fail:
	/* Enable back TDLS if connected interface is <= 1 */
	wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);

	/* reset condition variable */
	nancfg->nan_event_recvd = false;
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		nancfg->nan_enable = false;
		mutex_lock(&cfg->if_sync);
		ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
		if (ret != BCME_OK) {
			WL_ERR(("failed to delete NDI[%d]\n", ret));
		}
		mutex_unlock(&cfg->if_sync);
		if (nancfg->nan_ndp_peer_info) {
			MFREE(cfg->osh, nancfg->nan_ndp_peer_info,
					nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
			nancfg->nan_ndp_peer_info = NULL;
		}
		if (nancfg->ndi) {
			MFREE(cfg->osh, nancfg->ndi,
					nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
			nancfg->ndi = NULL;
		}
	}
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_DBG_EXIT();
	return ret;
}

static int
wl_cfgnan_disable(struct bcm_cfg80211 *cfg)
{
	s32 ret = BCME_OK;
	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);

	NAN_DBG_ENTER();
	if ((cfg->nancfg->nan_init_state == TRUE) &&
			(cfg->nancfg->nan_enable == TRUE)) {
		struct net_device *ndev;
		ndev = bcmcfg_to_prmry_ndev(cfg);

		/* We have to remove NDIs so that P2P/Softap can work */
		ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
		if (ret != BCME_OK) {
			WL_ERR(("failed to delete NDI[%d]\n", ret));
		}

		ret = wl_cfgnan_stop_handler(ndev, cfg);
		if (ret == -ENODEV) {
			WL_ERR(("Bus is down, no need to proceed\n"));
		} else if (ret != BCME_OK) {
			WL_ERR(("failed to stop nan, error[%d]\n", ret));
		}
		ret = wl_cfgnan_deinit(cfg, dhdp->up);
		if (ret != BCME_OK) {
			WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
			if (!dhd_query_bus_erros(dhdp)) {
				ASSERT(0);
			}
		}
		wl_cfgnan_disable_cleanup(cfg);
	}
	NAN_DBG_EXIT();
	return ret;
}

static void
wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg)
{
	s32 ret = BCME_OK;
	nan_event_data_t *nan_event_data = NULL;

	NAN_DBG_ENTER();

	nan_event_data = MALLOCZ(cfg->osh, sizeof(nan_event_data_t));
	if (!nan_event_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto exit;
	}
	bzero(nan_event_data, sizeof(nan_event_data_t));

	nan_event_data->status = NAN_STATUS_SUCCESS;
	ret = memcpy_s(nan_event_data->nan_reason, NAN_ERROR_STR_LEN,
			"NAN_STATUS_SUCCESS", strlen("NAN_STATUS_SUCCESS"));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy nan reason string, ret = %d\n", ret));
		goto exit;
	}
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
	ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
			GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to send event to nan hal, (%d)\n",
				GOOGLE_NAN_EVENT_DISABLED));
	}
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
exit:
	if (nan_event_data) {
		MFREE(cfg->osh, nan_event_data, sizeof(nan_event_data_t));
	}
	NAN_DBG_EXIT();
	return;
}

static void
wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg)
{
	int i = 0;
	wl_nancfg_t *nancfg = cfg->nancfg;
#ifdef RTT_SUPPORT
	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
	rtt_target_info_t *target_info = NULL;

	/* Delete the geofence rtt target list */
	dhd_rtt_delete_geofence_target_list(dhdp);
	/* Cancel pending retry timer if any */
	if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
		cancel_delayed_work_sync(&rtt_status->rtt_retry_timer);
	}
	/* Remove if any pending proxd timeout for nan-rtt */
	target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
	if (target_info && target_info->peer == RTT_PEER_NAN) {
		/* Cancel pending proxd timeout work if any */
		if (delayed_work_pending(&rtt_status->proxd_timeout)) {
			cancel_delayed_work_sync(&rtt_status->proxd_timeout);
		}
	}
	/* Delete if any directed nan rtt session */
	dhd_rtt_delete_nan_session(dhdp);
#endif /* RTT_SUPPORT */
	/* Clear the NDP ID array and dp count */
	for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
		nancfg->ndp_id[i] = 0;
	}
	nancfg->nan_dp_count = 0;
	if (nancfg->nan_ndp_peer_info) {
		MFREE(cfg->osh, nancfg->nan_ndp_peer_info,
				nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
		nancfg->nan_ndp_peer_info = NULL;
	}
	if (nancfg->ndi) {
		MFREE(cfg->osh, nancfg->ndi,
				nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
		nancfg->ndi = NULL;
	}
	wl_cfg80211_concurrent_roam(cfg, false);
	return;
}

/*
 * Deferred nan disable work,
 * scheduled with NAN_DISABLE_CMD_DELAY
 * delay in order to remove any active nan dps
 */
void
wl_cfgnan_delayed_disable(struct work_struct *work)
{
	struct bcm_cfg80211 *cfg = NULL;
	struct net_device *ndev = NULL;
	wl_nancfg_t *nancfg = NULL;

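	/* Recover the owning wl_nancfg_t from the embedded delayed work */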
3261 BCM_SET_CONTAINER_OF(nancfg, work, wl_nancfg_t, nan_disable.work);
3262
3263 cfg = nancfg->cfg;
3264
3265 rtnl_lock();
3266 if (nancfg->nan_enable == true) {
3267 wl_cfgnan_disable(cfg);
3268 ndev = bcmcfg_to_prmry_ndev(cfg);
3269 wl_cfgvendor_nan_send_async_disable_resp(ndev->ieee80211_ptr);
3270 } else {
3271 WL_INFORM_MEM(("nan is in disabled state\n"));
3272 }
3273 rtnl_unlock();
3274
3275 DHD_NAN_WAKE_UNLOCK(cfg->pub);
3276
3277 return;
3278 }
3279
3280 int
wl_cfgnan_stop_handler(struct net_device * ndev,struct bcm_cfg80211 * cfg)3281 wl_cfgnan_stop_handler(struct net_device *ndev,
3282 struct bcm_cfg80211 *cfg)
3283 {
3284 bcm_iov_batch_buf_t *nan_buf = NULL;
3285 s32 ret = BCME_OK;
3286 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3287 wl_nan_iov_t *nan_iov_data = NULL;
3288 uint32 status;
3289 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
3290 dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
3291 wl_nancfg_t *nancfg = cfg->nancfg;
3292
3293 NAN_DBG_ENTER();
3294 NAN_MUTEX_LOCK();
3295
3296 if (!nancfg->nan_enable) {
3297 WL_INFORM(("Nan is not enabled\n"));
3298 ret = BCME_OK;
3299 goto fail;
3300 }
3301
3302 if (dhdp->up != DHD_BUS_DOWN) {
3303 /*
3304 * Framework doing cleanup(iface remove) on disable command,
3305 * so avoiding event to prevent iface delete calls again
3306 */
3307 WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
3308 wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
3309
3310 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
3311 if (!nan_buf) {
3312 WL_ERR(("%s: memory allocation failed\n", __func__));
3313 ret = BCME_NOMEM;
3314 goto fail;
3315 }
3316
3317 nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
3318 if (!nan_iov_data) {
3319 WL_ERR(("%s: memory allocation failed\n", __func__));
3320 ret = BCME_NOMEM;
3321 goto fail;
3322 }
3323
3324 nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
3325 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3326 nan_buf->count = 0;
3327 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
3328 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
3329
3330 ret = wl_cfgnan_enable_handler(nan_iov_data, false);
3331 if (unlikely(ret)) {
3332 WL_ERR(("nan disable handler failed\n"));
3333 goto fail;
3334 }
3335 nan_buf->count++;
3336 nan_buf->is_set = true;
3337 nan_buf_size -= nan_iov_data->nan_iov_len;
3338 bzero(resp_buf, sizeof(resp_buf));
3339 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
3340 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
3341 if (unlikely(ret) || unlikely(status)) {
3342 WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
3343 goto fail;
3344 }
3345 /* Enable back TDLS if connected interface is <= 1 */
3346 wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
3347 }
3348
3349 if (!nancfg->notify_user) {
3350 wl_cfgnan_send_stop_event(cfg);
3351 }
3352 fail:
3353 /* Resetting instance ID mask */
3354 nancfg->inst_id_start = 0;
3355 memset(nancfg->svc_inst_id_mask, 0, sizeof(nancfg->svc_inst_id_mask));
3356 memset(nancfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
3357 nancfg->nan_enable = false;
3358 WL_INFORM_MEM(("[NAN] Disable done\n"));
3359
3360 if (nan_buf) {
3361 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3362 }
3363 if (nan_iov_data) {
3364 MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
3365 }
3366
3367 NAN_MUTEX_UNLOCK();
3368 NAN_DBG_EXIT();
3369 return ret;
3370 }
3371
3372 int
wl_cfgnan_config_handler(struct net_device * ndev,struct bcm_cfg80211 * cfg,nan_config_cmd_data_t * cmd_data,uint32 nan_attr_mask)3373 wl_cfgnan_config_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
3374 nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
3375 {
3376 bcm_iov_batch_buf_t *nan_buf = NULL;
3377 s32 ret = BCME_OK;
3378 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3379 wl_nan_iov_t *nan_iov_data = NULL;
3380 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
3381
3382 NAN_DBG_ENTER();
3383
3384 /* Nan need to be enabled before configuring/updating params */
3385 if (!cfg->nancfg->nan_enable) {
3386 WL_INFORM(("nan is not enabled\n"));
3387 ret = BCME_NOTENABLED;
3388 goto fail;
3389 }
3390
3391 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
3392 if (!nan_buf) {
3393 WL_ERR(("%s: memory allocation failed\n", __func__));
3394 ret = BCME_NOMEM;
3395 goto fail;
3396 }
3397
3398 nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
3399 if (!nan_iov_data) {
3400 WL_ERR(("%s: memory allocation failed\n", __func__));
3401 ret = BCME_NOMEM;
3402 goto fail;
3403 }
3404
3405 nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
3406 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3407 nan_buf->count = 0;
3408 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
3409 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
3410
3411 /* setting sid beacon val */
3412 if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
3413 (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
3414 ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
3415 if (unlikely(ret)) {
3416 WL_ERR(("sid_beacon sub_cmd set failed\n"));
3417 goto fail;
3418 }
3419 nan_buf->count++;
3420 }

    /* setting master preference and random factor */
    if (cmd_data->metrics.random_factor ||
        cmd_data->metrics.master_pref) {
        ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data,
                nan_attr_mask);
        if (unlikely(ret)) {
            WL_ERR(("election_metric sub_cmd set failed\n"));
            goto fail;
        } else {
            nan_buf->count++;
        }
    }

    /* setting hop count limit or threshold */
    if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
        ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
        if (unlikely(ret)) {
            WL_ERR(("hop_count_limit sub_cmd set failed\n"));
            goto fail;
        }
        nan_buf->count++;
    }

    /* setting RSSI proximity thresholds for 2.4GHz and 5GHz */
    ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data,
            nan_attr_mask);
    if (unlikely(ret)) {
        WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
        goto fail;
    } else {
        nan_buf->count++;
    }

    /* setting nan awake dws */
    ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data,
            cfg, nan_attr_mask);
    if (unlikely(ret)) {
        WL_ERR(("nan awake dws set failed\n"));
        goto fail;
    } else {
        nan_buf->count++;
    }

    /* TODO: Add the code below once use_ndpe_attr is updated by the framework.
     * If NDPE is enabled (cfg.nancfg.ndpe_enabled) and use_ndpe_attr is reset
     * by the framework, disable NDPE using the nan ctrl2 configuration setting.
     * Else, if NDPE is disabled and use_ndpe_attr is set by the framework,
     * enable NDPE in FW.
     */

    if (cmd_data->disc_ind_cfg) {
        /* Disable events */
        WL_TRACE(("Disable events based on flag\n"));
        ret = wl_cfgnan_config_eventmask(ndev, cfg,
                cmd_data->disc_ind_cfg, false);
        if (unlikely(ret)) {
            WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n",
                    ret));
            goto fail;
        }
    }

    if ((cfg->nancfg->support_5g) && ((cmd_data->dwell_time[1]) ||
            (cmd_data->scan_period[1]))) {
        /* setting scan params */
        ret = wl_cfgnan_set_nan_scan_params(ndev, cfg,
                cmd_data, cfg->nancfg->support_5g, nan_attr_mask);
        if (unlikely(ret)) {
            WL_ERR(("scan params set failed for 5g\n"));
            goto fail;
        }
    }
    if ((cmd_data->dwell_time[0]) ||
            (cmd_data->scan_period[0])) {
        ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
        if (unlikely(ret)) {
            WL_ERR(("scan params set failed for 2g\n"));
            goto fail;
        }
    }
    nan_buf->is_set = true;
    nan_buf_size -= nan_iov_data->nan_iov_len;

    if (nan_buf->count) {
        bzero(resp_buf, sizeof(resp_buf));
        ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
                &(cmd_data->status),
                (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
        if (unlikely(ret) || unlikely(cmd_data->status)) {
            WL_ERR((" nan config handler failed ret = %d status = %d\n",
                    ret, cmd_data->status));
            goto fail;
        }
    } else {
        WL_DBG(("No commands to send\n"));
    }

    if ((!cmd_data->bmap) || (cmd_data->avail_params.duration == NAN_BAND_INVALID) ||
            (!cmd_data->chanspec[0])) {
        WL_TRACE(("mandatory arguments are not present to set avail\n"));
        ret = BCME_OK;
    } else {
        cmd_data->avail_params.chanspec[0] = cmd_data->chanspec[0];
        cmd_data->avail_params.bmap = cmd_data->bmap;
        /* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
        ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
                cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
        if (unlikely(ret)) {
            WL_ERR(("Failed to set avail value with type local\n"));
            goto fail;
        }

        ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
                cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
        if (unlikely(ret)) {
            WL_ERR(("Failed to set avail value with type ndc\n"));
            goto fail;
        }
    }

    if (cmd_data->nmi_rand_intvl > 0) {
#ifdef WL_NAN_ENABLE_MERGE
        /* Cluster merge enable/disable is carried in the NMI random interval
         * config param. If the MSB (bit 31) is set, a cluster merge
         * enable/disable config is present, and bit 30 carries the cluster
         * merge enable/disable value to set in firmware.
         */
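        /* Illustrative sketch (not driver code): how a caller would encode
         * the private cluster-merge command into nmi_rand_intvl. The mask
         * macros are the ones tested below; their exact values live in the
         * NAN headers, per the bit layout described above.
         *
         *   uint32 intvl = 0;
         *   intvl |= NAN_NMI_RAND_PVT_CMD_VENDOR;     // bit 31: pvt cmd present
         *   intvl |= NAN_NMI_RAND_CLUSTER_MERGE_ENAB; // bit 30: merge enable
         *   // optionally request auto-DAM LWT mode as well:
         *   intvl |= NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB;
         *   cmd_data->nmi_rand_intvl = intvl;
         */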
        if (cmd_data->nmi_rand_intvl & NAN_NMI_RAND_PVT_CMD_VENDOR) {
            uint8 merge_enable;
            uint8 lwt_mode_enable;
            int status = BCME_OK;

            merge_enable = !!(cmd_data->nmi_rand_intvl &
                    NAN_NMI_RAND_CLUSTER_MERGE_ENAB);
            ret = wl_cfgnan_set_enable_merge(bcmcfg_to_prmry_ndev(cfg), cfg,
                    merge_enable, &status);
            if (unlikely(ret) || unlikely(status)) {
                WL_ERR(("Enable merge: failed to set config request [%d]\n", ret));
                /* As there is no cmd_reply, check if the error is in status or ret */
                if (status) {
                    ret = status;
                }
                goto fail;
            }

            lwt_mode_enable = !!(cmd_data->nmi_rand_intvl &
                    NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB);

            /* set CFG CTRL2 flags1 and flags2 */
            ret = wl_cfgnan_config_control_flag(ndev, cfg,
                    WL_NAN_CTRL2_FLAG1_AUTODAM_LWT_MODE,
                    0, WL_NAN_CMD_CFG_NAN_CONFIG2,
                    &status, lwt_mode_enable);
            if (unlikely(ret) || unlikely(status)) {
                WL_ERR(("Enable dam lwt mode: "
                        "failed to set config request [%d]\n", ret));
                /* As there is no cmd_reply, check if the error is in status or ret */
                if (status) {
                    ret = status;
                }
                goto fail;
            }

            /* reset pvt merge enable bits */
            cmd_data->nmi_rand_intvl &= ~(NAN_NMI_RAND_PVT_CMD_VENDOR |
                    NAN_NMI_RAND_CLUSTER_MERGE_ENAB |
                    NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB);
        }
#endif /* WL_NAN_ENABLE_MERGE */

        if (cmd_data->nmi_rand_intvl) {
            /* Run-time NMI randomization is not supported as of now;
             * a random MAC is used only during nan enable/iface-create.
             */
            WL_ERR(("run time nmi rand not supported, ignoring for now\n"));
        }
    }

    if (cmd_data->dw_early_termination > 0) {
        WL_ERR(("dw early termination is not supported, ignoring for now\n"));
    }

    if (nan_attr_mask & NAN_ATTR_DISC_BEACON_INTERVAL) {
        ret = wl_cfgnan_set_disc_beacon_interval_handler(ndev, cfg,
                cmd_data->disc_bcn_interval);
        if (unlikely(ret)) {
            WL_ERR(("Failed to set beacon interval\n"));
            goto fail;
        }
    }

fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }
    if (nan_iov_data) {
        MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
    }

    NAN_DBG_EXIT();
    return ret;
}

int
wl_cfgnan_support_handler(struct net_device *ndev,
    struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
{
    /* TODO: */
    return BCME_OK;
}

int
wl_cfgnan_status_handler(struct net_device *ndev,
    struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
{
    /* TODO: */
    return BCME_OK;
}

#ifdef WL_NAN_DISC_CACHE
static
nan_svc_info_t *
wl_cfgnan_get_svc_inst(struct bcm_cfg80211 *cfg,
    wl_nan_instance_id svc_inst_id, uint8 ndp_id)
{
    uint8 i, j;
    wl_nancfg_t *nancfg = cfg->nancfg;
    if (ndp_id) {
        for (i = 0; i < NAN_MAX_SVC_INST; i++) {
            for (j = 0; j < NAN_MAX_SVC_INST; j++) {
                if (nancfg->svc_info[i].ndp_id[j] == ndp_id) {
                    return &nancfg->svc_info[i];
                }
            }
        }
    } else if (svc_inst_id) {
        for (i = 0; i < NAN_MAX_SVC_INST; i++) {
            if (nancfg->svc_info[i].svc_id == svc_inst_id) {
                return &nancfg->svc_info[i];
            }
        }
    }
    return NULL;
}

static int
wl_cfgnan_svc_inst_add_ndp(struct bcm_cfg80211 *cfg,
    wl_nan_instance_id svc_inst_id, uint8 ndp_id)
{
    int ret = BCME_OK, i;
    nan_svc_info_t *svc_info;

    svc_info = wl_cfgnan_get_svc_inst(cfg, svc_inst_id, 0);
    if (svc_info) {
        for (i = 0; i < NAN_MAX_SVC_INST; i++) {
            if (!svc_info->ndp_id[i]) {
                WL_TRACE(("Found empty field\n"));
                break;
            }
        }
        if (i == NAN_MAX_SVC_INST) {
            WL_ERR(("%s: cannot accommodate ndp id\n", __FUNCTION__));
            ret = BCME_NORESOURCE;
            goto done;
        }
        svc_info->ndp_id[i] = ndp_id;
    }

done:
    return ret;
}

static int
wl_cfgnan_svc_inst_del_ndp(struct bcm_cfg80211 *cfg,
    wl_nan_instance_id svc_inst_id, uint8 ndp_id)
{
    int ret = BCME_OK, i;
    nan_svc_info_t *svc_info;

    svc_info = wl_cfgnan_get_svc_inst(cfg, svc_inst_id, 0);

    if (svc_info) {
        for (i = 0; i < NAN_MAX_SVC_INST; i++) {
            if (svc_info->ndp_id[i] == ndp_id) {
                svc_info->ndp_id[i] = 0;
                break;
            }
        }
        if (i == NAN_MAX_SVC_INST) {
            WL_ERR(("couldn't find entry for ndp id = %d\n", ndp_id));
            ret = BCME_NOTFOUND;
        }
    }
    return ret;
}

nan_ranging_inst_t *
wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg, struct ether_addr *peer)
{
    uint8 i;
    if (peer) {
        for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
            if (!memcmp(peer, &cfg->nancfg->nan_ranging_info[i].peer_addr,
                    ETHER_ADDR_LEN)) {
                return &(cfg->nancfg->nan_ranging_info[i]);
            }
        }
    }
    return NULL;
}

nan_ranging_inst_t *
wl_cfgnan_get_rng_inst_by_id(struct bcm_cfg80211 *cfg, uint8 rng_id)
{
    uint8 i;
    if (rng_id) {
        for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
            if (cfg->nancfg->nan_ranging_info[i].range_id == rng_id)
            {
                return &(cfg->nancfg->nan_ranging_info[i]);
            }
        }
    }
    WL_ERR(("Couldn't find the ranging instance for rng_id %d\n", rng_id));
    return NULL;
}

/*
 * Find the ranging instance for the given peer;
 * if none exists, create one with the given range role.
 */
nan_ranging_inst_t *
wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg, struct ether_addr *peer,
    nan_range_role_t range_role)
{
    nan_ranging_inst_t *ranging_inst = NULL;
    uint8 i;

    if (!peer) {
        WL_ERR(("Peer address is NULL\n"));
        goto done;
    }

    ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
    if (ranging_inst) {
        goto done;
    }
    WL_TRACE(("Creating ranging instance\n"));

    for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
        if (cfg->nancfg->nan_ranging_info[i].in_use == FALSE) {
            break;
        }
    }

    if (i == NAN_MAX_RANGING_INST) {
        WL_ERR(("No buffer available for the ranging instance"));
        goto done;
    }
    ranging_inst = &cfg->nancfg->nan_ranging_info[i];
    memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
    ranging_inst->range_status = NAN_RANGING_REQUIRED;
    ranging_inst->prev_distance_mm = INVALID_DISTANCE;
    ranging_inst->range_role = range_role;
    ranging_inst->in_use = TRUE;

done:
    return ranging_inst;
}
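
/* Usage sketch (assumed caller, mirroring how
 * wl_cfgnan_check_disc_result_for_ranging() below uses this helper): look
 * up or create an instance for a discovered peer, and fall back gracefully
 * when all slots are exhausted. 'peer_nmi' is a hypothetical input.
 *
 *   nan_ranging_inst_t *inst;
 *   inst = wl_cfgnan_get_ranging_inst(cfg, &peer_nmi,
 *           NAN_RANGING_ROLE_INITIATOR);
 *   if (!inst) {
 *       // all NAN_MAX_RANGING_INST slots are in use
 *       return BCME_NORESOURCE;
 *   }
 */
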
#endif /* WL_NAN_DISC_CACHE */

static int
process_resp_buf(void *iov_resp,
    uint8 *instance_id, uint16 sub_cmd_id)
{
    int res = BCME_OK;
    NAN_DBG_ENTER();

    if (sub_cmd_id == WL_NAN_CMD_DATA_DATAREQ) {
        wl_nan_dp_req_ret_t *dpreq_ret = NULL;
        dpreq_ret = (wl_nan_dp_req_ret_t *)(iov_resp);
        *instance_id = dpreq_ret->ndp_id;
        WL_TRACE(("%s: Initiator NDI: " MACDBG "\n",
                __FUNCTION__, MAC2STRDBG(dpreq_ret->indi.octet)));
    } else if (sub_cmd_id == WL_NAN_CMD_RANGE_REQUEST) {
        wl_nan_range_id *range_id = NULL;
        range_id = (wl_nan_range_id *)(iov_resp);
        *instance_id = *range_id;
        WL_TRACE(("Range id: %d\n", *range_id));
    }
    WL_DBG(("instance_id: %d\n", *instance_id));
    NAN_DBG_EXIT();
    return res;
}

int
wl_cfgnan_cancel_ranging(struct net_device *ndev,
    struct bcm_cfg80211 *cfg, uint8 *range_id, uint8 flags, uint32 *status)
{
    bcm_iov_batch_buf_t *nan_buf = NULL;
    s32 ret = BCME_OK;
    uint16 nan_iov_start, nan_iov_end;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint16 subcmd_len;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    wl_nan_iov_t *nan_iov_data = NULL;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
    wl_nan_range_cancel_ext_t rng_cncl;
    uint8 size_of_iov;

    NAN_DBG_ENTER();

    if (*range_id == 0) {
        WL_ERR(("Invalid Range ID\n"));
        ret = BCME_BADARG;
        goto fail;
    }

    if (cfg->nancfg->version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) {
        size_of_iov = sizeof(rng_cncl);
    } else {
        size_of_iov = sizeof(*range_id);
    }

    bzero(&rng_cncl, sizeof(rng_cncl));
    rng_cncl.range_id = *range_id;
    rng_cncl.flags = flags;

    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
    if (!nan_iov_data) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
    nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
    sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

    ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
            size_of_iov, &subcmd_len);
    if (unlikely(ret)) {
        WL_ERR(("nan_sub_cmd check failed\n"));
        goto fail;
    }

    sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
    sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

    /* Reduce the iov_len size by subcmd_len */
    nan_iov_data->nan_iov_len -= subcmd_len;
    nan_iov_end = nan_iov_data->nan_iov_len;
    nan_buf_size = (nan_iov_start - nan_iov_end);

    if (size_of_iov >= sizeof(rng_cncl)) {
        (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
                &rng_cncl, size_of_iov);
    } else {
        (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
                range_id, size_of_iov);
    }

    nan_buf->is_set = true;
    nan_buf->count++;
    bzero(resp_buf, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
            (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(*status)) {
        WL_ERR(("Range ID %d cancel failed ret %d status %d\n", *range_id, ret, *status));
        goto fail;
    }
    WL_MEM(("Range cancel with Range ID [%d] successful\n", *range_id));

    /* Reset the range id */
    *range_id = 0;
fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }
    if (nan_iov_data) {
        MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
    }
    NAN_DBG_EXIT();
    return ret;
}

#ifdef WL_NAN_DISC_CACHE
static void
wl_cfgnan_clear_svc_cache(struct bcm_cfg80211 *cfg,
    wl_nan_instance_id svc_id)
{
    nan_svc_info_t *svc;
    svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
    if (svc) {
        WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
        memset(svc, 0, sizeof(*svc));
    }
}

static int
wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
    nan_discover_cmd_data_t *cmd_data, uint16 cmd_id, bool update)
{
    int ret = BCME_OK;
    int i;
    nan_svc_info_t *svc_info;
    uint8 svc_id = (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) ? cmd_data->sub_id :
            cmd_data->pub_id;
    wl_nancfg_t *nancfg = cfg->nancfg;

    for (i = 0; i < NAN_MAX_SVC_INST; i++) {
        if (update) {
            if (nancfg->svc_info[i].svc_id == svc_id) {
                svc_info = &nancfg->svc_info[i];
                break;
            } else {
                continue;
            }
        }
        if (!nancfg->svc_info[i].svc_id) {
            svc_info = &nancfg->svc_info[i];
            break;
        }
    }
    if (i == NAN_MAX_SVC_INST) {
        WL_ERR(("%s: cannot accommodate ranging session\n", __FUNCTION__));
        ret = BCME_NORESOURCE;
        goto fail;
    }
    if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
        WL_TRACE(("%s: updating ranging info, enabling\n", __FUNCTION__));
        svc_info->status = 1;
        svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
        svc_info->ranging_ind = cmd_data->ranging_indication;
        svc_info->ingress_limit = cmd_data->ingress_limit;
        svc_info->egress_limit = cmd_data->egress_limit;
        svc_info->ranging_required = 1;
    } else {
        WL_TRACE(("%s: updating ranging info, disabling\n", __FUNCTION__));
        svc_info->status = 0;
        svc_info->ranging_interval = 0;
        svc_info->ranging_ind = 0;
        svc_info->ingress_limit = 0;
        svc_info->egress_limit = 0;
        svc_info->ranging_required = 0;
    }

    /* Reset range status flags on svc creation/update */
    svc_info->svc_range_status = 0;
    svc_info->flags = cmd_data->flags;

    if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
        svc_info->svc_id = cmd_data->sub_id;
        if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
                (cmd_data->tx_match.dlen)) {
            ret = memcpy_s(svc_info->tx_match_filter, sizeof(svc_info->tx_match_filter),
                    cmd_data->tx_match.data, cmd_data->tx_match.dlen);
            if (ret != BCME_OK) {
                WL_ERR(("Failed to copy tx match filter data\n"));
                goto fail;
            }
            svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
        }
    } else {
        svc_info->svc_id = cmd_data->pub_id;
    }
    ret = memcpy_s(svc_info->svc_hash, sizeof(svc_info->svc_hash),
            cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy svc hash\n"));
    }
fail:
    return ret;
}

#ifdef RTT_SUPPORT
/*
 * Reset the ranging instance for an initiator.
 * For a responder, remove it if no geofence target
 * is pending, else reset it.
 */
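/* Decision sketch (mirrors the logic in the function below; not
 * executable driver code):
 *
 *   if (role == RESPONDER || type == RTT_TYPE_NAN_DIRECTED) {
 *       if (no geofence target pending for the peer)
 *           remove the instance (bzero);
 *       else
 *           mark NAN_RANGING_REQUIRED and clear role concurrency;
 *   } else {
 *       mark NAN_RANGING_REQUIRED;   // geofence initiator: reset only
 *   }
 */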
static void
wl_cfgnan_reset_remove_ranging_instance(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *ranging_inst)
{
    dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
    int8 index;
    rtt_geofence_target_info_t* geofence_target;

    ASSERT(ranging_inst);
    if (!ranging_inst) {
        return;
    }

    if ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
            (ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED)) {
        /* Remove the ranging instance for a responder */
        geofence_target = dhd_rtt_get_geofence_target(dhd,
                &ranging_inst->peer_addr, &index);
        if (!geofence_target) {
            /* Remove the rng inst if there is no pending target */
            WL_INFORM_MEM(("Removing Ranging Instance "
                    "peer: " MACDBG "\n",
                    MAC2STRDBG(&ranging_inst->peer_addr)));
            bzero(ranging_inst, sizeof(*ranging_inst));
        } else {
            ranging_inst->range_status = NAN_RANGING_REQUIRED;
            /* resolve range role concurrency */
            WL_INFORM_MEM(("Resolving Role Concurrency constraint, peer : "
                    MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
            ranging_inst->role_concurrency_status = FALSE;
        }
    } else {
        /* For a geofence initiator */
        ranging_inst->range_status = NAN_RANGING_REQUIRED;
    }
}

/*
 * Forcefully remove a ranging instance,
 * along with any corresponding geofence target.
 */
static void
wl_cfgnan_remove_ranging_instance(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *ranging_inst)
{
    dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
    int8 index;
    rtt_geofence_target_info_t* geofence_target;

    ASSERT(ranging_inst);
    if (!ranging_inst) {
        return;
    }

    geofence_target = dhd_rtt_get_geofence_target(dhd,
            &ranging_inst->peer_addr, &index);
    if (geofence_target) {
        dhd_rtt_remove_geofence_target(dhd,
                &geofence_target->peer_addr);
    }
    WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
            MAC2STRDBG(&(ranging_inst->peer_addr))));
    bzero(ranging_inst, sizeof(nan_ranging_inst_t));

    return;
}

static bool
wl_cfgnan_clear_svc_from_ranging_inst(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *ranging_inst, nan_svc_info_t *svc)
{
    int i = 0;
    bool cleared = FALSE;

    if (svc && ranging_inst->in_use) {
        for (i = 0; i < MAX_SUBSCRIBES; i++) {
            if (svc == ranging_inst->svc_idx[i]) {
                ranging_inst->num_svc_ctx--;
                ranging_inst->svc_idx[i] = NULL;
                cleared = TRUE;
                /*
                 * The list is kept free of duplicates,
                 * so we can stop at the first match.
                 */
                break;
            }
        }
    }
    return cleared;
}

static int
wl_cfgnan_clear_svc_from_all_ranging_inst(struct bcm_cfg80211 *cfg, uint8 svc_id)
{
    nan_ranging_inst_t *ranging_inst;
    int i = 0;
    int ret = BCME_OK;

    nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
    if (!svc) {
        WL_ERR(("svc not found\n"));
        ret = BCME_NOTFOUND;
        goto done;
    }
    for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
        ranging_inst = &(cfg->nancfg->nan_ranging_info[i]);
        wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
    }

done:
    return ret;
}

static int
wl_cfgnan_ranging_clear_publish(struct bcm_cfg80211 *cfg,
    struct ether_addr *peer, uint8 svc_id)
{
    nan_ranging_inst_t *ranging_inst = NULL;
    nan_svc_info_t *svc = NULL;
    bool cleared = FALSE;
    int ret = BCME_OK;

    ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
    if (!ranging_inst || !ranging_inst->in_use) {
        goto done;
    }

    WL_INFORM_MEM(("Check clear Ranging for pub update, sub id = %d,"
            " range_id = %d, peer addr = " MACDBG "\n", svc_id,
            ranging_inst->range_id, MAC2STRDBG(peer)));
    svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
    if (!svc) {
        WL_ERR(("svc not found, svc_id = %d\n", svc_id));
        ret = BCME_NOTFOUND;
        goto done;
    }

    cleared = wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
    if (!cleared) {
        /* An update is needed only if this svc was actually cleared */
        ret = BCME_NOTFOUND;
        goto done;
    }

    wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
    wl_cfgnan_reset_geofence_ranging(cfg, NULL,
            RTT_SCHED_RNG_TERM_PUB_RNG_CLEAR, TRUE);

done:
    return ret;
}

/* API to terminate/clear all directed nan-rtt sessions.
 * Can be called from the framework's RTT stop context.
 */
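/* Usage sketch (hypothetical framework hook, assuming the primary ndev):
 *
 *   struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
 *   if (wl_cfgnan_terminate_directed_rtt_sessions(ndev, cfg) != BCME_OK) {
 *       // at least one directed session could not be cancelled cleanly
 *   }
 */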
int
wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev,
    struct bcm_cfg80211 *cfg)
{
    nan_ranging_inst_t *ranging_inst;
    int i, ret = BCME_OK;
    uint32 status;

    for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
        ranging_inst = &cfg->nancfg->nan_ranging_info[i];
        if (ranging_inst->range_id && ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
            if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
                ret = wl_cfgnan_cancel_ranging(ndev, cfg, &ranging_inst->range_id,
                        NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
                if (unlikely(ret) || unlikely(status)) {
                    WL_ERR(("nan range cancel failed ret = %d status = %d\n",
                            ret, status));
                }
            }
            wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
                    RTT_SHCED_HOST_DIRECTED_TERM, FALSE);
        }
    }
    return ret;
}

/*
 * Suspend the on-going geofence ranging session with the given peer,
 * if any. The request is dropped if:
 *  - the peer is NULL (use wl_cfgnan_suspend_all_geofence_rng_sessions()
 *    to suspend every session), or
 *  - ranging is not in progress, or
 *  - ranging is in progress but not with the given peer.
 */
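/* Usage sketch (as used by the RNG_REQ handler further below): silently
 * tear down an existing geofence session with a peer before accepting a
 * new request from it.
 *
 *   u8 flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
 *           NAN_RNG_TERM_FLAG_SILENT_TEARDOWN;
 *   wl_cfgnan_suspend_geofence_rng_session(ndev, &rng_ind->peer_m_addr,
 *           RTT_GEO_SUSPN_PEER_RTT_TRIGGER, flags);
 */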
int
wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
    struct ether_addr *peer, int suspend_reason, u8 cancel_flags)
{
    int ret = BCME_OK;
    uint32 status;
    nan_ranging_inst_t *ranging_inst = NULL;
    struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
    int suspend_req_dropped_at = 0;
    dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);

    UNUSED_PARAMETER(suspend_req_dropped_at);

    ASSERT(peer);
    if (!peer) {
        WL_DBG(("Incoming peer is NULL, suspend req dropped\n"));
        suspend_req_dropped_at = 1;
        goto exit;
    }

    if (!wl_ranging_geofence_session_with_peer(cfg, peer)) {
        WL_DBG(("Geofence ranging not in progress with given peer,"
                " suspend req dropped\n"));
        suspend_req_dropped_at = 2;
        goto exit;
    }

    ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
    if (ranging_inst) {
        cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
        ret = wl_cfgnan_cancel_ranging(ndev, cfg,
                &ranging_inst->range_id, cancel_flags, &status);
        if (unlikely(ret) || unlikely(status)) {
            WL_ERR(("Geofence range suspend failed, err = %d, status = %d, "
                    "suspend_reason = %d, peer: " MACDBG "\n",
                    ret, status, suspend_reason, MAC2STRDBG(peer)));
        }

        ranging_inst->range_status = NAN_RANGING_REQUIRED;
        dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
                &ranging_inst->peer_addr);

        if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER &&
                ranging_inst->role_concurrency_status) {
            /* resolve range role concurrency */
            WL_INFORM_MEM(("Resolving Role Concurrency constraint, peer : "
                    MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
            ranging_inst->role_concurrency_status = FALSE;
        }

        WL_INFORM_MEM(("Geofence range suspended, "
                "suspend_reason = %d, peer: " MACDBG "\n",
                suspend_reason, MAC2STRDBG(peer)));
    }

exit:
    /* Post pending discovery results */
    if (ranging_inst &&
            ((suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER) ||
            (suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER))) {
        wl_cfgnan_disc_result_on_geofence_cancel(cfg, ranging_inst);
    }

    if (suspend_req_dropped_at) {
        if (ranging_inst) {
            WL_INFORM_MEM(("Ranging Suspend Req with peer: " MACDBG
                    ", dropped at = %d\n", MAC2STRDBG(&ranging_inst->peer_addr),
                    suspend_req_dropped_at));
        } else {
            WL_INFORM_MEM(("Ranging Suspend Req dropped at = %d\n",
                    suspend_req_dropped_at));
        }
    }
    return ret;
}

/*
 * Suspend all geofence ranging sessions,
 * both initiator and responder roles.
 */
void
wl_cfgnan_suspend_all_geofence_rng_sessions(struct net_device *ndev,
    int suspend_reason, u8 cancel_flags)
{
    uint8 i = 0;
    int ret = BCME_OK;
    uint32 status;
    nan_ranging_inst_t *ranging_inst = NULL;
    struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
    dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);

    WL_INFORM_MEM(("Suspending all geofence sessions: "
            "suspend_reason = %d\n", suspend_reason));

    cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
    for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
        ranging_inst = &cfg->nancfg->nan_ranging_info[i];
        /* Cancel ranging if it is in progress for this rang_inst */
        if (ranging_inst->in_use &&
                NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
            ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
                    cfg, &ranging_inst->range_id,
                    NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
            if (unlikely(ret) || unlikely(status)) {
                WL_ERR(("wl_cfgnan_suspend_all_geofence_rng_sessions: "
                        "nan range cancel failed ret = %d status = %d\n",
                        ret, status));
            } else {
                dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
                        &ranging_inst->peer_addr);
                wl_cfgnan_reset_remove_ranging_instance(cfg, ranging_inst);
            }
        }
    }

    return;
}

/*
 * Terminate the given ranging instance
 * if it has no pending ranging sub-service.
 */
static void
wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
    nan_ranging_inst_t *ranging_inst)
{
    int ret = BCME_OK;
    uint32 status;
    dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);

    if (ranging_inst->num_svc_ctx != 0) {
        /*
         * Make sure all svc_insts are removed from the range_inst
         * before cancelling ranging and removing the target in the caller.
         */
        return;
    }

    /* Cancel ranging if it is in progress for this rang_inst */
    if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
        ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
                cfg, &ranging_inst->range_id,
                NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
        if (unlikely(ret) || unlikely(status)) {
            WL_ERR(("%s: nan range cancel failed ret = %d status = %d\n",
                    __FUNCTION__, ret, status));
        } else {
            WL_DBG(("Range cancelled\n"));
            dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
                    &ranging_inst->peer_addr);
        }
    }

    /* Remove the ranging instance and clean up any corresponding target */
    wl_cfgnan_remove_ranging_instance(cfg, ranging_inst);
}

/*
 * Terminate all ranging sessions
 * that have no pending ranging sub-service.
 */
static void
wl_cfgnan_terminate_all_obsolete_ranging_sessions(
    struct bcm_cfg80211 *cfg)
{
    /* cancel all related ranging instances */
    uint8 i = 0;
    nan_ranging_inst_t *ranging_inst = NULL;

    for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
        ranging_inst = &cfg->nancfg->nan_ranging_info[i];
        if (ranging_inst->in_use) {
            wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
        }
    }

    return;
}

/*
 * Store the svc_ctx for processing during RNG_RPT.
 * Returns BCME_OK only when the svc is added.
 */
static int
wl_cfgnan_update_ranging_svc_inst(nan_ranging_inst_t *ranging_inst,
    nan_svc_info_t *svc)
{
    int ret = BCME_OK;
    int i = 0;

    for (i = 0; i < MAX_SUBSCRIBES; i++) {
        if (ranging_inst->svc_idx[i] == svc) {
            WL_DBG(("SVC ctx for ranging already present, "
                    "duplication not supported: sub_id: %d\n", svc->svc_id));
            ret = BCME_UNSUPPORTED;
            goto done;
        }
    }
    for (i = 0; i < MAX_SUBSCRIBES; i++) {
        if (ranging_inst->svc_idx[i]) {
            continue;
        } else {
            WL_DBG(("Adding SVC ctx for ranging..svc_id %d\n", svc->svc_id));
            ranging_inst->svc_idx[i] = svc;
            ranging_inst->num_svc_ctx++;
            ret = BCME_OK;
            goto done;
        }
    }
    if (i == MAX_SUBSCRIBES) {
        WL_ERR(("wl_cfgnan_update_ranging_svc_inst: "
                "No resource to hold ref SVC ctx..svc_id %d\n", svc->svc_id));
        ret = BCME_NORESOURCE;
        goto done;
    }
done:
    return ret;
}

bool
wl_ranging_geofence_session_with_peer(struct bcm_cfg80211 *cfg,
    struct ether_addr *peer_addr)
{
    bool ret = FALSE;
    nan_ranging_inst_t *rng_inst = NULL;

    rng_inst = wl_cfgnan_check_for_ranging(cfg,
            peer_addr);
    if (rng_inst &&
            (NAN_RANGING_IS_IN_PROG(rng_inst->range_status))) {
        ret = TRUE;
    }

    return ret;
}

int
wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
    struct ether_addr *peer_addr)
{
    int ret = BCME_OK;
    int err_at = 0;
    struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
    dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
    nan_ranging_inst_t *ranging_inst;
    ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);

    if (!ranging_inst) {
        WL_INFORM_MEM(("Ranging entry for peer:" MACDBG ", not found\n",
                MAC2STRDBG(peer_addr)));
        ASSERT(0);
        /* A ranging inst should have been added before adding the target */
        dhd_rtt_remove_geofence_target(dhd, peer_addr);
        ret = BCME_ERROR;
        err_at = 1;
        goto exit;
    }

    if (!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
        WL_DBG(("Trigger range request with first svc in svc list of range inst\n"));
        ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
                cfg, ranging_inst, ranging_inst->svc_idx[0],
                NAN_RANGE_REQ_CMD, TRUE);
        if (ret != BCME_OK) {
            /* A busy status means a ranging session with this peer already exists */
            if (ret == BCME_BUSY) {
                /* TODO: Attempt again over a timer */
                err_at = 2;
            } else {
                /* Remove the target and clean up the ranging inst */
                wl_cfgnan_remove_ranging_instance(cfg, ranging_inst);
                err_at = 3;
                goto exit;
            }
        } else {
            ranging_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
            ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
        }
    } else if (ranging_inst->range_role != NAN_RANGING_ROLE_RESPONDER) {
        /* Already in progress but not as responder; this should not happen */
        ASSERT(!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status));
        ret = BCME_ERROR;
        err_at = 4;
        goto exit;
    } else {
        /* Already in progress as responder, bail out */
        goto exit;
    }

exit:
    if (ret) {
        WL_ERR(("wl_cfgnan_trigger_geofencing_ranging: Failed to "
                "trigger ranging, peer: " MACDBG " ret"
                " = (%d), err_at = %d\n", MAC2STRDBG(peer_addr),
                ret, err_at));
    }
    return ret;
}

static int
wl_cfgnan_check_disc_result_for_ranging(struct bcm_cfg80211 *cfg,
    nan_event_data_t* nan_event_data, bool *send_disc_result)
{
    nan_svc_info_t *svc;
    int ret = BCME_OK;
    rtt_geofence_target_info_t geofence_target;
    dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
    uint8 index, rtt_invalid_reason = RTT_STATE_VALID;
    bool add_target;

    *send_disc_result = TRUE;
    svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);

    if (svc && svc->ranging_required) {
        nan_ranging_inst_t *ranging_inst;
        ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
                &nan_event_data->remote_nmi,
                NAN_RANGING_ROLE_INITIATOR);
        if (!ranging_inst) {
            ret = BCME_NORESOURCE;
            goto exit;
        }
        ASSERT(ranging_inst->range_role != NAN_RANGING_ROLE_INVALID);

        /* For the responder role, the range state must be in progress */
        ASSERT((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) ||
                NAN_RANGING_IS_IN_PROG(ranging_inst->range_status));

        /*
         * On receiving a disc result with ranging required, add a target if
         * the ranging role is responder (range state is always in progress),
         * or the ranging role is initiator and ranging is not already in progress.
         */
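        /* Truth table for the add_target expression below:
         *
         *   range_role   range in prog   add_target
         *   RESPONDER    (always yes)    yes
         *   INITIATOR    no              yes
         *   INITIATOR    yes             no (only the svc ref is updated)
         */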
        add_target = ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
                ((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) &&
                (!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status))));
        if (add_target) {
            WL_DBG(("Add range request to geofence target list\n"));
            memcpy(&geofence_target.peer_addr, &nan_event_data->remote_nmi,
                    ETHER_ADDR_LEN);
            /* check if the target is already added */
            if (!dhd_rtt_get_geofence_target(dhd, &nan_event_data->remote_nmi, &index))
            {
                ret = dhd_rtt_add_geofence_target(dhd, &geofence_target);
                if (unlikely(ret)) {
                    WL_ERR(("Failed to add geofence tgt, ret = (%d)\n", ret));
                    bzero(ranging_inst, sizeof(*ranging_inst));
                    goto exit;
                } else {
                    WL_INFORM_MEM(("Geofence tgt added:" MACDBG " sub_id:%d\n",
                            MAC2STRDBG(&geofence_target.peer_addr),
                            svc->svc_id));
                }
            }
            if (wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc)
                    != BCME_OK) {
                goto exit;
            }
            if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
                /* Adding an RTT target while responder leads to role concurrency */
                WL_INFORM_MEM(("Entering Role Concurrency constraint, peer : "
                        MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
                ranging_inst->role_concurrency_status = TRUE;
            } else {
                /* Trigger/Reset geofence RTT */
                wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
                        RTT_SCHED_SUB_MATCH, TRUE);
            }
        } else {
            /* Target already added; check & add the svc_inst ref to rang_inst */
            wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc);
        }
        /* The disc event will be given on receiving the range_rpt event */
        WL_TRACE(("Disc event will be given when the Range RPT event is recvd"));
    } else {
        ret = BCME_UNSUPPORTED;
    }

exit:
    if (ret == BCME_OK) {
        /* Check whether the disc result has to be sent immediately or not */
        rtt_invalid_reason = dhd_rtt_invalid_states
                (bcmcfg_to_prmry_ndev(cfg), &nan_event_data->remote_nmi);
        /*
         * If instant RTT is not possible (RTT postponed), send the
         * discovery result instantly, as in the case of an invalid rtt
         * state such as ndp connected/connecting, or role concurrency
         * active with the peer. Otherwise, the result is posted on the
         * ranging report event after RTT is done.
         */
        if ((rtt_invalid_reason == RTT_STATE_VALID) &&
                (!wl_cfgnan_check_role_concurrency(cfg,
                &nan_event_data->remote_nmi))) {
            /* Avoid sending the disc result instantly */
            *send_disc_result = FALSE;
        }
    }

    return ret;
}

bool
wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg)
{
    int i = 0;
    uint8 rng_progress_count = 0;
    nan_ranging_inst_t *ranging_inst = NULL;

    for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
        ranging_inst = &cfg->nancfg->nan_ranging_info[i];
        if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
            rng_progress_count++;
        }
    }

    if (rng_progress_count >= NAN_MAX_RANGING_SSN_ALLOWED) {
        return FALSE;
    }
    return TRUE;
}

uint8
wl_cfgnan_cancel_rng_responders(struct net_device *ndev)
{
    int i = 0;
    uint8 num_resp_cancelled = 0;
    int status, ret;
    nan_ranging_inst_t *ranging_inst = NULL;
    struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);

    for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
        ranging_inst = &cfg->nancfg->nan_ranging_info[i];
        if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status) &&
                (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER)) {
            num_resp_cancelled++;
            ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
                    &ranging_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
            if (unlikely(ret) || unlikely(status)) {
                WL_ERR(("wl_cfgnan_cancel_rng_responders: Failed to cancel"
                        " existing ranging, ret = (%d)\n", ret));
            }
            WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
                    MAC2STRDBG(&(ranging_inst->peer_addr))));
            bzero(ranging_inst, sizeof(*ranging_inst));
        }
    }
    return num_resp_cancelled;
}

/* ranging request event handler */
static int
wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
    wl_nan_ev_rng_req_ind_t *rng_ind)
{
    int ret = BCME_OK;
    nan_ranging_inst_t *ranging_inst = NULL;
    uint8 cancel_flags = 0;
    bool accept = TRUE;
    nan_ranging_inst_t tmp_rng_inst;
    struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
    struct ether_addr * peer_addr = &(rng_ind->peer_m_addr);
    uint8 rtt_invalid_state;
    dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
    rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
    int err_at = 0;

    WL_DBG(("Trigger range response\n"));

    /* Check if ranging is allowed */
    rtt_invalid_state = dhd_rtt_invalid_states(ndev, peer_addr);
    if (rtt_invalid_state != RTT_STATE_VALID) {
        WL_INFORM_MEM(("Cannot allow ranging due to reason %d\n", rtt_invalid_state));
        ret = BCME_NORESOURCE;
        err_at = 1;
        goto done;
    }

    mutex_lock(&rtt_status->rtt_mutex);

    if (rtt_status && !RTT_IS_STOPPED(rtt_status)) {
        WL_INFORM_MEM(("Directed RTT in progress..reject RNG_REQ\n"));
        ret = BCME_NORESOURCE;
        err_at = 2;
        goto done;
    }

    /* Check if ranging setup is in progress */
    if (dhd_rtt_is_geofence_setup_inprog(dhd)) {
        WL_INFORM_MEM(("Ranging setup already in progress, "
                "RNG IND event dropped\n"));
        err_at = 3;
        ret = BCME_NOTREADY;
        goto done;
    }

    /* Check if we already have a ranging session with the peer.
     * If so, the policies below apply:
     * If we are already a geofence initiator or responder w.r.t. the peer,
     * silently tear down the current session and accept the REQ.
     * If we are in the directed RTT initiator role, reject it.
     */
    ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
    if (ranging_inst) {
        if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
            if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE ||
                    ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
                WL_INFORM_MEM(("Already responder/geofence for the peer, cancel "
                        "current ssn and accept new one,"
                        " range_type = %d, role = %d\n",
                        ranging_inst->range_type, ranging_inst->range_role));
                cancel_flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
                        NAN_RNG_TERM_FLAG_SILENT_TEARDOWN;
                wl_cfgnan_suspend_geofence_rng_session(ndev,
                        &(rng_ind->peer_m_addr),
                        RTT_GEO_SUSPN_PEER_RTT_TRIGGER, cancel_flags);
            } else {
                WL_ERR(("Reject the RNG_REQ_IND in directed rtt initiator role\n"));
                err_at = 4;
                ret = BCME_BUSY;
                goto done;
            }
        } else {
            /* Check if a new ranging session is allowed */
            if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
                WL_ERR(("Cannot allow more ranging sessions\n"));
                err_at = 5;
                ret = BCME_NORESOURCE;
                goto done;
            }
        }
        /* reset the ranging instance for the responder role */
        ranging_inst->range_status = NAN_RANGING_REQUIRED;
        ranging_inst->range_role = NAN_RANGING_ROLE_RESPONDER;
        ranging_inst->range_type = 0;
    } else {
        /* Check if a new ranging session is allowed */
        if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
            WL_ERR(("Cannot allow more ranging sessions\n"));
            err_at = 6;
            ret = BCME_NORESOURCE;
            goto done;
        }

        ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
                NAN_RANGING_ROLE_RESPONDER);
        ASSERT(ranging_inst);
        if (!ranging_inst) {
            WL_ERR(("Failed to create ranging instance\n"));
            err_at = 7;
            ret = BCME_NORESOURCE;
            goto done;
        }
    }

done:
    if (ret != BCME_OK) {
        /* reject the REQ using a temp ranging instance */
        bzero(&tmp_rng_inst, sizeof(tmp_rng_inst));
        ranging_inst = &tmp_rng_inst;
        (void)memcpy_s(&tmp_rng_inst.peer_addr, ETHER_ADDR_LEN,
                &rng_ind->peer_m_addr, ETHER_ADDR_LEN);
        accept = FALSE;
    }

    ranging_inst->range_id = rng_ind->rng_id;

    WL_INFORM_MEM(("Trigger Ranging at Responder, ret = %d, err_at = %d, "
            "accept = %d, rng_id = %d\n", ret, err_at,
            accept, rng_ind->rng_id));
    ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
            NULL, NAN_RANGE_REQ_EVNT, accept);
    if (unlikely(ret) || !accept) {
        WL_ERR(("Failed to trigger ranging while handling range request, "
                " ret = %d, rng_id = %d, accept %d\n", ret,
                rng_ind->rng_id, accept));
        wl_cfgnan_reset_remove_ranging_instance(cfg, ranging_inst);
    } else {
        dhd_rtt_set_geofence_setup_status(dhd, TRUE,
                &ranging_inst->peer_addr);
    }
    mutex_unlock(&rtt_status->rtt_mutex);
    return ret;
}

/* ranging request and response iovar handler */
int
wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
    void *ranging_ctxt, nan_svc_info_t *svc,
    uint8 range_cmd, bool accept_req)
{
    s32 ret = BCME_OK;
    bcm_iov_batch_buf_t *nan_buf = NULL;
    wl_nan_range_req_t *range_req = NULL;
    wl_nan_range_resp_t *range_resp = NULL;
    bcm_iov_batch_subcmd_t *sub_cmd = NULL;
    uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
    uint32 status;
    uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
    nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
    nan_avail_cmd_data cmd_data;

    NAN_DBG_ENTER();

    bzero(&cmd_data, sizeof(cmd_data));
    ret = memcpy_s(&cmd_data.peer_nmi, ETHER_ADDR_LEN,
            &ranging_inst->peer_addr, ETHER_ADDR_LEN);
    if (ret != BCME_OK) {
        WL_ERR(("Failed to copy ranging peer addr\n"));
        goto fail;
    }

    cmd_data.avail_period = NAN_RANGING_PERIOD;
    ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
            cfg, &cmd_data, WL_AVAIL_LOCAL);
    if (ret != BCME_OK) {
        WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
        goto fail;
    }

    ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
            cfg, &cmd_data, WL_AVAIL_RANGING);
    if (unlikely(ret)) {
        WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
        goto fail;
    }

    nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
    if (!nan_buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        ret = BCME_NOMEM;
        goto fail;
    }

    nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
    nan_buf->count = 0;
    nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

    sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    if (range_cmd == NAN_RANGE_REQ_CMD) {
        sub_cmd->id = htod16(WL_NAN_CMD_RANGE_REQUEST);
        sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_req_t);
        range_req = (wl_nan_range_req_t *)(sub_cmd->data);
        /* ranging config */
        range_req->peer = ranging_inst->peer_addr;
        if (svc) {
            range_req->interval = svc->ranging_interval;
            /* Limits are in cm from the host */
            range_req->ingress = svc->ingress_limit;
            range_req->egress = svc->egress_limit;
        }
        range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
    } else {
        /* range response config */
        sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
        sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_resp_t);
        range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
        range_resp->range_id = ranging_inst->range_id;
        range_resp->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
        if (accept_req) {
            range_resp->status = NAN_RNG_REQ_ACCEPTED_BY_HOST;
        } else {
            range_resp->status = NAN_RNG_REQ_REJECTED_BY_HOST;
        }
        nan_buf->is_set = true;
    }

    nan_buf_size -= (sub_cmd->len +
            OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
    nan_buf->count++;

    bzero(resp_buf, sizeof(resp_buf));
    ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
            &status,
            (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
    if (unlikely(ret) || unlikely(status)) {
        WL_ERR(("nan ranging failed ret = %d status = %d\n",
                ret, status));
        ret = (ret == BCME_OK) ? status : ret;
        goto fail;
    }
    WL_TRACE(("nan ranging trigger successful\n"));
    if (range_cmd == NAN_RANGE_REQ_CMD) {
        WL_INFORM_MEM(("Ranging Req Triggered"
                " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
                MAC2STRDBG(&ranging_inst->peer_addr), range_req->indication,
                range_req->ingress, range_req->egress));
    } else {
        WL_INFORM_MEM(("Ranging Resp Triggered"
                " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
                MAC2STRDBG(&ranging_inst->peer_addr), range_resp->indication,
                range_resp->ingress, range_resp->egress));
    }

    /* check the response buff for the request */
    if (range_cmd == NAN_RANGE_REQ_CMD) {
        ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
                &ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
        WL_INFORM_MEM(("ranging instance returned %d\n", ranging_inst->range_id));
    }

    /* Move the ranging instance to the setup-in-progress state */
    ranging_inst->range_status = NAN_RANGING_SETUP_IN_PROGRESS;

fail:
    if (nan_buf) {
        MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
    }

    NAN_DBG_EXIT();
    return ret;
}
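
/* Usage sketch (both call sites exist in this file): an initiator sends a
 * range request for the first svc on the instance, while a responder
 * answers a firmware RNG_REQ event with an accept/reject response.
 *
 *   // geofence initiator (see wl_cfgnan_trigger_geofencing_ranging()):
 *   ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
 *           ranging_inst->svc_idx[0], NAN_RANGE_REQ_CMD, TRUE);
 *
 *   // responder (see wl_cfgnan_handle_ranging_ind()):
 *   ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
 *           NULL, NAN_RANGE_REQ_EVNT, accept);
 */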

bool
wl_cfgnan_ranging_is_in_prog_for_peer(struct bcm_cfg80211 *cfg, struct ether_addr *peer_addr)
{
    nan_ranging_inst_t *rng_inst = NULL;

    rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);

    return (rng_inst && NAN_RANGING_IS_IN_PROG(rng_inst->range_status));
}

#endif /* RTT_SUPPORT */
#endif /* WL_NAN_DISC_CACHE */

static void *wl_nan_bloom_alloc(void *ctx, uint size)
{
    uint8 *buf;
    BCM_REFERENCE(ctx);

    buf = kmalloc(size, GFP_KERNEL);
    if (!buf) {
        WL_ERR(("%s: memory allocation failed\n", __func__));
        buf = NULL;
    }
    return buf;
}

static void wl_nan_bloom_free(void *ctx, void *buf, uint size)
{
    BCM_REFERENCE(ctx);
    BCM_REFERENCE(size);
    if (buf) {
        kfree(buf);
    }
}

static uint wl_nan_hash(void *ctx, uint index, const uint8 *input, uint input_len)
{
    uint8* filter_idx = (uint8*)ctx;
    uint8 i = (*filter_idx * WL_NAN_HASHES_PER_BLOOM) + (uint8)index;
    uint b = 0;

    /* Steps 1 and 2 as explained in Section 6.2 */
    /* Concatenate the index to the input and run CRC32 by calling hndcrc32 twice */
    GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
    b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
    b = hndcrc32((uint8*)input, input_len, b);
    GCC_DIAGNOSTIC_POP();
    /* Obtain the last 2 bytes of the CRC32 output */
    b &= NAN_BLOOM_CRC32_MASK;

    /* Step 3 is completed by the bcmbloom functions */
    return b;
}
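
/* Worked sketch (standalone, not driver code): one bucket computation for
 * filter index 0 and hash index 2 over a service-hash buffer, mirroring
 * wl_nan_hash() above. 'svc' is an assumed input buffer.
 *
 *   uint8 filter_idx = 0;
 *   uint8 svc[WL_NAN_SVC_HASH_LEN];
 *   uint8 i = (filter_idx * WL_NAN_HASHES_PER_BLOOM) + 2;
 *   uint b = hndcrc32(&i, sizeof(i), CRC32_INIT_VALUE);
 *   b = hndcrc32(svc, sizeof(svc), b);
 *   b &= NAN_BLOOM_CRC32_MASK;  // keep only the low bytes, as above
 */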

static int wl_nan_bloom_create(bcm_bloom_filter_t **bp, uint *idx, uint size)
{
    uint i;
    int err;

    err = bcm_bloom_create(wl_nan_bloom_alloc, wl_nan_bloom_free,
            idx, WL_NAN_HASHES_PER_BLOOM, size, bp);
    if (err != BCME_OK) {
        goto exit;
    }

    /* Populate the bloom filter with hash functions */
    for (i = 0; i < WL_NAN_HASHES_PER_BLOOM; i++) {
        err = bcm_bloom_add_hash(*bp, wl_nan_hash, &i);
        if (err) {
            WL_ERR(("bcm_bloom_add_hash failed\n"));
            goto exit;
        }
    }
exit:
    return err;
}

static int
wl_cfgnan_sd_params_handler(struct net_device *ndev,
    nan_discover_cmd_data_t *cmd_data, uint16 cmd_id,
    void *p_buf, uint16 *nan_buf_size)
{
    s32 ret = BCME_OK;
    uint8 *pxtlv, *srf = NULL, *srf_mac = NULL, *srftmp = NULL;
    uint16 buflen_avail;
    bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
    wl_nan_sd_params_t *sd_params = (wl_nan_sd_params_t *)sub_cmd->data;
    uint16 srf_size = 0;
    uint bloom_size, a;
    bcm_bloom_filter_t *bp = NULL;
    /* Bloom filter index default; indicates it has not been set */
    uint bloom_idx = 0xFFFFFFFF;
    uint16 bloom_len = NAN_BLOOM_LENGTH_DEFAULT;
    /* srf_ctrl_size = bloom_len + srf control field */
    uint16 srf_ctrl_size = bloom_len + 1;
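    /* Sizing sketch: with the default bloom length, the SRF attribute body
     * packed further below is one control byte followed by the filter, i.e.
     *   srf_ctrl_size = NAN_BLOOM_LENGTH_DEFAULT + 1;
     * For the sequential-MAC variant the size is instead
     *   NAN_SRF_CTRL_FIELD_LEN + num_mac_addr * ETHER_ADDR_LEN,
     * as computed in the SRF branch below.
     */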

    dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
    struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
    BCM_REFERENCE(cfg);

    NAN_DBG_ENTER();

    if (cmd_data->period) {
        sd_params->awake_dw = cmd_data->period;
    }
    sd_params->period = 1;

    if (cmd_data->ttl) {
        sd_params->ttl = cmd_data->ttl;
    } else {
        sd_params->ttl = WL_NAN_TTL_UNTIL_CANCEL;
    }

    sd_params->flags = 0;
    sd_params->flags = cmd_data->flags;

    /* NAN service-based event suppression flags */
    if (cmd_data->recv_ind_flag) {
        /* BIT0 - If set, the host won't receive the "terminated" event */
        if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT)) {
            sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED;
        }

        /* BIT1 - If set, the host won't receive the match expiry event */
        /* TODO: expiry not yet supported */
        if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT)) {
            WL_DBG(("Need to add match expiry event\n"));
        }
        /* BIT2 - If set, the host won't receive the "receive" event */
        if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT)) {
            sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE;
        }
        /* BIT3 - If set, the host won't receive the "replied" event */
        if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_REPLIED_BIT)) {
            sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
        }
    }
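    /* Illustrative sketch (not driver code, assuming CHECK_BIT() tests
     * plain bit positions): a framework subscriber that wants neither
     * "terminated" nor "replied" events would pass
     *
     *   cmd_data->recv_ind_flag =
     *       (1u << WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT) |
     *       (1u << WL_NAN_EVENT_SUPPRESS_REPLIED_BIT);
     *
     * which maps to WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED and
     * WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED above.
     */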
    if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
        sd_params->instance_id = cmd_data->pub_id;
        if (cmd_data->service_responder_policy) {
            /* Do not disturb avail if dam is supported */
            if (FW_SUPPORTED(dhdp, autodam)) {
                /* NAN accept policy: per-service policy.
                 * Based on this policy (ALL/NONE), the responder side
                 * will send ACCEPT/REJECT.
                 * If set, the auto datapath response will be sent by FW.
                 */
                sd_params->flags |= WL_NAN_SVC_CTRL_AUTO_DPRESP;
            } else {
                WL_ERR(("svc specific auto dp resp is not"
                        " supported in non-auto dam fw\n"));
            }
        }
    } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
        sd_params->instance_id = cmd_data->sub_id;
    } else {
        ret = BCME_USAGE_ERROR;
        WL_ERR(("wrong command id = %d\n", cmd_id));
        goto fail;
    }

    if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
            (cmd_data->svc_hash.data)) {
        ret = memcpy_s((uint8*)sd_params->svc_hash,
                sizeof(sd_params->svc_hash),
                cmd_data->svc_hash.data,
                cmd_data->svc_hash.dlen);
        if (ret != BCME_OK) {
            WL_ERR(("Failed to copy svc hash\n"));
            goto fail;
        }
#ifdef WL_NAN_DEBUG
        prhex("hashed svc name", cmd_data->svc_hash.data,
                cmd_data->svc_hash.dlen);
#endif /* WL_NAN_DEBUG */
    } else {
        ret = BCME_ERROR;
        WL_ERR(("invalid svc hash data or length = %d\n",
                cmd_data->svc_hash.dlen));
        goto fail;
    }

    /* check if ranging support is present in firmware */
    if ((cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) &&
            !FW_SUPPORTED(dhdp, nanrange)) {
        WL_ERR(("Service requires ranging but fw doesn't support it\n"));
        ret = BCME_UNSUPPORTED;
        goto fail;
    }

    /* Optional parameters: fill the sub_command block with service descriptor attr */
    sub_cmd->id = htod16(cmd_id);
    sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
    sub_cmd->len = sizeof(sub_cmd->u.options) +
            OFFSETOF(wl_nan_sd_params_t, optional[0]);
    pxtlv = (uint8*)&sd_params->optional[0];

    *nan_buf_size -= sub_cmd->len;
    buflen_avail = *nan_buf_size;

    if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
        WL_TRACE(("optional svc_info present, pack it\n"));
        ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
                WL_NAN_XTLV_SD_SVC_INFO,
                cmd_data->svc_info.dlen,
                cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
        if (unlikely(ret)) {
            WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__));
            goto fail;
        }
    }

    if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
        WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
                cmd_data->sde_svc_info.dlen));
        ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
                WL_NAN_XTLV_SD_SDE_SVC_INFO,
                cmd_data->sde_svc_info.dlen,
                cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
        if (unlikely(ret)) {
            WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
            goto fail;
        }
    }

    if (cmd_data->tx_match.dlen) {
        WL_TRACE(("optional tx match filter present (len=%d)\n",
                cmd_data->tx_match.dlen));
        ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
                WL_NAN_XTLV_CFG_MATCH_TX, cmd_data->tx_match.dlen,
                cmd_data->tx_match.data, BCM_XTLV_OPTION_ALIGN32);
        if (unlikely(ret)) {
            WL_ERR(("%s: failed on xtlv_pack for tx match filter\n", __FUNCTION__));
            goto fail;
        }
    }

    if (cmd_data->life_count) {
        WL_TRACE(("optional life count is present, pack it\n"));
        ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT,
                sizeof(cmd_data->life_count), &cmd_data->life_count,
                BCM_XTLV_OPTION_ALIGN32);
        if (unlikely(ret)) {
            WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n", __FUNCTION__));
            goto fail;
        }
    }
5169
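	/*
	 * SRF (service response filter) control field layout, as encoded by
	 * the bit operations below (one byte):
	 *   bit 0    - SRF type: 1 = bloom filter, 0 = sequential MAC list
	 *   bit 1    - include bit: set when srf_include is requested
	 *   bits 2-3 - bloom filter index (instance_id % 4)
	 * The control byte is followed by either the packed MAC list or the
	 * bloom filter data.
	 */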
	if (cmd_data->use_srf) {
		uint8 srf_control = 0;
		/* set include bit */
		if (cmd_data->srf_include == true) {
			srf_control |= 0x2;
		}

		if (!ETHER_ISNULLADDR(&cmd_data->mac_list.list) &&
			(cmd_data->mac_list.num_mac_addr
			< NAN_SRF_MAX_MAC)) {
			if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
				/* mac list */
				srf_size = (cmd_data->mac_list.num_mac_addr
					* ETHER_ADDR_LEN) + NAN_SRF_CTRL_FIELD_LEN;
				WL_TRACE(("srf size = %d\n", srf_size));

				srf_mac = MALLOCZ(cfg->osh, srf_size);
				if (srf_mac == NULL) {
					WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
					ret = -ENOMEM;
					goto fail;
				}
				ret = memcpy_s(srf_mac, NAN_SRF_CTRL_FIELD_LEN,
					&srf_control, NAN_SRF_CTRL_FIELD_LEN);
				if (ret != BCME_OK) {
					WL_ERR(("Failed to copy srf control\n"));
					goto fail;
				}
				ret = memcpy_s(srf_mac+1, (srf_size - NAN_SRF_CTRL_FIELD_LEN),
					cmd_data->mac_list.list,
					(srf_size - NAN_SRF_CTRL_FIELD_LEN));
				if (ret != BCME_OK) {
					WL_ERR(("Failed to copy srf control mac list\n"));
					goto fail;
				}
				ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
					WL_NAN_XTLV_CFG_SR_FILTER, srf_size, srf_mac,
					BCM_XTLV_OPTION_ALIGN32);
				if (unlikely(ret)) {
					WL_ERR(("%s: failed to pack WL_NAN_XTLV_CFG_SR_FILTER\n",
						__FUNCTION__));
					goto fail;
				}
			} else if (cmd_data->srf_type == SRF_TYPE_BLOOM_FILTER) {
				/* Create bloom filter */
				srf = MALLOCZ(cfg->osh, srf_ctrl_size);
				if (srf == NULL) {
					WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
					ret = -ENOMEM;
					goto fail;
				}
				/* Bloom filter */
				srf_control |= 0x1;
				/* Instance id must be from 1 to 255, 0 is Reserved */
				if (sd_params->instance_id == NAN_ID_RESERVED) {
					WL_ERR(("Invalid instance id: %d\n",
						sd_params->instance_id));
					ret = BCME_BADARG;
					goto fail;
				}
				if (bloom_idx == 0xFFFFFFFF) {
					bloom_idx = sd_params->instance_id % 4;
				} else {
					WL_ERR(("Invalid bloom_idx\n"));
					ret = BCME_BADARG;
					goto fail;
				}
				srf_control |= bloom_idx << 2;

				ret = wl_nan_bloom_create(&bp, &bloom_idx, bloom_len);
				if (unlikely(ret)) {
					WL_ERR(("%s: Bloom create failed\n", __FUNCTION__));
					goto fail;
				}

				srftmp = cmd_data->mac_list.list;
				for (a = 0;
					a < cmd_data->mac_list.num_mac_addr; a++) {
					ret = bcm_bloom_add_member(bp, srftmp, ETHER_ADDR_LEN);
					if (unlikely(ret)) {
						WL_ERR(("%s: Cannot add to bloom filter\n",
							__FUNCTION__));
						goto fail;
					}
					srftmp += ETHER_ADDR_LEN;
				}

				ret = memcpy_s(srf, NAN_SRF_CTRL_FIELD_LEN,
					&srf_control, NAN_SRF_CTRL_FIELD_LEN);
				if (ret != BCME_OK) {
					WL_ERR(("Failed to copy srf control\n"));
					goto fail;
				}
				ret = bcm_bloom_get_filter_data(bp, bloom_len,
					(srf + NAN_SRF_CTRL_FIELD_LEN),
					&bloom_size);
				if (unlikely(ret)) {
					WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__));
					goto fail;
				}
				ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
					WL_NAN_XTLV_CFG_SR_FILTER, srf_ctrl_size,
					srf, BCM_XTLV_OPTION_ALIGN32);
				if (ret != BCME_OK) {
					WL_ERR(("Failed to pack SR FILTER data, ret = %d\n", ret));
					goto fail;
				}
			} else {
				WL_ERR(("Invalid SRF Type = %d !!!\n",
					cmd_data->srf_type));
				ret = BCME_BADARG;
				goto fail;
			}
		} else {
			WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
				cmd_data->mac_list.num_mac_addr));
			ret = BCME_BADARG;
			goto fail;
		}
	}

	if (cmd_data->rx_match.dlen) {
		WL_TRACE(("optional rx match filter is present, pack it\n"));
		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
			WL_NAN_XTLV_CFG_MATCH_RX, cmd_data->rx_match.dlen,
			cmd_data->rx_match.data, BCM_XTLV_OPTION_ALIGN32);
		if (unlikely(ret)) {
			WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__));
			goto fail;
		}
	}

	/* Security elements */
	if (cmd_data->csid) {
		WL_TRACE(("Cipher suite type is present, pack it\n"));
		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
			WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
			(uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
		if (unlikely(ret)) {
			WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
			goto fail;
		}
	}

	if (cmd_data->ndp_cfg.security_cfg) {
		if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
			(cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
			if (cmd_data->key.data && cmd_data->key.dlen) {
				WL_TRACE(("optional pmk present, pack it\n"));
				ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
					WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
					cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
				if (unlikely(ret)) {
					WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
						__FUNCTION__));
					goto fail;
				}
			}
		} else {
			WL_ERR(("Invalid security key type\n"));
			ret = BCME_BADARG;
			goto fail;
		}
	}

	if (cmd_data->scid.data && cmd_data->scid.dlen) {
		WL_TRACE(("optional scid present, pack it\n"));
		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_SCID,
			cmd_data->scid.dlen, cmd_data->scid.data, BCM_XTLV_OPTION_ALIGN32);
		if (unlikely(ret)) {
			WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__));
			goto fail;
		}
	}

	if (cmd_data->sde_control_config) {
		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
			WL_NAN_XTLV_SD_SDE_CONTROL,
			sizeof(uint16), (uint8*)&cmd_data->sde_control_flag,
			BCM_XTLV_OPTION_ALIGN32);
		if (ret != BCME_OK) {
			WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n", __FUNCTION__));
			goto fail;
		}
	}

	sub_cmd->len += (buflen_avail - *nan_buf_size);

fail:
	if (srf) {
		MFREE(cfg->osh, srf, srf_ctrl_size);
	}

	if (srf_mac) {
		MFREE(cfg->osh, srf_mac, srf_size);
	}
	NAN_DBG_EXIT();
	return ret;
}

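/*
 * Computes the 32-bit aligned buffer space needed for the optional
 * discovery XTLVs (svc info, match filters, SRF, security elements,
 * life count) so the caller can size the iovar batch buffer up front.
 * Each present element accounts for its payload plus the XTLV id/len
 * header, rounded up to a 4-byte boundary.
 */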
static int
wl_cfgnan_aligned_data_size_of_opt_disc_params(uint16 *data_size, nan_discover_cmd_data_t *cmd_data)
{
	s32 ret = BCME_OK;
	if (cmd_data->svc_info.dlen)
		*data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->sde_svc_info.dlen)
		*data_size += ALIGN_SIZE(cmd_data->sde_svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->tx_match.dlen)
		*data_size += ALIGN_SIZE(cmd_data->tx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->rx_match.dlen)
		*data_size += ALIGN_SIZE(cmd_data->rx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->use_srf) {
		if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
			*data_size += (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)
				+ NAN_SRF_CTRL_FIELD_LEN;
		} else { /* Bloom filter type */
			*data_size += NAN_BLOOM_LENGTH_DEFAULT + 1;
		}
		*data_size += ALIGN_SIZE(*data_size + NAN_XTLV_ID_LEN_SIZE, 4);
	}
	if (cmd_data->csid)
		*data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->key.dlen)
		*data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->scid.dlen)
		*data_size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->sde_control_config)
		*data_size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->life_count)
		*data_size += ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 4);
	return ret;
}

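/*
 * Same sizing pass as above, but for the optional datapath XTLVs
 * (svc info, PMK, CSID). The service hash slot is always reserved.
 * When NDPE is enabled the svc info is counted twice, since it is
 * duplicated into the NDPE TLV list for backward compatibility.
 */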
static int
wl_cfgnan_aligned_data_size_of_opt_dp_params(struct bcm_cfg80211 *cfg, uint16 *data_size,
	nan_datapath_cmd_data_t *cmd_data)
{
	s32 ret = BCME_OK;
	if (cmd_data->svc_info.dlen) {
		*data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
		/* When NDPE is enabled, add this extra data_size to provide backward
		 * compatibility for non-NDPE devices. The NDP specific info is duplicated
		 * and sent to FW in both the SD SVCINFO and NDPE TLV lists, as the host
		 * doesn't know the peer's NDPE capability.
		 */
		if (cfg->nancfg->ndpe_enabled) {
			*data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
		}
	}
	if (cmd_data->key.dlen)
		*data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->csid)
		*data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);

	*data_size += ALIGN_SIZE(WL_NAN_SVC_HASH_LEN + NAN_XTLV_ID_LEN_SIZE, 4);
	return ret;
}
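
/*
 * Issues a read-only (is_set = false) publish/subscribe iovar carrying
 * just the instance id, to check with firmware whether the service
 * instance already exists before an update is attempted.
 */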
int
wl_cfgnan_svc_get_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
{
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint32 instance_id;
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;

	uint8 *resp_buf = NULL;
	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET + sizeof(instance_id);

	NAN_DBG_ENTER();

	nan_buf = MALLOCZ(cfg->osh, data_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
	if (!resp_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 1;
	/* check if service is present */
	nan_buf->is_set = false;
	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
	if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
		instance_id = cmd_data->pub_id;
	} else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
		instance_id = cmd_data->sub_id;
	} else {
		ret = BCME_USAGE_ERROR;
		WL_ERR(("wrong command id = %u\n", cmd_id));
		goto fail;
	}
	/* Fill the sub_command block */
	sub_cmd->id = htod16(cmd_id);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	ret = memcpy_s(sub_cmd->data, (data_size - WL_NAN_OBUF_DATA_OFFSET),
		&instance_id, sizeof(instance_id));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
		goto fail;
	}

	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
		&(cmd_data->status), resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);

	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan svc check failed ret = %d status = %d\n", ret, cmd_data->status));
		goto fail;
	} else {
		WL_DBG(("nan svc check successful, proceed to update\n"));
	}

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, data_size);
	}

	if (resp_buf) {
		MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
	}
	NAN_DBG_EXIT();
	return ret;
}

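/*
 * Common publish/subscribe handler. Builds a single-subcommand iovar
 * batch and hands the service descriptor packing off to
 * wl_cfgnan_sd_params_handler(). The resulting buffer layout is,
 * roughly (a sketch; the exact field packing is defined by the structs):
 *
 *   bcm_iov_batch_buf_t  { version, count, is_set, cmds[] }
 *     cmds[0]: bcm_iov_batch_subcmd_t { id, len, u.options, data }
 *       data:  wl_nan_sd_params_t fixed fields + optional XTLVs
 */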
int
wl_cfgnan_svc_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	uint16 nan_buf_size;
	uint8 *resp_buf = NULL;
	/* Considering fixed params */
	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
		OFFSETOF(wl_nan_sd_params_t, optional[0]);

	if (cmd_data->svc_update) {
		ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to update svc handler, ret = %d\n", ret));
			goto fail;
		} else {
			/* Ignoring any other svc get error */
			if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
				WL_ERR(("Bad instance status, failed to update svc handler\n"));
				goto fail;
			}
		}
	}

	ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
	if (unlikely(ret)) {
		WL_ERR(("Failed to get aligned size of optional params\n"));
		goto fail;
	}
	nan_buf_size = data_size;
	NAN_DBG_ENTER();

	nan_buf = MALLOCZ(cfg->osh, data_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
	if (!resp_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf->is_set = true;

	ret = wl_cfgnan_sd_params_handler(ndev, cmd_data, cmd_id,
		&nan_buf->cmds[0], &nan_buf_size);
	if (unlikely(ret)) {
		WL_ERR(("Service discovery params handler failed, ret = %d\n", ret));
		goto fail;
	}

	nan_buf->count++;
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
		&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	if (cmd_data->svc_update && (cmd_data->status == BCME_DATA_NOTFOUND)) {
		/* return OK if update tlv data is not present,
		 * which means there is nothing to update
		 */
		cmd_data->status = BCME_OK;
	}
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan svc failed ret = %d status = %d\n", ret, cmd_data->status));
		goto fail;
	} else {
		WL_DBG(("nan svc successful\n"));
#ifdef WL_NAN_DISC_CACHE
		ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id, cmd_data->svc_update);
		if (ret < 0) {
			WL_ERR(("%s: fail to cache svc info, ret=%d\n",
				__FUNCTION__, ret));
			goto fail;
		}
#endif /* WL_NAN_DISC_CACHE */
	}

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, data_size);
	}

	if (resp_buf) {
		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}

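/*
 * Publish entry point: validates the mandatory arguments (publisher id
 * and service hash) and delegates to the common svc handler.
 */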
int
wl_cfgnan_publish_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
	int ret = BCME_OK;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();
	/*
	 * proceed only if mandatory arguments are present - publisher id,
	 * service hash
	 */
	if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
		(!cmd_data->svc_hash.dlen)) {
		WL_ERR(("mandatory arguments are not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}

	ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_PUBLISH, cmd_data);
	if (ret < 0) {
		WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__, ret));
		goto fail;
	}
	WL_INFORM_MEM(("[NAN] Service published for instance id:%d is_update %d\n",
		cmd_data->pub_id, cmd_data->svc_update));

fail:
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

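/*
 * Subscribe entry point. On a service update this first tears down any
 * ranging sessions tied to the subscription and, if the ranging or
 * ingress/egress limits changed, clears both the firmware and local
 * discovery caches before delegating to the common svc handler.
 */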
int
wl_cfgnan_subscribe_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
	int ret = BCME_OK;
#ifdef WL_NAN_DISC_CACHE
	nan_svc_info_t *svc_info;
#ifdef RTT_SUPPORT
	uint8 upd_ranging_required;
#endif /* RTT_SUPPORT */
#endif /* WL_NAN_DISC_CACHE */

#ifdef RTT_SUPPORT
#ifdef RTT_GEOFENCE_CONT
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
#endif /* RTT_GEOFENCE_CONT */
#endif /* RTT_SUPPORT */

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	/*
	 * proceed only if mandatory arguments are present - subscriber id,
	 * service hash
	 */
	if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
		(!cmd_data->svc_hash.dlen)) {
		WL_ERR(("mandatory arguments are not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}

	/* Check for ranging sessions if any */
	if (cmd_data->svc_update) {
#ifdef WL_NAN_DISC_CACHE
		svc_info = wl_cfgnan_get_svc_inst(cfg, cmd_data->sub_id, 0);
		if (svc_info) {
#ifdef RTT_SUPPORT
			wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
			/* terminate ranging sessions for this svc, avoid clearing svc cache */
			wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
			/* Attempt RTT for current geofence target */
			wl_cfgnan_reset_geofence_ranging(cfg, NULL,
				RTT_SCHED_RNG_TERM_SUB_SVC_UPD, TRUE);
			WL_DBG(("Ranging sessions handled for svc update\n"));
			upd_ranging_required = !!(cmd_data->sde_control_flag &
				NAN_SDE_CF_RANGING_REQUIRED);
			if ((svc_info->ranging_required ^ upd_ranging_required) ||
				(svc_info->ingress_limit != cmd_data->ingress_limit) ||
				(svc_info->egress_limit != cmd_data->egress_limit)) {
				/* Clear cache info in Firmware */
				ret = wl_cfgnan_clear_disc_cache(cfg, cmd_data->sub_id);
				if (ret != BCME_OK) {
					WL_ERR(("couldn't send clear cache to FW\n"));
					goto fail;
				}
				/* Invalidate local cache info */
				wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
			}
#endif /* RTT_SUPPORT */
		}
#endif /* WL_NAN_DISC_CACHE */
	}

#ifdef RTT_SUPPORT
#ifdef RTT_GEOFENCE_CONT
	/* Override ranging Indication */
	if (rtt_status->geofence_cfg.geofence_cont) {
		if (cmd_data->ranging_indication !=
			NAN_RANGE_INDICATION_NONE) {
			cmd_data->ranging_indication = NAN_RANGE_INDICATION_CONT;
		}
	}
#endif /* RTT_GEOFENCE_CONT */
#endif /* RTT_SUPPORT */
	ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
	if (ret < 0) {
		WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
		goto fail;
	}
	WL_INFORM_MEM(("[NAN] Service subscribed for instance id:%d is_update %d\n",
		cmd_data->sub_id, cmd_data->svc_update));

fail:
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

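/*
 * Packs a cancel publish/subscribe subcommand carrying only the
 * instance id into the caller-provided batch buffer; the caller issues
 * the ioctl.
 */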
static int
wl_cfgnan_cancel_handler(nan_discover_cmd_data_t *cmd_data,
	uint16 cmd_id, void *p_buf, uint16 *nan_buf_size)
{
	s32 ret = BCME_OK;

	NAN_DBG_ENTER();

	if (p_buf != NULL) {
		bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
		wl_nan_instance_id_t instance_id;

		if (cmd_id == WL_NAN_CMD_SD_CANCEL_PUBLISH) {
			instance_id = cmd_data->pub_id;
		} else if (cmd_id == WL_NAN_CMD_SD_CANCEL_SUBSCRIBE) {
			instance_id = cmd_data->sub_id;
		} else {
			ret = BCME_USAGE_ERROR;
			WL_ERR(("wrong command id = %u\n", cmd_id));
			goto fail;
		}

		/* Fill the sub_command block */
		sub_cmd->id = htod16(cmd_id);
		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
		ret = memcpy_s(sub_cmd->data, *nan_buf_size,
			&instance_id, sizeof(instance_id));
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
			goto fail;
		}
		/* adjust iov data len to the end of last data record */
		*nan_buf_size -= (sub_cmd->len +
			OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
		WL_INFORM_MEM(("[NAN] Service with instance id:%d cancelled\n", instance_id));
	} else {
		WL_ERR(("nan_iov_buf is NULL\n"));
		ret = BCME_ERROR;
		goto fail;
	}

fail:
	NAN_DBG_EXIT();
	return ret;
}

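/*
 * Cancels an active publish instance in firmware and drops the cached
 * service state for it.
 */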
int
wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	/* proceed only if mandatory argument is present - publisher id */
	if (!cmd_data->pub_id) {
		WL_ERR(("mandatory argument is not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}

#ifdef WL_NAN_DISC_CACHE
	wl_cfgnan_clear_svc_cache(cfg, cmd_data->pub_id);
#endif /* WL_NAN_DISC_CACHE */
	ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
		&nan_buf->cmds[0], &nan_buf_size);
	if (unlikely(ret)) {
		WL_ERR(("cancel publish failed\n"));
		goto fail;
	}
	nan_buf->is_set = true;
	nan_buf->count++;

	bzero(resp_buf, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
		&(cmd_data->status),
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan cancel publish failed ret = %d status = %d\n",
			ret, cmd_data->status));
		goto fail;
	}
	WL_DBG(("nan cancel publish successful\n"));
	wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}

	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

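/*
 * Cancels an active subscribe instance. Ranging sessions and cached
 * discovery results tied to the subscription are cleaned up first.
 */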
int
wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	/* proceed only if mandatory argument is present - subscriber id */
	if (!cmd_data->sub_id) {
		WL_ERR(("mandatory argument is not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}

#ifdef WL_NAN_DISC_CACHE
#ifdef RTT_SUPPORT
	/* terminate ranging sessions for this svc */
	wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
	wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
	wl_cfgnan_reset_geofence_ranging(cfg, NULL,
		RTT_SCHED_RNG_TERM_SUB_SVC_CANCEL, TRUE);
#endif /* RTT_SUPPORT */
	/* clear svc cache for the service */
	wl_cfgnan_clear_svc_cache(cfg, cmd_data->sub_id);
	wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
#endif /* WL_NAN_DISC_CACHE */

	ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE,
		&nan_buf->cmds[0], &nan_buf_size);
	if (unlikely(ret)) {
		WL_ERR(("cancel subscribe failed\n"));
		goto fail;
	}
	nan_buf->is_set = true;
	nan_buf->count++;

	bzero(resp_buf, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
		&(cmd_data->status),
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n",
			ret, cmd_data->status));
		goto fail;
	}
	WL_DBG(("subscribe cancel successful\n"));
	wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}

	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

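/*
 * Sends a service discovery follow-up frame (transmit) to a peer.
 * Mandatory arguments: local and remote instance ids plus the peer MAC.
 */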
int
wl_cfgnan_transmit_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	wl_nan_sd_transmit_t *sd_xmit = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bool is_lcl_id = FALSE;
	bool is_dest_id = FALSE;
	bool is_dest_mac = FALSE;
	uint16 buflen_avail;
	uint8 *pxtlv;
	uint16 nan_buf_size;
	uint8 *resp_buf = NULL;
	/* Considering fixed params */
	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
		OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
	data_size = ALIGN_SIZE(data_size, 4);

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();
	/* take the mutex before the first early exit so the common fail
	 * path can unconditionally unlock
	 */
	ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
	if (unlikely(ret)) {
		WL_ERR(("Failed to get aligned size of optional params\n"));
		goto fail;
	}
	nan_buf_size = data_size;
	nan_buf = MALLOCZ(cfg->osh, data_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
	if (!resp_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	/* nan transmit */
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	/*
	 * proceed only if mandatory arguments are present - local id,
	 * remote id, mac address
	 */
	if ((!cmd_data->local_id) || (!cmd_data->remote_id) ||
		ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
		WL_ERR(("mandatory arguments are not present\n"));
		ret = -EINVAL;
		goto fail;
	}

	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
	sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);

	/* local instance id must be from 1 to 255, 0 is reserved */
	if (cmd_data->local_id == NAN_ID_RESERVED) {
		WL_ERR(("Invalid local instance id: %d\n", cmd_data->local_id));
		ret = BCME_BADARG;
		goto fail;
	}
	sd_xmit->local_service_id = cmd_data->local_id;
	is_lcl_id = TRUE;

	/* remote instance id must be from 1 to 255, 0 is reserved */
	if (cmd_data->remote_id == NAN_ID_RESERVED) {
		WL_ERR(("Invalid remote instance id: %d\n", cmd_data->remote_id));
		ret = BCME_BADARG;
		goto fail;
	}

	sd_xmit->requestor_service_id = cmd_data->remote_id;
	is_dest_id = TRUE;

	if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
		ret = memcpy_s(&sd_xmit->destination_addr, ETHER_ADDR_LEN,
			&cmd_data->mac_addr, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy dest mac address\n"));
			goto fail;
		}
	} else {
		WL_ERR(("Invalid ether addr provided\n"));
		ret = BCME_BADARG;
		goto fail;
	}
	is_dest_mac = TRUE;

	if (cmd_data->priority) {
		sd_xmit->priority = cmd_data->priority;
	}
	sd_xmit->token = cmd_data->token;

	if (cmd_data->recv_ind_flag) {
		/* BIT0 - If set, host won't receive the "txs" event */
		if (CHECK_BIT(cmd_data->recv_ind_flag,
			WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT)) {
			sd_xmit->flags = WL_NAN_FUP_SUPR_EVT_TXS;
		}
	}
	/* Optional parameters: fill the sub_command block with service descriptor attr */
	sub_cmd->id = htod16(WL_NAN_CMD_SD_TRANSMIT);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	pxtlv = (uint8 *)&sd_xmit->opt_tlv;

	nan_buf_size -= (sub_cmd->len +
		OFFSETOF(bcm_iov_batch_subcmd_t, u.options));

	buflen_avail = nan_buf_size;

	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
		bcm_xtlv_t *pxtlv_svc_info = (bcm_xtlv_t *)pxtlv;
		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
			WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
			cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
		if (unlikely(ret)) {
			WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
				__FUNCTION__, ret));
			goto fail;
		}

		/* 0xFF is max length for svc_info */
		if (pxtlv_svc_info->len > 0xFF) {
			WL_ERR(("Invalid service info length %d\n",
				(pxtlv_svc_info->len)));
			ret = BCME_USAGE_ERROR;
			goto fail;
		}
		sd_xmit->opt_len = (uint8)(pxtlv_svc_info->len);
	}
	if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
		WL_TRACE(("optional sdea svc_info present, pack it\n"));
		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
			WL_NAN_XTLV_SD_SDE_SVC_INFO, cmd_data->sde_svc_info.dlen,
			cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
		if (unlikely(ret)) {
			WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
			goto fail;
		}
	}

	/* Check if all mandatory params are provided */
	if (is_lcl_id && is_dest_id && is_dest_mac) {
		nan_buf->count++;
		sub_cmd->len += (buflen_avail - nan_buf_size);
	} else {
		WL_ERR(("Missing parameters\n"));
		ret = BCME_USAGE_ERROR;
	}
	nan_buf->is_set = TRUE;
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
		&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
			sd_xmit->token, ret, cmd_data->status));
		goto fail;
	}
	WL_INFORM_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, data_size);
	}
	if (resp_buf) {
		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	}
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

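/*
 * Queries firmware for its NAN capabilities (WL_NAN_CMD_GEN_FW_CAP),
 * unpacks the returned XTLV and translates wl_nan_fw_cap_t into the
 * HAL-facing nan_hal_capabilities_t.
 */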
static int
wl_cfgnan_get_capability(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_fw_cap_t *fw_cap = NULL;
	uint16 subcmd_len;
	uint32 status;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	const bcm_xtlv_t *xtlv;
	uint16 type = 0;
	int len = 0;

	NAN_DBG_ENTER();
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
		sizeof(*fw_cap), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	fw_cap = (wl_nan_fw_cap_t *)sub_cmd->data;
	sub_cmd->id = htod16(WL_NAN_CMD_GEN_FW_CAP);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*fw_cap);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;

	nan_buf->is_set = false;
	memset(resp_buf, 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("get nan fw cap failed ret %d status %d\n",
			ret, status));
		goto fail;
	}

	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];

	/* check the response buff */
	xtlv = ((const bcm_xtlv_t *)&sub_cmd_resp->data[0]);
	if (!xtlv) {
		ret = BCME_NOTFOUND;
		WL_ERR(("xtlv not found: err = %d\n", ret));
		goto fail;
	}
	bcm_xtlv_unpack_xtlv(xtlv, &type, (uint16*)&len, NULL, BCM_XTLV_OPTION_ALIGN32);
	do
	{
		switch (type) {
			case WL_NAN_XTLV_GEN_FW_CAP:
				if (len > sizeof(wl_nan_fw_cap_t)) {
					ret = BCME_BADARG;
					goto fail;
				}
				GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
				fw_cap = (wl_nan_fw_cap_t*)xtlv->data;
				GCC_DIAGNOSTIC_POP();
				break;
			default:
				WL_ERR(("Unknown xtlv: id %u\n", type));
				ret = BCME_ERROR;
				break;
		}
		if (ret != BCME_OK) {
			goto fail;
		}
	} while ((xtlv = bcm_next_xtlv(xtlv, &len, BCM_XTLV_OPTION_ALIGN32)));

	memset(capabilities, 0, sizeof(nan_hal_capabilities_t));
	capabilities->max_publishes = fw_cap->max_svc_publishes;
	capabilities->max_subscribes = fw_cap->max_svc_subscribes;
	capabilities->max_ndi_interfaces = fw_cap->max_lcl_ndi_interfaces;
	capabilities->max_ndp_sessions = fw_cap->max_ndp_sessions;
	capabilities->max_concurrent_nan_clusters = fw_cap->max_concurrent_nan_clusters;
	capabilities->max_service_name_len = fw_cap->max_service_name_len;
	capabilities->max_match_filter_len = fw_cap->max_match_filter_len;
	capabilities->max_total_match_filter_len = fw_cap->max_total_match_filter_len;
	capabilities->max_service_specific_info_len = fw_cap->max_service_specific_info_len;
	capabilities->max_app_info_len = fw_cap->max_app_info_len;
	capabilities->max_sdea_service_specific_info_len = fw_cap->max_sdea_svc_specific_info_len;
	capabilities->max_queued_transmit_followup_msgs = fw_cap->max_queued_tx_followup_msgs;
	capabilities->max_subscribe_address = fw_cap->max_subscribe_address;
	capabilities->is_ndp_security_supported = fw_cap->is_ndp_security_supported;
	capabilities->ndp_supported_bands = fw_cap->ndp_supported_bands;
	capabilities->cipher_suites_supported = fw_cap->cipher_suites_supported_mask;
	if (fw_cap->flags1 & WL_NAN_FW_CAP_FLAG1_NDPE) {
		capabilities->ndpe_attr_supported = true;
	}

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}

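/*
 * HAL wrapper around wl_cfgnan_get_capability(). If NAN is not yet
 * initialized, it is brought up temporarily for the query and torn
 * down again. On query failure a fixed set of default capabilities is
 * reported to keep backward compatibility.
 */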
int
wl_cfgnan_get_capablities_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
{
	s32 ret = BCME_OK;
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

	NAN_DBG_ENTER();

	/* Do not query fw about nan if feature is not supported */
	if (!FW_SUPPORTED(dhdp, nan)) {
		WL_DBG(("NAN is not supported\n"));
		return ret;
	}

	if (cfg->nancfg->nan_init_state) {
		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
		if (ret != BCME_OK) {
			WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
				cfg->nancfg->nan_init_state, ret));
			goto exit;
		}
	} else {
		/* Initialize NAN before sending iovar */
		WL_ERR(("Initializing NAN\n"));
		ret = wl_cfgnan_init(cfg);
		if (ret != BCME_OK) {
			WL_ERR(("failed to initialize NAN[%d]\n", ret));
			goto fail;
		}

		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
		if (ret != BCME_OK) {
			WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
				cfg->nancfg->nan_init_state, ret));
			goto exit;
		}
		WL_ERR(("De-Initializing NAN\n"));
		ret = wl_cfgnan_deinit(cfg, dhdp->up);
		if (ret != BCME_OK) {
			WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
			goto fail;
		}
	}
fail:
	NAN_DBG_EXIT();
	return ret;
exit:
	/* Keeping backward compatibility */
	capabilities->max_concurrent_nan_clusters = MAX_CONCURRENT_NAN_CLUSTERS;
	capabilities->max_publishes = MAX_PUBLISHES;
	capabilities->max_subscribes = MAX_SUBSCRIBES;
	capabilities->max_service_name_len = MAX_SVC_NAME_LEN;
	capabilities->max_match_filter_len = MAX_MATCH_FILTER_LEN;
	capabilities->max_total_match_filter_len = MAX_TOTAL_MATCH_FILTER_LEN;
	capabilities->max_service_specific_info_len = NAN_MAX_SERVICE_SPECIFIC_INFO_LEN;
	capabilities->max_ndi_interfaces = NAN_MAX_NDI;
	capabilities->max_ndp_sessions = MAX_NDP_SESSIONS;
	capabilities->max_app_info_len = MAX_APP_INFO_LEN;
	capabilities->max_queued_transmit_followup_msgs = MAX_QUEUED_TX_FOLLOUP_MSGS;
	capabilities->max_sdea_service_specific_info_len = MAX_SDEA_SVC_INFO_LEN;
	capabilities->max_subscribe_address = MAX_SUBSCRIBE_ADDRESS;
	capabilities->cipher_suites_supported = WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK;
	capabilities->max_scid_len = MAX_SCID_LEN;
	capabilities->is_ndp_security_supported = true;
	capabilities->ndp_supported_bands = NDP_SUPPORTED_BANDS;
	capabilities->ndpe_attr_supported = false;
	ret = BCME_OK;
	NAN_DBG_EXIT();
	return ret;
}

bool wl_cfgnan_is_enabled(struct bcm_cfg80211 *cfg)
{
	wl_nancfg_t *nancfg = cfg->nancfg;
	if (nancfg) {
		if (nancfg->nan_init_state && nancfg->nan_enable) {
			return TRUE;
		}
	}

	return FALSE;
}

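/*
 * One-time NAN bring-up: sends the init iovar to firmware and
 * allocates the host-side discovery result cache.
 */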
static int
wl_cfgnan_init(struct bcm_cfg80211 *cfg)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;

	NAN_DBG_ENTER();
	if (cfg->nancfg->nan_init_state) {
		WL_ERR(("nan initialized/nmi exists\n"));
		return BCME_OK;
	}
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, true);
	if (unlikely(ret)) {
		WL_ERR(("init handler sub_cmd set failed\n"));
		goto fail;
	}
	nan_buf->count++;
	nan_buf->is_set = true;

	bzero(resp_buf, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
		nan_buf, nan_buf_size, &status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("nan init handler failed ret %d status %d\n",
			ret, status));
		goto fail;
	}

#ifdef WL_NAN_DISC_CACHE
	/* malloc for disc result */
	cfg->nancfg->nan_disc_cache = MALLOCZ(cfg->osh,
		NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
	if (!cfg->nancfg->nan_disc_cache) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
#endif /* WL_NAN_DISC_CACHE */
	cfg->nancfg->nan_init_state = true;
	NAN_DBG_EXIT();
	return ret;
fail:
	NAN_DBG_EXIT();
	return ret;
}

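/*
 * Frees the discovery cache (including any per-entry tx match filter
 * and svc info buffers) and resets host-side NAN state.
 */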
static void
wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg)
{
	uint8 i = 0;
	wl_nancfg_t *nancfg = cfg->nancfg;

	nancfg->nan_dp_count = 0;
	nancfg->nan_init_state = false;
#ifdef WL_NAN_DISC_CACHE
	if (nancfg->nan_disc_cache) {
		for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
			if (nancfg->nan_disc_cache[i].tx_match_filter.data) {
				MFREE(cfg->osh, nancfg->nan_disc_cache[i].tx_match_filter.data,
					nancfg->nan_disc_cache[i].tx_match_filter.dlen);
			}
			if (nancfg->nan_disc_cache[i].svc_info.data) {
				MFREE(cfg->osh, nancfg->nan_disc_cache[i].svc_info.data,
					nancfg->nan_disc_cache[i].svc_info.dlen);
			}
		}
		MFREE(cfg->osh, nancfg->nan_disc_cache,
			NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
		nancfg->nan_disc_cache = NULL;
	}
	nancfg->nan_disc_count = 0;
	bzero(nancfg->svc_info, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
	bzero(nancfg->nan_ranging_info, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
#endif /* WL_NAN_DISC_CACHE */
	return;
}

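/*
 * NAN teardown: sends the deinit iovar (skipped if the bus is already
 * down), releases host-side state and, for non-random NMI, returns the
 * NMI MAC address to the vif address pool.
 */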
static int
wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
	wl_nancfg_t *nancfg = cfg->nancfg;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	if (!nancfg->nan_init_state) {
		WL_ERR(("nan is not initialized/nmi doesn't exist\n"));
		ret = BCME_OK;
		goto fail;
	}

	if (busstate != DHD_BUS_DOWN) {
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

		WL_DBG(("nan deinit\n"));
		ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, false);
		if (unlikely(ret)) {
			WL_ERR(("deinit handler sub_cmd set failed\n"));
		} else {
			nan_buf->count++;
			nan_buf->is_set = true;
			bzero(resp_buf, sizeof(resp_buf));
			ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg,
				nan_buf, nan_buf_size, &status,
				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
			if (unlikely(ret) || unlikely(status)) {
				WL_ERR(("nan deinit handler failed ret %d status %d\n",
					ret, status));
			}
		}
	}
	wl_cfgnan_deinit_cleanup(cfg);

fail:
	if (!nancfg->mac_rand && !ETHER_ISNULLADDR(nancfg->nan_nmi_mac)) {
		wl_release_vif_macaddr(cfg, nancfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
	}
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}

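/*
 * Picks a MAC address for a new NDI. With MAC randomization enabled a
 * locally-administered unicast address is generated, retrying up to
 * NAN_RAND_MAC_RETRIES times so the NDI address differs from the NMI;
 * otherwise the address is derived from the primary interface.
 */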
static int
wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 *cfg, u8* mac_addr)
{
	int i = 0;
	int ret = BCME_OK;
	bool rand_mac = cfg->nancfg->mac_rand;
	BCM_REFERENCE(i);

	if (rand_mac) {
		/* ensure nmi != ndi */
		do {
			RANDOM_BYTES(mac_addr, ETHER_ADDR_LEN);
			/* restore mcast and local admin bits to 0 and 1 */
			ETHER_SET_UNICAST(mac_addr);
			ETHER_SET_LOCALADDR(mac_addr);
			i++;
			if (i == NAN_RAND_MAC_RETRIES) {
				break;
			}
		} while (eacmp(cfg->nancfg->nan_nmi_mac, mac_addr) == 0);

		if (i == NAN_RAND_MAC_RETRIES) {
			if (eacmp(cfg->nancfg->nan_nmi_mac, mac_addr) == 0) {
				WL_ERR(("Couldn't generate a random NDI which != NMI\n"));
				ret = BCME_NORESOURCE;
				goto fail;
			}
		}
	} else {
		if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN,
			mac_addr) != BCME_OK) {
			ret = -EINVAL;
			WL_ERR(("Failed to get mac addr for NDI\n"));
			goto fail;
		}
	}

fail:
	return ret;
}

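/*
 * Creates or deletes an NDI (NAN data interface) on request from the
 * HAL. On create, a free slot and MAC address are picked and the iface
 * name is stored so it can be replayed during NAN enable.
 */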
int
wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate)
{
	u8 mac_addr[ETH_ALEN];
	s32 ret = BCME_OK;
	s32 idx;
	struct wireless_dev *wdev;
	NAN_DBG_ENTER();

	if (busstate != DHD_BUS_DOWN) {
		ASSERT(cfg->nancfg->ndi);
		if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
			if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
				WL_ERR(("No free idx for NAN NDI\n"));
				ret = BCME_NORESOURCE;
				goto fail;
			}

			ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
			if (ret != BCME_OK) {
				WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
				goto fail;
			}
			wdev = wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
				ifname, mac_addr);
			if (!wdev) {
				ret = -ENODEV;
				WL_ERR(("Failed to create NDI iface = %s, wdev is NULL\n", ifname));
				goto fail;
			}
			/* Store the iface name to pub data so that it can be used
			 * during NAN enable
			 */
			wl_cfgnan_add_ndi_data(cfg, idx, ifname);
			cfg->nancfg->ndi[idx].created = true;
			/* Store nan ndev */
			cfg->nancfg->ndi[idx].nan_ndev = wdev_to_ndev(wdev);

		} else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
			ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
			if (ret == BCME_OK) {
				if (wl_cfgnan_del_ndi_data(cfg, ifname) < 0) {
					WL_ERR(("Failed to find matching data for ndi:%s\n",
						ifname));
				}
			} else if (ret == -ENODEV) {
				WL_INFORM(("Already deleted: %s\n", ifname));
				ret = BCME_OK;
			} else if (ret != BCME_OK) {
				WL_ERR(("failed to delete NDI[%d]\n", ret));
			}
		}
	} else {
		ret = -ENODEV;
		WL_ERR(("Bus is already down, no dev found to remove, ret = %d\n", ret));
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}

/*
 * Returns the data peer entry for peer_addr from the peer list,
 * or NULL if not found.
 */
static nan_ndp_peer_t *
wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr)
{
	uint8 i;
	nan_ndp_peer_t* peer = cfg->nancfg->nan_ndp_peer_info;

	if (!peer) {
		WL_ERR(("wl_cfgnan_data_get_peer: nan_ndp_peer_info is NULL\n"));
		goto exit;
	}
	for (i = 0; i < cfg->nancfg->max_ndp_count; i++) {
		if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED &&
			(!memcmp(peer_addr, &peer[i].peer_addr, ETHER_ADDR_LEN))) {
			return &peer[i];
		}
	}

exit:
	return NULL;
}

/*
 * Returns TRUE if a datapath exists for the given peer
 * in this nan cfg.
 */
bool
wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr)
{
	bool ret = FALSE;
	nan_ndp_peer_t* peer = NULL;

	if ((cfg->nancfg->nan_init_state == FALSE) ||
		(cfg->nancfg->nan_enable == FALSE)) {
		goto exit;
	}

	/* check for peer exist */
	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
	if (peer) {
		ret = TRUE;
	}

exit:
	return ret;
}

/*
 * As of now this API is only used
 * for setting the state to CONNECTED,
 * if applicable
 */
static void
wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr, nan_peer_dp_state_t state)
{
	nan_ndp_peer_t* peer = NULL;
	/* check for peer exist */
	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
	if (!peer) {
		goto end;
	}
	peer->peer_dp_state = state;
end:
	return;
}

/* Adds peer to nan data peer list */
void
wl_cfgnan_data_add_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr)
{
	uint8 i;
	nan_ndp_peer_t* peer = NULL;
	/* check for peer exist */
	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
	if (peer) {
		peer->dp_count++;
		goto end;
	}
	peer = cfg->nancfg->nan_ndp_peer_info;
	for (i = 0; i < cfg->nancfg->max_ndp_count; i++) {
		if (peer[i].peer_dp_state == NAN_PEER_DP_NOT_CONNECTED) {
			break;
		}
	}
	/* compare against the same bound the search loop used */
	if (i == cfg->nancfg->max_ndp_count) {
		WL_DBG(("DP Peer list full, dropping add peer req\n"));
		goto end;
	}
	/* Add peer to list */
	memcpy(&peer[i].peer_addr, peer_addr, ETHER_ADDR_LEN);
	peer[i].dp_count = 1;
	peer[i].peer_dp_state = NAN_PEER_DP_CONNECTING;

end:
	return;
}

/* Removes nan data peer from peer list */
void
wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr)
{
	nan_ndp_peer_t* peer = NULL;
	/* check for peer exist */
	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
	if (!peer) {
		WL_DBG(("DP Peer not present in list, "
			"dropping remove peer req\n"));
		goto end;
	}
	peer->dp_count--;
	if (peer->dp_count == 0) {
		/* No more NDPs, delete entry */
		memset(peer, 0, sizeof(nan_ndp_peer_t));
	} else {
		/* Set peer dp state to connected if any ndp still exists */
		peer->peer_dp_state = NAN_PEER_DP_CONNECTED;
	}
end:
	return;
}

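/*
 * Initiator-side datapath request. Programs local/NDC availability,
 * fills wl_nan_dp_req_t (peer NMI, our NDI, QoS defaults) plus the
 * optional security and svc-info XTLVs, and returns the NDP instance
 * id parsed from the firmware response.
 */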
6648 int
wl_cfgnan_data_path_request_handler(struct net_device * ndev,struct bcm_cfg80211 * cfg,nan_datapath_cmd_data_t * cmd_data,uint8 * ndp_instance_id)6649 wl_cfgnan_data_path_request_handler(struct net_device *ndev,
6650 struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
6651 uint8 *ndp_instance_id)
6652 {
6653 s32 ret = BCME_OK;
6654 bcm_iov_batch_buf_t *nan_buf = NULL;
6655 wl_nan_dp_req_t *datareq = NULL;
6656 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
6657 uint16 buflen_avail;
6658 uint8 *pxtlv;
6659 struct wireless_dev *wdev;
6660 uint16 nan_buf_size;
6661 uint8 *resp_buf = NULL;
6662 /* Considering fixed params */
6663 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
6664 OFFSETOF(wl_nan_dp_req_t, tlv_params);
6665 data_size = ALIGN_SIZE(data_size, 4);
6666
6667 ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(cfg, &data_size, cmd_data);
6668 if (unlikely(ret)) {
6669 WL_ERR(("Failed to get alligned size of optional params\n"));
6670 goto fail;
6671 }
6672
6673 nan_buf_size = data_size;
6674 NAN_DBG_ENTER();
6675
6676 mutex_lock(&cfg->if_sync);
6677 NAN_MUTEX_LOCK();
6678 #ifdef WL_IFACE_MGMT
6679 if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
6680 WL_ERR(("Conflicting iface found to be active\n"));
6681 ret = BCME_UNSUPPORTED;
6682 goto fail;
6683 }
6684 #endif /* WL_IFACE_MGMT */
6685
6686 #ifdef RTT_SUPPORT
6687 /* cancel any ongoing RTT session with peer
6688 * as we donot support DP and RNG to same peer
6689 */
6690 wl_cfgnan_handle_dp_ranging_concurrency(cfg, &cmd_data->mac_addr,
6691 RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
6692 #endif /* RTT_SUPPORT */
6693
6694 nan_buf = MALLOCZ(cfg->osh, data_size);
6695 if (!nan_buf) {
6696 WL_ERR(("%s: memory allocation failed\n", __func__));
6697 ret = BCME_NOMEM;
6698 goto fail;
6699 }
6700
6701 resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
6702 if (!resp_buf) {
6703 WL_ERR(("%s: memory allocation failed\n", __func__));
6704 ret = BCME_NOMEM;
6705 goto fail;
6706 }
6707
6708 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
6709 cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
6710 if (unlikely(ret)) {
6711 WL_ERR(("Failed to set avail value with type local\n"));
6712 goto fail;
6713 }
6714
6715 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
6716 cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
6717 if (unlikely(ret)) {
6718 WL_ERR(("Failed to set avail value with type ndc\n"));
6719 goto fail;
6720 }
6721
6722 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
6723 nan_buf->count = 0;
6724 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
6725
6726 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
6727 datareq = (wl_nan_dp_req_t *)(sub_cmd->data);
6728
6729 /* setting default data path type to unicast */
6730 datareq->type = WL_NAN_DP_TYPE_UNICAST;
6731
6732 if (cmd_data->pub_id) {
6733 datareq->pub_id = cmd_data->pub_id;
6734 }
6735
6736 if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
6737 ret = memcpy_s(&datareq->peer_mac, ETHER_ADDR_LEN,
6738 &cmd_data->mac_addr, ETHER_ADDR_LEN);
6739 if (ret != BCME_OK) {
6740 WL_ERR(("Failed to copy ether addr provided\n"));
6741 goto fail;
6742 }
6743 } else {
6744 WL_ERR(("Invalid ether addr provided\n"));
6745 ret = BCME_BADARG;
6746 goto fail;
6747 }
6748
6749 /* Retrieve mac from given iface name */
6750 wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
6751 (char *)cmd_data->ndp_iface);
6752 if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
6753 ret = -EINVAL;
6754 WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
6755 (char *)cmd_data->ndp_iface));
6756 goto fail;
6757 }
6758
6759 if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
6760 ret = memcpy_s(&datareq->ndi, ETHER_ADDR_LEN,
6761 wdev->netdev->dev_addr, ETHER_ADDR_LEN);
6762 if (ret != BCME_OK) {
6763 WL_ERR(("Failed to copy ether addr provided\n"));
6764 goto fail;
6765 }
6766 WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
6767 __FUNCTION__, MAC2STRDBG(datareq->ndi.octet)));
6768 } else {
6769 WL_ERR(("Invalid NDI addr retrieved\n"));
6770 ret = BCME_BADARG;
6771 goto fail;
6772 }
6773
6774 datareq->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
6775 datareq->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
6776
6777 /* Fill the sub_command block */
6778 sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAREQ);
6779 sub_cmd->len = sizeof(sub_cmd->u.options) +
6780 OFFSETOF(wl_nan_dp_req_t, tlv_params);
6781 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
6782 pxtlv = (uint8 *)&datareq->tlv_params;
6783
6784 nan_buf_size -= (sub_cmd->len +
6785 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
6786 buflen_avail = nan_buf_size;
6787
6788 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
6789 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6790 WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
6791 cmd_data->svc_info.data,
6792 BCM_XTLV_OPTION_ALIGN32);
6793 if (ret != BCME_OK) {
6794 WL_ERR(("unable to process svc_spec_info: %d\n", ret));
6795 goto fail;
6796 }
6797 /* If NDPE is enabled, duplicating svc_info and sending it as part of NDPE TLV list
6798 * too along with SD SVC INFO, as FW is considering both of them as different
6799 * entities where as framework is sending both of them in same variable
6800 * (cmd_data->svc_info). FW will decide which one to use based on
6801 * peer's capability (NDPE capable or not)
6802 */
6803 if (cfg->nancfg->ndpe_enabled) {
6804 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6805 WL_NAN_XTLV_SD_NDPE_TLV_LIST, cmd_data->svc_info.dlen,
6806 cmd_data->svc_info.data,
6807 BCM_XTLV_OPTION_ALIGN32);
6808 if (ret != BCME_OK) {
6809 WL_ERR(("unable to process NDPE TLV list: %d\n", ret));
6810 goto fail;
6811 }
6812 }
6813 datareq->flags |= WL_NAN_DP_FLAG_SVC_INFO;
6814 }
6815
6816 /* Security elements */
6817
6818 if (cmd_data->csid) {
6819 WL_TRACE(("Cipher suite type is present, pack it\n"));
6820 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6821 WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
6822 (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
6823 if (unlikely(ret)) {
6824 WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
6825 goto fail;
6826 }
6827 }
6828
6829 if (cmd_data->ndp_cfg.security_cfg) {
6830 if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
6831 (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
6832 if (cmd_data->key.data && cmd_data->key.dlen) {
6833 WL_TRACE(("optional pmk present, pack it\n"));
6834 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6835 WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
6836 cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
6837 if (unlikely(ret)) {
6838 WL_ERR(("%s: fail to pack on WL_NAN_XTLV_CFG_SEC_PMK\n",
6839 __FUNCTION__));
6840 goto fail;
6841 }
6842 }
6843 } else {
6844 WL_ERR(("Invalid security key type\n"));
6845 ret = BCME_BADARG;
6846 goto fail;
6847 }
6848
6849 if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
6850 (cmd_data->svc_hash.data)) {
6851 WL_TRACE(("svc hash present, pack it\n"));
6852 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6853 WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
6854 cmd_data->svc_hash.data, BCM_XTLV_OPTION_ALIGN32);
6855 if (ret != BCME_OK) {
6856 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
6857 __FUNCTION__));
6858 goto fail;
6859 }
6860 } else {
6861 #ifdef WL_NAN_DISC_CACHE
6862 /* check in cache */
6863 nan_disc_result_cache *cache;
6864 cache = wl_cfgnan_get_disc_result(cfg,
6865 datareq->pub_id, &datareq->peer_mac);
6866 if (!cache) {
6867 ret = BCME_ERROR;
6868 WL_ERR(("invalid svc hash data or length = %d\n",
6869 cmd_data->svc_hash.dlen));
6870 goto fail;
6871 }
6872 WL_TRACE(("svc hash present, pack it\n"));
6873 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6874 WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
6875 cache->svc_hash, BCM_XTLV_OPTION_ALIGN32);
6876 if (ret != BCME_OK) {
6877 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
6878 __FUNCTION__));
6879 goto fail;
6880 }
6881 #else
6882 ret = BCME_ERROR;
6883 WL_ERR(("invalid svc hash data or length = %d\n",
6884 cmd_data->svc_hash.dlen));
6885 goto fail;
6886 #endif /* WL_NAN_DISC_CACHE */
6887 }
6888 /* If the Data req is for secure data connection */
6889 datareq->flags |= WL_NAN_DP_FLAG_SECURITY;
6890 }
6891
6892 sub_cmd->len += (buflen_avail - nan_buf_size);
6893 nan_buf->is_set = false;
6894 nan_buf->count++;
6895
6896 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
6897 &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
6898 if (unlikely(ret) || unlikely(cmd_data->status)) {
6899 WL_ERR(("nan data path request handler failed, ret = %d,"
6900 " status %d, peer: " MACDBG "\n",
6901 ret, cmd_data->status, MAC2STRDBG(&(cmd_data->mac_addr))));
6902 goto fail;
6903 }
6904
6905 /* check the response buff */
6906 if (ret == BCME_OK) {
6907 ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
6908 ndp_instance_id, WL_NAN_CMD_DATA_DATAREQ);
6909 cmd_data->ndp_instance_id = *ndp_instance_id;
6910 }
6911 WL_INFORM_MEM(("[NAN] DP request successfull (ndp_id:%d), peer: " MACDBG " \n",
6912 cmd_data->ndp_instance_id, MAC2STRDBG(&cmd_data->mac_addr)));
6913 /* Add peer to data ndp peer list */
6914 wl_cfgnan_data_add_peer(cfg, &datareq->peer_mac);
6915
6916 fail:
6917 if (nan_buf) {
6918 MFREE(cfg->osh, nan_buf, data_size);
6919 }
6920
6921 if (resp_buf) {
6922 MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
6923 }
6924 NAN_MUTEX_UNLOCK();
6925 mutex_unlock(&cfg->if_sync);
6926 NAN_DBG_EXIT();
6927 return ret;
6928 }
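
/*
 * Illustrative sketch (not compiled) of the batched IOV buffer that the
 * DP request/response handlers in this file construct; field names
 * follow the structures already used above.
 *
 *   bcm_iov_batch_buf_t
 *     version, is_set, count
 *     cmds[0]: bcm_iov_batch_subcmd_t
 *       id         e.g. WL_NAN_CMD_DATA_DATAREQ / WL_NAN_CMD_DATA_DATARESP
 *       len        sizeof(u.options) + fixed params + packed XTLVs
 *       u.options  BCM_XTLV_OPTION_ALIGN32
 *       data       wl_nan_dp_req_t / wl_nan_dp_resp_t fixed params,
 *                  then tlv_params packed via bcm_pack_xtlv_entry()
 *
 * nan_buf_size starts at the full allocation and is decremented as the
 * headers and each XTLV are packed, so (buflen_avail - nan_buf_size) is
 * the number of XTLV bytes consumed; it is added back to sub_cmd->len
 * before the ioctl is issued.
 */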
6929
6930 int
6931 wl_cfgnan_data_path_response_handler(struct net_device *ndev,
6932 struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
6933 {
6934 s32 ret = BCME_OK;
6935 bcm_iov_batch_buf_t *nan_buf = NULL;
6936 wl_nan_dp_resp_t *dataresp = NULL;
6937 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
6938 uint16 buflen_avail;
6939 uint8 *pxtlv;
6940 struct wireless_dev *wdev;
6941 uint16 nan_buf_size;
6942 uint8 *resp_buf = NULL;
6943
6944 /* Considering fixed params */
6945 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
6946 OFFSETOF(wl_nan_dp_resp_t, tlv_params);
6947 data_size = ALIGN_SIZE(data_size, 4);
6948 ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(cfg, &data_size, cmd_data);
6949 if (unlikely(ret)) {
6950 WL_ERR(("Failed to get alligned size of optional params\n"));
6951 goto fail;
6952 }
6953 nan_buf_size = data_size;
6954
6955 NAN_DBG_ENTER();
6956
6957 mutex_lock(&cfg->if_sync);
6958 NAN_MUTEX_LOCK();
6959 #ifdef WL_IFACE_MGMT
6960 if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
6961 WL_ERR(("Conflicting iface found to be active\n"));
6962 ret = BCME_UNSUPPORTED;
6963 goto fail;
6964 }
6965 #endif /* WL_IFACE_MGMT */
6966
6967 nan_buf = MALLOCZ(cfg->osh, data_size);
6968 if (!nan_buf) {
6969 WL_ERR(("%s: memory allocation failed\n", __func__));
6970 ret = BCME_NOMEM;
6971 goto fail;
6972 }
6973
6974 resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
6975 if (!resp_buf) {
6976 WL_ERR(("%s: memory allocation failed\n", __func__));
6977 ret = BCME_NOMEM;
6978 goto fail;
6979 }
6980
6981 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
6982 cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
6983 if (unlikely(ret)) {
6984 WL_ERR(("Failed to set avail value with type local\n"));
6985 goto fail;
6986 }
6987
6988 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
6989 cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
6990 if (unlikely(ret)) {
6991 WL_ERR(("Failed to set avail value with type ndc\n"));
6992 goto fail;
6993 }
6994
6995 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
6996 nan_buf->count = 0;
6997 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
6998
6999 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
7000 dataresp = (wl_nan_dp_resp_t *)(sub_cmd->data);
7001
7002 /* Setting default data path type to unicast */
7003 dataresp->type = WL_NAN_DP_TYPE_UNICAST;
7004 /* Changing status value as per fw convention */
7005 dataresp->status = cmd_data->rsp_code ^= 1;
7006 dataresp->reason_code = 0;
7007
7008 /* ndp instance id must be from 1 to 255, 0 is reserved */
7009 if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
7010 cmd_data->ndp_instance_id > NAN_ID_MAX) {
7011 WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
7012 ret = BCME_BADARG;
7013 goto fail;
7014 }
7015 dataresp->ndp_id = cmd_data->ndp_instance_id;
7016
7017 /* Retrieved initiator ndi from NanDataPathRequestInd */
7018 if (!ETHER_ISNULLADDR(&cfg->nancfg->initiator_ndi.octet)) {
7019 ret = memcpy_s(&dataresp->mac_addr, ETHER_ADDR_LEN,
7020 &cfg->nancfg->initiator_ndi, ETHER_ADDR_LEN);
7021 if (ret != BCME_OK) {
7022 WL_ERR(("Failed to copy initiator ndi\n"));
7023 goto fail;
7024 }
7025 } else {
7026 WL_ERR(("Invalid ether addr retrieved\n"));
7027 ret = BCME_BADARG;
7028 goto fail;
7029 }
7030
7031 /* Interface is not mandatory when it is a reject from the framework */
7032 if (dataresp->status != WL_NAN_DP_STATUS_REJECTED) {
7033 #ifdef RTT_SUPPORT
7034 /* cancel any ongoing RTT session with peer
7035 * as we do not support DP and RNG to the same peer
7036 */
7037 wl_cfgnan_handle_dp_ranging_concurrency(cfg, &cmd_data->mac_addr,
7038 RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
7039 #endif /* RTT_SUPPORT */
7040 /* Retrieve mac from given iface name */
7041 wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
7042 (char *)cmd_data->ndp_iface);
7043 if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
7044 ret = -EINVAL;
7045 WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
7046 (char *)cmd_data->ndp_iface));
7047 goto fail;
7048 }
7049
7050 if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
7051 ret = memcpy_s(&dataresp->ndi, ETHER_ADDR_LEN,
7052 wdev->netdev->dev_addr, ETHER_ADDR_LEN);
7053 if (ret != BCME_OK) {
7054 WL_ERR(("Failed to copy responder ndi\n"));
7055 goto fail;
7056 }
7057 WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
7058 __FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
7059 } else {
7060 WL_ERR(("Invalid NDI addr retrieved\n"));
7061 ret = BCME_BADARG;
7062 goto fail;
7063 }
7064 }
7065
7066 dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
7067 dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
7068
7069 /* Fill the sub_command block */
7070 sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATARESP);
7071 sub_cmd->len = sizeof(sub_cmd->u.options) +
7072 OFFSETOF(wl_nan_dp_resp_t, tlv_params);
7073 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
7074 pxtlv = (uint8 *)&dataresp->tlv_params;
7075
7076 nan_buf_size -= (sub_cmd->len +
7077 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
7078 buflen_avail = nan_buf_size;
7079
7080 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
7081 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7082 WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
7083 cmd_data->svc_info.data,
7084 BCM_XTLV_OPTION_ALIGN32);
7085 if (ret != BCME_OK) {
7086 WL_ERR(("unable to process svc_spec_info: %d\n", ret));
7087 goto fail;
7088 }
7089 /* If NDPE is enabled, duplicate svc_info and send it as part of the NDPE TLV
7090 * list too, along with SD SVC INFO, as FW treats the two as different
7091 * entities whereas the framework sends both in the same variable
7092 * (cmd_data->svc_info). FW will decide which one to use based on the
7093 * peer's capability (NDPE capable or not)
7094 */
7095 if (cfg->nancfg->ndpe_enabled) {
7096 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7097 WL_NAN_XTLV_SD_NDPE_TLV_LIST, cmd_data->svc_info.dlen,
7098 cmd_data->svc_info.data,
7099 BCM_XTLV_OPTION_ALIGN32);
7100 if (ret != BCME_OK) {
7101 WL_ERR(("unable to process NDPE TLV list: %d\n", ret));
7102 goto fail;
7103 }
7104 }
7105 dataresp->flags |= WL_NAN_DP_FLAG_SVC_INFO;
7106 }
7107
7108 /* Security elements */
7109 if (cmd_data->csid) {
7110 WL_TRACE(("Cipher suite type is present, pack it\n"));
7111 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7112 WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
7113 (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
7114 if (unlikely(ret)) {
7115 WL_ERR(("%s: fail to pack csid\n", __FUNCTION__));
7116 goto fail;
7117 }
7118 }
7119
7120 if (cmd_data->ndp_cfg.security_cfg) {
7121 if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
7122 (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
7123 if (cmd_data->key.data && cmd_data->key.dlen) {
7124 WL_TRACE(("optional pmk present, pack it\n"));
7125 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7126 WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
7127 cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
7128 if (unlikely(ret)) {
7129 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
7130 __FUNCTION__));
7131 goto fail;
7132 }
7133 }
7134 } else {
7135 WL_ERR(("Invalid security key type\n"));
7136 ret = BCME_BADARG;
7137 goto fail;
7138 }
7139
7140 if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
7141 (cmd_data->svc_hash.data)) {
7142 WL_TRACE(("svc hash present, pack it\n"));
7143 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7144 WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
7145 cmd_data->svc_hash.data,
7146 BCM_XTLV_OPTION_ALIGN32);
7147 if (ret != BCME_OK) {
7148 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
7149 __FUNCTION__));
7150 goto fail;
7151 }
7152 }
7153 /* If the Data resp is for secure data connection */
7154 dataresp->flags |= WL_NAN_DP_FLAG_SECURITY;
7155 }
7156
7157 sub_cmd->len += (buflen_avail - nan_buf_size);
7158
7159 nan_buf->is_set = false;
7160 nan_buf->count++;
7161 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
7162 &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
7163 if (unlikely(ret) || unlikely(cmd_data->status)) {
7164 WL_ERR(("nan data path response handler failed, error = %d, status %d\n",
7165 ret, cmd_data->status));
7166 goto fail;
7167 }
7168
7169 WL_INFORM_MEM(("[NAN] DP response successfull (ndp_id:%d)\n", dataresp->ndp_id));
7170
7171 fail:
7172 if (nan_buf) {
7173 MFREE(cfg->osh, nan_buf, data_size);
7174 }
7175
7176 if (resp_buf) {
7177 MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
7178 }
7179 NAN_MUTEX_UNLOCK();
7180 mutex_unlock(&cfg->if_sync);
7181
7182 NAN_DBG_EXIT();
7183 return ret;
7184 }
7185
7186 int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
7187 struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
7188 int *status)
7189 {
7190 bcm_iov_batch_buf_t *nan_buf = NULL;
7191 wl_nan_dp_end_t *dataend = NULL;
7192 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
7193 s32 ret = BCME_OK;
7194 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
7195 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
7196
7197 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
7198
7199 NAN_DBG_ENTER();
7200 NAN_MUTEX_LOCK();
7201
7202 if (!dhdp->up) {
7203 WL_ERR(("bus is already down, hence blocking nan dp end\n"));
7204 ret = BCME_OK;
7205 goto fail;
7206 }
7207
7208 if (!cfg->nancfg->nan_enable) {
7209 WL_ERR(("nan is not enabled, nan dp end blocked\n"));
7210 ret = BCME_OK;
7211 goto fail;
7212 }
7213
7214 /* ndp instance id must be from 1 to 255, 0 is reserved */
7215 if (ndp_instance_id < NAN_ID_MIN ||
7216 ndp_instance_id > NAN_ID_MAX) {
7217 WL_ERR(("Invalid ndp instance id: %d\n", ndp_instance_id));
7218 ret = BCME_BADARG;
7219 goto fail;
7220 }
7221
7222 nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
7223 if (!nan_buf) {
7224 WL_ERR(("%s: memory allocation failed\n", __func__));
7225 ret = BCME_NOMEM;
7226 goto fail;
7227 }
7228
7229 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
7230 nan_buf->count = 0;
7231 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
7232
7233 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
7234 dataend = (wl_nan_dp_end_t *)(sub_cmd->data);
7235
7236 /* Fill sub_cmd block */
7237 sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAEND);
7238 sub_cmd->len = sizeof(sub_cmd->u.options) +
7239 sizeof(*dataend);
7240 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
7241
7242 dataend->lndp_id = ndp_instance_id;
7243
7244 /*
7245 * Currently the fw requires ndp_id and a reason code to end the data path,
7246 * but wifi_nan.h takes ndp_instances_count and ndp_id.
7247 * Keep reason = accept always.
7248 */
7249
7250 dataend->status = 1;
7251
7252 nan_buf->is_set = true;
7253 nan_buf->count++;
7254
7255 nan_buf_size -= (sub_cmd->len +
7256 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
7257 bzero(resp_buf, sizeof(resp_buf));
7258 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
7259 status, (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
7260 if (unlikely(ret) || unlikely(*status)) {
7261 WL_ERR(("nan data path end handler failed, error = %d status %d\n",
7262 ret, *status));
7263 goto fail;
7264 }
7265 WL_INFORM_MEM(("[NAN] DP end successfull (ndp_id:%d)\n",
7266 dataend->lndp_id));
7267 fail:
7268 if (nan_buf) {
7269 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
7270 }
7271
7272 NAN_MUTEX_UNLOCK();
7273 NAN_DBG_EXIT();
7274 return ret;
7275 }
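
/*
 * Minimal usage sketch for the handler above (illustrative only; assumes
 * a valid ndev/cfg pair and an NDP id obtained from an earlier data path
 * request):
 *
 *   int status = 0;
 *   int err = wl_cfgnan_data_path_end_handler(ndev, cfg, ndp_id, &status);
 *   if (err != BCME_OK || status) {
 *           // tear-down did not go through; caller may retry or clean up
 *   }
 */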
7276
7277 #ifdef WL_NAN_DISC_CACHE
7278 int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
7279 nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp)
7280 {
7281 s32 ret = BCME_NOTFOUND;
7282 /* check in cache */
7283 nan_disc_result_cache *disc_cache = NULL;
7284 nan_svc_info_t *svc_info = NULL;
7285
7286 NAN_DBG_ENTER();
7287 NAN_MUTEX_LOCK();
7288
7289 if (!cfg->nancfg->nan_init_state) {
7290 WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
7291 ret = BCME_NOTENABLED;
7292 goto fail;
7293 }
7294
7295 /* datapath request context */
7296 if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
7297 disc_cache = wl_cfgnan_get_disc_result(cfg,
7298 cmd_data->pub_id, &cmd_data->mac_addr);
7299 WL_DBG(("datapath request: PUB ID: = %d\n",
7300 cmd_data->pub_id));
7301 if (disc_cache) {
7302 (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
7303 disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
7304 ret = BCME_OK;
7305 } else {
7306 WL_ERR(("disc_cache is NULL\n"));
7307 goto fail;
7308 }
7309 }
7310
7311 /* datapath response context */
7312 if (cmd_data->ndp_instance_id) {
7313 WL_DBG(("datapath response: NDP ID: = %d\n",
7314 cmd_data->ndp_instance_id));
7315 svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
7316 /* Note: svc_info will not be present in OOB cases
7317 * In such case send NMI alone and let HAL handle if
7318 * svc_hash is mandatory
7319 */
7320 if (svc_info) {
7321 WL_DBG(("svc hash present, pack it\n"));
7322 (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
7323 svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
7324 } else {
7325 WL_INFORM_MEM(("svc_info not present..assuming OOB DP\n"));
7326 }
7327 /* Always send NMI */
7328 (void)memcpy_s(nan_req_resp->pub_nmi, ETHER_ADDR_LEN,
7329 cfg->nancfg->nan_nmi_mac, ETHER_ADDR_LEN);
7330 ret = BCME_OK;
7331 }
7332 fail:
7333 NAN_MUTEX_UNLOCK();
7334 NAN_DBG_EXIT();
7335 return ret;
7336 }
7337 #endif /* WL_NAN_DISC_CACHE */
7338
7339 #ifdef RTT_SUPPORT
7340 static s32 wl_nan_cache_to_event_data(nan_disc_result_cache *cache,
7341 nan_event_data_t *nan_event_data, osl_t *osh)
7342 {
7343 s32 ret = BCME_OK;
7344 NAN_DBG_ENTER();
7345
7346 nan_event_data->pub_id = cache->pub_id;
7347 nan_event_data->sub_id = cache->sub_id;
7348 nan_event_data->publish_rssi = cache->publish_rssi;
7349 nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
7350 ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
7351 &cache->peer, ETHER_ADDR_LEN);
7352 if (ret != BCME_OK) {
7353 WL_ERR(("Failed to copy cached peer nan nmi\n"));
7354 goto fail;
7355 }
7356
7357 if (cache->svc_info.dlen && cache->svc_info.data) {
7358 nan_event_data->svc_info.dlen = cache->svc_info.dlen;
7359 nan_event_data->svc_info.data =
7360 MALLOCZ(osh, nan_event_data->svc_info.dlen);
7361 if (!nan_event_data->svc_info.data) {
7362 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
7363 nan_event_data->svc_info.dlen = 0;
7364 ret = -ENOMEM;
7365 goto fail;
7366 }
7367 ret = memcpy_s(nan_event_data->svc_info.data, nan_event_data->svc_info.dlen,
7368 cache->svc_info.data, cache->svc_info.dlen);
7369 if (ret != BCME_OK) {
7370 WL_ERR(("Failed to copy cached svc info data\n"));
7371 goto fail;
7372 }
7373 }
7374 if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
7375 nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
7376 nan_event_data->tx_match_filter.data =
7377 MALLOCZ(osh, nan_event_data->tx_match_filter.dlen);
7378 if (!nan_event_data->tx_match_filter.data) {
7379 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
7380 nan_event_data->tx_match_filter.dlen = 0;
7381 ret = -ENOMEM;
7382 goto fail;
7383 }
7384 ret = memcpy_s(nan_event_data->tx_match_filter.data,
7385 nan_event_data->tx_match_filter.dlen,
7386 cache->tx_match_filter.data, cache->tx_match_filter.dlen);
7387 if (ret != BCME_OK) {
7388 WL_ERR(("Failed to copy cached tx match filter data\n"));
7389 goto fail;
7390 }
7391 }
7392 fail:
7393 NAN_DBG_EXIT();
7394 return ret;
7395 }
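
/*
 * Ownership note for the copies made above: svc_info.data and
 * tx_match_filter.data are allocated against nan_event_data and are
 * released by wl_cfgnan_clear_nan_event_data(), which frees the data
 * buffers along with the event container itself.
 */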
7396
7397 /*
7398 * API to cancel ranging for a given instance.
7399 * For a geofence initiator, suspend ranging.
7400 * For a directed RTT initiator, report a failed result, cancel ranging
7401 * and clear the ranging instance.
7402 * For a responder, cancel ranging and clear the ranging instance.
7403 */
7404 static s32
7405 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
7406 nan_ranging_inst_t *rng_inst, int reason)
7407 {
7408 uint32 status = 0;
7409 int err = BCME_OK;
7410 struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
7411 dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
7412
7413 if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE &&
7414 rng_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
7415 err = wl_cfgnan_suspend_geofence_rng_session(ndev,
7416 &rng_inst->peer_addr, reason, 0);
7417 } else {
7418 if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
7419 dhd_rtt_handle_nan_rtt_session_end(dhdp,
7420 &rng_inst->peer_addr);
7421 }
7422 /* responder */
7423 err = wl_cfgnan_cancel_ranging(ndev, cfg,
7424 &rng_inst->range_id,
7425 NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
7426 wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
7427 }
7428
7429 if (err) {
7430 WL_ERR(("Failed to stop ranging with peer, err : %d\n", err));
7431 }
7432
7433 return err;
7434 }
7435
7436 /*
7437 * Handle NDP-Ranging concurrency
7438 * for an incoming DP request:
7439 * cancel ranging with the same peer, and
7440 * cancel ranging set-ups in progress
7441 * for all other peers
7442 */
7443 static s32
7444 wl_cfgnan_handle_dp_ranging_concurrency(struct bcm_cfg80211 *cfg,
7445 struct ether_addr *peer, int reason)
7446 {
7447 uint8 i = 0;
7448 nan_ranging_inst_t *cur_rng_inst = NULL;
7449 nan_ranging_inst_t *rng_inst = NULL;
7450 int err = BCME_OK;
7451
7452 /*
7453 * FixMe:
7454 * DP-Ranging concurrency will need more
7455 * than what has been addressed till now.
7456 * Poll max rng sessions, update them and
7457 * take relevant actions accordingly.
7458 */
7459
7460 cur_rng_inst = wl_cfgnan_check_for_ranging(cfg, peer);
7461
7462 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
7463 rng_inst = &cfg->nancfg->nan_ranging_info[i];
7464 if (rng_inst->in_use) {
7465 if ((cur_rng_inst && cur_rng_inst == rng_inst) &&
7466 NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) {
7467 err = wl_cfgnan_clear_peer_ranging(cfg, rng_inst,
7468 RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
7469 }
7470 }
7471 }
7472
7473 if (err) {
7474 WL_ERR(("Failed to handle dp ranging concurrency, err : %d\n", err));
7475 }
7476
7477 return err;
7478 }
7479
7480 bool
7481 wl_cfgnan_check_role_concurrency(struct bcm_cfg80211 *cfg,
7482 struct ether_addr *peer_addr)
7483 {
7484 nan_ranging_inst_t *rng_inst = NULL;
7485 bool role_conc_status = FALSE;
7486
7487 rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
7488 if (rng_inst) {
7489 role_conc_status = rng_inst->role_concurrency_status;
7490 }
7491
7492 return role_conc_status;
7493 }
7494 #endif /* RTT_SUPPORT */
7495
7496 static s32
7497 wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
7498 uint16 data_len, uint16 *tlvs_offset,
7499 uint16 *nan_opts_len, uint32 event_num,
7500 int *hal_event_id, nan_event_data_t *nan_event_data)
7501 {
7502 s32 ret = BCME_OK;
7503 uint8 i;
7504 wl_nan_ev_datapath_cmn_t *ev_dp;
7505 nan_svc_info_t *svc_info;
7506 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7507 #ifdef RTT_SUPPORT
7508 nan_ranging_inst_t *rng_inst = NULL;
7509 #endif /* RTT_SUPPORT */
7510
7511 if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
7512 ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
7513 NAN_DBG_ENTER();
7514
7515 BCM_REFERENCE(svc_info);
7516 BCM_REFERENCE(i);
7517 /* Mapping to common struct between DHD and HAL */
7518 WL_TRACE(("Event type: %d\n", ev_dp->type));
7519 nan_event_data->type = ev_dp->type;
7520 WL_TRACE(("pub_id: %d\n", ev_dp->pub_id));
7521 nan_event_data->pub_id = ev_dp->pub_id;
7522 WL_TRACE(("security: %d\n", ev_dp->security));
7523 nan_event_data->security = ev_dp->security;
7524
7525 /* Store initiator_ndi, required for data_path_response_request */
7526 ret = memcpy_s(&cfg->nancfg->initiator_ndi, ETHER_ADDR_LEN,
7527 &ev_dp->initiator_ndi, ETHER_ADDR_LEN);
7528 if (ret != BCME_OK) {
7529 WL_ERR(("Failed to copy event's initiator addr\n"));
7530 goto fail;
7531 }
7532 if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
7533 WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
7534 nan_event_data->ndp_id = ev_dp->ndp_id;
7535 WL_TRACE(("INITIATOR_NDI: " MACDBG "\n",
7536 MAC2STRDBG(ev_dp->initiator_ndi.octet)));
7537 WL_TRACE(("RESPONDOR_NDI: " MACDBG "\n",
7538 MAC2STRDBG(ev_dp->responder_ndi.octet)));
7539 WL_TRACE(("PEER NMI: " MACDBG "\n",
7540 MAC2STRDBG(ev_dp->peer_nmi.octet)));
7541 ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
7542 &ev_dp->peer_nmi, ETHER_ADDR_LEN);
7543 if (ret != BCME_OK) {
7544 WL_ERR(("Failed to copy event's peer nmi\n"));
7545 goto fail;
7546 }
7547 } else {
7548 /* type is multicast */
7549 WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
7550 nan_event_data->ndp_id = ev_dp->mc_id;
7551 WL_TRACE(("PEER NMI: " MACDBG "\n",
7552 MAC2STRDBG(ev_dp->peer_nmi.octet)));
7553 ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
7554 &ev_dp->peer_nmi,
7555 ETHER_ADDR_LEN);
7556 if (ret != BCME_OK) {
7557 WL_ERR(("Failed to copy event's peer nmi\n"));
7558 goto fail;
7559 }
7560 }
7561 *tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
7562 OFFSETOF(bcm_xtlv_t, data);
7563 *nan_opts_len = data_len - *tlvs_offset;
7564 if (event_num == WL_NAN_EVENT_PEER_DATAPATH_IND) {
7565 *hal_event_id = GOOGLE_NAN_EVENT_DATA_REQUEST;
7566 #ifdef WL_NAN_DISC_CACHE
7567 ret = wl_cfgnan_svc_inst_add_ndp(cfg, nan_event_data->pub_id,
7568 nan_event_data->ndp_id);
7569 if (ret != BCME_OK) {
7570 goto fail;
7571 }
7572 #endif /* WL_NAN_DISC_CACHE */
7573 /* Add peer to data ndp peer list */
7574 wl_cfgnan_data_add_peer(cfg, &ev_dp->peer_nmi);
7575 #ifdef RTT_SUPPORT
7576 /* cancel any ongoing RTT session with peer
7577 * as we do not support DP and RNG to the same peer
7578 */
7579 wl_cfgnan_handle_dp_ranging_concurrency(cfg, &ev_dp->peer_nmi,
7580 RTT_GEO_SUSPN_PEER_NDP_TRIGGER);
7581 #endif /* RTT_SUPPORT */
7582 } else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
7583 *hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
7584 if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
7585 ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
7586 &ev_dp->responder_ndi,
7587 ETHER_ADDR_LEN);
7588 if (ret != BCME_OK) {
7589 WL_ERR(("Failed to copy event's responder ndi\n"));
7590 goto fail;
7591 }
7592 WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
7593 MAC2STRDBG(ev_dp->responder_ndi.octet)));
7594 WL_TRACE(("Initiator status %d\n", nan_event_data->status));
7595 } else {
7596 ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
7597 &ev_dp->initiator_ndi,
7598 ETHER_ADDR_LEN);
7599 if (ret != BCME_OK) {
7600 WL_ERR(("Failed to copy event's responder ndi\n"));
7601 goto fail;
7602 }
7603 WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
7604 MAC2STRDBG(ev_dp->initiator_ndi.octet)));
7605 }
7606 if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
7607 nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
7608 wl_cfgnan_data_set_peer_dp_state(cfg, &ev_dp->peer_nmi,
7609 NAN_PEER_DP_CONNECTED);
7610 wl_cfgnan_update_dp_info(cfg, true, nan_event_data->ndp_id);
7611 wl_cfgnan_get_stats(cfg);
7612 } else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
7613 nan_event_data->status = NAN_DP_REQUEST_REJECT;
7614 #ifdef WL_NAN_DISC_CACHE
7615 if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
7616 /* Only at Responder side,
7617 * If dp is ended,
7618 * clear the resp ndp id from the svc info cache
7619 */
7620 ret = wl_cfgnan_svc_inst_del_ndp(cfg,
7621 nan_event_data->pub_id,
7622 nan_event_data->ndp_id);
7623 if (ret != BCME_OK) {
7624 goto fail;
7625 }
7626 }
7627 #endif /* WL_NAN_DISC_CACHE */
7628 /* Remove peer from data ndp peer list */
7629 wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
7630 #ifdef RTT_SUPPORT
7631 rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
7632 if (rng_inst) {
7633 /* Trigger/Reset geofence RTT */
7634 wl_cfgnan_reset_geofence_ranging(cfg,
7635 rng_inst, RTT_SCHED_DP_REJECTED, TRUE);
7636 }
7637 #endif /* RTT_SUPPORT */
7638 } else {
7639 WL_ERR(("%s:Status code = %x not expected\n",
7640 __FUNCTION__, ev_dp->status));
7641 ret = BCME_ERROR;
7642 goto fail;
7643 }
7644 WL_TRACE(("Responder status %d\n", nan_event_data->status));
7645 } else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
7646 /* Mapping to common struct between DHD and HAL */
7647 *hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
7648 #ifdef WL_NAN_DISC_CACHE
7649 if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
7650 /* Only at Responder side,
7651 * If dp is ended,
7652 * clear the resp ndp id from the svc info cache
7653 */
7654 ret = wl_cfgnan_svc_inst_del_ndp(cfg,
7655 nan_event_data->pub_id,
7656 nan_event_data->ndp_id);
7657 if (ret != BCME_OK) {
7658 goto fail;
7659 }
7660 }
7661 #endif /* WL_NAN_DISC_CACHE */
7662 /* Remove peer from data ndp peer list */
7663 wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
7664 wl_cfgnan_update_dp_info(cfg, false, nan_event_data->ndp_id);
7665 WL_INFORM_MEM(("DP_END for REMOTE_NMI: " MACDBG " with %s\n",
7666 MAC2STRDBG(&ev_dp->peer_nmi),
7667 nan_event_cause_to_str(ev_dp->event_cause)));
7668 #ifdef RTT_SUPPORT
7669 rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
7670 if (rng_inst) {
7671 /* Trigger/Reset geofence RTT */
7672 WL_INFORM_MEM(("sched geofence rtt from DP_END ctx: " MACDBG "\n",
7673 MAC2STRDBG(&rng_inst->peer_addr)));
7674 wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
7675 RTT_SCHED_DP_END, TRUE);
7676 }
7677 #endif /* RTT_SUPPORT */
7678 }
7679 } else {
7680 /* Not handling other XTLV IDs as of now */
7681 WL_DBG(("%s:ID = 0x%02x not supported\n", __FUNCTION__, xtlv->id));
7682 }
7683 fail:
7684 NAN_DBG_EXIT();
7685 return ret;
7686 }
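
/*
 * Quick reference for the fw event to HAL event mapping implemented
 * above (as coded here, not normative):
 *
 *   WL_NAN_EVENT_PEER_DATAPATH_IND -> GOOGLE_NAN_EVENT_DATA_REQUEST
 *   WL_NAN_EVENT_DATAPATH_ESTB     -> GOOGLE_NAN_EVENT_DATA_CONFIRMATION
 *   WL_NAN_EVENT_DATAPATH_END      -> GOOGLE_NAN_EVENT_DATA_END
 */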
7687
7688 #ifdef RTT_SUPPORT
7689 static int
7690 wl_cfgnan_event_disc_result(struct bcm_cfg80211 *cfg,
7691 nan_event_data_t *nan_event_data)
7692 {
7693 int ret = BCME_OK;
7694 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
7695 ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
7696 GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH, nan_event_data);
7697 if (ret != BCME_OK) {
7698 WL_ERR(("Failed to send event to nan hal\n"));
7699 }
7700 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
7701 return ret;
7702 }
7703
7704 #define IN_GEOFENCE(ingress, egress, distance) (((distance) <= (ingress)) && \
7705 ((distance) >= (egress)))
7706 #define IS_INGRESS_VAL(ingress, distance) ((distance) < (ingress))
7707 #define IS_EGRESS_VAL(egress, distance) ((distance) > (egress))
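
/*
 * Worked example for the window macros above: the ingress limit is the
 * outer (upper) bound and the egress limit the inner (lower) bound, all
 * values in the same distance unit:
 *
 *   IN_GEOFENCE(5000, 1000, 3000) -> TRUE  (1000 <= 3000 <= 5000)
 *   IS_INGRESS_VAL(5000, 4999)    -> TRUE  (4999 < 5000)
 *   IS_EGRESS_VAL(1000, 1001)     -> TRUE  (1001 > 1000)
 */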
7708
7709 static bool
7710 wl_cfgnan_check_ranging_cond(nan_svc_info_t *svc_info, uint32 distance,
7711 uint8 *ranging_ind, uint32 prev_distance)
7712 {
7713 uint8 svc_ind = svc_info->ranging_ind;
7714 bool notify = FALSE;
7715 bool range_rep_ev_once =
7716 !!(svc_info->svc_range_status & SVC_RANGE_REP_EVENT_ONCE);
7717 uint32 ingress_limit = svc_info->ingress_limit;
7718 uint32 egress_limit = svc_info->egress_limit;
7719
7720 if (svc_ind & NAN_RANGE_INDICATION_CONT) {
7721 *ranging_ind = NAN_RANGE_INDICATION_CONT;
7722 notify = TRUE;
7723 WL_ERR(("\n%s :Svc has continuous Ind %d\n",
7724 __FUNCTION__, __LINE__));
7725 goto done;
7726 }
7727
7728 if (svc_ind == (NAN_RANGE_INDICATION_INGRESS |
7729 NAN_RANGE_INDICATION_EGRESS)) {
7730 if (IN_GEOFENCE(ingress_limit, egress_limit, distance)) {
7731 /* if not already in geofence */
7732 if ((range_rep_ev_once == FALSE) ||
7733 (!IN_GEOFENCE(ingress_limit, egress_limit,
7734 prev_distance))) {
7735 notify = TRUE;
7736 if (distance > prev_distance) {
7737 *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
7738 } else {
7739 *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
7740 }
7741 WL_ERR(("\n%s :Svc has geofence Ind %d res_ind %d\n",
7742 __FUNCTION__, __LINE__, *ranging_ind));
7743 }
7744 }
7745 goto done;
7746 }
7747
7748 if (svc_ind == NAN_RANGE_INDICATION_INGRESS) {
7749 if (IS_INGRESS_VAL(ingress_limit, distance)) {
7750 if ((range_rep_ev_once == FALSE) ||
7751 (prev_distance == INVALID_DISTANCE) ||
7752 !IS_INGRESS_VAL(ingress_limit, prev_distance)) {
7753 notify = TRUE;
7754 *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
7755 WL_ERR(("\n%s :Svc has ingress Ind %d\n",
7756 __FUNCTION__, __LINE__));
7757 }
7758 }
7759 goto done;
7760 }
7761
7762 if (svc_ind == NAN_RANGE_INDICATION_EGRESS) {
7763 if (IS_EGRESS_VAL(egress_limit, distance)) {
7764 if ((range_rep_ev_once == FALSE) ||
7765 (prev_distance == INVALID_DISTANCE) ||
7766 !IS_EGRESS_VAL(egress_limit, prev_distance)) {
7767 notify = TRUE;
7768 *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
7769 WL_ERR(("\n%s :Svc has egress Ind %d\n",
7770 __FUNCTION__, __LINE__));
7771 }
7772 }
7773 goto done;
7774 }
7775 done:
7776 WL_INFORM_MEM(("SVC ranging Ind %d distance %d prev_distance %d, "
7777 "range_rep_ev_once %d ingress_limit %d egress_limit %d notify %d\n",
7778 svc_ind, distance, prev_distance, range_rep_ev_once,
7779 ingress_limit, egress_limit, notify));
7780 svc_info->svc_range_status |= SVC_RANGE_REP_EVENT_ONCE;
7781 return notify;
7782 }
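
/*
 * Example of the combined (ingress|egress) rule above, with
 * ingress_limit = 5000 and egress_limit = 1000, and assuming
 * INVALID_DISTANCE is the out-of-range sentinel used elsewhere in
 * this file:
 *
 *   prev = INVALID_DISTANCE, cur = 3000 -> notify (just entered window);
 *                                          INGRESS since cur < prev
 *   prev = 3000, cur = 4000             -> no notify once
 *                                          SVC_RANGE_REP_EVENT_ONCE is set
 *                                          (still inside the window)
 *   prev = 4000, cur = 6000             -> no notify in this branch
 *                                          (outside the window)
 */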
7783
7784 static int32
7785 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
7786 nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance)
7787 {
7788 nan_svc_info_t *svc_info;
7789 bool notify_svc = TRUE;
7790 nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
7791 uint8 ranging_ind = 0;
7792 int ret = BCME_OK;
7793 int i = 0, j = 0;
7794 uint8 result_present = nan_event_data->ranging_result_present;
7795
7796 for (i = 0; i < MAX_SUBSCRIBES; i++) {
7797 svc_info = rng_inst->svc_idx[i];
7798 if (svc_info && svc_info->ranging_required) {
7799 /* if ranging_result is present notify disc result if
7800 * result satisfies the conditions.
7801 * if ranging_result is not present, then notify disc
7802 * result without ranging info.
7803 */
7804 if (result_present) {
7805 notify_svc = wl_cfgnan_check_ranging_cond(svc_info, distance,
7806 &ranging_ind, rng_inst->prev_distance_mm);
7807 nan_event_data->ranging_ind = ranging_ind;
7808 }
7809 WL_DBG(("Ranging notify for svc_id %d, notify %d and ind %d"
7810 " distance_mm %d result_present %d\n", svc_info->svc_id, notify_svc,
7811 ranging_ind, distance, result_present));
7812 } else {
7813 continue;
7814 }
7815 if (notify_svc) {
7816 for (j = 0; j < NAN_MAX_CACHE_DISC_RESULT; j++) {
7817 if (!memcmp(&disc_res[j].peer,
7818 &(rng_inst->peer_addr), ETHER_ADDR_LEN) &&
7819 (svc_info->svc_id == disc_res[j].sub_id)) {
7820 ret = wl_nan_cache_to_event_data(&disc_res[j],
7821 nan_event_data, cfg->osh);
7822 ret = wl_cfgnan_event_disc_result(cfg, nan_event_data);
7823 /* If it's not match-once, clear it as the FW will indicate
7824 * again.
7825 */
7826 if (!(svc_info->flags & WL_NAN_MATCH_ONCE)) {
7827 wl_cfgnan_remove_disc_result(cfg, svc_info->svc_id);
7828 }
7829 }
7830 }
7831 }
7832 }
7833 WL_DBG(("notify_disc_with_ranging done ret %d\n", ret));
7834 return ret;
7835 }
7836
7837 static int32
7838 wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 *cfg,
7839 nan_ranging_inst_t *rng_inst)
7840 {
7841 int ret = BCME_OK;
7842 uint32 status;
7843 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7844
7845 ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
7846 &rng_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
7847 if (unlikely(ret) || unlikely(status)) {
7848 WL_ERR(("nan range cancel failed ret = %d status = %d\n", ret, status));
7849 }
7850 dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
7851 dhd_rtt_nan_update_directed_sessions_cnt(dhd, FALSE);
7852
7853 wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
7854
7855 WL_DBG(("Ongoing ranging session is cancelled \n"));
7856 return ret;
7857 }
7858
7859 static void
7860 wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
7861 nan_ranging_inst_t *rng_inst)
7862 {
7863 nan_event_data_t *nan_event_data = NULL;
7864
7865 nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
7866 if (!nan_event_data) {
7867 WL_ERR(("%s: memory allocation failed\n", __func__));
7868 goto exit;
7869 }
7870
7871 wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, nan_event_data, 0);
7872
7873 exit:
7874 wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
7875
7876 return;
7877 }
7878
7879 void
7880 wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
7881 wl_nan_ev_rng_rpt_ind_t *range_res, int status)
7882 {
7883 nan_ranging_inst_t *rng_inst = NULL;
7884 nan_event_data_t nan_event_data;
7885 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7886
7887 UNUSED_PARAMETER(nan_event_data);
7888 rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res->peer_m_addr);
7889 if (!rng_inst) {
7890 WL_ERR(("No ranging instance but received RNG RPT event..check \n"));
7891 goto exit;
7892 }
7893
7894 if (rng_inst->range_status != NAN_RANGING_SESSION_IN_PROGRESS) {
7895 WL_ERR(("SSN not in prog but received RNG RPT event..ignore \n"));
7896 goto exit;
7897 }
7898
7899 #ifdef NAN_RTT_DBG
7900 DUMP_NAN_RTT_INST(rng_inst);
7901 DUMP_NAN_RTT_RPT(range_res);
7902 #endif
7903 range_res->rng_id = rng_inst->range_id;
7904 bzero(&nan_event_data, sizeof(nan_event_data));
7905
7906 if (status == BCME_OK) {
7907 nan_event_data.ranging_result_present = 1;
7908 nan_event_data.range_measurement_cm = range_res->dist_mm;
7909 nan_event_data.ranging_ind = range_res->indication;
7910 }
7911
7912 (void)memcpy_s(&nan_event_data.remote_nmi, ETHER_ADDR_LEN,
7913 &range_res->peer_m_addr, ETHER_ADDR_LEN);
7914
7915 if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
7916 /* check in cache and event match to host */
7917 wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, &nan_event_data,
7918 range_res->dist_mm);
7919 rng_inst->prev_distance_mm = range_res->dist_mm;
7920 /* Reset geof retry count on valid measurement */
7921 rng_inst->geof_retry_count = 0;
7922 /*
7923 * Suspend and trigger other targets,
7924 * if running sessions maxed out and more
7925 * pending targets waiting for trigger
7926 */
7927 if (dhd_rtt_geofence_sessions_maxed_out(dhd) &&
7928 (dhd_rtt_get_geofence_target_cnt(dhd) >=
7929 dhd_rtt_get_geofence_max_sessions(dhd))) {
7930 /*
7931 * Update the target idx first, before suspending current target
7932 * or else current target will become eligible again
7933 * and will get scheduled again on reset ranging
7934 */
7935 wl_cfgnan_update_geofence_target_idx(cfg);
7936 wl_cfgnan_suspend_geofence_rng_session(bcmcfg_to_prmry_ndev(cfg),
7937 &rng_inst->peer_addr, RTT_GEO_SUSPN_RANGE_RES_REPORTED, 0);
7938 }
7939 wl_cfgnan_reset_geofence_ranging(cfg,
7940 rng_inst, RTT_SCHED_RNG_RPT_GEOFENCE, TRUE);
7941
7942 } else if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
7943 wl_cfgnan_handle_directed_rtt_report(cfg, rng_inst);
7944 }
7945 rng_inst->ftm_ssn_retry_count = 0;
7946
7947 exit:
7948 return;
7949 }
7950 #endif /* RTT_SUPPORT */
7951
7952 static void
7953 wl_nan_print_status(wl_nan_conf_status_t *nstatus)
7954 {
7955 WL_INFORM_MEM(("> NMI: " MACDBG " Cluster_ID: " MACDBG "\n",
7956 MAC2STRDBG(nstatus->nmi.octet),
7957 MAC2STRDBG(nstatus->cid.octet)));
7958
7959 WL_INFORM_MEM(("> NAN Device Role %s\n", nan_role_to_str(nstatus->role)));
7960 WL_INFORM_MEM(("> Social channels: %d, %d\n",
7961 nstatus->social_chans[0], nstatus->social_chans[1]));
7962
7963 WL_INFORM_MEM(("> Master_rank: " NMRSTR " AMR : " NMRSTR " Hop Count : %d, AMBTT : %d\n",
7964 NMR2STR(nstatus->mr),
7965 NMR2STR(nstatus->amr),
7966 nstatus->hop_count,
7967 nstatus->ambtt));
7968
7969 WL_INFORM_MEM(("> Cluster TSF_H: %x , Cluster TSF_L: %x\n",
7970 nstatus->cluster_tsf_h, nstatus->cluster_tsf_l));
7971 }
7972
7973 static void
7974 wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
7975 nan_event_data_t *nan_event_data)
7976 {
7977 if (nan_event_data) {
7978 if (nan_event_data->tx_match_filter.data) {
7979 MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
7980 nan_event_data->tx_match_filter.dlen);
7981 nan_event_data->tx_match_filter.data = NULL;
7982 }
7983 if (nan_event_data->rx_match_filter.data) {
7984 MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
7985 nan_event_data->rx_match_filter.dlen);
7986 nan_event_data->rx_match_filter.data = NULL;
7987 }
7988 if (nan_event_data->svc_info.data) {
7989 MFREE(cfg->osh, nan_event_data->svc_info.data,
7990 nan_event_data->svc_info.dlen);
7991 nan_event_data->svc_info.data = NULL;
7992 }
7993 if (nan_event_data->sde_svc_info.data) {
7994 MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
7995 nan_event_data->sde_svc_info.dlen);
7996 nan_event_data->sde_svc_info.data = NULL;
7997 }
7998 MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
7999 }
8000
8001 }
8002
8003 #ifdef RTT_SUPPORT
8004 bool
8005 wl_cfgnan_update_geofence_target_idx(struct bcm_cfg80211 *cfg)
8006 {
8007 int8 i = 0, target_cnt = 0;
8008 int8 cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
8009 rtt_geofence_target_info_t *geofence_target_info = NULL;
8010 bool found = false;
8011 nan_ranging_inst_t *rng_inst = NULL;
8012 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
8013 rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
8014
8015 target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
8016 ASSERT(target_cnt);
8017 if (target_cnt == 0) {
8018 WL_DBG(("No geofence targets to schedule\n"));
8019 dhd_rtt_set_geofence_cur_target_idx(dhd,
8020 DHD_RTT_INVALID_TARGET_INDEX);
8021 goto exit;
8022 }
8023
8024 /* cur idx is validated too, in the following API */
8025 cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
8026 if (cur_idx == DHD_RTT_INVALID_TARGET_INDEX) {
8027 WL_DBG(("invalid current target index, start looking from first\n"));
8028 cur_idx = 0;
8029 }
8030
8031 geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
8032
8033 /* Loop through to find eligible target idx */
8034 i = cur_idx;
8035 do {
8036 if (geofence_target_info[i].valid == TRUE) {
8037 rng_inst = wl_cfgnan_check_for_ranging(cfg,
8038 &geofence_target_info[i].peer_addr);
8039 if (rng_inst &&
8040 (!NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) &&
8041 (!wl_cfgnan_check_role_concurrency(cfg,
8042 &rng_inst->peer_addr))) {
8043 found = TRUE;
8044 break;
8045 }
8046 }
8047 i++;
8048 if (i == target_cnt) {
8049 i = 0;
8050 }
8051 } while (i != cur_idx);
8052
8053 if (found) {
8054 dhd_rtt_set_geofence_cur_target_idx(dhd, i);
8055 WL_DBG(("Updated cur index, cur_idx = %d, target_cnt = %d\n",
8056 i, target_cnt));
8057 } else {
8058 dhd_rtt_set_geofence_cur_target_idx(dhd,
8059 DHD_RTT_INVALID_TARGET_INDEX);
8060 WL_DBG(("Invalidated cur_idx, as either no target present, or all "
8061 "target already running, target_cnt = %d\n", target_cnt));
8062
8063 }
8064
8065 exit:
8066 return found;
8067 }
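
/*
 * The do/while above is a circular scan: with target_cnt = 4 and
 * cur_idx = 2 the probe order is 2 -> 3 -> 0 -> 1, stopping at the
 * first valid target whose ranging instance is neither in progress
 * nor role-concurrent, or after one full wrap back to cur_idx.
 */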
8068
8069 /*
8070 * Triggers rtt work thread
8071 * if set up not in prog already
8072 * and max sessions not maxed out,
8073 * after setting next eligible target index
8074 */
8075 void
8076 wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
8077 nan_ranging_inst_t * rng_inst, int sched_reason,
8078 bool need_rtt_mutex)
8079 {
8080 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
8081 u8 rtt_invalid_reason = RTT_STATE_VALID;
8082 rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
8083 int8 target_cnt = 0;
8084 int reset_req_drop = 0;
8085
8086 if (need_rtt_mutex == TRUE) {
8087 mutex_lock(&rtt_status->rtt_mutex);
8088 }
8089
8090 WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging: "
8091 "sched_reason = %d, cur_idx = %d, target_cnt = %d\n",
8092 sched_reason, rtt_status->geofence_cfg.cur_target_idx,
8093 rtt_status->geofence_cfg.geofence_target_cnt));
8094
8095 if (rtt_status->rtt_sched == TRUE) {
8096 reset_req_drop = 1;
8097 goto exit;
8098 }
8099
8100 target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
8101 if (target_cnt == 0) {
8102 WL_DBG(("No geofence targets to schedule\n"));
8103 /*
8104 * FIXME:
8105 * No Geofence target
8106 * Remove all valid ranging inst
8107 */
8108 if (rng_inst) {
8109 WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
8110 MAC2STRDBG(&(rng_inst->peer_addr))));
8111 bzero(rng_inst, sizeof(*rng_inst));
8112 }
8113 /* Cancel pending retry timer if any */
8114 if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
8115 cancel_delayed_work(&rtt_status->rtt_retry_timer);
8116 }
8117
8118 /* invalidate current index as there are no targets */
8119 dhd_rtt_set_geofence_cur_target_idx(dhd,
8120 DHD_RTT_INVALID_TARGET_INDEX);
8121 reset_req_drop = 2;
8122 goto exit;
8123 }
8124
8125 if (dhd_rtt_is_geofence_setup_inprog(dhd)) {
8126 /* Will be called again for schedule once lock is removed */
8127 reset_req_drop = 3;
8128 goto exit;
8129 }
8130
8131 /* Avoid scheduling if
8132 * geofence is already running,
8133 * or directed RTT is in progress,
8134 * or the RTT state is invalid, e.g.
8135 * NDP with the peer
8136 */
8137 if ((!RTT_IS_STOPPED(rtt_status)) ||
8138 (rtt_invalid_reason != RTT_STATE_VALID)) {
8139 /* Not in valid RTT state, avoid schedule */
8140 reset_req_drop = 4;
8141 goto exit;
8142 }
8143
8144 if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
8145 reset_req_drop = 5;
8146 goto exit;
8147 }
8148
8149 if (!wl_cfgnan_update_geofence_target_idx(cfg)) {
8150 reset_req_drop = 6;
8151 goto exit;
8152 }
8153
8154 /*
8155 * FixMe: retry-geofence-target-over-a-timer logic
8156 * to be brought back later again,
8157 * in accordance with the new multipeer implementation
8158 */
8159
8160 /* schedule RTT */
8161 dhd_rtt_schedule_rtt_work_thread(dhd, sched_reason);
8162
8163 exit:
8164 if (reset_req_drop) {
8165 WL_INFORM_MEM(("reset geofence req dropped, reason = %d\n",
8166 reset_req_drop));
8167 }
8168 if (need_rtt_mutex == TRUE) {
8169 mutex_unlock(&rtt_status->rtt_mutex);
8170 }
8171 return;
8172 }
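
/*
 * Decoder for the reset_req_drop values logged above:
 *   1 - rtt work already scheduled     4 - RTT not stopped/state invalid
 *   2 - no geofence targets            5 - geofence sessions maxed out
 *   3 - geofence setup in progress     6 - no eligible target index found
 */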
8173
8174 void
8175 wl_cfgnan_reset_geofence_ranging_for_cur_target(dhd_pub_t *dhd, int sched_reason)
8176 {
8177 struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
8178 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
8179 rtt_geofence_target_info_t *geofence_target = NULL;
8180 nan_ranging_inst_t *ranging_inst = NULL;
8181
8182 geofence_target = dhd_rtt_get_geofence_current_target(dhd);
8183 if (!geofence_target) {
8184 WL_DBG(("reset ranging request dropped: geofence target null\n"));
8185 goto exit;
8186 }
8187
8188 ranging_inst = wl_cfgnan_check_for_ranging(cfg,
8189 &geofence_target->peer_addr);
8190 if (!ranging_inst) {
8191 WL_DBG(("reset ranging request dropped: ranging instance null\n"));
8192 goto exit;
8193 }
8194
8195 if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status) &&
8196 (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE)) {
8197 WL_DBG(("Ranging is already in progress for Current target "
8198 MACDBG " \n", MAC2STRDBG(&ranging_inst->peer_addr)));
8199 goto exit;
8200 }
8201
8202 wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst, sched_reason, TRUE);
8203
8204 exit:
8205 return;
8206 }
8207
8208 static bool
8209 wl_cfgnan_geofence_retry_check(nan_ranging_inst_t *rng_inst, uint8 reason_code)
8210 {
8211 bool geof_retry = FALSE;
8212
8213 switch (reason_code) {
8214 case NAN_RNG_TERM_IDLE_TIMEOUT:
8215 /* Fallthrough: keep adding more reason codes if needed */
8216 case NAN_RNG_TERM_RNG_RESP_TIMEOUT:
8217 case NAN_RNG_TERM_RNG_RESP_REJ:
8218 case NAN_RNG_TERM_RNG_TXS_FAIL:
8219 if (rng_inst->geof_retry_count <
8220 NAN_RNG_GEOFENCE_MAX_RETRY_CNT) {
8221 rng_inst->geof_retry_count++;
8222 geof_retry = TRUE;
8223 }
8224 break;
8225 default:
8226 /* FALSE for any other case */
8227 break;
8228 }
8229
8230 return geof_retry;
8231 }
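
/*
 * Example: a session terminated with NAN_RNG_TERM_RNG_RESP_TIMEOUT is
 * retried while geof_retry_count < NAN_RNG_GEOFENCE_MAX_RETRY_CNT; the
 * counter is reset to 0 on a valid range report (see
 * wl_cfgnan_process_range_report()), so only consecutive failures
 * exhaust the retry budget. Any other reason code ends the session.
 */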
8232 #endif /* RTT_SUPPORT */
8233
8234 s32
8235 wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
8236 bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *event_data)
8237 {
8238 uint16 data_len;
8239 uint32 event_num;
8240 s32 event_type;
8241 int hal_event_id = 0;
8242 nan_event_data_t *nan_event_data = NULL;
8243 nan_parse_event_ctx_t nan_event_ctx;
8244 uint16 tlvs_offset = 0;
8245 uint16 nan_opts_len = 0;
8246 uint8 *tlv_buf;
8247 s32 ret = BCME_OK;
8248 bcm_xtlv_opts_t xtlv_opt = BCM_IOV_CMD_OPT_ALIGN32;
8249 uint32 status;
8250 nan_svc_info_t *svc;
8251 #ifdef RTT_SUPPORT
8252 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
8253 rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
8254 UNUSED_PARAMETER(dhd);
8255 UNUSED_PARAMETER(rtt_status);
8256 if (rtt_status == NULL) {
8257 return -EINVAL;
8258 }
8259 #endif /* RTT_SUPPORT */
8260
8261 UNUSED_PARAMETER(wl_nan_print_status);
8262 UNUSED_PARAMETER(status);
8263 NAN_DBG_ENTER();
8264
8265 if (!event || !event_data) {
8266 WL_ERR(("event data is NULL\n"));
8267 return -EINVAL;
8268 }
8269
8270 event_type = ntoh32(event->event_type);
8271 event_num = ntoh32(event->reason);
8272 data_len = ntoh32(event->datalen);
8273
8274 #ifdef RTT_SUPPORT
8275 if (event_num == WL_NAN_EVENT_RNG_REQ_IND)
8276 {
8277 /* Flush any RTT work to avoid any
8278 * inconsistencies & ensure RNG REQ
8279 * is handled in a stable RTT state.
8280 * Note new RTT work can be enqueued from
8281 * a. host command context - synchronized over rtt_mutex & state
8282 * b. event context - event processing is synchronized/serialised
8283 */
8284 flush_work(&rtt_status->work);
8285 }
8286 #endif /* RTT_SUPPORT */
8287
8288 NAN_MUTEX_LOCK();
8289
8290 if (NAN_INVALID_EVENT(event_num)) {
8291 WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num, event_type));
8292 ret = -EINVAL;
8293 goto exit;
8294 }
8295
8296 WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
8297 nan_event_to_str(event_num), event_num, data_len));
8298
8299 #ifdef WL_NAN_DEBUG
8300 prhex("nan_event_data:", event_data, data_len);
8301 #endif /* WL_NAN_DEBUG */
8302
8303 if (!cfg->nancfg->nan_init_state) {
8304 WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
8305 ret = BCME_OK;
8306 goto exit;
8307 }
8308
8309 nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
8310 if (!nan_event_data) {
8311 WL_ERR(("%s: memory allocation failed\n", __func__));
8312 goto exit;
8313 }
8314
8315 nan_event_ctx.cfg = cfg;
8316 nan_event_ctx.nan_evt_data = nan_event_data;
8317 /*
8318 * send as preformatted hex string
8319 * EVENT_NAN <event_type> <tlv_hex_string>
8320 */
8321 switch (event_num) {
8322 case WL_NAN_EVENT_START:
8323 case WL_NAN_EVENT_MERGE:
8324 case WL_NAN_EVENT_ROLE: {
8325 /* get nan status info as-is */
8326 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8327 wl_nan_conf_status_t *nstatus = (wl_nan_conf_status_t *)xtlv->data;
8328 WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
8329 nan_event_to_str(event_num), event_num, data_len));
8330 WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
8331 /* Mapping to common struct between DHD and HAL */
8332 nan_event_data->enabled = nstatus->enabled;
8333 ret = memcpy_s(&nan_event_data->local_nmi, ETHER_ADDR_LEN,
8334 &nstatus->nmi, ETHER_ADDR_LEN);
8335 if (ret != BCME_OK) {
8336 WL_ERR(("Failed to copy nmi\n"));
8337 goto exit;
8338 }
8339 ret = memcpy_s(&nan_event_data->clus_id, ETHER_ADDR_LEN,
8340 &nstatus->cid, ETHER_ADDR_LEN);
8341 if (ret != BCME_OK) {
8342 WL_ERR(("Failed to copy cluster id\n"));
8343 goto exit;
8344 }
8345 nan_event_data->nan_de_evt_type = event_num;
8346 if (event_num == WL_NAN_EVENT_ROLE) {
8347 wl_nan_print_status(nstatus);
8348 }
8349
8350 if (event_num == WL_NAN_EVENT_START) {
8351 OSL_SMP_WMB();
8352 cfg->nancfg->nan_event_recvd = true;
8353 OSL_SMP_WMB();
8354 wake_up(&cfg->nancfg->nan_event_wait);
8355 }
8356 hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
8357 break;
8358 }
8359 case WL_NAN_EVENT_TERMINATED: {
8360 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8361 wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
8362
8363 /* Mapping to common struct between DHD and HAL */
8364 WL_TRACE(("Instance ID: %d\n", pev->instance_id));
8365 nan_event_data->local_inst_id = pev->instance_id;
8366 WL_TRACE(("Service Type: %d\n", pev->svctype));
8367
8368 #ifdef WL_NAN_DISC_CACHE
8369 wl_cfgnan_clear_svc_cache(cfg, pev->instance_id);
8370 /* If we have to store disc_res even after sub_cancel,
8371 * do not call the below API; need to decide on the criteria to expire
8372 */
8373 if (pev->svctype == NAN_SC_SUBSCRIBE) {
8374 wl_cfgnan_remove_disc_result(cfg, pev->instance_id);
8375 }
8376 #endif /* WL_NAN_DISC_CACHE */
8377 /* Mapping reason code of FW to status code of framework */
8378 if (pev->reason == NAN_TERM_REASON_TIMEOUT ||
8379 pev->reason == NAN_TERM_REASON_USER_REQ ||
8380 pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
8381 nan_event_data->status = NAN_STATUS_SUCCESS;
8382 ret = memcpy_s(nan_event_data->nan_reason,
8383 sizeof(nan_event_data->nan_reason),
8384 "NAN_STATUS_SUCCESS",
8385 strlen("NAN_STATUS_SUCCESS"));
8386 if (ret != BCME_OK) {
8387 WL_ERR(("Failed to copy nan_reason\n"));
8388 goto exit;
8389 }
8390 } else {
8391 nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
8392 ret = memcpy_s(nan_event_data->nan_reason,
8393 sizeof(nan_event_data->nan_reason),
8394 "NAN_STATUS_INTERNAL_FAILURE",
8395 strlen("NAN_STATUS_INTERNAL_FAILURE"));
8396 if (ret != BCME_OK) {
8397 WL_ERR(("Failed to copy nan_reason\n"));
8398 goto exit;
8399 }
8400 }
8401
8402 if (pev->svctype == NAN_SC_SUBSCRIBE) {
8403 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED;
8404 } else {
8405 hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
8406 }
8407 #ifdef WL_NAN_DISC_CACHE
8408 #ifdef RTT_SUPPORT
8409 if (pev->reason != NAN_TERM_REASON_USER_REQ) {
8410 wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, pev->instance_id);
8411 /* terminate ranging sessions */
8412 wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
8413 }
8414 #endif /* RTT_SUPPORT */
8415 #endif /* WL_NAN_DISC_CACHE */
8416 break;
8417 }
8418
8419 case WL_NAN_EVENT_RECEIVE: {
8420 nan_opts_len = data_len;
8421 hal_event_id = GOOGLE_NAN_EVENT_FOLLOWUP;
8422 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
8423 break;
8424 }
8425
8426 case WL_NAN_EVENT_TXS: {
8427 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8428 wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
8429 wl_nan_event_sd_txs_t *txs_sd = NULL;
8430 if (txs->status == WL_NAN_TXS_SUCCESS) {
8431 WL_INFORM_MEM(("TXS success for type %s(%d) token %d\n",
8432 nan_frm_type_to_str(txs->type), txs->type, txs->host_seq));
8433 nan_event_data->status = NAN_STATUS_SUCCESS;
8434 ret = memcpy_s(nan_event_data->nan_reason,
8435 sizeof(nan_event_data->nan_reason),
8436 "NAN_STATUS_SUCCESS",
8437 strlen("NAN_STATUS_SUCCESS"));
8438 if (ret != BCME_OK) {
8439 WL_ERR(("Failed to copy nan_reason\n"));
8440 goto exit;
8441 }
8442 } else {
8443 /* TODO: populate status based on reason codes.
8444 * For now reporting it as no ACK, so that app/framework can retry.
8445 */
8446 WL_INFORM_MEM(("TXS failed for type %s(%d) status %d token %d\n",
8447 nan_frm_type_to_str(txs->type), txs->type, txs->status,
8448 txs->host_seq));
8449 nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
8450 ret = memcpy_s(nan_event_data->nan_reason,
8451 sizeof(nan_event_data->nan_reason),
8452 "NAN_STATUS_NO_OTA_ACK",
8453 strlen("NAN_STATUS_NO_OTA_ACK"));
8454 if (ret != BCME_OK) {
8455 WL_ERR(("Failed to copy nan_reason\n"));
8456 goto exit;
8457 }
8458 }
8459 nan_event_data->reason = txs->reason_code;
8460 nan_event_data->token = txs->host_seq;
8461 if (txs->type == WL_NAN_FRM_TYPE_FOLLOWUP) {
8462 hal_event_id = GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND;
8463 xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
8464 if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_SD_TXS) {
8465 txs_sd = (wl_nan_event_sd_txs_t*)xtlv->data;
8466 nan_event_data->local_inst_id = txs_sd->inst_id;
8467 } else {
8468 WL_ERR(("Invalid params in TX status for trasnmit followup"));
8469 ret = -EINVAL;
8470 goto exit;
8471 }
8472 #ifdef RTT_SUPPORT
8473 } else if (txs->type == WL_NAN_FRM_TYPE_RNG_RESP) {
8474 xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
8475 if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_RNG_TXS) {
8476 wl_nan_range_txs_t* txs_rng_resp = (wl_nan_range_txs_t*)xtlv->data;
8477 nan_ranging_inst_t *rng_inst =
8478 wl_cfgnan_get_rng_inst_by_id(cfg, txs_rng_resp->range_id);
8479 if (rng_inst &&
8480 NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status)) {
8481 /* Unset ranging set up in progress */
8482 dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
8483 &rng_inst->peer_addr);
8484 if (txs->status == WL_NAN_TXS_SUCCESS) {
8485 /* range set up is over, move range in progress */
8486 rng_inst->range_status =
8487 NAN_RANGING_SESSION_IN_PROGRESS;
8488 /* Increment geofence session count */
8489 dhd_rtt_update_geofence_sessions_cnt(dhd,
8490 TRUE, NULL);
8491 WL_DBG(("Txs for range resp, rng_id = %d\n",
8492 rng_inst->range_id));
8493 } else {
8494 wl_cfgnan_reset_remove_ranging_instance(cfg,
8495 rng_inst);
8496 }
8497 }
8498 } else {
8499 WL_ERR(("Invalid params in TX status for range response"));
8500 ret = -EINVAL;
8501 goto exit;
8502 }
8503 #endif /* RTT_SUPPORT */
8504 } else { /* TODO: add for other frame types if required */
8505 ret = -EINVAL;
8506 goto exit;
8507 }
8508 break;
8509 }
8510
8511 case WL_NAN_EVENT_DISCOVERY_RESULT: {
8512 nan_opts_len = data_len;
8513 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
8514 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
8515 break;
8516 }
8517 #ifdef WL_NAN_DISC_CACHE
8518 case WL_NAN_EVENT_DISC_CACHE_TIMEOUT: {
8519 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8520 wl_nan_ev_disc_cache_timeout_t *cache_data =
8521 (wl_nan_ev_disc_cache_timeout_t *)xtlv->data;
8522 wl_nan_disc_expired_cache_entry_t *cache_entry = NULL;
8523 uint16 xtlv_len = xtlv->len;
8524 uint8 entry_idx = 0;
8525
8526 if (xtlv->id == WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT) {
8527 xtlv_len = xtlv_len -
8528 OFFSETOF(wl_nan_ev_disc_cache_timeout_t, cache_exp_list);
8529 while ((entry_idx < cache_data->count) &&
8530 (xtlv_len >= sizeof(*cache_entry))) {
8531 cache_entry = &cache_data->cache_exp_list[entry_idx];
8532 /* Handle ranging cases for cache timeout */
8533 WL_INFORM_MEM(("WL_NAN_EVENT_DISC_CACHE_TIMEOUT peer: " MACDBG
8534 " l_id:%d r_id:%d\n", MAC2STRDBG(&cache_entry->r_nmi_addr),
8535 cache_entry->l_sub_id, cache_entry->r_pub_id));
8536 #ifdef RTT_SUPPORT
8537 wl_cfgnan_ranging_clear_publish(cfg, &cache_entry->r_nmi_addr,
8538 cache_entry->l_sub_id);
8539 #endif /* RTT_SUPPORT */
8540 /* Invalidate local cache info */
8541 wl_cfgnan_remove_disc_result(cfg, cache_entry->l_sub_id);
8542 xtlv_len = xtlv_len - sizeof(*cache_entry);
8543 entry_idx++;
8544 }
8545 }
8546 break;
8547 }
8548 #ifdef RTT_SUPPORT
8549 case WL_NAN_EVENT_RNG_REQ_IND: {
8550 wl_nan_ev_rng_req_ind_t *rng_ind;
8551 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8552
8553 nan_opts_len = data_len;
8554 rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
8555 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
8556 WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d"
8557 " peer:" MACDBG "\n", rng_ind->rng_id,
8558 MAC2STRDBG(&rng_ind->peer_m_addr)));
8559 ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
8560 /* no need to send this event to HAL */
8561 goto exit;
8562 }
8563
8564 case WL_NAN_EVENT_RNG_TERM_IND: {
8565 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8566 nan_ranging_inst_t *rng_inst;
8567 wl_nan_ev_rng_term_ind_t *range_term = (wl_nan_ev_rng_term_ind_t *)xtlv->data;
8568 int rng_sched_reason = 0;
8569 int8 index = -1;
8570 rtt_geofence_target_info_t* geofence_target;
8571 BCM_REFERENCE(dhd);
8572 WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_TERM_IND peer: " MACDBG ", "
8573 " Range ID:%d Reason Code:%d\n", MAC2STRDBG(&range_term->peer_m_addr),
8574 range_term->rng_id, range_term->reason_code));
8575 rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_term->rng_id);
8576 if (rng_inst) {
8577 if (!NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) {
8578 WL_DBG(("Late or unsynchronized nan term indicator event\n"));
8579 break;
8580 }
8581 rng_sched_reason = RTT_SCHED_RNG_TERM;
8582 if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
8583 dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
8584 &rng_inst->peer_addr);
8585 wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
8586 } else {
8587 if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
8588 dhd_rtt_handle_nan_rtt_session_end(dhd,
8589 &rng_inst->peer_addr);
8590 if (dhd_rtt_nan_is_directed_setup_in_prog_with_peer(dhd,
8591 &rng_inst->peer_addr)) {
8592 dhd_rtt_nan_update_directed_setup_inprog(dhd,
8593 NULL, FALSE);
8594 } else {
8595 dhd_rtt_nan_update_directed_sessions_cnt(dhd,
8596 FALSE);
8597 }
8598 } else if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
8599 rng_inst->range_status = NAN_RANGING_REQUIRED;
8600 dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
8601 &rng_inst->peer_addr);
8602 if (!wl_cfgnan_geofence_retry_check(rng_inst,
8603 range_term->reason_code)) {
8604 /* Report on ranging failure */
8605 wl_cfgnan_disc_result_on_geofence_cancel(cfg,
8606 rng_inst);
8607 WL_TRACE(("Reset the state on terminate\n"));
8608 geofence_target = dhd_rtt_get_geofence_target(dhd,
8609 &rng_inst->peer_addr, &index);
8610 if (geofence_target) {
8611 dhd_rtt_remove_geofence_target(dhd,
8612 &geofence_target->peer_addr);
8613 }
8614 }
8615 }
8616 }
8617 /* Reset Ranging Instance and trigger ranging if applicable */
8618 wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, rng_sched_reason, TRUE);
8619 } else {
8620 /*
8621 * This can happen in some scenarios
8622 * like receiving term after a fail txs for range resp
8623 * where ranging instance is already cleared
8624 */
8625 WL_DBG(("Term Indication recieved for a peer without rng inst\n"));
8626 }
8627 break;
8628 }
8629
8630 case WL_NAN_EVENT_RNG_RESP_IND: {
8631 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8632 nan_ranging_inst_t *rng_inst;
8633 wl_nan_ev_rng_resp_t *range_resp = (wl_nan_ev_rng_resp_t *)xtlv->data;
8634
8635 WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_RESP_IND peer: " MACDBG ", "
8636 " Range ID:%d Ranging Status:%d\n", MAC2STRDBG(&range_resp->peer_m_addr),
8637 range_resp->rng_id, range_resp->status));
8638 rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_resp->rng_id);
8639 if (!rng_inst) {
8640 WL_DBG(("Late or unsynchronized resp indicator event\n"));
8641 break;
8642 }
8643 //ASSERT(NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status));
8644 if (!NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status)) {
8645 WL_INFORM_MEM(("Resp Indicator received for not in prog range inst\n"));
8646 break;
8647 }
8648 /* range set up is over now, move to range in progress */
8649 rng_inst->range_status = NAN_RANGING_SESSION_IN_PROGRESS;
8650 if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
8651 /* FixMe: Ideally, all of the below (like updating the session count)
8652 * should be applicable to NAN RTT in general and not specific to
8653 * geofence. To be fixed in the next RB.
8654 */
8655 dhd_rtt_nan_update_directed_setup_inprog(dhd, NULL, FALSE);
8656 /*
8657 * Increase session count here,
8658 * failure status is followed by Term Ind
8659 * and handled accordingly
8660 */
8661 dhd_rtt_nan_update_directed_sessions_cnt(dhd, TRUE);
8662 /*
8663 * If pending targets to be triggered,
8664 * and max sessions, not running already,
8665 * schedule next target for RTT
8666 */
8667 if ((!dhd_rtt_nan_all_directed_sessions_triggered(dhd)) &&
8668 dhd_rtt_nan_directed_sessions_allowed(dhd)) {
8669 /* Find and set next directed target */
8670 dhd_rtt_set_next_target_idx(dhd,
8671 (dhd_rtt_get_cur_target_idx(dhd) + 1));
8672 /* schedule RTT */
8673 dhd_rtt_schedule_rtt_work_thread(dhd,
8674 RTT_SCHED_RNG_RESP_IND);
8675 }
8676 break;
8677 }
8678 /*
8679 ASSERT(dhd_rtt_is_geofence_setup_inprog_with_peer(dhd,
8680 &rng_inst->peer_addr));
8681 */
8682 if (!dhd_rtt_is_geofence_setup_inprog_with_peer(dhd,
8683 &rng_inst->peer_addr)) {
8684 WL_INFORM_MEM(("Resp Indicator received for not in prog range peer\n"));
8685 break;
8686 }
8687 /* Unset geof ranging setup status */
8688 dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE, &rng_inst->peer_addr);
8689 /* Increase geofence session count */
8690 dhd_rtt_update_geofence_sessions_cnt(dhd, TRUE, NULL);
8691 wl_cfgnan_reset_geofence_ranging(cfg,
8692 rng_inst, RTT_SCHED_RNG_RESP_IND, TRUE);
8693 break;
8694 }
8695 #endif /* RTT_SUPPORT */
8696 #endif /* WL_NAN_DISC_CACHE */
8697 /*
8698 * Data path event data is received in a common event struct.
8699 * All such events are handled in one case, hence the fall-through is intentional.
8700 */
8701 case WL_NAN_EVENT_PEER_DATAPATH_IND:
8702 case WL_NAN_EVENT_DATAPATH_ESTB:
8703 case WL_NAN_EVENT_DATAPATH_END: {
8704 ret = wl_nan_dp_cmn_event_data(cfg, event_data, data_len,
8705 &tlvs_offset, &nan_opts_len,
8706 event_num, &hal_event_id, nan_event_data);
8707 /* Avoiding optional param parsing for DP END Event */
8708 if (event_num == WL_NAN_EVENT_DATAPATH_END) {
8709 nan_opts_len = 0;
8710 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
8711 }
8712 if (unlikely(ret)) {
8713 WL_ERR(("nan dp common event data parse failed\n"));
8714 goto exit;
8715 }
8716 break;
8717 }
8718 case WL_NAN_EVENT_PEER_DATAPATH_RESP:
8719 {
8720 /* No action - intentionally added to avoid prints when this event is received */
8721 break;
8722 }
8723 default:
8724 WL_ERR_RLMT(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
8725 ret = BCME_ERROR;
8726 goto exit;
8727 }
8728
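/*
 * Each record in the options buffer is handed to wl_cfgnan_set_vars_cbfn
 * with that record's type, length and data pointer; the callback shape is
 * the same as wl_nan_print_stats_tlvs() later in this file, roughly
 * (a sketch of the contract, not a new definition):
 *
 *	int cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len);
 */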
8729 if (nan_opts_len) {
8730 tlv_buf = (uint8 *)event_data + tlvs_offset;
8731 /* Extract event data tlvs and pass their resp to cb fn */
8732 ret = bcm_unpack_xtlv_buf((void *)&nan_event_ctx, (const uint8*)tlv_buf,
8733 nan_opts_len, xtlv_opt, wl_cfgnan_set_vars_cbfn);
8734 if (ret != BCME_OK) {
8735 WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret));
8736 }
8737 }
8738
8739 #ifdef WL_NAN_DISC_CACHE
8740 if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
8741 #ifdef RTT_SUPPORT
8742 bool send_disc_result;
8743 #endif /* RTT_SUPPORT */
8744 u16 update_flags = 0;
8745
8746 WL_TRACE(("Cache disc res\n"));
8747 ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data, &update_flags);
8748 if (ret) {
8749 WL_ERR(("Failed to cache disc result ret %d\n", ret));
8750 }
8751 #ifdef RTT_SUPPORT
8752 if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
8753 ret = wl_cfgnan_check_disc_result_for_ranging(cfg,
8754 nan_event_data, &send_disc_result);
8755 if ((ret == BCME_OK) && (send_disc_result == FALSE)) {
8756 /* Avoid sending disc result instantly and exit */
8757 goto exit;
8758 } else {
8759 /* TODO: should we terminate service if ranging fails ? */
8760 WL_INFORM_MEM(("Ranging failed or not required, " MACDBG
8761 " sub_id:%d , pub_id:%d, ret = %d, send_disc_result = %d\n",
8762 MAC2STRDBG(&nan_event_data->remote_nmi),
8763 nan_event_data->sub_id, nan_event_data->pub_id,
8764 ret, send_disc_result));
8765 }
8766 } else {
8767 nan_svc_info_t *svc_info = wl_cfgnan_get_svc_inst(cfg,
8768 nan_event_data->sub_id, 0);
8769 if (svc_info && svc_info->ranging_required &&
8770 (update_flags & NAN_DISC_CACHE_PARAM_SDE_CONTROL)) {
8771 wl_cfgnan_ranging_clear_publish(cfg,
8772 &nan_event_data->remote_nmi, nan_event_data->sub_id);
8773 }
8774 }
8775 #endif /* RTT_SUPPORT */
8776
8777 /*
8778 * If tx match filter is present as part of active subscribe, keep same filter
8779 * values in discovery results also.
8780 */
8781 if (nan_event_data->sub_id == nan_event_data->requestor_id) {
8782 svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
8783 if (svc && svc->tx_match_filter_len) {
8784 nan_event_data->tx_match_filter.dlen = svc->tx_match_filter_len;
8785 nan_event_data->tx_match_filter.data =
8786 MALLOCZ(cfg->osh, svc->tx_match_filter_len);
8787 if (!nan_event_data->tx_match_filter.data) {
8788 WL_ERR(("%s: tx_match_filter_data alloc failed\n",
8789 __FUNCTION__));
8790 nan_event_data->tx_match_filter.dlen = 0;
8791 ret = -ENOMEM;
8792 goto exit;
8793 }
8794 ret = memcpy_s(nan_event_data->tx_match_filter.data,
8795 nan_event_data->tx_match_filter.dlen,
8796 svc->tx_match_filter, svc->tx_match_filter_len);
8797 if (ret != BCME_OK) {
8798 WL_ERR(("Failed to copy tx match filter data\n"));
8799 goto exit;
8800 }
8801 }
8802 }
8803 }
8804 #endif /* WL_NAN_DISC_CACHE */
8805
8806 WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
8807 nan_event_to_str(event_num), event_num, hal_event_id));
8808 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
8809 ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
8810 hal_event_id, nan_event_data);
8811 if (ret != BCME_OK) {
8812 WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
8813 nan_event_to_str(event_num), event_num));
8814 }
8815 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
8816
8817 exit:
8818 wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
8819
8820 NAN_MUTEX_UNLOCK();
8821 NAN_DBG_EXIT();
8822 return ret;
8823 }
8824
8825 #ifdef WL_NAN_DISC_CACHE
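/*
 * Cache one discovery result in the host-side cache. If an entry for the
 * same peer NMI and service hash already exists, only changed parameters
 * are refreshed and the matching bits (currently just
 * NAN_DISC_CACHE_PARAM_SDE_CONTROL) are set in disc_cache_update_flags.
 */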
8826 static int
8827 wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
8828 u16 *disc_cache_update_flags)
8829 {
8830 nan_event_data_t* disc = (nan_event_data_t*)data;
8831 int i, add_index = 0;
8832 int ret = BCME_OK;
8833 wl_nancfg_t *nancfg = cfg->nancfg;
8834 nan_disc_result_cache *disc_res = nancfg->nan_disc_cache;
8835 *disc_cache_update_flags = 0;
8836
8837 if (!nancfg->nan_enable) {
8838 WL_DBG(("nan not enabled"));
8839 return BCME_NOTENABLED;
8840 }
8841 if (nancfg->nan_disc_count == NAN_MAX_CACHE_DISC_RESULT) {
8842 WL_DBG(("cache full"));
8843 ret = BCME_NORESOURCE;
8844 goto done;
8845 }
8846
8847 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
8848 if (!disc_res[i].valid) {
8849 add_index = i;
8850 continue;
8851 }
8852 if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
8853 !memcmp(disc_res[i].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN)) {
8854 WL_DBG(("cache entry already present, i = %d", i));
8855 /* Update needed parameters here */
8856 if (disc_res[i].sde_control_flag != disc->sde_control_flag) {
8857 disc_res[i].sde_control_flag = disc->sde_control_flag;
8858 *disc_cache_update_flags |= NAN_DISC_CACHE_PARAM_SDE_CONTROL;
8859 }
8860 ret = BCME_OK; /* entry already present */
8861 goto done;
8862 }
8863 }
8864 WL_DBG(("adding cache entry: add_index = %d\n", add_index));
8865 disc_res[add_index].valid = 1;
8866 disc_res[add_index].pub_id = disc->pub_id;
8867 disc_res[add_index].sub_id = disc->sub_id;
8868 disc_res[add_index].publish_rssi = disc->publish_rssi;
8869 disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
8870 disc_res[add_index].sde_control_flag = disc->sde_control_flag;
8871 ret = memcpy_s(&disc_res[add_index].peer, ETHER_ADDR_LEN,
8872 &disc->remote_nmi, ETHER_ADDR_LEN);
8873 if (ret != BCME_OK) {
8874 WL_ERR(("Failed to copy remote nmi\n"));
8875 goto done;
8876 }
8877 ret = memcpy_s(disc_res[add_index].svc_hash, WL_NAN_SVC_HASH_LEN,
8878 disc->svc_name, WL_NAN_SVC_HASH_LEN);
8879 if (ret != BCME_OK) {
8880 WL_ERR(("Failed to copy svc hash\n"));
8881 goto done;
8882 }
8883
8884 if (disc->svc_info.dlen && disc->svc_info.data) {
8885 disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
8886 disc_res[add_index].svc_info.data =
8887 MALLOCZ(cfg->osh, disc_res[add_index].svc_info.dlen);
8888 if (!disc_res[add_index].svc_info.data) {
8889 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
8890 disc_res[add_index].svc_info.dlen = 0;
8891 ret = BCME_NOMEM;
8892 goto done;
8893 }
8894 ret = memcpy_s(disc_res[add_index].svc_info.data, disc_res[add_index].svc_info.dlen,
8895 disc->svc_info.data, disc->svc_info.dlen);
8896 if (ret != BCME_OK) {
8897 WL_ERR(("Failed to copy svc info\n"));
8898 goto done;
8899 }
8900 }
8901 if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
8902 disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
8903 disc_res[add_index].tx_match_filter.data =
8904 MALLOCZ(cfg->osh, disc_res[add_index].tx_match_filter.dlen);
8905 if (!disc_res[add_index].tx_match_filter.data) {
8906 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
8907 disc_res[add_index].tx_match_filter.dlen = 0;
8908 ret = BCME_NOMEM;
8909 goto done;
8910 }
8911 ret = memcpy_s(disc_res[add_index].tx_match_filter.data,
8912 disc_res[add_index].tx_match_filter.dlen,
8913 disc->tx_match_filter.data, disc->tx_match_filter.dlen);
8914 if (ret != BCME_OK) {
8915 WL_ERR(("Failed to copy tx match filter\n"));
8916 goto done;
8917 }
8918 }
8919 nancfg->nan_disc_count++;
8920 WL_DBG(("cfg->nan_disc_count = %d\n", nancfg->nan_disc_count));
8921
8922 done:
8923 return ret;
8924 }
8925
8926 #ifdef RTT_SUPPORT
8927 /* Sending command to FW for clearing discovery cache info in FW */
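/*
 * Like the other NAN iovars in this file, the request is a batched iov
 * buffer: a bcm_iov_batch_buf_t header (version/count/is_set) followed by
 * one bcm_iov_batch_subcmd_t per sub-command, each carrying id, len,
 * options and an inline payload. Roughly (layout sketch):
 *
 *	| version | count | is_set | id | len | u.options | data[] |
 */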
8928 static int
8929 wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id)
8930 {
8931 s32 ret = BCME_OK;
8932 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
8933 uint32 status;
8934 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
8935 uint8 buf[NAN_IOCTL_BUF_SIZE];
8936 bcm_iov_batch_buf_t *nan_buf;
8937 bcm_iov_batch_subcmd_t *sub_cmd;
8938 uint16 subcmd_len;
8939
8940 bzero(buf, sizeof(buf));
8941 nan_buf = (bcm_iov_batch_buf_t*)buf;
8942
8943 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
8944 nan_buf->count = 0;
8945 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
8946
8947 sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
8948 ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
8949 sizeof(sub_id), &subcmd_len);
8950 if (unlikely(ret)) {
8951 WL_ERR(("nan_sub_cmd check failed\n"));
8952 goto fail;
8953 }
8954
8955 /* Fill the sub_command block */
8956 sub_cmd->id = htod16(WL_NAN_CMD_SD_DISC_CACHE_CLEAR);
8957 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(sub_id);
8958 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
8959 /* Data size len vs buffer len check is already done above.
8960 * So, short buffer error is impossible.
8961 */
8962 (void)memcpy_s(sub_cmd->data, (nan_buf_size - OFFSETOF(bcm_iov_batch_subcmd_t, data)),
8963 &sub_id, sizeof(sub_id));
8964 /* adjust iov data len to the end of last data record */
8965 nan_buf_size -= (subcmd_len);
8966
8967 nan_buf->count++;
8968 nan_buf->is_set = true;
8969 nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
8970 /* nan_buf_size now holds the number of bytes actually filled */
8971 bzero(resp_buf, sizeof(resp_buf));
8972 ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
8973 nan_buf, nan_buf_size, &status,
8974 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
8975 if (unlikely(ret) || unlikely(status)) {
8976 WL_ERR(("Disc cache clear handler failed ret %d status %d\n",
8977 ret, status));
8978 goto fail;
8979 }
8980
8981 fail:
8982 return ret;
8983 }
8984 #endif /* RTT_SUPPORT */
8985
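/*
 * Invalidate every cached discovery result matching the given local
 * subscribe id: free the per-entry allocations, zero the slot and
 * decrement nan_disc_count. Returns BCME_NOTFOUND when nothing matched.
 */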
8986 static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
8987 uint8 local_subid)
8988 {
8989 int i;
8990 int ret = BCME_NOTFOUND;
8991 nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
8992 if (!cfg->nancfg->nan_enable) {
8993 WL_DBG(("nan not enabled\n"));
8994 ret = BCME_NOTENABLED;
8995 goto done;
8996 }
8997 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
8998 if ((disc_res[i].valid) && (disc_res[i].sub_id == local_subid)) {
8999 WL_TRACE(("make cache entry invalid\n"));
9000 if (disc_res[i].tx_match_filter.data) {
9001 MFREE(cfg->osh, disc_res[i].tx_match_filter.data,
9002 disc_res[i].tx_match_filter.dlen);
9003 }
9004 if (disc_res[i].svc_info.data) {
9005 MFREE(cfg->osh, disc_res[i].svc_info.data,
9006 disc_res[i].svc_info.dlen);
9007 }
9008 bzero(&disc_res[i], sizeof(disc_res[i]));
9009 cfg->nancfg->nan_disc_count--;
9010 ret = BCME_OK;
9011 }
9012 }
9013 WL_DBG(("couldn't find entry\n"));
9014 done:
9015 return ret;
9016 }
9017
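/*
 * Look up a cached discovery result for the given peer NMI. A non-zero
 * remote_pubid must also match the cached pub_id; a remote_pubid of 0
 * matches on the peer address alone. Returns NULL when not cached.
 */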
9018 static nan_disc_result_cache *
9019 wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
9020 struct ether_addr *peer)
9021 {
9022 int i;
9023 nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
9024 if (remote_pubid) {
9025 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
9026 if ((disc_res[i].pub_id == remote_pubid) &&
9027 !memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
9028 WL_DBG(("Found entry: i = %d\n", i));
9029 return &disc_res[i];
9030 }
9031 }
9032 } else {
9033 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
9034 if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
9035 WL_DBG(("Found entry: %d\n", i));
9036 return &disc_res[i];
9037 }
9038 }
9039 }
9040 return NULL;
9041 }
9042 #endif /* WL_NAN_DISC_CACHE */
9043
9044 static void
9045 wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
9046 nan_data_path_id ndp_id)
9047 {
9048 uint8 i;
9049 bool match_found = false;
9050 wl_nancfg_t *nancfg = cfg->nancfg;
9051 /* As of now, we don't see a need to know which NDP is active,
9052 * so we just track NDPs via a count. If we later need the status
9053 * of each NDP based on its NDP id, this implementation should be
9054 * changed to use a bit mask.
9055 */
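/*
 * Bit-mask alternative (a sketch only, not wired in): assuming
 * NAN_MAX_NDP_PEER fits in 32 bits and each ndp id were mapped to a
 * fixed slot, per-NDP state could be tracked as:
 *
 *	uint32 ndp_bitmap;
 *	add:    ndp_bitmap |= (1u << slot);
 *	remove: ndp_bitmap &= ~(1u << slot);
 *	active: (ndp_bitmap & (1u << slot)) != 0;
 */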
9056
9057 if (add) {
9058 /* On NAN DP establishment, record the NDP id and restrict concurrent roam. */
9059 for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
9060 if (!nancfg->ndp_id[i]) {
9061 WL_TRACE(("Found empty field\n"));
9062 break;
9063 }
9064 }
9065
9066 if (i == NAN_MAX_NDP_PEER) {
9067 WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
9068 return;
9069 }
9070 if (ndp_id) {
9071 nancfg->nan_dp_count++;
9072 nancfg->ndp_id[i] = ndp_id;
9073 WL_DBG(("%s:Added ndp id = [%d] at i = %d\n",
9074 __FUNCTION__, nancfg->ndp_id[i], i));
9075 wl_cfg80211_concurrent_roam(cfg, true);
9076 }
9077 } else {
9078 ASSERT(nancfg->nan_dp_count);
9079 if (ndp_id) {
9080 for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
9081 if (nancfg->ndp_id[i] == ndp_id) {
9082 nancfg->ndp_id[i] = 0;
9083 WL_DBG(("%s:Removed ndp id = [%d] from i = %d\n",
9084 __FUNCTION__, ndp_id, i));
9085 match_found = true;
9086 if (nancfg->nan_dp_count) {
9087 nancfg->nan_dp_count--;
9088 }
9089 break;
9090 } else {
9091 WL_DBG(("couldn't find entry for ndp id = %d\n",
9092 ndp_id));
9093 }
9094 }
9095 if (match_found == false) {
9096 WL_ERR(("Received unsaved NDP Id = %d !!\n", ndp_id));
9097 } else {
9098 if (nancfg->nan_dp_count == 0) {
9099 wl_cfg80211_concurrent_roam(cfg, false);
9100 wl_cfgnan_immediate_nan_disable_pending(cfg);
9101 }
9102 }
9103
9104 }
9105 }
9106 WL_INFORM_MEM(("NAN_DP_COUNT: %d\n", nancfg->nan_dp_count));
9107 }
9108
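/*
 * Exported helper: reports whether at least one NAN data path is up,
 * based on the host-maintained nan_dp_count.
 */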
9109 bool
9110 wl_cfgnan_is_dp_active(struct net_device *ndev)
9111 {
9112 struct bcm_cfg80211 *cfg;
9113 bool nan_dp;
9114
9115 if (!ndev || !ndev->ieee80211_ptr) {
9116 WL_ERR(("ndev/wdev null\n"));
9117 return false;
9118 }
9119
9120 cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
9121 nan_dp = cfg->nancfg->nan_dp_count ? true : false;
9122
9123 WL_DBG(("NAN DP status:%d\n", nan_dp));
9124 return nan_dp;
9125 }
9126
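/*
 * Host-side NDI bookkeeping: nancfg->ndi[] holds up to max_ndi_supported
 * entries, each tracking ifname, in_use, created and the backing
 * nan_ndev. The helpers below allocate, populate and release those slots.
 */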
9127 static s32
9128 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg)
9129 {
9130 int i;
9131 for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
9132 if (!cfg->nancfg->ndi[i].in_use) {
9133 /* Free interface, use it */
9134 return i;
9135 }
9136 }
9137 /* Don't have a free interface */
9138 return WL_INVALID;
9139 }
9140
9141 static s32
9142 wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name)
9143 {
9144 u16 len;
9145 wl_nancfg_t *nancfg = cfg->nancfg;
9146 if (!name || (idx < 0) || (idx >= cfg->nancfg->max_ndi_supported)) {
9147 return -EINVAL;
9148 }
9149
9150 /* Ensure ifname string size <= IFNAMSIZ including null termination */
9151 len = MIN(strlen(name), (IFNAMSIZ - 1));
9152 strncpy(nancfg->ndi[idx].ifname, name, len);
9153 nancfg->ndi[idx].ifname[len] = '\0';
9154 nancfg->ndi[idx].in_use = true;
9155 nancfg->ndi[idx].created = false;
9156
9157 /* NDI data populated for the given index */
9158 return BCME_OK;
9159 }
9160
9161 static s32
9162 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name)
9163 {
9164 u16 len;
9165 int i;
9166 wl_nancfg_t *nancfg = cfg->nancfg;
9167
9168 if (!name) {
9169 return -EINVAL;
9170 }
9171
9172 len = IFNAMSIZ; /* compare through the terminator for an exact ifname match */
9173 for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
9174 if (strncmp(nancfg->ndi[i].ifname, name, len) == 0) {
9175 bzero(&nancfg->ndi[i].ifname, IFNAMSIZ);
9176 nancfg->ndi[i].in_use = false;
9177 nancfg->ndi[i].created = false;
9178 nancfg->ndi[i].nan_ndev = NULL;
9179 return i;
9180 }
9181 }
9182 return -EINVAL;
9183 }
9184
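/*
 * Delete the kernel interface(s) backing the NDI(s) matching nan_ndev and
 * clear the corresponding host-side ndi data, even when the interface
 * deletion itself fails.
 */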
9185 s32
9186 wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg,
9187 struct net_device *nan_ndev)
9188 {
9189 s32 ret = BCME_OK;
9190 uint8 i = 0;
9191 wl_nancfg_t *nancfg = cfg->nancfg;
9192
9193 for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
9194 if (nancfg->ndi[i].in_use && nancfg->ndi[i].created &&
9195 (nancfg->ndi[i].nan_ndev == nan_ndev)) {
9196 WL_INFORM_MEM(("iface name: %s, cfg->nancfg->ndi[i].nan_ndev = %p"
9197 " and nan_ndev = %p\n",
9198 (char*)nancfg->ndi[i].ifname,
9199 nancfg->ndi[i].nan_ndev, nan_ndev));
9200 ret = _wl_cfg80211_del_if(cfg, nan_ndev, NULL,
9201 (char*)nancfg->ndi[i].ifname);
9202 if (ret) {
9203 WL_ERR(("failed to del ndi [%d]\n", ret));
9204 }
9205 /*
9206 * Even if the interface deletion above failed, continue and
9207 * unconditionally clear the host-side ndi data and states
9208 */
9209 if (wl_cfgnan_del_ndi_data(cfg,
9210 (char*)nancfg->ndi[i].ifname) < 0) {
9211 WL_ERR(("Failed to find matching data for ndi:%s\n",
9212 (char*)nancfg->ndi[i].ifname));
9213 }
9214 }
9215 }
9216 return ret;
9217 }
9218
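/*
 * Query WL_NAN_CMD_CFG_STATUS from FW and copy the returned
 * wl_nan_conf_status_t into nan_status. The response payload is read
 * directly from the sub-command data, as it is not xtlv-packed.
 */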
9219 int
9220 wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status)
9221 {
9222 bcm_iov_batch_buf_t *nan_buf = NULL;
9223 uint16 subcmd_len;
9224 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
9225 bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
9226 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
9227 wl_nan_conf_status_t *nstatus = NULL;
9228 uint32 status;
9229 s32 ret = BCME_OK;
9230 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
9231 struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
9232 NAN_DBG_ENTER();
9233
9234 nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
9235 if (!nan_buf) {
9236 WL_ERR(("%s: memory allocation failed\n", __func__));
9237 ret = BCME_NOMEM;
9238 goto fail;
9239 }
9240
9241 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
9242 nan_buf->count = 0;
9243 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
9244 sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
9245
9246 ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
9247 sizeof(*nstatus), &subcmd_len);
9248 if (unlikely(ret)) {
9249 WL_ERR(("nan_sub_cmd check failed\n"));
9250 goto fail;
9251 }
9252
9253 nstatus = (wl_nan_conf_status_t *)sub_cmd->data;
9254 sub_cmd->id = htod16(WL_NAN_CMD_CFG_STATUS);
9255 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nstatus);
9256 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
9257 nan_buf_size -= subcmd_len;
9258 nan_buf->count = 1;
9259 nan_buf->is_set = false;
9260
9261 bzero(resp_buf, sizeof(resp_buf));
9262 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
9263 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
9264 if (unlikely(ret) || unlikely(status)) {
9265 WL_ERR(("get nan status failed ret %d status %d \n",
9266 ret, status));
9267 goto fail;
9268 }
9269 sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
9270 /* The WL_NAN_CMD_CFG_STATUS response payload is not xtlv-packed */
9271 nstatus = ((wl_nan_conf_status_t *)&sub_cmd_resp->data[0]);
9272 ret = memcpy_s(nan_status, sizeof(wl_nan_conf_status_t),
9273 nstatus, sizeof(wl_nan_conf_status_t));
9274 if (ret != BCME_OK) {
9275 WL_ERR(("Failed to copy tx match filter\n"));
9276 goto fail;
9277 }
9278
9279 fail:
9280 if (nan_buf) {
9281 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
9282 }
9283 NAN_DBG_EXIT();
9284 return ret;
9285 }
9286
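/*
 * Pretty-print one availability schedule record as a single log line of
 * the form (values below are illustrative only):
 *
 *	Map ID:0, 512/32, Slot#:4 006|006|149|149|
 *
 * i.e. map id, period/slot duration, slot count, then the control channel
 * of each slot ("000" when that slot's chanspec is malformed).
 */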
9287 s32
9288 wl_nan_print_avail_stats(const uint8 *data)
9289 {
9290 int idx;
9291 s32 ret = BCME_OK;
9292 int s_chan = 0;
9293 char pbuf[NAN_IOCTL_BUF_SIZE_MED];
9294 const wl_nan_stats_sched_t *sched = (const wl_nan_stats_sched_t *)data;
9295 #define SLOT_PRINT_SIZE 4
9296
9297 char *buf = pbuf;
9298 int remained_len = 0, bytes_written = 0;
9299 bzero(pbuf, sizeof(pbuf));
9300
9301 if ((sched->num_slot * SLOT_PRINT_SIZE) > (sizeof(pbuf)-1)) {
9302 WL_ERR(("overflowed slot number %d detected\n",
9303 sched->num_slot));
9304 ret = BCME_BUFTOOSHORT;
9305 goto exit;
9306 }
9307
9308 remained_len = NAN_IOCTL_BUF_SIZE_MED;
9309 bytes_written = snprintf(buf, remained_len, "Map ID:%u, %u/%u, Slot#:%u ",
9310 sched->map_id, sched->period, sched->slot_dur, sched->num_slot);
9311
9312 for (idx = 0; idx < sched->num_slot; idx++) {
9313 const wl_nan_stats_sched_slot_t *slot;
9314 slot = &sched->slot[idx];
9315 s_chan = 0;
9316
9317 if (!wf_chspec_malformed(slot->chanspec)) {
9318 s_chan = wf_chspec_ctlchan(slot->chanspec);
9319 }
9320
9321 buf += bytes_written;
9322 remained_len -= bytes_written;
9323 bytes_written = snprintf(buf, remained_len, "%03d|", s_chan);
9324
9325 }
9326 WL_INFORM_MEM(("%s\n", pbuf));
9327 exit:
9328 return ret;
9329 }
9330
9331 static int
9332 wl_nan_print_stats_tlvs(void *ctx, const uint8 *data, uint16 type, uint16 len)
9333 {
9334 int err = BCME_OK;
9335
9336 switch (type) {
9337 /* Avail stats xtlvs */
9338 case WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED:
9339 err = wl_nan_print_avail_stats(data);
9340 break;
9341 default:
9342 err = BCME_BADARG;
9343 WL_ERR(("Unknown xtlv type received: %x\n", type));
9344 break;
9345 }
9346
9347 return err;
9348 }
9349
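/*
 * Fetch NAN stats from FW and log the local availability schedule: only
 * the NAN_AVAIL module is requested via modules_btmap, and the returned
 * xtlvs are dispatched through wl_nan_print_stats_tlvs().
 */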
9350 int
9351 wl_cfgnan_get_stats(struct bcm_cfg80211 *cfg)
9352 {
9353 bcm_iov_batch_buf_t *nan_buf = NULL;
9354 uint16 subcmd_len;
9355 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
9356 bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
9357 uint8 *resp_buf = NULL;
9358 wl_nan_cmn_get_stat_t *get_stat = NULL;
9359 wl_nan_cmn_stat_t *stats = NULL;
9360 uint32 status;
9361 s32 ret = BCME_OK;
9362 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
9363 NAN_DBG_ENTER();
9364
9365 nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
9366 resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
9367 if (!nan_buf || !resp_buf) {
9368 WL_ERR(("%s: memory allocation failed\n", __func__));
9369 ret = BCME_NOMEM;
9370 goto fail;
9371 }
9372
9373 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
9374 nan_buf->count = 0;
9375 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
9376 sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
9377
9378 ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
9379 sizeof(*get_stat), &subcmd_len);
9380 if (unlikely(ret)) {
9381 WL_ERR(("nan_sub_cmd check failed\n"));
9382 goto fail;
9383 }
9384
9385 get_stat = (wl_nan_cmn_get_stat_t *)sub_cmd->data;
9386 /* get only local availability stats */
9387 get_stat->modules_btmap = (1 << NAN_AVAIL);
9388 get_stat->operation = WLA_NAN_STATS_GET;
9389
9390 sub_cmd->id = htod16(WL_NAN_CMD_GEN_STATS);
9391 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*get_stat);
9392 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
9393 nan_buf_size -= subcmd_len;
9394 nan_buf->count = 1;
9395 nan_buf->is_set = false;
9396
9397 ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg),
9398 cfg, nan_buf, nan_buf_size, &status,
9399 (void*)resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
9400 if (unlikely(ret) || unlikely(status)) {
9401 WL_ERR(("get nan stats failed ret %d status %d \n",
9402 ret, status));
9403 goto fail;
9404 }
9405
9406 sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
9407
9408 stats = (wl_nan_cmn_stat_t *)&sub_cmd_resp->data[0];
9409
9410 if (stats->n_stats) {
9411 WL_INFORM_MEM((" == Aware Local Avail Schedule ==\n"));
9412 ret = bcm_unpack_xtlv_buf((void *)&stats->n_stats,
9413 (const uint8 *)&stats->stats_tlvs,
9414 stats->totlen - 8, BCM_IOV_CMD_OPT_ALIGN32,
9415 wl_nan_print_stats_tlvs);
9416 }
9417 fail:
9418 if (nan_buf) {
9419 MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
9420 }
9421 if (resp_buf) {
9422 MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
9423 }
9424
9425 NAN_DBG_EXIT();
9426 return ret;
9427 }
9428
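/*
 * Allocate and initialize the per-cfg NAN context (nancfg): the nan_sync
 * mutex, the event wait queues and the delayed nan_disable work.
 * wl_cfgnan_detach() below is the counterpart; it cancels any pending
 * disable work before freeing the context.
 */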
9429 int
9430 wl_cfgnan_attach(struct bcm_cfg80211 *cfg)
9431 {
9432 int err = BCME_OK;
9433 wl_nancfg_t *nancfg = NULL;
9434
9435 if (cfg) {
9436 cfg->nancfg = (wl_nancfg_t *)MALLOCZ(cfg->osh, sizeof(wl_nancfg_t));
9437 if (cfg->nancfg == NULL) {
9438 err = BCME_NOMEM;
9439 goto done;
9440 }
9441 cfg->nancfg->cfg = cfg;
9442 } else {
9443 err = BCME_BADARG;
9444 goto done;
9445 }
9446
9447 nancfg = cfg->nancfg;
9448 mutex_init(&nancfg->nan_sync);
9449 init_waitqueue_head(&nancfg->nan_event_wait);
9450 INIT_DELAYED_WORK(&nancfg->nan_disable, wl_cfgnan_delayed_disable);
9451 nancfg->nan_dp_state = NAN_DP_STATE_DISABLED;
9452 init_waitqueue_head(&nancfg->ndp_if_change_event);
9453
9454 done:
9455 return err;
9456
9457 }
9458
9459 void
9460 wl_cfgnan_detach(struct bcm_cfg80211 *cfg)
9461 {
9462 if (cfg && cfg->nancfg) {
9463 if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
9464 WL_DBG(("Cancel nan_disable work\n"));
9465 DHD_NAN_WAKE_UNLOCK(cfg->pub);
9466 cancel_delayed_work_sync(&cfg->nancfg->nan_disable);
9467 }
9468 MFREE(cfg->osh, cfg->nancfg, sizeof(wl_nancfg_t));
9469 cfg->nancfg = NULL;
9470 }
9471
9472 }
9473 #endif /* WL_NAN */
9474