/*
 * Neighbor Awareness Networking
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#ifdef WL_NAN
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmwifi_channels.h>
#include <nan.h>
#include <bcmiov.h>
#include <net/rtnetlink.h>

#include <wl_cfg80211.h>
#include <wl_cfgscan.h>
#include <wl_android.h>
#include <wl_cfgnan.h>

#if defined(BCMDONGLEHOST)
#include <dngl_stats.h>
#include <dhd.h>
#endif /* BCMDONGLEHOST */
#include <wl_cfgvendor.h>
#include <bcmbloom.h>
#include <wl_cfgp2p.h>
#include <wl_cfgvif.h>
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif /* RTT_SUPPORT */
#include <bcmstdlib_s.h>

#define NAN_RANGE_REQ_EVNT 1
#define NAN_RAND_MAC_RETRIES 10
#define NAN_SCAN_DWELL_TIME_DELTA_MS 10

#ifdef WL_NAN_DISC_CACHE
/* Disc Cache Parameters update Flags */
#define NAN_DISC_CACHE_PARAM_SDE_CONTROL	0x0001
static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
	u16 *disc_cache_update_flags);
static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 * cfg, uint8 local_subid);
static nan_disc_result_cache * wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg,
	uint8 remote_pubid, struct ether_addr *peer);
#endif /* WL_NAN_DISC_CACHE */

static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);
static int wl_cfgnan_get_capability(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
	nan_event_data_t *nan_event_data);
void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr);
static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg);
static void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg);
static s32 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg);
static int wl_cfgnan_init(struct bcm_cfg80211 *cfg);
static int wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate);
static void wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
	nan_data_path_id ndp_id);
static void wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr, nan_peer_dp_state_t state);
static nan_ndp_peer_t* wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr);
static int wl_cfgnan_disable(struct bcm_cfg80211 *cfg);
static s32 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name);
static s32 wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name);

#ifdef RTT_SUPPORT
static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id);
static int32 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance);
static void wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t *rng_inst);
static void wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t *ranging_inst);
static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 * cfg,
	nan_ranging_inst_t *rng_inst, int reason);
static s32 wl_cfgnan_handle_dp_ranging_concurrency(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer, int reason);
static void wl_cfgnan_terminate_all_obsolete_ranging_sessions(struct bcm_cfg80211 *cfg);
static bool wl_ranging_geofence_session_with_peer(struct bcm_cfg80211 *cfg,
	struct ether_addr *peer_addr);
static void wl_cfgnan_reset_remove_ranging_instance(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t *ranging_inst);
static void wl_cfgnan_remove_ranging_instance(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t *ranging_inst);
#endif /* RTT_SUPPORT */

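/*
 * Note: the *_to_str() helpers below rely on the C2S() macro (defined in the
 * cfg80211 layer headers), which is assumed to expand roughly to
 * "case x: id2str = #x;" so that each enum value maps to its own name string.
 * For example, nan_role_to_str(WL_NAN_ROLE_MASTER) is expected to return
 * "WL_NAN_ROLE_MASTER".
 */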
static const char *
nan_role_to_str(u8 role)
{
	const char *id2str;

	switch (role) {
		C2S(WL_NAN_ROLE_AUTO);
			break;
		C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC);
			break;
		C2S(WL_NAN_ROLE_NON_MASTER_SYNC);
			break;
		C2S(WL_NAN_ROLE_MASTER);
			break;
		C2S(WL_NAN_ROLE_ANCHOR_MASTER);
			break;
		default:
			id2str = "WL_NAN_ROLE_UNKNOWN";
	}

	return id2str;
}

const char *
nan_event_to_str(u16 cmd)
{
	const char *id2str;

	switch (cmd) {
	C2S(WL_NAN_EVENT_START);
		break;
	C2S(WL_NAN_EVENT_JOIN);
		break;
	C2S(WL_NAN_EVENT_ROLE);
		break;
	C2S(WL_NAN_EVENT_SCAN_COMPLETE);
		break;
	C2S(WL_NAN_EVENT_DISCOVERY_RESULT);
		break;
	C2S(WL_NAN_EVENT_REPLIED);
		break;
	C2S(WL_NAN_EVENT_TERMINATED);
		break;
	C2S(WL_NAN_EVENT_RECEIVE);
		break;
	C2S(WL_NAN_EVENT_STATUS_CHG);
		break;
	C2S(WL_NAN_EVENT_MERGE);
		break;
	C2S(WL_NAN_EVENT_STOP);
		break;
	C2S(WL_NAN_EVENT_P2P);
		break;
	C2S(WL_NAN_EVENT_WINDOW_BEGIN_P2P);
		break;
	C2S(WL_NAN_EVENT_WINDOW_BEGIN_MESH);
		break;
	C2S(WL_NAN_EVENT_WINDOW_BEGIN_IBSS);
		break;
	C2S(WL_NAN_EVENT_WINDOW_BEGIN_RANGING);
		break;
	C2S(WL_NAN_EVENT_POST_DISC);
		break;
	C2S(WL_NAN_EVENT_DATA_IF_ADD);
		break;
	C2S(WL_NAN_EVENT_DATA_PEER_ADD);
		break;
	C2S(WL_NAN_EVENT_PEER_DATAPATH_IND);
		break;
	C2S(WL_NAN_EVENT_DATAPATH_ESTB);
		break;
	C2S(WL_NAN_EVENT_SDF_RX);
		break;
	C2S(WL_NAN_EVENT_DATAPATH_END);
		break;
	C2S(WL_NAN_EVENT_BCN_RX);
		break;
	C2S(WL_NAN_EVENT_PEER_DATAPATH_RESP);
		break;
	C2S(WL_NAN_EVENT_PEER_DATAPATH_CONF);
		break;
	C2S(WL_NAN_EVENT_RNG_REQ_IND);
		break;
	C2S(WL_NAN_EVENT_RNG_RPT_IND);
		break;
	C2S(WL_NAN_EVENT_RNG_TERM_IND);
		break;
	C2S(WL_NAN_EVENT_PEER_DATAPATH_SEC_INST);
		break;
	C2S(WL_NAN_EVENT_TXS);
		break;
	C2S(WL_NAN_EVENT_DW_START);
		break;
	C2S(WL_NAN_EVENT_DW_END);
		break;
	C2S(WL_NAN_EVENT_CHAN_BOUNDARY);
		break;
	C2S(WL_NAN_EVENT_MR_CHANGED);
		break;
	C2S(WL_NAN_EVENT_RNG_RESP_IND);
		break;
	C2S(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF);
		break;
	C2S(WL_NAN_EVENT_PEER_SCHED_REQ);
		break;
	C2S(WL_NAN_EVENT_PEER_SCHED_RESP);
		break;
	C2S(WL_NAN_EVENT_PEER_SCHED_CONF);
		break;
	C2S(WL_NAN_EVENT_SENT_DATAPATH_END);
		break;
	C2S(WL_NAN_EVENT_SLOT_START);
		break;
	C2S(WL_NAN_EVENT_SLOT_END);
		break;
	C2S(WL_NAN_EVENT_HOST_ASSIST_REQ);
		break;
	C2S(WL_NAN_EVENT_RX_MGMT_FRM);
		break;
	C2S(WL_NAN_EVENT_DISC_CACHE_TIMEOUT);
		break;
	C2S(WL_NAN_EVENT_OOB_AF_TXS);
		break;
	C2S(WL_NAN_EVENT_OOB_AF_RX);
		break;
	C2S(WL_NAN_EVENT_INVALID);
		break;

	default:
		id2str = "WL_NAN_EVENT_UNKNOWN";
	}

	return id2str;
}

static const char *
nan_frm_type_to_str(u16 frm_type)
{
	const char *id2str;

	switch (frm_type) {
	C2S(WL_NAN_FRM_TYPE_PUBLISH);
		break;
	C2S(WL_NAN_FRM_TYPE_SUBSCRIBE);
		break;
	C2S(WL_NAN_FRM_TYPE_FOLLOWUP);
		break;

	C2S(WL_NAN_FRM_TYPE_DP_REQ);
		break;
	C2S(WL_NAN_FRM_TYPE_DP_RESP);
		break;
	C2S(WL_NAN_FRM_TYPE_DP_CONF);
		break;
	C2S(WL_NAN_FRM_TYPE_DP_INSTALL);
		break;
	C2S(WL_NAN_FRM_TYPE_DP_END);
		break;

	C2S(WL_NAN_FRM_TYPE_SCHED_REQ);
		break;
	C2S(WL_NAN_FRM_TYPE_SCHED_RESP);
		break;
	C2S(WL_NAN_FRM_TYPE_SCHED_CONF);
		break;
	C2S(WL_NAN_FRM_TYPE_SCHED_UPD);
		break;

	C2S(WL_NAN_FRM_TYPE_RNG_REQ);
		break;
	C2S(WL_NAN_FRM_TYPE_RNG_RESP);
		break;
	C2S(WL_NAN_FRM_TYPE_RNG_TERM);
		break;
	C2S(WL_NAN_FRM_TYPE_RNG_REPORT);
		break;

	default:
		id2str = "WL_NAN_FRM_TYPE_UNKNOWN";
	}

	return id2str;
}

static const char *
nan_event_cause_to_str(u8 cause)
{
	const char *id2str;

	switch (cause) {
	C2S(WL_NAN_DP_TERM_WITH_INACTIVITY);
		break;
	C2S(WL_NAN_DP_TERM_WITH_FSM_DESTROY);
		break;
	C2S(WL_NAN_DP_TERM_WITH_PEER_DP_END);
		break;
	C2S(WL_NAN_DP_TERM_WITH_STALE_NDP);
		break;
	C2S(WL_NAN_DP_TERM_WITH_DISABLE);
		break;
	C2S(WL_NAN_DP_TERM_WITH_NDI_DEL);
		break;
	C2S(WL_NAN_DP_TERM_WITH_PEER_HB_FAIL);
		break;
	C2S(WL_NAN_DP_TERM_WITH_HOST_IOVAR);
		break;
	C2S(WL_NAN_DP_TERM_WITH_ESTB_FAIL);
		break;
	C2S(WL_NAN_DP_TERM_WITH_SCHED_REJECT);
		break;

	default:
		id2str = "WL_NAN_EVENT_CAUSE_UNKNOWN";
	}

	return id2str;
}

static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, bcm_iov_batch_buf_t *nan_buf,
	uint16 nan_buf_size, uint32 *status, uint8 *resp_buf,
	uint16 resp_buf_len);
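/*
 * Allocate a service instance id from the svc_inst_id_mask bitmap.
 * Ids are 1-based (bit i maps to id i + 1) and allocation resumes from the
 * last assigned id so recently used ids are not handed out again right away.
 * Illustrative usage sketch (not a call site in this excerpt):
 *
 *	uint8 inst_id = 0;
 *	ret = wl_cfgnan_generate_inst_id(cfg, &inst_id);
 *	...
 *	wl_cfgnan_remove_inst_id(cfg, inst_id);
 */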
int
wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
{
	s32 ret = BCME_OK;
	uint8 i = 0;
	wl_nancfg_t *nancfg = cfg->nancfg;

	if (p_inst_id == NULL) {
		WL_ERR(("Invalid arguments\n"));
		ret = -EINVAL;
		goto exit;
	}

	if (nancfg->inst_id_start == NAN_ID_MAX) {
		WL_ERR(("Consumed all IDs, resetting the counter\n"));
		nancfg->inst_id_start = 0;
	}

	for (i = nancfg->inst_id_start; i < NAN_ID_MAX; i++) {
		if (isclr(nancfg->svc_inst_id_mask, i)) {
			setbit(nancfg->svc_inst_id_mask, i);
			*p_inst_id = i + 1;
			nancfg->inst_id_start = *p_inst_id;
			WL_DBG(("Instance ID=%d\n", *p_inst_id));
			goto exit;
		}
	}
	WL_ERR(("Allocated maximum IDs\n"));
	ret = BCME_NORESOURCE;
exit:
	return ret;
}

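/*
 * Release a previously allocated service instance id: clears bit (inst_id - 1)
 * in svc_inst_id_mask so that wl_cfgnan_generate_inst_id() can reuse it.
 */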
int
wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id)
{
	s32 ret = BCME_OK;
	WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__, inst_id));
	clrbit(cfg->nancfg->svc_inst_id_mask, inst_id-1);
	return ret;
}
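
/*
 * Parse the Service Descriptor Extension Attribute (SDEA) out of a NAN event
 * buffer. On success this fills tlv_data->sde_control_flag and, when the
 * service-update-indicator flag is set, allocates and copies
 * tlv_data->sde_svc_info. On any failure the partially allocated sde_svc_info
 * buffer is freed before returning, so callers only need to check the return
 * code.
 */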
s32 wl_cfgnan_parse_sdea_data(osl_t *osh, const uint8 *p_attr,
		uint16 len, nan_event_data_t *tlv_data)
{
	const wifi_nan_svc_desc_ext_attr_t *nan_svc_desc_ext_attr = NULL;
	uint8 offset;
	s32 ret = BCME_OK;

	/* service descriptor ext attributes */
	nan_svc_desc_ext_attr = (const wifi_nan_svc_desc_ext_attr_t *)p_attr;

	/* attribute ID */
	WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr->id));

	/* attribute length */
	WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
	if (nan_svc_desc_ext_attr->instance_id == tlv_data->pub_id) {
		tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
	}
	offset = sizeof(*nan_svc_desc_ext_attr);
	if (offset > len) {
		WL_ERR(("Invalid event buffer len\n"));
		ret = BCME_BUFTOOSHORT;
		goto fail;
	}
	p_attr += offset;
	len -= offset;

	if (tlv_data->sde_control_flag & NAN_SC_RANGE_LIMITED) {
		WL_TRACE(("> svc_control: range limited present\n"));
	}
	if (tlv_data->sde_control_flag & NAN_SDE_CF_SVC_UPD_IND_PRESENT) {
		WL_TRACE(("> svc_control: sdea svc specific info present\n"));
		tlv_data->sde_svc_info.dlen = (p_attr[1] | (p_attr[2] << 8));
		WL_TRACE(("> sdea svc info len: 0x%02x\n", tlv_data->sde_svc_info.dlen));
		if (!tlv_data->sde_svc_info.dlen ||
				tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
			/* must be able to handle null msg which is not error */
			tlv_data->sde_svc_info.dlen = 0;
			WL_ERR(("sde data length is invalid\n"));
			ret = BCME_BADLEN;
			goto fail;
		}

		if (tlv_data->sde_svc_info.dlen > 0) {
			tlv_data->sde_svc_info.data = MALLOCZ(osh, tlv_data->sde_svc_info.dlen);
			if (!tlv_data->sde_svc_info.data) {
				WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
				tlv_data->sde_svc_info.dlen = 0;
				ret = BCME_NOMEM;
				goto fail;
			}
			/* advance read pointer, accounting for the size of the Service Update Indicator */
			offset = sizeof(tlv_data->sde_svc_info.dlen) - 1;
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			ret = memcpy_s(tlv_data->sde_svc_info.data, tlv_data->sde_svc_info.dlen,
				p_attr, tlv_data->sde_svc_info.dlen);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy sde_svc_info\n"));
				goto fail;
			}
		} else {
			/* must be able to handle null msg which is not error */
			tlv_data->sde_svc_info.dlen = 0;
			WL_DBG(("%s: sdea svc info length is zero, null info data\n",
				__FUNCTION__));
		}
	}
	return ret;
fail:
	if (tlv_data->sde_svc_info.data) {
		MFREE(osh, tlv_data->sde_svc_info.data,
				tlv_data->sde_svc_info.dlen);
		tlv_data->sde_svc_info.data = NULL;
	}

	WL_DBG(("Parse SDEA event data, status = %d\n", ret));
	return ret;
}

/*
 * This attribute contains some mandatory fields and some optional fields
 * depending on the content of the service discovery request.
 */
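/*
 * The mandatory header is wifi_nan_svc_descriptor_attr_t; the optional fields
 * that may follow it (in this order, gated by bits in svc_control) are:
 * binding bitmap, matching filter, service response filter, service specific
 * info and the discovery-range-limited flag. Each optional blob is copied into
 * the corresponding tlv_data member, and everything allocated here is freed on
 * the common fail: path.
 */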
s32
wl_cfgnan_parse_sda_data(osl_t *osh, const uint8 *p_attr,
		uint16 len, nan_event_data_t *tlv_data)
{
	uint8 svc_control = 0, offset = 0;
	s32 ret = BCME_OK;
	const wifi_nan_svc_descriptor_attr_t *nan_svc_desc_attr = NULL;

	/* service descriptor attributes */
	nan_svc_desc_attr = (const wifi_nan_svc_descriptor_attr_t *)p_attr;
	/* attribute ID */
	WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr->id));

	/* attribute length */
	WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));

	/* service ID */
	ret = memcpy_s(tlv_data->svc_name, sizeof(tlv_data->svc_name),
		nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy svc_hash_name:\n"));
		return ret;
	}
	WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));

	/* local instance ID */
	tlv_data->local_inst_id = nan_svc_desc_attr->instance_id;
	WL_TRACE(("> local instance id: 0x%02x\n", tlv_data->local_inst_id));

	/* requestor instance ID */
	tlv_data->requestor_id = nan_svc_desc_attr->requestor_id;
	WL_TRACE(("> requestor id: 0x%02x\n", tlv_data->requestor_id));

	/* service control */
	svc_control = nan_svc_desc_attr->svc_control;
	if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) {
		WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
	} else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE) {
		WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
	} else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_FOLLOWUP) {
		WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
	}
	offset = sizeof(*nan_svc_desc_attr);
	if (offset > len) {
		WL_ERR(("Invalid event buffer len\n"));
		ret = BCME_BUFTOOSHORT;
		goto fail;
	}
	p_attr += offset;
	len -= offset;

	/*
	 * optional fields:
	 * these must appear in the order defined by the service descriptor
	 * attribute format
	 */

	/* binding bitmap */
	if (svc_control & NAN_SC_BINDING_BITMAP_PRESENT) {
		uint16 bitmap = 0;
		WL_TRACE(("> svc_control: binding bitmap present\n"));

		/* Copy binding bitmap */
		ret = memcpy_s(&bitmap, sizeof(bitmap),
			p_attr, NAN_BINDING_BITMAP_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy bit map\n"));
			return ret;
		}
		WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));

		if (NAN_BINDING_BITMAP_LEN > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += NAN_BINDING_BITMAP_LEN;
		len -= NAN_BINDING_BITMAP_LEN;
	}

	/* matching filter */
	if (svc_control & NAN_SC_MATCHING_FILTER_PRESENT) {
		WL_TRACE(("> svc_control: matching filter present\n"));

		tlv_data->tx_match_filter.dlen = *p_attr++;
		WL_TRACE(("> matching filter len: 0x%02x\n",
				tlv_data->tx_match_filter.dlen));

		if (!tlv_data->tx_match_filter.dlen ||
				tlv_data->tx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
			tlv_data->tx_match_filter.dlen = 0;
			WL_ERR(("tx match filter length is invalid\n"));
			ret = -EINVAL;
			goto fail;
		}
		tlv_data->tx_match_filter.data =
			MALLOCZ(osh, tlv_data->tx_match_filter.dlen);
		if (!tlv_data->tx_match_filter.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			tlv_data->tx_match_filter.dlen = 0;
			ret = -ENOMEM;
			goto fail;
		}
		ret = memcpy_s(tlv_data->tx_match_filter.data, tlv_data->tx_match_filter.dlen,
				p_attr, tlv_data->tx_match_filter.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy tx match filter data\n"));
			goto fail;
		}
		/* advance read pointer */
		offset = tlv_data->tx_match_filter.dlen;
		if (offset > len) {
			WL_ERR(("Invalid event buffer\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
	}

	/* service response filter */
	if (svc_control & NAN_SC_SR_FILTER_PRESENT) {
		WL_TRACE(("> svc_control: service response filter present\n"));

		tlv_data->rx_match_filter.dlen = *p_attr++;
		WL_TRACE(("> sr match filter len: 0x%02x\n",
				tlv_data->rx_match_filter.dlen));

		if (!tlv_data->rx_match_filter.dlen ||
				tlv_data->rx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
			tlv_data->rx_match_filter.dlen = 0;
			WL_ERR(("%s: sr matching filter length is invalid\n",
					__FUNCTION__));
			ret = BCME_BADLEN;
			goto fail;
		}
		tlv_data->rx_match_filter.data =
			MALLOCZ(osh, tlv_data->rx_match_filter.dlen);
		if (!tlv_data->rx_match_filter.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			tlv_data->rx_match_filter.dlen = 0;
			ret = BCME_NOMEM;
			goto fail;
		}

		ret = memcpy_s(tlv_data->rx_match_filter.data, tlv_data->rx_match_filter.dlen,
				p_attr, tlv_data->rx_match_filter.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy rx match filter data\n"));
			goto fail;
		}

		/* advance read pointer */
		offset = tlv_data->rx_match_filter.dlen;
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
	}

	/* service specific info */
	if (svc_control & NAN_SC_SVC_INFO_PRESENT) {
		WL_TRACE(("> svc_control: svc specific info present\n"));

		tlv_data->svc_info.dlen = *p_attr++;
		WL_TRACE(("> svc info len: 0x%02x\n", tlv_data->svc_info.dlen));

		if (!tlv_data->svc_info.dlen ||
				tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
			/* must be able to handle null msg which is not error */
			tlv_data->svc_info.dlen = 0;
			WL_ERR(("sde data length is invalid\n"));
			ret = BCME_BADLEN;
			goto fail;
		}

		if (tlv_data->svc_info.dlen > 0) {
			tlv_data->svc_info.data =
				MALLOCZ(osh, tlv_data->svc_info.dlen);
			if (!tlv_data->svc_info.data) {
				WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
				tlv_data->svc_info.dlen = 0;
				ret = BCME_NOMEM;
				goto fail;
			}
			ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
					p_attr, tlv_data->svc_info.dlen);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy svc info\n"));
				goto fail;
			}

			/* advance read pointer */
			offset = tlv_data->svc_info.dlen;
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
		} else {
			/* must be able to handle null msg which is not error */
			tlv_data->svc_info.dlen = 0;
			WL_TRACE(("%s: svc info length is zero, null info data\n",
					__FUNCTION__));
		}
	}

	/*
	 * discovery range limited:
	 * If set to 1, the pub/sub msg is limited in range to close proximity.
	 * If set to 0, the pub/sub msg is not limited in range.
	 * Valid only when the message is either a publish or a subscribe.
	 */
	if (svc_control & NAN_SC_RANGE_LIMITED) {
		if (((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) ||
				((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE)) {
			WL_TRACE(("> svc_control: range limited present\n"));
		} else {
			WL_TRACE(("range limited is only valid on pub or sub\n"));
		}

		/* TODO: send up */

		/* advance read pointer */
		p_attr++;
	}
	return ret;
fail:
	if (tlv_data->tx_match_filter.data) {
		MFREE(osh, tlv_data->tx_match_filter.data,
				tlv_data->tx_match_filter.dlen);
		tlv_data->tx_match_filter.data = NULL;
	}
	if (tlv_data->rx_match_filter.data) {
		MFREE(osh, tlv_data->rx_match_filter.data,
				tlv_data->rx_match_filter.dlen);
		tlv_data->rx_match_filter.data = NULL;
	}
	if (tlv_data->svc_info.data) {
		MFREE(osh, tlv_data->svc_info.data,
				tlv_data->svc_info.dlen);
		tlv_data->svc_info.data = NULL;
	}

	WL_DBG(("Parse SDA event data, status = %d\n", ret));
	return ret;
}

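/*
 * Dispatch helper for service-discovery XTLVs: depending on "type" the buffer
 * starts with wl_nan_event_disc_result_t (discovery result),
 * wl_nan_ev_receive_t (received follow-up), nan2_pub_act_frame_t (raw SDF) or
 * wl_nan_event_replied_t (replied event). For the first two, attr_list[] is
 * walked attribute by attribute and each SDA/SDEA entry is handed to
 * wl_cfgnan_parse_sda_data()/wl_cfgnan_parse_sdea_data().
 */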
static s32
wl_cfgnan_parse_sd_attr_data(osl_t *osh, uint16 len, const uint8 *data,
	nan_event_data_t *tlv_data, uint16 type) {
	const uint8 *p_attr = data;
	uint16 offset = 0;
	s32 ret = BCME_OK;
	const wl_nan_event_disc_result_t *ev_disc = NULL;
	const wl_nan_event_replied_t *ev_replied = NULL;
	const wl_nan_ev_receive_t *ev_fup = NULL;

	/*
	 * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
	 */
	if (type == WL_NAN_XTLV_SD_DISC_RESULTS) {
		u8 iter;
		ev_disc = (const wl_nan_event_disc_result_t *)p_attr;

		WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));

		tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
		tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
		tlv_data->publish_rssi = ev_disc->publish_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
				&ev_disc->pub_mac, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
		WL_TRACE(("subscribe id: %d\n", ev_disc->sub_id));
		WL_TRACE(("publish mac addr: " MACDBG "\n",
				MAC2STRDBG(ev_disc->pub_mac.octet)));
		WL_TRACE(("publish rssi: %d\n", (int8)ev_disc->publish_rssi));
		WL_TRACE(("attribute no: %d\n", ev_disc->attr_num));
		WL_TRACE(("attribute len: %d\n", (uint16)ev_disc->attr_list_len));

		/* advance to the service descriptor */
		offset = OFFSETOF(wl_nan_event_disc_result_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;

		iter = ev_disc->attr_num;
		while (iter) {
			if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sda_data failed,"
							"error = %d \n", ret));
					goto fail;
				}
			}

			if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
							"error = %d \n", ret));
					goto fail;
				}
			}
			offset = (sizeof(*p_attr) +
					sizeof(ev_disc->attr_list_len) +
					(p_attr[1] | (p_attr[2] << 8)));
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			iter--;
		}
	} else if (type == WL_NAN_XTLV_SD_FUP_RECEIVED) {
		uint8 iter;
		ev_fup = (const wl_nan_ev_receive_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));

		tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
		tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
		tlv_data->fup_rssi = ev_fup->fup_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
				&ev_fup->remote_addr, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("local id: %d\n", ev_fup->local_id));
		WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
		WL_TRACE(("peer mac addr: " MACDBG "\n",
				MAC2STRDBG(ev_fup->remote_addr.octet)));
		WL_TRACE(("peer rssi: %d\n", (int8)ev_fup->fup_rssi));
		WL_TRACE(("attribute no: %d\n", ev_fup->attr_num));
		WL_TRACE(("attribute len: %d\n", ev_fup->attr_list_len));

		/* advance to the service descriptor which is attr_list[0] */
		offset = OFFSETOF(wl_nan_ev_receive_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;

		iter = ev_fup->attr_num;
		while (iter) {
			if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sda_data failed,"
							"error = %d \n", ret));
					goto fail;
				}
			}

			if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
							"error = %d \n", ret));
					goto fail;
				}
			}
			offset = (sizeof(*p_attr) +
					sizeof(ev_fup->attr_list_len) +
					(p_attr[1] | (p_attr[2] << 8)));
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			iter--;
		}
	} else if (type == WL_NAN_XTLV_SD_SDF_RX) {
		/*
		 * SDF followed by nan2_pub_act_frame_t and wifi_nan_svc_descriptor_attr_t,
		 * and svc controls are optional.
		 */
		const nan2_pub_act_frame_t *nan_pub_af =
			(const nan2_pub_act_frame_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));

		/* nan2_pub_act_frame_t */
		WL_TRACE(("pub category: 0x%02x\n", nan_pub_af->category_id));
		WL_TRACE(("pub action: 0x%02x\n", nan_pub_af->action_field));
		WL_TRACE(("nan oui: %2x-%2x-%2x\n",
				nan_pub_af->oui[0], nan_pub_af->oui[1], nan_pub_af->oui[2]));
		WL_TRACE(("oui type: 0x%02x\n", nan_pub_af->oui_type));
		WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af->oui_sub_type));

		offset = sizeof(*nan_pub_af);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
	} else if (type == WL_NAN_XTLV_SD_REPLIED) {
		ev_replied = (const wl_nan_event_replied_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));

		tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
		tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
		tlv_data->sub_rssi = ev_replied->sub_rssi;
		ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
				&ev_replied->sub_mac, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy remote nmi\n"));
			goto fail;
		}

		WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
		WL_TRACE(("subscribe id: %d\n", ev_replied->sub_id));
		WL_TRACE(("Subscriber mac addr: " MACDBG "\n",
				MAC2STRDBG(ev_replied->sub_mac.octet)));
		WL_TRACE(("subscribe rssi: %d\n", (int8)ev_replied->sub_rssi));
		WL_TRACE(("attribute no: %d\n", ev_replied->attr_num));
		WL_TRACE(("attribute len: %d\n", (uint16)ev_replied->attr_list_len));

		/* advance to the service descriptor which is attr_list[0] */
		offset = OFFSETOF(wl_nan_event_replied_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
		ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
		if (unlikely(ret)) {
			WL_ERR(("wl_cfgnan_parse_sda_data failed,"
				"error = %d \n", ret));
		}
	}

fail:
	return ret;
}

/* Based on each case of tlv type id, fill into tlv data */
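/*
 * This callback matches the bcm_xtlv unpack callback shape used elsewhere in
 * the driver; a hedged sketch of how it is typically wired up (the exact call
 * site and option flags may differ) is:
 *
 *	nan_parse_event_ctx_t ctx;
 *	ctx.cfg = cfg;
 *	ctx.nan_evt_data = tlv_data;
 *	ret = bcm_unpack_xtlv_buf(&ctx, tlv_buf, tlv_len,
 *			BCM_XTLV_OPTION_ALIGN32, wl_cfgnan_set_vars_cbfn);
 */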
static int
wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
{
	nan_parse_event_ctx_t *ctx_tlv_data = ((nan_parse_event_ctx_t *)(ctx));
	nan_event_data_t *tlv_data = ((nan_event_data_t *)(ctx_tlv_data->nan_evt_data));
	int ret = BCME_OK;

	NAN_DBG_ENTER();
	if (!data || !len) {
		WL_ERR(("data length is invalid\n"));
		ret = BCME_ERROR;
		goto fail;
	}

	switch (type) {
	/*
	 * Service descriptor attributes (including service control) need to be
	 * parsed when a Follow-up or a Discovery result arrives
	 */
	case WL_NAN_XTLV_SD_FUP_RECEIVED:
	case WL_NAN_XTLV_SD_DISC_RESULTS: {
		ret = wl_cfgnan_parse_sd_attr_data(ctx_tlv_data->cfg->osh,
			len, data, tlv_data, type);
		break;
	}
	case WL_NAN_XTLV_SD_NDPE_TLV_LIST:
		/* Intentional fall-through: the NDPE TLV list and SVC INFO are sent to the
		 * upper layers in the same container
		 */
	case WL_NAN_XTLV_SD_SVC_INFO: {
		tlv_data->svc_info.data =
			MALLOCZ(ctx_tlv_data->cfg->osh, len);
		if (!tlv_data->svc_info.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			tlv_data->svc_info.dlen = 0;
			ret = BCME_NOMEM;
			goto fail;
		}
		tlv_data->svc_info.dlen = len;
		ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
				data, tlv_data->svc_info.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy svc info data\n"));
			goto fail;
		}
		break;
	}
	case WL_NAN_XTLV_SD_NAN_AF:
	case WL_NAN_XTLV_DAM_NA_ATTR:
		/* No action -intentionally added to avoid prints when these events are rcvd */
		break;
	default:
		WL_ERR(("Not available for tlv type = 0x%x\n", type));
		ret = BCME_ERROR;
		break;
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}

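/*
 * Validate that one batched iovar subcommand fits in the remaining buffer.
 * The required size is the subcommand header up to its data[] member plus the
 * payload rounded up to a 4-byte boundary. Rough worked example (assuming an
 * 8-byte header purely for illustration): data_size = 10 gives
 * *subcmd_len = 8 + ALIGN_SIZE(10, 4) = 8 + 12 = 20 bytes.
 */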
int
wl_cfg_nan_check_cmd_len(uint16 nan_iov_len, uint16 data_size,
		uint16 *subcmd_len)
{
	s32 ret = BCME_OK;

	if (subcmd_len != NULL) {
		*subcmd_len = OFFSETOF(bcm_iov_batch_subcmd_t, data) +
				ALIGN_SIZE(data_size, 4);
		if (*subcmd_len > nan_iov_len) {
			WL_ERR(("%s: Buf short, requested:%d, available:%d\n",
					__FUNCTION__, *subcmd_len, nan_iov_len));
			ret = BCME_NOMEM;
		}
	} else {
		WL_ERR(("Invalid subcmd_len\n"));
		ret = BCME_ERROR;
	}
	return ret;
}

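/*
 * Configure the NAN event mask via the WL_NAN_CMD_CFG_EVENT_MASK batched
 * iovar. Unless disable_events is set, the current mask is first read back
 * (is_set = false), selected bits are then set/cleared (discovery, follow-up,
 * datapath and ranging events on; scheduler update, range report and DW end
 * off), and the result is written back with is_set = true. With
 * disable_events the mask is reduced to just WL_NAN_EVENT_STOP.
 */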
int
wl_cfgnan_config_eventmask(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	uint8 event_ind_flag, bool disable_events)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	uint32 status;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 event_mask[WL_NAN_EVMASK_EXTN_LEN];
	wl_nan_evmask_extn_t *evmask;
	uint16 evmask_cmd_len;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();

	/* same src and dest len here */
	bzero(event_mask, sizeof(event_mask));
	evmask_cmd_len = OFFSETOF(wl_nan_evmask_extn_t, evmask) +
		sizeof(event_mask);
	ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
	if (unlikely(ret)) {
		WL_ERR((" nan event enable failed, error = %d \n", ret));
		goto fail;
	}

	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
			evmask_cmd_len, &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
	sub_cmd->len = sizeof(sub_cmd->u.options) + evmask_cmd_len;
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
	evmask->ver = WL_NAN_EVMASK_EXTN_VER;
	evmask->len = WL_NAN_EVMASK_EXTN_LEN;
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;

	if (disable_events) {
		WL_DBG(("Disabling all nan events..except stop event\n"));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
	} else {
		/*
		 * Android framework event mask configuration.
		 */
		nan_buf->is_set = false;
		memset(resp_buf, 0, sizeof(resp_buf));
		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(("get nan event mask failed ret %d status %d \n",
				ret, status));
			goto fail;
		}
		sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
		evmask = (wl_nan_evmask_extn_t *)sub_cmd_resp->data;

		/* check the response buff */
		/* same src and dest len here */
		(void)memcpy_s(&event_mask, WL_NAN_EVMASK_EXTN_LEN,
				(uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN);

		if (event_ind_flag) {
			/* FIXME:BIT0 - Disable disc mac addr change event indication */
			if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
				WL_DBG(("Need to add disc mac addr change event\n"));
			}
			/* BIT2 - Disable nan cluster join indication (OTA). */
			if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
				clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_MERGE));
			}
		}

		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISCOVERY_RESULT));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RECEIVE));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TERMINATED));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TXS));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_DATAPATH_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_ESTB));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_END));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_REQ_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_TERM_IND));
		setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISC_CACHE_TIMEOUT));
		/* Disable below events by default */
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF));
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_RPT_IND));
		clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DW_END));
	}

	nan_buf->is_set = true;
	evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
	/* same src and dest len here */
	(void)memcpy_s((uint8*)&evmask->evmask, sizeof(event_mask),
		&event_mask, sizeof(event_mask));

	nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("set nan event mask failed ret %d status %d \n", ret, status));
		goto fail;
	}
1131*4882a593Smuzhiyun 	WL_DBG(("set nan event mask successful\n"));
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun fail:
1134*4882a593Smuzhiyun 	if (nan_buf) {
1135*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
1136*4882a593Smuzhiyun 	}
1137*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1138*4882a593Smuzhiyun 	return ret;
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun 
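/*
 * Push a NAN availability (local/NDC/ranging) schedule to firmware.
 * The current avail entry is read back first; a default entry is written
 * only when firmware reports BCME_NOTFOUND. No-op when firmware advertises
 * autodam, since avail modification is not allowed in that case.
 */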
1141*4882a593Smuzhiyun static int
1142*4882a593Smuzhiyun wl_cfgnan_set_nan_avail(struct net_device *ndev,
1143*4882a593Smuzhiyun 		struct bcm_cfg80211 *cfg, nan_avail_cmd_data *cmd_data, uint8 avail_type)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
1146*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1147*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
1148*4882a593Smuzhiyun 	uint16 subcmd_len;
1149*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1150*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
1151*4882a593Smuzhiyun 	wl_avail_t *avail = NULL;
1152*4882a593Smuzhiyun 	wl_avail_entry_t *entry;	/* used for filling entry structure */
1153*4882a593Smuzhiyun 	uint8 *p;	/* tracking pointer */
1154*4882a593Smuzhiyun 	uint8 i;
1155*4882a593Smuzhiyun 	u32 status;
1156*4882a593Smuzhiyun 	int c;
1157*4882a593Smuzhiyun 	char ndc_id[ETHER_ADDR_LEN] = { 0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0 };
1158*4882a593Smuzhiyun 	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
1159*4882a593Smuzhiyun 	char *a = WL_AVAIL_BIT_MAP;
1160*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	/* Do not disturb avail if dam is supported */
1165*4882a593Smuzhiyun 	if (FW_SUPPORTED(dhdp, autodam)) {
1166*4882a593Smuzhiyun 		WL_DBG(("DAM is supported, avail modification not allowed\n"));
1167*4882a593Smuzhiyun 		return ret;
1168*4882a593Smuzhiyun 	}
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	if (avail_type < WL_AVAIL_LOCAL || avail_type > WL_AVAIL_TYPE_MAX) {
1171*4882a593Smuzhiyun 		WL_ERR(("Invalid availability type\n"));
1172*4882a593Smuzhiyun 		ret = BCME_USAGE_ERROR;
1173*4882a593Smuzhiyun 		goto fail;
1174*4882a593Smuzhiyun 	}
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
1177*4882a593Smuzhiyun 	if (!nan_buf) {
1178*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
1179*4882a593Smuzhiyun 		ret = BCME_NOMEM;
1180*4882a593Smuzhiyun 		goto fail;
1181*4882a593Smuzhiyun 	}
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
1184*4882a593Smuzhiyun 	if (!nan_iov_data) {
1185*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
1186*4882a593Smuzhiyun 		ret = BCME_NOMEM;
1187*4882a593Smuzhiyun 		goto fail;
1188*4882a593Smuzhiyun 	}
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
1191*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
1192*4882a593Smuzhiyun 	nan_buf->count = 0;
1193*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
1194*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1197*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1198*4882a593Smuzhiyun 			sizeof(*avail), &subcmd_len);
1199*4882a593Smuzhiyun 	if (unlikely(ret)) {
1200*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
1201*4882a593Smuzhiyun 		goto fail;
1202*4882a593Smuzhiyun 	}
1203*4882a593Smuzhiyun 	avail = (wl_avail_t *)sub_cmd->data;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	/* populate wl_avail_type */
1206*4882a593Smuzhiyun 	avail->flags = avail_type;
1207*4882a593Smuzhiyun 	if (avail_type == WL_AVAIL_RANGING) {
1208*4882a593Smuzhiyun 		ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
1209*4882a593Smuzhiyun 			&cmd_data->peer_nmi, ETHER_ADDR_LEN);
1210*4882a593Smuzhiyun 		if (ret != BCME_OK) {
1211*4882a593Smuzhiyun 			WL_ERR(("Failed to copy peer nmi\n"));
1212*4882a593Smuzhiyun 			goto fail;
1213*4882a593Smuzhiyun 		}
1214*4882a593Smuzhiyun 	}
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
1217*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
1218*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	nan_buf->is_set = false;
1221*4882a593Smuzhiyun 	nan_buf->count++;
1222*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
1223*4882a593Smuzhiyun 	nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	WL_TRACE(("Read wl nan avail status\n"));
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
1228*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
1229*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
1230*4882a593Smuzhiyun 	if (unlikely(ret)) {
1231*4882a593Smuzhiyun 		WL_ERR(("\n Get nan avail failed ret %d, status %d \n", ret, status));
1232*4882a593Smuzhiyun 		goto fail;
1233*4882a593Smuzhiyun 	}
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	if (status == BCME_NOTFOUND) {
1236*4882a593Smuzhiyun 		nan_buf->count = 0;
1237*4882a593Smuzhiyun 		nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
1238*4882a593Smuzhiyun 		nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 		sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun 		avail = (wl_avail_t *)sub_cmd->data;
1243*4882a593Smuzhiyun 		p = avail->entry;
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 		/* populate wl_avail fields */
1246*4882a593Smuzhiyun 		avail->length = OFFSETOF(wl_avail_t, entry);
1247*4882a593Smuzhiyun 		avail->flags = avail_type;
1248*4882a593Smuzhiyun 		avail->num_entries = 0;
1249*4882a593Smuzhiyun 		avail->id = 0;
1250*4882a593Smuzhiyun 		entry = (wl_avail_entry_t*)p;
1251*4882a593Smuzhiyun 		entry->flags = WL_AVAIL_ENTRY_COM;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 		/* set default values for optional parameters */
1254*4882a593Smuzhiyun 		entry->start_offset = 0;
1255*4882a593Smuzhiyun 		entry->u.band = 0;
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 		if (cmd_data->avail_period) {
1258*4882a593Smuzhiyun 			entry->period = cmd_data->avail_period;
1259*4882a593Smuzhiyun 		} else {
1260*4882a593Smuzhiyun 			entry->period = WL_AVAIL_PERIOD_1024;
1261*4882a593Smuzhiyun 		}
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 		if (cmd_data->duration != NAN_BAND_INVALID) {
1264*4882a593Smuzhiyun 			entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
1265*4882a593Smuzhiyun 				(cmd_data->duration << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
1266*4882a593Smuzhiyun 		} else {
1267*4882a593Smuzhiyun 			entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
1268*4882a593Smuzhiyun 				(WL_AVAIL_BIT_DUR_16 << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
1269*4882a593Smuzhiyun 		}
1270*4882a593Smuzhiyun 		entry->bitmap_len = 0;
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 		if (avail_type == WL_AVAIL_LOCAL) {
1273*4882a593Smuzhiyun 			entry->flags |= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT;
1274*4882a593Smuzhiyun 			/* Check for 5g support, based on that choose 5g channel */
1275*4882a593Smuzhiyun 			if (cfg->nancfg->support_5g) {
1276*4882a593Smuzhiyun 				entry->u.channel_info =
1277*4882a593Smuzhiyun 					htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_5G,
1278*4882a593Smuzhiyun 						WL_AVAIL_BANDWIDTH_5G));
1279*4882a593Smuzhiyun 			} else {
1280*4882a593Smuzhiyun 				entry->u.channel_info =
1281*4882a593Smuzhiyun 					htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_2G,
1282*4882a593Smuzhiyun 						WL_AVAIL_BANDWIDTH_2G));
1283*4882a593Smuzhiyun 			}
1284*4882a593Smuzhiyun 			entry->flags = htod16(entry->flags);
1285*4882a593Smuzhiyun 		}
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 		if (cfg->nancfg->support_5g) {
1288*4882a593Smuzhiyun 			a = WL_5G_AVAIL_BIT_MAP;
1289*4882a593Smuzhiyun 		}
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 		/* point to bitmap value for processing */
1292*4882a593Smuzhiyun 		if (cmd_data->bmap) {
1293*4882a593Smuzhiyun 			for (c = (WL_NAN_EVENT_CLEAR_BIT-1); c >= 0; c--) {
1294*4882a593Smuzhiyun 				i = cmd_data->bmap >> c;
1295*4882a593Smuzhiyun 				if (i & 1) {
1296*4882a593Smuzhiyun 					setbit(entry->bitmap, (WL_NAN_EVENT_CLEAR_BIT-c-1));
1297*4882a593Smuzhiyun 				}
1298*4882a593Smuzhiyun 			}
1299*4882a593Smuzhiyun 		} else {
1300*4882a593Smuzhiyun 			for (i = 0; i < strlen(WL_AVAIL_BIT_MAP); i++) {
1301*4882a593Smuzhiyun 				if (*a == '1') {
1302*4882a593Smuzhiyun 					setbit(entry->bitmap, i);
1303*4882a593Smuzhiyun 				}
1304*4882a593Smuzhiyun 				a++;
1305*4882a593Smuzhiyun 			}
1306*4882a593Smuzhiyun 		}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 		/* account for partially filled most significant byte */
1309*4882a593Smuzhiyun 		entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
1310*4882a593Smuzhiyun 		if (avail_type == WL_AVAIL_NDC) {
1311*4882a593Smuzhiyun 			ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
1312*4882a593Smuzhiyun 					ndc_id, ETHER_ADDR_LEN);
1313*4882a593Smuzhiyun 			if (ret != BCME_OK) {
1314*4882a593Smuzhiyun 				WL_ERR(("Failed to copy ndc id\n"));
1315*4882a593Smuzhiyun 				goto fail;
1316*4882a593Smuzhiyun 			}
1317*4882a593Smuzhiyun 		} else if (avail_type == WL_AVAIL_RANGING) {
1318*4882a593Smuzhiyun 			ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
1319*4882a593Smuzhiyun 					&cmd_data->peer_nmi, ETHER_ADDR_LEN);
1320*4882a593Smuzhiyun 			if (ret != BCME_OK) {
1321*4882a593Smuzhiyun 				WL_ERR(("Failed to copy peer nmi\n"));
1322*4882a593Smuzhiyun 				goto fail;
1323*4882a593Smuzhiyun 			}
1324*4882a593Smuzhiyun 		}
1325*4882a593Smuzhiyun 		/* account for partially filled most significant byte */
1327*4882a593Smuzhiyun 		/* update wl_avail and populate wl_avail_entry */
1328*4882a593Smuzhiyun 		entry->length = OFFSETOF(wl_avail_entry_t, bitmap) + entry->bitmap_len;
1329*4882a593Smuzhiyun 		avail->num_entries++;
1330*4882a593Smuzhiyun 		avail->length += entry->length;
1331*4882a593Smuzhiyun 		/* advance pointer for next entry */
1332*4882a593Smuzhiyun 		p += entry->length;
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 		/* convert to dongle endianness */
1335*4882a593Smuzhiyun 		entry->length = htod16(entry->length);
1336*4882a593Smuzhiyun 		entry->start_offset = htod16(entry->start_offset);
1337*4882a593Smuzhiyun 		entry->u.channel_info = htod32(entry->u.channel_info);
1338*4882a593Smuzhiyun 		entry->flags = htod16(entry->flags);
1339*4882a593Smuzhiyun 		/* update avail_len only if
1340*4882a593Smuzhiyun 		 * there are avail entries
1341*4882a593Smuzhiyun 		 */
1342*4882a593Smuzhiyun 		if (avail->num_entries) {
1343*4882a593Smuzhiyun 			nan_iov_data->nan_iov_len -= avail->length;
1344*4882a593Smuzhiyun 			avail->length = htod16(avail->length);
1345*4882a593Smuzhiyun 			avail->flags = htod16(avail->flags);
1346*4882a593Smuzhiyun 		}
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 		sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
1350*4882a593Smuzhiyun 		sub_cmd->len = sizeof(sub_cmd->u.options) + avail->length;
1351*4882a593Smuzhiyun 		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 		nan_buf->is_set = true;
1354*4882a593Smuzhiyun 		nan_buf->count++;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 		/* Reduce the iov_len size by subcmd_len */
1357*4882a593Smuzhiyun 		nan_iov_data->nan_iov_len -= subcmd_len;
1358*4882a593Smuzhiyun 		nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
1361*4882a593Smuzhiyun 				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
1362*4882a593Smuzhiyun 		if (unlikely(ret) || unlikely(status)) {
1363*4882a593Smuzhiyun 			WL_ERR(("\n set nan avail failed ret %d status %d \n", ret, status));
1364*4882a593Smuzhiyun 			ret = status;
1365*4882a593Smuzhiyun 			goto fail;
1366*4882a593Smuzhiyun 		}
1367*4882a593Smuzhiyun 	} else if (status == BCME_OK) {
1368*4882a593Smuzhiyun 		WL_DBG(("Avail type [%d] found to be configured\n", avail_type));
1369*4882a593Smuzhiyun 	} else {
1370*4882a593Smuzhiyun 		WL_ERR(("get nan avail failed ret %d status %d \n", ret, status));
1371*4882a593Smuzhiyun 	}
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun fail:
1374*4882a593Smuzhiyun 	if (nan_buf) {
1375*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
1376*4882a593Smuzhiyun 	}
1377*4882a593Smuzhiyun 	if (nan_iov_data) {
1378*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
1379*4882a593Smuzhiyun 	}
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1382*4882a593Smuzhiyun 	return ret;
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun /* API to configure nan ctrl and nan ctrl2 commands */
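/*
 * Read-modify-write helper: the current cfg ctrl word(s) are fetched from
 * firmware, flag1/flag2 are OR-ed in (set) or cleared (clear), and the
 * result is written back through the same batched "nan" iovar.
 */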
1386*4882a593Smuzhiyun static int
1387*4882a593Smuzhiyun wl_cfgnan_config_control_flag(struct net_device *ndev, struct bcm_cfg80211 *cfg,
1388*4882a593Smuzhiyun 	uint32 flag1, uint32 flag2, uint16 cmd_id, uint32 *status, bool set)
1389*4882a593Smuzhiyun {
1390*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
1391*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1392*4882a593Smuzhiyun 	uint16 nan_iov_start, nan_iov_end;
1393*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
1394*4882a593Smuzhiyun 	uint16 subcmd_len;
1395*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1396*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
1397*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
1398*4882a593Smuzhiyun 	uint32 *cfg_ctrl;
1399*4882a593Smuzhiyun 	uint16 cfg_ctrl_size;
1400*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1403*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
1404*4882a593Smuzhiyun 	if (!nan_buf) {
1405*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
1406*4882a593Smuzhiyun 		ret = BCME_NOMEM;
1407*4882a593Smuzhiyun 		goto fail;
1408*4882a593Smuzhiyun 	}
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
1411*4882a593Smuzhiyun 	if (!nan_iov_data) {
1412*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
1413*4882a593Smuzhiyun 		ret = BCME_NOMEM;
1414*4882a593Smuzhiyun 		goto fail;
1415*4882a593Smuzhiyun 	}
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG) {
1418*4882a593Smuzhiyun 		cfg_ctrl_size = sizeof(wl_nan_cfg_ctrl_t);
1419*4882a593Smuzhiyun 	} else if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG2) {
1420*4882a593Smuzhiyun 		cfg_ctrl_size = sizeof(wl_nan_cfg_ctrl2_t);
1421*4882a593Smuzhiyun 	} else {
1422*4882a593Smuzhiyun 		ret = BCME_BADARG;
1423*4882a593Smuzhiyun 		goto fail;
1424*4882a593Smuzhiyun 	}
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
1427*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
1428*4882a593Smuzhiyun 	nan_buf->count = 0;
1429*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
1430*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1431*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1434*4882a593Smuzhiyun 			cfg_ctrl_size, &subcmd_len);
1435*4882a593Smuzhiyun 	if (unlikely(ret)) {
1436*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
1437*4882a593Smuzhiyun 		goto fail;
1438*4882a593Smuzhiyun 	}
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	sub_cmd->id = htod16(cmd_id);
1441*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + cfg_ctrl_size;
1442*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	nan_buf->is_set = false;
1445*4882a593Smuzhiyun 	nan_buf->count++;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	/* Reduce the iov_len size by subcmd_len */
1448*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
1449*4882a593Smuzhiyun 	nan_iov_end = nan_iov_data->nan_iov_len;
1450*4882a593Smuzhiyun 	nan_buf_size = (nan_iov_start - nan_iov_end);
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
1453*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
1454*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
1455*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(*status)) {
1456*4882a593Smuzhiyun 		WL_ERR(("get nan cfg ctrl failed ret %d status %d \n", ret, *status));
1457*4882a593Smuzhiyun 		goto fail;
1458*4882a593Smuzhiyun 	}
1459*4882a593Smuzhiyun 	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	/* check the response buff */
1462*4882a593Smuzhiyun 	if (cmd_id == WL_NAN_CMD_CFG_NAN_CONFIG) {
1463*4882a593Smuzhiyun 		wl_nan_cfg_ctrl_t *cfg_ctrl1;
1464*4882a593Smuzhiyun 		cfg_ctrl1 = ((uint32 *)&sub_cmd_resp->data[0]);
1465*4882a593Smuzhiyun 		if (set) {
1466*4882a593Smuzhiyun 			*cfg_ctrl1 |= flag1;
1467*4882a593Smuzhiyun 		} else {
1468*4882a593Smuzhiyun 			*cfg_ctrl1 &= ~flag1;
1469*4882a593Smuzhiyun 		}
1470*4882a593Smuzhiyun 		cfg_ctrl = cfg_ctrl1;
1471*4882a593Smuzhiyun 		WL_INFORM_MEM(("%s: Modifying nan ctrl flag %x val %d\n",
1472*4882a593Smuzhiyun 				__FUNCTION__, flag1, set));
1473*4882a593Smuzhiyun 	} else {
1474*4882a593Smuzhiyun 		wl_nan_cfg_ctrl2_t *cfg_ctrl2;
1475*4882a593Smuzhiyun 		cfg_ctrl2 = ((wl_nan_cfg_ctrl2_t *)&sub_cmd_resp->data[0]);
1476*4882a593Smuzhiyun 		if (set) {
1477*4882a593Smuzhiyun 			cfg_ctrl2->flags1 |= flag1;
1478*4882a593Smuzhiyun 			cfg_ctrl2->flags2 |= flag2;
1479*4882a593Smuzhiyun 		} else {
1480*4882a593Smuzhiyun 			cfg_ctrl2->flags1 &= ~flag1;
1481*4882a593Smuzhiyun 			cfg_ctrl2->flags2 &= ~flag2;
1482*4882a593Smuzhiyun 		}
1483*4882a593Smuzhiyun 		cfg_ctrl = (uint32 *)cfg_ctrl2;
1484*4882a593Smuzhiyun 		WL_INFORM_MEM(("%s: Modifying nan ctrl2 flag1 %x flag2 %x val %d\n",
1485*4882a593Smuzhiyun 				__FUNCTION__, flag1, flag2, set));
1486*4882a593Smuzhiyun 	}
1487*4882a593Smuzhiyun 	ret = memcpy_s(sub_cmd->data, cfg_ctrl_size, cfg_ctrl, cfg_ctrl_size);
1488*4882a593Smuzhiyun 	if (ret != BCME_OK) {
1489*4882a593Smuzhiyun 		WL_ERR(("Failed to copy cfg ctrl\n"));
1490*4882a593Smuzhiyun 		goto fail;
1491*4882a593Smuzhiyun 	}
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	nan_buf->is_set = true;
1494*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
1495*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
1496*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(*status)) {
1497*4882a593Smuzhiyun 		WL_ERR(("set nan cfg ctrl failed ret %d status %d \n", ret, *status));
1498*4882a593Smuzhiyun 		goto fail;
1499*4882a593Smuzhiyun 	}
1500*4882a593Smuzhiyun 	WL_DBG(("set nan cfg ctrl successful\n"));
1501*4882a593Smuzhiyun fail:
1502*4882a593Smuzhiyun 	if (nan_buf) {
1503*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
1504*4882a593Smuzhiyun 	}
1505*4882a593Smuzhiyun 	if (nan_iov_data) {
1506*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
1507*4882a593Smuzhiyun 	}
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1510*4882a593Smuzhiyun 	return ret;
1511*4882a593Smuzhiyun }
1512*4882a593Smuzhiyun 
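/*
 * bcm_unpack_xtlv_buf() callback: each sub-command response begins with a
 * 32-bit status word. Decrement the pending-command count per TLV and
 * return BCME_IOV_LAST_CMD once every response has been consumed.
 */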
1513*4882a593Smuzhiyun static int
1514*4882a593Smuzhiyun wl_cfgnan_get_iovars_status(void *ctx, const uint8 *data, uint16 type, uint16 len)
1515*4882a593Smuzhiyun {
1516*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *b_resp = (bcm_iov_batch_buf_t *)ctx;
1517*4882a593Smuzhiyun 	uint32 status;
1518*4882a593Smuzhiyun 	/* if all tlvs are parsed, we should not be here */
1519*4882a593Smuzhiyun 	if (b_resp->count == 0) {
1520*4882a593Smuzhiyun 		return BCME_BADLEN;
1521*4882a593Smuzhiyun 	}
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	/*  cbfn params may be used in f/w */
1524*4882a593Smuzhiyun 	if (len < sizeof(status)) {
1525*4882a593Smuzhiyun 		return BCME_BUFTOOSHORT;
1526*4882a593Smuzhiyun 	}
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	/* the first 4 bytes carry the status code */
1529*4882a593Smuzhiyun 	if (memcpy_s(&status, sizeof(status),
1530*4882a593Smuzhiyun 			data, sizeof(uint32)) != BCME_OK) {
1531*4882a593Smuzhiyun 		WL_ERR(("Failed to copy status\n"));
1532*4882a593Smuzhiyun 		goto exit;
1533*4882a593Smuzhiyun 	}
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	status = dtoh32(status);
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	/* If status is non zero */
1538*4882a593Smuzhiyun 	if (status != BCME_OK) {
1539*4882a593Smuzhiyun 		printf("cmd type %d failed, status: %04x\n", type, status);
1540*4882a593Smuzhiyun 		goto exit;
1541*4882a593Smuzhiyun 	}
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	if (b_resp->count > 0) {
1544*4882a593Smuzhiyun 		b_resp->count--;
1545*4882a593Smuzhiyun 	}
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	if (!b_resp->count) {
1548*4882a593Smuzhiyun 		status = BCME_IOV_LAST_CMD;
1549*4882a593Smuzhiyun 	}
1550*4882a593Smuzhiyun exit:
1551*4882a593Smuzhiyun 	return status;
1552*4882a593Smuzhiyun }
1553*4882a593Smuzhiyun 
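/*
 * Send a prepared batched "nan" iovar to firmware (set or get path) and
 * walk the response TLVs via wl_cfgnan_get_iovars_status(). The return
 * value carries the ioctl transport result, while *status carries the
 * per-command firmware status extracted from the response buffer.
 */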
1554*4882a593Smuzhiyun static int
1555*4882a593Smuzhiyun wl_cfgnan_execute_ioctl(struct net_device *ndev, struct bcm_cfg80211 *cfg,
1556*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf, uint16 nan_buf_size, uint32 *status,
1557*4882a593Smuzhiyun 	uint8 *resp_buf, uint16 resp_buf_size)
1558*4882a593Smuzhiyun {
1559*4882a593Smuzhiyun 	int ret = BCME_OK;
1560*4882a593Smuzhiyun 	uint16 tlvs_len;
1561*4882a593Smuzhiyun 	int res = BCME_OK;
1562*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *p_resp = NULL;
1563*4882a593Smuzhiyun 	char *iov = "nan";
1564*4882a593Smuzhiyun 	int max_resp_len = WLC_IOCTL_MAXLEN;
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	WL_DBG(("Enter:\n"));
1567*4882a593Smuzhiyun 	if (nan_buf->is_set) {
1568*4882a593Smuzhiyun 		ret = wldev_iovar_setbuf(ndev, "nan", nan_buf, nan_buf_size,
1569*4882a593Smuzhiyun 			resp_buf, resp_buf_size, NULL);
1570*4882a593Smuzhiyun 		p_resp = (bcm_iov_batch_buf_t *)(resp_buf + strlen(iov) + 1);
1571*4882a593Smuzhiyun 	} else {
1572*4882a593Smuzhiyun 		ret = wldev_iovar_getbuf(ndev, "nan", nan_buf, nan_buf_size,
1573*4882a593Smuzhiyun 			resp_buf, resp_buf_size, NULL);
1574*4882a593Smuzhiyun 		p_resp = (bcm_iov_batch_buf_t *)(resp_buf);
1575*4882a593Smuzhiyun 	}
1576*4882a593Smuzhiyun 	if (unlikely(ret)) {
1577*4882a593Smuzhiyun 		WL_ERR((" nan execute ioctl failed, error = %d \n", ret));
1578*4882a593Smuzhiyun 		goto fail;
1579*4882a593Smuzhiyun 	}
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	p_resp->is_set = nan_buf->is_set;
1582*4882a593Smuzhiyun 	tlvs_len = max_resp_len - OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	/* Extract the tlvs and print their resp in cb fn */
1585*4882a593Smuzhiyun 	res = bcm_unpack_xtlv_buf((void *)p_resp, (const uint8 *)&p_resp->cmds[0],
1586*4882a593Smuzhiyun 		tlvs_len, BCM_IOV_CMD_OPT_ALIGN32, wl_cfgnan_get_iovars_status);
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	if (res == BCME_IOV_LAST_CMD) {
1589*4882a593Smuzhiyun 		res = BCME_OK;
1590*4882a593Smuzhiyun 	}
1591*4882a593Smuzhiyun fail:
1592*4882a593Smuzhiyun 	*status = res;
1593*4882a593Smuzhiyun 	WL_DBG((" nan ioctl ret %d status %d \n", ret, *status));
1594*4882a593Smuzhiyun 	return ret;
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun }
1597*4882a593Smuzhiyun 
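/*
 * Fill one WL_NAN_CMD_CFG_IF_ADDR sub-command with the chosen NMI MAC
 * address and shrink the caller's remaining buffer length accordingly.
 */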
1598*4882a593Smuzhiyun static int
1599*4882a593Smuzhiyun wl_cfgnan_if_addr_handler(void *p_buf, uint16 *nan_buf_size,
1600*4882a593Smuzhiyun 		struct ether_addr *if_addr)
1601*4882a593Smuzhiyun {
1602*4882a593Smuzhiyun 	/* nan enable */
1603*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1604*4882a593Smuzhiyun 	uint16 subcmd_len;
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	if (p_buf != NULL) {
1609*4882a593Smuzhiyun 		bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 		ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
1612*4882a593Smuzhiyun 				sizeof(*if_addr), &subcmd_len);
1613*4882a593Smuzhiyun 		if (unlikely(ret)) {
1614*4882a593Smuzhiyun 			WL_ERR(("nan_sub_cmd check failed\n"));
1615*4882a593Smuzhiyun 			goto fail;
1616*4882a593Smuzhiyun 		}
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 		/* Fill the sub_command block */
1619*4882a593Smuzhiyun 		sub_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
1620*4882a593Smuzhiyun 		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*if_addr);
1621*4882a593Smuzhiyun 		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1622*4882a593Smuzhiyun 		ret = memcpy_s(sub_cmd->data, sizeof(*if_addr),
1623*4882a593Smuzhiyun 				(uint8 *)if_addr, sizeof(*if_addr));
1624*4882a593Smuzhiyun 		if (ret != BCME_OK) {
1625*4882a593Smuzhiyun 			WL_ERR(("Failed to copy if addr\n"));
1626*4882a593Smuzhiyun 			goto fail;
1627*4882a593Smuzhiyun 		}
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 		*nan_buf_size -= subcmd_len;
1630*4882a593Smuzhiyun 	} else {
1631*4882a593Smuzhiyun 		WL_ERR(("nan_iov_buf is NULL\n"));
1632*4882a593Smuzhiyun 		ret = BCME_ERROR;
1633*4882a593Smuzhiyun 		goto fail;
1634*4882a593Smuzhiyun 	}
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun fail:
1637*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1638*4882a593Smuzhiyun 	return ret;
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun 
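/*
 * Query WL_NAN_CMD_GLB_NAN_VER from firmware and cache the reported NAN
 * version in cfg->nancfg->version.
 */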
1641*4882a593Smuzhiyun static int
1642*4882a593Smuzhiyun wl_cfgnan_get_ver(struct net_device *ndev, struct bcm_cfg80211 *cfg)
1643*4882a593Smuzhiyun {
1644*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
1645*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1646*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
1647*4882a593Smuzhiyun 	wl_nan_ver_t *nan_ver = NULL;
1648*4882a593Smuzhiyun 	uint16 subcmd_len;
1649*4882a593Smuzhiyun 	uint32 status;
1650*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1651*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
1652*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1655*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
1656*4882a593Smuzhiyun 	if (!nan_buf) {
1657*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
1658*4882a593Smuzhiyun 		ret = BCME_NOMEM;
1659*4882a593Smuzhiyun 		goto fail;
1660*4882a593Smuzhiyun 	}
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
1663*4882a593Smuzhiyun 	nan_buf->count = 0;
1664*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1665*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
1668*4882a593Smuzhiyun 			sizeof(*nan_ver), &subcmd_len);
1669*4882a593Smuzhiyun 	if (unlikely(ret)) {
1670*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
1671*4882a593Smuzhiyun 		goto fail;
1672*4882a593Smuzhiyun 	}
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 	nan_ver = (wl_nan_ver_t *)sub_cmd->data;
1675*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_GLB_NAN_VER);
1676*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nan_ver);
1677*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1678*4882a593Smuzhiyun 	nan_buf_size -= subcmd_len;
1679*4882a593Smuzhiyun 	nan_buf->count = 1;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	nan_buf->is_set = false;
1682*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
1683*4882a593Smuzhiyun 	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
1686*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
1687*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
1688*4882a593Smuzhiyun 		WL_ERR(("get nan ver failed ret %d status %d \n",
1689*4882a593Smuzhiyun 				ret, status));
1690*4882a593Smuzhiyun 		goto fail;
1691*4882a593Smuzhiyun 	}
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
1694*4882a593Smuzhiyun 	nan_ver = ((wl_nan_ver_t *)&sub_cmd_resp->data[0]);
1695*4882a593Smuzhiyun 	if (!nan_ver) {
1696*4882a593Smuzhiyun 		ret = BCME_NOTFOUND;
1697*4882a593Smuzhiyun 		WL_ERR(("nan_ver not found: err = %d\n", ret));
1698*4882a593Smuzhiyun 		goto fail;
1699*4882a593Smuzhiyun 	}
1700*4882a593Smuzhiyun 	cfg->nancfg->version = *nan_ver;
1701*4882a593Smuzhiyun 	WL_INFORM_MEM(("Nan Version is %d\n", cfg->nancfg->version));
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun fail:
1704*4882a593Smuzhiyun 	if (nan_buf) {
1705*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
1706*4882a593Smuzhiyun 	}
1707*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1708*4882a593Smuzhiyun 	return ret;
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun }
1711*4882a593Smuzhiyun 
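/*
 * Program the NAN Management Interface (NMI) address: either a randomized
 * locally-administered MAC or one derived from the primary MAC, then push
 * it to firmware and mirror it in cfg->nancfg->nan_nmi_mac.
 */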
1712*4882a593Smuzhiyun static int
1713*4882a593Smuzhiyun wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
1714*4882a593Smuzhiyun {
1715*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1716*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
1717*4882a593Smuzhiyun 	uint32 status;
1718*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
1719*4882a593Smuzhiyun 	struct ether_addr if_addr;
1720*4882a593Smuzhiyun 	uint8 buf[NAN_IOCTL_BUF_SIZE];
1721*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
1722*4882a593Smuzhiyun 	bool rand_mac = cfg->nancfg->mac_rand;
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
1725*4882a593Smuzhiyun 	nan_buf->count = 0;
1726*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1727*4882a593Smuzhiyun 	if (rand_mac) {
1728*4882a593Smuzhiyun 		RANDOM_BYTES(if_addr.octet, 6);
1729*4882a593Smuzhiyun 		/* restore mcast and local admin bits to 0 and 1 */
1730*4882a593Smuzhiyun 		ETHER_SET_UNICAST(if_addr.octet);
1731*4882a593Smuzhiyun 		ETHER_SET_LOCALADDR(if_addr.octet);
1732*4882a593Smuzhiyun 	} else {
1733*4882a593Smuzhiyun 		/* Use primary MAC with the locally administered bit for the
1734*4882a593Smuzhiyun 		 * NAN NMI I/F
1735*4882a593Smuzhiyun 		 */
1736*4882a593Smuzhiyun 		if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI,
1737*4882a593Smuzhiyun 				if_addr.octet) != BCME_OK) {
1738*4882a593Smuzhiyun 			ret = -EINVAL;
1739*4882a593Smuzhiyun 			WL_ERR(("Failed to get mac addr for NMI\n"));
1740*4882a593Smuzhiyun 			goto fail;
1741*4882a593Smuzhiyun 		}
1742*4882a593Smuzhiyun 	}
1743*4882a593Smuzhiyun 	WL_INFORM_MEM(("%s: NMI " MACDBG "\n",
1744*4882a593Smuzhiyun 			__FUNCTION__, MAC2STRDBG(if_addr.octet)));
1745*4882a593Smuzhiyun 	ret = wl_cfgnan_if_addr_handler(&nan_buf->cmds[0],
1746*4882a593Smuzhiyun 			&nan_buf_size, &if_addr);
1747*4882a593Smuzhiyun 	if (unlikely(ret)) {
1748*4882a593Smuzhiyun 		WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
1749*4882a593Smuzhiyun 		goto fail;
1750*4882a593Smuzhiyun 	}
1751*4882a593Smuzhiyun 	nan_buf->count++;
1752*4882a593Smuzhiyun 	nan_buf->is_set = true;
1753*4882a593Smuzhiyun 	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
1754*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
1755*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
1756*4882a593Smuzhiyun 			nan_buf, nan_buf_size, &status,
1757*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
1758*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
1759*4882a593Smuzhiyun 		WL_ERR(("nan if addr handler failed ret %d status %d\n",
1760*4882a593Smuzhiyun 				ret, status));
1761*4882a593Smuzhiyun 		goto fail;
1762*4882a593Smuzhiyun 	}
1763*4882a593Smuzhiyun 	ret = memcpy_s(cfg->nancfg->nan_nmi_mac, ETH_ALEN,
1764*4882a593Smuzhiyun 			if_addr.octet, ETH_ALEN);
1765*4882a593Smuzhiyun 	if (ret != BCME_OK) {
1766*4882a593Smuzhiyun 		WL_ERR(("Failed to copy nmi addr\n"));
1767*4882a593Smuzhiyun 		goto fail;
1768*4882a593Smuzhiyun 	}
1769*4882a593Smuzhiyun 	return ret;
1770*4882a593Smuzhiyun fail:
1771*4882a593Smuzhiyun 	if (!rand_mac) {
1772*4882a593Smuzhiyun 		wl_release_vif_macaddr(cfg, if_addr.octet, WL_IF_TYPE_NAN_NMI);
1773*4882a593Smuzhiyun 	}
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	return ret;
1776*4882a593Smuzhiyun }
1777*4882a593Smuzhiyun 
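/*
 * Fill one WL_NAN_CMD_CFG_NAN_INIT sub-command carrying the init/deinit
 * flag; the caller batches and fires the actual ioctl.
 */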
1778*4882a593Smuzhiyun static int
1779*4882a593Smuzhiyun wl_cfgnan_init_handler(void *p_buf, uint16 *nan_buf_size, bool val)
1780*4882a593Smuzhiyun {
1781*4882a593Smuzhiyun 	/* nan enable */
1782*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1783*4882a593Smuzhiyun 	uint16 subcmd_len;
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	if (p_buf != NULL) {
1788*4882a593Smuzhiyun 		bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 		ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
1791*4882a593Smuzhiyun 				sizeof(val), &subcmd_len);
1792*4882a593Smuzhiyun 		if (unlikely(ret)) {
1793*4882a593Smuzhiyun 			WL_ERR(("nan_sub_cmd check failed\n"));
1794*4882a593Smuzhiyun 			goto fail;
1795*4882a593Smuzhiyun 		}
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 		/* Fill the sub_command block */
1798*4882a593Smuzhiyun 		sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
1799*4882a593Smuzhiyun 		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
1800*4882a593Smuzhiyun 		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1801*4882a593Smuzhiyun 		ret = memcpy_s(sub_cmd->data, sizeof(uint8),
1802*4882a593Smuzhiyun 				(uint8*)&val, sizeof(uint8));
1803*4882a593Smuzhiyun 		if (ret != BCME_OK) {
1804*4882a593Smuzhiyun 			WL_ERR(("Failed to copy init value\n"));
1805*4882a593Smuzhiyun 			goto fail;
1806*4882a593Smuzhiyun 		}
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 		*nan_buf_size -= subcmd_len;
1809*4882a593Smuzhiyun 	} else {
1810*4882a593Smuzhiyun 		WL_ERR(("nan_iov_buf is NULL\n"));
1811*4882a593Smuzhiyun 		ret = BCME_ERROR;
1812*4882a593Smuzhiyun 		goto fail;
1813*4882a593Smuzhiyun 	}
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun fail:
1816*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1817*4882a593Smuzhiyun 	return ret;
1818*4882a593Smuzhiyun }
1819*4882a593Smuzhiyun 
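/*
 * Append a WL_NAN_CMD_CFG_NAN_ENAB sub-command (enable/disable flag) to
 * the caller's iov buffer and advance the buffer cursor.
 */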
1820*4882a593Smuzhiyun static int
1821*4882a593Smuzhiyun wl_cfgnan_enable_handler(wl_nan_iov_t *nan_iov_data, bool val)
1822*4882a593Smuzhiyun {
1823*4882a593Smuzhiyun 	/* nan enable */
1824*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1825*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1826*4882a593Smuzhiyun 	uint16 subcmd_len;
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1833*4882a593Smuzhiyun 			sizeof(val), &subcmd_len);
1834*4882a593Smuzhiyun 	if (unlikely(ret)) {
1835*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
1836*4882a593Smuzhiyun 		return ret;
1837*4882a593Smuzhiyun 	}
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	/* Fill the sub_command block */
1840*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
1841*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
1842*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1843*4882a593Smuzhiyun 	ret = memcpy_s(sub_cmd->data, sizeof(uint8),
1844*4882a593Smuzhiyun 			(uint8*)&val, sizeof(uint8));
1845*4882a593Smuzhiyun 	if (ret != BCME_OK) {
1846*4882a593Smuzhiyun 		WL_ERR(("Failed to copy enab value\n"));
1847*4882a593Smuzhiyun 		return ret;
1848*4882a593Smuzhiyun 	}
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
1851*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
1852*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1853*4882a593Smuzhiyun 	return ret;
1854*4882a593Smuzhiyun }
1855*4882a593Smuzhiyun 
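/*
 * Append a WL_NAN_CMD_CFG_WARMUP_TIME sub-command carrying the warm-up
 * time in ticks taken from cmd_data->warmup_time.
 */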
1856*4882a593Smuzhiyun static int
1857*4882a593Smuzhiyun wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t *cmd_data,
1858*4882a593Smuzhiyun 		wl_nan_iov_t *nan_iov_data)
1859*4882a593Smuzhiyun {
1860*4882a593Smuzhiyun 	/* wl nan warm_up_time */
1861*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1862*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1863*4882a593Smuzhiyun 	wl_nan_warmup_time_ticks_t *wup_ticks = NULL;
1864*4882a593Smuzhiyun 	uint16 subcmd_len;
1865*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1868*4882a593Smuzhiyun 	wup_ticks = (wl_nan_warmup_time_ticks_t *)sub_cmd->data;
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1871*4882a593Smuzhiyun 			sizeof(*wup_ticks), &subcmd_len);
1872*4882a593Smuzhiyun 	if (unlikely(ret)) {
1873*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
1874*4882a593Smuzhiyun 		return ret;
1875*4882a593Smuzhiyun 	}
1876*4882a593Smuzhiyun 	/* Fill the sub_command block */
1877*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_WARMUP_TIME);
1878*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
1879*4882a593Smuzhiyun 		sizeof(*wup_ticks);
1880*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1881*4882a593Smuzhiyun 	*wup_ticks = cmd_data->warmup_time;
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
1884*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1887*4882a593Smuzhiyun 	return ret;
1888*4882a593Smuzhiyun }
1889*4882a593Smuzhiyun 
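/*
 * Append the election metrics (random factor, master preference). When no
 * valid master preference is supplied, a random value in the mobile-device
 * range is used instead.
 */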
1890*4882a593Smuzhiyun static int
1891*4882a593Smuzhiyun wl_cfgnan_set_election_metric(nan_config_cmd_data_t *cmd_data,
1892*4882a593Smuzhiyun 		wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1893*4882a593Smuzhiyun {
1894*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1895*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1896*4882a593Smuzhiyun 	wl_nan_election_metric_config_t *metrics = NULL;
1897*4882a593Smuzhiyun 	uint16 subcmd_len;
1898*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	sub_cmd =
1901*4882a593Smuzhiyun 		(bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1902*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1903*4882a593Smuzhiyun 			sizeof(*metrics), &subcmd_len);
1904*4882a593Smuzhiyun 	if (unlikely(ret)) {
1905*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
1906*4882a593Smuzhiyun 		goto fail;
1907*4882a593Smuzhiyun 	}
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	metrics = (wl_nan_election_metric_config_t *)sub_cmd->data;
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_RAND_FACTOR_CONFIG) {
1912*4882a593Smuzhiyun 		metrics->random_factor = (uint8)cmd_data->metrics.random_factor;
1913*4882a593Smuzhiyun 	}
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	if ((!cmd_data->metrics.master_pref) ||
1916*4882a593Smuzhiyun 		(cmd_data->metrics.master_pref > NAN_MAXIMUM_MASTER_PREFERENCE)) {
1917*4882a593Smuzhiyun 		WL_TRACE(("Master Pref is 0 or greater than 254; using a random value\n"));
1918*4882a593Smuzhiyun 		/* Master pref for mobile devices can be from 1 - 127 as per Spec Appendix C */
1919*4882a593Smuzhiyun 		metrics->master_pref = (RANDOM32()%(NAN_MAXIMUM_MASTER_PREFERENCE/2)) + 1;
1920*4882a593Smuzhiyun 	} else {
1921*4882a593Smuzhiyun 		metrics->master_pref = (uint8)cmd_data->metrics.master_pref;
1922*4882a593Smuzhiyun 	}
1923*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG);
1924*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
1925*4882a593Smuzhiyun 		sizeof(*metrics);
1926*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
1929*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun fail:
1932*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1933*4882a593Smuzhiyun 	return ret;
1934*4882a593Smuzhiyun }
1935*4882a593Smuzhiyun 
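/*
 * Append the beacon RSSI notification thresholds for 2.4 GHz and 5 GHz,
 * falling back to NAN_DEF_RSSI_NOTIF_THRESH when not configured.
 */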
1936*4882a593Smuzhiyun static int
1937*4882a593Smuzhiyun wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t *cmd_data,
1938*4882a593Smuzhiyun 		wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1939*4882a593Smuzhiyun {
1940*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1941*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1942*4882a593Smuzhiyun 	wl_nan_rssi_notif_thld_t *rssi_notif_thld = NULL;
1943*4882a593Smuzhiyun 	uint16 subcmd_len;
1944*4882a593Smuzhiyun 
1945*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1946*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 	rssi_notif_thld = (wl_nan_rssi_notif_thld_t *)sub_cmd->data;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1951*4882a593Smuzhiyun 			sizeof(*rssi_notif_thld), &subcmd_len);
1952*4882a593Smuzhiyun 	if (unlikely(ret)) {
1953*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
1954*4882a593Smuzhiyun 		return ret;
1955*4882a593Smuzhiyun 	}
1956*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG) {
1957*4882a593Smuzhiyun 		rssi_notif_thld->bcn_rssi_2g =
1958*4882a593Smuzhiyun 			cmd_data->rssi_attr.rssi_proximity_2dot4g_val;
1959*4882a593Smuzhiyun 	} else {
1960*4882a593Smuzhiyun 		/* Keep the default RSSI notification threshold of -70 dBm */
1961*4882a593Smuzhiyun 		rssi_notif_thld->bcn_rssi_2g = NAN_DEF_RSSI_NOTIF_THRESH;
1962*4882a593Smuzhiyun 	}
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG) {
1965*4882a593Smuzhiyun 		rssi_notif_thld->bcn_rssi_5g =
1966*4882a593Smuzhiyun 			cmd_data->rssi_attr.rssi_proximity_5g_val;
1967*4882a593Smuzhiyun 	} else {
1968*4882a593Smuzhiyun 		/* Keep the default RSSI notification threshold of -70 dBm */
1969*4882a593Smuzhiyun 		rssi_notif_thld->bcn_rssi_5g = NAN_DEF_RSSI_NOTIF_THRESH;
1970*4882a593Smuzhiyun 	}
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD);
1973*4882a593Smuzhiyun 	sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_notif_thld));
1974*4882a593Smuzhiyun 	sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
1977*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	NAN_DBG_EXIT();
1980*4882a593Smuzhiyun 	return ret;
1981*4882a593Smuzhiyun }
1982*4882a593Smuzhiyun 
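/*
 * Append the election RSSI mid/close thresholds per band, using the
 * NAN_DEF_RSSI_MID / NAN_DEF_RSSI_CLOSE defaults for unconfigured fields.
 */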
1983*4882a593Smuzhiyun static int
1984*4882a593Smuzhiyun wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t *cmd_data,
1985*4882a593Smuzhiyun 		wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1986*4882a593Smuzhiyun {
1987*4882a593Smuzhiyun 	s32 ret = BCME_OK;
1988*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1989*4882a593Smuzhiyun 	wl_nan_rssi_thld_t *rssi_thld = NULL;
1990*4882a593Smuzhiyun 	uint16 subcmd_len;
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 	NAN_DBG_ENTER();
1993*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1994*4882a593Smuzhiyun 	rssi_thld = (wl_nan_rssi_thld_t *)sub_cmd->data;
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1997*4882a593Smuzhiyun 			sizeof(*rssi_thld), &subcmd_len);
1998*4882a593Smuzhiyun 	if (unlikely(ret)) {
1999*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2000*4882a593Smuzhiyun 		return ret;
2001*4882a593Smuzhiyun 	}
2002*4882a593Smuzhiyun 
2003*4882a593Smuzhiyun 	/*
2004*4882a593Smuzhiyun 	 * Keeping RSSI mid value -75dBm for both 2G and 5G
2005*4882a593Smuzhiyun 	 * Keeping RSSI close value -60dBm for both 2G and 5G
2006*4882a593Smuzhiyun 	 */
2007*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_2G_CONFIG) {
2008*4882a593Smuzhiyun 		rssi_thld->rssi_mid_2g =
2009*4882a593Smuzhiyun 			cmd_data->rssi_attr.rssi_middle_2dot4g_val;
2010*4882a593Smuzhiyun 	} else {
2011*4882a593Smuzhiyun 		rssi_thld->rssi_mid_2g = NAN_DEF_RSSI_MID;
2012*4882a593Smuzhiyun 	}
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_5G_CONFIG) {
2015*4882a593Smuzhiyun 		rssi_thld->rssi_mid_5g =
2016*4882a593Smuzhiyun 			cmd_data->rssi_attr.rssi_middle_5g_val;
2017*4882a593Smuzhiyun 	} else {
2018*4882a593Smuzhiyun 		rssi_thld->rssi_mid_5g = NAN_DEF_RSSI_MID;
2019*4882a593Smuzhiyun 	}
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_CONFIG) {
2022*4882a593Smuzhiyun 		rssi_thld->rssi_close_2g =
2023*4882a593Smuzhiyun 			cmd_data->rssi_attr.rssi_close_2dot4g_val;
2024*4882a593Smuzhiyun 	} else {
2025*4882a593Smuzhiyun 		rssi_thld->rssi_close_2g = NAN_DEF_RSSI_CLOSE;
2026*4882a593Smuzhiyun 	}
2027*4882a593Smuzhiyun 
2028*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_5G_CONFIG) {
2029*4882a593Smuzhiyun 		rssi_thld->rssi_close_5g =
2030*4882a593Smuzhiyun 			cmd_data->rssi_attr.rssi_close_5g_val;
2031*4882a593Smuzhiyun 	} else {
2032*4882a593Smuzhiyun 		rssi_thld->rssi_close_5g = NAN_DEF_RSSI_CLOSE;
2033*4882a593Smuzhiyun 	}
2034*4882a593Smuzhiyun 
2035*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD);
2036*4882a593Smuzhiyun 	sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_thld));
2037*4882a593Smuzhiyun 	sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2040*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
2041*4882a593Smuzhiyun 
2042*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2043*4882a593Smuzhiyun 	return ret;
2044*4882a593Smuzhiyun }
2045*4882a593Smuzhiyun 
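/*
 * Validate a 5 GHz social channel by querying "per_chan_info" and checking
 * the WL_CHAN_VALID_HW / WL_CHAN_VALID_SW bits for the current locale.
 */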
2046*4882a593Smuzhiyun static int
2047*4882a593Smuzhiyun check_for_valid_5gchan(struct net_device *ndev, uint8 chan)
2048*4882a593Smuzhiyun {
2049*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2050*4882a593Smuzhiyun 	uint bitmap;
2051*4882a593Smuzhiyun 	u8 ioctl_buf[WLC_IOCTL_SMLEN];
2052*4882a593Smuzhiyun 	uint32 chanspec_arg;
2053*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun 	chanspec_arg = CH20MHZ_CHSPEC(chan);
2056*4882a593Smuzhiyun 	chanspec_arg = wl_chspec_host_to_driver(chanspec_arg);
2057*4882a593Smuzhiyun 	bzero(ioctl_buf, WLC_IOCTL_SMLEN);
2058*4882a593Smuzhiyun 	ret = wldev_iovar_getbuf(ndev, "per_chan_info",
2059*4882a593Smuzhiyun 			(void *)&chanspec_arg, sizeof(chanspec_arg),
2060*4882a593Smuzhiyun 			ioctl_buf, WLC_IOCTL_SMLEN, NULL);
2061*4882a593Smuzhiyun 	if (ret != BCME_OK) {
2062*4882a593Smuzhiyun 		WL_ERR(("Chaninfo for channel = %d, error %d\n", chan, ret));
2063*4882a593Smuzhiyun 		goto exit;
2064*4882a593Smuzhiyun 	}
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun 	bitmap = dtoh32(*(uint *)ioctl_buf);
2067*4882a593Smuzhiyun 	if (!(bitmap & WL_CHAN_VALID_HW)) {
2068*4882a593Smuzhiyun 		WL_ERR(("Invalid channel\n"));
2069*4882a593Smuzhiyun 		ret = BCME_BADCHAN;
2070*4882a593Smuzhiyun 		goto exit;
2071*4882a593Smuzhiyun 	}
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun 	if (!(bitmap & WL_CHAN_VALID_SW)) {
2074*4882a593Smuzhiyun 		WL_ERR(("Not supported in current locale\n"));
2075*4882a593Smuzhiyun 		ret = BCME_BADCHAN;
2076*4882a593Smuzhiyun 		goto exit;
2077*4882a593Smuzhiyun 	}
2078*4882a593Smuzhiyun exit:
2079*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2080*4882a593Smuzhiyun 	return ret;
2081*4882a593Smuzhiyun }
2082*4882a593Smuzhiyun 
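/*
 * Append the social channel configuration. If the requested 5 GHz channel
 * is not valid in the current locale, fall back to the secondary default
 * and, failing that, continue with 2.4 GHz-only operation.
 */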
2083*4882a593Smuzhiyun static int
2084*4882a593Smuzhiyun wl_cfgnan_set_nan_soc_chans(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
2085*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
2086*4882a593Smuzhiyun {
2087*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2088*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2089*4882a593Smuzhiyun 	wl_nan_social_channels_t *soc_chans = NULL;
2090*4882a593Smuzhiyun 	uint16 subcmd_len;
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2095*4882a593Smuzhiyun 	soc_chans =
2096*4882a593Smuzhiyun 		(wl_nan_social_channels_t *)sub_cmd->data;
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2099*4882a593Smuzhiyun 			sizeof(*soc_chans), &subcmd_len);
2100*4882a593Smuzhiyun 	if (unlikely(ret)) {
2101*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2102*4882a593Smuzhiyun 		return ret;
2103*4882a593Smuzhiyun 	}
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN);
2106*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
2107*4882a593Smuzhiyun 		sizeof(*soc_chans);
2108*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2109*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_2G_CHAN_CONFIG) {
2110*4882a593Smuzhiyun 		soc_chans->soc_chan_2g = cmd_data->chanspec[1];
2111*4882a593Smuzhiyun 	} else {
2112*4882a593Smuzhiyun 		soc_chans->soc_chan_2g = NAN_DEF_SOCIAL_CHAN_2G;
2113*4882a593Smuzhiyun 	}
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 	if (cmd_data->support_5g) {
2116*4882a593Smuzhiyun 		if (nan_attr_mask & NAN_ATTR_5G_CHAN_CONFIG) {
2117*4882a593Smuzhiyun 			soc_chans->soc_chan_5g = cmd_data->chanspec[2];
2118*4882a593Smuzhiyun 		} else {
2119*4882a593Smuzhiyun 			soc_chans->soc_chan_5g = NAN_DEF_SOCIAL_CHAN_5G;
2120*4882a593Smuzhiyun 		}
2121*4882a593Smuzhiyun 		ret = check_for_valid_5gchan(ndev, soc_chans->soc_chan_5g);
2122*4882a593Smuzhiyun 		if (ret != BCME_OK) {
2123*4882a593Smuzhiyun 			ret = check_for_valid_5gchan(ndev, NAN_DEF_SEC_SOCIAL_CHAN_5G);
2124*4882a593Smuzhiyun 			if (ret == BCME_OK) {
2125*4882a593Smuzhiyun 				soc_chans->soc_chan_5g = NAN_DEF_SEC_SOCIAL_CHAN_5G;
2126*4882a593Smuzhiyun 			} else {
2127*4882a593Smuzhiyun 				soc_chans->soc_chan_5g = 0;
2128*4882a593Smuzhiyun 				ret = BCME_OK;
2129*4882a593Smuzhiyun 				WL_ERR(("Current locale doesn't support 5G op, "
2130*4882a593Smuzhiyun 					"continuing with 2G only operation\n"));
2131*4882a593Smuzhiyun 			}
2132*4882a593Smuzhiyun 		}
2133*4882a593Smuzhiyun 	} else {
2134*4882a593Smuzhiyun 		WL_DBG(("5G support is disabled\n"));
2135*4882a593Smuzhiyun 	}
2136*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2137*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2140*4882a593Smuzhiyun 	return ret;
2141*4882a593Smuzhiyun }
2142*4882a593Smuzhiyun 
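/*
 * Build and fire a standalone WL_NAN_CMD_CFG_SCAN_PARAMS set for the given
 * band index, applying the configured dwell time (plus
 * NAN_SCAN_DWELL_TIME_DELTA_MS) and scan period when the corresponding
 * attribute bits are set.
 */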
2143*4882a593Smuzhiyun static int
2144*4882a593Smuzhiyun wl_cfgnan_set_nan_scan_params(struct net_device *ndev, struct bcm_cfg80211 *cfg,
2145*4882a593Smuzhiyun 	nan_config_cmd_data_t *cmd_data, uint8 band_index, uint32 nan_attr_mask)
2146*4882a593Smuzhiyun {
2147*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
2148*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2149*4882a593Smuzhiyun 	uint16 nan_iov_start, nan_iov_end;
2150*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
2151*4882a593Smuzhiyun 	uint16 subcmd_len;
2152*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2153*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
2154*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
2155*4882a593Smuzhiyun 	wl_nan_scan_params_t *scan_params = NULL;
2156*4882a593Smuzhiyun 	uint32 status;
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
2161*4882a593Smuzhiyun 	if (!nan_buf) {
2162*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
2163*4882a593Smuzhiyun 		ret = BCME_NOMEM;
2164*4882a593Smuzhiyun 		goto fail;
2165*4882a593Smuzhiyun 	}
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
2168*4882a593Smuzhiyun 	if (!nan_iov_data) {
2169*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
2170*4882a593Smuzhiyun 		ret = BCME_NOMEM;
2171*4882a593Smuzhiyun 		goto fail;
2172*4882a593Smuzhiyun 	}
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
2175*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
2176*4882a593Smuzhiyun 	nan_buf->count = 0;
2177*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
2178*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
2179*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2182*4882a593Smuzhiyun 			sizeof(*scan_params), &subcmd_len);
2183*4882a593Smuzhiyun 	if (unlikely(ret)) {
2184*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2185*4882a593Smuzhiyun 		goto fail;
2186*4882a593Smuzhiyun 	}
2187*4882a593Smuzhiyun 	scan_params = (wl_nan_scan_params_t *)sub_cmd->data;
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_SCAN_PARAMS);
2190*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*scan_params);
2191*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2192*4882a593Smuzhiyun 
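	/* band_index 0 programs the 2G scan parameter set; a non-zero index programs
	 * 5G. Caller-supplied dwell times are padded by NAN_SCAN_DWELL_TIME_DELTA_MS
	 * before being handed to firmware, and fields left at zero fall back to the
	 * firmware defaults noted in the comments below.
	 */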
2193*4882a593Smuzhiyun 	if (!band_index) {
2194*4882a593Smuzhiyun 		/* Fw default: Dwell time for 2G is 210 */
2195*4882a593Smuzhiyun 		if ((nan_attr_mask & NAN_ATTR_2G_DWELL_TIME_CONFIG) &&
2196*4882a593Smuzhiyun 			cmd_data->dwell_time[0]) {
2197*4882a593Smuzhiyun 			scan_params->dwell_time = cmd_data->dwell_time[0] +
2198*4882a593Smuzhiyun 				NAN_SCAN_DWELL_TIME_DELTA_MS;
2199*4882a593Smuzhiyun 		}
2200*4882a593Smuzhiyun 		/* Fw default: Scan period for 2G is 10 */
2201*4882a593Smuzhiyun 		if (nan_attr_mask & NAN_ATTR_2G_SCAN_PERIOD_CONFIG) {
2202*4882a593Smuzhiyun 			scan_params->scan_period = cmd_data->scan_period[0];
2203*4882a593Smuzhiyun 		}
2204*4882a593Smuzhiyun 	} else {
2205*4882a593Smuzhiyun 		if ((nan_attr_mask & NAN_ATTR_5G_DWELL_TIME_CONFIG) &&
2206*4882a593Smuzhiyun 			cmd_data->dwell_time[1]) {
2207*4882a593Smuzhiyun 			scan_params->dwell_time = cmd_data->dwell_time[1] +
2208*4882a593Smuzhiyun 				NAN_SCAN_DWELL_TIME_DELTA_MS;
2209*4882a593Smuzhiyun 		}
2210*4882a593Smuzhiyun 		if (nan_attr_mask & NAN_ATTR_5G_SCAN_PERIOD_CONFIG) {
2211*4882a593Smuzhiyun 			scan_params->scan_period = cmd_data->scan_period[1];
2212*4882a593Smuzhiyun 		}
2213*4882a593Smuzhiyun 	}
2214*4882a593Smuzhiyun 	scan_params->band_index = band_index;
2215*4882a593Smuzhiyun 	nan_buf->is_set = true;
2216*4882a593Smuzhiyun 	nan_buf->count++;
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 	/* Reduce the iov_len size by subcmd_len */
2219*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2220*4882a593Smuzhiyun 	nan_iov_end = nan_iov_data->nan_iov_len;
2221*4882a593Smuzhiyun 	nan_buf_size = (nan_iov_start - nan_iov_end);
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
2224*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
2225*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
2226*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
2227*4882a593Smuzhiyun 		WL_ERR(("set nan scan params failed ret %d status %d \n", ret, status));
2228*4882a593Smuzhiyun 		goto fail;
2229*4882a593Smuzhiyun 	}
2230*4882a593Smuzhiyun 	WL_DBG(("set nan scan params successful\n"));
2231*4882a593Smuzhiyun fail:
2232*4882a593Smuzhiyun 	if (nan_buf) {
2233*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
2234*4882a593Smuzhiyun 	}
2235*4882a593Smuzhiyun 	if (nan_iov_data) {
2236*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
2237*4882a593Smuzhiyun 	}
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2240*4882a593Smuzhiyun 	return ret;
2241*4882a593Smuzhiyun }
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun static uint16
2244*4882a593Smuzhiyun wl_cfgnan_gen_rand_cluster_id(uint16 low_val, uint16 high_val)
2245*4882a593Smuzhiyun {
2246*4882a593Smuzhiyun 	uint16 random_id;
2247*4882a593Smuzhiyun 	ulong random_seed;
2248*4882a593Smuzhiyun 
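	/* Draw a 16-bit value from [low_val, high_val] using RANDOM_BYTES as the
	 * seed (a simple modulo reduction, so the distribution is only roughly
	 * uniform, which is sufficient for cluster id selection).
	 */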
2249*4882a593Smuzhiyun 	/* In the degenerate case (low >= high), fall back to the cluster_high value */
2250*4882a593Smuzhiyun 	if (low_val >= high_val) {
2252*4882a593Smuzhiyun 		random_id = high_val;
2253*4882a593Smuzhiyun 	} else {
2254*4882a593Smuzhiyun 		RANDOM_BYTES(&random_seed, sizeof(random_seed));
2255*4882a593Smuzhiyun 		random_id = (uint16)((random_seed % ((high_val + 1) -
2256*4882a593Smuzhiyun 				low_val)) + low_val);
2257*4882a593Smuzhiyun 	}
2258*4882a593Smuzhiyun 	return random_id;
2259*4882a593Smuzhiyun }
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun static int
2262*4882a593Smuzhiyun wl_cfgnan_set_cluster_id(nan_config_cmd_data_t *cmd_data,
2263*4882a593Smuzhiyun 		wl_nan_iov_t *nan_iov_data)
2264*4882a593Smuzhiyun {
2265*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2266*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2267*4882a593Smuzhiyun 	uint16 subcmd_len;
2268*4882a593Smuzhiyun 
2269*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2274*4882a593Smuzhiyun 			(sizeof(cmd_data->clus_id) - sizeof(uint8)), &subcmd_len);
2275*4882a593Smuzhiyun 	if (unlikely(ret)) {
2276*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2277*4882a593Smuzhiyun 		return ret;
2278*4882a593Smuzhiyun 	}
2279*4882a593Smuzhiyun 
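	/* NAN cluster IDs use the fixed 50:6F:9A:01 prefix (Wi-Fi Alliance OUI plus
	 * the NAN cluster octet); only the last two octets are randomized within the
	 * [cluster_low, cluster_high] range requested by the framework.
	 */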
2280*4882a593Smuzhiyun 	cmd_data->clus_id.octet[0] = 0x50;
2281*4882a593Smuzhiyun 	cmd_data->clus_id.octet[1] = 0x6F;
2282*4882a593Smuzhiyun 	cmd_data->clus_id.octet[2] = 0x9A;
2283*4882a593Smuzhiyun 	cmd_data->clus_id.octet[3] = 0x01;
2284*4882a593Smuzhiyun 	hton16_ua_store(wl_cfgnan_gen_rand_cluster_id(cmd_data->cluster_low,
2285*4882a593Smuzhiyun 			cmd_data->cluster_high), &cmd_data->clus_id.octet[4]);
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	WL_TRACE(("cluster_id = " MACDBG "\n", MAC2STRDBG(cmd_data->clus_id.octet)));
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
2290*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->clus_id);
2291*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2292*4882a593Smuzhiyun 	ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->clus_id),
2293*4882a593Smuzhiyun 			(uint8 *)&cmd_data->clus_id,
2294*4882a593Smuzhiyun 			sizeof(cmd_data->clus_id));
2295*4882a593Smuzhiyun 	if (ret != BCME_OK) {
2296*4882a593Smuzhiyun 		WL_ERR(("Failed to copy clus id\n"));
2297*4882a593Smuzhiyun 		return ret;
2298*4882a593Smuzhiyun 	}
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2301*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2304*4882a593Smuzhiyun 	return ret;
2305*4882a593Smuzhiyun }
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun static int
2308*4882a593Smuzhiyun wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t *cmd_data,
2309*4882a593Smuzhiyun 		wl_nan_iov_t *nan_iov_data)
2310*4882a593Smuzhiyun {
2311*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2312*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2313*4882a593Smuzhiyun 	wl_nan_hop_count_t *hop_limit = NULL;
2314*4882a593Smuzhiyun 	uint16 subcmd_len;
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2319*4882a593Smuzhiyun 	hop_limit = (wl_nan_hop_count_t *)sub_cmd->data;
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2322*4882a593Smuzhiyun 			sizeof(*hop_limit), &subcmd_len);
2323*4882a593Smuzhiyun 	if (unlikely(ret)) {
2324*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2325*4882a593Smuzhiyun 		return ret;
2326*4882a593Smuzhiyun 	}
2327*4882a593Smuzhiyun 
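	/* The hop count limit bounds how many hops from the anchor master this
	 * device will sync through (presumably enforced by firmware during master
	 * election and sync propagation).
	 */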
2328*4882a593Smuzhiyun 	*hop_limit = cmd_data->hop_count_limit;
2329*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_HOP_LIMIT);
2330*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*hop_limit);
2331*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2334*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2337*4882a593Smuzhiyun 	return ret;
2338*4882a593Smuzhiyun }
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun static int
2341*4882a593Smuzhiyun wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t *cmd_data,
2342*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
2343*4882a593Smuzhiyun {
2344*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2345*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2346*4882a593Smuzhiyun 	wl_nan_sid_beacon_control_t *sid_beacon = NULL;
2347*4882a593Smuzhiyun 	uint16 subcmd_len;
2348*4882a593Smuzhiyun 
2349*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2350*4882a593Smuzhiyun 
2351*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2352*4882a593Smuzhiyun 
2353*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2354*4882a593Smuzhiyun 			sizeof(*sid_beacon), &subcmd_len);
2355*4882a593Smuzhiyun 	if (unlikely(ret)) {
2356*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2357*4882a593Smuzhiyun 		return ret;
2358*4882a593Smuzhiyun 	}
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun 	sid_beacon = (wl_nan_sid_beacon_control_t *)sub_cmd->data;
2361*4882a593Smuzhiyun 	sid_beacon->sid_enable = cmd_data->sid_beacon.sid_enable;
2362*4882a593Smuzhiyun 	/* Need to have separate flag for sub beacons
2363*4882a593Smuzhiyun 	 * sid_beacon->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
2364*4882a593Smuzhiyun 	 */
2365*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) {
2366*4882a593Smuzhiyun 		/* Limit for number of publish SIDs to be included in Beacons */
2367*4882a593Smuzhiyun 		sid_beacon->sid_count = cmd_data->sid_beacon.sid_count;
2368*4882a593Smuzhiyun 	}
2369*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG) {
2370*4882a593Smuzhiyun 		/* Limit for number of subscribe SIDs to be included in Beacons */
2371*4882a593Smuzhiyun 		sid_beacon->sub_sid_count = cmd_data->sid_beacon.sub_sid_count;
2372*4882a593Smuzhiyun 	}
2373*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_SID_BEACON);
2374*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
2375*4882a593Smuzhiyun 		sizeof(*sid_beacon);
2376*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2379*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
2380*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2381*4882a593Smuzhiyun 	return ret;
2382*4882a593Smuzhiyun }
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun static int
2385*4882a593Smuzhiyun wl_cfgnan_set_nan_oui(nan_config_cmd_data_t *cmd_data,
2386*4882a593Smuzhiyun 		wl_nan_iov_t *nan_iov_data)
2387*4882a593Smuzhiyun {
2388*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2389*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2390*4882a593Smuzhiyun 	uint16 subcmd_len;
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2397*4882a593Smuzhiyun 			sizeof(cmd_data->nan_oui), &subcmd_len);
2398*4882a593Smuzhiyun 	if (unlikely(ret)) {
2399*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2400*4882a593Smuzhiyun 		return ret;
2401*4882a593Smuzhiyun 	}
2402*4882a593Smuzhiyun 
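	/* Allows the framework to override the OUI carried in NAN frames
	 * (typically used for vendor-specific or test configurations).
	 */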
2403*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
2404*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->nan_oui);
2405*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2406*4882a593Smuzhiyun 	ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->nan_oui),
2407*4882a593Smuzhiyun 			(uint32 *)&cmd_data->nan_oui,
2408*4882a593Smuzhiyun 			sizeof(cmd_data->nan_oui));
2409*4882a593Smuzhiyun 	if (ret != BCME_OK) {
2410*4882a593Smuzhiyun 		WL_ERR(("Failed to copy nan oui\n"));
2411*4882a593Smuzhiyun 		return ret;
2412*4882a593Smuzhiyun 	}
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2415*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
2416*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2417*4882a593Smuzhiyun 	return ret;
2418*4882a593Smuzhiyun }
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun static int
2421*4882a593Smuzhiyun wl_cfgnan_set_awake_dws(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
2422*4882a593Smuzhiyun 		wl_nan_iov_t *nan_iov_data, struct bcm_cfg80211 *cfg, uint32 nan_attr_mask)
2423*4882a593Smuzhiyun {
2424*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2425*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2426*4882a593Smuzhiyun 	wl_nan_awake_dws_t *awake_dws = NULL;
2427*4882a593Smuzhiyun 	uint16 subcmd_len;
2428*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 	sub_cmd =
2431*4882a593Smuzhiyun 		(bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2432*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2433*4882a593Smuzhiyun 			sizeof(*awake_dws), &subcmd_len);
2434*4882a593Smuzhiyun 	if (unlikely(ret)) {
2435*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2436*4882a593Smuzhiyun 		return ret;
2437*4882a593Smuzhiyun 	}
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun 	awake_dws = (wl_nan_awake_dws_t *)sub_cmd->data;
2440*4882a593Smuzhiyun 
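	/* dw_interval_{2g,5g} control how often the device stays awake for NAN
	 * discovery windows on each band; a value of 0 from the framework is
	 * translated to the firmware default (NAN_SYNC_DEF_AWAKE_DW) below.
	 */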
2441*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_2G_DW_CONFIG) {
2442*4882a593Smuzhiyun 		awake_dws->dw_interval_2g = cmd_data->awake_dws.dw_interval_2g;
2443*4882a593Smuzhiyun 		if (!awake_dws->dw_interval_2g) {
2444*4882a593Smuzhiyun 			/* Set 2G awake dw value to fw default value 1 */
2445*4882a593Smuzhiyun 			awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
2446*4882a593Smuzhiyun 		}
2447*4882a593Smuzhiyun 	} else {
2448*4882a593Smuzhiyun 		/* Set 2G awake dw value to fw default value 1 */
2449*4882a593Smuzhiyun 		awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
2450*4882a593Smuzhiyun 	}
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun 	if (cfg->nancfg->support_5g) {
2453*4882a593Smuzhiyun 		if (nan_attr_mask & NAN_ATTR_5G_DW_CONFIG) {
2454*4882a593Smuzhiyun 			awake_dws->dw_interval_5g = cmd_data->awake_dws.dw_interval_5g;
2455*4882a593Smuzhiyun 			/* config sync/discovery beacons on 5G band */
2456*4882a593Smuzhiyun 			ret = wl_cfgnan_config_control_flag(ndev, cfg,
2457*4882a593Smuzhiyun 					WL_NAN_CTRL_DISC_BEACON_TX_5G |
2458*4882a593Smuzhiyun 					WL_NAN_CTRL_SYNC_BEACON_TX_5G,
2459*4882a593Smuzhiyun 					0, WL_NAN_CMD_CFG_NAN_CONFIG,
2460*4882a593Smuzhiyun 					&(cmd_data->status),
2461*4882a593Smuzhiyun 					awake_dws->dw_interval_5g);
2462*4882a593Smuzhiyun 			if (unlikely(ret) || unlikely(cmd_data->status)) {
2463*4882a593Smuzhiyun 				WL_ERR((" nan control set config handler, ret = %d"
2464*4882a593Smuzhiyun 					" status = %d \n", ret, cmd_data->status));
2465*4882a593Smuzhiyun 				goto fail;
2466*4882a593Smuzhiyun 			}
2467*4882a593Smuzhiyun 		} else {
2468*4882a593Smuzhiyun 			/* Set 5G awake dw value to fw default value 1 */
2469*4882a593Smuzhiyun 			awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
2470*4882a593Smuzhiyun 		}
2471*4882a593Smuzhiyun 	}
2472*4882a593Smuzhiyun 
2473*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_AWAKE_DWS);
2474*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
2475*4882a593Smuzhiyun 		sizeof(*awake_dws);
2476*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2479*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
2480*4882a593Smuzhiyun 
2481*4882a593Smuzhiyun fail:
2482*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2483*4882a593Smuzhiyun 	return ret;
2484*4882a593Smuzhiyun }
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun int
2487*4882a593Smuzhiyun wl_cfgnan_set_enable_merge(struct net_device *ndev,
2488*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, uint8 enable, uint32 *status)
2489*4882a593Smuzhiyun {
2490*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
2491*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2492*4882a593Smuzhiyun 	uint16 nan_iov_start, nan_iov_end;
2493*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
2494*4882a593Smuzhiyun 	uint16 subcmd_len;
2495*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2496*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
2497*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
2498*4882a593Smuzhiyun 	wl_nan_merge_enable_t merge_enable;
2499*4882a593Smuzhiyun 	uint8 size_of_iov;
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2502*4882a593Smuzhiyun 
2503*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
2504*4882a593Smuzhiyun 	if (!nan_buf) {
2505*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
2506*4882a593Smuzhiyun 		ret = BCME_NOMEM;
2507*4882a593Smuzhiyun 		goto fail;
2508*4882a593Smuzhiyun 	}
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
2511*4882a593Smuzhiyun 	if (!nan_iov_data) {
2512*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
2513*4882a593Smuzhiyun 		ret = BCME_NOMEM;
2514*4882a593Smuzhiyun 		goto fail;
2515*4882a593Smuzhiyun 	}
2516*4882a593Smuzhiyun 
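	/* 'merge enable' controls NAN cluster merging in firmware, i.e. whether this
	 * device may leave its current cluster to join a better one it hears
	 * (assumed semantics of WL_NAN_CMD_ELECTION_MERGE).
	 */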
2517*4882a593Smuzhiyun 	merge_enable = (wl_nan_merge_enable_t)enable;
2518*4882a593Smuzhiyun 	size_of_iov = sizeof(wl_nan_merge_enable_t);
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
2521*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
2522*4882a593Smuzhiyun 	nan_buf->count = 0;
2523*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
2524*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
2525*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2528*4882a593Smuzhiyun 		size_of_iov, &subcmd_len);
2529*4882a593Smuzhiyun 	if (unlikely(ret)) {
2530*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2531*4882a593Smuzhiyun 		goto fail;
2532*4882a593Smuzhiyun 	}
2533*4882a593Smuzhiyun 
2534*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_MERGE);
2535*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
2536*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	/* Reduce the iov_len size by subcmd_len */
2539*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2540*4882a593Smuzhiyun 	nan_iov_end = nan_iov_data->nan_iov_len;
2541*4882a593Smuzhiyun 	nan_buf_size = (nan_iov_start - nan_iov_end);
2542*4882a593Smuzhiyun 
2543*4882a593Smuzhiyun 	(void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
2544*4882a593Smuzhiyun 		&merge_enable, size_of_iov);
2545*4882a593Smuzhiyun 
2546*4882a593Smuzhiyun 	nan_buf->is_set = true;
2547*4882a593Smuzhiyun 	nan_buf->count++;
2548*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
2549*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
2550*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
2551*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(*status)) {
2552*4882a593Smuzhiyun 		WL_ERR(("Merge enable %d failed ret %d status %d \n", merge_enable, ret, *status));
2553*4882a593Smuzhiyun 		goto fail;
2554*4882a593Smuzhiyun 	}
2555*4882a593Smuzhiyun fail:
2556*4882a593Smuzhiyun 	if (nan_buf) {
2557*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
2558*4882a593Smuzhiyun 	}
2559*4882a593Smuzhiyun 	if (nan_iov_data) {
2560*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
2561*4882a593Smuzhiyun 	}
2562*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2563*4882a593Smuzhiyun 	return ret;
2564*4882a593Smuzhiyun }
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun static int
2567*4882a593Smuzhiyun wl_cfgnan_set_disc_beacon_interval_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
2568*4882a593Smuzhiyun 	wl_nan_disc_bcn_interval_t disc_beacon_interval)
2569*4882a593Smuzhiyun {
2570*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
2571*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2572*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
2573*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
2574*4882a593Smuzhiyun 	uint32 status;
2575*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
2576*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
2577*4882a593Smuzhiyun 	uint16 subcmd_len;
2578*4882a593Smuzhiyun 	uint8 size_of_iov;
2579*4882a593Smuzhiyun 
2580*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2581*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
2584*4882a593Smuzhiyun 	if (!nan_buf) {
2585*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
2586*4882a593Smuzhiyun 		ret = BCME_NOMEM;
2587*4882a593Smuzhiyun 		goto fail;
2588*4882a593Smuzhiyun 	}
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun 	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
2591*4882a593Smuzhiyun 	if (!nan_iov_data) {
2592*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
2593*4882a593Smuzhiyun 		ret = BCME_NOMEM;
2594*4882a593Smuzhiyun 		goto fail;
2595*4882a593Smuzhiyun 	}
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun 	size_of_iov = sizeof(wl_nan_disc_bcn_interval_t);
2598*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
2599*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
2600*4882a593Smuzhiyun 	nan_buf->count = 0;
2601*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
2602*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
2605*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
2606*4882a593Smuzhiyun 			size_of_iov, &subcmd_len);
2607*4882a593Smuzhiyun 	if (unlikely(ret)) {
2608*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
2609*4882a593Smuzhiyun 		goto fail;
2610*4882a593Smuzhiyun 	}
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 	/* Choose the default discovery beacon interval if the value is zero */
2613*4882a593Smuzhiyun 	if (!disc_beacon_interval) {
2614*4882a593Smuzhiyun 		disc_beacon_interval = cfg->nancfg->support_5g ? NAN_DISC_BCN_INTERVAL_5G_DEF:
2615*4882a593Smuzhiyun 			NAN_DISC_BCN_INTERVAL_2G_DEF;
2616*4882a593Smuzhiyun 	}
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 	/* Fill the sub_command block */
2619*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_BCN_INTERVAL);
2620*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
2621*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
2622*4882a593Smuzhiyun 	ret = memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
2623*4882a593Smuzhiyun 			&disc_beacon_interval, size_of_iov);
2624*4882a593Smuzhiyun 	if (ret != BCME_OK) {
2625*4882a593Smuzhiyun 		WL_ERR(("Failed to copy disc_beacon_interval\n"));
2626*4882a593Smuzhiyun 		goto fail;
2627*4882a593Smuzhiyun 	}
2628*4882a593Smuzhiyun 
2629*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
2630*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf += subcmd_len;
2631*4882a593Smuzhiyun 
2632*4882a593Smuzhiyun 	nan_buf->count++;
2633*4882a593Smuzhiyun 	nan_buf->is_set = true;
2634*4882a593Smuzhiyun 	nan_buf_size -= nan_iov_data->nan_iov_len;
2635*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
2636*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
2637*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
2638*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
2639*4882a593Smuzhiyun 		WL_ERR(("Failed to set disc beacon interval, ret = %d status = %d\n",
2640*4882a593Smuzhiyun 			ret, status));
2641*4882a593Smuzhiyun 		goto fail;
2642*4882a593Smuzhiyun 	}
2643*4882a593Smuzhiyun 
2644*4882a593Smuzhiyun fail:
2645*4882a593Smuzhiyun 	if (nan_buf) {
2646*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
2647*4882a593Smuzhiyun 	}
2648*4882a593Smuzhiyun 	if (nan_iov_data) {
2649*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
2650*4882a593Smuzhiyun 	}
2651*4882a593Smuzhiyun 
2652*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
2653*4882a593Smuzhiyun 	NAN_DBG_EXIT();
2654*4882a593Smuzhiyun 	return ret;
2655*4882a593Smuzhiyun }
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun void
2658*4882a593Smuzhiyun wl_cfgnan_immediate_nan_disable_pending(struct bcm_cfg80211 *cfg)
2659*4882a593Smuzhiyun {
2660*4882a593Smuzhiyun 	if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
2661*4882a593Smuzhiyun 		WL_DBG(("Do immediate nan_disable work\n"));
2662*4882a593Smuzhiyun 		DHD_NAN_WAKE_UNLOCK(cfg->pub);
2663*4882a593Smuzhiyun 		if (cancel_delayed_work(&cfg->nancfg->nan_disable)) {
2664*4882a593Smuzhiyun 			schedule_delayed_work(&cfg->nancfg->nan_disable, 0);
2665*4882a593Smuzhiyun 		}
2666*4882a593Smuzhiyun 	}
2667*4882a593Smuzhiyun }
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun int
2670*4882a593Smuzhiyun wl_cfgnan_check_nan_disable_pending(struct bcm_cfg80211 *cfg,
2671*4882a593Smuzhiyun 	bool force_disable, bool is_sync_reqd)
2672*4882a593Smuzhiyun {
2673*4882a593Smuzhiyun 	int ret = BCME_OK;
2674*4882a593Smuzhiyun 	struct net_device *ndev = NULL;
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun 	if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
2677*4882a593Smuzhiyun 		WL_DBG(("Cancel nan_disable work\n"));
2678*4882a593Smuzhiyun 		/*
2679*4882a593Smuzhiyun 		 * Nan gets disabled from dhd_stop(dev_close) and other framework contexts.
2680*4882a593Smuzhiyun 		 * Can't use cancel_work_sync from the dhd_stop context for
2681*4882a593Smuzhiyun 		 * wl_cfgnan_delayed_disable since both contexts use
2682*4882a593Smuzhiyun 		 * rtnl_lock, resulting in deadlock. If dhd_stop gets invoked,
2683*4882a593Smuzhiyun 		 * rely on the dhd_stop context to do the nan cleanup work and
2684*4882a593Smuzhiyun 		 * simply return from the delayed WQ based on a state check.
2685*4882a593Smuzhiyun 		 */
2686*4882a593Smuzhiyun 
2687*4882a593Smuzhiyun 		DHD_NAN_WAKE_UNLOCK(cfg->pub);
2688*4882a593Smuzhiyun 
2689*4882a593Smuzhiyun 		if (is_sync_reqd == true) {
2690*4882a593Smuzhiyun 			cancel_delayed_work_sync(&cfg->nancfg->nan_disable);
2691*4882a593Smuzhiyun 		} else {
2692*4882a593Smuzhiyun 			cancel_delayed_work(&cfg->nancfg->nan_disable);
2693*4882a593Smuzhiyun 		}
2694*4882a593Smuzhiyun 		force_disable = true;
2695*4882a593Smuzhiyun 	}
2696*4882a593Smuzhiyun 	if ((force_disable == true) && (cfg->nancfg->nan_enable == true)) {
2697*4882a593Smuzhiyun 		ret = wl_cfgnan_disable(cfg);
2698*4882a593Smuzhiyun 		if (ret != BCME_OK) {
2699*4882a593Smuzhiyun 			WL_ERR(("failed to disable nan, error[%d]\n", ret));
2700*4882a593Smuzhiyun 		}
2701*4882a593Smuzhiyun 		/* Intentional fall through to cleanup framework */
2702*4882a593Smuzhiyun 		if (cfg->nancfg->notify_user == true) {
2703*4882a593Smuzhiyun 			ndev = bcmcfg_to_prmry_ndev(cfg);
2704*4882a593Smuzhiyun 			wl_cfgvendor_nan_send_async_disable_resp(ndev->ieee80211_ptr);
2705*4882a593Smuzhiyun 		}
2706*4882a593Smuzhiyun 	}
2707*4882a593Smuzhiyun 	return ret;
2708*4882a593Smuzhiyun }
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun int
2711*4882a593Smuzhiyun wl_cfgnan_start_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
2712*4882a593Smuzhiyun 	nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
2713*4882a593Smuzhiyun {
2714*4882a593Smuzhiyun 	s32 ret = BCME_OK;
2715*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
2716*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
2717*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
2718*4882a593Smuzhiyun 	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
2719*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
2720*4882a593Smuzhiyun 	int i;
2721*4882a593Smuzhiyun 	s32 timeout = 0;
2722*4882a593Smuzhiyun 	nan_hal_capabilities_t capabilities;
2723*4882a593Smuzhiyun 	uint32 cfg_ctrl1_flags = 0;
2724*4882a593Smuzhiyun 	uint32 cfg_ctrl2_flags1 = 0;
2725*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	NAN_DBG_ENTER();
2728*4882a593Smuzhiyun 
2729*4882a593Smuzhiyun 	if (!dhdp->up) {
2730*4882a593Smuzhiyun 		WL_ERR(("bus is already down, hence blocking nan start\n"));
2731*4882a593Smuzhiyun 		return BCME_ERROR;
2732*4882a593Smuzhiyun 	}
2733*4882a593Smuzhiyun 
2734*4882a593Smuzhiyun 	/* Protect discovery creation. Ensure proper mutex precedence.
2735*4882a593Smuzhiyun 	 * If if_sync & nan_mutex come together in the same context, nan_mutex
2736*4882a593Smuzhiyun 	 * should follow if_sync.
2737*4882a593Smuzhiyun 	 */
2738*4882a593Smuzhiyun 	mutex_lock(&cfg->if_sync);
2739*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
2740*4882a593Smuzhiyun 
2741*4882a593Smuzhiyun #ifdef WL_IFACE_MGMT
2742*4882a593Smuzhiyun 	if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN_NMI)) != BCME_OK) {
2743*4882a593Smuzhiyun 		WL_ERR(("Conflicting iface is present, can't support nan\n"));
2744*4882a593Smuzhiyun 		NAN_MUTEX_UNLOCK();
2745*4882a593Smuzhiyun 		mutex_unlock(&cfg->if_sync);
2746*4882a593Smuzhiyun 		goto fail;
2747*4882a593Smuzhiyun 	}
2748*4882a593Smuzhiyun #endif /* WL_IFACE_MGMT */
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 	/* disable TDLS on NAN init  */
2751*4882a593Smuzhiyun 	wl_cfg80211_tdls_config(cfg, TDLS_STATE_NMI_CREATE, false);
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun 	WL_INFORM_MEM(("Initializing NAN\n"));
2754*4882a593Smuzhiyun 	ret = wl_cfgnan_init(cfg);
2755*4882a593Smuzhiyun 	if (ret != BCME_OK) {
2756*4882a593Smuzhiyun 		WL_ERR(("failed to initialize NAN[%d]\n", ret));
2757*4882a593Smuzhiyun 		NAN_MUTEX_UNLOCK();
2758*4882a593Smuzhiyun 		mutex_unlock(&cfg->if_sync);
2759*4882a593Smuzhiyun 		goto fail;
2760*4882a593Smuzhiyun 	}
2761*4882a593Smuzhiyun 
2762*4882a593Smuzhiyun 	ret = wl_cfgnan_get_ver(ndev, cfg);
2763*4882a593Smuzhiyun 	if (ret != BCME_OK) {
2764*4882a593Smuzhiyun 		WL_ERR(("failed to get Nan IOV version[%d]\n", ret));
2765*4882a593Smuzhiyun 		NAN_MUTEX_UNLOCK();
2766*4882a593Smuzhiyun 		mutex_unlock(&cfg->if_sync);
2767*4882a593Smuzhiyun 		goto fail;
2768*4882a593Smuzhiyun 	}
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	/* set nmi addr */
2771*4882a593Smuzhiyun 	ret = wl_cfgnan_set_if_addr(cfg);
2772*4882a593Smuzhiyun 	if (ret != BCME_OK) {
2773*4882a593Smuzhiyun 		WL_ERR(("Failed to set nmi address \n"));
2774*4882a593Smuzhiyun 		NAN_MUTEX_UNLOCK();
2775*4882a593Smuzhiyun 		mutex_unlock(&cfg->if_sync);
2776*4882a593Smuzhiyun 		goto fail;
2777*4882a593Smuzhiyun 	}
2778*4882a593Smuzhiyun 	nancfg->nan_event_recvd = false;
2779*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
2780*4882a593Smuzhiyun 	mutex_unlock(&cfg->if_sync);
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
2783*4882a593Smuzhiyun 	if (!nan_buf) {
2784*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
2785*4882a593Smuzhiyun 		ret = BCME_NOMEM;
2786*4882a593Smuzhiyun 		goto fail;
2787*4882a593Smuzhiyun 	}
2788*4882a593Smuzhiyun 
2789*4882a593Smuzhiyun 	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
2790*4882a593Smuzhiyun 	if (!nan_iov_data) {
2791*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
2792*4882a593Smuzhiyun 		ret = BCME_NOMEM;
2793*4882a593Smuzhiyun 		goto fail;
2794*4882a593Smuzhiyun 	}
2795*4882a593Smuzhiyun 
2796*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
2797*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
2798*4882a593Smuzhiyun 	nan_buf->count = 0;
2799*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
2800*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
2801*4882a593Smuzhiyun 
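	/* Everything below is packed as back-to-back sub-commands into a single
	 * batched NAN iovar buffer; nan_buf->count tracks how many sub-commands
	 * were added and the whole batch is issued with one ioctl once
	 * nan_buf->is_set is marked.
	 */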
2802*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG) {
2803*4882a593Smuzhiyun 		/* config sync/discovery beacons on 2G band */
2804*4882a593Smuzhiyun 		/* 2g is mandatory */
2805*4882a593Smuzhiyun 		if (!cmd_data->beacon_2g_val) {
2806*4882a593Smuzhiyun 			WL_ERR(("Invalid NAN config...2G is mandatory\n"));
2807*4882a593Smuzhiyun 			ret = BCME_BADARG;
			/* 2G beacons are mandatory; bail out so the error is not overwritten below */
			goto fail;
2808*4882a593Smuzhiyun 		}
2809*4882a593Smuzhiyun 		cfg_ctrl1_flags |= (WL_NAN_CTRL_DISC_BEACON_TX_2G | WL_NAN_CTRL_SYNC_BEACON_TX_2G);
2810*4882a593Smuzhiyun 	}
2811*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG) {
2812*4882a593Smuzhiyun 		/* config sync/discovery beacons on 5G band */
2813*4882a593Smuzhiyun 		cfg_ctrl1_flags |= (WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G);
2814*4882a593Smuzhiyun 	}
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun 	if (cmd_data->warmup_time) {
2817*4882a593Smuzhiyun 		ret = wl_cfgnan_warmup_time_handler(cmd_data, nan_iov_data);
2818*4882a593Smuzhiyun 		if (unlikely(ret)) {
2819*4882a593Smuzhiyun 			WL_ERR(("warm up time handler sub_cmd set failed\n"));
2820*4882a593Smuzhiyun 			goto fail;
2821*4882a593Smuzhiyun 		}
2822*4882a593Smuzhiyun 		nan_buf->count++;
2823*4882a593Smuzhiyun 	}
2824*4882a593Smuzhiyun 	/* setting master preference and random factor */
2825*4882a593Smuzhiyun 	ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data, nan_attr_mask);
2826*4882a593Smuzhiyun 	if (unlikely(ret)) {
2827*4882a593Smuzhiyun 		WL_ERR(("election_metric sub_cmd set failed\n"));
2828*4882a593Smuzhiyun 		goto fail;
2829*4882a593Smuzhiyun 	} else {
2830*4882a593Smuzhiyun 		nan_buf->count++;
2831*4882a593Smuzhiyun 	}
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun 	/* setting nan social channels */
2834*4882a593Smuzhiyun 	ret = wl_cfgnan_set_nan_soc_chans(ndev, cmd_data, nan_iov_data, nan_attr_mask);
2835*4882a593Smuzhiyun 	if (unlikely(ret)) {
2836*4882a593Smuzhiyun 		WL_ERR(("nan social channels set failed\n"));
2837*4882a593Smuzhiyun 		goto fail;
2838*4882a593Smuzhiyun 	} else {
2839*4882a593Smuzhiyun 		/* Store the 5g capability, which is required for avail chan config. */
2840*4882a593Smuzhiyun 		nancfg->support_5g = cmd_data->support_5g;
2841*4882a593Smuzhiyun 		nan_buf->count++;
2842*4882a593Smuzhiyun 	}
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 	if ((cmd_data->support_2g) && ((cmd_data->dwell_time[0]) ||
2845*4882a593Smuzhiyun 			(cmd_data->scan_period[0]))) {
2846*4882a593Smuzhiyun 		/* setting scan params */
2847*4882a593Smuzhiyun 		ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
2848*4882a593Smuzhiyun 		if (unlikely(ret)) {
2849*4882a593Smuzhiyun 			WL_ERR(("scan params set failed for 2g\n"));
2850*4882a593Smuzhiyun 			goto fail;
2851*4882a593Smuzhiyun 		}
2852*4882a593Smuzhiyun 	}
2853*4882a593Smuzhiyun 
2854*4882a593Smuzhiyun 	if ((cmd_data->support_5g) && ((cmd_data->dwell_time[1]) ||
2855*4882a593Smuzhiyun 			(cmd_data->scan_period[1]))) {
2856*4882a593Smuzhiyun 		/* setting scan params */
2857*4882a593Smuzhiyun 		ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data,
2858*4882a593Smuzhiyun 			cmd_data->support_5g, nan_attr_mask);
2859*4882a593Smuzhiyun 		if (unlikely(ret)) {
2860*4882a593Smuzhiyun 			WL_ERR(("scan params set failed for 5g\n"));
2861*4882a593Smuzhiyun 			goto fail;
2862*4882a593Smuzhiyun 		}
2863*4882a593Smuzhiyun 	}
2864*4882a593Smuzhiyun 
2865*4882a593Smuzhiyun 	/*
2866*4882a593Smuzhiyun 	 * A cluster_low value matching cluster_high indicates a request
2867*4882a593Smuzhiyun 	 * to join a cluster with that value.
2868*4882a593Smuzhiyun 	 * If the requested cluster is not found the
2869*4882a593Smuzhiyun 	 * device will start its own cluster
2870*4882a593Smuzhiyun 	 */
2871*4882a593Smuzhiyun 	/* For debug purposes, force merging to the configured cluster id only */
2872*4882a593Smuzhiyun 	if (cmd_data->cluster_low == cmd_data->cluster_high) {
2873*4882a593Smuzhiyun 		/* device will merge to configured CID only */
2874*4882a593Smuzhiyun 		cfg_ctrl1_flags |= (WL_NAN_CTRL_MERGE_CONF_CID_ONLY);
2875*4882a593Smuzhiyun 	}
2876*4882a593Smuzhiyun 	/* setting cluster ID */
2877*4882a593Smuzhiyun 	ret = wl_cfgnan_set_cluster_id(cmd_data, nan_iov_data);
2878*4882a593Smuzhiyun 	if (unlikely(ret)) {
2879*4882a593Smuzhiyun 		WL_ERR(("cluster_id sub_cmd set failed\n"));
2880*4882a593Smuzhiyun 		goto fail;
2881*4882a593Smuzhiyun 	}
2882*4882a593Smuzhiyun 	nan_buf->count++;
2883*4882a593Smuzhiyun 
2884*4882a593Smuzhiyun 	/* setting rssi proximity values for 2.4GHz and 5GHz */
2885*4882a593Smuzhiyun 	ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
2886*4882a593Smuzhiyun 	if (unlikely(ret)) {
2887*4882a593Smuzhiyun 		WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
2888*4882a593Smuzhiyun 		goto fail;
2889*4882a593Smuzhiyun 	} else {
2890*4882a593Smuzhiyun 		nan_buf->count++;
2891*4882a593Smuzhiyun 	}
2892*4882a593Smuzhiyun 
2893*4882a593Smuzhiyun 	/* setting rssi middle/close values for 2.4GHz and 5GHz */
2894*4882a593Smuzhiyun 	ret = wl_cfgnan_set_rssi_mid_or_close(cmd_data, nan_iov_data, nan_attr_mask);
2895*4882a593Smuzhiyun 	if (unlikely(ret)) {
2896*4882a593Smuzhiyun 		WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
2897*4882a593Smuzhiyun 		goto fail;
2898*4882a593Smuzhiyun 	} else {
2899*4882a593Smuzhiyun 		nan_buf->count++;
2900*4882a593Smuzhiyun 	}
2901*4882a593Smuzhiyun 
2902*4882a593Smuzhiyun 	/* setting hop count limit or threshold */
2903*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
2904*4882a593Smuzhiyun 		ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
2905*4882a593Smuzhiyun 		if (unlikely(ret)) {
2906*4882a593Smuzhiyun 			WL_ERR(("hop_count_limit sub_cmd set failed\n"));
2907*4882a593Smuzhiyun 			goto fail;
2908*4882a593Smuzhiyun 		}
2909*4882a593Smuzhiyun 		nan_buf->count++;
2910*4882a593Smuzhiyun 	}
2911*4882a593Smuzhiyun 
2912*4882a593Smuzhiyun 	/* setting sid beacon val */
2913*4882a593Smuzhiyun 	if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
2914*4882a593Smuzhiyun 		(nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
2915*4882a593Smuzhiyun 		ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
2916*4882a593Smuzhiyun 		if (unlikely(ret)) {
2917*4882a593Smuzhiyun 			WL_ERR(("sid_beacon sub_cmd set failed\n"));
2918*4882a593Smuzhiyun 			goto fail;
2919*4882a593Smuzhiyun 		}
2920*4882a593Smuzhiyun 		nan_buf->count++;
2921*4882a593Smuzhiyun 	}
2922*4882a593Smuzhiyun 
2923*4882a593Smuzhiyun 	/* setting nan oui */
2924*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_OUI_CONFIG) {
2925*4882a593Smuzhiyun 		ret = wl_cfgnan_set_nan_oui(cmd_data, nan_iov_data);
2926*4882a593Smuzhiyun 		if (unlikely(ret)) {
2927*4882a593Smuzhiyun 			WL_ERR(("nan_oui sub_cmd set failed\n"));
2928*4882a593Smuzhiyun 			goto fail;
2929*4882a593Smuzhiyun 		}
2930*4882a593Smuzhiyun 		nan_buf->count++;
2931*4882a593Smuzhiyun 	}
2932*4882a593Smuzhiyun 
2933*4882a593Smuzhiyun 	/* setting nan awake dws */
2934*4882a593Smuzhiyun 	ret = wl_cfgnan_set_awake_dws(ndev, cmd_data,
2935*4882a593Smuzhiyun 			nan_iov_data, cfg, nan_attr_mask);
2936*4882a593Smuzhiyun 	if (unlikely(ret)) {
2937*4882a593Smuzhiyun 		WL_ERR(("nan awake dws set failed\n"));
2938*4882a593Smuzhiyun 		goto fail;
2939*4882a593Smuzhiyun 	} else {
2940*4882a593Smuzhiyun 		nan_buf->count++;
2941*4882a593Smuzhiyun 	}
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	/* enable events */
2944*4882a593Smuzhiyun 	ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
2945*4882a593Smuzhiyun 	if (unlikely(ret)) {
2946*4882a593Smuzhiyun 		WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n", ret));
2947*4882a593Smuzhiyun 		goto fail;
2948*4882a593Smuzhiyun 	}
2949*4882a593Smuzhiyun 
2950*4882a593Smuzhiyun 	/* setting nan enable sub_cmd */
2951*4882a593Smuzhiyun 	ret = wl_cfgnan_enable_handler(nan_iov_data, true);
2952*4882a593Smuzhiyun 	if (unlikely(ret)) {
2953*4882a593Smuzhiyun 		WL_ERR(("enable handler sub_cmd set failed\n"));
2954*4882a593Smuzhiyun 		goto fail;
2955*4882a593Smuzhiyun 	}
2956*4882a593Smuzhiyun 	nan_buf->count++;
2957*4882a593Smuzhiyun 	nan_buf->is_set = true;
2958*4882a593Smuzhiyun 
2959*4882a593Smuzhiyun 	nan_buf_size -= nan_iov_data->nan_iov_len;
2960*4882a593Smuzhiyun 	memset(resp_buf, 0, sizeof(resp_buf));
2961*4882a593Smuzhiyun 	/* Reset condition variable */
2962*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
2963*4882a593Smuzhiyun 			&(cmd_data->status), (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
2964*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
2965*4882a593Smuzhiyun 		WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
2966*4882a593Smuzhiyun 				ret, cmd_data->status));
2967*4882a593Smuzhiyun 		goto fail;
2968*4882a593Smuzhiyun 	}
2969*4882a593Smuzhiyun 
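	/* Block until the event handler flags WL_NAN_EVENT_START (nan_event_recvd),
	 * or give up after NAN_START_STOP_TIMEOUT.
	 */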
2970*4882a593Smuzhiyun 	timeout = wait_event_timeout(nancfg->nan_event_wait,
2971*4882a593Smuzhiyun 		nancfg->nan_event_recvd, msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
2972*4882a593Smuzhiyun 	if (!timeout) {
2973*4882a593Smuzhiyun 		WL_ERR(("Timed out while waiting for WL_NAN_EVENT_START event !!!\n"));
2974*4882a593Smuzhiyun 		ret = BCME_ERROR;
2975*4882a593Smuzhiyun 		goto fail;
2976*4882a593Smuzhiyun 	}
2977*4882a593Smuzhiyun 
2978*4882a593Smuzhiyun 	/* Default flags: set NAN proprietary rates and auto datapath confirm
2979*4882a593Smuzhiyun 	 * If auto datapath confirms is set, then DPCONF will be sent by FW
2980*4882a593Smuzhiyun 	 * If auto datapath confirm is set, then DPCONF will be sent by FW
2981*4882a593Smuzhiyun 	cfg_ctrl1_flags |= (WL_NAN_CTRL_AUTO_DPCONF | WL_NAN_CTRL_PROP_RATE);
2982*4882a593Smuzhiyun 
2983*4882a593Smuzhiyun 	/* set CFG CTRL flags */
2984*4882a593Smuzhiyun 	ret = wl_cfgnan_config_control_flag(ndev, cfg, cfg_ctrl1_flags,
2985*4882a593Smuzhiyun 			0, WL_NAN_CMD_CFG_NAN_CONFIG,
2986*4882a593Smuzhiyun 			&(cmd_data->status), true);
2987*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
2988*4882a593Smuzhiyun 		WL_ERR((" nan ctrl1 config flags setting failed, ret = %d status = %d \n",
2989*4882a593Smuzhiyun 				ret, cmd_data->status));
2990*4882a593Smuzhiyun 		goto fail;
2991*4882a593Smuzhiyun 	}
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun 	/* malloc for ndp peer list */
2994*4882a593Smuzhiyun 	if ((ret = wl_cfgnan_get_capablities_handler(ndev, cfg, &capabilities))
2995*4882a593Smuzhiyun 			== BCME_OK) {
2996*4882a593Smuzhiyun 		nancfg->max_ndp_count = capabilities.max_ndp_sessions;
2997*4882a593Smuzhiyun 		nancfg->max_ndi_supported = capabilities.max_ndi_interfaces;
2998*4882a593Smuzhiyun 		nancfg->nan_ndp_peer_info = MALLOCZ(cfg->osh,
2999*4882a593Smuzhiyun 				nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
3000*4882a593Smuzhiyun 		if (!nancfg->nan_ndp_peer_info) {
3001*4882a593Smuzhiyun 			WL_ERR(("%s: memory allocation failed\n", __func__));
3002*4882a593Smuzhiyun 			ret = BCME_NOMEM;
3003*4882a593Smuzhiyun 			goto fail;
3004*4882a593Smuzhiyun 		}
3005*4882a593Smuzhiyun 
3006*4882a593Smuzhiyun 		if (!nancfg->ndi) {
3007*4882a593Smuzhiyun 			nancfg->ndi = MALLOCZ(cfg->osh,
3008*4882a593Smuzhiyun 					nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
3009*4882a593Smuzhiyun 			if (!nancfg->ndi) {
3010*4882a593Smuzhiyun 				WL_ERR(("%s: memory allocation failed\n", __func__));
3011*4882a593Smuzhiyun 				ret = BCME_NOMEM;
3012*4882a593Smuzhiyun 				goto fail;
3013*4882a593Smuzhiyun 			}
3014*4882a593Smuzhiyun 		}
3015*4882a593Smuzhiyun 	} else {
3016*4882a593Smuzhiyun 		WL_ERR(("wl_cfgnan_get_capablities_handler failed, ret = %d\n", ret));
3017*4882a593Smuzhiyun 		goto fail;
3018*4882a593Smuzhiyun 	}
3019*4882a593Smuzhiyun 
3020*4882a593Smuzhiyun 	BCM_REFERENCE(i);
3021*4882a593Smuzhiyun #ifdef NAN_IFACE_CREATE_ON_UP
3022*4882a593Smuzhiyun 	for (i = 0; i < nancfg->max_ndi_supported; i++) {
3023*4882a593Smuzhiyun 		/* Create NDI using the information provided by user space */
3024*4882a593Smuzhiyun 		if (nancfg->ndi[i].in_use && !nancfg->ndi[i].created) {
3025*4882a593Smuzhiyun 			ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
3026*4882a593Smuzhiyun 				nancfg->ndi[i].ifname,
3027*4882a593Smuzhiyun 				NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
3028*4882a593Smuzhiyun 			if (ret) {
3029*4882a593Smuzhiyun 				WL_ERR(("failed to create ndp interface [%d]\n", ret));
3030*4882a593Smuzhiyun 				goto fail;
3031*4882a593Smuzhiyun 			}
3032*4882a593Smuzhiyun 			nancfg->ndi[i].created = true;
3033*4882a593Smuzhiyun 		}
3034*4882a593Smuzhiyun 	}
3035*4882a593Smuzhiyun #endif /* NAN_IFACE_CREATE_ON_UP */
3036*4882a593Smuzhiyun 
3037*4882a593Smuzhiyun 	/* Check if NDPE is capable and use_ndpe_attr is set by framework */
3038*4882a593Smuzhiyun 	/* TODO: For now enabling NDPE by default as framework is not setting use_ndpe_attr
3039*4882a593Smuzhiyun 	 * When (cmd_data->use_ndpe_attr) is set by framework, Add additional check for
3040*4882a593Smuzhiyun 	 * (cmd_data->use_ndpe_attr) as below
3041*4882a593Smuzhiyun 	 * if (capabilities.ndpe_attr_supported && cmd_data->use_ndpe_attr)
3042*4882a593Smuzhiyun 	 */
3043*4882a593Smuzhiyun 	if (capabilities.ndpe_attr_supported) {
3045*4882a593Smuzhiyun 		cfg_ctrl2_flags1 |= WL_NAN_CTRL2_FLAG1_NDPE_CAP;
3046*4882a593Smuzhiyun 		nancfg->ndpe_enabled = true;
3047*4882a593Smuzhiyun 	} else {
3048*4882a593Smuzhiyun 		/* reset NDPE capability in FW */
3049*4882a593Smuzhiyun 		ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL2_FLAG1_NDPE_CAP,
3050*4882a593Smuzhiyun 				0, WL_NAN_CMD_CFG_NAN_CONFIG2,
3051*4882a593Smuzhiyun 				&(cmd_data->status), false);
3052*4882a593Smuzhiyun 		if (unlikely(ret) || unlikely(cmd_data->status)) {
3053*4882a593Smuzhiyun 			WL_ERR((" nan ctrl2 config flags resetting failed, ret = %d status = %d \n",
3054*4882a593Smuzhiyun 					ret, cmd_data->status));
3055*4882a593Smuzhiyun 			goto fail;
3056*4882a593Smuzhiyun 		}
3057*4882a593Smuzhiyun 		nancfg->ndpe_enabled = false;
3058*4882a593Smuzhiyun 	}
3059*4882a593Smuzhiyun 
3060*4882a593Smuzhiyun 	/* set CFG CTRL2 flags1 and flags2 */
3061*4882a593Smuzhiyun 	ret = wl_cfgnan_config_control_flag(ndev, cfg, cfg_ctrl2_flags1,
3062*4882a593Smuzhiyun 			0, WL_NAN_CMD_CFG_NAN_CONFIG2,
3063*4882a593Smuzhiyun 			&(cmd_data->status), true);
3064*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
3065*4882a593Smuzhiyun 		WL_ERR((" nan ctrl2 config flags setting failed, ret = %d status = %d \n",
3066*4882a593Smuzhiyun 				ret, cmd_data->status));
3067*4882a593Smuzhiyun 		goto fail;
3068*4882a593Smuzhiyun 	}
3069*4882a593Smuzhiyun 
3070*4882a593Smuzhiyun #ifdef RTT_SUPPORT
3071*4882a593Smuzhiyun 	/* Initialize geofence cfg */
3072*4882a593Smuzhiyun 	dhd_rtt_initialize_geofence_cfg(cfg->pub);
3073*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun 	if (cmd_data->dw_early_termination > 0) {
3076*4882a593Smuzhiyun 		WL_ERR(("dw early termination is not supported, ignoring for now\n"));
3077*4882a593Smuzhiyun 	}
3078*4882a593Smuzhiyun 
3079*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_DISC_BEACON_INTERVAL) {
3080*4882a593Smuzhiyun 		ret = wl_cfgnan_set_disc_beacon_interval_handler(ndev, cfg,
3081*4882a593Smuzhiyun 			cmd_data->disc_bcn_interval);
3082*4882a593Smuzhiyun 		if (unlikely(ret)) {
3083*4882a593Smuzhiyun 			WL_ERR(("Failed to set beacon interval\n"));
3084*4882a593Smuzhiyun 			goto fail;
3085*4882a593Smuzhiyun 		}
3086*4882a593Smuzhiyun 	}
3087*4882a593Smuzhiyun 
3088*4882a593Smuzhiyun 	nancfg->nan_enable = true;
3089*4882a593Smuzhiyun 	WL_INFORM_MEM(("[NAN] Enable successful\n"));
3090*4882a593Smuzhiyun 
3091*4882a593Smuzhiyun fail:
3092*4882a593Smuzhiyun 	/* Enable back TDLS if connected interface is <= 1 */
3093*4882a593Smuzhiyun 	wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
3094*4882a593Smuzhiyun 
3095*4882a593Smuzhiyun 	/* reset condition variable */
3096*4882a593Smuzhiyun 	nancfg->nan_event_recvd = false;
3097*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
3098*4882a593Smuzhiyun 		nancfg->nan_enable = false;
3099*4882a593Smuzhiyun 		mutex_lock(&cfg->if_sync);
3100*4882a593Smuzhiyun 		ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
3101*4882a593Smuzhiyun 		if (ret != BCME_OK) {
3102*4882a593Smuzhiyun 			WL_ERR(("failed to delete NDI[%d]\n", ret));
3103*4882a593Smuzhiyun 		}
3104*4882a593Smuzhiyun 		mutex_unlock(&cfg->if_sync);
3105*4882a593Smuzhiyun 		if (nancfg->nan_ndp_peer_info) {
3106*4882a593Smuzhiyun 			MFREE(cfg->osh, nancfg->nan_ndp_peer_info,
3107*4882a593Smuzhiyun 					nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
3108*4882a593Smuzhiyun 			nancfg->nan_ndp_peer_info = NULL;
3109*4882a593Smuzhiyun 		}
3110*4882a593Smuzhiyun 		if (nancfg->ndi) {
3111*4882a593Smuzhiyun 			MFREE(cfg->osh, nancfg->ndi,
3112*4882a593Smuzhiyun 					nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
3113*4882a593Smuzhiyun 			nancfg->ndi = NULL;
3114*4882a593Smuzhiyun 		}
3115*4882a593Smuzhiyun 	}
3116*4882a593Smuzhiyun 	if (nan_buf) {
3117*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3118*4882a593Smuzhiyun 	}
3119*4882a593Smuzhiyun 	if (nan_iov_data) {
3120*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
3121*4882a593Smuzhiyun 	}
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	NAN_DBG_EXIT();
3124*4882a593Smuzhiyun 	return ret;
3125*4882a593Smuzhiyun }
3126*4882a593Smuzhiyun 
3127*4882a593Smuzhiyun static int
3128*4882a593Smuzhiyun wl_cfgnan_disable(struct bcm_cfg80211 *cfg)
3129*4882a593Smuzhiyun {
3130*4882a593Smuzhiyun 	s32 ret = BCME_OK;
3131*4882a593Smuzhiyun 	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
3132*4882a593Smuzhiyun 
3133*4882a593Smuzhiyun 	NAN_DBG_ENTER();
3134*4882a593Smuzhiyun 	if ((cfg->nancfg->nan_init_state == TRUE) &&
3135*4882a593Smuzhiyun 			(cfg->nancfg->nan_enable == TRUE)) {
3136*4882a593Smuzhiyun 		struct net_device *ndev;
3137*4882a593Smuzhiyun 		ndev = bcmcfg_to_prmry_ndev(cfg);
3138*4882a593Smuzhiyun 
3139*4882a593Smuzhiyun 		/* We have to remove NDIs so that P2P/Softap can work */
3140*4882a593Smuzhiyun 		ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
3141*4882a593Smuzhiyun 		if (ret != BCME_OK) {
3142*4882a593Smuzhiyun 			WL_ERR(("failed to delete NDI[%d]\n", ret));
3143*4882a593Smuzhiyun 		}
3144*4882a593Smuzhiyun 
3145*4882a593Smuzhiyun 		ret = wl_cfgnan_stop_handler(ndev, cfg);
3146*4882a593Smuzhiyun 		if (ret == -ENODEV) {
3147*4882a593Smuzhiyun 			WL_ERR(("Bus is down, no need to proceed\n"));
3148*4882a593Smuzhiyun 		} else if (ret != BCME_OK) {
3149*4882a593Smuzhiyun 			WL_ERR(("failed to stop nan, error[%d]\n", ret));
3150*4882a593Smuzhiyun 		}
3151*4882a593Smuzhiyun 		ret = wl_cfgnan_deinit(cfg, dhdp->up);
3152*4882a593Smuzhiyun 		if (ret != BCME_OK) {
3153*4882a593Smuzhiyun 			WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
3154*4882a593Smuzhiyun 			if (!dhd_query_bus_erros(dhdp)) {
3155*4882a593Smuzhiyun 				ASSERT(0);
3156*4882a593Smuzhiyun 			}
3157*4882a593Smuzhiyun 		}
3158*4882a593Smuzhiyun 		wl_cfgnan_disable_cleanup(cfg);
3159*4882a593Smuzhiyun 	}
3160*4882a593Smuzhiyun 	NAN_DBG_EXIT();
3161*4882a593Smuzhiyun 	return ret;
3162*4882a593Smuzhiyun }
3163*4882a593Smuzhiyun 
3164*4882a593Smuzhiyun static void
3165*4882a593Smuzhiyun wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg)
3166*4882a593Smuzhiyun {
3167*4882a593Smuzhiyun 	s32 ret = BCME_OK;
3168*4882a593Smuzhiyun 	nan_event_data_t *nan_event_data = NULL;
3169*4882a593Smuzhiyun 
3170*4882a593Smuzhiyun 	NAN_DBG_ENTER();
3171*4882a593Smuzhiyun 
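	/* Build a success-status event and forward it to the NAN HAL so the upper
	 * layers learn about the (asynchronous) disable.
	 */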
3172*4882a593Smuzhiyun 	nan_event_data = MALLOCZ(cfg->osh, sizeof(nan_event_data_t));
3173*4882a593Smuzhiyun 	if (!nan_event_data) {
3174*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
3175*4882a593Smuzhiyun 		ret = BCME_NOMEM;
3176*4882a593Smuzhiyun 		goto exit;
3177*4882a593Smuzhiyun 	}
3178*4882a593Smuzhiyun 	bzero(nan_event_data, sizeof(nan_event_data_t));
3179*4882a593Smuzhiyun 
3180*4882a593Smuzhiyun 	nan_event_data->status = NAN_STATUS_SUCCESS;
3181*4882a593Smuzhiyun 	ret = memcpy_s(nan_event_data->nan_reason, NAN_ERROR_STR_LEN,
3182*4882a593Smuzhiyun 			"NAN_STATUS_SUCCESS", strlen("NAN_STATUS_SUCCESS"));
3183*4882a593Smuzhiyun 	if (ret != BCME_OK) {
3184*4882a593Smuzhiyun 		WL_ERR(("Failed to copy nan reason string, ret = %d\n", ret));
3185*4882a593Smuzhiyun 		goto exit;
3186*4882a593Smuzhiyun 	}
3187*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
3188*4882a593Smuzhiyun 	ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
3189*4882a593Smuzhiyun 			GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
3190*4882a593Smuzhiyun 	if (ret != BCME_OK) {
3191*4882a593Smuzhiyun 		WL_ERR(("Failed to send event to nan hal, (%d)\n",
3192*4882a593Smuzhiyun 				GOOGLE_NAN_EVENT_DISABLED));
3193*4882a593Smuzhiyun 	}
3194*4882a593Smuzhiyun #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
3195*4882a593Smuzhiyun exit:
3196*4882a593Smuzhiyun 	if (nan_event_data) {
3197*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_event_data, sizeof(nan_event_data_t));
3198*4882a593Smuzhiyun 	}
3199*4882a593Smuzhiyun 	NAN_DBG_EXIT();
3200*4882a593Smuzhiyun 	return;
3201*4882a593Smuzhiyun }
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun static void
3204*4882a593Smuzhiyun wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg)
3205*4882a593Smuzhiyun {
3206*4882a593Smuzhiyun 	int i = 0;
3207*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
3208*4882a593Smuzhiyun #ifdef RTT_SUPPORT
3209*4882a593Smuzhiyun 	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
3210*4882a593Smuzhiyun 	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
3211*4882a593Smuzhiyun 	rtt_target_info_t *target_info = NULL;
3212*4882a593Smuzhiyun 
3213*4882a593Smuzhiyun 	/* Delete the geofence rtt target list */
3214*4882a593Smuzhiyun 	dhd_rtt_delete_geofence_target_list(dhdp);
3215*4882a593Smuzhiyun 	/* Cancel pending retry timer if any */
3216*4882a593Smuzhiyun 	if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
3217*4882a593Smuzhiyun 		cancel_delayed_work_sync(&rtt_status->rtt_retry_timer);
3218*4882a593Smuzhiyun 	}
3219*4882a593Smuzhiyun 	/* Remove if any pending proxd timeout for nan-rtt */
3220*4882a593Smuzhiyun 	target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
3221*4882a593Smuzhiyun 	if (target_info && target_info->peer == RTT_PEER_NAN) {
3222*4882a593Smuzhiyun 		/* Cancel pending proxd timeout work if any */
3223*4882a593Smuzhiyun 		if (delayed_work_pending(&rtt_status->proxd_timeout)) {
3224*4882a593Smuzhiyun 			cancel_delayed_work_sync(&rtt_status->proxd_timeout);
3225*4882a593Smuzhiyun 		}
3226*4882a593Smuzhiyun 	}
3227*4882a593Smuzhiyun 	/* Delete if any directed nan rtt session */
3228*4882a593Smuzhiyun 	dhd_rtt_delete_nan_session(dhdp);
3229*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
3230*4882a593Smuzhiyun 	/* Clear the NDP ID array and dp count */
3231*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
3232*4882a593Smuzhiyun 		nancfg->ndp_id[i] = 0;
3233*4882a593Smuzhiyun 	}
3234*4882a593Smuzhiyun 	nancfg->nan_dp_count = 0;
3235*4882a593Smuzhiyun 	if (nancfg->nan_ndp_peer_info) {
3236*4882a593Smuzhiyun 		MFREE(cfg->osh, nancfg->nan_ndp_peer_info,
3237*4882a593Smuzhiyun 			nancfg->max_ndp_count * sizeof(nan_ndp_peer_t));
3238*4882a593Smuzhiyun 		nancfg->nan_ndp_peer_info = NULL;
3239*4882a593Smuzhiyun 	}
3240*4882a593Smuzhiyun 	if (nancfg->ndi) {
3241*4882a593Smuzhiyun 		MFREE(cfg->osh, nancfg->ndi,
3242*4882a593Smuzhiyun 			nancfg->max_ndi_supported * sizeof(*nancfg->ndi));
3243*4882a593Smuzhiyun 		nancfg->ndi = NULL;
3244*4882a593Smuzhiyun 	}
3245*4882a593Smuzhiyun 	wl_cfg80211_concurrent_roam(cfg, false);
3246*4882a593Smuzhiyun 	return;
3247*4882a593Smuzhiyun }
3248*4882a593Smuzhiyun 
3249*4882a593Smuzhiyun /*
3250*4882a593Smuzhiyun  * Deferred NAN disable work, scheduled with a
3251*4882a593Smuzhiyun  * NAN_DISABLE_CMD_DELAY delay so that any active
3252*4882a593Smuzhiyun  * NAN datapaths can be removed first
3253*4882a593Smuzhiyun  */
3254*4882a593Smuzhiyun void
3255*4882a593Smuzhiyun wl_cfgnan_delayed_disable(struct work_struct *work)
3256*4882a593Smuzhiyun {
3257*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg = NULL;
3258*4882a593Smuzhiyun 	struct net_device *ndev = NULL;
3259*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = NULL;
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun 	BCM_SET_CONTAINER_OF(nancfg, work, wl_nancfg_t, nan_disable.work);
3262*4882a593Smuzhiyun 
3263*4882a593Smuzhiyun 	cfg = nancfg->cfg;
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun 	rtnl_lock();
3266*4882a593Smuzhiyun 	if (nancfg->nan_enable == true) {
3267*4882a593Smuzhiyun 		wl_cfgnan_disable(cfg);
3268*4882a593Smuzhiyun 		ndev = bcmcfg_to_prmry_ndev(cfg);
3269*4882a593Smuzhiyun 		wl_cfgvendor_nan_send_async_disable_resp(ndev->ieee80211_ptr);
3270*4882a593Smuzhiyun 	} else {
3271*4882a593Smuzhiyun 		WL_INFORM_MEM(("nan is in disabled state\n"));
3272*4882a593Smuzhiyun 	}
3273*4882a593Smuzhiyun 	rtnl_unlock();
3274*4882a593Smuzhiyun 
3275*4882a593Smuzhiyun 	DHD_NAN_WAKE_UNLOCK(cfg->pub);
3276*4882a593Smuzhiyun 
3277*4882a593Smuzhiyun 	return;
3278*4882a593Smuzhiyun }
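
/*
 * Illustrative sketch only (not a verbatim call site from this file): the
 * handler above is a delayed_work callback, so the driver's scheduling path
 * would queue it along the lines of
 *
 *   schedule_delayed_work(&cfg->nancfg->nan_disable,
 *           msecs_to_jiffies(NAN_DISABLE_CMD_DELAY));
 *
 * letting the disable run NAN_DISABLE_CMD_DELAY ms later so that active NAN
 * datapaths can be torn down first.
 */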
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun int
3281*4882a593Smuzhiyun wl_cfgnan_stop_handler(struct net_device *ndev,
3282*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg)
3283*4882a593Smuzhiyun {
3284*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
3285*4882a593Smuzhiyun 	s32 ret = BCME_OK;
3286*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3287*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
3288*4882a593Smuzhiyun 	uint32 status;
3289*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
3290*4882a593Smuzhiyun 	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
3291*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
3292*4882a593Smuzhiyun 
3293*4882a593Smuzhiyun 	NAN_DBG_ENTER();
3294*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
3295*4882a593Smuzhiyun 
3296*4882a593Smuzhiyun 	if (!nancfg->nan_enable) {
3297*4882a593Smuzhiyun 		WL_INFORM(("Nan is not enabled\n"));
3298*4882a593Smuzhiyun 		ret = BCME_OK;
3299*4882a593Smuzhiyun 		goto fail;
3300*4882a593Smuzhiyun 	}
3301*4882a593Smuzhiyun 
3302*4882a593Smuzhiyun 	if (dhdp->up != DHD_BUS_DOWN) {
3303*4882a593Smuzhiyun 		/*
3304*4882a593Smuzhiyun 		 * The framework performs cleanup (iface removal) on the disable command,
3305*4882a593Smuzhiyun 		 * so suppress NAN events here to avoid triggering iface delete calls again
3306*4882a593Smuzhiyun 		 */
3307*4882a593Smuzhiyun 		WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
3308*4882a593Smuzhiyun 		wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
3309*4882a593Smuzhiyun 
3310*4882a593Smuzhiyun 		nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
3311*4882a593Smuzhiyun 		if (!nan_buf) {
3312*4882a593Smuzhiyun 			WL_ERR(("%s: memory allocation failed\n", __func__));
3313*4882a593Smuzhiyun 			ret = BCME_NOMEM;
3314*4882a593Smuzhiyun 			goto fail;
3315*4882a593Smuzhiyun 		}
3316*4882a593Smuzhiyun 
3317*4882a593Smuzhiyun 		nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
3318*4882a593Smuzhiyun 		if (!nan_iov_data) {
3319*4882a593Smuzhiyun 			WL_ERR(("%s: memory allocation failed\n", __func__));
3320*4882a593Smuzhiyun 			ret = BCME_NOMEM;
3321*4882a593Smuzhiyun 			goto fail;
3322*4882a593Smuzhiyun 		}
3323*4882a593Smuzhiyun 
3324*4882a593Smuzhiyun 		nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
3325*4882a593Smuzhiyun 		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3326*4882a593Smuzhiyun 		nan_buf->count = 0;
3327*4882a593Smuzhiyun 		nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
3328*4882a593Smuzhiyun 		nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
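		/*
		 * Note on the batch IOV buffer layout (descriptive, inferred from the
		 * field usage in this file): nan_buf starts with the batch header
		 * (version, count, is_set) followed by the packed sub-commands in
		 * cmds[]. nan_iov_buf is pointed at cmds[0] and nan_iov_len is reduced
		 * by OFFSETOF(bcm_iov_batch_buf_t, cmds[0]) so that it always tracks
		 * the space still available for sub-commands.
		 */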
3329*4882a593Smuzhiyun 
3330*4882a593Smuzhiyun 		ret = wl_cfgnan_enable_handler(nan_iov_data, false);
3331*4882a593Smuzhiyun 		if (unlikely(ret)) {
3332*4882a593Smuzhiyun 			WL_ERR(("nan disable handler failed\n"));
3333*4882a593Smuzhiyun 			goto fail;
3334*4882a593Smuzhiyun 		}
3335*4882a593Smuzhiyun 		nan_buf->count++;
3336*4882a593Smuzhiyun 		nan_buf->is_set = true;
3337*4882a593Smuzhiyun 		nan_buf_size -= nan_iov_data->nan_iov_len;
3338*4882a593Smuzhiyun 		bzero(resp_buf, sizeof(resp_buf));
3339*4882a593Smuzhiyun 		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
3340*4882a593Smuzhiyun 				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
3341*4882a593Smuzhiyun 		if (unlikely(ret) || unlikely(status)) {
3342*4882a593Smuzhiyun 			WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
3343*4882a593Smuzhiyun 			goto fail;
3344*4882a593Smuzhiyun 		}
3345*4882a593Smuzhiyun 		/* Enable back TDLS if connected interface is <= 1 */
3346*4882a593Smuzhiyun 		wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
3347*4882a593Smuzhiyun 	}
3348*4882a593Smuzhiyun 
3349*4882a593Smuzhiyun 	if (!nancfg->notify_user) {
3350*4882a593Smuzhiyun 		wl_cfgnan_send_stop_event(cfg);
3351*4882a593Smuzhiyun 	}
3352*4882a593Smuzhiyun fail:
3353*4882a593Smuzhiyun 	/* Resetting instance ID mask */
3354*4882a593Smuzhiyun 	nancfg->inst_id_start = 0;
3355*4882a593Smuzhiyun 	memset(nancfg->svc_inst_id_mask, 0, sizeof(nancfg->svc_inst_id_mask));
3356*4882a593Smuzhiyun 	memset(nancfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
3357*4882a593Smuzhiyun 	nancfg->nan_enable = false;
3358*4882a593Smuzhiyun 	WL_INFORM_MEM(("[NAN] Disable done\n"));
3359*4882a593Smuzhiyun 
3360*4882a593Smuzhiyun 	if (nan_buf) {
3361*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3362*4882a593Smuzhiyun 	}
3363*4882a593Smuzhiyun 	if (nan_iov_data) {
3364*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
3365*4882a593Smuzhiyun 	}
3366*4882a593Smuzhiyun 
3367*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
3368*4882a593Smuzhiyun 	NAN_DBG_EXIT();
3369*4882a593Smuzhiyun 	return ret;
3370*4882a593Smuzhiyun }
3371*4882a593Smuzhiyun 
3372*4882a593Smuzhiyun int
3373*4882a593Smuzhiyun wl_cfgnan_config_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
3374*4882a593Smuzhiyun 	nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
3375*4882a593Smuzhiyun {
3376*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
3377*4882a593Smuzhiyun 	s32 ret = BCME_OK;
3378*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3379*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
3380*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
3381*4882a593Smuzhiyun 
3382*4882a593Smuzhiyun 	NAN_DBG_ENTER();
3383*4882a593Smuzhiyun 
3384*4882a593Smuzhiyun 	/* NAN needs to be enabled before configuring/updating params */
3385*4882a593Smuzhiyun 	if (!cfg->nancfg->nan_enable) {
3386*4882a593Smuzhiyun 		WL_INFORM(("nan is not enabled\n"));
3387*4882a593Smuzhiyun 		ret = BCME_NOTENABLED;
3388*4882a593Smuzhiyun 		goto fail;
3389*4882a593Smuzhiyun 	}
3390*4882a593Smuzhiyun 
3391*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
3392*4882a593Smuzhiyun 	if (!nan_buf) {
3393*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
3394*4882a593Smuzhiyun 		ret = BCME_NOMEM;
3395*4882a593Smuzhiyun 		goto fail;
3396*4882a593Smuzhiyun 	}
3397*4882a593Smuzhiyun 
3398*4882a593Smuzhiyun 	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
3399*4882a593Smuzhiyun 	if (!nan_iov_data) {
3400*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
3401*4882a593Smuzhiyun 		ret = BCME_NOMEM;
3402*4882a593Smuzhiyun 		goto fail;
3403*4882a593Smuzhiyun 	}
3404*4882a593Smuzhiyun 
3405*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
3406*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3407*4882a593Smuzhiyun 	nan_buf->count = 0;
3408*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
3409*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
3410*4882a593Smuzhiyun 
3411*4882a593Smuzhiyun 	/* setting sid beacon val */
3412*4882a593Smuzhiyun 	if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
3413*4882a593Smuzhiyun 		(nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
3414*4882a593Smuzhiyun 		ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
3415*4882a593Smuzhiyun 		if (unlikely(ret)) {
3416*4882a593Smuzhiyun 			WL_ERR(("sid_beacon sub_cmd set failed\n"));
3417*4882a593Smuzhiyun 			goto fail;
3418*4882a593Smuzhiyun 		}
3419*4882a593Smuzhiyun 		nan_buf->count++;
3420*4882a593Smuzhiyun 	}
3421*4882a593Smuzhiyun 
3422*4882a593Smuzhiyun 	/* setting master preference and random factor */
3423*4882a593Smuzhiyun 	if (cmd_data->metrics.random_factor ||
3424*4882a593Smuzhiyun 		cmd_data->metrics.master_pref) {
3425*4882a593Smuzhiyun 		ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data,
3426*4882a593Smuzhiyun 				nan_attr_mask);
3427*4882a593Smuzhiyun 		if (unlikely(ret)) {
3428*4882a593Smuzhiyun 			WL_ERR(("election_metric sub_cmd set failed\n"));
3429*4882a593Smuzhiyun 			goto fail;
3430*4882a593Smuzhiyun 		} else {
3431*4882a593Smuzhiyun 			nan_buf->count++;
3432*4882a593Smuzhiyun 		}
3433*4882a593Smuzhiyun 	}
3434*4882a593Smuzhiyun 
3435*4882a593Smuzhiyun 	/* setting hop count limit or threshold */
3436*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
3437*4882a593Smuzhiyun 		ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
3438*4882a593Smuzhiyun 		if (unlikely(ret)) {
3439*4882a593Smuzhiyun 			WL_ERR(("hop_count_limit sub_cmd set failed\n"));
3440*4882a593Smuzhiyun 			goto fail;
3441*4882a593Smuzhiyun 		}
3442*4882a593Smuzhiyun 		nan_buf->count++;
3443*4882a593Smuzhiyun 	}
3444*4882a593Smuzhiyun 
3445*4882a593Smuzhiyun 	/* setting rssi proximity values for 2.4GHz and 5GHz */
3446*4882a593Smuzhiyun 	ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data,
3447*4882a593Smuzhiyun 			nan_attr_mask);
3448*4882a593Smuzhiyun 	if (unlikely(ret)) {
3449*4882a593Smuzhiyun 		WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
3450*4882a593Smuzhiyun 		goto fail;
3451*4882a593Smuzhiyun 	} else {
3452*4882a593Smuzhiyun 		nan_buf->count++;
3453*4882a593Smuzhiyun 	}
3454*4882a593Smuzhiyun 
3455*4882a593Smuzhiyun 	/* setting nan awake dws */
3456*4882a593Smuzhiyun 	ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data,
3457*4882a593Smuzhiyun 		cfg, nan_attr_mask);
3458*4882a593Smuzhiyun 	if (unlikely(ret)) {
3459*4882a593Smuzhiyun 		WL_ERR(("nan awake dws set failed\n"));
3460*4882a593Smuzhiyun 		goto fail;
3461*4882a593Smuzhiyun 	} else {
3462*4882a593Smuzhiyun 		nan_buf->count++;
3463*4882a593Smuzhiyun 	}
3464*4882a593Smuzhiyun 
3465*4882a593Smuzhiyun 	/* TODO: Add the code below once use_ndpe_attr is updated by the framework:
3466*4882a593Smuzhiyun 	 * if NDPE is enabled (cfg.nancfg.ndpe_enabled) and use_ndpe_attr is cleared
3467*4882a593Smuzhiyun 	 * by the framework, disable NDPE using the nan ctrl2 configuration setting;
3468*4882a593Smuzhiyun 	 * else, if NDPE is disabled and use_ndpe_attr is set by the framework, enable NDPE in FW
3469*4882a593Smuzhiyun 	 */
3470*4882a593Smuzhiyun 
3471*4882a593Smuzhiyun 	if (cmd_data->disc_ind_cfg) {
3472*4882a593Smuzhiyun 		/* Disable events */
3473*4882a593Smuzhiyun 		WL_TRACE(("Disable events based on flag\n"));
3474*4882a593Smuzhiyun 		ret = wl_cfgnan_config_eventmask(ndev, cfg,
3475*4882a593Smuzhiyun 			cmd_data->disc_ind_cfg, false);
3476*4882a593Smuzhiyun 		if (unlikely(ret)) {
3477*4882a593Smuzhiyun 			WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n",
3478*4882a593Smuzhiyun 				ret));
3479*4882a593Smuzhiyun 			goto fail;
3480*4882a593Smuzhiyun 		}
3481*4882a593Smuzhiyun 	}
3482*4882a593Smuzhiyun 
3483*4882a593Smuzhiyun 	if ((cfg->nancfg->support_5g) && ((cmd_data->dwell_time[1]) ||
3484*4882a593Smuzhiyun 			(cmd_data->scan_period[1]))) {
3485*4882a593Smuzhiyun 		/* setting scan params */
3486*4882a593Smuzhiyun 		ret = wl_cfgnan_set_nan_scan_params(ndev, cfg,
3487*4882a593Smuzhiyun 				cmd_data, cfg->nancfg->support_5g, nan_attr_mask);
3488*4882a593Smuzhiyun 		if (unlikely(ret)) {
3489*4882a593Smuzhiyun 			WL_ERR(("scan params set failed for 5g\n"));
3490*4882a593Smuzhiyun 			goto fail;
3491*4882a593Smuzhiyun 		}
3492*4882a593Smuzhiyun 	}
3493*4882a593Smuzhiyun 	if ((cmd_data->dwell_time[0]) ||
3494*4882a593Smuzhiyun 			(cmd_data->scan_period[0])) {
3495*4882a593Smuzhiyun 		ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
3496*4882a593Smuzhiyun 		if (unlikely(ret)) {
3497*4882a593Smuzhiyun 			WL_ERR(("scan params set failed for 2g\n"));
3498*4882a593Smuzhiyun 			goto fail;
3499*4882a593Smuzhiyun 		}
3500*4882a593Smuzhiyun 	}
3501*4882a593Smuzhiyun 	nan_buf->is_set = true;
3502*4882a593Smuzhiyun 	nan_buf_size -= nan_iov_data->nan_iov_len;
3503*4882a593Smuzhiyun 
3504*4882a593Smuzhiyun 	if (nan_buf->count) {
3505*4882a593Smuzhiyun 		bzero(resp_buf, sizeof(resp_buf));
3506*4882a593Smuzhiyun 		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
3507*4882a593Smuzhiyun 				&(cmd_data->status),
3508*4882a593Smuzhiyun 				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
3509*4882a593Smuzhiyun 		if (unlikely(ret) || unlikely(cmd_data->status)) {
3510*4882a593Smuzhiyun 			WL_ERR((" nan config handler failed ret = %d status = %d\n",
3511*4882a593Smuzhiyun 				ret, cmd_data->status));
3512*4882a593Smuzhiyun 			goto fail;
3513*4882a593Smuzhiyun 		}
3514*4882a593Smuzhiyun 	} else {
3515*4882a593Smuzhiyun 		WL_DBG(("No commands to send\n"));
3516*4882a593Smuzhiyun 	}
3517*4882a593Smuzhiyun 
3518*4882a593Smuzhiyun 	if ((!cmd_data->bmap) || (cmd_data->avail_params.duration == NAN_BAND_INVALID) ||
3519*4882a593Smuzhiyun 			(!cmd_data->chanspec[0])) {
3520*4882a593Smuzhiyun 		WL_TRACE(("mandatory arguments are not present to set avail\n"));
3521*4882a593Smuzhiyun 		ret = BCME_OK;
3522*4882a593Smuzhiyun 	} else {
3523*4882a593Smuzhiyun 		cmd_data->avail_params.chanspec[0] = cmd_data->chanspec[0];
3524*4882a593Smuzhiyun 		cmd_data->avail_params.bmap = cmd_data->bmap;
3525*4882a593Smuzhiyun 		/* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
3526*4882a593Smuzhiyun 		ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
3527*4882a593Smuzhiyun 				cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
3528*4882a593Smuzhiyun 		if (unlikely(ret)) {
3529*4882a593Smuzhiyun 			WL_ERR(("Failed to set avail value with type local\n"));
3530*4882a593Smuzhiyun 			goto fail;
3531*4882a593Smuzhiyun 		}
3532*4882a593Smuzhiyun 
3533*4882a593Smuzhiyun 		ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
3534*4882a593Smuzhiyun 				cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
3535*4882a593Smuzhiyun 		if (unlikely(ret)) {
3536*4882a593Smuzhiyun 			WL_ERR(("Failed to set avail value with type ndc\n"));
3537*4882a593Smuzhiyun 			goto fail;
3538*4882a593Smuzhiyun 		}
3539*4882a593Smuzhiyun 	}
3540*4882a593Smuzhiyun 
3541*4882a593Smuzhiyun 	if (cmd_data->nmi_rand_intvl > 0) {
3542*4882a593Smuzhiyun #ifdef WL_NAN_ENABLE_MERGE
3543*4882a593Smuzhiyun 		/* Cluster merge enable/disable is conveyed via the nmi random interval config param.
3544*4882a593Smuzhiyun 		 * If bit 31 (MSB) is set, a cluster merge enable/disable config is present;
3545*4882a593Smuzhiyun 		 * bit 30 carries the cluster merge enable/disable value to set in firmware.
3546*4882a593Smuzhiyun 		 */
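		/*
		 * Illustrative decode of cmd_data->nmi_rand_intvl (mask names as used
		 * below; the exact bit position of the LWT-mode flag is defined in the
		 * header):
		 *   NAN_NMI_RAND_PVT_CMD_VENDOR        - bit 31, vendor private config present
		 *   NAN_NMI_RAND_CLUSTER_MERGE_ENAB    - bit 30, cluster merge enable value
		 *   NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB - auto-DAM LWT mode enable value
		 * The remaining low bits carry the NMI random interval itself; the
		 * private bits are cleared further down before any interval handling.
		 */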
3547*4882a593Smuzhiyun 		if (cmd_data->nmi_rand_intvl & NAN_NMI_RAND_PVT_CMD_VENDOR) {
3548*4882a593Smuzhiyun 			uint8 merge_enable;
3549*4882a593Smuzhiyun 			uint8 lwt_mode_enable;
3550*4882a593Smuzhiyun 			int status = BCME_OK;
3551*4882a593Smuzhiyun 
3552*4882a593Smuzhiyun 			merge_enable = !!(cmd_data->nmi_rand_intvl &
3553*4882a593Smuzhiyun 					NAN_NMI_RAND_CLUSTER_MERGE_ENAB);
3554*4882a593Smuzhiyun 			ret = wl_cfgnan_set_enable_merge(bcmcfg_to_prmry_ndev(cfg), cfg,
3555*4882a593Smuzhiyun 					merge_enable, &status);
3556*4882a593Smuzhiyun 			if (unlikely(ret) || unlikely(status)) {
3557*4882a593Smuzhiyun 				WL_ERR(("Enable merge: failed to set config request  [%d]\n", ret));
3558*4882a593Smuzhiyun 				/* As there is no cmd_reply, check if error is in status or ret */
3559*4882a593Smuzhiyun 				if (status) {
3560*4882a593Smuzhiyun 					ret = status;
3561*4882a593Smuzhiyun 				}
3562*4882a593Smuzhiyun 				goto fail;
3563*4882a593Smuzhiyun 			}
3564*4882a593Smuzhiyun 
3565*4882a593Smuzhiyun 			lwt_mode_enable = !!(cmd_data->nmi_rand_intvl &
3566*4882a593Smuzhiyun 					NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB);
3567*4882a593Smuzhiyun 
3568*4882a593Smuzhiyun 			/* set CFG CTRL2 flags1 and flags2 */
3569*4882a593Smuzhiyun 			ret = wl_cfgnan_config_control_flag(ndev, cfg,
3570*4882a593Smuzhiyun 					WL_NAN_CTRL2_FLAG1_AUTODAM_LWT_MODE,
3571*4882a593Smuzhiyun 					0, WL_NAN_CMD_CFG_NAN_CONFIG2,
3572*4882a593Smuzhiyun 					&status, lwt_mode_enable);
3573*4882a593Smuzhiyun 			if (unlikely(ret) || unlikely(status)) {
3574*4882a593Smuzhiyun 				WL_ERR(("Enable dam lwt mode: "
3575*4882a593Smuzhiyun 					"failed to set config request  [%d]\n", ret));
3576*4882a593Smuzhiyun 				/* As there is no cmd_reply, check if error is in status or ret */
3577*4882a593Smuzhiyun 				if (status) {
3578*4882a593Smuzhiyun 					ret = status;
3579*4882a593Smuzhiyun 				}
3580*4882a593Smuzhiyun 				goto fail;
3581*4882a593Smuzhiyun 			}
3582*4882a593Smuzhiyun 
3583*4882a593Smuzhiyun 			/* reset pvt merge enable bits */
3584*4882a593Smuzhiyun 			cmd_data->nmi_rand_intvl &= ~(NAN_NMI_RAND_PVT_CMD_VENDOR |
3585*4882a593Smuzhiyun 					NAN_NMI_RAND_CLUSTER_MERGE_ENAB |
3586*4882a593Smuzhiyun 					NAN_NMI_RAND_AUTODAM_LWT_MODE_ENAB);
3587*4882a593Smuzhiyun 		}
3588*4882a593Smuzhiyun #endif /* WL_NAN_ENABLE_MERGE */
3589*4882a593Smuzhiyun 
3590*4882a593Smuzhiyun 		if (cmd_data->nmi_rand_intvl) {
3591*4882a593Smuzhiyun 			/* Run-time NMI randomization is not supported as of now;
3592*4882a593Smuzhiyun 			 * a random MAC is used only during nan enable/iface-create
3593*4882a593Smuzhiyun 			 */
3594*4882a593Smuzhiyun 			WL_ERR(("run time nmi rand not supported, ignoring for now\n"));
3595*4882a593Smuzhiyun 		}
3596*4882a593Smuzhiyun 	}
3597*4882a593Smuzhiyun 
3598*4882a593Smuzhiyun 	if (cmd_data->dw_early_termination > 0) {
3599*4882a593Smuzhiyun 		WL_ERR(("dw early termination is not supported, ignoring for now\n"));
3600*4882a593Smuzhiyun 	}
3601*4882a593Smuzhiyun 
3602*4882a593Smuzhiyun 	if (nan_attr_mask & NAN_ATTR_DISC_BEACON_INTERVAL) {
3603*4882a593Smuzhiyun 		ret = wl_cfgnan_set_disc_beacon_interval_handler(ndev, cfg,
3604*4882a593Smuzhiyun 			cmd_data->disc_bcn_interval);
3605*4882a593Smuzhiyun 		if (unlikely(ret)) {
3606*4882a593Smuzhiyun 			WL_ERR(("Failed to set beacon interval\n"));
3607*4882a593Smuzhiyun 			goto fail;
3608*4882a593Smuzhiyun 		}
3609*4882a593Smuzhiyun 	}
3610*4882a593Smuzhiyun 
3611*4882a593Smuzhiyun fail:
3612*4882a593Smuzhiyun 	if (nan_buf) {
3613*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3614*4882a593Smuzhiyun 	}
3615*4882a593Smuzhiyun 	if (nan_iov_data) {
3616*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
3617*4882a593Smuzhiyun 	}
3618*4882a593Smuzhiyun 
3619*4882a593Smuzhiyun 	NAN_DBG_EXIT();
3620*4882a593Smuzhiyun 	return ret;
3621*4882a593Smuzhiyun }
3622*4882a593Smuzhiyun 
3623*4882a593Smuzhiyun int
3624*4882a593Smuzhiyun wl_cfgnan_support_handler(struct net_device *ndev,
3625*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
3626*4882a593Smuzhiyun {
3627*4882a593Smuzhiyun 	/* TODO: */
3628*4882a593Smuzhiyun 	return BCME_OK;
3629*4882a593Smuzhiyun }
3630*4882a593Smuzhiyun 
3631*4882a593Smuzhiyun int
3632*4882a593Smuzhiyun wl_cfgnan_status_handler(struct net_device *ndev,
3633*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
3634*4882a593Smuzhiyun {
3635*4882a593Smuzhiyun 	/* TODO: */
3636*4882a593Smuzhiyun 	return BCME_OK;
3637*4882a593Smuzhiyun }
3638*4882a593Smuzhiyun 
3639*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
3640*4882a593Smuzhiyun static
3641*4882a593Smuzhiyun nan_svc_info_t *
3642*4882a593Smuzhiyun wl_cfgnan_get_svc_inst(struct bcm_cfg80211 *cfg,
3643*4882a593Smuzhiyun 	wl_nan_instance_id svc_inst_id, uint8 ndp_id)
3644*4882a593Smuzhiyun {
3645*4882a593Smuzhiyun 	uint8 i, j;
3646*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
3647*4882a593Smuzhiyun 	if (ndp_id) {
3648*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3649*4882a593Smuzhiyun 			for (j = 0; j < NAN_MAX_SVC_INST; j++) {
3650*4882a593Smuzhiyun 				if (nancfg->svc_info[i].ndp_id[j] == ndp_id) {
3651*4882a593Smuzhiyun 					return &nancfg->svc_info[i];
3652*4882a593Smuzhiyun 				}
3653*4882a593Smuzhiyun 			}
3654*4882a593Smuzhiyun 		}
3655*4882a593Smuzhiyun 	} else if (svc_inst_id) {
3656*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3657*4882a593Smuzhiyun 			if (nancfg->svc_info[i].svc_id == svc_inst_id) {
3658*4882a593Smuzhiyun 				return &nancfg->svc_info[i];
3659*4882a593Smuzhiyun 			}
3660*4882a593Smuzhiyun 		}
3661*4882a593Smuzhiyun 
3662*4882a593Smuzhiyun 	}
3663*4882a593Smuzhiyun 	return NULL;
3664*4882a593Smuzhiyun }
3665*4882a593Smuzhiyun 
3666*4882a593Smuzhiyun static int
3667*4882a593Smuzhiyun wl_cfgnan_svc_inst_add_ndp(struct bcm_cfg80211 *cfg,
3668*4882a593Smuzhiyun 	wl_nan_instance_id svc_inst_id, uint8 ndp_id)
3669*4882a593Smuzhiyun {
3670*4882a593Smuzhiyun 	int ret = BCME_OK, i;
3671*4882a593Smuzhiyun 	nan_svc_info_t *svc_info;
3672*4882a593Smuzhiyun 
3673*4882a593Smuzhiyun 	svc_info = wl_cfgnan_get_svc_inst(cfg, svc_inst_id, 0);
3674*4882a593Smuzhiyun 	if (svc_info) {
3675*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3676*4882a593Smuzhiyun 			if (!svc_info->ndp_id[i]) {
3677*4882a593Smuzhiyun 				WL_TRACE(("Found empty field\n"));
3678*4882a593Smuzhiyun 				break;
3679*4882a593Smuzhiyun 			}
3680*4882a593Smuzhiyun 		}
3681*4882a593Smuzhiyun 		if (i == NAN_MAX_SVC_INST) {
3682*4882a593Smuzhiyun 			WL_ERR(("%s: cannot accommodate ndp id\n", __FUNCTION__));
3683*4882a593Smuzhiyun 			ret = BCME_NORESOURCE;
3684*4882a593Smuzhiyun 			goto done;
3685*4882a593Smuzhiyun 		}
3686*4882a593Smuzhiyun 		svc_info->ndp_id[i] = ndp_id;
3687*4882a593Smuzhiyun 	}
3688*4882a593Smuzhiyun 
3689*4882a593Smuzhiyun done:
3690*4882a593Smuzhiyun 	return ret;
3691*4882a593Smuzhiyun }
3692*4882a593Smuzhiyun 
3693*4882a593Smuzhiyun static int
3694*4882a593Smuzhiyun wl_cfgnan_svc_inst_del_ndp(struct bcm_cfg80211 *cfg,
3695*4882a593Smuzhiyun 	wl_nan_instance_id svc_inst_id, uint8 ndp_id)
3696*4882a593Smuzhiyun {
3697*4882a593Smuzhiyun 	int ret = BCME_OK, i;
3698*4882a593Smuzhiyun 	nan_svc_info_t *svc_info;
3699*4882a593Smuzhiyun 
3700*4882a593Smuzhiyun 	svc_info = wl_cfgnan_get_svc_inst(cfg, svc_inst_id, 0);
3701*4882a593Smuzhiyun 
3702*4882a593Smuzhiyun 	if (svc_info) {
3703*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3704*4882a593Smuzhiyun 			if (svc_info->ndp_id[i] == ndp_id) {
3705*4882a593Smuzhiyun 				svc_info->ndp_id[i] = 0;
3706*4882a593Smuzhiyun 				break;
3707*4882a593Smuzhiyun 			}
3708*4882a593Smuzhiyun 		}
3709*4882a593Smuzhiyun 		if (i == NAN_MAX_SVC_INST) {
3710*4882a593Smuzhiyun 			WL_ERR(("couldn't find entry for ndp id = %d\n", ndp_id));
3711*4882a593Smuzhiyun 			ret = BCME_NOTFOUND;
3712*4882a593Smuzhiyun 		}
3713*4882a593Smuzhiyun 	}
3714*4882a593Smuzhiyun 	return ret;
3715*4882a593Smuzhiyun }
3716*4882a593Smuzhiyun 
3717*4882a593Smuzhiyun nan_ranging_inst_t *
3718*4882a593Smuzhiyun wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg, struct ether_addr *peer)
3719*4882a593Smuzhiyun {
3720*4882a593Smuzhiyun 	uint8 i;
3721*4882a593Smuzhiyun 	if (peer) {
3722*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3723*4882a593Smuzhiyun 			if (!memcmp(peer, &cfg->nancfg->nan_ranging_info[i].peer_addr,
3724*4882a593Smuzhiyun 				ETHER_ADDR_LEN)) {
3725*4882a593Smuzhiyun 				return &(cfg->nancfg->nan_ranging_info[i]);
3726*4882a593Smuzhiyun 			}
3727*4882a593Smuzhiyun 		}
3728*4882a593Smuzhiyun 	}
3729*4882a593Smuzhiyun 	return NULL;
3730*4882a593Smuzhiyun }
3731*4882a593Smuzhiyun 
3732*4882a593Smuzhiyun nan_ranging_inst_t *
3733*4882a593Smuzhiyun wl_cfgnan_get_rng_inst_by_id(struct bcm_cfg80211 *cfg, uint8 rng_id)
3734*4882a593Smuzhiyun {
3735*4882a593Smuzhiyun 	uint8 i;
3736*4882a593Smuzhiyun 	if (rng_id) {
3737*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
3738*4882a593Smuzhiyun 			if (cfg->nancfg->nan_ranging_info[i].range_id == rng_id)
3739*4882a593Smuzhiyun 			{
3740*4882a593Smuzhiyun 				return &(cfg->nancfg->nan_ranging_info[i]);
3741*4882a593Smuzhiyun 			}
3742*4882a593Smuzhiyun 		}
3743*4882a593Smuzhiyun 	}
3744*4882a593Smuzhiyun 	WL_ERR(("Couldn't find the ranging instance for rng_id %d\n", rng_id));
3745*4882a593Smuzhiyun 	return NULL;
3746*4882a593Smuzhiyun }
3747*4882a593Smuzhiyun 
3748*4882a593Smuzhiyun /*
3749*4882a593Smuzhiyun  * Find ranging inst for given peer,
3750*4882a593Smuzhiyun  * On not found, create one
3751*4882a593Smuzhiyun  * with given range role
3752*4882a593Smuzhiyun  */
3753*4882a593Smuzhiyun nan_ranging_inst_t *
3754*4882a593Smuzhiyun wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg, struct ether_addr *peer,
3755*4882a593Smuzhiyun 	nan_range_role_t range_role)
3756*4882a593Smuzhiyun {
3757*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
3758*4882a593Smuzhiyun 	uint8 i;
3759*4882a593Smuzhiyun 
3760*4882a593Smuzhiyun 	if (!peer) {
3761*4882a593Smuzhiyun 		WL_ERR(("Peer address is NULL"));
3762*4882a593Smuzhiyun 		goto done;
3763*4882a593Smuzhiyun 	}
3764*4882a593Smuzhiyun 
3765*4882a593Smuzhiyun 	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
3766*4882a593Smuzhiyun 	if (ranging_inst) {
3767*4882a593Smuzhiyun 		goto done;
3768*4882a593Smuzhiyun 	}
3769*4882a593Smuzhiyun 	WL_TRACE(("Creating Ranging instance \n"));
3770*4882a593Smuzhiyun 
3771*4882a593Smuzhiyun 	for (i =  0; i < NAN_MAX_RANGING_INST; i++) {
3772*4882a593Smuzhiyun 		if (cfg->nancfg->nan_ranging_info[i].in_use == FALSE) {
3773*4882a593Smuzhiyun 			break;
3774*4882a593Smuzhiyun 		}
3775*4882a593Smuzhiyun 	}
3776*4882a593Smuzhiyun 
3777*4882a593Smuzhiyun 	if (i == NAN_MAX_RANGING_INST) {
3778*4882a593Smuzhiyun 		WL_ERR(("No buffer available for the ranging instance"));
3779*4882a593Smuzhiyun 		goto done;
3780*4882a593Smuzhiyun 	}
3781*4882a593Smuzhiyun 	ranging_inst = &cfg->nancfg->nan_ranging_info[i];
3782*4882a593Smuzhiyun 	memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
3783*4882a593Smuzhiyun 	ranging_inst->range_status = NAN_RANGING_REQUIRED;
3784*4882a593Smuzhiyun 	ranging_inst->prev_distance_mm = INVALID_DISTANCE;
3785*4882a593Smuzhiyun 	ranging_inst->range_role = range_role;
3786*4882a593Smuzhiyun 	ranging_inst->in_use = TRUE;
3787*4882a593Smuzhiyun 
3788*4882a593Smuzhiyun done:
3789*4882a593Smuzhiyun 	return ranging_inst;
3790*4882a593Smuzhiyun }
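
/*
 * Illustrative usage (hypothetical caller, not a verbatim call site from this
 * file); peer_addr stands for a struct ether_addr holding the peer NMI:
 *
 *   nan_ranging_inst_t *rng_inst =
 *       wl_cfgnan_get_ranging_inst(cfg, &peer_addr, NAN_RANGING_ROLE_RESPONDER);
 *   if (rng_inst && rng_inst->range_status == NAN_RANGING_REQUIRED) {
 *       // ranging towards this peer still needs to be (re)started
 *   }
 */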
3791*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
3792*4882a593Smuzhiyun 
3793*4882a593Smuzhiyun static int
3794*4882a593Smuzhiyun process_resp_buf(void *iov_resp,
3795*4882a593Smuzhiyun 	uint8 *instance_id, uint16 sub_cmd_id)
3796*4882a593Smuzhiyun {
3797*4882a593Smuzhiyun 	int res = BCME_OK;
3798*4882a593Smuzhiyun 	NAN_DBG_ENTER();
3799*4882a593Smuzhiyun 
3800*4882a593Smuzhiyun 	if (sub_cmd_id == WL_NAN_CMD_DATA_DATAREQ) {
3801*4882a593Smuzhiyun 		wl_nan_dp_req_ret_t *dpreq_ret = NULL;
3802*4882a593Smuzhiyun 		dpreq_ret = (wl_nan_dp_req_ret_t *)(iov_resp);
3803*4882a593Smuzhiyun 		*instance_id = dpreq_ret->ndp_id;
3804*4882a593Smuzhiyun 		WL_TRACE(("%s: Initiator NDI: " MACDBG "\n",
3805*4882a593Smuzhiyun 			__FUNCTION__, MAC2STRDBG(dpreq_ret->indi.octet)));
3806*4882a593Smuzhiyun 	} else if (sub_cmd_id == WL_NAN_CMD_RANGE_REQUEST) {
3807*4882a593Smuzhiyun 		wl_nan_range_id *range_id = NULL;
3808*4882a593Smuzhiyun 		range_id = (wl_nan_range_id *)(iov_resp);
3809*4882a593Smuzhiyun 		*instance_id = *range_id;
3810*4882a593Smuzhiyun 		WL_TRACE(("Range id: %d\n", *range_id));
3811*4882a593Smuzhiyun 	}
3812*4882a593Smuzhiyun 	WL_DBG(("instance_id: %d\n", *instance_id));
3813*4882a593Smuzhiyun 	NAN_DBG_EXIT();
3814*4882a593Smuzhiyun 	return res;
3815*4882a593Smuzhiyun }
3816*4882a593Smuzhiyun 
3817*4882a593Smuzhiyun int
3818*4882a593Smuzhiyun wl_cfgnan_cancel_ranging(struct net_device *ndev,
3819*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, uint8 *range_id, uint8 flags, uint32 *status)
3820*4882a593Smuzhiyun {
3821*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
3822*4882a593Smuzhiyun 	s32 ret = BCME_OK;
3823*4882a593Smuzhiyun 	uint16 nan_iov_start, nan_iov_end;
3824*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3825*4882a593Smuzhiyun 	uint16 subcmd_len;
3826*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
3827*4882a593Smuzhiyun 	wl_nan_iov_t *nan_iov_data = NULL;
3828*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
3829*4882a593Smuzhiyun 	wl_nan_range_cancel_ext_t rng_cncl;
3830*4882a593Smuzhiyun 	uint8 size_of_iov;
3831*4882a593Smuzhiyun 
3832*4882a593Smuzhiyun 	NAN_DBG_ENTER();
3833*4882a593Smuzhiyun 
3834*4882a593Smuzhiyun 	if (*range_id == 0) {
3835*4882a593Smuzhiyun 		WL_ERR(("Invalid Range ID\n"));
3836*4882a593Smuzhiyun 		ret = BCME_BADARG;
3837*4882a593Smuzhiyun 		goto fail;
3838*4882a593Smuzhiyun 	}
3839*4882a593Smuzhiyun 
3840*4882a593Smuzhiyun 	if (cfg->nancfg->version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) {
3841*4882a593Smuzhiyun 		size_of_iov = sizeof(rng_cncl);
3842*4882a593Smuzhiyun 	} else {
3843*4882a593Smuzhiyun 		size_of_iov = sizeof(*range_id);
3844*4882a593Smuzhiyun 	}
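	/*
	 * Firmware at or above NAN_RANGE_EXT_CANCEL_SUPPORT_VER accepts the
	 * extended cancel record (range id plus flags); older firmware only takes
	 * the bare range id. size_of_iov selects which payload is copied into the
	 * sub-command further below.
	 */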
3845*4882a593Smuzhiyun 
3846*4882a593Smuzhiyun 	bzero(&rng_cncl, sizeof(rng_cncl));
3847*4882a593Smuzhiyun 	rng_cncl.range_id = *range_id;
3848*4882a593Smuzhiyun 	rng_cncl.flags = flags;
3849*4882a593Smuzhiyun 
3850*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
3851*4882a593Smuzhiyun 	if (!nan_buf) {
3852*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
3853*4882a593Smuzhiyun 		ret = BCME_NOMEM;
3854*4882a593Smuzhiyun 		goto fail;
3855*4882a593Smuzhiyun 	}
3856*4882a593Smuzhiyun 
3857*4882a593Smuzhiyun 	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
3858*4882a593Smuzhiyun 	if (!nan_iov_data) {
3859*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
3860*4882a593Smuzhiyun 		ret = BCME_NOMEM;
3861*4882a593Smuzhiyun 		goto fail;
3862*4882a593Smuzhiyun 	}
3863*4882a593Smuzhiyun 
3864*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
3865*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3866*4882a593Smuzhiyun 	nan_buf->count = 0;
3867*4882a593Smuzhiyun 	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
3868*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
3869*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
3870*4882a593Smuzhiyun 
3871*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
3872*4882a593Smuzhiyun 		size_of_iov, &subcmd_len);
3873*4882a593Smuzhiyun 	if (unlikely(ret)) {
3874*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
3875*4882a593Smuzhiyun 		goto fail;
3876*4882a593Smuzhiyun 	}
3877*4882a593Smuzhiyun 
3878*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
3879*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
3880*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
3881*4882a593Smuzhiyun 
3882*4882a593Smuzhiyun 	/* Reduce the iov_len size by subcmd_len */
3883*4882a593Smuzhiyun 	nan_iov_data->nan_iov_len -= subcmd_len;
3884*4882a593Smuzhiyun 	nan_iov_end = nan_iov_data->nan_iov_len;
3885*4882a593Smuzhiyun 	nan_buf_size = (nan_iov_start - nan_iov_end);
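	/* nan_buf_size now equals the bytes actually consumed: the batch header up
	 * to cmds[0] plus this sub-command. This trimmed size (rather than the full
	 * NAN_IOCTL_BUF_SIZE) is what gets passed to the ioctl below.
	 */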
3886*4882a593Smuzhiyun 
3887*4882a593Smuzhiyun 	if (size_of_iov >= sizeof(rng_cncl)) {
3888*4882a593Smuzhiyun 		(void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
3889*4882a593Smuzhiyun 			&rng_cncl, size_of_iov);
3890*4882a593Smuzhiyun 	} else {
3891*4882a593Smuzhiyun 		(void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
3892*4882a593Smuzhiyun 			range_id, size_of_iov);
3893*4882a593Smuzhiyun 	}
3894*4882a593Smuzhiyun 
3895*4882a593Smuzhiyun 	nan_buf->is_set = true;
3896*4882a593Smuzhiyun 	nan_buf->count++;
3897*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
3898*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
3899*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
3900*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(*status)) {
3901*4882a593Smuzhiyun 		WL_ERR(("Range ID %d cancel failed ret %d status %d \n", *range_id, ret, *status));
3902*4882a593Smuzhiyun 		goto fail;
3903*4882a593Smuzhiyun 	}
3904*4882a593Smuzhiyun 	WL_MEM(("Range cancel with Range ID [%d] successful\n", *range_id));
3905*4882a593Smuzhiyun 
3906*4882a593Smuzhiyun 	/* Resetting range id */
3907*4882a593Smuzhiyun 	*range_id = 0;
3908*4882a593Smuzhiyun fail:
3909*4882a593Smuzhiyun 	if (nan_buf) {
3910*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3911*4882a593Smuzhiyun 	}
3912*4882a593Smuzhiyun 	if (nan_iov_data) {
3913*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
3914*4882a593Smuzhiyun 	}
3915*4882a593Smuzhiyun 	NAN_DBG_EXIT();
3916*4882a593Smuzhiyun 	return ret;
3917*4882a593Smuzhiyun }
3918*4882a593Smuzhiyun 
3919*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
3920*4882a593Smuzhiyun static void
3921*4882a593Smuzhiyun wl_cfgnan_clear_svc_cache(struct bcm_cfg80211 *cfg,
3922*4882a593Smuzhiyun 	wl_nan_instance_id svc_id)
3923*4882a593Smuzhiyun {
3924*4882a593Smuzhiyun 	nan_svc_info_t *svc;
3925*4882a593Smuzhiyun 	svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
3926*4882a593Smuzhiyun 	if (svc) {
3927*4882a593Smuzhiyun 		WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
3928*4882a593Smuzhiyun 		memset(svc, 0, sizeof(*svc));
3929*4882a593Smuzhiyun 	}
3930*4882a593Smuzhiyun }
3931*4882a593Smuzhiyun 
3932*4882a593Smuzhiyun static int
3933*4882a593Smuzhiyun wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
3934*4882a593Smuzhiyun 	nan_discover_cmd_data_t *cmd_data, uint16 cmd_id, bool update)
3935*4882a593Smuzhiyun {
3936*4882a593Smuzhiyun 	int ret = BCME_OK;
3937*4882a593Smuzhiyun 	int i;
3938*4882a593Smuzhiyun 	nan_svc_info_t *svc_info;
3939*4882a593Smuzhiyun 	uint8 svc_id = (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) ? cmd_data->sub_id :
3940*4882a593Smuzhiyun 		cmd_data->pub_id;
3941*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
3942*4882a593Smuzhiyun 
3943*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_SVC_INST; i++) {
3944*4882a593Smuzhiyun 		if (update) {
3945*4882a593Smuzhiyun 			if (nancfg->svc_info[i].svc_id == svc_id) {
3946*4882a593Smuzhiyun 				svc_info = &nancfg->svc_info[i];
3947*4882a593Smuzhiyun 				break;
3948*4882a593Smuzhiyun 			} else {
3949*4882a593Smuzhiyun 				continue;
3950*4882a593Smuzhiyun 			}
3951*4882a593Smuzhiyun 		}
3952*4882a593Smuzhiyun 		if (!nancfg->svc_info[i].svc_id) {
3953*4882a593Smuzhiyun 			svc_info = &nancfg->svc_info[i];
3954*4882a593Smuzhiyun 			break;
3955*4882a593Smuzhiyun 		}
3956*4882a593Smuzhiyun 	}
3957*4882a593Smuzhiyun 	if (i == NAN_MAX_SVC_INST) {
3958*4882a593Smuzhiyun 		WL_ERR(("%s: cannot accommodate svc info\n", __FUNCTION__));
3959*4882a593Smuzhiyun 		ret = BCME_NORESOURCE;
3960*4882a593Smuzhiyun 		goto fail;
3961*4882a593Smuzhiyun 	}
3962*4882a593Smuzhiyun 	if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
3963*4882a593Smuzhiyun 		WL_TRACE(("%s: updating ranging info, enabling\n", __FUNCTION__));
3964*4882a593Smuzhiyun 		svc_info->status = 1;
3965*4882a593Smuzhiyun 		svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
3966*4882a593Smuzhiyun 		svc_info->ranging_ind = cmd_data->ranging_indication;
3967*4882a593Smuzhiyun 		svc_info->ingress_limit = cmd_data->ingress_limit;
3968*4882a593Smuzhiyun 		svc_info->egress_limit = cmd_data->egress_limit;
3969*4882a593Smuzhiyun 		svc_info->ranging_required = 1;
3970*4882a593Smuzhiyun 	} else {
3971*4882a593Smuzhiyun 		WL_TRACE(("%s: updating ranging info, disabling\n", __FUNCTION__));
3972*4882a593Smuzhiyun 		svc_info->status = 0;
3973*4882a593Smuzhiyun 		svc_info->ranging_interval = 0;
3974*4882a593Smuzhiyun 		svc_info->ranging_ind = 0;
3975*4882a593Smuzhiyun 		svc_info->ingress_limit = 0;
3976*4882a593Smuzhiyun 		svc_info->egress_limit = 0;
3977*4882a593Smuzhiyun 		svc_info->ranging_required = 0;
3978*4882a593Smuzhiyun 	}
3979*4882a593Smuzhiyun 
3980*4882a593Smuzhiyun 	/* Reset Range status flags on svc creation/update */
3981*4882a593Smuzhiyun 	svc_info->svc_range_status = 0;
3982*4882a593Smuzhiyun 	svc_info->flags = cmd_data->flags;
3983*4882a593Smuzhiyun 
3984*4882a593Smuzhiyun 	if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
3985*4882a593Smuzhiyun 		svc_info->svc_id = cmd_data->sub_id;
3986*4882a593Smuzhiyun 		if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
3987*4882a593Smuzhiyun 			(cmd_data->tx_match.dlen)) {
3988*4882a593Smuzhiyun 			ret = memcpy_s(svc_info->tx_match_filter, sizeof(svc_info->tx_match_filter),
3989*4882a593Smuzhiyun 				cmd_data->tx_match.data, cmd_data->tx_match.dlen);
3990*4882a593Smuzhiyun 			if (ret != BCME_OK) {
3991*4882a593Smuzhiyun 				WL_ERR(("Failed to copy tx match filter data\n"));
3992*4882a593Smuzhiyun 				goto fail;
3993*4882a593Smuzhiyun 			}
3994*4882a593Smuzhiyun 			svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
3995*4882a593Smuzhiyun 		}
3996*4882a593Smuzhiyun 	} else {
3997*4882a593Smuzhiyun 		svc_info->svc_id = cmd_data->pub_id;
3998*4882a593Smuzhiyun 	}
3999*4882a593Smuzhiyun 	ret = memcpy_s(svc_info->svc_hash, sizeof(svc_info->svc_hash),
4000*4882a593Smuzhiyun 			cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
4001*4882a593Smuzhiyun 	if (ret != BCME_OK) {
4002*4882a593Smuzhiyun 		WL_ERR(("Failed to copy svc hash\n"));
4003*4882a593Smuzhiyun 	}
4004*4882a593Smuzhiyun fail:
4005*4882a593Smuzhiyun 	return ret;
4006*4882a593Smuzhiyun 
4007*4882a593Smuzhiyun }
4008*4882a593Smuzhiyun 
4009*4882a593Smuzhiyun #ifdef RTT_SUPPORT
4010*4882a593Smuzhiyun /*
4011*4882a593Smuzhiyun  * Reset the instance for an Initiator.
4012*4882a593Smuzhiyun  * For a Responder, remove it if there is no pending
4013*4882a593Smuzhiyun  * geofence target, otherwise reset it
4014*4882a593Smuzhiyun  */
4015*4882a593Smuzhiyun static void
4016*4882a593Smuzhiyun wl_cfgnan_reset_remove_ranging_instance(struct bcm_cfg80211 *cfg,
4017*4882a593Smuzhiyun         nan_ranging_inst_t *ranging_inst)
4018*4882a593Smuzhiyun {
4019*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4020*4882a593Smuzhiyun 	int8 index;
4021*4882a593Smuzhiyun 	rtt_geofence_target_info_t* geofence_target;
4022*4882a593Smuzhiyun 
4023*4882a593Smuzhiyun 	ASSERT(ranging_inst);
4024*4882a593Smuzhiyun 	if (!ranging_inst) {
4025*4882a593Smuzhiyun 		return;
4026*4882a593Smuzhiyun 	}
4027*4882a593Smuzhiyun 
4028*4882a593Smuzhiyun 	if ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
4029*4882a593Smuzhiyun 		(ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED)) {
4030*4882a593Smuzhiyun 		/* Remove ranging instance for responder */
4031*4882a593Smuzhiyun 		geofence_target = dhd_rtt_get_geofence_target(dhd,
4032*4882a593Smuzhiyun 				&ranging_inst->peer_addr, &index);
4033*4882a593Smuzhiyun 		if (!geofence_target) {
4034*4882a593Smuzhiyun 			/* Remove rng inst if no pend target */
4035*4882a593Smuzhiyun 			WL_INFORM_MEM(("Removing Ranging Instance "
4036*4882a593Smuzhiyun 				"peer: " MACDBG "\n",
4037*4882a593Smuzhiyun 				MAC2STRDBG(&ranging_inst->peer_addr)));
4038*4882a593Smuzhiyun 			bzero(ranging_inst, sizeof(*ranging_inst));
4039*4882a593Smuzhiyun 		} else {
4040*4882a593Smuzhiyun 			ranging_inst->range_status = NAN_RANGING_REQUIRED;
4041*4882a593Smuzhiyun 			/* resolve range role concurrency */
4042*4882a593Smuzhiyun 			WL_INFORM_MEM(("Resolving Role Concurrency constraint, peer : "
4043*4882a593Smuzhiyun 				MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
4044*4882a593Smuzhiyun 			ranging_inst->role_concurrency_status = FALSE;
4045*4882a593Smuzhiyun 		}
4046*4882a593Smuzhiyun 	} else {
4047*4882a593Smuzhiyun 		/* For geofence Initiator */
4048*4882a593Smuzhiyun 		ranging_inst->range_status = NAN_RANGING_REQUIRED;
4049*4882a593Smuzhiyun 	}
4050*4882a593Smuzhiyun }
4051*4882a593Smuzhiyun 
4052*4882a593Smuzhiyun /*
4053*4882a593Smuzhiyun  * Forcefully remove the ranging instance and
4054*4882a593Smuzhiyun  * remove any corresponding geofence target
4055*4882a593Smuzhiyun  */
4056*4882a593Smuzhiyun static void
4057*4882a593Smuzhiyun wl_cfgnan_remove_ranging_instance(struct bcm_cfg80211 *cfg,
4058*4882a593Smuzhiyun 		nan_ranging_inst_t *ranging_inst)
4059*4882a593Smuzhiyun {
4060*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4061*4882a593Smuzhiyun 	int8 index;
4062*4882a593Smuzhiyun 	rtt_geofence_target_info_t* geofence_target;
4063*4882a593Smuzhiyun 
4064*4882a593Smuzhiyun 	ASSERT(ranging_inst);
4065*4882a593Smuzhiyun 	if (!ranging_inst) {
4066*4882a593Smuzhiyun 		return;
4067*4882a593Smuzhiyun 	}
4068*4882a593Smuzhiyun 
4069*4882a593Smuzhiyun 	geofence_target = dhd_rtt_get_geofence_target(dhd,
4070*4882a593Smuzhiyun 			&ranging_inst->peer_addr, &index);
4071*4882a593Smuzhiyun 	if (geofence_target) {
4072*4882a593Smuzhiyun 		dhd_rtt_remove_geofence_target(dhd,
4073*4882a593Smuzhiyun 			&geofence_target->peer_addr);
4074*4882a593Smuzhiyun 	}
4075*4882a593Smuzhiyun 	WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
4076*4882a593Smuzhiyun 		MAC2STRDBG(&(ranging_inst->peer_addr))));
4077*4882a593Smuzhiyun 	bzero(ranging_inst, sizeof(nan_ranging_inst_t));
4078*4882a593Smuzhiyun 
4079*4882a593Smuzhiyun 	return;
4080*4882a593Smuzhiyun }
4081*4882a593Smuzhiyun 
4082*4882a593Smuzhiyun static bool
4083*4882a593Smuzhiyun wl_cfgnan_clear_svc_from_ranging_inst(struct bcm_cfg80211 *cfg,
4084*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst, nan_svc_info_t *svc)
4085*4882a593Smuzhiyun {
4086*4882a593Smuzhiyun 	int i = 0;
4087*4882a593Smuzhiyun 	bool cleared = FALSE;
4088*4882a593Smuzhiyun 
4089*4882a593Smuzhiyun 	if (svc && ranging_inst->in_use) {
4090*4882a593Smuzhiyun 		for (i = 0; i < MAX_SUBSCRIBES; i++) {
4091*4882a593Smuzhiyun 			if (svc == ranging_inst->svc_idx[i]) {
4092*4882a593Smuzhiyun 				ranging_inst->num_svc_ctx--;
4093*4882a593Smuzhiyun 				ranging_inst->svc_idx[i] = NULL;
4094*4882a593Smuzhiyun 				cleared = TRUE;
4095*4882a593Smuzhiyun 				/*
4096*4882a593Smuzhiyun 				 * This list is maintained duplicate-free,
4097*4882a593Smuzhiyun 				 * hence we can break here
4098*4882a593Smuzhiyun 				 */
4099*4882a593Smuzhiyun 				break;
4100*4882a593Smuzhiyun 			}
4101*4882a593Smuzhiyun 		}
4102*4882a593Smuzhiyun 	}
4103*4882a593Smuzhiyun 	return cleared;
4104*4882a593Smuzhiyun }
4105*4882a593Smuzhiyun 
4106*4882a593Smuzhiyun static int
4107*4882a593Smuzhiyun wl_cfgnan_clear_svc_from_all_ranging_inst(struct bcm_cfg80211 *cfg, uint8 svc_id)
4108*4882a593Smuzhiyun {
4109*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst;
4110*4882a593Smuzhiyun 	int i = 0;
4111*4882a593Smuzhiyun 	int ret = BCME_OK;
4112*4882a593Smuzhiyun 
4113*4882a593Smuzhiyun 	nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
4114*4882a593Smuzhiyun 	if (!svc) {
4115*4882a593Smuzhiyun 		WL_ERR(("\n svc not found \n"));
4116*4882a593Smuzhiyun 		ret = BCME_NOTFOUND;
4117*4882a593Smuzhiyun 		goto done;
4118*4882a593Smuzhiyun 	}
4119*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
4120*4882a593Smuzhiyun 		ranging_inst = &(cfg->nancfg->nan_ranging_info[i]);
4121*4882a593Smuzhiyun 		wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
4122*4882a593Smuzhiyun 	}
4123*4882a593Smuzhiyun 
4124*4882a593Smuzhiyun done:
4125*4882a593Smuzhiyun 	return ret;
4126*4882a593Smuzhiyun }
4127*4882a593Smuzhiyun 
4128*4882a593Smuzhiyun static int
4129*4882a593Smuzhiyun wl_cfgnan_ranging_clear_publish(struct bcm_cfg80211 *cfg,
4130*4882a593Smuzhiyun 	struct ether_addr *peer, uint8 svc_id)
4131*4882a593Smuzhiyun {
4132*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
4133*4882a593Smuzhiyun 	nan_svc_info_t *svc = NULL;
4134*4882a593Smuzhiyun 	bool cleared = FALSE;
4135*4882a593Smuzhiyun 	int ret = BCME_OK;
4136*4882a593Smuzhiyun 
4137*4882a593Smuzhiyun 	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
4138*4882a593Smuzhiyun 	if (!ranging_inst || !ranging_inst->in_use) {
4139*4882a593Smuzhiyun 		goto done;
4140*4882a593Smuzhiyun 	}
4141*4882a593Smuzhiyun 
4142*4882a593Smuzhiyun 	WL_INFORM_MEM(("Check clear Ranging for pub update, sub id = %d,"
4143*4882a593Smuzhiyun 		" range_id = %d, peer addr = " MACDBG " \n", svc_id,
4144*4882a593Smuzhiyun 		ranging_inst->range_id, MAC2STRDBG(peer)));
4145*4882a593Smuzhiyun 	svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
4146*4882a593Smuzhiyun 	if (!svc) {
4147*4882a593Smuzhiyun 		WL_ERR(("\n svc not found, svc_id = %d\n", svc_id));
4148*4882a593Smuzhiyun 		ret = BCME_NOTFOUND;
4149*4882a593Smuzhiyun 		goto done;
4150*4882a593Smuzhiyun 	}
4151*4882a593Smuzhiyun 
4152*4882a593Smuzhiyun 	cleared = wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
4153*4882a593Smuzhiyun 	if (!cleared) {
4154*4882a593Smuzhiyun 		/* Updates are needed only if this svc was actually cleared */
4155*4882a593Smuzhiyun 		ret = BCME_NOTFOUND;
4156*4882a593Smuzhiyun 		goto done;
4157*4882a593Smuzhiyun 	}
4158*4882a593Smuzhiyun 
4159*4882a593Smuzhiyun 	wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
4160*4882a593Smuzhiyun 	wl_cfgnan_reset_geofence_ranging(cfg, NULL,
4161*4882a593Smuzhiyun 		RTT_SCHED_RNG_TERM_PUB_RNG_CLEAR, TRUE);
4162*4882a593Smuzhiyun 
4163*4882a593Smuzhiyun done:
4164*4882a593Smuzhiyun 	return ret;
4165*4882a593Smuzhiyun }
4166*4882a593Smuzhiyun 
4167*4882a593Smuzhiyun /* API to terminate/clear all directed nan-rtt sessions.
4168*4882a593Smuzhiyun  * Can be called from framework RTT stop context
4169*4882a593Smuzhiyun  */
4170*4882a593Smuzhiyun int
4171*4882a593Smuzhiyun wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev,
4172*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg)
4173*4882a593Smuzhiyun {
4174*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst;
4175*4882a593Smuzhiyun 	int i, ret = BCME_OK;
4176*4882a593Smuzhiyun 	uint32 status;
4177*4882a593Smuzhiyun 
4178*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
4179*4882a593Smuzhiyun 		ranging_inst = &cfg->nancfg->nan_ranging_info[i];
4180*4882a593Smuzhiyun 		if (ranging_inst->range_id && ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
4181*4882a593Smuzhiyun 			if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
4182*4882a593Smuzhiyun 				ret =  wl_cfgnan_cancel_ranging(ndev, cfg, &ranging_inst->range_id,
4183*4882a593Smuzhiyun 					NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
4184*4882a593Smuzhiyun 				if (unlikely(ret) || unlikely(status)) {
4185*4882a593Smuzhiyun 					WL_ERR(("nan range cancel failed ret = %d status = %d\n",
4186*4882a593Smuzhiyun 						ret, status));
4187*4882a593Smuzhiyun 				}
4188*4882a593Smuzhiyun 			}
4189*4882a593Smuzhiyun 			wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
4190*4882a593Smuzhiyun 				RTT_SHCED_HOST_DIRECTED_TERM, FALSE);
4191*4882a593Smuzhiyun 		}
4192*4882a593Smuzhiyun 	}
4193*4882a593Smuzhiyun 	return ret;
4194*4882a593Smuzhiyun }
4195*4882a593Smuzhiyun 
4196*4882a593Smuzhiyun /*
4197*4882a593Smuzhiyun  * Suspend the ongoing geofence ranging session with the given peer,
4198*4882a593Smuzhiyun  * if ranging is in progress with that peer.
4199*4882a593Smuzhiyun  * The request is dropped (nothing is done) when:
4200*4882a593Smuzhiyun  *  - the peer is NULL
4201*4882a593Smuzhiyun  *  - no geofence ranging is in progress with the given peer
4202*4882a593Smuzhiyun  * (for suspending all sessions see
4203*4882a593Smuzhiyun  *  wl_cfgnan_suspend_all_geofence_rng_sessions below)
4204*4882a593Smuzhiyun  */
4205*4882a593Smuzhiyun int
4206*4882a593Smuzhiyun wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
4207*4882a593Smuzhiyun 	struct ether_addr *peer, int suspend_reason, u8 cancel_flags)
4208*4882a593Smuzhiyun {
4209*4882a593Smuzhiyun 	int ret = BCME_OK;
4210*4882a593Smuzhiyun 	uint32 status;
4211*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
4212*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
4213*4882a593Smuzhiyun 	int suspend_req_dropped_at = 0;
4214*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4215*4882a593Smuzhiyun 
4216*4882a593Smuzhiyun 	UNUSED_PARAMETER(suspend_req_dropped_at);
4217*4882a593Smuzhiyun 
4218*4882a593Smuzhiyun 	ASSERT(peer);
4219*4882a593Smuzhiyun 	if (!peer) {
4220*4882a593Smuzhiyun 		WL_DBG(("Incoming Peer is NULL, suspend req dropped\n"));
4221*4882a593Smuzhiyun 		suspend_req_dropped_at = 1;
4222*4882a593Smuzhiyun 		goto exit;
4223*4882a593Smuzhiyun 	}
4224*4882a593Smuzhiyun 
4225*4882a593Smuzhiyun 	if (!wl_ranging_geofence_session_with_peer(cfg, peer)) {
4226*4882a593Smuzhiyun 		WL_DBG(("Geofence Ranging not in progress with given peer,"
4227*4882a593Smuzhiyun 			" suspend req dropped\n"));
4228*4882a593Smuzhiyun 		suspend_req_dropped_at = 2;
4229*4882a593Smuzhiyun 		goto exit;
4230*4882a593Smuzhiyun 	}
4231*4882a593Smuzhiyun 
4232*4882a593Smuzhiyun 	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
4233*4882a593Smuzhiyun 	if (ranging_inst) {
4234*4882a593Smuzhiyun 		cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
4235*4882a593Smuzhiyun 		ret =  wl_cfgnan_cancel_ranging(ndev, cfg,
4236*4882a593Smuzhiyun 				&ranging_inst->range_id, cancel_flags, &status);
4237*4882a593Smuzhiyun 		if (unlikely(ret) || unlikely(status)) {
4238*4882a593Smuzhiyun 			WL_ERR(("Geofence range suspend failed, err = %d, status = %d,"
4239*4882a593Smuzhiyun 				"suspend_reason = %d, peer: " MACDBG " \n",
4240*4882a593Smuzhiyun 				ret, status, suspend_reason, MAC2STRDBG(peer)));
4241*4882a593Smuzhiyun 		}
4242*4882a593Smuzhiyun 
4243*4882a593Smuzhiyun 		ranging_inst->range_status = NAN_RANGING_REQUIRED;
4244*4882a593Smuzhiyun 		dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
4245*4882a593Smuzhiyun 			&ranging_inst->peer_addr);
4246*4882a593Smuzhiyun 
4247*4882a593Smuzhiyun 		if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER &&
4248*4882a593Smuzhiyun 			ranging_inst->role_concurrency_status) {
4249*4882a593Smuzhiyun 			/* resolve range role concurrency */
4250*4882a593Smuzhiyun 			WL_INFORM_MEM(("Resolving Role Concurrency constraint, peer : "
4251*4882a593Smuzhiyun 				MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
4252*4882a593Smuzhiyun 			ranging_inst->role_concurrency_status = FALSE;
4253*4882a593Smuzhiyun 		}
4254*4882a593Smuzhiyun 
4255*4882a593Smuzhiyun 		WL_INFORM_MEM(("Geofence Range suspended, "
4256*4882a593Smuzhiyun 			" suspend_reason = %d, peer: " MACDBG " \n",
4257*4882a593Smuzhiyun 			suspend_reason, MAC2STRDBG(peer)));
4258*4882a593Smuzhiyun 	}
4259*4882a593Smuzhiyun 
4260*4882a593Smuzhiyun exit:
4261*4882a593Smuzhiyun 	/* Post pending discovery results */
4262*4882a593Smuzhiyun 	if (ranging_inst &&
4263*4882a593Smuzhiyun 		((suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER) ||
4264*4882a593Smuzhiyun 		(suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER))) {
4265*4882a593Smuzhiyun 		wl_cfgnan_disc_result_on_geofence_cancel(cfg, ranging_inst);
4266*4882a593Smuzhiyun 	}
4267*4882a593Smuzhiyun 
4268*4882a593Smuzhiyun 	if (suspend_req_dropped_at) {
4269*4882a593Smuzhiyun 		if (ranging_inst) {
4270*4882a593Smuzhiyun 			WL_INFORM_MEM(("Ranging Suspend Req with peer: " MACDBG
4271*4882a593Smuzhiyun 				", dropped at = %d\n", MAC2STRDBG(&ranging_inst->peer_addr),
4272*4882a593Smuzhiyun 				suspend_req_dropped_at));
4273*4882a593Smuzhiyun 		} else {
4274*4882a593Smuzhiyun 			WL_INFORM_MEM(("Ranging Suspend Req dropped at = %d\n",
4275*4882a593Smuzhiyun 				suspend_req_dropped_at));
4276*4882a593Smuzhiyun 		}
4277*4882a593Smuzhiyun 	}
4278*4882a593Smuzhiyun 	return ret;
4279*4882a593Smuzhiyun }
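
/*
 * Illustrative usage (hypothetical call-site shape; the actual callers live in
 * the NDP setup/teardown paths of this driver). peer_nmi stands for a
 * struct ether_addr holding the peer's NMI:
 *
 *   wl_cfgnan_suspend_geofence_rng_session(ndev, &peer_nmi,
 *           RTT_GEO_SUSPN_HOST_NDP_TRIGGER, 0);
 */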
4280*4882a593Smuzhiyun 
4281*4882a593Smuzhiyun /*
4282*4882a593Smuzhiyun  * suspends all geofence ranging sessions
4283*4882a593Smuzhiyun  * including initiators and responders
4284*4882a593Smuzhiyun  */
4285*4882a593Smuzhiyun void
4286*4882a593Smuzhiyun wl_cfgnan_suspend_all_geofence_rng_sessions(struct net_device *ndev,
4287*4882a593Smuzhiyun 		int suspend_reason, u8 cancel_flags)
4288*4882a593Smuzhiyun {
4289*4882a593Smuzhiyun 
4290*4882a593Smuzhiyun 	uint8 i = 0;
4291*4882a593Smuzhiyun 	int ret = BCME_OK;
4292*4882a593Smuzhiyun 	uint32 status;
4293*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
4294*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
4295*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4296*4882a593Smuzhiyun 
4297*4882a593Smuzhiyun 	WL_INFORM_MEM(("Suspending all geofence sessions: "
4298*4882a593Smuzhiyun 		"suspend_reason = %d\n", suspend_reason));
4299*4882a593Smuzhiyun 
4300*4882a593Smuzhiyun 	cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
4301*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
4302*4882a593Smuzhiyun 		ranging_inst = &cfg->nancfg->nan_ranging_info[i];
4303*4882a593Smuzhiyun 		/* Cancel Ranging if in progress for rang_inst */
4304*4882a593Smuzhiyun 		if (ranging_inst->in_use &&
4305*4882a593Smuzhiyun 				NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
4306*4882a593Smuzhiyun 			ret =  wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
4307*4882a593Smuzhiyun 					cfg, &ranging_inst->range_id,
4308*4882a593Smuzhiyun 					NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
4309*4882a593Smuzhiyun 			if (unlikely(ret) || unlikely(status)) {
4310*4882a593Smuzhiyun 				WL_ERR(("wl_cfgnan_suspend_all_geofence_rng_sessions: "
4311*4882a593Smuzhiyun 					"nan range cancel failed ret = %d status = %d\n",
4312*4882a593Smuzhiyun 					ret, status));
4313*4882a593Smuzhiyun 			} else {
4314*4882a593Smuzhiyun 				dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
4315*4882a593Smuzhiyun 					&ranging_inst->peer_addr);
4316*4882a593Smuzhiyun 				wl_cfgnan_reset_remove_ranging_instance(cfg, ranging_inst);
4317*4882a593Smuzhiyun 			}
4318*4882a593Smuzhiyun 		}
4319*4882a593Smuzhiyun 	}
4320*4882a593Smuzhiyun 
4321*4882a593Smuzhiyun 	return;
4322*4882a593Smuzhiyun 
4323*4882a593Smuzhiyun }
4324*4882a593Smuzhiyun 
4325*4882a593Smuzhiyun /*
4326*4882a593Smuzhiyun  * Terminate given ranging instance
4327*4882a593Smuzhiyun  * if no pending ranging sub service
4328*4882a593Smuzhiyun  */
4329*4882a593Smuzhiyun static void
4330*4882a593Smuzhiyun wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
4331*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst)
4332*4882a593Smuzhiyun {
4333*4882a593Smuzhiyun 	int ret = BCME_OK;
4334*4882a593Smuzhiyun 	uint32 status;
4335*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4336*4882a593Smuzhiyun 
4337*4882a593Smuzhiyun 	if (ranging_inst->num_svc_ctx != 0) {
4338*4882a593Smuzhiyun 		/*
4339*4882a593Smuzhiyun 		 * All svc_insts for this range_inst must be removed first,
4340*4882a593Smuzhiyun 		 * so that ranging can be cancelled and the target removed by the caller
4341*4882a593Smuzhiyun 		 */
4342*4882a593Smuzhiyun 		return;
4343*4882a593Smuzhiyun 	}
4344*4882a593Smuzhiyun 
4345*4882a593Smuzhiyun 	/* Cancel Ranging if in progress for rang_inst */
4346*4882a593Smuzhiyun 	if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
4347*4882a593Smuzhiyun 		ret =  wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
4348*4882a593Smuzhiyun 				cfg, &ranging_inst->range_id,
4349*4882a593Smuzhiyun 				NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
4350*4882a593Smuzhiyun 		if (unlikely(ret) || unlikely(status)) {
4351*4882a593Smuzhiyun 			WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
4352*4882a593Smuzhiyun 				__FUNCTION__, ret, status));
4353*4882a593Smuzhiyun 		} else {
4354*4882a593Smuzhiyun 			WL_DBG(("Range cancelled \n"));
4355*4882a593Smuzhiyun 			dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
4356*4882a593Smuzhiyun 				&ranging_inst->peer_addr);
4357*4882a593Smuzhiyun 		}
4358*4882a593Smuzhiyun 	}
4359*4882a593Smuzhiyun 
4360*4882a593Smuzhiyun 	/* Remove ranging instance and clean any corresponding target */
4361*4882a593Smuzhiyun 	wl_cfgnan_remove_ranging_instance(cfg, ranging_inst);
4362*4882a593Smuzhiyun }
4363*4882a593Smuzhiyun 
4364*4882a593Smuzhiyun /*
4365*4882a593Smuzhiyun  * Terminate all ranging sessions
4366*4882a593Smuzhiyun  * with no pending ranging sub service
4367*4882a593Smuzhiyun  */
4368*4882a593Smuzhiyun static void
4369*4882a593Smuzhiyun wl_cfgnan_terminate_all_obsolete_ranging_sessions(
4370*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg)
4371*4882a593Smuzhiyun {
4372*4882a593Smuzhiyun 	/* cancel all related ranging instances */
4373*4882a593Smuzhiyun 	uint8 i = 0;
4374*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
4375*4882a593Smuzhiyun 
4376*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
4377*4882a593Smuzhiyun 		ranging_inst = &cfg->nancfg->nan_ranging_info[i];
4378*4882a593Smuzhiyun 		if (ranging_inst->in_use) {
4379*4882a593Smuzhiyun 			wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
4380*4882a593Smuzhiyun 		}
4381*4882a593Smuzhiyun 	}
4382*4882a593Smuzhiyun 
4383*4882a593Smuzhiyun 	return;
4384*4882a593Smuzhiyun }
4385*4882a593Smuzhiyun 
4386*4882a593Smuzhiyun /*
4387*4882a593Smuzhiyun  * Store svc_ctx for processing during RNG_RPT
4388*4882a593Smuzhiyun  * Return BCME_OK only when svc is added
4389*4882a593Smuzhiyun  */
4390*4882a593Smuzhiyun static int
4391*4882a593Smuzhiyun wl_cfgnan_update_ranging_svc_inst(nan_ranging_inst_t *ranging_inst,
4392*4882a593Smuzhiyun 	nan_svc_info_t *svc)
4393*4882a593Smuzhiyun {
4394*4882a593Smuzhiyun 	int ret = BCME_OK;
4395*4882a593Smuzhiyun 	int i = 0;
4396*4882a593Smuzhiyun 
4397*4882a593Smuzhiyun 	for (i = 0; i < MAX_SUBSCRIBES; i++) {
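	/*
	 * Two passes over svc_idx[]: the first rejects a duplicate svc pointer,
	 * the second stores svc in the first free slot and bumps num_svc_ctx.
	 */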
4398*4882a593Smuzhiyun 		if (ranging_inst->svc_idx[i] == svc) {
4399*4882a593Smuzhiyun 			WL_DBG(("SVC Ctx for ranging already present, "
4400*4882a593Smuzhiyun 			" Duplication not supported: sub_id: %d\n", svc->svc_id));
4401*4882a593Smuzhiyun 			ret = BCME_UNSUPPORTED;
4402*4882a593Smuzhiyun 			goto done;
4403*4882a593Smuzhiyun 		}
4404*4882a593Smuzhiyun 	}
4405*4882a593Smuzhiyun 	for (i = 0; i < MAX_SUBSCRIBES; i++) {
4406*4882a593Smuzhiyun 		if (ranging_inst->svc_idx[i]) {
4407*4882a593Smuzhiyun 			continue;
4408*4882a593Smuzhiyun 		} else {
4409*4882a593Smuzhiyun 			WL_DBG(("Adding SVC Ctx for ranging..svc_id %d\n", svc->svc_id));
4410*4882a593Smuzhiyun 			ranging_inst->svc_idx[i] = svc;
4411*4882a593Smuzhiyun 			ranging_inst->num_svc_ctx++;
4412*4882a593Smuzhiyun 			ret = BCME_OK;
4413*4882a593Smuzhiyun 			goto done;
4414*4882a593Smuzhiyun 		}
4415*4882a593Smuzhiyun 	}
4416*4882a593Smuzhiyun 	if (i == MAX_SUBSCRIBES) {
4417*4882a593Smuzhiyun 		WL_ERR(("wl_cfgnan_update_ranging_svc_inst: "
4418*4882a593Smuzhiyun 			"No resource to hold Ref SVC ctx..svc_id %d\n", svc->svc_id));
4419*4882a593Smuzhiyun 		ret = BCME_NORESOURCE;
4420*4882a593Smuzhiyun 		goto done;
4421*4882a593Smuzhiyun 	}
4422*4882a593Smuzhiyun done:
4423*4882a593Smuzhiyun 	return ret;
4424*4882a593Smuzhiyun }
4425*4882a593Smuzhiyun 
4426*4882a593Smuzhiyun bool
4427*4882a593Smuzhiyun wl_ranging_geofence_session_with_peer(struct bcm_cfg80211 *cfg,
4428*4882a593Smuzhiyun 		struct ether_addr *peer_addr)
4429*4882a593Smuzhiyun {
4430*4882a593Smuzhiyun 	bool ret = FALSE;
4431*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst = NULL;
4432*4882a593Smuzhiyun 
4433*4882a593Smuzhiyun 	rng_inst = wl_cfgnan_check_for_ranging(cfg,
4434*4882a593Smuzhiyun 		peer_addr);
4435*4882a593Smuzhiyun 	if (rng_inst &&
4436*4882a593Smuzhiyun 			(NAN_RANGING_IS_IN_PROG(rng_inst->range_status))) {
4437*4882a593Smuzhiyun 		ret = TRUE;
4438*4882a593Smuzhiyun 	}
4439*4882a593Smuzhiyun 
4440*4882a593Smuzhiyun 	return ret;
4441*4882a593Smuzhiyun }
4442*4882a593Smuzhiyun 
4443*4882a593Smuzhiyun int
4444*4882a593Smuzhiyun wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
4445*4882a593Smuzhiyun 		struct ether_addr *peer_addr)
4446*4882a593Smuzhiyun {
4447*4882a593Smuzhiyun 	int ret = BCME_OK;
4448*4882a593Smuzhiyun 	int err_at = 0;
4449*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
4450*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4451*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst;
4452*4882a593Smuzhiyun 	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
4453*4882a593Smuzhiyun 
4454*4882a593Smuzhiyun 	if (!ranging_inst) {
4455*4882a593Smuzhiyun 		WL_INFORM_MEM(("Ranging Entry for peer:" MACDBG ", not found\n",
4456*4882a593Smuzhiyun 			MAC2STRDBG(peer_addr)));
4457*4882a593Smuzhiyun 		ASSERT(0);
4458*4882a593Smuzhiyun 		/* Ranging inst should have been added before adding target */
4459*4882a593Smuzhiyun 		dhd_rtt_remove_geofence_target(dhd, peer_addr);
4460*4882a593Smuzhiyun 		ret = BCME_ERROR;
4461*4882a593Smuzhiyun 		err_at = 1;
4462*4882a593Smuzhiyun 		goto exit;
4463*4882a593Smuzhiyun 	}
4464*4882a593Smuzhiyun 
4465*4882a593Smuzhiyun 	if (!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
4466*4882a593Smuzhiyun 		WL_DBG(("Trigger range request with first svc in svc list of range inst\n"));
4467*4882a593Smuzhiyun 		ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
4468*4882a593Smuzhiyun 				cfg, ranging_inst, ranging_inst->svc_idx[0],
4469*4882a593Smuzhiyun 				NAN_RANGE_REQ_CMD, TRUE);
4470*4882a593Smuzhiyun 		if (ret != BCME_OK) {
4471*4882a593Smuzhiyun 			/* Unsupported/busy indicates an existing ranging session with this peer */
4472*4882a593Smuzhiyun 			if (ret == BCME_BUSY) {
4473*4882a593Smuzhiyun 				/* TODO: Attempt again over a timer */
4474*4882a593Smuzhiyun 				err_at = 2;
4475*4882a593Smuzhiyun 			} else {
4476*4882a593Smuzhiyun 				/* Remove target and clean ranging inst */
4477*4882a593Smuzhiyun 				wl_cfgnan_remove_ranging_instance(cfg, ranging_inst);
4478*4882a593Smuzhiyun 				err_at = 3;
4479*4882a593Smuzhiyun 				goto exit;
4480*4882a593Smuzhiyun 			}
4481*4882a593Smuzhiyun 		} else {
4482*4882a593Smuzhiyun 			ranging_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
4483*4882a593Smuzhiyun 			ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
4484*4882a593Smuzhiyun 		}
4485*4882a593Smuzhiyun 	} else if (ranging_inst->range_role != NAN_RANGING_ROLE_RESPONDER) {
4486*4882a593Smuzhiyun 		/* already in progress but not as responder.. This should not happen */
4487*4882a593Smuzhiyun 		ASSERT(!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status));
4488*4882a593Smuzhiyun 		ret = BCME_ERROR;
4489*4882a593Smuzhiyun 		err_at = 4;
4490*4882a593Smuzhiyun 		goto exit;
4491*4882a593Smuzhiyun 	} else {
4492*4882a593Smuzhiyun 		/* Already in progress as responder, bail out */
4493*4882a593Smuzhiyun 		goto exit;
4494*4882a593Smuzhiyun 	}
4495*4882a593Smuzhiyun 
4496*4882a593Smuzhiyun exit:
4497*4882a593Smuzhiyun 	if (ret) {
4498*4882a593Smuzhiyun 		WL_ERR(("wl_cfgnan_trigger_geofencing_ranging: Failed to "
4499*4882a593Smuzhiyun 			"trigger ranging, peer: " MACDBG " ret"
4500*4882a593Smuzhiyun 			" = (%d), err_at = %d\n", MAC2STRDBG(peer_addr),
4501*4882a593Smuzhiyun 			ret, err_at));
4502*4882a593Smuzhiyun 	}
4503*4882a593Smuzhiyun 	return ret;
4504*4882a593Smuzhiyun }
4505*4882a593Smuzhiyun 
4506*4882a593Smuzhiyun static int
4507*4882a593Smuzhiyun wl_cfgnan_check_disc_result_for_ranging(struct bcm_cfg80211 *cfg,
4508*4882a593Smuzhiyun 		nan_event_data_t* nan_event_data, bool *send_disc_result)
4509*4882a593Smuzhiyun {
4510*4882a593Smuzhiyun 	nan_svc_info_t *svc;
4511*4882a593Smuzhiyun 	int ret = BCME_OK;
4512*4882a593Smuzhiyun 	rtt_geofence_target_info_t geofence_target;
4513*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4514*4882a593Smuzhiyun 	uint8 index, rtt_invalid_reason = RTT_STATE_VALID;
4515*4882a593Smuzhiyun 	bool add_target;
4516*4882a593Smuzhiyun 
4517*4882a593Smuzhiyun 	*send_disc_result = TRUE;
4518*4882a593Smuzhiyun 	svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
4519*4882a593Smuzhiyun 
4520*4882a593Smuzhiyun 	if (svc && svc->ranging_required) {
4521*4882a593Smuzhiyun 		nan_ranging_inst_t *ranging_inst;
4522*4882a593Smuzhiyun 		ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
4523*4882a593Smuzhiyun 				&nan_event_data->remote_nmi,
4524*4882a593Smuzhiyun 				NAN_RANGING_ROLE_INITIATOR);
4525*4882a593Smuzhiyun 		if (!ranging_inst) {
4526*4882a593Smuzhiyun 			ret = BCME_NORESOURCE;
4527*4882a593Smuzhiyun 			goto exit;
4528*4882a593Smuzhiyun 		}
4529*4882a593Smuzhiyun 		ASSERT(ranging_inst->range_role != NAN_RANGING_ROLE_INVALID);
4530*4882a593Smuzhiyun 
4531*4882a593Smuzhiyun 		/* For responder role, range state should be in progress only */
4532*4882a593Smuzhiyun 		ASSERT((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) ||
4533*4882a593Smuzhiyun 			NAN_RANGING_IS_IN_PROG(ranging_inst->range_status));
4534*4882a593Smuzhiyun 
4535*4882a593Smuzhiyun 		/*
4536*4882a593Smuzhiyun 		 * On receiving a disc result with ranging required, add a target if the
4537*4882a593Smuzhiyun 		 * ranging role is responder (range state always has to be in progress),
4538*4882a593Smuzhiyun 		 * or the ranging role is initiator and ranging is not already in progress
4539*4882a593Smuzhiyun 		 */
4540*4882a593Smuzhiyun 		add_target = ((ranging_inst->range_role ==  NAN_RANGING_ROLE_RESPONDER) ||
4541*4882a593Smuzhiyun 			((ranging_inst->range_role ==  NAN_RANGING_ROLE_INITIATOR) &&
4542*4882a593Smuzhiyun 			(!NAN_RANGING_IS_IN_PROG(ranging_inst->range_status))));
4543*4882a593Smuzhiyun 		if (add_target) {
4544*4882a593Smuzhiyun 			WL_DBG(("Add Range request to geofence target list\n"));
4545*4882a593Smuzhiyun 			memcpy(&geofence_target.peer_addr, &nan_event_data->remote_nmi,
4546*4882a593Smuzhiyun 					ETHER_ADDR_LEN);
4547*4882a593Smuzhiyun 			/* check if target is already added */
4548*4882a593Smuzhiyun 			if (!dhd_rtt_get_geofence_target(dhd, &nan_event_data->remote_nmi, &index))
4549*4882a593Smuzhiyun 			{
4550*4882a593Smuzhiyun 				ret = dhd_rtt_add_geofence_target(dhd, &geofence_target);
4551*4882a593Smuzhiyun 				if (unlikely(ret)) {
4552*4882a593Smuzhiyun 					WL_ERR(("Failed to add geofence Tgt, ret = (%d)\n", ret));
4553*4882a593Smuzhiyun 					bzero(ranging_inst, sizeof(*ranging_inst));
4554*4882a593Smuzhiyun 					goto exit;
4555*4882a593Smuzhiyun 				} else {
4556*4882a593Smuzhiyun 					WL_INFORM_MEM(("Geofence Tgt Added:" MACDBG " sub_id:%d\n",
4557*4882a593Smuzhiyun 						MAC2STRDBG(&geofence_target.peer_addr),
4558*4882a593Smuzhiyun 						svc->svc_id));
4559*4882a593Smuzhiyun 				}
4560*4882a593Smuzhiyun 			}
4561*4882a593Smuzhiyun 			if (wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc)
4562*4882a593Smuzhiyun 					!= BCME_OK) {
4563*4882a593Smuzhiyun 					goto exit;
4564*4882a593Smuzhiyun 			}
4565*4882a593Smuzhiyun 			if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
4566*4882a593Smuzhiyun 				/* Adding RTT target while responder, leads to role concurrency */
4567*4882a593Smuzhiyun 				WL_INFORM_MEM(("Entering Role Concurrency constraint, peer : "
4568*4882a593Smuzhiyun 					MACDBG "\n", MAC2STRDBG(&ranging_inst->peer_addr)));
4569*4882a593Smuzhiyun 				ranging_inst->role_concurrency_status = TRUE;
4570*4882a593Smuzhiyun 			} else {
4571*4882a593Smuzhiyun 				/* Trigger/Reset geofence RTT */
4572*4882a593Smuzhiyun 				wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
4573*4882a593Smuzhiyun 					RTT_SCHED_SUB_MATCH, TRUE);
4574*4882a593Smuzhiyun 			}
4575*4882a593Smuzhiyun 		} else {
4576*4882a593Smuzhiyun 			/* Target already added, check & add svc_inst ref to rang_inst */
4577*4882a593Smuzhiyun 			wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc);
4578*4882a593Smuzhiyun 		}
4579*4882a593Smuzhiyun 		/* Disc event will be given on receiving range_rpt event */
4580*4882a593Smuzhiyun 		WL_TRACE(("Disc event will be given when Range RPT event is recvd"));
4581*4882a593Smuzhiyun 	} else {
4582*4882a593Smuzhiyun 		ret = BCME_UNSUPPORTED;
4583*4882a593Smuzhiyun 	}
4584*4882a593Smuzhiyun 
4585*4882a593Smuzhiyun exit:
4586*4882a593Smuzhiyun 	if (ret == BCME_OK) {
4587*4882a593Smuzhiyun 		/* Check if we have to send disc result immediately or not */
4588*4882a593Smuzhiyun 		rtt_invalid_reason = dhd_rtt_invalid_states
4589*4882a593Smuzhiyun 			(bcmcfg_to_prmry_ndev(cfg),  &nan_event_data->remote_nmi);
4590*4882a593Smuzhiyun 		/*
4591*4882a593Smuzhiyun 		 * If an instant RTT is not possible (RTT postponed),
4592*4882a593Smuzhiyun 		 * send the discovery result immediately, e.g.
4593*4882a593Smuzhiyun 		 * in case of an invalid rtt state such as
4594*4882a593Smuzhiyun 		 * ndp connected/connecting,
4595*4882a593Smuzhiyun 		 * or role_concurrency active with the peer.
4596*4882a593Smuzhiyun 		 * Otherwise, the result should be posted
4597*4882a593Smuzhiyun 		 * on the ranging report event after RTT is done
4598*4882a593Smuzhiyun 		 */
4599*4882a593Smuzhiyun 		if ((rtt_invalid_reason == RTT_STATE_VALID) &&
4600*4882a593Smuzhiyun 			(!wl_cfgnan_check_role_concurrency(cfg,
4601*4882a593Smuzhiyun 			&nan_event_data->remote_nmi))) {
4602*4882a593Smuzhiyun 			/* Avoid sending disc result instantly */
4603*4882a593Smuzhiyun 			*send_disc_result = FALSE;
4604*4882a593Smuzhiyun 		}
4605*4882a593Smuzhiyun 	}
4606*4882a593Smuzhiyun 
4607*4882a593Smuzhiyun 	return ret;
4608*4882a593Smuzhiyun }
4609*4882a593Smuzhiyun 
4610*4882a593Smuzhiyun bool
4611*4882a593Smuzhiyun wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg)
4612*4882a593Smuzhiyun {
4613*4882a593Smuzhiyun 	int i = 0;
4614*4882a593Smuzhiyun 	uint8 rng_progress_count = 0;
4615*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
4616*4882a593Smuzhiyun 
4617*4882a593Smuzhiyun 	for (i =  0; i < NAN_MAX_RANGING_INST; i++) {
4618*4882a593Smuzhiyun 		ranging_inst = &cfg->nancfg->nan_ranging_info[i];
4619*4882a593Smuzhiyun 		if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
4620*4882a593Smuzhiyun 			rng_progress_count++;
4621*4882a593Smuzhiyun 		}
4622*4882a593Smuzhiyun 	}
4623*4882a593Smuzhiyun 
4624*4882a593Smuzhiyun 	if (rng_progress_count >= NAN_MAX_RANGING_SSN_ALLOWED) {
4625*4882a593Smuzhiyun 		return FALSE;
4626*4882a593Smuzhiyun 	}
4627*4882a593Smuzhiyun 	return TRUE;
4628*4882a593Smuzhiyun }
4629*4882a593Smuzhiyun 
4630*4882a593Smuzhiyun uint8
4631*4882a593Smuzhiyun wl_cfgnan_cancel_rng_responders(struct net_device *ndev)
4632*4882a593Smuzhiyun {
4633*4882a593Smuzhiyun 	int i = 0;
4634*4882a593Smuzhiyun 	uint8 num_resp_cancelled = 0;
4635*4882a593Smuzhiyun 	int status, ret;
4636*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
4637*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
4638*4882a593Smuzhiyun 
4639*4882a593Smuzhiyun 	for (i =  0; i < NAN_MAX_RANGING_INST; i++) {
4640*4882a593Smuzhiyun 		ranging_inst = &cfg->nancfg->nan_ranging_info[i];
4641*4882a593Smuzhiyun 		if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status) &&
4642*4882a593Smuzhiyun 			(ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER)) {
4643*4882a593Smuzhiyun 			num_resp_cancelled++;
4644*4882a593Smuzhiyun 			ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
4645*4882a593Smuzhiyun 				&ranging_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
4646*4882a593Smuzhiyun 			if (unlikely(ret) || unlikely(status)) {
4647*4882a593Smuzhiyun 				WL_ERR(("wl_cfgnan_cancel_rng_responders: Failed to cancel"
4648*4882a593Smuzhiyun 					" existing ranging, ret = (%d)\n", ret));
4649*4882a593Smuzhiyun 			}
4650*4882a593Smuzhiyun 			WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
4651*4882a593Smuzhiyun 				MAC2STRDBG(&(ranging_inst->peer_addr))));
4652*4882a593Smuzhiyun 			bzero(ranging_inst, sizeof(*ranging_inst));
4653*4882a593Smuzhiyun 		}
4654*4882a593Smuzhiyun 	}
4655*4882a593Smuzhiyun 	return num_resp_cancelled;
4656*4882a593Smuzhiyun }
4657*4882a593Smuzhiyun 
4658*4882a593Smuzhiyun /* ranging request event handler */
4659*4882a593Smuzhiyun static int
4660*4882a593Smuzhiyun wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
4661*4882a593Smuzhiyun 		wl_nan_ev_rng_req_ind_t *rng_ind)
4662*4882a593Smuzhiyun {
4663*4882a593Smuzhiyun 	int ret = BCME_OK;
4664*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
4665*4882a593Smuzhiyun 	uint8 cancel_flags = 0;
4666*4882a593Smuzhiyun 	bool accept = TRUE;
4667*4882a593Smuzhiyun 	nan_ranging_inst_t tmp_rng_inst;
4668*4882a593Smuzhiyun 	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
4669*4882a593Smuzhiyun 	struct ether_addr * peer_addr = &(rng_ind->peer_m_addr);
4670*4882a593Smuzhiyun 	uint8 rtt_invalid_state;
4671*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
4672*4882a593Smuzhiyun 	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
4673*4882a593Smuzhiyun 	int err_at = 0;
4674*4882a593Smuzhiyun 
4675*4882a593Smuzhiyun 	WL_DBG(("Trigger range response\n"));
4676*4882a593Smuzhiyun 
4677*4882a593Smuzhiyun 	/* Check if ranging is allowed */
4678*4882a593Smuzhiyun 	rtt_invalid_state = dhd_rtt_invalid_states(ndev, peer_addr);
4679*4882a593Smuzhiyun 	if (rtt_invalid_state != RTT_STATE_VALID) {
4680*4882a593Smuzhiyun 		WL_INFORM_MEM(("Cannot allow ranging due to reason %d \n", rtt_invalid_state));
4681*4882a593Smuzhiyun 		ret = BCME_NORESOURCE;
4682*4882a593Smuzhiyun 		err_at = 1;
4683*4882a593Smuzhiyun 		goto done;
4684*4882a593Smuzhiyun 	}
4685*4882a593Smuzhiyun 
4686*4882a593Smuzhiyun 	mutex_lock(&rtt_status->rtt_mutex);
4687*4882a593Smuzhiyun 
4688*4882a593Smuzhiyun 	if (rtt_status && !RTT_IS_STOPPED(rtt_status)) {
4689*4882a593Smuzhiyun 		WL_INFORM_MEM(("Directed RTT in progress, reject RNG_REQ\n"));
4690*4882a593Smuzhiyun 		ret = BCME_NORESOURCE;
4691*4882a593Smuzhiyun 		err_at = 2;
4692*4882a593Smuzhiyun 		goto done;
4693*4882a593Smuzhiyun 	}
4694*4882a593Smuzhiyun 
4695*4882a593Smuzhiyun 	/* Check if ranging set up in progress */
4696*4882a593Smuzhiyun 	if (dhd_rtt_is_geofence_setup_inprog(dhd)) {
4697*4882a593Smuzhiyun 		WL_INFORM_MEM(("Ranging set up already in progress, "
4698*4882a593Smuzhiyun 			"RNG IND event dropped\n"));
4699*4882a593Smuzhiyun 		err_at = 3;
4700*4882a593Smuzhiyun 		ret = BCME_NOTREADY;
4701*4882a593Smuzhiyun 		goto done;
4702*4882a593Smuzhiyun 	}
4703*4882a593Smuzhiyun 
4704*4882a593Smuzhiyun 	/* Check if we already have a ranging session with this peer.
4705*4882a593Smuzhiyun 	* If so, the following policies apply:
4706*4882a593Smuzhiyun 	* if we are already a Geofence Initiator or responder w.r.t. the peer,
4707*4882a593Smuzhiyun 	* then silently tear down the current session and accept the REQ;
4708*4882a593Smuzhiyun 	* if we are in direct rtt initiator role, then reject.
4709*4882a593Smuzhiyun 	*/
4710*4882a593Smuzhiyun 	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
4711*4882a593Smuzhiyun 	if (ranging_inst) {
4712*4882a593Smuzhiyun 		if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status)) {
4713*4882a593Smuzhiyun 			if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE ||
4714*4882a593Smuzhiyun 					ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
4715*4882a593Smuzhiyun 				WL_INFORM_MEM(("Already responder/geofence for the Peer, cancel "
4716*4882a593Smuzhiyun 					"current ssn and accept new one,"
4717*4882a593Smuzhiyun 					" range_type = %d, role = %d\n",
4718*4882a593Smuzhiyun 					ranging_inst->range_type, ranging_inst->range_role));
4719*4882a593Smuzhiyun 				cancel_flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
4720*4882a593Smuzhiyun 					NAN_RNG_TERM_FLAG_SILENT_TEARDOWN;
4721*4882a593Smuzhiyun 					wl_cfgnan_suspend_geofence_rng_session(ndev,
4722*4882a593Smuzhiyun 						&(rng_ind->peer_m_addr),
4723*4882a593Smuzhiyun 						RTT_GEO_SUSPN_PEER_RTT_TRIGGER, cancel_flags);
4724*4882a593Smuzhiyun 			} else {
4725*4882a593Smuzhiyun 				WL_ERR(("Reject the RNG_REQ_IND in direct rtt initiator role\n"));
4726*4882a593Smuzhiyun 				err_at = 4;
4727*4882a593Smuzhiyun 				ret = BCME_BUSY;
4728*4882a593Smuzhiyun 				goto done;
4729*4882a593Smuzhiyun 			}
4730*4882a593Smuzhiyun 		} else {
4731*4882a593Smuzhiyun 			/* Check if new Ranging session is allowed */
4732*4882a593Smuzhiyun 			if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
4733*4882a593Smuzhiyun 				WL_ERR(("Cannot allow more ranging sessions\n"));
4734*4882a593Smuzhiyun 				err_at = 5;
4735*4882a593Smuzhiyun 				ret = BCME_NORESOURCE;
4736*4882a593Smuzhiyun 				goto done;
4737*4882a593Smuzhiyun 			}
4738*4882a593Smuzhiyun 		}
4739*4882a593Smuzhiyun 		/* reset ranging instance for responder role */
4740*4882a593Smuzhiyun 		ranging_inst->range_status = NAN_RANGING_REQUIRED;
4741*4882a593Smuzhiyun 		ranging_inst->range_role = NAN_RANGING_ROLE_RESPONDER;
4742*4882a593Smuzhiyun 		ranging_inst->range_type = 0;
4743*4882a593Smuzhiyun 	} else {
4744*4882a593Smuzhiyun 		/* Check if new Ranging session is allowed */
4745*4882a593Smuzhiyun 		if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
4746*4882a593Smuzhiyun 			WL_ERR(("Cannot allow more ranging sessions\n"));
4747*4882a593Smuzhiyun 			err_at = 6;
4748*4882a593Smuzhiyun 			ret = BCME_NORESOURCE;
4749*4882a593Smuzhiyun 			goto done;
4750*4882a593Smuzhiyun 		}
4751*4882a593Smuzhiyun 
4752*4882a593Smuzhiyun 		ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
4753*4882a593Smuzhiyun 				NAN_RANGING_ROLE_RESPONDER);
4754*4882a593Smuzhiyun 		ASSERT(ranging_inst);
4755*4882a593Smuzhiyun 		if (!ranging_inst) {
4756*4882a593Smuzhiyun 			WL_ERR(("Failed to create ranging instance \n"));
4757*4882a593Smuzhiyun 			err_at = 7;
4758*4882a593Smuzhiyun 			ret = BCME_NORESOURCE;
4759*4882a593Smuzhiyun 			goto done;
4760*4882a593Smuzhiyun 		}
4761*4882a593Smuzhiyun 	}
4762*4882a593Smuzhiyun 
4763*4882a593Smuzhiyun done:
4764*4882a593Smuzhiyun 	if (ret != BCME_OK) {
4765*4882a593Smuzhiyun 		/* reject the REQ using temp ranging instance */
4766*4882a593Smuzhiyun 		bzero(&tmp_rng_inst, sizeof(tmp_rng_inst));
4767*4882a593Smuzhiyun 		ranging_inst = &tmp_rng_inst;
4768*4882a593Smuzhiyun 		(void)memcpy_s(&tmp_rng_inst.peer_addr, ETHER_ADDR_LEN,
4769*4882a593Smuzhiyun 			&rng_ind->peer_m_addr, ETHER_ADDR_LEN);
4770*4882a593Smuzhiyun 		accept = FALSE;
4771*4882a593Smuzhiyun 	}
4772*4882a593Smuzhiyun 
4773*4882a593Smuzhiyun 	ranging_inst->range_id = rng_ind->rng_id;
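	/*
	 * At this point ranging_inst is either the real instance (request accepted)
	 * or the on-stack tmp_rng_inst (request rejected); either way the range id
	 * from the indication is echoed back in the response below.
	 */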
4774*4882a593Smuzhiyun 
4775*4882a593Smuzhiyun 	WL_INFORM_MEM(("Trigger Ranging at Responder, ret = %d, err_at = %d, "
4776*4882a593Smuzhiyun 		"accept = %d, rng_id = %d\n", ret, err_at,
4777*4882a593Smuzhiyun 		accept, rng_ind->rng_id));
4778*4882a593Smuzhiyun 	ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
4779*4882a593Smuzhiyun 		NULL, NAN_RANGE_REQ_EVNT, accept);
4780*4882a593Smuzhiyun 	if (unlikely(ret) || !accept) {
4781*4882a593Smuzhiyun 		WL_ERR(("Failed to trigger ranging while handling range request, "
4782*4882a593Smuzhiyun 			" ret = %d, rng_id = %d, accept %d\n", ret,
4783*4882a593Smuzhiyun 			rng_ind->rng_id, accept));
4784*4882a593Smuzhiyun 		wl_cfgnan_reset_remove_ranging_instance(cfg, ranging_inst);
4785*4882a593Smuzhiyun 	} else {
4786*4882a593Smuzhiyun 		dhd_rtt_set_geofence_setup_status(dhd, TRUE,
4787*4882a593Smuzhiyun 			&ranging_inst->peer_addr);
4788*4882a593Smuzhiyun 	}
4789*4882a593Smuzhiyun 	mutex_unlock(&rtt_status->rtt_mutex);
4790*4882a593Smuzhiyun 	return ret;
4791*4882a593Smuzhiyun }
4792*4882a593Smuzhiyun 
4793*4882a593Smuzhiyun /* ranging request and response iovar handler */
4794*4882a593Smuzhiyun int
4795*4882a593Smuzhiyun wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
4796*4882a593Smuzhiyun 		void *ranging_ctxt, nan_svc_info_t *svc,
4797*4882a593Smuzhiyun 		uint8 range_cmd, bool accept_req)
4798*4882a593Smuzhiyun {
4799*4882a593Smuzhiyun 	s32 ret = BCME_OK;
4800*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
4801*4882a593Smuzhiyun 	wl_nan_range_req_t *range_req = NULL;
4802*4882a593Smuzhiyun 	wl_nan_range_resp_t *range_resp = NULL;
4803*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
4804*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
4805*4882a593Smuzhiyun 	uint32 status;
4806*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
4807*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
4808*4882a593Smuzhiyun 	nan_avail_cmd_data cmd_data;
4809*4882a593Smuzhiyun 
4810*4882a593Smuzhiyun 	NAN_DBG_ENTER();
4811*4882a593Smuzhiyun 
4812*4882a593Smuzhiyun 	bzero(&cmd_data, sizeof(cmd_data));
4813*4882a593Smuzhiyun 	ret = memcpy_s(&cmd_data.peer_nmi, ETHER_ADDR_LEN,
4814*4882a593Smuzhiyun 			&ranging_inst->peer_addr, ETHER_ADDR_LEN);
4815*4882a593Smuzhiyun 	if (ret != BCME_OK) {
4816*4882a593Smuzhiyun 		WL_ERR(("Failed to copy ranging peer addr\n"));
4817*4882a593Smuzhiyun 		goto fail;
4818*4882a593Smuzhiyun 	}
4819*4882a593Smuzhiyun 
4820*4882a593Smuzhiyun 	cmd_data.avail_period = NAN_RANGING_PERIOD;
4821*4882a593Smuzhiyun 	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
4822*4882a593Smuzhiyun 			cfg, &cmd_data, WL_AVAIL_LOCAL);
4823*4882a593Smuzhiyun 	if (ret != BCME_OK) {
4824*4882a593Smuzhiyun 		WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
4825*4882a593Smuzhiyun 		goto fail;
4826*4882a593Smuzhiyun 	}
4827*4882a593Smuzhiyun 
4828*4882a593Smuzhiyun 	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
4829*4882a593Smuzhiyun 			cfg, &cmd_data, WL_AVAIL_RANGING);
4830*4882a593Smuzhiyun 	if (unlikely(ret)) {
4831*4882a593Smuzhiyun 		WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
4832*4882a593Smuzhiyun 		goto fail;
4833*4882a593Smuzhiyun 	}
4834*4882a593Smuzhiyun 
4835*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
4836*4882a593Smuzhiyun 	if (!nan_buf) {
4837*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
4838*4882a593Smuzhiyun 		ret = BCME_NOMEM;
4839*4882a593Smuzhiyun 		goto fail;
4840*4882a593Smuzhiyun 	}
4841*4882a593Smuzhiyun 
4842*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4843*4882a593Smuzhiyun 	nan_buf->count = 0;
4844*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
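	/*
	 * nan_buf_size now tracks the space left for sub-commands; it is reduced
	 * again below once the range request/response sub-command has been packed.
	 */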
4845*4882a593Smuzhiyun 
4846*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
4847*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4848*4882a593Smuzhiyun 	if (range_cmd == NAN_RANGE_REQ_CMD) {
4849*4882a593Smuzhiyun 		sub_cmd->id = htod16(WL_NAN_CMD_RANGE_REQUEST);
4850*4882a593Smuzhiyun 		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_req_t);
4851*4882a593Smuzhiyun 		range_req = (wl_nan_range_req_t *)(sub_cmd->data);
4852*4882a593Smuzhiyun 		/* ranging config */
4853*4882a593Smuzhiyun 		range_req->peer = ranging_inst->peer_addr;
4854*4882a593Smuzhiyun 		if (svc) {
4855*4882a593Smuzhiyun 			range_req->interval = svc->ranging_interval;
4856*4882a593Smuzhiyun 			/* Limits are in cm from host */
4857*4882a593Smuzhiyun 			range_req->ingress = svc->ingress_limit;
4858*4882a593Smuzhiyun 			range_req->egress = svc->egress_limit;
4859*4882a593Smuzhiyun 		}
4860*4882a593Smuzhiyun 		range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
4861*4882a593Smuzhiyun 	} else {
4862*4882a593Smuzhiyun 		/* range response config */
4863*4882a593Smuzhiyun 		sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
4864*4882a593Smuzhiyun 		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_resp_t);
4865*4882a593Smuzhiyun 		range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
4866*4882a593Smuzhiyun 		range_resp->range_id = ranging_inst->range_id;
4867*4882a593Smuzhiyun 		range_resp->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
4868*4882a593Smuzhiyun 		if (accept_req) {
4869*4882a593Smuzhiyun 			range_resp->status = NAN_RNG_REQ_ACCEPTED_BY_HOST;
4870*4882a593Smuzhiyun 		} else {
4871*4882a593Smuzhiyun 			range_resp->status = NAN_RNG_REQ_REJECTED_BY_HOST;
4872*4882a593Smuzhiyun 		}
4873*4882a593Smuzhiyun 		nan_buf->is_set = true;
4874*4882a593Smuzhiyun 	}
4875*4882a593Smuzhiyun 
4876*4882a593Smuzhiyun 	nan_buf_size -= (sub_cmd->len +
4877*4882a593Smuzhiyun 			OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
4878*4882a593Smuzhiyun 	nan_buf->count++;
4879*4882a593Smuzhiyun 
4880*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
4881*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
4882*4882a593Smuzhiyun 			&status,
4883*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
4884*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
4885*4882a593Smuzhiyun 		WL_ERR(("nan ranging failed ret = %d status = %d\n",
4886*4882a593Smuzhiyun 				ret, status));
4887*4882a593Smuzhiyun 		ret = (ret == BCME_OK) ? status : ret;
4888*4882a593Smuzhiyun 		goto fail;
4889*4882a593Smuzhiyun 	}
4890*4882a593Smuzhiyun 	WL_TRACE(("nan ranging trigger successful\n"));
4891*4882a593Smuzhiyun 	if (range_cmd == NAN_RANGE_REQ_CMD) {
4892*4882a593Smuzhiyun 		WL_INFORM_MEM(("Ranging Req Triggered"
4893*4882a593Smuzhiyun 			" peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
4894*4882a593Smuzhiyun 			MAC2STRDBG(&ranging_inst->peer_addr), range_req->indication,
4895*4882a593Smuzhiyun 			range_req->ingress, range_req->egress));
4896*4882a593Smuzhiyun 	} else {
4897*4882a593Smuzhiyun 		WL_INFORM_MEM(("Ranging Resp Triggered"
4898*4882a593Smuzhiyun 			" peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
4899*4882a593Smuzhiyun 			MAC2STRDBG(&ranging_inst->peer_addr), range_resp->indication,
4900*4882a593Smuzhiyun 			range_resp->ingress, range_resp->egress));
4901*4882a593Smuzhiyun 	}
4902*4882a593Smuzhiyun 
4903*4882a593Smuzhiyun 	/* check the response buff for request */
4904*4882a593Smuzhiyun 	if (range_cmd == NAN_RANGE_REQ_CMD) {
4905*4882a593Smuzhiyun 		ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
4906*4882a593Smuzhiyun 				&ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
4907*4882a593Smuzhiyun 		WL_INFORM_MEM(("ranging instance returned %d\n", ranging_inst->range_id));
4908*4882a593Smuzhiyun 	}
4909*4882a593Smuzhiyun 
4910*4882a593Smuzhiyun 	/* Move Ranging instance to set up in progress state */
4911*4882a593Smuzhiyun 	ranging_inst->range_status = NAN_RANGING_SETUP_IN_PROGRESS;
4912*4882a593Smuzhiyun 
4913*4882a593Smuzhiyun fail:
4914*4882a593Smuzhiyun 	if (nan_buf) {
4915*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
4916*4882a593Smuzhiyun 	}
4917*4882a593Smuzhiyun 
4918*4882a593Smuzhiyun 	NAN_DBG_EXIT();
4919*4882a593Smuzhiyun 	return ret;
4920*4882a593Smuzhiyun }
4921*4882a593Smuzhiyun 
4922*4882a593Smuzhiyun bool
4923*4882a593Smuzhiyun wl_cfgnan_ranging_is_in_prog_for_peer(struct bcm_cfg80211 *cfg, struct ether_addr *peer_addr)
4924*4882a593Smuzhiyun {
4925*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst = NULL;
4926*4882a593Smuzhiyun 
4927*4882a593Smuzhiyun 	rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
4928*4882a593Smuzhiyun 
4929*4882a593Smuzhiyun 	return (rng_inst && NAN_RANGING_IS_IN_PROG(rng_inst->range_status));
4930*4882a593Smuzhiyun }
4931*4882a593Smuzhiyun 
4932*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
4933*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
4934*4882a593Smuzhiyun 
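/*
 * Bloom filter helpers below back the NAN service response filter (SRF):
 * wl_cfgnan_sd_params_handler() uses them when SRF_TYPE_BLOOM_FILTER is
 * selected, to build the filter packed into WL_NAN_XTLV_CFG_SR_FILTER.
 */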
4935*4882a593Smuzhiyun static void *wl_nan_bloom_alloc(void *ctx, uint size)
4936*4882a593Smuzhiyun {
4937*4882a593Smuzhiyun 	uint8 *buf;
4938*4882a593Smuzhiyun 	BCM_REFERENCE(ctx);
4939*4882a593Smuzhiyun 
4940*4882a593Smuzhiyun 	buf = kmalloc(size, GFP_KERNEL);
4941*4882a593Smuzhiyun 	if (!buf) {
4942*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
4943*4882a593Smuzhiyun 		buf = NULL;
4944*4882a593Smuzhiyun 	}
4945*4882a593Smuzhiyun 	return buf;
4946*4882a593Smuzhiyun }
4947*4882a593Smuzhiyun 
4948*4882a593Smuzhiyun static void wl_nan_bloom_free(void *ctx, void *buf, uint size)
4949*4882a593Smuzhiyun {
4950*4882a593Smuzhiyun 	BCM_REFERENCE(ctx);
4951*4882a593Smuzhiyun 	BCM_REFERENCE(size);
4952*4882a593Smuzhiyun 	if (buf) {
4953*4882a593Smuzhiyun 		kfree(buf);
4954*4882a593Smuzhiyun 	}
4955*4882a593Smuzhiyun }
4956*4882a593Smuzhiyun 
4957*4882a593Smuzhiyun static uint wl_nan_hash(void *ctx, uint index, const uint8 *input, uint input_len)
4958*4882a593Smuzhiyun {
4959*4882a593Smuzhiyun 	uint8* filter_idx = (uint8*)ctx;
4960*4882a593Smuzhiyun 	uint8 i = (*filter_idx * WL_NAN_HASHES_PER_BLOOM) + (uint8)index;
4961*4882a593Smuzhiyun 	uint b = 0;
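	/*
	 * i acts as a one-byte salt (filter index scaled by hashes-per-bloom, plus
	 * the hash index), so each registered hash function yields a distinct CRC32
	 * over the same input.
	 */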
4962*4882a593Smuzhiyun 
4963*4882a593Smuzhiyun 	/* Steps 1 and 2 as explained in Section 6.2 */
4964*4882a593Smuzhiyun 	/* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
4965*4882a593Smuzhiyun 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
4966*4882a593Smuzhiyun 	b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
4967*4882a593Smuzhiyun 	b = hndcrc32((uint8*)input, input_len, b);
4968*4882a593Smuzhiyun 	GCC_DIAGNOSTIC_POP();
4969*4882a593Smuzhiyun 	/* Obtain the last 2 bytes of the CRC32 output */
4970*4882a593Smuzhiyun 	b &= NAN_BLOOM_CRC32_MASK;
4971*4882a593Smuzhiyun 
4972*4882a593Smuzhiyun 	/* Step 3 is completed by bcmbloom functions */
4973*4882a593Smuzhiyun 	return b;
4974*4882a593Smuzhiyun }
4975*4882a593Smuzhiyun 
4976*4882a593Smuzhiyun static int wl_nan_bloom_create(bcm_bloom_filter_t **bp, uint *idx, uint size)
4977*4882a593Smuzhiyun {
4978*4882a593Smuzhiyun 	uint i;
4979*4882a593Smuzhiyun 	int err;
4980*4882a593Smuzhiyun 
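	/*
	 * Create a bloom filter of 'size' bytes and register WL_NAN_HASHES_PER_BLOOM
	 * wl_nan_hash() callbacks on it; the hashes are applied when peer MAC
	 * addresses are added via bcm_bloom_add_member() in the SRF packing path.
	 */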
4981*4882a593Smuzhiyun 	err = bcm_bloom_create(wl_nan_bloom_alloc, wl_nan_bloom_free,
4982*4882a593Smuzhiyun 			idx, WL_NAN_HASHES_PER_BLOOM, size, bp);
4983*4882a593Smuzhiyun 	if (err != BCME_OK) {
4984*4882a593Smuzhiyun 		goto exit;
4985*4882a593Smuzhiyun 	}
4986*4882a593Smuzhiyun 
4987*4882a593Smuzhiyun 	/* Populate bloom filter with hash functions */
4988*4882a593Smuzhiyun 	for (i = 0; i < WL_NAN_HASHES_PER_BLOOM; i++) {
4989*4882a593Smuzhiyun 		err = bcm_bloom_add_hash(*bp, wl_nan_hash, &i);
4990*4882a593Smuzhiyun 		if (err) {
4991*4882a593Smuzhiyun 			WL_ERR(("bcm_bloom_add_hash failed\n"));
4992*4882a593Smuzhiyun 			goto exit;
4993*4882a593Smuzhiyun 		}
4994*4882a593Smuzhiyun 	}
4995*4882a593Smuzhiyun exit:
4996*4882a593Smuzhiyun 	return err;
4997*4882a593Smuzhiyun }
4998*4882a593Smuzhiyun 
4999*4882a593Smuzhiyun static int
5000*4882a593Smuzhiyun wl_cfgnan_sd_params_handler(struct net_device *ndev,
5001*4882a593Smuzhiyun 	nan_discover_cmd_data_t *cmd_data, uint16 cmd_id,
5002*4882a593Smuzhiyun 	void *p_buf, uint16 *nan_buf_size)
5003*4882a593Smuzhiyun {
5004*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5005*4882a593Smuzhiyun 	uint8 *pxtlv, *srf = NULL, *srf_mac = NULL, *srftmp = NULL;
5006*4882a593Smuzhiyun 	uint16 buflen_avail;
5007*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
5008*4882a593Smuzhiyun 	wl_nan_sd_params_t *sd_params = (wl_nan_sd_params_t *)sub_cmd->data;
5009*4882a593Smuzhiyun 	uint16 srf_size = 0;
5010*4882a593Smuzhiyun 	uint bloom_size, a;
5011*4882a593Smuzhiyun 	bcm_bloom_filter_t *bp = NULL;
5012*4882a593Smuzhiyun 	/* Bloom filter index default, indicates it has not been set */
5013*4882a593Smuzhiyun 	uint bloom_idx = 0xFFFFFFFF;
5014*4882a593Smuzhiyun 	uint16 bloom_len = NAN_BLOOM_LENGTH_DEFAULT;
5015*4882a593Smuzhiyun 	/* srf_ctrl_size = bloom_len + srf_control field */
5016*4882a593Smuzhiyun 	uint16 srf_ctrl_size = bloom_len + 1;
5017*4882a593Smuzhiyun 
5018*4882a593Smuzhiyun 	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
5019*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
5020*4882a593Smuzhiyun 	BCM_REFERENCE(cfg);
5021*4882a593Smuzhiyun 
5022*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5023*4882a593Smuzhiyun 
5024*4882a593Smuzhiyun 	if (cmd_data->period) {
5025*4882a593Smuzhiyun 		sd_params->awake_dw = cmd_data->period;
5026*4882a593Smuzhiyun 	}
5027*4882a593Smuzhiyun 	sd_params->period = 1;
5028*4882a593Smuzhiyun 
5029*4882a593Smuzhiyun 	if (cmd_data->ttl) {
5030*4882a593Smuzhiyun 		sd_params->ttl = cmd_data->ttl;
5031*4882a593Smuzhiyun 	} else {
5032*4882a593Smuzhiyun 		sd_params->ttl = WL_NAN_TTL_UNTIL_CANCEL;
5033*4882a593Smuzhiyun 	}
5034*4882a593Smuzhiyun 
5035*4882a593Smuzhiyun 	sd_params->flags = 0;
5036*4882a593Smuzhiyun 	sd_params->flags = cmd_data->flags;
5037*4882a593Smuzhiyun 
5038*4882a593Smuzhiyun 	/* Nan Service Based event suppression Flags */
5039*4882a593Smuzhiyun 	if (cmd_data->recv_ind_flag) {
5040*4882a593Smuzhiyun 		/* BIT0 - If set, host won't receive event "terminated" */
5041*4882a593Smuzhiyun 		if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT)) {
5042*4882a593Smuzhiyun 			sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED;
5043*4882a593Smuzhiyun 		}
5044*4882a593Smuzhiyun 
5045*4882a593Smuzhiyun 		/* BIT1 - If set, host won't receive match expiry evt */
5046*4882a593Smuzhiyun 		/* TODO: Exp not yet supported */
5047*4882a593Smuzhiyun 		if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT)) {
5048*4882a593Smuzhiyun 			WL_DBG(("Need to add match expiry event\n"));
5049*4882a593Smuzhiyun 		}
5050*4882a593Smuzhiyun 		/* BIT2 - If set, host won't receive event "receive" */
5051*4882a593Smuzhiyun 		if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT)) {
5052*4882a593Smuzhiyun 			sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE;
5053*4882a593Smuzhiyun 		}
5054*4882a593Smuzhiyun 		/* BIT3 - If set, host won't receive event "replied" */
5055*4882a593Smuzhiyun 		if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_REPLIED_BIT)) {
5056*4882a593Smuzhiyun 			sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
5057*4882a593Smuzhiyun 		}
5058*4882a593Smuzhiyun 	}
5059*4882a593Smuzhiyun 	if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
5060*4882a593Smuzhiyun 		sd_params->instance_id = cmd_data->pub_id;
5061*4882a593Smuzhiyun 		if (cmd_data->service_responder_policy) {
5062*4882a593Smuzhiyun 			/* Do not disturb avail if dam is supported */
5063*4882a593Smuzhiyun 			if (FW_SUPPORTED(dhdp, autodam)) {
5064*4882a593Smuzhiyun 				/* Nan Accept policy: Per service basis policy
5065*4882a593Smuzhiyun 				 * Based on this policy(ALL/NONE), responder side
5066*4882a593Smuzhiyun 				 * will send ACCEPT/REJECT
5067*4882a593Smuzhiyun 				 * If set, auto datapath responder will be sent by FW
5068*4882a593Smuzhiyun 				 */
5069*4882a593Smuzhiyun 				sd_params->flags |= WL_NAN_SVC_CTRL_AUTO_DPRESP;
5070*4882a593Smuzhiyun 			} else  {
5071*4882a593Smuzhiyun 				WL_ERR(("svc specific auto dp resp is not"
5072*4882a593Smuzhiyun 						" supported in non-auto dam fw\n"));
5073*4882a593Smuzhiyun 			}
5074*4882a593Smuzhiyun 		}
5075*4882a593Smuzhiyun 	} else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
5076*4882a593Smuzhiyun 		sd_params->instance_id = cmd_data->sub_id;
5077*4882a593Smuzhiyun 	} else {
5078*4882a593Smuzhiyun 		ret = BCME_USAGE_ERROR;
5079*4882a593Smuzhiyun 		WL_ERR(("wrong command id = %d \n", cmd_id));
5080*4882a593Smuzhiyun 		goto fail;
5081*4882a593Smuzhiyun 	}
5082*4882a593Smuzhiyun 
5083*4882a593Smuzhiyun 	if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
5084*4882a593Smuzhiyun 			(cmd_data->svc_hash.data)) {
5085*4882a593Smuzhiyun 		ret = memcpy_s((uint8*)sd_params->svc_hash,
5086*4882a593Smuzhiyun 				sizeof(sd_params->svc_hash),
5087*4882a593Smuzhiyun 				cmd_data->svc_hash.data,
5088*4882a593Smuzhiyun 				cmd_data->svc_hash.dlen);
5089*4882a593Smuzhiyun 		if (ret != BCME_OK) {
5090*4882a593Smuzhiyun 			WL_ERR(("Failed to copy svc hash\n"));
5091*4882a593Smuzhiyun 			goto fail;
5092*4882a593Smuzhiyun 		}
5093*4882a593Smuzhiyun #ifdef WL_NAN_DEBUG
5094*4882a593Smuzhiyun 		prhex("hashed svc name", cmd_data->svc_hash.data,
5095*4882a593Smuzhiyun 				cmd_data->svc_hash.dlen);
5096*4882a593Smuzhiyun #endif /* WL_NAN_DEBUG */
5097*4882a593Smuzhiyun 	} else {
5098*4882a593Smuzhiyun 		ret = BCME_ERROR;
5099*4882a593Smuzhiyun 		WL_ERR(("invalid svc hash data or length = %d\n",
5100*4882a593Smuzhiyun 				cmd_data->svc_hash.dlen));
5101*4882a593Smuzhiyun 		goto fail;
5102*4882a593Smuzhiyun 	}
5103*4882a593Smuzhiyun 
5104*4882a593Smuzhiyun 	/* check if ranging support is present in firmware */
5105*4882a593Smuzhiyun 	if ((cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) &&
5106*4882a593Smuzhiyun 		!FW_SUPPORTED(dhdp, nanrange)) {
5107*4882a593Smuzhiyun 		WL_ERR(("Service requires ranging but fw doesn't support it\n"));
5108*4882a593Smuzhiyun 		ret = BCME_UNSUPPORTED;
5109*4882a593Smuzhiyun 		goto fail;
5110*4882a593Smuzhiyun 	}
5111*4882a593Smuzhiyun 
5112*4882a593Smuzhiyun 	/* Optional parameters: fill the sub_command block with service descriptor attr */
5113*4882a593Smuzhiyun 	sub_cmd->id = htod16(cmd_id);
5114*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
5115*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
5116*4882a593Smuzhiyun 		OFFSETOF(wl_nan_sd_params_t, optional[0]);
5117*4882a593Smuzhiyun 	pxtlv = (uint8*)&sd_params->optional[0];
5118*4882a593Smuzhiyun 
5119*4882a593Smuzhiyun 	*nan_buf_size -= sub_cmd->len;
5120*4882a593Smuzhiyun 	buflen_avail = *nan_buf_size;
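	/*
	 * From here on each optional parameter is appended as an XTLV at pxtlv:
	 * bcm_pack_xtlv_entry() advances pxtlv and decrements *nan_buf_size, and the
	 * total packed length (buflen_avail - *nan_buf_size) is added back to
	 * sub_cmd->len before returning.
	 */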
5121*4882a593Smuzhiyun 
5122*4882a593Smuzhiyun 	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
5123*4882a593Smuzhiyun 		WL_TRACE(("optional svc_info present, pack it\n"));
5124*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5125*4882a593Smuzhiyun 				WL_NAN_XTLV_SD_SVC_INFO,
5126*4882a593Smuzhiyun 				cmd_data->svc_info.dlen,
5127*4882a593Smuzhiyun 				cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
5128*4882a593Smuzhiyun 		if (unlikely(ret)) {
5129*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__));
5130*4882a593Smuzhiyun 			goto fail;
5131*4882a593Smuzhiyun 		}
5132*4882a593Smuzhiyun 	}
5133*4882a593Smuzhiyun 
5134*4882a593Smuzhiyun 	if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
5135*4882a593Smuzhiyun 		WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
5136*4882a593Smuzhiyun 			cmd_data->sde_svc_info.dlen));
5137*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5138*4882a593Smuzhiyun 				WL_NAN_XTLV_SD_SDE_SVC_INFO,
5139*4882a593Smuzhiyun 				cmd_data->sde_svc_info.dlen,
5140*4882a593Smuzhiyun 				cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
5141*4882a593Smuzhiyun 		if (unlikely(ret)) {
5142*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
5143*4882a593Smuzhiyun 			goto fail;
5144*4882a593Smuzhiyun 		}
5145*4882a593Smuzhiyun 	}
5146*4882a593Smuzhiyun 
5147*4882a593Smuzhiyun 	if (cmd_data->tx_match.dlen) {
5148*4882a593Smuzhiyun 		WL_TRACE(("optional tx match filter present (len=%d)\n",
5149*4882a593Smuzhiyun 				cmd_data->tx_match.dlen));
5150*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5151*4882a593Smuzhiyun 				WL_NAN_XTLV_CFG_MATCH_TX, cmd_data->tx_match.dlen,
5152*4882a593Smuzhiyun 				cmd_data->tx_match.data, BCM_XTLV_OPTION_ALIGN32);
5153*4882a593Smuzhiyun 		if (unlikely(ret)) {
5154*4882a593Smuzhiyun 			WL_ERR(("%s: failed on xtlv_pack for tx match filter\n", __FUNCTION__));
5155*4882a593Smuzhiyun 			goto fail;
5156*4882a593Smuzhiyun 		}
5157*4882a593Smuzhiyun 	}
5158*4882a593Smuzhiyun 
5159*4882a593Smuzhiyun 	if (cmd_data->life_count) {
5160*4882a593Smuzhiyun 		WL_TRACE(("optional life count is present, pack it\n"));
5161*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT,
5162*4882a593Smuzhiyun 				sizeof(cmd_data->life_count), &cmd_data->life_count,
5163*4882a593Smuzhiyun 				BCM_XTLV_OPTION_ALIGN32);
5164*4882a593Smuzhiyun 		if (unlikely(ret)) {
5165*4882a593Smuzhiyun 			WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n", __FUNCTION__));
5166*4882a593Smuzhiyun 			goto fail;
5167*4882a593Smuzhiyun 		}
5168*4882a593Smuzhiyun 	}
5169*4882a593Smuzhiyun 
5170*4882a593Smuzhiyun 	if (cmd_data->use_srf) {
5171*4882a593Smuzhiyun 		uint8 srf_control = 0;
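		/*
		 * srf_control byte as built below: bit0 = SRF type (set for a bloom
		 * filter, clear for a plain MAC list), bit1 = include bit,
		 * bits 2-3 = bloom filter index.
		 */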
5172*4882a593Smuzhiyun 		/* set include bit */
5173*4882a593Smuzhiyun 		if (cmd_data->srf_include == true) {
5174*4882a593Smuzhiyun 			srf_control |= 0x2;
5175*4882a593Smuzhiyun 		}
5176*4882a593Smuzhiyun 
5177*4882a593Smuzhiyun 		if (!ETHER_ISNULLADDR(&cmd_data->mac_list.list) &&
5178*4882a593Smuzhiyun 				(cmd_data->mac_list.num_mac_addr
5179*4882a593Smuzhiyun 				 < NAN_SRF_MAX_MAC)) {
5180*4882a593Smuzhiyun 			if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
5181*4882a593Smuzhiyun 				/* mac list */
5182*4882a593Smuzhiyun 				srf_size = (cmd_data->mac_list.num_mac_addr
5183*4882a593Smuzhiyun 						* ETHER_ADDR_LEN) + NAN_SRF_CTRL_FIELD_LEN;
5184*4882a593Smuzhiyun 				WL_TRACE(("srf size = %d\n", srf_size));
5185*4882a593Smuzhiyun 
5186*4882a593Smuzhiyun 				srf_mac = MALLOCZ(cfg->osh, srf_size);
5187*4882a593Smuzhiyun 				if (srf_mac == NULL) {
5188*4882a593Smuzhiyun 					WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
5189*4882a593Smuzhiyun 					ret = -ENOMEM;
5190*4882a593Smuzhiyun 					goto fail;
5191*4882a593Smuzhiyun 				}
5192*4882a593Smuzhiyun 				ret = memcpy_s(srf_mac, NAN_SRF_CTRL_FIELD_LEN,
5193*4882a593Smuzhiyun 						&srf_control, NAN_SRF_CTRL_FIELD_LEN);
5194*4882a593Smuzhiyun 				if (ret != BCME_OK) {
5195*4882a593Smuzhiyun 					WL_ERR(("Failed to copy srf control\n"));
5196*4882a593Smuzhiyun 					goto fail;
5197*4882a593Smuzhiyun 				}
5198*4882a593Smuzhiyun 				ret = memcpy_s(srf_mac+1, (srf_size - NAN_SRF_CTRL_FIELD_LEN),
5199*4882a593Smuzhiyun 						cmd_data->mac_list.list,
5200*4882a593Smuzhiyun 						(srf_size - NAN_SRF_CTRL_FIELD_LEN));
5201*4882a593Smuzhiyun 				if (ret != BCME_OK) {
5202*4882a593Smuzhiyun 					WL_ERR(("Failed to copy srf control mac list\n"));
5203*4882a593Smuzhiyun 					goto fail;
5204*4882a593Smuzhiyun 				}
5205*4882a593Smuzhiyun 				ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5206*4882a593Smuzhiyun 						WL_NAN_XTLV_CFG_SR_FILTER, srf_size, srf_mac,
5207*4882a593Smuzhiyun 						BCM_XTLV_OPTION_ALIGN32);
5208*4882a593Smuzhiyun 				if (unlikely(ret)) {
5209*4882a593Smuzhiyun 					WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SR_FILTER\n",
5210*4882a593Smuzhiyun 							__FUNCTION__));
5211*4882a593Smuzhiyun 					goto fail;
5212*4882a593Smuzhiyun 				}
5213*4882a593Smuzhiyun 			} else if (cmd_data->srf_type == SRF_TYPE_BLOOM_FILTER) {
5214*4882a593Smuzhiyun 				/* Create bloom filter */
5215*4882a593Smuzhiyun 				srf = MALLOCZ(cfg->osh, srf_ctrl_size);
5216*4882a593Smuzhiyun 				if (srf == NULL) {
5217*4882a593Smuzhiyun 					WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
5218*4882a593Smuzhiyun 					ret = -ENOMEM;
5219*4882a593Smuzhiyun 					goto fail;
5220*4882a593Smuzhiyun 				}
5221*4882a593Smuzhiyun 				/* Bloom filter */
5222*4882a593Smuzhiyun 				srf_control |= 0x1;
5223*4882a593Smuzhiyun 				/* Instance id must be from 1 to 255, 0 is Reserved */
5224*4882a593Smuzhiyun 				if (sd_params->instance_id == NAN_ID_RESERVED) {
5225*4882a593Smuzhiyun 					WL_ERR(("Invalid instance id: %d\n",
5226*4882a593Smuzhiyun 							sd_params->instance_id));
5227*4882a593Smuzhiyun 					ret = BCME_BADARG;
5228*4882a593Smuzhiyun 					goto fail;
5229*4882a593Smuzhiyun 				}
5230*4882a593Smuzhiyun 				if (bloom_idx == 0xFFFFFFFF) {
5231*4882a593Smuzhiyun 					bloom_idx = sd_params->instance_id % 4;
5232*4882a593Smuzhiyun 				} else {
5233*4882a593Smuzhiyun 					WL_ERR(("Invalid bloom_idx\n"));
5234*4882a593Smuzhiyun 					ret = BCME_BADARG;
5235*4882a593Smuzhiyun 					goto fail;
5236*4882a593Smuzhiyun 
5237*4882a593Smuzhiyun 				}
5238*4882a593Smuzhiyun 				srf_control |= bloom_idx << 2;
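				/*
				 * The chosen bloom index (instance_id % 4) is carried in bits 2-3
				 * of srf_control, presumably so the receiver can apply the matching
				 * set of hash functions when checking addresses against the filter.
				 */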
5239*4882a593Smuzhiyun 
5240*4882a593Smuzhiyun 				ret = wl_nan_bloom_create(&bp, &bloom_idx, bloom_len);
5241*4882a593Smuzhiyun 				if (unlikely(ret)) {
5242*4882a593Smuzhiyun 					WL_ERR(("%s: Bloom create failed\n", __FUNCTION__));
5243*4882a593Smuzhiyun 					goto fail;
5244*4882a593Smuzhiyun 				}
5245*4882a593Smuzhiyun 
5246*4882a593Smuzhiyun 				srftmp = cmd_data->mac_list.list;
5247*4882a593Smuzhiyun 				for (a = 0;
5248*4882a593Smuzhiyun 					a < cmd_data->mac_list.num_mac_addr; a++) {
5249*4882a593Smuzhiyun 					ret = bcm_bloom_add_member(bp, srftmp, ETHER_ADDR_LEN);
5250*4882a593Smuzhiyun 					if (unlikely(ret)) {
5251*4882a593Smuzhiyun 						WL_ERR(("%s: Cannot add to bloom filter\n",
5252*4882a593Smuzhiyun 								__FUNCTION__));
5253*4882a593Smuzhiyun 						goto fail;
5254*4882a593Smuzhiyun 					}
5255*4882a593Smuzhiyun 					srftmp += ETHER_ADDR_LEN;
5256*4882a593Smuzhiyun 				}
5257*4882a593Smuzhiyun 
5258*4882a593Smuzhiyun 				ret = memcpy_s(srf, NAN_SRF_CTRL_FIELD_LEN,
5259*4882a593Smuzhiyun 						&srf_control, NAN_SRF_CTRL_FIELD_LEN);
5260*4882a593Smuzhiyun 				if (ret != BCME_OK) {
5261*4882a593Smuzhiyun 					WL_ERR(("Failed to copy srf control\n"));
5262*4882a593Smuzhiyun 					goto fail;
5263*4882a593Smuzhiyun 				}
5264*4882a593Smuzhiyun 				ret = bcm_bloom_get_filter_data(bp, bloom_len,
5265*4882a593Smuzhiyun 						(srf + NAN_SRF_CTRL_FIELD_LEN),
5266*4882a593Smuzhiyun 						&bloom_size);
5267*4882a593Smuzhiyun 				if (unlikely(ret)) {
5268*4882a593Smuzhiyun 					WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__));
5269*4882a593Smuzhiyun 					goto fail;
5270*4882a593Smuzhiyun 				}
5271*4882a593Smuzhiyun 				ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5272*4882a593Smuzhiyun 						WL_NAN_XTLV_CFG_SR_FILTER, srf_ctrl_size,
5273*4882a593Smuzhiyun 						srf, BCM_XTLV_OPTION_ALIGN32);
5274*4882a593Smuzhiyun 				if (ret != BCME_OK) {
5275*4882a593Smuzhiyun 					WL_ERR(("Failed to pack SR FILTER data, ret = %d\n", ret));
5276*4882a593Smuzhiyun 					goto fail;
5277*4882a593Smuzhiyun 				}
5278*4882a593Smuzhiyun 			} else {
5279*4882a593Smuzhiyun 				WL_ERR(("Invalid SRF Type = %d !!!\n",
5280*4882a593Smuzhiyun 						cmd_data->srf_type));
				ret = BCME_BADARG;
5281*4882a593Smuzhiyun 				goto fail;
5282*4882a593Smuzhiyun 			}
5283*4882a593Smuzhiyun 		} else {
5284*4882a593Smuzhiyun 			WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
5285*4882a593Smuzhiyun 					cmd_data->mac_list.num_mac_addr));
			ret = BCME_BADARG;
5286*4882a593Smuzhiyun 			goto fail;
5287*4882a593Smuzhiyun 		}
5288*4882a593Smuzhiyun 	}
5289*4882a593Smuzhiyun 
5290*4882a593Smuzhiyun 	if (cmd_data->rx_match.dlen) {
5291*4882a593Smuzhiyun 		WL_TRACE(("optional rx match filter is present, pack it\n"));
5292*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5293*4882a593Smuzhiyun 				WL_NAN_XTLV_CFG_MATCH_RX, cmd_data->rx_match.dlen,
5294*4882a593Smuzhiyun 				cmd_data->rx_match.data, BCM_XTLV_OPTION_ALIGN32);
5295*4882a593Smuzhiyun 		if (unlikely(ret)) {
5296*4882a593Smuzhiyun 			WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__));
5297*4882a593Smuzhiyun 			goto fail;
5298*4882a593Smuzhiyun 		}
5299*4882a593Smuzhiyun 	}
5300*4882a593Smuzhiyun 
5301*4882a593Smuzhiyun 	/* Security elements */
5302*4882a593Smuzhiyun 	if (cmd_data->csid) {
5303*4882a593Smuzhiyun 		WL_TRACE(("Cipher suite type is present, pack it\n"));
5304*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5305*4882a593Smuzhiyun 				WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
5306*4882a593Smuzhiyun 				(uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
5307*4882a593Smuzhiyun 		if (unlikely(ret)) {
5308*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
5309*4882a593Smuzhiyun 			goto fail;
5310*4882a593Smuzhiyun 		}
5311*4882a593Smuzhiyun 	}
5312*4882a593Smuzhiyun 
5313*4882a593Smuzhiyun 	if (cmd_data->ndp_cfg.security_cfg) {
5314*4882a593Smuzhiyun 		if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
5315*4882a593Smuzhiyun 			(cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
5316*4882a593Smuzhiyun 			if (cmd_data->key.data && cmd_data->key.dlen) {
5317*4882a593Smuzhiyun 				WL_TRACE(("optional pmk present, pack it\n"));
5318*4882a593Smuzhiyun 				ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5319*4882a593Smuzhiyun 					WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
5320*4882a593Smuzhiyun 					cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
5321*4882a593Smuzhiyun 				if (unlikely(ret)) {
5322*4882a593Smuzhiyun 					WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
5323*4882a593Smuzhiyun 						__FUNCTION__));
5324*4882a593Smuzhiyun 					goto fail;
5325*4882a593Smuzhiyun 				}
5326*4882a593Smuzhiyun 			}
5327*4882a593Smuzhiyun 		} else {
5328*4882a593Smuzhiyun 			WL_ERR(("Invalid security key type\n"));
5329*4882a593Smuzhiyun 			ret = BCME_BADARG;
5330*4882a593Smuzhiyun 			goto fail;
5331*4882a593Smuzhiyun 		}
5332*4882a593Smuzhiyun 	}
5333*4882a593Smuzhiyun 
5334*4882a593Smuzhiyun 	if (cmd_data->scid.data && cmd_data->scid.dlen) {
5335*4882a593Smuzhiyun 		WL_TRACE(("optional scid present, pack it\n"));
5336*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_SCID,
5337*4882a593Smuzhiyun 			cmd_data->scid.dlen, cmd_data->scid.data, BCM_XTLV_OPTION_ALIGN32);
5338*4882a593Smuzhiyun 		if (unlikely(ret)) {
5339*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__));
5340*4882a593Smuzhiyun 			goto fail;
5341*4882a593Smuzhiyun 		}
5342*4882a593Smuzhiyun 	}
5343*4882a593Smuzhiyun 
5344*4882a593Smuzhiyun 	if (cmd_data->sde_control_config) {
5345*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
5346*4882a593Smuzhiyun 				WL_NAN_XTLV_SD_SDE_CONTROL,
5347*4882a593Smuzhiyun 				sizeof(uint16), (uint8*)&cmd_data->sde_control_flag,
5348*4882a593Smuzhiyun 				BCM_XTLV_OPTION_ALIGN32);
5349*4882a593Smuzhiyun 		if (ret != BCME_OK) {
5350*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n", __FUNCTION__));
5351*4882a593Smuzhiyun 			goto fail;
5352*4882a593Smuzhiyun 		}
5353*4882a593Smuzhiyun 	}
5354*4882a593Smuzhiyun 
5355*4882a593Smuzhiyun 	sub_cmd->len += (buflen_avail - *nan_buf_size);
5356*4882a593Smuzhiyun 
5357*4882a593Smuzhiyun fail:
5358*4882a593Smuzhiyun 	if (srf) {
5359*4882a593Smuzhiyun 		MFREE(cfg->osh, srf, srf_ctrl_size);
5360*4882a593Smuzhiyun 	}
5361*4882a593Smuzhiyun 
5362*4882a593Smuzhiyun 	if (srf_mac) {
5363*4882a593Smuzhiyun 		MFREE(cfg->osh, srf_mac, srf_size);
5364*4882a593Smuzhiyun 	}
5365*4882a593Smuzhiyun 	NAN_DBG_EXIT();
5366*4882a593Smuzhiyun 	return ret;
5367*4882a593Smuzhiyun }
5368*4882a593Smuzhiyun 
5369*4882a593Smuzhiyun static int
5370*4882a593Smuzhiyun wl_cfgnan_aligned_data_size_of_opt_disc_params(uint16 *data_size, nan_discover_cmd_data_t *cmd_data)
5371*4882a593Smuzhiyun {
5372*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5373*4882a593Smuzhiyun 	if (cmd_data->svc_info.dlen)
5374*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5375*4882a593Smuzhiyun 	if (cmd_data->sde_svc_info.dlen)
5376*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(cmd_data->sde_svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5377*4882a593Smuzhiyun 	if (cmd_data->tx_match.dlen)
5378*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(cmd_data->tx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5379*4882a593Smuzhiyun 	if (cmd_data->rx_match.dlen)
5380*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(cmd_data->rx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5381*4882a593Smuzhiyun 	if (cmd_data->use_srf) {
5382*4882a593Smuzhiyun 		if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
5383*4882a593Smuzhiyun 			*data_size += (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)
5384*4882a593Smuzhiyun 					+ NAN_SRF_CTRL_FIELD_LEN;
5385*4882a593Smuzhiyun 		} else { /* Bloom filter type */
5386*4882a593Smuzhiyun 			*data_size += NAN_BLOOM_LENGTH_DEFAULT + 1;
5387*4882a593Smuzhiyun 		}
5388*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(*data_size + NAN_XTLV_ID_LEN_SIZE, 4);
5389*4882a593Smuzhiyun 	}
5390*4882a593Smuzhiyun 	if (cmd_data->csid)
5391*4882a593Smuzhiyun 		*data_size +=  ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
5392*4882a593Smuzhiyun 	if (cmd_data->key.dlen)
5393*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5394*4882a593Smuzhiyun 	if (cmd_data->scid.dlen)
5395*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5396*4882a593Smuzhiyun 	if (cmd_data->sde_control_config)
5397*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 4);
5398*4882a593Smuzhiyun 	if (cmd_data->life_count)
5399*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 4);
5400*4882a593Smuzhiyun 	return ret;
5401*4882a593Smuzhiyun }
5402*4882a593Smuzhiyun 
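/*
 * Datapath variant of the sizing helper: accounts for the optional service
 * info (counted twice when NDPE is enabled, to cover both the SD SVCINFO and
 * NDPE TLVs), the security key, the CSID and the mandatory service hash.
 */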
5403*4882a593Smuzhiyun static int
5404*4882a593Smuzhiyun wl_cfgnan_aligned_data_size_of_opt_dp_params(struct bcm_cfg80211 *cfg, uint16 *data_size,
5405*4882a593Smuzhiyun 	nan_datapath_cmd_data_t *cmd_data)
5406*4882a593Smuzhiyun {
5407*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5408*4882a593Smuzhiyun 	if (cmd_data->svc_info.dlen) {
5409*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5410*4882a593Smuzhiyun 		/* When NDPE is enabled, add this extra data_size for backward compatibility with
5411*4882a593Smuzhiyun 		 * non-NDPE devices: NDP specific info is duplicated and sent to FW in both the SD
5412*4882a593Smuzhiyun 		 * SVCINFO and NDPE TLV lists, since the host doesn't know the peer's NDPE capability.
5413*4882a593Smuzhiyun 		 */
5414*4882a593Smuzhiyun 		if (cfg->nancfg->ndpe_enabled) {
5415*4882a593Smuzhiyun 			*data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5416*4882a593Smuzhiyun 		}
5417*4882a593Smuzhiyun 	}
5418*4882a593Smuzhiyun 	if (cmd_data->key.dlen)
5419*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
5420*4882a593Smuzhiyun 	if (cmd_data->csid)
5421*4882a593Smuzhiyun 		*data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
5422*4882a593Smuzhiyun 
5423*4882a593Smuzhiyun 	*data_size += ALIGN_SIZE(WL_NAN_SVC_HASH_LEN + NAN_XTLV_ID_LEN_SIZE, 4);
5424*4882a593Smuzhiyun 	return ret;
5425*4882a593Smuzhiyun }
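/*
 * Issues a GET (is_set = false) batch iovar carrying only the publish or
 * subscribe instance id, so the caller can check whether the service instance
 * already exists in firmware before attempting an update.
 */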
5426*4882a593Smuzhiyun int
5427*4882a593Smuzhiyun wl_cfgnan_svc_get_handler(struct net_device *ndev,
5428*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
5429*4882a593Smuzhiyun {
5430*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
5431*4882a593Smuzhiyun 	uint32 instance_id;
5432*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5433*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
5434*4882a593Smuzhiyun 
5435*4882a593Smuzhiyun 	uint8 *resp_buf = NULL;
5436*4882a593Smuzhiyun 	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET + sizeof(instance_id);
5437*4882a593Smuzhiyun 
5438*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5439*4882a593Smuzhiyun 
5440*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, data_size);
5441*4882a593Smuzhiyun 	if (!nan_buf) {
5442*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
5443*4882a593Smuzhiyun 		ret = BCME_NOMEM;
5444*4882a593Smuzhiyun 		goto fail;
5445*4882a593Smuzhiyun 	}
5446*4882a593Smuzhiyun 
5447*4882a593Smuzhiyun 	resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
5448*4882a593Smuzhiyun 	if (!resp_buf) {
5449*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
5450*4882a593Smuzhiyun 		ret = BCME_NOMEM;
5451*4882a593Smuzhiyun 		goto fail;
5452*4882a593Smuzhiyun 	}
5453*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5454*4882a593Smuzhiyun 	nan_buf->count = 1;
5455*4882a593Smuzhiyun 	/* check if service is present */
5456*4882a593Smuzhiyun 	nan_buf->is_set = false;
5457*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
5458*4882a593Smuzhiyun 	if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
5459*4882a593Smuzhiyun 		instance_id = cmd_data->pub_id;
5460*4882a593Smuzhiyun 	} else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
5461*4882a593Smuzhiyun 		instance_id = cmd_data->sub_id;
5462*4882a593Smuzhiyun 	}  else {
5463*4882a593Smuzhiyun 		ret = BCME_USAGE_ERROR;
5464*4882a593Smuzhiyun 		WL_ERR(("wrong command id = %u\n", cmd_id));
5465*4882a593Smuzhiyun 		goto fail;
5466*4882a593Smuzhiyun 	}
5467*4882a593Smuzhiyun 	/* Fill the sub_command block */
5468*4882a593Smuzhiyun 	sub_cmd->id = htod16(cmd_id);
5469*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
5470*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
5471*4882a593Smuzhiyun 
5472*4882a593Smuzhiyun 	ret = memcpy_s(sub_cmd->data, (data_size - WL_NAN_OBUF_DATA_OFFSET),
5473*4882a593Smuzhiyun 			&instance_id, sizeof(instance_id));
5474*4882a593Smuzhiyun 	if (ret != BCME_OK) {
5475*4882a593Smuzhiyun 		WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
5476*4882a593Smuzhiyun 		goto fail;
5477*4882a593Smuzhiyun 	}
5478*4882a593Smuzhiyun 
5479*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
5480*4882a593Smuzhiyun 			&(cmd_data->status), resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
5481*4882a593Smuzhiyun 
5482*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
5483*4882a593Smuzhiyun 		WL_ERR(("nan svc check failed ret = %d status = %d\n", ret, cmd_data->status));
5484*4882a593Smuzhiyun 		goto fail;
5485*4882a593Smuzhiyun 	} else {
5486*4882a593Smuzhiyun 		WL_DBG(("nan svc check successful..proceed to update\n"));
5487*4882a593Smuzhiyun 	}
5488*4882a593Smuzhiyun 
5489*4882a593Smuzhiyun fail:
5490*4882a593Smuzhiyun 	if (nan_buf) {
5491*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, data_size);
5492*4882a593Smuzhiyun 	}
5493*4882a593Smuzhiyun 
5494*4882a593Smuzhiyun 	if (resp_buf) {
5495*4882a593Smuzhiyun 		MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
5496*4882a593Smuzhiyun 	}
5497*4882a593Smuzhiyun 	NAN_DBG_EXIT();
5498*4882a593Smuzhiyun 	return ret;
5499*4882a593Smuzhiyun 
5500*4882a593Smuzhiyun }
5501*4882a593Smuzhiyun 
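/*
 * Common publish/subscribe path: for service updates it first verifies the
 * instance via wl_cfgnan_svc_get_handler(), then sizes the iovar buffer from
 * the optional parameters, packs the SD params and fires the SET iovar. A
 * BCME_DATA_NOTFOUND status on an update is treated as "nothing to update".
 */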
5502*4882a593Smuzhiyun int
5503*4882a593Smuzhiyun wl_cfgnan_svc_handler(struct net_device *ndev,
5504*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
5505*4882a593Smuzhiyun {
5506*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5507*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
5508*4882a593Smuzhiyun 	uint16 nan_buf_size;
5509*4882a593Smuzhiyun 	uint8 *resp_buf = NULL;
5510*4882a593Smuzhiyun 	/* Considering fixed params */
5511*4882a593Smuzhiyun 	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
5512*4882a593Smuzhiyun 		OFFSETOF(wl_nan_sd_params_t, optional[0]);
5513*4882a593Smuzhiyun 
5514*4882a593Smuzhiyun 	if (cmd_data->svc_update) {
5515*4882a593Smuzhiyun 		ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
5516*4882a593Smuzhiyun 		if (ret != BCME_OK) {
5517*4882a593Smuzhiyun 			WL_ERR(("Failed to update svc handler, ret = %d\n", ret));
5518*4882a593Smuzhiyun 			goto fail;
5519*4882a593Smuzhiyun 		} else {
5520*4882a593Smuzhiyun 			/* Ignoring any other svc get error */
5521*4882a593Smuzhiyun 			if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
5522*4882a593Smuzhiyun 				WL_ERR(("Bad instance status, failed to update svc handler\n"));
5523*4882a593Smuzhiyun 				goto fail;
5524*4882a593Smuzhiyun 			}
5525*4882a593Smuzhiyun 		}
5526*4882a593Smuzhiyun 	}
5527*4882a593Smuzhiyun 
5528*4882a593Smuzhiyun 	ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
5529*4882a593Smuzhiyun 	if (unlikely(ret)) {
5530*4882a593Smuzhiyun 		WL_ERR(("Failed to get aligned size of optional params\n"));
5531*4882a593Smuzhiyun 		goto fail;
5532*4882a593Smuzhiyun 	}
5533*4882a593Smuzhiyun 	nan_buf_size = data_size;
5534*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5535*4882a593Smuzhiyun 
5536*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, data_size);
5537*4882a593Smuzhiyun 	if (!nan_buf) {
5538*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
5539*4882a593Smuzhiyun 		ret = BCME_NOMEM;
5540*4882a593Smuzhiyun 		goto fail;
5541*4882a593Smuzhiyun 	}
5542*4882a593Smuzhiyun 
5543*4882a593Smuzhiyun 	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
5544*4882a593Smuzhiyun 	if (!resp_buf) {
5545*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
5546*4882a593Smuzhiyun 		ret = BCME_NOMEM;
5547*4882a593Smuzhiyun 		goto fail;
5548*4882a593Smuzhiyun 	}
5549*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5550*4882a593Smuzhiyun 	nan_buf->count = 0;
5551*4882a593Smuzhiyun 	nan_buf->is_set = true;
5552*4882a593Smuzhiyun 
5553*4882a593Smuzhiyun 	ret = wl_cfgnan_sd_params_handler(ndev, cmd_data, cmd_id,
5554*4882a593Smuzhiyun 			&nan_buf->cmds[0], &nan_buf_size);
5555*4882a593Smuzhiyun 	if (unlikely(ret)) {
5556*4882a593Smuzhiyun 		WL_ERR((" Service discovery params handler failed, ret = %d\n", ret));
5557*4882a593Smuzhiyun 		goto fail;
5558*4882a593Smuzhiyun 	}
5559*4882a593Smuzhiyun 
5560*4882a593Smuzhiyun 	nan_buf->count++;
5561*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
5562*4882a593Smuzhiyun 			&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
5563*4882a593Smuzhiyun 	if (cmd_data->svc_update && (cmd_data->status == BCME_DATA_NOTFOUND)) {
5564*4882a593Smuzhiyun 		/* return OK if update tlv data is not present
5565*4882a593Smuzhiyun 		* which means nothing to update
5566*4882a593Smuzhiyun 		*/
5567*4882a593Smuzhiyun 		cmd_data->status = BCME_OK;
5568*4882a593Smuzhiyun 	}
5569*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
5570*4882a593Smuzhiyun 		WL_ERR(("nan svc failed ret = %d status = %d\n", ret, cmd_data->status));
5571*4882a593Smuzhiyun 		goto fail;
5572*4882a593Smuzhiyun 	} else {
5573*4882a593Smuzhiyun 		WL_DBG(("nan svc successful\n"));
5574*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
5575*4882a593Smuzhiyun 		ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id, cmd_data->svc_update);
5576*4882a593Smuzhiyun 		if (ret < 0) {
5577*4882a593Smuzhiyun 			WL_ERR(("%s: fail to cache svc info, ret=%d\n",
5578*4882a593Smuzhiyun 				__FUNCTION__, ret));
5579*4882a593Smuzhiyun 			goto fail;
5580*4882a593Smuzhiyun 		}
5581*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
5582*4882a593Smuzhiyun 	}
5583*4882a593Smuzhiyun 
5584*4882a593Smuzhiyun fail:
5585*4882a593Smuzhiyun 	if (nan_buf) {
5586*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, data_size);
5587*4882a593Smuzhiyun 	}
5588*4882a593Smuzhiyun 
5589*4882a593Smuzhiyun 	if (resp_buf) {
5590*4882a593Smuzhiyun 		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
5591*4882a593Smuzhiyun 	}
5592*4882a593Smuzhiyun 	NAN_DBG_EXIT();
5593*4882a593Smuzhiyun 	return ret;
5594*4882a593Smuzhiyun }
5595*4882a593Smuzhiyun 
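/*
 * Validates the mandatory publish arguments (publisher id and service hash)
 * and hands off to wl_cfgnan_svc_handler() with WL_NAN_CMD_SD_PUBLISH. The
 * caller is expected to have populated cmd_data->pub_id and
 * cmd_data->svc_hash (data + dlen) before invoking this handler.
 */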
5596*4882a593Smuzhiyun int
5597*4882a593Smuzhiyun wl_cfgnan_publish_handler(struct net_device *ndev,
5598*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
5599*4882a593Smuzhiyun {
5600*4882a593Smuzhiyun 	int ret = BCME_OK;
5601*4882a593Smuzhiyun 
5602*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5603*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
5604*4882a593Smuzhiyun 	/*
5605*4882a593Smuzhiyun 	 * proceed only if mandatory arguments are present - publisher id,
5606*4882a593Smuzhiyun 	 * service hash
5607*4882a593Smuzhiyun 	 */
5608*4882a593Smuzhiyun 	if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
5609*4882a593Smuzhiyun 		(!cmd_data->svc_hash.dlen)) {
5610*4882a593Smuzhiyun 		WL_ERR(("mandatory arguments are not present\n"));
5611*4882a593Smuzhiyun 		ret = BCME_BADARG;
5612*4882a593Smuzhiyun 		goto fail;
5613*4882a593Smuzhiyun 	}
5614*4882a593Smuzhiyun 
5615*4882a593Smuzhiyun 	ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_PUBLISH, cmd_data);
5616*4882a593Smuzhiyun 	if (ret < 0) {
5617*4882a593Smuzhiyun 		WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__, ret));
5618*4882a593Smuzhiyun 		goto fail;
5619*4882a593Smuzhiyun 	}
5620*4882a593Smuzhiyun 	WL_INFORM_MEM(("[NAN] Service published for instance id:%d is_update %d\n",
5621*4882a593Smuzhiyun 		cmd_data->pub_id, cmd_data->svc_update));
5622*4882a593Smuzhiyun 
5623*4882a593Smuzhiyun fail:
5624*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
5625*4882a593Smuzhiyun 	NAN_DBG_EXIT();
5626*4882a593Smuzhiyun 	return ret;
5627*4882a593Smuzhiyun }
5628*4882a593Smuzhiyun 
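/*
 * Validates the mandatory subscribe arguments and, on a service update, tears
 * down ranging sessions tied to this subscription and clears the firmware and
 * local discovery caches when the ranging/ingress/egress configuration has
 * changed, before handing off to wl_cfgnan_svc_handler() with
 * WL_NAN_CMD_SD_SUBSCRIBE.
 */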
5629*4882a593Smuzhiyun int
5630*4882a593Smuzhiyun wl_cfgnan_subscribe_handler(struct net_device *ndev,
5631*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
5632*4882a593Smuzhiyun {
5633*4882a593Smuzhiyun 	int ret = BCME_OK;
5634*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
5635*4882a593Smuzhiyun 	nan_svc_info_t *svc_info;
5636*4882a593Smuzhiyun #ifdef RTT_SUPPORT
5637*4882a593Smuzhiyun 	uint8 upd_ranging_required;
5638*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
5639*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
5640*4882a593Smuzhiyun 
5641*4882a593Smuzhiyun #ifdef RTT_SUPPORT
5642*4882a593Smuzhiyun #ifdef RTT_GEOFENCE_CONT
5643*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
5644*4882a593Smuzhiyun 	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
5645*4882a593Smuzhiyun #endif /* RTT_GEOFENCE_CONT */
5646*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
5647*4882a593Smuzhiyun 
5648*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5649*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
5650*4882a593Smuzhiyun 
5651*4882a593Smuzhiyun 	/*
5652*4882a593Smuzhiyun 	 * proceed only if mandatory arguments are present - subscriber id,
5653*4882a593Smuzhiyun 	 * service hash
5654*4882a593Smuzhiyun 	 */
5655*4882a593Smuzhiyun 	if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
5656*4882a593Smuzhiyun 		(!cmd_data->svc_hash.dlen)) {
5657*4882a593Smuzhiyun 		WL_ERR(("mandatory arguments are not present\n"));
5658*4882a593Smuzhiyun 		ret = BCME_BADARG;
5659*4882a593Smuzhiyun 		goto fail;
5660*4882a593Smuzhiyun 	}
5661*4882a593Smuzhiyun 
5662*4882a593Smuzhiyun 	/* Check for ranging sessions if any */
5663*4882a593Smuzhiyun 	if (cmd_data->svc_update) {
5664*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
5665*4882a593Smuzhiyun 		svc_info = wl_cfgnan_get_svc_inst(cfg, cmd_data->sub_id, 0);
5666*4882a593Smuzhiyun 		if (svc_info) {
5667*4882a593Smuzhiyun #ifdef RTT_SUPPORT
5668*4882a593Smuzhiyun 			wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
5669*4882a593Smuzhiyun 			/* terminate ranging sessions for this svc, avoid clearing svc cache */
5670*4882a593Smuzhiyun 			wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
5671*4882a593Smuzhiyun 			/* Attempt RTT for current geofence target */
5672*4882a593Smuzhiyun 			wl_cfgnan_reset_geofence_ranging(cfg, NULL,
5673*4882a593Smuzhiyun 				RTT_SCHED_RNG_TERM_SUB_SVC_UPD, TRUE);
5674*4882a593Smuzhiyun 			WL_DBG(("Ranging sessions handled for svc update\n"));
5675*4882a593Smuzhiyun 			upd_ranging_required = !!(cmd_data->sde_control_flag &
5676*4882a593Smuzhiyun 					NAN_SDE_CF_RANGING_REQUIRED);
5677*4882a593Smuzhiyun 			if ((svc_info->ranging_required ^ upd_ranging_required) ||
5678*4882a593Smuzhiyun 					(svc_info->ingress_limit != cmd_data->ingress_limit) ||
5679*4882a593Smuzhiyun 					(svc_info->egress_limit != cmd_data->egress_limit)) {
5680*4882a593Smuzhiyun 				/* Clear cache info in Firmware */
5681*4882a593Smuzhiyun 				ret = wl_cfgnan_clear_disc_cache(cfg, cmd_data->sub_id);
5682*4882a593Smuzhiyun 				if (ret != BCME_OK) {
5683*4882a593Smuzhiyun 					WL_ERR(("couldn't send clear cache to FW \n"));
5684*4882a593Smuzhiyun 					goto fail;
5685*4882a593Smuzhiyun 				}
5686*4882a593Smuzhiyun 				/* Invalidate local cache info */
5687*4882a593Smuzhiyun 				wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
5688*4882a593Smuzhiyun 			}
5689*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
5690*4882a593Smuzhiyun 		}
5691*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
5692*4882a593Smuzhiyun 	}
5693*4882a593Smuzhiyun 
5694*4882a593Smuzhiyun #ifdef RTT_SUPPORT
5695*4882a593Smuzhiyun #ifdef RTT_GEOFENCE_CONT
5696*4882a593Smuzhiyun 	/* Override ranging Indication */
5697*4882a593Smuzhiyun 	if (rtt_status->geofence_cfg.geofence_cont) {
5698*4882a593Smuzhiyun 		if (cmd_data->ranging_indication !=
5699*4882a593Smuzhiyun 				NAN_RANGE_INDICATION_NONE) {
5700*4882a593Smuzhiyun 			cmd_data->ranging_indication = NAN_RANGE_INDICATION_CONT;
5701*4882a593Smuzhiyun 		}
5702*4882a593Smuzhiyun 	}
5703*4882a593Smuzhiyun #endif /* RTT_GEOFENCE_CONT */
5704*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
5705*4882a593Smuzhiyun 	ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
5706*4882a593Smuzhiyun 	if (ret < 0) {
5707*4882a593Smuzhiyun 		WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
5708*4882a593Smuzhiyun 		goto fail;
5709*4882a593Smuzhiyun 	}
5710*4882a593Smuzhiyun 	WL_INFORM_MEM(("[NAN] Service subscribed for instance id:%d is_update %d\n",
5711*4882a593Smuzhiyun 		cmd_data->sub_id, cmd_data->svc_update));
5712*4882a593Smuzhiyun 
5713*4882a593Smuzhiyun fail:
5714*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
5715*4882a593Smuzhiyun 	NAN_DBG_EXIT();
5716*4882a593Smuzhiyun 	return ret;
5717*4882a593Smuzhiyun }
5718*4882a593Smuzhiyun 
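/*
 * Packs a cancel-publish or cancel-subscribe sub-command that carries only the
 * instance id, and shrinks *nan_buf_size to the end of the packed record.
 */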
5719*4882a593Smuzhiyun static int
5720*4882a593Smuzhiyun wl_cfgnan_cancel_handler(nan_discover_cmd_data_t *cmd_data,
5721*4882a593Smuzhiyun 	uint16 cmd_id, void *p_buf, uint16 *nan_buf_size)
5722*4882a593Smuzhiyun {
5723*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5724*4882a593Smuzhiyun 
5725*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5726*4882a593Smuzhiyun 
5727*4882a593Smuzhiyun 	if (p_buf != NULL) {
5728*4882a593Smuzhiyun 		bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
5729*4882a593Smuzhiyun 		wl_nan_instance_id_t instance_id;
5730*4882a593Smuzhiyun 
5731*4882a593Smuzhiyun 		if (cmd_id == WL_NAN_CMD_SD_CANCEL_PUBLISH) {
5732*4882a593Smuzhiyun 			instance_id = cmd_data->pub_id;
5733*4882a593Smuzhiyun 		} else if (cmd_id == WL_NAN_CMD_SD_CANCEL_SUBSCRIBE) {
5734*4882a593Smuzhiyun 			instance_id = cmd_data->sub_id;
5735*4882a593Smuzhiyun 		}  else {
5736*4882a593Smuzhiyun 			ret = BCME_USAGE_ERROR;
5737*4882a593Smuzhiyun 			WL_ERR(("wrong command id = %u\n", cmd_id));
5738*4882a593Smuzhiyun 			goto fail;
5739*4882a593Smuzhiyun 		}
5740*4882a593Smuzhiyun 
5741*4882a593Smuzhiyun 		/* Fill the sub_command block */
5742*4882a593Smuzhiyun 		sub_cmd->id = htod16(cmd_id);
5743*4882a593Smuzhiyun 		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
5744*4882a593Smuzhiyun 		sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
5745*4882a593Smuzhiyun 		ret = memcpy_s(sub_cmd->data, *nan_buf_size,
5746*4882a593Smuzhiyun 				&instance_id, sizeof(instance_id));
5747*4882a593Smuzhiyun 		if (ret != BCME_OK) {
5748*4882a593Smuzhiyun 			WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
5749*4882a593Smuzhiyun 			goto fail;
5750*4882a593Smuzhiyun 		}
5751*4882a593Smuzhiyun 		/* adjust iov data len to the end of last data record */
5752*4882a593Smuzhiyun 		*nan_buf_size -= (sub_cmd->len +
5753*4882a593Smuzhiyun 				OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
5754*4882a593Smuzhiyun 		WL_INFORM_MEM(("[NAN] Service with instance id:%d cancelled\n", instance_id));
5755*4882a593Smuzhiyun 	} else {
5756*4882a593Smuzhiyun 		WL_ERR(("nan_iov_buf is NULL\n"));
5757*4882a593Smuzhiyun 		ret = BCME_ERROR;
5758*4882a593Smuzhiyun 		goto fail;
5759*4882a593Smuzhiyun 	}
5760*4882a593Smuzhiyun 
5761*4882a593Smuzhiyun fail:
5762*4882a593Smuzhiyun 	NAN_DBG_EXIT();
5763*4882a593Smuzhiyun 	return ret;
5764*4882a593Smuzhiyun }
5765*4882a593Smuzhiyun 
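/*
 * Cancels an active publish: clears the cached service entry, packs and sends
 * WL_NAN_CMD_SD_CANCEL_PUBLISH, and releases the publish instance id on success.
 */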
5766*4882a593Smuzhiyun int
5767*4882a593Smuzhiyun wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
5768*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
5769*4882a593Smuzhiyun {
5770*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
5771*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5772*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
5773*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
5774*4882a593Smuzhiyun 
5775*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5776*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
5777*4882a593Smuzhiyun 
5778*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
5779*4882a593Smuzhiyun 	if (!nan_buf) {
5780*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
5781*4882a593Smuzhiyun 		ret = BCME_NOMEM;
5782*4882a593Smuzhiyun 		goto fail;
5783*4882a593Smuzhiyun 	}
5784*4882a593Smuzhiyun 
5785*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5786*4882a593Smuzhiyun 	nan_buf->count = 0;
5787*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
5788*4882a593Smuzhiyun 
5789*4882a593Smuzhiyun 	/* proceed only if mandatory argument is present - publisher id */
5790*4882a593Smuzhiyun 	if (!cmd_data->pub_id) {
5791*4882a593Smuzhiyun 		WL_ERR(("mandatory argument is not present\n"));
5792*4882a593Smuzhiyun 		ret = BCME_BADARG;
5793*4882a593Smuzhiyun 		goto fail;
5794*4882a593Smuzhiyun 	}
5795*4882a593Smuzhiyun 
5796*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
5797*4882a593Smuzhiyun 	wl_cfgnan_clear_svc_cache(cfg, cmd_data->pub_id);
5798*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
5799*4882a593Smuzhiyun 	ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
5800*4882a593Smuzhiyun 			&nan_buf->cmds[0], &nan_buf_size);
5801*4882a593Smuzhiyun 	if (unlikely(ret)) {
5802*4882a593Smuzhiyun 		WL_ERR(("cancel publish failed\n"));
5803*4882a593Smuzhiyun 		goto fail;
5804*4882a593Smuzhiyun 	}
5805*4882a593Smuzhiyun 	nan_buf->is_set = true;
5806*4882a593Smuzhiyun 	nan_buf->count++;
5807*4882a593Smuzhiyun 
5808*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
5809*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
5810*4882a593Smuzhiyun 			&(cmd_data->status),
5811*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
5812*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
5813*4882a593Smuzhiyun 		WL_ERR(("nan cancel publish failed ret = %d status = %d\n",
5814*4882a593Smuzhiyun 			ret, cmd_data->status));
5815*4882a593Smuzhiyun 		goto fail;
5816*4882a593Smuzhiyun 	}
5817*4882a593Smuzhiyun 	WL_DBG(("nan cancel publish successful\n"));
5818*4882a593Smuzhiyun 	wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
5819*4882a593Smuzhiyun fail:
5820*4882a593Smuzhiyun 	if (nan_buf) {
5821*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
5822*4882a593Smuzhiyun 	}
5823*4882a593Smuzhiyun 
5824*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
5825*4882a593Smuzhiyun 	NAN_DBG_EXIT();
5826*4882a593Smuzhiyun 	return ret;
5827*4882a593Smuzhiyun }
5828*4882a593Smuzhiyun 
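/*
 * Cancels an active subscribe: terminates ranging sessions tied to the
 * subscription, clears the service and discovery caches, sends
 * WL_NAN_CMD_SD_CANCEL_SUBSCRIBE, and releases the subscribe instance id.
 */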
5829*4882a593Smuzhiyun int
5830*4882a593Smuzhiyun wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
5831*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
5832*4882a593Smuzhiyun {
5833*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
5834*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5835*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
5836*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
5837*4882a593Smuzhiyun 
5838*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5839*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
5840*4882a593Smuzhiyun 
5841*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
5842*4882a593Smuzhiyun 	if (!nan_buf) {
5843*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
5844*4882a593Smuzhiyun 		ret = BCME_NOMEM;
5845*4882a593Smuzhiyun 		goto fail;
5846*4882a593Smuzhiyun 	}
5847*4882a593Smuzhiyun 
5848*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5849*4882a593Smuzhiyun 	nan_buf->count = 0;
5850*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
5851*4882a593Smuzhiyun 
5852*4882a593Smuzhiyun 	/* proceed only if mandatory argument is present - subscriber id */
5853*4882a593Smuzhiyun 	if (!cmd_data->sub_id) {
5854*4882a593Smuzhiyun 		WL_ERR(("mandatory argument is not present\n"));
5855*4882a593Smuzhiyun 		ret = BCME_BADARG;
5856*4882a593Smuzhiyun 		goto fail;
5857*4882a593Smuzhiyun 	}
5858*4882a593Smuzhiyun 
5859*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
5860*4882a593Smuzhiyun #ifdef RTT_SUPPORT
5861*4882a593Smuzhiyun 	/* terminate ranging sessions for this svc */
5862*4882a593Smuzhiyun 	wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
5863*4882a593Smuzhiyun 	wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
5864*4882a593Smuzhiyun 	wl_cfgnan_reset_geofence_ranging(cfg, NULL,
5865*4882a593Smuzhiyun 		RTT_SCHED_RNG_TERM_SUB_SVC_CANCEL, TRUE);
5866*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
5867*4882a593Smuzhiyun 	/* clear svc cache for the service */
5868*4882a593Smuzhiyun 	wl_cfgnan_clear_svc_cache(cfg, cmd_data->sub_id);
5869*4882a593Smuzhiyun 	wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
5870*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
5871*4882a593Smuzhiyun 
5872*4882a593Smuzhiyun 	ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE,
5873*4882a593Smuzhiyun 			&nan_buf->cmds[0], &nan_buf_size);
5874*4882a593Smuzhiyun 	if (unlikely(ret)) {
5875*4882a593Smuzhiyun 		WL_ERR(("cancel subscribe failed\n"));
5876*4882a593Smuzhiyun 		goto fail;
5877*4882a593Smuzhiyun 	}
5878*4882a593Smuzhiyun 	nan_buf->is_set = true;
5879*4882a593Smuzhiyun 	nan_buf->count++;
5880*4882a593Smuzhiyun 
5881*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
5882*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
5883*4882a593Smuzhiyun 			&(cmd_data->status),
5884*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
5885*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
5886*4882a593Smuzhiyun 		WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n",
5887*4882a593Smuzhiyun 			ret, cmd_data->status));
5888*4882a593Smuzhiyun 		goto fail;
5889*4882a593Smuzhiyun 	}
5890*4882a593Smuzhiyun 	WL_DBG(("subscribe cancel successful\n"));
5891*4882a593Smuzhiyun 	wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
5892*4882a593Smuzhiyun fail:
5893*4882a593Smuzhiyun 	if (nan_buf) {
5894*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
5895*4882a593Smuzhiyun 	}
5896*4882a593Smuzhiyun 
5897*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
5898*4882a593Smuzhiyun 	NAN_DBG_EXIT();
5899*4882a593Smuzhiyun 	return ret;
5900*4882a593Smuzhiyun }
5901*4882a593Smuzhiyun 
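/*
 * Sends a NAN service discovery follow-up frame. Mandatory inputs are the
 * local and remote instance ids and the peer MAC address; service info and
 * SDE service info are packed as optional XTLVs before the
 * WL_NAN_CMD_SD_TRANSMIT iovar is issued.
 */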
5902*4882a593Smuzhiyun int
5903*4882a593Smuzhiyun wl_cfgnan_transmit_handler(struct net_device *ndev,
5904*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
5905*4882a593Smuzhiyun {
5906*4882a593Smuzhiyun 	s32 ret = BCME_OK;
5907*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
5908*4882a593Smuzhiyun 	wl_nan_sd_transmit_t *sd_xmit = NULL;
5909*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
5910*4882a593Smuzhiyun 	bool is_lcl_id = FALSE;
5911*4882a593Smuzhiyun 	bool is_dest_id = FALSE;
5912*4882a593Smuzhiyun 	bool is_dest_mac = FALSE;
5913*4882a593Smuzhiyun 	uint16 buflen_avail;
5914*4882a593Smuzhiyun 	uint8 *pxtlv;
5915*4882a593Smuzhiyun 	uint16 nan_buf_size;
5916*4882a593Smuzhiyun 	uint8 *resp_buf = NULL;
5917*4882a593Smuzhiyun 	/* Considering fixed params */
5918*4882a593Smuzhiyun 	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
5919*4882a593Smuzhiyun 		OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
5920*4882a593Smuzhiyun 	data_size = ALIGN_SIZE(data_size, 4);
5921*4882a593Smuzhiyun 	ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
5922*4882a593Smuzhiyun 	if (unlikely(ret)) {
5923*4882a593Smuzhiyun 		WL_ERR(("Failed to get aligned size of optional params\n"));
5924*4882a593Smuzhiyun 		goto fail;
5925*4882a593Smuzhiyun 	}
5926*4882a593Smuzhiyun 	NAN_DBG_ENTER();
5927*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
5928*4882a593Smuzhiyun 	nan_buf_size = data_size;
5929*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, data_size);
5930*4882a593Smuzhiyun 	if (!nan_buf) {
5931*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
5932*4882a593Smuzhiyun 		ret = BCME_NOMEM;
5933*4882a593Smuzhiyun 		goto fail;
5934*4882a593Smuzhiyun 	}
5935*4882a593Smuzhiyun 
5936*4882a593Smuzhiyun 	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
5937*4882a593Smuzhiyun 	if (!resp_buf) {
5938*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
5939*4882a593Smuzhiyun 		ret = BCME_NOMEM;
5940*4882a593Smuzhiyun 		goto fail;
5941*4882a593Smuzhiyun 	}
5942*4882a593Smuzhiyun 
5943*4882a593Smuzhiyun 	/* nan transmit */
5944*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
5945*4882a593Smuzhiyun 	nan_buf->count = 0;
5946*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
5947*4882a593Smuzhiyun 	/*
5948*4882a593Smuzhiyun 	 * proceed only if mandatory arguments are present - subscriber id,
5949*4882a593Smuzhiyun 	 * publisher id, mac address
5950*4882a593Smuzhiyun 	 */
5951*4882a593Smuzhiyun 	if ((!cmd_data->local_id) || (!cmd_data->remote_id) ||
5952*4882a593Smuzhiyun 			ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
5953*4882a593Smuzhiyun 		WL_ERR(("mandatory arguments are not present\n"));
5954*4882a593Smuzhiyun 		ret = -EINVAL;
5955*4882a593Smuzhiyun 		goto fail;
5956*4882a593Smuzhiyun 	}
5957*4882a593Smuzhiyun 
5958*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
5959*4882a593Smuzhiyun 	sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);
5960*4882a593Smuzhiyun 
5961*4882a593Smuzhiyun 	/* local instance id must be from 1 to 255, 0 is reserved */
5962*4882a593Smuzhiyun 	if (cmd_data->local_id == NAN_ID_RESERVED) {
5963*4882a593Smuzhiyun 		WL_ERR(("Invalid local instance id: %d\n", cmd_data->local_id));
5964*4882a593Smuzhiyun 		ret = BCME_BADARG;
5965*4882a593Smuzhiyun 		goto fail;
5966*4882a593Smuzhiyun 	}
5967*4882a593Smuzhiyun 	sd_xmit->local_service_id = cmd_data->local_id;
5968*4882a593Smuzhiyun 	is_lcl_id = TRUE;
5969*4882a593Smuzhiyun 
5970*4882a593Smuzhiyun 	/* remote instance id must be from 1 to 255, 0 is reserved */
5971*4882a593Smuzhiyun 	if (cmd_data->remote_id == NAN_ID_RESERVED) {
5972*4882a593Smuzhiyun 		WL_ERR(("Invalid remote instance id: %d\n", cmd_data->remote_id));
5973*4882a593Smuzhiyun 		ret = BCME_BADARG;
5974*4882a593Smuzhiyun 		goto fail;
5975*4882a593Smuzhiyun 	}
5976*4882a593Smuzhiyun 
5977*4882a593Smuzhiyun 	sd_xmit->requestor_service_id = cmd_data->remote_id;
5978*4882a593Smuzhiyun 	is_dest_id = TRUE;
5979*4882a593Smuzhiyun 
5980*4882a593Smuzhiyun 	if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
5981*4882a593Smuzhiyun 		ret = memcpy_s(&sd_xmit->destination_addr, ETHER_ADDR_LEN,
5982*4882a593Smuzhiyun 				&cmd_data->mac_addr, ETHER_ADDR_LEN);
5983*4882a593Smuzhiyun 		if (ret != BCME_OK) {
5984*4882a593Smuzhiyun 			WL_ERR(("Failed to copy dest mac address\n"));
5985*4882a593Smuzhiyun 			goto fail;
5986*4882a593Smuzhiyun 		}
5987*4882a593Smuzhiyun 	} else {
5988*4882a593Smuzhiyun 		WL_ERR(("Invalid ether addr provided\n"));
5989*4882a593Smuzhiyun 		ret = BCME_BADARG;
5990*4882a593Smuzhiyun 		goto fail;
5991*4882a593Smuzhiyun 	}
5992*4882a593Smuzhiyun 	is_dest_mac = TRUE;
5993*4882a593Smuzhiyun 
5994*4882a593Smuzhiyun 	if (cmd_data->priority) {
5995*4882a593Smuzhiyun 		sd_xmit->priority = cmd_data->priority;
5996*4882a593Smuzhiyun 	}
5997*4882a593Smuzhiyun 	sd_xmit->token = cmd_data->token;
5998*4882a593Smuzhiyun 
5999*4882a593Smuzhiyun 	if (cmd_data->recv_ind_flag) {
6000*4882a593Smuzhiyun 		/* BIT0 - If set, host won't receive the "txs" event */
6001*4882a593Smuzhiyun 		if (CHECK_BIT(cmd_data->recv_ind_flag,
6002*4882a593Smuzhiyun 				WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT)) {
6003*4882a593Smuzhiyun 			sd_xmit->flags = WL_NAN_FUP_SUPR_EVT_TXS;
6004*4882a593Smuzhiyun 		}
6005*4882a593Smuzhiyun 	}
6006*4882a593Smuzhiyun 	/* Optional parameters: fill the sub_command block with service descriptor attr */
6007*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_SD_TRANSMIT);
6008*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
6009*4882a593Smuzhiyun 		OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
6010*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
6011*4882a593Smuzhiyun 	pxtlv = (uint8 *)&sd_xmit->opt_tlv;
6012*4882a593Smuzhiyun 
6013*4882a593Smuzhiyun 	nan_buf_size -= (sub_cmd->len +
6014*4882a593Smuzhiyun 			OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
6015*4882a593Smuzhiyun 
6016*4882a593Smuzhiyun 	buflen_avail = nan_buf_size;
6017*4882a593Smuzhiyun 
6018*4882a593Smuzhiyun 	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
6019*4882a593Smuzhiyun 		bcm_xtlv_t *pxtlv_svc_info = (bcm_xtlv_t *)pxtlv;
6020*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6021*4882a593Smuzhiyun 				WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
6022*4882a593Smuzhiyun 				cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
6023*4882a593Smuzhiyun 		if (unlikely(ret)) {
6024*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
6025*4882a593Smuzhiyun 				__FUNCTION__, ret));
6026*4882a593Smuzhiyun 			goto fail;
6027*4882a593Smuzhiyun 		}
6028*4882a593Smuzhiyun 
6029*4882a593Smuzhiyun 		/* 0xFF is max length for svc_info */
6030*4882a593Smuzhiyun 		if (pxtlv_svc_info->len > 0xFF) {
6031*4882a593Smuzhiyun 			WL_ERR(("Invalid service info length %d\n",
6032*4882a593Smuzhiyun 				(pxtlv_svc_info->len)));
6033*4882a593Smuzhiyun 			ret = BCME_USAGE_ERROR;
6034*4882a593Smuzhiyun 			goto fail;
6035*4882a593Smuzhiyun 		}
6036*4882a593Smuzhiyun 		sd_xmit->opt_len = (uint8)(pxtlv_svc_info->len);
6037*4882a593Smuzhiyun 	}
6038*4882a593Smuzhiyun 	if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
6039*4882a593Smuzhiyun 		WL_TRACE(("optional sdea svc_info present, pack it\n"));
6040*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6041*4882a593Smuzhiyun 				WL_NAN_XTLV_SD_SDE_SVC_INFO, cmd_data->sde_svc_info.dlen,
6042*4882a593Smuzhiyun 				cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
6043*4882a593Smuzhiyun 		if (unlikely(ret)) {
6044*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
6045*4882a593Smuzhiyun 			goto fail;
6046*4882a593Smuzhiyun 		}
6047*4882a593Smuzhiyun 	}
6048*4882a593Smuzhiyun 
6049*4882a593Smuzhiyun 	/* Check if all mandatory params are provided */
6050*4882a593Smuzhiyun 	if (is_lcl_id && is_dest_id && is_dest_mac) {
6051*4882a593Smuzhiyun 		nan_buf->count++;
6052*4882a593Smuzhiyun 		sub_cmd->len += (buflen_avail - nan_buf_size);
6053*4882a593Smuzhiyun 	} else {
6054*4882a593Smuzhiyun 		WL_ERR(("Missing parameters\n"));
6055*4882a593Smuzhiyun 		ret = BCME_USAGE_ERROR;
6056*4882a593Smuzhiyun 	}
6057*4882a593Smuzhiyun 	nan_buf->is_set = TRUE;
6058*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
6059*4882a593Smuzhiyun 			&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
6060*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
6061*4882a593Smuzhiyun 		WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
6062*4882a593Smuzhiyun 			sd_xmit->token, ret, cmd_data->status));
6063*4882a593Smuzhiyun 		goto fail;
6064*4882a593Smuzhiyun 	}
6065*4882a593Smuzhiyun 	WL_INFORM_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
6066*4882a593Smuzhiyun fail:
6067*4882a593Smuzhiyun 	if (nan_buf) {
6068*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, data_size);
6069*4882a593Smuzhiyun 	}
6070*4882a593Smuzhiyun 	if (resp_buf) {
6071*4882a593Smuzhiyun 		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
6072*4882a593Smuzhiyun 	}
6073*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
6074*4882a593Smuzhiyun 	NAN_DBG_EXIT();
6075*4882a593Smuzhiyun 	return ret;
6076*4882a593Smuzhiyun }
6077*4882a593Smuzhiyun 
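/*
 * Queries firmware capabilities via WL_NAN_CMD_GEN_FW_CAP, unpacks the
 * returned WL_NAN_XTLV_GEN_FW_CAP XTLV and translates the wl_nan_fw_cap_t
 * fields into the HAL nan_hal_capabilities_t layout.
 */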
6078*4882a593Smuzhiyun static int
6079*4882a593Smuzhiyun wl_cfgnan_get_capability(struct net_device *ndev,
6080*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
6081*4882a593Smuzhiyun {
6082*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
6083*4882a593Smuzhiyun 	s32 ret = BCME_OK;
6084*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
6085*4882a593Smuzhiyun 	wl_nan_fw_cap_t *fw_cap = NULL;
6086*4882a593Smuzhiyun 	uint16 subcmd_len;
6087*4882a593Smuzhiyun 	uint32 status;
6088*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
6089*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
6090*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
6091*4882a593Smuzhiyun 	const bcm_xtlv_t *xtlv;
6092*4882a593Smuzhiyun 	uint16 type = 0;
6093*4882a593Smuzhiyun 	int len = 0;
6094*4882a593Smuzhiyun 
6095*4882a593Smuzhiyun 	NAN_DBG_ENTER();
6096*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
6097*4882a593Smuzhiyun 	if (!nan_buf) {
6098*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
6099*4882a593Smuzhiyun 		ret = BCME_NOMEM;
6100*4882a593Smuzhiyun 		goto fail;
6101*4882a593Smuzhiyun 	}
6102*4882a593Smuzhiyun 
6103*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
6104*4882a593Smuzhiyun 	nan_buf->count = 0;
6105*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
6106*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
6107*4882a593Smuzhiyun 
6108*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
6109*4882a593Smuzhiyun 			sizeof(*fw_cap), &subcmd_len);
6110*4882a593Smuzhiyun 	if (unlikely(ret)) {
6111*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
6112*4882a593Smuzhiyun 		goto fail;
6113*4882a593Smuzhiyun 	}
6114*4882a593Smuzhiyun 
6115*4882a593Smuzhiyun 	fw_cap = (wl_nan_fw_cap_t *)sub_cmd->data;
6116*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_GEN_FW_CAP);
6117*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*fw_cap);
6118*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
6119*4882a593Smuzhiyun 	nan_buf_size -= subcmd_len;
6120*4882a593Smuzhiyun 	nan_buf->count = 1;
6121*4882a593Smuzhiyun 
6122*4882a593Smuzhiyun 	nan_buf->is_set = false;
6123*4882a593Smuzhiyun 	memset(resp_buf, 0, sizeof(resp_buf));
6124*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
6125*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
6126*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
6127*4882a593Smuzhiyun 		WL_ERR(("get nan fw cap failed ret %d status %d \n",
6128*4882a593Smuzhiyun 				ret, status));
6129*4882a593Smuzhiyun 		goto fail;
6130*4882a593Smuzhiyun 	}
6131*4882a593Smuzhiyun 
6132*4882a593Smuzhiyun 	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
6133*4882a593Smuzhiyun 
6134*4882a593Smuzhiyun 	/* check the response buff */
6135*4882a593Smuzhiyun 	xtlv = ((const bcm_xtlv_t *)&sub_cmd_resp->data[0]);
6136*4882a593Smuzhiyun 	if (!xtlv) {
6137*4882a593Smuzhiyun 		ret = BCME_NOTFOUND;
6138*4882a593Smuzhiyun 		WL_ERR(("xtlv not found: err = %d\n", ret));
6139*4882a593Smuzhiyun 		goto fail;
6140*4882a593Smuzhiyun 	}
6141*4882a593Smuzhiyun 	bcm_xtlv_unpack_xtlv(xtlv, &type, (uint16*)&len, NULL, BCM_XTLV_OPTION_ALIGN32);
6142*4882a593Smuzhiyun 	do
6143*4882a593Smuzhiyun 	{
6144*4882a593Smuzhiyun 		switch (type) {
6145*4882a593Smuzhiyun 			case WL_NAN_XTLV_GEN_FW_CAP:
6146*4882a593Smuzhiyun 				if (len > sizeof(wl_nan_fw_cap_t)) {
6147*4882a593Smuzhiyun 					ret = BCME_BADARG;
6148*4882a593Smuzhiyun 					goto fail;
6149*4882a593Smuzhiyun 				}
6150*4882a593Smuzhiyun 				GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
6151*4882a593Smuzhiyun 				fw_cap = (wl_nan_fw_cap_t*)xtlv->data;
6152*4882a593Smuzhiyun 				GCC_DIAGNOSTIC_POP();
6153*4882a593Smuzhiyun 				break;
6154*4882a593Smuzhiyun 			default:
6155*4882a593Smuzhiyun 				WL_ERR(("Unknown xtlv: id %u\n", type));
6156*4882a593Smuzhiyun 				ret = BCME_ERROR;
6157*4882a593Smuzhiyun 				break;
6158*4882a593Smuzhiyun 		}
6159*4882a593Smuzhiyun 		if (ret != BCME_OK) {
6160*4882a593Smuzhiyun 			goto fail;
6161*4882a593Smuzhiyun 		}
6162*4882a593Smuzhiyun 	} while ((xtlv = bcm_next_xtlv(xtlv, &len, BCM_XTLV_OPTION_ALIGN32)));
6163*4882a593Smuzhiyun 
6164*4882a593Smuzhiyun 	memset(capabilities, 0, sizeof(nan_hal_capabilities_t));
6165*4882a593Smuzhiyun 	capabilities->max_publishes = fw_cap->max_svc_publishes;
6166*4882a593Smuzhiyun 	capabilities->max_subscribes = fw_cap->max_svc_subscribes;
6167*4882a593Smuzhiyun 	capabilities->max_ndi_interfaces = fw_cap->max_lcl_ndi_interfaces;
6168*4882a593Smuzhiyun 	capabilities->max_ndp_sessions = fw_cap->max_ndp_sessions;
6169*4882a593Smuzhiyun 	capabilities->max_concurrent_nan_clusters = fw_cap->max_concurrent_nan_clusters;
6170*4882a593Smuzhiyun 	capabilities->max_service_name_len = fw_cap->max_service_name_len;
6171*4882a593Smuzhiyun 	capabilities->max_match_filter_len = fw_cap->max_match_filter_len;
6172*4882a593Smuzhiyun 	capabilities->max_total_match_filter_len = fw_cap->max_total_match_filter_len;
6173*4882a593Smuzhiyun 	capabilities->max_service_specific_info_len = fw_cap->max_service_specific_info_len;
6174*4882a593Smuzhiyun 	capabilities->max_app_info_len = fw_cap->max_app_info_len;
6175*4882a593Smuzhiyun 	capabilities->max_sdea_service_specific_info_len = fw_cap->max_sdea_svc_specific_info_len;
6176*4882a593Smuzhiyun 	capabilities->max_queued_transmit_followup_msgs = fw_cap->max_queued_tx_followup_msgs;
6177*4882a593Smuzhiyun 	capabilities->max_subscribe_address = fw_cap->max_subscribe_address;
6178*4882a593Smuzhiyun 	capabilities->is_ndp_security_supported = fw_cap->is_ndp_security_supported;
6179*4882a593Smuzhiyun 	capabilities->ndp_supported_bands = fw_cap->ndp_supported_bands;
6180*4882a593Smuzhiyun 	capabilities->cipher_suites_supported = fw_cap->cipher_suites_supported_mask;
6181*4882a593Smuzhiyun 	if (fw_cap->flags1 & WL_NAN_FW_CAP_FLAG1_NDPE) {
6182*4882a593Smuzhiyun 		capabilities->ndpe_attr_supported = true;
6183*4882a593Smuzhiyun 	}
6184*4882a593Smuzhiyun 
6185*4882a593Smuzhiyun fail:
6186*4882a593Smuzhiyun 	if (nan_buf) {
6187*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
6188*4882a593Smuzhiyun 	}
6189*4882a593Smuzhiyun 	NAN_DBG_EXIT();
6190*4882a593Smuzhiyun 	return ret;
6191*4882a593Smuzhiyun }
6192*4882a593Smuzhiyun 
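/*
 * HAL entry point for capability queries. If NAN is not yet initialized it
 * temporarily initializes NAN, queries the firmware and de-initializes again;
 * if the firmware query fails, it falls back (via the "exit" label) to the
 * compile-time default capability values.
 */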
6193*4882a593Smuzhiyun int
6194*4882a593Smuzhiyun wl_cfgnan_get_capablities_handler(struct net_device *ndev,
6195*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
6196*4882a593Smuzhiyun {
6197*4882a593Smuzhiyun 	s32 ret = BCME_OK;
6198*4882a593Smuzhiyun 	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
6199*4882a593Smuzhiyun 
6200*4882a593Smuzhiyun 	NAN_DBG_ENTER();
6201*4882a593Smuzhiyun 
6202*4882a593Smuzhiyun 	/* Do not query fw about nan if feature is not supported */
6203*4882a593Smuzhiyun 	if (!FW_SUPPORTED(dhdp, nan)) {
6204*4882a593Smuzhiyun 		WL_DBG(("NAN is not supported\n"));
6205*4882a593Smuzhiyun 		return ret;
6206*4882a593Smuzhiyun 	}
6207*4882a593Smuzhiyun 
6208*4882a593Smuzhiyun 	if (cfg->nancfg->nan_init_state) {
6209*4882a593Smuzhiyun 		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
6210*4882a593Smuzhiyun 		if (ret != BCME_OK) {
6211*4882a593Smuzhiyun 			WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
6212*4882a593Smuzhiyun 					cfg->nancfg->nan_init_state, ret));
6213*4882a593Smuzhiyun 			goto exit;
6214*4882a593Smuzhiyun 		}
6215*4882a593Smuzhiyun 	} else {
6216*4882a593Smuzhiyun 		/* Initialize NAN before sending iovar */
6217*4882a593Smuzhiyun 		WL_ERR(("Initializing NAN\n"));
6218*4882a593Smuzhiyun 		ret = wl_cfgnan_init(cfg);
6219*4882a593Smuzhiyun 		if (ret != BCME_OK) {
6220*4882a593Smuzhiyun 			WL_ERR(("failed to initialize NAN[%d]\n", ret));
6221*4882a593Smuzhiyun 			goto fail;
6222*4882a593Smuzhiyun 		}
6223*4882a593Smuzhiyun 
6224*4882a593Smuzhiyun 		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
6225*4882a593Smuzhiyun 		if (ret != BCME_OK) {
6226*4882a593Smuzhiyun 			WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
6227*4882a593Smuzhiyun 					cfg->nancfg->nan_init_state, ret));
6228*4882a593Smuzhiyun 			goto exit;
6229*4882a593Smuzhiyun 		}
6230*4882a593Smuzhiyun 		WL_ERR(("De-Initializing NAN\n"));
6231*4882a593Smuzhiyun 		ret = wl_cfgnan_deinit(cfg, dhdp->up);
6232*4882a593Smuzhiyun 		if (ret != BCME_OK) {
6233*4882a593Smuzhiyun 			WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
6234*4882a593Smuzhiyun 			goto fail;
6235*4882a593Smuzhiyun 		}
6236*4882a593Smuzhiyun 	}
6237*4882a593Smuzhiyun fail:
6238*4882a593Smuzhiyun 	NAN_DBG_EXIT();
6239*4882a593Smuzhiyun 	return ret;
6240*4882a593Smuzhiyun exit:
6241*4882a593Smuzhiyun 	/* Keeping backward compatibility */
6242*4882a593Smuzhiyun 	capabilities->max_concurrent_nan_clusters = MAX_CONCURRENT_NAN_CLUSTERS;
6243*4882a593Smuzhiyun 	capabilities->max_publishes = MAX_PUBLISHES;
6244*4882a593Smuzhiyun 	capabilities->max_subscribes = MAX_SUBSCRIBES;
6245*4882a593Smuzhiyun 	capabilities->max_service_name_len = MAX_SVC_NAME_LEN;
6246*4882a593Smuzhiyun 	capabilities->max_match_filter_len = MAX_MATCH_FILTER_LEN;
6247*4882a593Smuzhiyun 	capabilities->max_total_match_filter_len = MAX_TOTAL_MATCH_FILTER_LEN;
6248*4882a593Smuzhiyun 	capabilities->max_service_specific_info_len = NAN_MAX_SERVICE_SPECIFIC_INFO_LEN;
6249*4882a593Smuzhiyun 	capabilities->max_ndi_interfaces = NAN_MAX_NDI;
6250*4882a593Smuzhiyun 	capabilities->max_ndp_sessions = MAX_NDP_SESSIONS;
6251*4882a593Smuzhiyun 	capabilities->max_app_info_len = MAX_APP_INFO_LEN;
6252*4882a593Smuzhiyun 	capabilities->max_queued_transmit_followup_msgs = MAX_QUEUED_TX_FOLLOUP_MSGS;
6253*4882a593Smuzhiyun 	capabilities->max_sdea_service_specific_info_len = MAX_SDEA_SVC_INFO_LEN;
6254*4882a593Smuzhiyun 	capabilities->max_subscribe_address = MAX_SUBSCRIBE_ADDRESS;
6255*4882a593Smuzhiyun 	capabilities->cipher_suites_supported = WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK;
6256*4882a593Smuzhiyun 	capabilities->max_scid_len = MAX_SCID_LEN;
6257*4882a593Smuzhiyun 	capabilities->is_ndp_security_supported = true;
6258*4882a593Smuzhiyun 	capabilities->ndp_supported_bands = NDP_SUPPORTED_BANDS;
6259*4882a593Smuzhiyun 	capabilities->ndpe_attr_supported = false;
6260*4882a593Smuzhiyun 	ret = BCME_OK;
6261*4882a593Smuzhiyun 	NAN_DBG_EXIT();
6262*4882a593Smuzhiyun 	return ret;
6263*4882a593Smuzhiyun }
6264*4882a593Smuzhiyun 
6265*4882a593Smuzhiyun bool wl_cfgnan_is_enabled(struct bcm_cfg80211 *cfg)
6266*4882a593Smuzhiyun {
6267*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
6268*4882a593Smuzhiyun 	if (nancfg) {
6269*4882a593Smuzhiyun 		if (nancfg->nan_init_state && nancfg->nan_enable) {
6270*4882a593Smuzhiyun 			return TRUE;
6271*4882a593Smuzhiyun 		}
6272*4882a593Smuzhiyun 	}
6273*4882a593Smuzhiyun 
6274*4882a593Smuzhiyun 	return FALSE;
6275*4882a593Smuzhiyun }
6276*4882a593Smuzhiyun 
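/*
 * Sends the NAN init iovar on the primary interface, allocates the discovery
 * result cache (when WL_NAN_DISC_CACHE is enabled) and marks nan_init_state.
 * Returns BCME_OK immediately if NAN is already initialized.
 */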
6277*4882a593Smuzhiyun static int
6278*4882a593Smuzhiyun wl_cfgnan_init(struct bcm_cfg80211 *cfg)
6279*4882a593Smuzhiyun {
6280*4882a593Smuzhiyun 	s32 ret = BCME_OK;
6281*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
6282*4882a593Smuzhiyun 	uint32 status;
6283*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
6284*4882a593Smuzhiyun 	uint8 buf[NAN_IOCTL_BUF_SIZE];
6285*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
6286*4882a593Smuzhiyun 
6287*4882a593Smuzhiyun 	NAN_DBG_ENTER();
6288*4882a593Smuzhiyun 	if (cfg->nancfg->nan_init_state) {
6289*4882a593Smuzhiyun 		WL_ERR(("nan initialized/nmi exists\n"));
6290*4882a593Smuzhiyun 		return BCME_OK;
6291*4882a593Smuzhiyun 	}
6292*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
6293*4882a593Smuzhiyun 	nan_buf->count = 0;
6294*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
6295*4882a593Smuzhiyun 	ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, true);
6296*4882a593Smuzhiyun 	if (unlikely(ret)) {
6297*4882a593Smuzhiyun 		WL_ERR(("init handler sub_cmd set failed\n"));
6298*4882a593Smuzhiyun 		goto fail;
6299*4882a593Smuzhiyun 	}
6300*4882a593Smuzhiyun 	nan_buf->count++;
6301*4882a593Smuzhiyun 	nan_buf->is_set = true;
6302*4882a593Smuzhiyun 
6303*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
6304*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
6305*4882a593Smuzhiyun 			nan_buf, nan_buf_size, &status,
6306*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
6307*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
6308*4882a593Smuzhiyun 		WL_ERR(("nan init handler failed ret %d status %d\n",
6309*4882a593Smuzhiyun 				ret, status));
6310*4882a593Smuzhiyun 		goto fail;
6311*4882a593Smuzhiyun 	}
6312*4882a593Smuzhiyun 
6313*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
6314*4882a593Smuzhiyun 	/* malloc for disc result */
6315*4882a593Smuzhiyun 	cfg->nancfg->nan_disc_cache = MALLOCZ(cfg->osh,
6316*4882a593Smuzhiyun 			NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
6317*4882a593Smuzhiyun 	if (!cfg->nancfg->nan_disc_cache) {
6318*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
6319*4882a593Smuzhiyun 		ret = BCME_NOMEM;
6320*4882a593Smuzhiyun 		goto fail;
6321*4882a593Smuzhiyun 	}
6322*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
6323*4882a593Smuzhiyun 	cfg->nancfg->nan_init_state = true;
6324*4882a593Smuzhiyun 	return ret;
6325*4882a593Smuzhiyun fail:
6326*4882a593Smuzhiyun 	NAN_DBG_EXIT();
6327*4882a593Smuzhiyun 	return ret;
6328*4882a593Smuzhiyun }
6329*4882a593Smuzhiyun 
6330*4882a593Smuzhiyun static void
6331*4882a593Smuzhiyun wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg)
6332*4882a593Smuzhiyun {
6333*4882a593Smuzhiyun 	uint8 i = 0;
6334*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
6335*4882a593Smuzhiyun 
6336*4882a593Smuzhiyun 	nancfg->nan_dp_count = 0;
6337*4882a593Smuzhiyun 	nancfg->nan_init_state = false;
6338*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
6339*4882a593Smuzhiyun 	if (nancfg->nan_disc_cache) {
6340*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
6341*4882a593Smuzhiyun 			if (nancfg->nan_disc_cache[i].tx_match_filter.data) {
6342*4882a593Smuzhiyun 				MFREE(cfg->osh, nancfg->nan_disc_cache[i].tx_match_filter.data,
6343*4882a593Smuzhiyun 					nancfg->nan_disc_cache[i].tx_match_filter.dlen);
6344*4882a593Smuzhiyun 			}
6345*4882a593Smuzhiyun 			if (nancfg->nan_disc_cache[i].svc_info.data) {
6346*4882a593Smuzhiyun 				MFREE(cfg->osh, nancfg->nan_disc_cache[i].svc_info.data,
6347*4882a593Smuzhiyun 					nancfg->nan_disc_cache[i].svc_info.dlen);
6348*4882a593Smuzhiyun 			}
6349*4882a593Smuzhiyun 		}
6350*4882a593Smuzhiyun 		MFREE(cfg->osh, nancfg->nan_disc_cache,
6351*4882a593Smuzhiyun 			NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
6352*4882a593Smuzhiyun 		nancfg->nan_disc_cache = NULL;
6353*4882a593Smuzhiyun 	}
6354*4882a593Smuzhiyun 	nancfg->nan_disc_count = 0;
6355*4882a593Smuzhiyun 	bzero(nancfg->svc_info, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
6356*4882a593Smuzhiyun 	bzero(nancfg->nan_ranging_info, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
6357*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
6358*4882a593Smuzhiyun 	return;
6359*4882a593Smuzhiyun }
6360*4882a593Smuzhiyun 
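/*
 * Sends the NAN deinit iovar when the bus is still up, then releases cached
 * discovery/service/ranging state through wl_cfgnan_deinit_cleanup(). For a
 * non-random, non-null NMI address it also calls wl_release_vif_macaddr().
 */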
6361*4882a593Smuzhiyun static int
6362*4882a593Smuzhiyun wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
6363*4882a593Smuzhiyun {
6364*4882a593Smuzhiyun 	s32 ret = BCME_OK;
6365*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
6366*4882a593Smuzhiyun 	uint32 status;
6367*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
6368*4882a593Smuzhiyun 	uint8 buf[NAN_IOCTL_BUF_SIZE];
6369*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
6370*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
6371*4882a593Smuzhiyun 
6372*4882a593Smuzhiyun 	NAN_DBG_ENTER();
6373*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
6374*4882a593Smuzhiyun 
6375*4882a593Smuzhiyun 	if (!nancfg->nan_init_state) {
6376*4882a593Smuzhiyun 		WL_ERR(("nan is not initialized/nmi doesn't exist\n"));
6377*4882a593Smuzhiyun 		ret = BCME_OK;
6378*4882a593Smuzhiyun 		goto fail;
6379*4882a593Smuzhiyun 	}
6380*4882a593Smuzhiyun 
6381*4882a593Smuzhiyun 	if (busstate != DHD_BUS_DOWN) {
6382*4882a593Smuzhiyun 		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
6383*4882a593Smuzhiyun 		nan_buf->count = 0;
6384*4882a593Smuzhiyun 		nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
6385*4882a593Smuzhiyun 
6386*4882a593Smuzhiyun 		WL_DBG(("nan deinit\n"));
6387*4882a593Smuzhiyun 		ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, false);
6388*4882a593Smuzhiyun 		if (unlikely(ret)) {
6389*4882a593Smuzhiyun 			WL_ERR(("deinit handler sub_cmd set failed\n"));
6390*4882a593Smuzhiyun 		} else {
6391*4882a593Smuzhiyun 			nan_buf->count++;
6392*4882a593Smuzhiyun 			nan_buf->is_set = true;
6393*4882a593Smuzhiyun 			bzero(resp_buf, sizeof(resp_buf));
6394*4882a593Smuzhiyun 			ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg,
6395*4882a593Smuzhiyun 				nan_buf, nan_buf_size, &status,
6396*4882a593Smuzhiyun 				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
6397*4882a593Smuzhiyun 			if (unlikely(ret) || unlikely(status)) {
6398*4882a593Smuzhiyun 				WL_ERR(("nan init handler failed ret %d status %d\n",
6399*4882a593Smuzhiyun 				WL_ERR(("nan deinit handler failed ret %d status %d\n",
6400*4882a593Smuzhiyun 			}
6401*4882a593Smuzhiyun 		}
6402*4882a593Smuzhiyun 	}
6403*4882a593Smuzhiyun 	wl_cfgnan_deinit_cleanup(cfg);
6404*4882a593Smuzhiyun 
6405*4882a593Smuzhiyun fail:
6406*4882a593Smuzhiyun 	if (!nancfg->mac_rand && !ETHER_ISNULLADDR(nancfg->nan_nmi_mac)) {
6407*4882a593Smuzhiyun 		wl_release_vif_macaddr(cfg, nancfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
6408*4882a593Smuzhiyun 	}
6409*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
6410*4882a593Smuzhiyun 	NAN_DBG_EXIT();
6411*4882a593Smuzhiyun 	return ret;
6412*4882a593Smuzhiyun }
6413*4882a593Smuzhiyun 
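/*
 * Picks a MAC address for the NAN data interface: with MAC randomization it
 * generates a random locally-administered unicast address, retrying up to
 * NAN_RAND_MAC_RETRIES times so the NDI address differs from the NMI;
 * otherwise it requests one from wl_get_vif_macaddr() for WL_IF_TYPE_NAN.
 */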
6414*4882a593Smuzhiyun static int
6415*4882a593Smuzhiyun wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 *cfg, u8* mac_addr)
6416*4882a593Smuzhiyun {
6417*4882a593Smuzhiyun 	int i = 0;
6418*4882a593Smuzhiyun 	int ret = BCME_OK;
6419*4882a593Smuzhiyun 	bool rand_mac = cfg->nancfg->mac_rand;
6420*4882a593Smuzhiyun 	BCM_REFERENCE(i);
6421*4882a593Smuzhiyun 
6422*4882a593Smuzhiyun 	if (rand_mac) {
6423*4882a593Smuzhiyun 		/* ensure nmi != ndi */
6424*4882a593Smuzhiyun 		do {
6425*4882a593Smuzhiyun 			RANDOM_BYTES(mac_addr, ETHER_ADDR_LEN);
6426*4882a593Smuzhiyun 			/* restore mcast and local admin bits to 0 and 1 */
6427*4882a593Smuzhiyun 			ETHER_SET_UNICAST(mac_addr);
6428*4882a593Smuzhiyun 			ETHER_SET_LOCALADDR(mac_addr);
6429*4882a593Smuzhiyun 			i++;
6430*4882a593Smuzhiyun 			if (i == NAN_RAND_MAC_RETRIES) {
6431*4882a593Smuzhiyun 				break;
6432*4882a593Smuzhiyun 			}
6433*4882a593Smuzhiyun 		} while (eacmp(cfg->nancfg->nan_nmi_mac, mac_addr) == 0);
6434*4882a593Smuzhiyun 
6435*4882a593Smuzhiyun 		if (i == NAN_RAND_MAC_RETRIES) {
6436*4882a593Smuzhiyun 			if (eacmp(cfg->nancfg->nan_nmi_mac, mac_addr) == 0) {
6437*4882a593Smuzhiyun 				WL_ERR(("Couldn't generate a random NDI different from the NMI\n"));
6438*4882a593Smuzhiyun 				ret = BCME_NORESOURCE;
6439*4882a593Smuzhiyun 				goto fail;
6440*4882a593Smuzhiyun 			}
6441*4882a593Smuzhiyun 		}
6442*4882a593Smuzhiyun 	} else {
6443*4882a593Smuzhiyun 		if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN,
6444*4882a593Smuzhiyun 			mac_addr) != BCME_OK) {
6445*4882a593Smuzhiyun 			ret = -EINVAL;
6446*4882a593Smuzhiyun 			WL_ERR(("Failed to get mac addr for NDI\n"));
6447*4882a593Smuzhiyun 			goto fail;
6448*4882a593Smuzhiyun 		}
6449*4882a593Smuzhiyun 	}
6450*4882a593Smuzhiyun 
6451*4882a593Smuzhiyun fail:
6452*4882a593Smuzhiyun 	return ret;
6453*4882a593Smuzhiyun }
6454*4882a593Smuzhiyun 
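/*
 * Create or delete the NDI network interface for the
 * NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE/DELETE requests.
 * Create: pick a free NDI slot, derive a MAC address and register the
 * interface via wl_cfg80211_add_if(). Delete: remove the interface and
 * drop the matching NDI bookkeeping entry. Both paths require the bus
 * to be up.
 */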
6455*4882a593Smuzhiyun int
6456*4882a593Smuzhiyun wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
6457*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate)
6458*4882a593Smuzhiyun {
6459*4882a593Smuzhiyun 	u8 mac_addr[ETH_ALEN];
6460*4882a593Smuzhiyun 	s32 ret = BCME_OK;
6461*4882a593Smuzhiyun 	s32 idx;
6462*4882a593Smuzhiyun 	struct wireless_dev *wdev;
6463*4882a593Smuzhiyun 	NAN_DBG_ENTER();
6464*4882a593Smuzhiyun 
6465*4882a593Smuzhiyun 	if (busstate != DHD_BUS_DOWN) {
6466*4882a593Smuzhiyun 		ASSERT(cfg->nancfg->ndi);
6467*4882a593Smuzhiyun 		if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
6468*4882a593Smuzhiyun 			if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
6469*4882a593Smuzhiyun 				WL_ERR(("No free idx for NAN NDI\n"));
6470*4882a593Smuzhiyun 				ret = BCME_NORESOURCE;
6471*4882a593Smuzhiyun 				goto fail;
6472*4882a593Smuzhiyun 			}
6473*4882a593Smuzhiyun 
6474*4882a593Smuzhiyun 			ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
6475*4882a593Smuzhiyun 			if (ret != BCME_OK) {
6476*4882a593Smuzhiyun 				WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
6477*4882a593Smuzhiyun 				goto fail;
6478*4882a593Smuzhiyun 			}
6479*4882a593Smuzhiyun 			wdev = wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
6480*4882a593Smuzhiyun 				ifname, mac_addr);
6481*4882a593Smuzhiyun 			if (!wdev) {
6482*4882a593Smuzhiyun 				ret = -ENODEV;
6483*4882a593Smuzhiyun 				WL_ERR(("Failed to create NDI iface = %s, wdev is NULL\n", ifname));
6484*4882a593Smuzhiyun 				goto fail;
6485*4882a593Smuzhiyun 			}
6486*4882a593Smuzhiyun 			/* Store the iface name to pub data so that it can be used
6487*4882a593Smuzhiyun 			 * during NAN enable
6488*4882a593Smuzhiyun 			 */
6489*4882a593Smuzhiyun 			wl_cfgnan_add_ndi_data(cfg, idx, ifname);
6490*4882a593Smuzhiyun 			cfg->nancfg->ndi[idx].created = true;
6491*4882a593Smuzhiyun 			/* Store nan ndev */
6492*4882a593Smuzhiyun 			cfg->nancfg->ndi[idx].nan_ndev = wdev_to_ndev(wdev);
6493*4882a593Smuzhiyun 
6494*4882a593Smuzhiyun 		} else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
6495*4882a593Smuzhiyun 			ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
6496*4882a593Smuzhiyun 			if (ret == BCME_OK) {
6497*4882a593Smuzhiyun 				if (wl_cfgnan_del_ndi_data(cfg, ifname) < 0) {
6498*4882a593Smuzhiyun 					WL_ERR(("Failed to find matching data for ndi:%s\n",
6499*4882a593Smuzhiyun 					ifname));
6500*4882a593Smuzhiyun 				}
6501*4882a593Smuzhiyun 			} else if (ret == -ENODEV) {
6502*4882a593Smuzhiyun 				WL_INFORM(("Already deleted: %s\n", ifname));
6503*4882a593Smuzhiyun 				ret = BCME_OK;
6504*4882a593Smuzhiyun 			} else if (ret != BCME_OK) {
6505*4882a593Smuzhiyun 				WL_ERR(("failed to delete NDI, error = %d\n", ret));
6506*4882a593Smuzhiyun 			}
6507*4882a593Smuzhiyun 		}
6508*4882a593Smuzhiyun 	} else {
6509*4882a593Smuzhiyun 		ret = -ENODEV;
6510*4882a593Smuzhiyun 		WL_ERR(("Bus is already down, no dev found to remove, ret = %d\n", ret));
6511*4882a593Smuzhiyun 	}
6512*4882a593Smuzhiyun fail:
6513*4882a593Smuzhiyun 	NAN_DBG_EXIT();
6514*4882a593Smuzhiyun 	return ret;
6515*4882a593Smuzhiyun }
6516*4882a593Smuzhiyun 
6517*4882a593Smuzhiyun /*
6518*4882a593Smuzhiyun  * Return the data peer entry from the peer list
6519*4882a593Smuzhiyun  * matching peer_addr.
6520*4882a593Smuzhiyun  * Returns NULL if not found.
6521*4882a593Smuzhiyun  */
6522*4882a593Smuzhiyun static nan_ndp_peer_t *
6523*4882a593Smuzhiyun wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
6524*4882a593Smuzhiyun 	struct ether_addr *peer_addr)
6525*4882a593Smuzhiyun {
6526*4882a593Smuzhiyun 	uint8 i;
6527*4882a593Smuzhiyun 	nan_ndp_peer_t* peer = cfg->nancfg->nan_ndp_peer_info;
6528*4882a593Smuzhiyun 
6529*4882a593Smuzhiyun 	if (!peer) {
6530*4882a593Smuzhiyun 		WL_ERR(("wl_cfgnan_data_get_peer: nan_ndp_peer_info is NULL\n"));
6531*4882a593Smuzhiyun 		goto exit;
6532*4882a593Smuzhiyun 	}
6533*4882a593Smuzhiyun 	for (i = 0; i < cfg->nancfg->max_ndp_count; i++) {
6534*4882a593Smuzhiyun 		if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED &&
6535*4882a593Smuzhiyun 			(!memcmp(peer_addr, &peer[i].peer_addr, ETHER_ADDR_LEN))) {
6536*4882a593Smuzhiyun 			return &peer[i];
6537*4882a593Smuzhiyun 		}
6538*4882a593Smuzhiyun 	}
6539*4882a593Smuzhiyun 
6540*4882a593Smuzhiyun exit:
6541*4882a593Smuzhiyun 	return NULL;
6542*4882a593Smuzhiyun }
6543*4882a593Smuzhiyun 
6544*4882a593Smuzhiyun /*
6545*4882a593Smuzhiyun  * Returns TRUE if a datapath exists
6546*4882a593Smuzhiyun  * with the given peer for this
6547*4882a593Smuzhiyun  * NAN configuration.
6548*4882a593Smuzhiyun  */
6549*4882a593Smuzhiyun bool
6550*4882a593Smuzhiyun wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
6551*4882a593Smuzhiyun 		struct ether_addr *peer_addr)
6552*4882a593Smuzhiyun {
6553*4882a593Smuzhiyun 	bool ret = FALSE;
6554*4882a593Smuzhiyun 	nan_ndp_peer_t* peer = NULL;
6555*4882a593Smuzhiyun 
6556*4882a593Smuzhiyun 	if ((cfg->nancfg->nan_init_state == FALSE) ||
6557*4882a593Smuzhiyun 		(cfg->nancfg->nan_enable == FALSE)) {
6558*4882a593Smuzhiyun 		goto exit;
6559*4882a593Smuzhiyun 	}
6560*4882a593Smuzhiyun 
6561*4882a593Smuzhiyun 	/* check for peer exist */
6562*4882a593Smuzhiyun 	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
6563*4882a593Smuzhiyun 	if (peer) {
6564*4882a593Smuzhiyun 		ret = TRUE;
6565*4882a593Smuzhiyun 	}
6566*4882a593Smuzhiyun 
6567*4882a593Smuzhiyun exit:
6568*4882a593Smuzhiyun 	return ret;
6569*4882a593Smuzhiyun }
6570*4882a593Smuzhiyun 
6571*4882a593Smuzhiyun /*
6572*4882a593Smuzhiyun  * As of now this API is only used
6573*4882a593Smuzhiyun  * to set the peer state to CONNECTED,
6574*4882a593Smuzhiyun  * if applicable.
6575*4882a593Smuzhiyun  */
6576*4882a593Smuzhiyun static void
6577*4882a593Smuzhiyun wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
6578*4882a593Smuzhiyun 		struct ether_addr *peer_addr, nan_peer_dp_state_t state)
6579*4882a593Smuzhiyun {
6580*4882a593Smuzhiyun 	nan_ndp_peer_t* peer = NULL;
6581*4882a593Smuzhiyun 	/* check for peer exist */
6582*4882a593Smuzhiyun 	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
6583*4882a593Smuzhiyun 	if (!peer) {
6584*4882a593Smuzhiyun 		goto end;
6585*4882a593Smuzhiyun 	}
6586*4882a593Smuzhiyun 	peer->peer_dp_state = state;
6587*4882a593Smuzhiyun end:
6588*4882a593Smuzhiyun 	return;
6589*4882a593Smuzhiyun }
6590*4882a593Smuzhiyun 
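/*
 * NDP peer bookkeeping: each entry in nan_ndp_peer_info is reference
 * counted via dp_count. wl_cfgnan_data_add_peer() adds a new entry in
 * NAN_PEER_DP_CONNECTING state (or bumps dp_count for an existing peer),
 * wl_cfgnan_data_set_peer_dp_state() moves it to CONNECTED once the NDP
 * is established, and wl_cfgnan_data_remove_peer() drops the reference,
 * clearing the entry when the last NDP to that peer goes away.
 */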
6591*4882a593Smuzhiyun /* Adds peer to nan data peer list */
6592*4882a593Smuzhiyun void
6593*4882a593Smuzhiyun wl_cfgnan_data_add_peer(struct bcm_cfg80211 *cfg,
6594*4882a593Smuzhiyun 		struct ether_addr *peer_addr)
6595*4882a593Smuzhiyun {
6596*4882a593Smuzhiyun 	uint8 i;
6597*4882a593Smuzhiyun 	nan_ndp_peer_t* peer = NULL;
6598*4882a593Smuzhiyun 	/* check for peer exist */
6599*4882a593Smuzhiyun 	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
6600*4882a593Smuzhiyun 	if (peer) {
6601*4882a593Smuzhiyun 		peer->dp_count++;
6602*4882a593Smuzhiyun 		goto end;
6603*4882a593Smuzhiyun 	}
6604*4882a593Smuzhiyun 	peer = cfg->nancfg->nan_ndp_peer_info;
6605*4882a593Smuzhiyun 	for (i = 0; i < cfg->nancfg->max_ndp_count; i++) {
6606*4882a593Smuzhiyun 		if (peer[i].peer_dp_state == NAN_PEER_DP_NOT_CONNECTED) {
6607*4882a593Smuzhiyun 			break;
6608*4882a593Smuzhiyun 		}
6609*4882a593Smuzhiyun 	}
6610*4882a593Smuzhiyun 	if (i == cfg->nancfg->max_ndp_count) { /* loop exhausted all slots: list is full */
6611*4882a593Smuzhiyun 		WL_DBG(("DP Peer list full, dropping add peer req\n"));
6612*4882a593Smuzhiyun 		goto end;
6613*4882a593Smuzhiyun 	}
6614*4882a593Smuzhiyun 	/* Add peer to list */
6615*4882a593Smuzhiyun 	memcpy(&peer[i].peer_addr, peer_addr, ETHER_ADDR_LEN);
6616*4882a593Smuzhiyun 	peer[i].dp_count = 1;
6617*4882a593Smuzhiyun 	peer[i].peer_dp_state = NAN_PEER_DP_CONNECTING;
6618*4882a593Smuzhiyun 
6619*4882a593Smuzhiyun end:
6620*4882a593Smuzhiyun 	return;
6621*4882a593Smuzhiyun }
6622*4882a593Smuzhiyun 
6623*4882a593Smuzhiyun /* Removes nan data peer from peer list */
6624*4882a593Smuzhiyun void
6625*4882a593Smuzhiyun wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
6626*4882a593Smuzhiyun 		struct ether_addr *peer_addr)
6627*4882a593Smuzhiyun {
6628*4882a593Smuzhiyun 	nan_ndp_peer_t* peer = NULL;
6629*4882a593Smuzhiyun 	/* check for peer exist */
6630*4882a593Smuzhiyun 	peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
6631*4882a593Smuzhiyun 	if (!peer) {
6632*4882a593Smuzhiyun 		WL_DBG(("DP Peer not present in list, "
6633*4882a593Smuzhiyun 			"dropping remove peer req\n"));
6634*4882a593Smuzhiyun 		goto end;
6635*4882a593Smuzhiyun 	}
6636*4882a593Smuzhiyun 	peer->dp_count--;
6637*4882a593Smuzhiyun 	if (peer->dp_count == 0) {
6638*4882a593Smuzhiyun 		/* No more NDPs, delete entry */
6639*4882a593Smuzhiyun 		memset(peer, 0, sizeof(nan_ndp_peer_t));
6640*4882a593Smuzhiyun 	} else {
6641*4882a593Smuzhiyun 		/* Set peer dp state to connected if any ndp still exists */
6642*4882a593Smuzhiyun 		peer->peer_dp_state = NAN_PEER_DP_CONNECTED;
6643*4882a593Smuzhiyun 	}
6644*4882a593Smuzhiyun end:
6645*4882a593Smuzhiyun 	return;
6646*4882a593Smuzhiyun }
6647*4882a593Smuzhiyun 
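/*
 * Initiator-side NDP setup: builds a WL_NAN_CMD_DATA_DATAREQ batch IOV
 * from the framework-supplied cmd_data (peer NMI, local NDI, optional
 * service info/NDPE TLVs and security material), programs local/NDC
 * availability first, then issues the request to firmware. On success
 * the NDP instance id returned by firmware is copied back to the caller
 * and the peer is added to the NDP peer list.
 */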
6648*4882a593Smuzhiyun int
6649*4882a593Smuzhiyun wl_cfgnan_data_path_request_handler(struct net_device *ndev,
6650*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
6651*4882a593Smuzhiyun 	uint8 *ndp_instance_id)
6652*4882a593Smuzhiyun {
6653*4882a593Smuzhiyun 	s32 ret = BCME_OK;
6654*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
6655*4882a593Smuzhiyun 	wl_nan_dp_req_t *datareq = NULL;
6656*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
6657*4882a593Smuzhiyun 	uint16 buflen_avail;
6658*4882a593Smuzhiyun 	uint8 *pxtlv;
6659*4882a593Smuzhiyun 	struct wireless_dev *wdev;
6660*4882a593Smuzhiyun 	uint16 nan_buf_size;
6661*4882a593Smuzhiyun 	uint8 *resp_buf = NULL;
6662*4882a593Smuzhiyun 	/* Considering fixed params */
6663*4882a593Smuzhiyun 	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
6664*4882a593Smuzhiyun 		OFFSETOF(wl_nan_dp_req_t, tlv_params);
6665*4882a593Smuzhiyun 	data_size = ALIGN_SIZE(data_size, 4);
6666*4882a593Smuzhiyun 
6667*4882a593Smuzhiyun 	ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(cfg, &data_size, cmd_data);
6668*4882a593Smuzhiyun 	if (unlikely(ret)) {
6669*4882a593Smuzhiyun 		WL_ERR(("Failed to get aligned size of optional params\n"));
6670*4882a593Smuzhiyun 		goto fail;
6671*4882a593Smuzhiyun 	}
6672*4882a593Smuzhiyun 
6673*4882a593Smuzhiyun 	nan_buf_size = data_size;
6674*4882a593Smuzhiyun 	NAN_DBG_ENTER();
6675*4882a593Smuzhiyun 
6676*4882a593Smuzhiyun 	mutex_lock(&cfg->if_sync);
6677*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
6678*4882a593Smuzhiyun #ifdef WL_IFACE_MGMT
6679*4882a593Smuzhiyun 	if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
6680*4882a593Smuzhiyun 		WL_ERR(("Conflicting iface found to be active\n"));
6681*4882a593Smuzhiyun 		ret = BCME_UNSUPPORTED;
6682*4882a593Smuzhiyun 		goto fail;
6683*4882a593Smuzhiyun 	}
6684*4882a593Smuzhiyun #endif /* WL_IFACE_MGMT */
6685*4882a593Smuzhiyun 
6686*4882a593Smuzhiyun #ifdef RTT_SUPPORT
6687*4882a593Smuzhiyun 	/* cancel any ongoing RTT session with peer
6688*4882a593Smuzhiyun 	* as we do not support DP and RNG to the same peer
6689*4882a593Smuzhiyun 	*/
6690*4882a593Smuzhiyun 	wl_cfgnan_handle_dp_ranging_concurrency(cfg, &cmd_data->mac_addr,
6691*4882a593Smuzhiyun 		RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
6692*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
6693*4882a593Smuzhiyun 
6694*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, data_size);
6695*4882a593Smuzhiyun 	if (!nan_buf) {
6696*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
6697*4882a593Smuzhiyun 		ret = BCME_NOMEM;
6698*4882a593Smuzhiyun 		goto fail;
6699*4882a593Smuzhiyun 	}
6700*4882a593Smuzhiyun 
6701*4882a593Smuzhiyun 	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
6702*4882a593Smuzhiyun 	if (!resp_buf) {
6703*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
6704*4882a593Smuzhiyun 		ret = BCME_NOMEM;
6705*4882a593Smuzhiyun 		goto fail;
6706*4882a593Smuzhiyun 	}
6707*4882a593Smuzhiyun 
6708*4882a593Smuzhiyun 	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
6709*4882a593Smuzhiyun 			cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
6710*4882a593Smuzhiyun 	if (unlikely(ret)) {
6711*4882a593Smuzhiyun 		WL_ERR(("Failed to set avail value with type local\n"));
6712*4882a593Smuzhiyun 		goto fail;
6713*4882a593Smuzhiyun 	}
6714*4882a593Smuzhiyun 
6715*4882a593Smuzhiyun 	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
6716*4882a593Smuzhiyun 			cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
6717*4882a593Smuzhiyun 	if (unlikely(ret)) {
6718*4882a593Smuzhiyun 		WL_ERR(("Failed to set avail value with type ndc\n"));
6719*4882a593Smuzhiyun 		goto fail;
6720*4882a593Smuzhiyun 	}
6721*4882a593Smuzhiyun 
6722*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
6723*4882a593Smuzhiyun 	nan_buf->count = 0;
6724*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
6725*4882a593Smuzhiyun 
6726*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
6727*4882a593Smuzhiyun 	datareq = (wl_nan_dp_req_t *)(sub_cmd->data);
6728*4882a593Smuzhiyun 
6729*4882a593Smuzhiyun 	/* setting default data path type to unicast */
6730*4882a593Smuzhiyun 	datareq->type = WL_NAN_DP_TYPE_UNICAST;
6731*4882a593Smuzhiyun 
6732*4882a593Smuzhiyun 	if (cmd_data->pub_id) {
6733*4882a593Smuzhiyun 		datareq->pub_id = cmd_data->pub_id;
6734*4882a593Smuzhiyun 	}
6735*4882a593Smuzhiyun 
6736*4882a593Smuzhiyun 	if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
6737*4882a593Smuzhiyun 		ret = memcpy_s(&datareq->peer_mac, ETHER_ADDR_LEN,
6738*4882a593Smuzhiyun 				&cmd_data->mac_addr, ETHER_ADDR_LEN);
6739*4882a593Smuzhiyun 		if (ret != BCME_OK) {
6740*4882a593Smuzhiyun 			WL_ERR(("Failed to copy ether addr provided\n"));
6741*4882a593Smuzhiyun 			goto fail;
6742*4882a593Smuzhiyun 		}
6743*4882a593Smuzhiyun 	} else {
6744*4882a593Smuzhiyun 		WL_ERR(("Invalid ether addr provided\n"));
6745*4882a593Smuzhiyun 		ret = BCME_BADARG;
6746*4882a593Smuzhiyun 		goto fail;
6747*4882a593Smuzhiyun 	}
6748*4882a593Smuzhiyun 
6749*4882a593Smuzhiyun 	/* Retrieve mac from given iface name */
6750*4882a593Smuzhiyun 	wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
6751*4882a593Smuzhiyun 		(char *)cmd_data->ndp_iface);
6752*4882a593Smuzhiyun 	if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
6753*4882a593Smuzhiyun 		ret = -EINVAL;
6754*4882a593Smuzhiyun 		WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
6755*4882a593Smuzhiyun 			(char *)cmd_data->ndp_iface));
6756*4882a593Smuzhiyun 		goto fail;
6757*4882a593Smuzhiyun 	}
6758*4882a593Smuzhiyun 
6759*4882a593Smuzhiyun 	if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
6760*4882a593Smuzhiyun 		ret = memcpy_s(&datareq->ndi, ETHER_ADDR_LEN,
6761*4882a593Smuzhiyun 				wdev->netdev->dev_addr, ETHER_ADDR_LEN);
6762*4882a593Smuzhiyun 		if (ret != BCME_OK) {
6763*4882a593Smuzhiyun 			WL_ERR(("Failed to copy ether addr provided\n"));
6764*4882a593Smuzhiyun 			goto fail;
6765*4882a593Smuzhiyun 		}
6766*4882a593Smuzhiyun 		WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
6767*4882a593Smuzhiyun 			__FUNCTION__, MAC2STRDBG(datareq->ndi.octet)));
6768*4882a593Smuzhiyun 	} else {
6769*4882a593Smuzhiyun 		WL_ERR(("Invalid NDI addr retrieved\n"));
6770*4882a593Smuzhiyun 		ret = BCME_BADARG;
6771*4882a593Smuzhiyun 		goto fail;
6772*4882a593Smuzhiyun 	}
6773*4882a593Smuzhiyun 
6774*4882a593Smuzhiyun 	datareq->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
6775*4882a593Smuzhiyun 	datareq->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
6776*4882a593Smuzhiyun 
6777*4882a593Smuzhiyun 	/* Fill the sub_command block */
6778*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAREQ);
6779*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
6780*4882a593Smuzhiyun 		OFFSETOF(wl_nan_dp_req_t, tlv_params);
6781*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
6782*4882a593Smuzhiyun 	pxtlv = (uint8 *)&datareq->tlv_params;
6783*4882a593Smuzhiyun 
6784*4882a593Smuzhiyun 	nan_buf_size -= (sub_cmd->len +
6785*4882a593Smuzhiyun 			OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
6786*4882a593Smuzhiyun 	buflen_avail = nan_buf_size;
6787*4882a593Smuzhiyun 
6788*4882a593Smuzhiyun 	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
6789*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6790*4882a593Smuzhiyun 				WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
6791*4882a593Smuzhiyun 				cmd_data->svc_info.data,
6792*4882a593Smuzhiyun 				BCM_XTLV_OPTION_ALIGN32);
6793*4882a593Smuzhiyun 		if (ret != BCME_OK) {
6794*4882a593Smuzhiyun 			WL_ERR(("unable to process svc_spec_info: %d\n", ret));
6795*4882a593Smuzhiyun 			goto fail;
6796*4882a593Smuzhiyun 		}
6797*4882a593Smuzhiyun 		/* If NDPE is enabled, duplicate svc_info and send it as part of the NDPE
6798*4882a593Smuzhiyun 		 * TLV list as well as the SD SVC INFO TLV: FW treats the two as different
6799*4882a593Smuzhiyun 		 * entities, whereas the framework passes both in the same variable
6800*4882a593Smuzhiyun 		 * (cmd_data->svc_info). FW decides which one to use based on the
6801*4882a593Smuzhiyun 		 * peer's capability (NDPE capable or not).
6802*4882a593Smuzhiyun 		 */
6803*4882a593Smuzhiyun 		if (cfg->nancfg->ndpe_enabled) {
6804*4882a593Smuzhiyun 			ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6805*4882a593Smuzhiyun 					WL_NAN_XTLV_SD_NDPE_TLV_LIST, cmd_data->svc_info.dlen,
6806*4882a593Smuzhiyun 					cmd_data->svc_info.data,
6807*4882a593Smuzhiyun 					BCM_XTLV_OPTION_ALIGN32);
6808*4882a593Smuzhiyun 			if (ret != BCME_OK) {
6809*4882a593Smuzhiyun 				WL_ERR(("unable to process NDPE TLV list: %d\n", ret));
6810*4882a593Smuzhiyun 				goto fail;
6811*4882a593Smuzhiyun 			}
6812*4882a593Smuzhiyun 		}
6813*4882a593Smuzhiyun 		datareq->flags |= WL_NAN_DP_FLAG_SVC_INFO;
6814*4882a593Smuzhiyun 	}
6815*4882a593Smuzhiyun 
6816*4882a593Smuzhiyun 	/* Security elements */
6817*4882a593Smuzhiyun 
6818*4882a593Smuzhiyun 	if (cmd_data->csid) {
6819*4882a593Smuzhiyun 		WL_TRACE(("Cipher suite type is present, pack it\n"));
6820*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6821*4882a593Smuzhiyun 				WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
6822*4882a593Smuzhiyun 				(uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
6823*4882a593Smuzhiyun 		if (unlikely(ret)) {
6824*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
6825*4882a593Smuzhiyun 			goto fail;
6826*4882a593Smuzhiyun 		}
6827*4882a593Smuzhiyun 	}
6828*4882a593Smuzhiyun 
6829*4882a593Smuzhiyun 	if (cmd_data->ndp_cfg.security_cfg) {
6830*4882a593Smuzhiyun 		if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
6831*4882a593Smuzhiyun 			(cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
6832*4882a593Smuzhiyun 			if (cmd_data->key.data && cmd_data->key.dlen) {
6833*4882a593Smuzhiyun 				WL_TRACE(("optional pmk present, pack it\n"));
6834*4882a593Smuzhiyun 				ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6835*4882a593Smuzhiyun 					WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
6836*4882a593Smuzhiyun 					cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
6837*4882a593Smuzhiyun 				if (unlikely(ret)) {
6838*4882a593Smuzhiyun 					WL_ERR(("%s: fail to pack on WL_NAN_XTLV_CFG_SEC_PMK\n",
6839*4882a593Smuzhiyun 						__FUNCTION__));
6840*4882a593Smuzhiyun 					goto fail;
6841*4882a593Smuzhiyun 				}
6842*4882a593Smuzhiyun 			}
6843*4882a593Smuzhiyun 		} else {
6844*4882a593Smuzhiyun 			WL_ERR(("Invalid security key type\n"));
6845*4882a593Smuzhiyun 			ret = BCME_BADARG;
6846*4882a593Smuzhiyun 			goto fail;
6847*4882a593Smuzhiyun 		}
6848*4882a593Smuzhiyun 
6849*4882a593Smuzhiyun 		if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
6850*4882a593Smuzhiyun 				(cmd_data->svc_hash.data)) {
6851*4882a593Smuzhiyun 			WL_TRACE(("svc hash present, pack it\n"));
6852*4882a593Smuzhiyun 			ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6853*4882a593Smuzhiyun 					WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
6854*4882a593Smuzhiyun 					cmd_data->svc_hash.data, BCM_XTLV_OPTION_ALIGN32);
6855*4882a593Smuzhiyun 			if (ret != BCME_OK) {
6856*4882a593Smuzhiyun 				WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
6857*4882a593Smuzhiyun 						__FUNCTION__));
6858*4882a593Smuzhiyun 				goto fail;
6859*4882a593Smuzhiyun 			}
6860*4882a593Smuzhiyun 		} else {
6861*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
6862*4882a593Smuzhiyun 			/* check in cache */
6863*4882a593Smuzhiyun 			nan_disc_result_cache *cache;
6864*4882a593Smuzhiyun 			cache = wl_cfgnan_get_disc_result(cfg,
6865*4882a593Smuzhiyun 				datareq->pub_id, &datareq->peer_mac);
6866*4882a593Smuzhiyun 			if (!cache) {
6867*4882a593Smuzhiyun 				ret = BCME_ERROR;
6868*4882a593Smuzhiyun 				WL_ERR(("invalid svc hash data or length = %d\n",
6869*4882a593Smuzhiyun 					cmd_data->svc_hash.dlen));
6870*4882a593Smuzhiyun 				goto fail;
6871*4882a593Smuzhiyun 			}
6872*4882a593Smuzhiyun 			WL_TRACE(("svc hash present, pack it\n"));
6873*4882a593Smuzhiyun 			ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
6874*4882a593Smuzhiyun 					WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
6875*4882a593Smuzhiyun 					cache->svc_hash, BCM_XTLV_OPTION_ALIGN32);
6876*4882a593Smuzhiyun 			if (ret != BCME_OK) {
6877*4882a593Smuzhiyun 				WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
6878*4882a593Smuzhiyun 						__FUNCTION__));
6879*4882a593Smuzhiyun 				goto fail;
6880*4882a593Smuzhiyun 			}
6881*4882a593Smuzhiyun #else
6882*4882a593Smuzhiyun 			ret = BCME_ERROR;
6883*4882a593Smuzhiyun 			WL_ERR(("invalid svc hash data or length = %d\n",
6884*4882a593Smuzhiyun 					cmd_data->svc_hash.dlen));
6885*4882a593Smuzhiyun 			goto fail;
6886*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
6887*4882a593Smuzhiyun 		}
6888*4882a593Smuzhiyun 		/* If the Data req is for secure data connection */
6889*4882a593Smuzhiyun 		datareq->flags |= WL_NAN_DP_FLAG_SECURITY;
6890*4882a593Smuzhiyun 	}
6891*4882a593Smuzhiyun 
6892*4882a593Smuzhiyun 	sub_cmd->len += (buflen_avail - nan_buf_size);
6893*4882a593Smuzhiyun 	nan_buf->is_set = false;
6894*4882a593Smuzhiyun 	nan_buf->count++;
6895*4882a593Smuzhiyun 
6896*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
6897*4882a593Smuzhiyun 			&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
6898*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
6899*4882a593Smuzhiyun 		WL_ERR(("nan data path request handler failed, ret = %d,"
6900*4882a593Smuzhiyun 			" status %d, peer: " MACDBG "\n",
6901*4882a593Smuzhiyun 			ret, cmd_data->status, MAC2STRDBG(&(cmd_data->mac_addr))));
6902*4882a593Smuzhiyun 		goto fail;
6903*4882a593Smuzhiyun 	}
6904*4882a593Smuzhiyun 
6905*4882a593Smuzhiyun 	/* check the response buff */
6906*4882a593Smuzhiyun 	if (ret == BCME_OK) {
6907*4882a593Smuzhiyun 		ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
6908*4882a593Smuzhiyun 				ndp_instance_id, WL_NAN_CMD_DATA_DATAREQ);
6909*4882a593Smuzhiyun 		cmd_data->ndp_instance_id = *ndp_instance_id;
6910*4882a593Smuzhiyun 	}
6911*4882a593Smuzhiyun 	WL_INFORM_MEM(("[NAN] DP request successful (ndp_id:%d), peer: " MACDBG " \n",
6912*4882a593Smuzhiyun 		cmd_data->ndp_instance_id, MAC2STRDBG(&cmd_data->mac_addr)));
6913*4882a593Smuzhiyun 	/* Add peer to data ndp peer list */
6914*4882a593Smuzhiyun 	wl_cfgnan_data_add_peer(cfg, &datareq->peer_mac);
6915*4882a593Smuzhiyun 
6916*4882a593Smuzhiyun fail:
6917*4882a593Smuzhiyun 	if (nan_buf) {
6918*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, data_size);
6919*4882a593Smuzhiyun 	}
6920*4882a593Smuzhiyun 
6921*4882a593Smuzhiyun 	if (resp_buf) {
6922*4882a593Smuzhiyun 		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
6923*4882a593Smuzhiyun 	}
6924*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
6925*4882a593Smuzhiyun 	mutex_unlock(&cfg->if_sync);
6926*4882a593Smuzhiyun 	NAN_DBG_EXIT();
6927*4882a593Smuzhiyun 	return ret;
6928*4882a593Smuzhiyun }
6929*4882a593Smuzhiyun 
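/*
 * Responder-side counterpart of the request handler: packs a
 * WL_NAN_CMD_DATA_DATARESP sub-command carrying the accept/reject
 * status, the initiator NDI learnt from the incoming request event,
 * the local NDI and any optional service info/security TLVs, and
 * issues it to firmware.
 */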
6930*4882a593Smuzhiyun int
6931*4882a593Smuzhiyun wl_cfgnan_data_path_response_handler(struct net_device *ndev,
6932*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
6933*4882a593Smuzhiyun {
6934*4882a593Smuzhiyun 	s32 ret = BCME_OK;
6935*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
6936*4882a593Smuzhiyun 	wl_nan_dp_resp_t *dataresp = NULL;
6937*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
6938*4882a593Smuzhiyun 	uint16 buflen_avail;
6939*4882a593Smuzhiyun 	uint8 *pxtlv;
6940*4882a593Smuzhiyun 	struct wireless_dev *wdev;
6941*4882a593Smuzhiyun 	uint16 nan_buf_size;
6942*4882a593Smuzhiyun 	uint8 *resp_buf = NULL;
6943*4882a593Smuzhiyun 
6944*4882a593Smuzhiyun 	/* Considering fixed params */
6945*4882a593Smuzhiyun 	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
6946*4882a593Smuzhiyun 		OFFSETOF(wl_nan_dp_resp_t, tlv_params);
6947*4882a593Smuzhiyun 	data_size = ALIGN_SIZE(data_size, 4);
6948*4882a593Smuzhiyun 	ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(cfg, &data_size, cmd_data);
6949*4882a593Smuzhiyun 	if (unlikely(ret)) {
6950*4882a593Smuzhiyun 		WL_ERR(("Failed to get aligned size of optional params\n"));
6951*4882a593Smuzhiyun 		goto fail;
6952*4882a593Smuzhiyun 	}
6953*4882a593Smuzhiyun 	nan_buf_size = data_size;
6954*4882a593Smuzhiyun 
6955*4882a593Smuzhiyun 	NAN_DBG_ENTER();
6956*4882a593Smuzhiyun 
6957*4882a593Smuzhiyun 	mutex_lock(&cfg->if_sync);
6958*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
6959*4882a593Smuzhiyun #ifdef WL_IFACE_MGMT
6960*4882a593Smuzhiyun 	if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
6961*4882a593Smuzhiyun 		WL_ERR(("Conflicting iface found to be active\n"));
6962*4882a593Smuzhiyun 		ret = BCME_UNSUPPORTED;
6963*4882a593Smuzhiyun 		goto fail;
6964*4882a593Smuzhiyun 	}
6965*4882a593Smuzhiyun #endif /* WL_IFACE_MGMT */
6966*4882a593Smuzhiyun 
6967*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, data_size);
6968*4882a593Smuzhiyun 	if (!nan_buf) {
6969*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
6970*4882a593Smuzhiyun 		ret = BCME_NOMEM;
6971*4882a593Smuzhiyun 		goto fail;
6972*4882a593Smuzhiyun 	}
6973*4882a593Smuzhiyun 
6974*4882a593Smuzhiyun 	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
6975*4882a593Smuzhiyun 	if (!resp_buf) {
6976*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
6977*4882a593Smuzhiyun 		ret = BCME_NOMEM;
6978*4882a593Smuzhiyun 		goto fail;
6979*4882a593Smuzhiyun 	}
6980*4882a593Smuzhiyun 
6981*4882a593Smuzhiyun 	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
6982*4882a593Smuzhiyun 			cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
6983*4882a593Smuzhiyun 	if (unlikely(ret)) {
6984*4882a593Smuzhiyun 		WL_ERR(("Failed to set avail value with type local\n"));
6985*4882a593Smuzhiyun 		goto fail;
6986*4882a593Smuzhiyun 	}
6987*4882a593Smuzhiyun 
6988*4882a593Smuzhiyun 	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
6989*4882a593Smuzhiyun 			cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
6990*4882a593Smuzhiyun 	if (unlikely(ret)) {
6991*4882a593Smuzhiyun 		WL_ERR(("Failed to set avail value with type ndc\n"));
6992*4882a593Smuzhiyun 		goto fail;
6993*4882a593Smuzhiyun 	}
6994*4882a593Smuzhiyun 
6995*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
6996*4882a593Smuzhiyun 	nan_buf->count = 0;
6997*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
6998*4882a593Smuzhiyun 
6999*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
7000*4882a593Smuzhiyun 	dataresp = (wl_nan_dp_resp_t *)(sub_cmd->data);
7001*4882a593Smuzhiyun 
7002*4882a593Smuzhiyun 	/* Setting default data path type to unicast */
7003*4882a593Smuzhiyun 	dataresp->type = WL_NAN_DP_TYPE_UNICAST;
7004*4882a593Smuzhiyun 	/* Changing status value as per fw convention */
7005*4882a593Smuzhiyun 	dataresp->status = cmd_data->rsp_code ^= 1;
7006*4882a593Smuzhiyun 	dataresp->reason_code = 0;
7007*4882a593Smuzhiyun 
7008*4882a593Smuzhiyun 	/* ndp instance id must be from 1 to 255, 0 is reserved */
7009*4882a593Smuzhiyun 	if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
7010*4882a593Smuzhiyun 			cmd_data->ndp_instance_id > NAN_ID_MAX) {
7011*4882a593Smuzhiyun 		WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
7012*4882a593Smuzhiyun 		ret = BCME_BADARG;
7013*4882a593Smuzhiyun 		goto fail;
7014*4882a593Smuzhiyun 	}
7015*4882a593Smuzhiyun 	dataresp->ndp_id = cmd_data->ndp_instance_id;
7016*4882a593Smuzhiyun 
7017*4882a593Smuzhiyun 	/* Retrieved initiator ndi from NanDataPathRequestInd */
7018*4882a593Smuzhiyun 	if (!ETHER_ISNULLADDR(&cfg->nancfg->initiator_ndi.octet)) {
7019*4882a593Smuzhiyun 		ret = memcpy_s(&dataresp->mac_addr, ETHER_ADDR_LEN,
7020*4882a593Smuzhiyun 				&cfg->nancfg->initiator_ndi, ETHER_ADDR_LEN);
7021*4882a593Smuzhiyun 		if (ret != BCME_OK) {
7022*4882a593Smuzhiyun 			WL_ERR(("Failed to copy initiator ndi\n"));
7023*4882a593Smuzhiyun 			goto fail;
7024*4882a593Smuzhiyun 		}
7025*4882a593Smuzhiyun 	} else {
7026*4882a593Smuzhiyun 		WL_ERR(("Invalid ether addr retrieved\n"));
7027*4882a593Smuzhiyun 		ret = BCME_BADARG;
7028*4882a593Smuzhiyun 		goto fail;
7029*4882a593Smuzhiyun 	}
7030*4882a593Smuzhiyun 
7031*4882a593Smuzhiyun 	/* Interface is not mandatory when it is a reject from the framework */
7032*4882a593Smuzhiyun 	if (dataresp->status != WL_NAN_DP_STATUS_REJECTED) {
7033*4882a593Smuzhiyun #ifdef RTT_SUPPORT
7034*4882a593Smuzhiyun 		/* cancel any ongoing RTT session with peer
7035*4882a593Smuzhiyun 		* as we do not support DP and RNG to the same peer
7036*4882a593Smuzhiyun 		*/
7037*4882a593Smuzhiyun 		wl_cfgnan_handle_dp_ranging_concurrency(cfg, &cmd_data->mac_addr,
7038*4882a593Smuzhiyun 			RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
7039*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
7040*4882a593Smuzhiyun 		/* Retrieve mac from given iface name */
7041*4882a593Smuzhiyun 		wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
7042*4882a593Smuzhiyun 				(char *)cmd_data->ndp_iface);
7043*4882a593Smuzhiyun 		if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
7044*4882a593Smuzhiyun 			ret = -EINVAL;
7045*4882a593Smuzhiyun 			WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
7046*4882a593Smuzhiyun 				(char *)cmd_data->ndp_iface));
7047*4882a593Smuzhiyun 			goto fail;
7048*4882a593Smuzhiyun 		}
7049*4882a593Smuzhiyun 
7050*4882a593Smuzhiyun 		if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
7051*4882a593Smuzhiyun 			ret = memcpy_s(&dataresp->ndi, ETHER_ADDR_LEN,
7052*4882a593Smuzhiyun 					wdev->netdev->dev_addr, ETHER_ADDR_LEN);
7053*4882a593Smuzhiyun 			if (ret != BCME_OK) {
7054*4882a593Smuzhiyun 				WL_ERR(("Failed to copy responder ndi\n"));
7055*4882a593Smuzhiyun 				goto fail;
7056*4882a593Smuzhiyun 			}
7057*4882a593Smuzhiyun 			WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
7058*4882a593Smuzhiyun 					__FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
7059*4882a593Smuzhiyun 		} else {
7060*4882a593Smuzhiyun 			WL_ERR(("Invalid NDI addr retrieved\n"));
7061*4882a593Smuzhiyun 			ret = BCME_BADARG;
7062*4882a593Smuzhiyun 			goto fail;
7063*4882a593Smuzhiyun 		}
7064*4882a593Smuzhiyun 	}
7065*4882a593Smuzhiyun 
7066*4882a593Smuzhiyun 	dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
7067*4882a593Smuzhiyun 	dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
7068*4882a593Smuzhiyun 
7069*4882a593Smuzhiyun 	/* Fill the sub_command block */
7070*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATARESP);
7071*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
7072*4882a593Smuzhiyun 		OFFSETOF(wl_nan_dp_resp_t, tlv_params);
7073*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
7074*4882a593Smuzhiyun 	pxtlv = (uint8 *)&dataresp->tlv_params;
7075*4882a593Smuzhiyun 
7076*4882a593Smuzhiyun 	nan_buf_size -= (sub_cmd->len +
7077*4882a593Smuzhiyun 			OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
7078*4882a593Smuzhiyun 	buflen_avail = nan_buf_size;
7079*4882a593Smuzhiyun 
7080*4882a593Smuzhiyun 	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
7081*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7082*4882a593Smuzhiyun 				WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
7083*4882a593Smuzhiyun 				cmd_data->svc_info.data,
7084*4882a593Smuzhiyun 				BCM_XTLV_OPTION_ALIGN32);
7085*4882a593Smuzhiyun 		if (ret != BCME_OK) {
7086*4882a593Smuzhiyun 			WL_ERR(("unable to process svc_spec_info: %d\n", ret));
7087*4882a593Smuzhiyun 			goto fail;
7088*4882a593Smuzhiyun 		}
7089*4882a593Smuzhiyun 		/* If NDPE is enabled, duplicate svc_info and send it as part of the NDPE
7090*4882a593Smuzhiyun 		 * TLV list as well as the SD SVC INFO TLV: FW treats the two as different
7091*4882a593Smuzhiyun 		 * entities, whereas the framework passes both in the same variable
7092*4882a593Smuzhiyun 		 * (cmd_data->svc_info). FW decides which one to use based on the
7093*4882a593Smuzhiyun 		 * peer's capability (NDPE capable or not).
7094*4882a593Smuzhiyun 		 */
7095*4882a593Smuzhiyun 		if (cfg->nancfg->ndpe_enabled) {
7096*4882a593Smuzhiyun 			ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7097*4882a593Smuzhiyun 					WL_NAN_XTLV_SD_NDPE_TLV_LIST, cmd_data->svc_info.dlen,
7098*4882a593Smuzhiyun 					cmd_data->svc_info.data,
7099*4882a593Smuzhiyun 					BCM_XTLV_OPTION_ALIGN32);
7100*4882a593Smuzhiyun 			if (ret != BCME_OK) {
7101*4882a593Smuzhiyun 				WL_ERR(("unable to process NDPE TLV list: %d\n", ret));
7102*4882a593Smuzhiyun 				goto fail;
7103*4882a593Smuzhiyun 			}
7104*4882a593Smuzhiyun 		}
7105*4882a593Smuzhiyun 		dataresp->flags |= WL_NAN_DP_FLAG_SVC_INFO;
7106*4882a593Smuzhiyun 	}
7107*4882a593Smuzhiyun 
7108*4882a593Smuzhiyun 	/* Security elements */
7109*4882a593Smuzhiyun 	if (cmd_data->csid) {
7110*4882a593Smuzhiyun 		WL_TRACE(("Cipher suite type is present, pack it\n"));
7111*4882a593Smuzhiyun 		ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7112*4882a593Smuzhiyun 				WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
7113*4882a593Smuzhiyun 				(uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
7114*4882a593Smuzhiyun 		if (unlikely(ret)) {
7115*4882a593Smuzhiyun 			WL_ERR(("%s: fail to pack csid\n", __FUNCTION__));
7116*4882a593Smuzhiyun 			goto fail;
7117*4882a593Smuzhiyun 		}
7118*4882a593Smuzhiyun 	}
7119*4882a593Smuzhiyun 
7120*4882a593Smuzhiyun 	if (cmd_data->ndp_cfg.security_cfg) {
7121*4882a593Smuzhiyun 		if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
7122*4882a593Smuzhiyun 			(cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
7123*4882a593Smuzhiyun 			if (cmd_data->key.data && cmd_data->key.dlen) {
7124*4882a593Smuzhiyun 				WL_TRACE(("optional pmk present, pack it\n"));
7125*4882a593Smuzhiyun 				ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7126*4882a593Smuzhiyun 					WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
7127*4882a593Smuzhiyun 					cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
7128*4882a593Smuzhiyun 				if (unlikely(ret)) {
7129*4882a593Smuzhiyun 					WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
7130*4882a593Smuzhiyun 						__FUNCTION__));
7131*4882a593Smuzhiyun 					goto fail;
7132*4882a593Smuzhiyun 				}
7133*4882a593Smuzhiyun 			}
7134*4882a593Smuzhiyun 		} else {
7135*4882a593Smuzhiyun 			WL_ERR(("Invalid security key type\n"));
7136*4882a593Smuzhiyun 			ret = BCME_BADARG;
7137*4882a593Smuzhiyun 			goto fail;
7138*4882a593Smuzhiyun 		}
7139*4882a593Smuzhiyun 
7140*4882a593Smuzhiyun 		if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
7141*4882a593Smuzhiyun 				(cmd_data->svc_hash.data)) {
7142*4882a593Smuzhiyun 			WL_TRACE(("svc hash present, pack it\n"));
7143*4882a593Smuzhiyun 			ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
7144*4882a593Smuzhiyun 					WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
7145*4882a593Smuzhiyun 					cmd_data->svc_hash.data,
7146*4882a593Smuzhiyun 					BCM_XTLV_OPTION_ALIGN32);
7147*4882a593Smuzhiyun 			if (ret != BCME_OK) {
7148*4882a593Smuzhiyun 				WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
7149*4882a593Smuzhiyun 						__FUNCTION__));
7150*4882a593Smuzhiyun 				goto fail;
7151*4882a593Smuzhiyun 			}
7152*4882a593Smuzhiyun 		}
7153*4882a593Smuzhiyun 		/* If the Data resp is for secure data connection */
7154*4882a593Smuzhiyun 		dataresp->flags |= WL_NAN_DP_FLAG_SECURITY;
7155*4882a593Smuzhiyun 	}
7156*4882a593Smuzhiyun 
7157*4882a593Smuzhiyun 	sub_cmd->len += (buflen_avail - nan_buf_size);
7158*4882a593Smuzhiyun 
7159*4882a593Smuzhiyun 	nan_buf->is_set = false;
7160*4882a593Smuzhiyun 	nan_buf->count++;
7161*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
7162*4882a593Smuzhiyun 			&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
7163*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(cmd_data->status)) {
7164*4882a593Smuzhiyun 		WL_ERR(("nan data path response handler failed, error = %d, status %d\n",
7165*4882a593Smuzhiyun 				ret, cmd_data->status));
7166*4882a593Smuzhiyun 		goto fail;
7167*4882a593Smuzhiyun 	}
7168*4882a593Smuzhiyun 
7169*4882a593Smuzhiyun 	WL_INFORM_MEM(("[NAN] DP response successful (ndp_id:%d)\n", dataresp->ndp_id));
7170*4882a593Smuzhiyun 
7171*4882a593Smuzhiyun fail:
7172*4882a593Smuzhiyun 	if (nan_buf) {
7173*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, data_size);
7174*4882a593Smuzhiyun 	}
7175*4882a593Smuzhiyun 
7176*4882a593Smuzhiyun 	if (resp_buf) {
7177*4882a593Smuzhiyun 		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
7178*4882a593Smuzhiyun 	}
7179*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
7180*4882a593Smuzhiyun 	mutex_unlock(&cfg->if_sync);
7181*4882a593Smuzhiyun 
7182*4882a593Smuzhiyun 	NAN_DBG_EXIT();
7183*4882a593Smuzhiyun 	return ret;
7184*4882a593Smuzhiyun }
7185*4882a593Smuzhiyun 
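/*
 * Terminate an existing NDP: validates the instance id, then sends a
 * WL_NAN_CMD_DATA_DATAEND sub-command carrying the local NDP id.
 * Requires the bus to be up and NAN to be enabled; otherwise the call
 * is a no-op returning BCME_OK.
 */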
7186*4882a593Smuzhiyun int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
7187*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
7188*4882a593Smuzhiyun 	int *status)
7189*4882a593Smuzhiyun {
7190*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
7191*4882a593Smuzhiyun 	wl_nan_dp_end_t *dataend = NULL;
7192*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
7193*4882a593Smuzhiyun 	s32 ret = BCME_OK;
7194*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
7195*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
7196*4882a593Smuzhiyun 
7197*4882a593Smuzhiyun 	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
7198*4882a593Smuzhiyun 
7199*4882a593Smuzhiyun 	NAN_DBG_ENTER();
7200*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
7201*4882a593Smuzhiyun 
7202*4882a593Smuzhiyun 	if (!dhdp->up) {
7203*4882a593Smuzhiyun 		WL_ERR(("bus is already down, hence blocking nan dp end\n"));
7204*4882a593Smuzhiyun 		ret = BCME_OK;
7205*4882a593Smuzhiyun 		goto fail;
7206*4882a593Smuzhiyun 	}
7207*4882a593Smuzhiyun 
7208*4882a593Smuzhiyun 	if (!cfg->nancfg->nan_enable) {
7209*4882a593Smuzhiyun 		WL_ERR(("nan is not enabled, nan dp end blocked\n"));
7210*4882a593Smuzhiyun 		ret = BCME_OK;
7211*4882a593Smuzhiyun 		goto fail;
7212*4882a593Smuzhiyun 	}
7213*4882a593Smuzhiyun 
7214*4882a593Smuzhiyun 	/* ndp instance id must be from 1 to 255, 0 is reserved */
7215*4882a593Smuzhiyun 	if (ndp_instance_id < NAN_ID_MIN ||
7216*4882a593Smuzhiyun 		ndp_instance_id > NAN_ID_MAX) {
7217*4882a593Smuzhiyun 		WL_ERR(("Invalid ndp instance id: %d\n", ndp_instance_id));
7218*4882a593Smuzhiyun 		ret = BCME_BADARG;
7219*4882a593Smuzhiyun 		goto fail;
7220*4882a593Smuzhiyun 	}
7221*4882a593Smuzhiyun 
7222*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
7223*4882a593Smuzhiyun 	if (!nan_buf) {
7224*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
7225*4882a593Smuzhiyun 		ret = BCME_NOMEM;
7226*4882a593Smuzhiyun 		goto fail;
7227*4882a593Smuzhiyun 	}
7228*4882a593Smuzhiyun 
7229*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
7230*4882a593Smuzhiyun 	nan_buf->count = 0;
7231*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
7232*4882a593Smuzhiyun 
7233*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
7234*4882a593Smuzhiyun 	dataend = (wl_nan_dp_end_t *)(sub_cmd->data);
7235*4882a593Smuzhiyun 
7236*4882a593Smuzhiyun 	/* Fill sub_cmd block */
7237*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAEND);
7238*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) +
7239*4882a593Smuzhiyun 		sizeof(*dataend);
7240*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
7241*4882a593Smuzhiyun 
7242*4882a593Smuzhiyun 	dataend->lndp_id = ndp_instance_id;
7243*4882a593Smuzhiyun 
7244*4882a593Smuzhiyun 	/*
7245*4882a593Smuzhiyun 	 * Currently FW requires ndp_id and a reason code to end the data path,
7246*4882a593Smuzhiyun 	 * but wifi_nan.h takes ndp_instances_count and ndp_id.
7247*4882a593Smuzhiyun 	 * Keep reason = accept always.
7248*4882a593Smuzhiyun 	 */
7249*4882a593Smuzhiyun 
7250*4882a593Smuzhiyun 	dataend->status = 1;
7251*4882a593Smuzhiyun 
7252*4882a593Smuzhiyun 	nan_buf->is_set = true;
7253*4882a593Smuzhiyun 	nan_buf->count++;
7254*4882a593Smuzhiyun 
7255*4882a593Smuzhiyun 	nan_buf_size -= (sub_cmd->len +
7256*4882a593Smuzhiyun 		OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
7257*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
7258*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
7259*4882a593Smuzhiyun 			status, (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
7260*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(*status)) {
7261*4882a593Smuzhiyun 		WL_ERR(("nan data path end handler failed, error = %d status %d\n",
7262*4882a593Smuzhiyun 			ret, *status));
7263*4882a593Smuzhiyun 		goto fail;
7264*4882a593Smuzhiyun 	}
7265*4882a593Smuzhiyun 	WL_INFORM_MEM(("[NAN] DP end successful (ndp_id:%d)\n",
7266*4882a593Smuzhiyun 		dataend->lndp_id));
7267*4882a593Smuzhiyun fail:
7268*4882a593Smuzhiyun 	if (nan_buf) {
7269*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
7270*4882a593Smuzhiyun 	}
7271*4882a593Smuzhiyun 
7272*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
7273*4882a593Smuzhiyun 	NAN_DBG_EXIT();
7274*4882a593Smuzhiyun 	return ret;
7275*4882a593Smuzhiyun }
7276*4882a593Smuzhiyun 
7277*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
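/*
 * Fill the security info response used by HAL during NDP setup.
 * For a datapath request, look up the cached discovery result for
 * (pub_id, peer NMI) and return its service hash; for a datapath
 * response, return the service hash of the matching service instance
 * (if any) plus the local NMI.
 */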
7278*4882a593Smuzhiyun int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
7279*4882a593Smuzhiyun 		nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp)
7280*4882a593Smuzhiyun {
7281*4882a593Smuzhiyun 	s32 ret = BCME_NOTFOUND;
7282*4882a593Smuzhiyun 	/* check in cache */
7283*4882a593Smuzhiyun 	nan_disc_result_cache *disc_cache = NULL;
7284*4882a593Smuzhiyun 	nan_svc_info_t *svc_info = NULL;
7285*4882a593Smuzhiyun 
7286*4882a593Smuzhiyun 	NAN_DBG_ENTER();
7287*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
7288*4882a593Smuzhiyun 
7289*4882a593Smuzhiyun 	if (!cfg->nancfg->nan_init_state) {
7290*4882a593Smuzhiyun 		WL_ERR(("nan is not initialized/nmi doesn't exist\n"));
7291*4882a593Smuzhiyun 		ret = BCME_NOTENABLED;
7292*4882a593Smuzhiyun 		goto fail;
7293*4882a593Smuzhiyun 	}
7294*4882a593Smuzhiyun 
7295*4882a593Smuzhiyun 	/* datapath request context */
7296*4882a593Smuzhiyun 	if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
7297*4882a593Smuzhiyun 		disc_cache = wl_cfgnan_get_disc_result(cfg,
7298*4882a593Smuzhiyun 			cmd_data->pub_id, &cmd_data->mac_addr);
7299*4882a593Smuzhiyun 		WL_DBG(("datapath request: PUB ID = %d\n",
7300*4882a593Smuzhiyun 			cmd_data->pub_id));
7301*4882a593Smuzhiyun 		if (disc_cache) {
7302*4882a593Smuzhiyun 			(void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
7303*4882a593Smuzhiyun 					disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
7304*4882a593Smuzhiyun 			ret = BCME_OK;
7305*4882a593Smuzhiyun 		} else {
7306*4882a593Smuzhiyun 			WL_ERR(("disc_cache is NULL\n"));
7307*4882a593Smuzhiyun 			goto fail;
7308*4882a593Smuzhiyun 		}
7309*4882a593Smuzhiyun 	}
7310*4882a593Smuzhiyun 
7311*4882a593Smuzhiyun 	/* datapath response context */
7312*4882a593Smuzhiyun 	if (cmd_data->ndp_instance_id) {
7313*4882a593Smuzhiyun 		WL_DBG(("datapath response: NDP ID = %d\n",
7314*4882a593Smuzhiyun 			cmd_data->ndp_instance_id));
7315*4882a593Smuzhiyun 		svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
7316*4882a593Smuzhiyun 		/* Note: svc_info will not be present in OOB cases.
7317*4882a593Smuzhiyun 		* In such cases send the NMI alone and let HAL handle it
7318*4882a593Smuzhiyun 		* if svc_hash is mandatory.
7319*4882a593Smuzhiyun 		*/
7320*4882a593Smuzhiyun 		if (svc_info) {
7321*4882a593Smuzhiyun 			WL_DBG(("svc hash present, pack it\n"));
7322*4882a593Smuzhiyun 			(void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
7323*4882a593Smuzhiyun 					svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
7324*4882a593Smuzhiyun 		} else {
7325*4882a593Smuzhiyun 			WL_INFORM_MEM(("svc_info not present, assuming OOB DP\n"));
7326*4882a593Smuzhiyun 		}
7327*4882a593Smuzhiyun 		/* Always send NMI */
7328*4882a593Smuzhiyun 		(void)memcpy_s(nan_req_resp->pub_nmi, ETHER_ADDR_LEN,
7329*4882a593Smuzhiyun 				cfg->nancfg->nan_nmi_mac, ETHER_ADDR_LEN);
7330*4882a593Smuzhiyun 		ret = BCME_OK;
7331*4882a593Smuzhiyun 	}
7332*4882a593Smuzhiyun fail:
7333*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
7334*4882a593Smuzhiyun 	NAN_DBG_EXIT();
7335*4882a593Smuzhiyun 	return ret;
7336*4882a593Smuzhiyun }
7337*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
7338*4882a593Smuzhiyun 
7339*4882a593Smuzhiyun #ifdef RTT_SUPPORT
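/*
 * Copy a cached discovery result into a nan_event_data_t, duplicating
 * the variable-length service info and tx match filter into freshly
 * allocated buffers owned by the event data.
 */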
7340*4882a593Smuzhiyun static s32 wl_nan_cache_to_event_data(nan_disc_result_cache *cache,
7341*4882a593Smuzhiyun 	nan_event_data_t *nan_event_data, osl_t *osh)
7342*4882a593Smuzhiyun {
7343*4882a593Smuzhiyun 	s32 ret = BCME_OK;
7344*4882a593Smuzhiyun 	NAN_DBG_ENTER();
7345*4882a593Smuzhiyun 
7346*4882a593Smuzhiyun 	nan_event_data->pub_id = cache->pub_id;
7347*4882a593Smuzhiyun 	nan_event_data->sub_id = cache->sub_id;
7348*4882a593Smuzhiyun 	nan_event_data->publish_rssi = cache->publish_rssi;
7349*4882a593Smuzhiyun 	nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
7350*4882a593Smuzhiyun 	ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
7351*4882a593Smuzhiyun 			&cache->peer, ETHER_ADDR_LEN);
7352*4882a593Smuzhiyun 	if (ret != BCME_OK) {
7353*4882a593Smuzhiyun 		WL_ERR(("Failed to copy cached peer nan nmi\n"));
7354*4882a593Smuzhiyun 		goto fail;
7355*4882a593Smuzhiyun 	}
7356*4882a593Smuzhiyun 
7357*4882a593Smuzhiyun 	if (cache->svc_info.dlen && cache->svc_info.data) {
7358*4882a593Smuzhiyun 		nan_event_data->svc_info.dlen = cache->svc_info.dlen;
7359*4882a593Smuzhiyun 		nan_event_data->svc_info.data =
7360*4882a593Smuzhiyun 			MALLOCZ(osh, nan_event_data->svc_info.dlen);
7361*4882a593Smuzhiyun 		if (!nan_event_data->svc_info.data) {
7362*4882a593Smuzhiyun 			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
7363*4882a593Smuzhiyun 			nan_event_data->svc_info.dlen = 0;
7364*4882a593Smuzhiyun 			ret = -ENOMEM;
7365*4882a593Smuzhiyun 			goto fail;
7366*4882a593Smuzhiyun 		}
7367*4882a593Smuzhiyun 		ret = memcpy_s(nan_event_data->svc_info.data, nan_event_data->svc_info.dlen,
7368*4882a593Smuzhiyun 			cache->svc_info.data, cache->svc_info.dlen);
7369*4882a593Smuzhiyun 		if (ret != BCME_OK) {
7370*4882a593Smuzhiyun 			WL_ERR(("Failed to copy cached svc info data\n"));
7371*4882a593Smuzhiyun 			goto fail;
7372*4882a593Smuzhiyun 		}
7373*4882a593Smuzhiyun 	}
7374*4882a593Smuzhiyun 	if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
7375*4882a593Smuzhiyun 		nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
7376*4882a593Smuzhiyun 		nan_event_data->tx_match_filter.data =
7377*4882a593Smuzhiyun 			MALLOCZ(osh, nan_event_data->tx_match_filter.dlen);
7378*4882a593Smuzhiyun 		if (!nan_event_data->tx_match_filter.data) {
7379*4882a593Smuzhiyun 			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
7380*4882a593Smuzhiyun 			nan_event_data->tx_match_filter.dlen = 0;
7381*4882a593Smuzhiyun 			ret = -ENOMEM;
7382*4882a593Smuzhiyun 			goto fail;
7383*4882a593Smuzhiyun 		}
7384*4882a593Smuzhiyun 		ret = memcpy_s(nan_event_data->tx_match_filter.data,
7385*4882a593Smuzhiyun 				nan_event_data->tx_match_filter.dlen,
7386*4882a593Smuzhiyun 				cache->tx_match_filter.data, cache->tx_match_filter.dlen);
7387*4882a593Smuzhiyun 		if (ret != BCME_OK) {
7388*4882a593Smuzhiyun 			WL_ERR(("Failed to copy cached tx match filter data\n"));
7389*4882a593Smuzhiyun 			goto fail;
7390*4882a593Smuzhiyun 		}
7391*4882a593Smuzhiyun 	}
7392*4882a593Smuzhiyun fail:
7393*4882a593Smuzhiyun 	NAN_DBG_EXIT();
7394*4882a593Smuzhiyun 	return ret;
7395*4882a593Smuzhiyun }
7396*4882a593Smuzhiyun 
7397*4882a593Smuzhiyun /*
7398*4882a593Smuzhiyun  * API to cancel the ranging for given instance
7399*4882a593Smuzhiyun  * API to cancel ranging for the given instance.
7400*4882a593Smuzhiyun  * For a geofence initiator, suspend ranging.
7401*4882a593Smuzhiyun  * For a directed RTT initiator, report a fail result, cancel ranging
7402*4882a593Smuzhiyun  * and clear the ranging instance.
7403*4882a593Smuzhiyun  * For a responder, cancel ranging and clear the ranging instance.
7404*4882a593Smuzhiyun static s32
7405*4882a593Smuzhiyun wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
7406*4882a593Smuzhiyun 		nan_ranging_inst_t *rng_inst, int reason)
7407*4882a593Smuzhiyun {
7408*4882a593Smuzhiyun 	uint32 status = 0;
7409*4882a593Smuzhiyun 	int err = BCME_OK;
7410*4882a593Smuzhiyun 	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
7411*4882a593Smuzhiyun 	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
7412*4882a593Smuzhiyun 
7413*4882a593Smuzhiyun 	if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE &&
7414*4882a593Smuzhiyun 		rng_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
7415*4882a593Smuzhiyun 		err = wl_cfgnan_suspend_geofence_rng_session(ndev,
7416*4882a593Smuzhiyun 				&rng_inst->peer_addr, reason, 0);
7417*4882a593Smuzhiyun 	} else {
7418*4882a593Smuzhiyun 		if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
7419*4882a593Smuzhiyun 			dhd_rtt_handle_nan_rtt_session_end(dhdp,
7420*4882a593Smuzhiyun 				&rng_inst->peer_addr);
7421*4882a593Smuzhiyun 		}
7422*4882a593Smuzhiyun 		/* responder */
7423*4882a593Smuzhiyun 		err = wl_cfgnan_cancel_ranging(ndev, cfg,
7424*4882a593Smuzhiyun 			&rng_inst->range_id,
7425*4882a593Smuzhiyun 			NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
7426*4882a593Smuzhiyun 		wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
7427*4882a593Smuzhiyun 	}
7428*4882a593Smuzhiyun 
7429*4882a593Smuzhiyun 	if (err) {
7430*4882a593Smuzhiyun 		WL_ERR(("Failed to stop ranging with peer, err : %d\n", err));
7431*4882a593Smuzhiyun 	}
7432*4882a593Smuzhiyun 
7433*4882a593Smuzhiyun 	return err;
7434*4882a593Smuzhiyun }
7435*4882a593Smuzhiyun 
7436*4882a593Smuzhiyun /*
7437*4882a593Smuzhiyun  * Handle NDP-Ranging concurrency
7438*4882a593Smuzhiyun  * for an incoming DP request:
7439*4882a593Smuzhiyun  * cancel ranging with the same peer;
7440*4882a593Smuzhiyun  * cancel ranging setups in progress
7441*4882a593Smuzhiyun  * for all other peers.
7442*4882a593Smuzhiyun  */
7443*4882a593Smuzhiyun static s32
7444*4882a593Smuzhiyun wl_cfgnan_handle_dp_ranging_concurrency(struct bcm_cfg80211 *cfg,
7445*4882a593Smuzhiyun 		struct ether_addr *peer, int reason)
7446*4882a593Smuzhiyun {
7447*4882a593Smuzhiyun 	uint8 i = 0;
7448*4882a593Smuzhiyun 	nan_ranging_inst_t *cur_rng_inst = NULL;
7449*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst = NULL;
7450*4882a593Smuzhiyun 	int err = BCME_OK;
7451*4882a593Smuzhiyun 
7452*4882a593Smuzhiyun 	/*
7453*4882a593Smuzhiyun 	 * FixMe:
7454*4882a593Smuzhiyun 	 * DP-Ranging concurrency will need more handling
7455*4882a593Smuzhiyun 	 * than what has been addressed so far:
7456*4882a593Smuzhiyun 	 * poll the max ranging sessions, update it and
7457*4882a593Smuzhiyun 	 * take relevant actions accordingly.
7458*4882a593Smuzhiyun 	 */
7459*4882a593Smuzhiyun 
7460*4882a593Smuzhiyun 	cur_rng_inst = wl_cfgnan_check_for_ranging(cfg, peer);
7461*4882a593Smuzhiyun 
7462*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
7463*4882a593Smuzhiyun 		rng_inst = &cfg->nancfg->nan_ranging_info[i];
7464*4882a593Smuzhiyun 		if (rng_inst->in_use) {
7465*4882a593Smuzhiyun 			if ((cur_rng_inst && cur_rng_inst == rng_inst) &&
7466*4882a593Smuzhiyun 				NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) {
7467*4882a593Smuzhiyun 				err = wl_cfgnan_clear_peer_ranging(cfg, rng_inst,
7468*4882a593Smuzhiyun 						RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
7469*4882a593Smuzhiyun 			}
7470*4882a593Smuzhiyun 		}
7471*4882a593Smuzhiyun 	}
7472*4882a593Smuzhiyun 
7473*4882a593Smuzhiyun 	if (err) {
7474*4882a593Smuzhiyun 		WL_ERR(("Failed to handle dp ranging concurrency, err : %d\n", err));
7475*4882a593Smuzhiyun 	}
7476*4882a593Smuzhiyun 
7477*4882a593Smuzhiyun 	return err;
7478*4882a593Smuzhiyun }
7479*4882a593Smuzhiyun 
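/*
 * Returns TRUE if the ranging instance associated with the given peer
 * (if any) is currently flagged for role concurrency.
 */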
7480*4882a593Smuzhiyun bool
7481*4882a593Smuzhiyun wl_cfgnan_check_role_concurrency(struct bcm_cfg80211 *cfg,
7482*4882a593Smuzhiyun 	struct ether_addr *peer_addr)
7483*4882a593Smuzhiyun {
7484*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst = NULL;
7485*4882a593Smuzhiyun 	bool role_conc_status = FALSE;
7486*4882a593Smuzhiyun 
7487*4882a593Smuzhiyun 	rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
7488*4882a593Smuzhiyun 	if (rng_inst) {
7489*4882a593Smuzhiyun 		role_conc_status = rng_inst->role_concurrency_status;
7490*4882a593Smuzhiyun 	}
7491*4882a593Smuzhiyun 
7492*4882a593Smuzhiyun 	return role_conc_status;
7493*4882a593Smuzhiyun }
7494*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
7495*4882a593Smuzhiyun 
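/*
 * Parse the common datapath event (WL_NAN_XTLV_DATA_DP_INFO TLV):
 * copy the NDP/multicast id, publisher id, security flag, peer NMI and
 * initiator NDI into nan_event_data, store the initiator NDI for a later
 * datapath response, and record the offset of the optional TLVs that
 * follow the fixed portion of the event.
 */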
7496*4882a593Smuzhiyun static s32
7497*4882a593Smuzhiyun wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
7498*4882a593Smuzhiyun 		uint16 data_len, uint16 *tlvs_offset,
7499*4882a593Smuzhiyun 		uint16 *nan_opts_len, uint32 event_num,
7500*4882a593Smuzhiyun 		int *hal_event_id, nan_event_data_t *nan_event_data)
7501*4882a593Smuzhiyun {
7502*4882a593Smuzhiyun 	s32 ret = BCME_OK;
7503*4882a593Smuzhiyun 	uint8 i;
7504*4882a593Smuzhiyun 	wl_nan_ev_datapath_cmn_t *ev_dp;
7505*4882a593Smuzhiyun 	nan_svc_info_t *svc_info;
7506*4882a593Smuzhiyun 	bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
7507*4882a593Smuzhiyun #ifdef RTT_SUPPORT
7508*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst = NULL;
7509*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
7510*4882a593Smuzhiyun 
7511*4882a593Smuzhiyun 	if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
7512*4882a593Smuzhiyun 		ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
7513*4882a593Smuzhiyun 		NAN_DBG_ENTER();
7514*4882a593Smuzhiyun 
7515*4882a593Smuzhiyun 		BCM_REFERENCE(svc_info);
7516*4882a593Smuzhiyun 		BCM_REFERENCE(i);
7517*4882a593Smuzhiyun 		/* Mapping to common struct between DHD and HAL */
7518*4882a593Smuzhiyun 		WL_TRACE(("Event type: %d\n", ev_dp->type));
7519*4882a593Smuzhiyun 		nan_event_data->type = ev_dp->type;
7520*4882a593Smuzhiyun 		WL_TRACE(("pub_id: %d\n", ev_dp->pub_id));
7521*4882a593Smuzhiyun 		nan_event_data->pub_id = ev_dp->pub_id;
7522*4882a593Smuzhiyun 		WL_TRACE(("security: %d\n", ev_dp->security));
7523*4882a593Smuzhiyun 		nan_event_data->security = ev_dp->security;
7524*4882a593Smuzhiyun 
7525*4882a593Smuzhiyun 		/* Store initiator_ndi, required for data_path_response_request */
7526*4882a593Smuzhiyun 		ret = memcpy_s(&cfg->nancfg->initiator_ndi, ETHER_ADDR_LEN,
7527*4882a593Smuzhiyun 				&ev_dp->initiator_ndi, ETHER_ADDR_LEN);
7528*4882a593Smuzhiyun 		if (ret != BCME_OK) {
7529*4882a593Smuzhiyun 			WL_ERR(("Failed to copy event's initiator addr\n"));
7530*4882a593Smuzhiyun 			goto fail;
7531*4882a593Smuzhiyun 		}
7532*4882a593Smuzhiyun 		if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
7533*4882a593Smuzhiyun 			WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
7534*4882a593Smuzhiyun 			nan_event_data->ndp_id = ev_dp->ndp_id;
7535*4882a593Smuzhiyun 			WL_TRACE(("INITIATOR_NDI: " MACDBG "\n",
7536*4882a593Smuzhiyun 					MAC2STRDBG(ev_dp->initiator_ndi.octet)));
7537*4882a593Smuzhiyun 			WL_TRACE(("RESPONDER_NDI: " MACDBG "\n",
7538*4882a593Smuzhiyun 					MAC2STRDBG(ev_dp->responder_ndi.octet)));
7539*4882a593Smuzhiyun 			WL_TRACE(("PEER NMI: " MACDBG "\n",
7540*4882a593Smuzhiyun 					MAC2STRDBG(ev_dp->peer_nmi.octet)));
7541*4882a593Smuzhiyun 			ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
7542*4882a593Smuzhiyun 					&ev_dp->peer_nmi, ETHER_ADDR_LEN);
7543*4882a593Smuzhiyun 			if (ret != BCME_OK) {
7544*4882a593Smuzhiyun 				WL_ERR(("Failed to copy event's peer nmi\n"));
7545*4882a593Smuzhiyun 				goto fail;
7546*4882a593Smuzhiyun 			}
7547*4882a593Smuzhiyun 		} else {
7548*4882a593Smuzhiyun 			/* type is multicast */
7549*4882a593Smuzhiyun 			WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
7550*4882a593Smuzhiyun 			nan_event_data->ndp_id = ev_dp->mc_id;
7551*4882a593Smuzhiyun 			WL_TRACE(("PEER NMI: " MACDBG "\n",
7552*4882a593Smuzhiyun 					MAC2STRDBG(ev_dp->peer_nmi.octet)));
7553*4882a593Smuzhiyun 			ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
7554*4882a593Smuzhiyun 					&ev_dp->peer_nmi,
7555*4882a593Smuzhiyun 					ETHER_ADDR_LEN);
7556*4882a593Smuzhiyun 			if (ret != BCME_OK) {
7557*4882a593Smuzhiyun 				WL_ERR(("Failed to copy event's peer nmi\n"));
7558*4882a593Smuzhiyun 				goto fail;
7559*4882a593Smuzhiyun 			}
7560*4882a593Smuzhiyun 		}
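		/* Annotation (added note): the optional TLVs start right after the fixed
		 * portion of wl_nan_ev_datapath_cmn_t inside the enclosing XTLV, so the
		 * remaining (data_len - tlvs_offset) bytes are handed off as nan_opts for
		 * later TLV parsing.
		 */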
7561*4882a593Smuzhiyun 		*tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
7562*4882a593Smuzhiyun 			OFFSETOF(bcm_xtlv_t, data);
7563*4882a593Smuzhiyun 		*nan_opts_len = data_len - *tlvs_offset;
7564*4882a593Smuzhiyun 		if (event_num == WL_NAN_EVENT_PEER_DATAPATH_IND) {
7565*4882a593Smuzhiyun 			*hal_event_id = GOOGLE_NAN_EVENT_DATA_REQUEST;
7566*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
7567*4882a593Smuzhiyun 			ret = wl_cfgnan_svc_inst_add_ndp(cfg, nan_event_data->pub_id,
7568*4882a593Smuzhiyun 					nan_event_data->ndp_id);
7569*4882a593Smuzhiyun 			if (ret != BCME_OK) {
7570*4882a593Smuzhiyun 				goto fail;
7571*4882a593Smuzhiyun 			}
7572*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
7573*4882a593Smuzhiyun 			/* Add peer to data ndp peer list */
7574*4882a593Smuzhiyun 			wl_cfgnan_data_add_peer(cfg, &ev_dp->peer_nmi);
7575*4882a593Smuzhiyun #ifdef RTT_SUPPORT
7576*4882a593Smuzhiyun 			/* cancel any ongoing RTT session with peer
7577*4882a593Smuzhiyun 			 * as we do not support DP and RNG to the same peer
7578*4882a593Smuzhiyun 			 */
7579*4882a593Smuzhiyun 			wl_cfgnan_handle_dp_ranging_concurrency(cfg, &ev_dp->peer_nmi,
7580*4882a593Smuzhiyun 					RTT_GEO_SUSPN_PEER_NDP_TRIGGER);
7581*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
7582*4882a593Smuzhiyun 		} else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
7583*4882a593Smuzhiyun 			*hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
7584*4882a593Smuzhiyun 			if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
7585*4882a593Smuzhiyun 				ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
7586*4882a593Smuzhiyun 						&ev_dp->responder_ndi,
7587*4882a593Smuzhiyun 						ETHER_ADDR_LEN);
7588*4882a593Smuzhiyun 				if (ret != BCME_OK) {
7589*4882a593Smuzhiyun 					WL_ERR(("Failed to copy event's responder ndi\n"));
7590*4882a593Smuzhiyun 					goto fail;
7591*4882a593Smuzhiyun 				}
7592*4882a593Smuzhiyun 				WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
7593*4882a593Smuzhiyun 						MAC2STRDBG(ev_dp->responder_ndi.octet)));
7594*4882a593Smuzhiyun 				WL_TRACE(("Initiator status %d\n", nan_event_data->status));
7595*4882a593Smuzhiyun 			} else {
7596*4882a593Smuzhiyun 				ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
7597*4882a593Smuzhiyun 						&ev_dp->initiator_ndi,
7598*4882a593Smuzhiyun 						ETHER_ADDR_LEN);
7599*4882a593Smuzhiyun 				if (ret != BCME_OK) {
7600*4882a593Smuzhiyun 					WL_ERR(("Failed to copy event's responder ndi\n"));
7601*4882a593Smuzhiyun 					goto fail;
7602*4882a593Smuzhiyun 				}
7603*4882a593Smuzhiyun 				WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
7604*4882a593Smuzhiyun 						MAC2STRDBG(ev_dp->initiator_ndi.octet)));
7605*4882a593Smuzhiyun 			}
7606*4882a593Smuzhiyun 			if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
7607*4882a593Smuzhiyun 				nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
7608*4882a593Smuzhiyun 				wl_cfgnan_data_set_peer_dp_state(cfg, &ev_dp->peer_nmi,
7609*4882a593Smuzhiyun 					NAN_PEER_DP_CONNECTED);
7610*4882a593Smuzhiyun 				wl_cfgnan_update_dp_info(cfg, true, nan_event_data->ndp_id);
7611*4882a593Smuzhiyun 				wl_cfgnan_get_stats(cfg);
7612*4882a593Smuzhiyun 			} else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
7613*4882a593Smuzhiyun 				nan_event_data->status = NAN_DP_REQUEST_REJECT;
7614*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
7615*4882a593Smuzhiyun 				if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
7616*4882a593Smuzhiyun 					/* Only at the responder side:
7617*4882a593Smuzhiyun 					 * if the DP is ended,
7618*4882a593Smuzhiyun 					 * clear the resp ndp id from the svc info cache
7619*4882a593Smuzhiyun 					 */
7620*4882a593Smuzhiyun 					ret = wl_cfgnan_svc_inst_del_ndp(cfg,
7621*4882a593Smuzhiyun 							nan_event_data->pub_id,
7622*4882a593Smuzhiyun 							nan_event_data->ndp_id);
7623*4882a593Smuzhiyun 					if (ret != BCME_OK) {
7624*4882a593Smuzhiyun 						goto fail;
7625*4882a593Smuzhiyun 					}
7626*4882a593Smuzhiyun 				}
7627*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
7628*4882a593Smuzhiyun 				/* Remove peer from data ndp peer list */
7629*4882a593Smuzhiyun 				wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
7630*4882a593Smuzhiyun #ifdef RTT_SUPPORT
7631*4882a593Smuzhiyun 				rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
7632*4882a593Smuzhiyun 				if (rng_inst) {
7633*4882a593Smuzhiyun 					/* Trigger/Reset geofence RTT */
7634*4882a593Smuzhiyun 					wl_cfgnan_reset_geofence_ranging(cfg,
7635*4882a593Smuzhiyun 						rng_inst, RTT_SCHED_DP_REJECTED, TRUE);
7636*4882a593Smuzhiyun 				}
7637*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
7638*4882a593Smuzhiyun 			} else {
7639*4882a593Smuzhiyun 				WL_ERR(("%s:Status code = %x not expected\n",
7640*4882a593Smuzhiyun 						__FUNCTION__, ev_dp->status));
7641*4882a593Smuzhiyun 				ret = BCME_ERROR;
7642*4882a593Smuzhiyun 				goto fail;
7643*4882a593Smuzhiyun 			}
7644*4882a593Smuzhiyun 			WL_TRACE(("Responder status %d\n", nan_event_data->status));
7645*4882a593Smuzhiyun 		} else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
7646*4882a593Smuzhiyun 			/* Mapping to common struct between DHD and HAL */
7647*4882a593Smuzhiyun 			*hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
7648*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
7649*4882a593Smuzhiyun 			if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
7650*4882a593Smuzhiyun 				/* Only at the responder side:
7651*4882a593Smuzhiyun 				 * if the DP is ended,
7652*4882a593Smuzhiyun 				 * clear the resp ndp id from the svc info cache
7653*4882a593Smuzhiyun 				 */
7654*4882a593Smuzhiyun 				ret = wl_cfgnan_svc_inst_del_ndp(cfg,
7655*4882a593Smuzhiyun 						nan_event_data->pub_id,
7656*4882a593Smuzhiyun 						nan_event_data->ndp_id);
7657*4882a593Smuzhiyun 				if (ret != BCME_OK) {
7658*4882a593Smuzhiyun 					goto fail;
7659*4882a593Smuzhiyun 				}
7660*4882a593Smuzhiyun 			}
7661*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
7662*4882a593Smuzhiyun 			/* Remove peer from data ndp peer list */
7663*4882a593Smuzhiyun 			wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
7664*4882a593Smuzhiyun 			wl_cfgnan_update_dp_info(cfg, false, nan_event_data->ndp_id);
7665*4882a593Smuzhiyun 			WL_INFORM_MEM(("DP_END for REMOTE_NMI: " MACDBG " with %s\n",
7666*4882a593Smuzhiyun 				MAC2STRDBG(&ev_dp->peer_nmi),
7667*4882a593Smuzhiyun 				nan_event_cause_to_str(ev_dp->event_cause)));
7668*4882a593Smuzhiyun #ifdef RTT_SUPPORT
7669*4882a593Smuzhiyun 			rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
7670*4882a593Smuzhiyun 			if (rng_inst) {
7671*4882a593Smuzhiyun 				/* Trigger/Reset geofence RTT */
7672*4882a593Smuzhiyun 				WL_INFORM_MEM(("sched geofence rtt from DP_END ctx: " MACDBG "\n",
7673*4882a593Smuzhiyun 						MAC2STRDBG(&rng_inst->peer_addr)));
7674*4882a593Smuzhiyun 				wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
7675*4882a593Smuzhiyun 					RTT_SCHED_DP_END, TRUE);
7676*4882a593Smuzhiyun 			}
7677*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
7678*4882a593Smuzhiyun 		}
7679*4882a593Smuzhiyun 	} else {
7680*4882a593Smuzhiyun 		/* Fall through: other IDs are not handled as of now */
7681*4882a593Smuzhiyun 		WL_DBG(("%s:ID = 0x%02x not supported\n", __FUNCTION__, xtlv->id));
7682*4882a593Smuzhiyun 	}
7683*4882a593Smuzhiyun fail:
7684*4882a593Smuzhiyun 	NAN_DBG_EXIT();
7685*4882a593Smuzhiyun 	return ret;
7686*4882a593Smuzhiyun }
7687*4882a593Smuzhiyun 
7688*4882a593Smuzhiyun #ifdef RTT_SUPPORT
7689*4882a593Smuzhiyun static int
7690*4882a593Smuzhiyun wl_cfgnan_event_disc_result(struct bcm_cfg80211 *cfg,
7691*4882a593Smuzhiyun 		nan_event_data_t *nan_event_data)
7692*4882a593Smuzhiyun {
7693*4882a593Smuzhiyun 	int ret = BCME_OK;
7694*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
7695*4882a593Smuzhiyun 	ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
7696*4882a593Smuzhiyun 		GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH, nan_event_data);
7697*4882a593Smuzhiyun 	if (ret != BCME_OK) {
7698*4882a593Smuzhiyun 		WL_ERR(("Failed to send event to nan hal\n"));
7699*4882a593Smuzhiyun 	}
7700*4882a593Smuzhiyun #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
7701*4882a593Smuzhiyun 	return ret;
7702*4882a593Smuzhiyun }
7703*4882a593Smuzhiyun 
7704*4882a593Smuzhiyun #define IN_GEOFENCE(ingress, egress, distance) (((distance) <= (ingress)) && \
7705*4882a593Smuzhiyun 	((distance) >= (egress)))
7706*4882a593Smuzhiyun #define IS_INGRESS_VAL(ingress, distance) ((distance) < (ingress))
7707*4882a593Smuzhiyun #define IS_EGRESS_VAL(egress, distance) ((distance) > (egress))
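/* Annotation (illustrative values only): with ingress_limit = 5000 mm and
 * egress_limit = 1000 mm, a distance of 3000 mm satisfies IN_GEOFENCE(5000, 1000, 3000),
 * 500 mm satisfies IS_INGRESS_VAL(5000, 500) and 7000 mm satisfies IS_EGRESS_VAL(1000, 7000).
 */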
7708*4882a593Smuzhiyun 
7709*4882a593Smuzhiyun static bool
7710*4882a593Smuzhiyun wl_cfgnan_check_ranging_cond(nan_svc_info_t *svc_info, uint32 distance,
7711*4882a593Smuzhiyun 	uint8 *ranging_ind, uint32 prev_distance)
7712*4882a593Smuzhiyun {
7713*4882a593Smuzhiyun 	uint8 svc_ind = svc_info->ranging_ind;
7714*4882a593Smuzhiyun 	bool notify = FALSE;
7715*4882a593Smuzhiyun 	bool range_rep_ev_once =
7716*4882a593Smuzhiyun 		!!(svc_info->svc_range_status & SVC_RANGE_REP_EVENT_ONCE);
7717*4882a593Smuzhiyun 	uint32 ingress_limit = svc_info->ingress_limit;
7718*4882a593Smuzhiyun 	uint32 egress_limit = svc_info->egress_limit;
7719*4882a593Smuzhiyun 
7720*4882a593Smuzhiyun 	if (svc_ind & NAN_RANGE_INDICATION_CONT) {
7721*4882a593Smuzhiyun 		*ranging_ind = NAN_RANGE_INDICATION_CONT;
7722*4882a593Smuzhiyun 		notify = TRUE;
7723*4882a593Smuzhiyun 		WL_ERR(("\n%s :Svc has continuous Ind %d\n",
7724*4882a593Smuzhiyun 				__FUNCTION__, __LINE__));
7725*4882a593Smuzhiyun 		goto done;
7726*4882a593Smuzhiyun 	}
7727*4882a593Smuzhiyun 
7728*4882a593Smuzhiyun 	if (svc_ind == (NAN_RANGE_INDICATION_INGRESS |
7729*4882a593Smuzhiyun 		NAN_RANGE_INDICATION_EGRESS)) {
7730*4882a593Smuzhiyun 		if (IN_GEOFENCE(ingress_limit, egress_limit, distance)) {
7731*4882a593Smuzhiyun 			/* if not already in geofence */
7732*4882a593Smuzhiyun 			if ((range_rep_ev_once == FALSE) ||
7733*4882a593Smuzhiyun 				(!IN_GEOFENCE(ingress_limit, egress_limit,
7734*4882a593Smuzhiyun 				prev_distance))) {
7735*4882a593Smuzhiyun 				notify = TRUE;
7736*4882a593Smuzhiyun 				if (distance > prev_distance) {
7737*4882a593Smuzhiyun 					*ranging_ind = NAN_RANGE_INDICATION_EGRESS;
7738*4882a593Smuzhiyun 				} else {
7739*4882a593Smuzhiyun 					*ranging_ind = NAN_RANGE_INDICATION_INGRESS;
7740*4882a593Smuzhiyun 				}
7741*4882a593Smuzhiyun 				WL_ERR(("\n%s :Svc has geofence Ind %d res_ind %d\n",
7742*4882a593Smuzhiyun 					__FUNCTION__, __LINE__, *ranging_ind));
7743*4882a593Smuzhiyun 			}
7744*4882a593Smuzhiyun 		}
7745*4882a593Smuzhiyun 		goto done;
7746*4882a593Smuzhiyun 	}
7747*4882a593Smuzhiyun 
7748*4882a593Smuzhiyun 	if (svc_ind == NAN_RANGE_INDICATION_INGRESS) {
7749*4882a593Smuzhiyun 		if (IS_INGRESS_VAL(ingress_limit, distance)) {
7750*4882a593Smuzhiyun 			if ((range_rep_ev_once == FALSE) ||
7751*4882a593Smuzhiyun 				(prev_distance == INVALID_DISTANCE) ||
7752*4882a593Smuzhiyun 				!IS_INGRESS_VAL(ingress_limit, prev_distance)) {
7753*4882a593Smuzhiyun 				notify = TRUE;
7754*4882a593Smuzhiyun 				*ranging_ind = NAN_RANGE_INDICATION_INGRESS;
7755*4882a593Smuzhiyun 				WL_ERR(("\n%s :Svc has ingress Ind %d\n",
7756*4882a593Smuzhiyun 					__FUNCTION__, __LINE__));
7757*4882a593Smuzhiyun 			}
7758*4882a593Smuzhiyun 		}
7759*4882a593Smuzhiyun 		goto done;
7760*4882a593Smuzhiyun 	}
7761*4882a593Smuzhiyun 
7762*4882a593Smuzhiyun 	if (svc_ind == NAN_RANGE_INDICATION_EGRESS) {
7763*4882a593Smuzhiyun 		if (IS_EGRESS_VAL(egress_limit, distance)) {
7764*4882a593Smuzhiyun 			if ((range_rep_ev_once == FALSE) ||
7765*4882a593Smuzhiyun 				(prev_distance == INVALID_DISTANCE) ||
7766*4882a593Smuzhiyun 				!IS_EGRESS_VAL(egress_limit, prev_distance)) {
7767*4882a593Smuzhiyun 				notify = TRUE;
7768*4882a593Smuzhiyun 				*ranging_ind = NAN_RANGE_INDICATION_EGRESS;
7769*4882a593Smuzhiyun 				WL_ERR(("\n%s :Svc has egress Ind %d\n",
7770*4882a593Smuzhiyun 					__FUNCTION__, __LINE__));
7771*4882a593Smuzhiyun 			}
7772*4882a593Smuzhiyun 		}
7773*4882a593Smuzhiyun 		goto done;
7774*4882a593Smuzhiyun 	}
7775*4882a593Smuzhiyun done:
7776*4882a593Smuzhiyun 	WL_INFORM_MEM(("SVC ranging Ind %d distance %d prev_distance %d, "
7777*4882a593Smuzhiyun 		"range_rep_ev_once %d ingress_limit %d egress_limit %d notify %d\n",
7778*4882a593Smuzhiyun 		svc_ind, distance, prev_distance, range_rep_ev_once,
7779*4882a593Smuzhiyun 		ingress_limit, egress_limit, notify));
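	/* Annotation (added note): once the first report has been evaluated,
	 * SVC_RANGE_REP_EVENT_ONCE keeps ingress/egress indications from notifying again
	 * unless the geofence state changes; a continuous indication always notifies.
	 */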
7780*4882a593Smuzhiyun 	svc_info->svc_range_status |= SVC_RANGE_REP_EVENT_ONCE;
7781*4882a593Smuzhiyun 	return notify;
7782*4882a593Smuzhiyun }
7783*4882a593Smuzhiyun 
7784*4882a593Smuzhiyun static int32
7785*4882a593Smuzhiyun wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
7786*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance)
7787*4882a593Smuzhiyun {
7788*4882a593Smuzhiyun 	nan_svc_info_t *svc_info;
7789*4882a593Smuzhiyun 	bool notify_svc = TRUE;
7790*4882a593Smuzhiyun 	nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
7791*4882a593Smuzhiyun 	uint8 ranging_ind = 0;
7792*4882a593Smuzhiyun 	int ret = BCME_OK;
7793*4882a593Smuzhiyun 	int i = 0, j = 0;
7794*4882a593Smuzhiyun 	uint8 result_present = nan_event_data->ranging_result_present;
7795*4882a593Smuzhiyun 
7796*4882a593Smuzhiyun 	for (i = 0; i < MAX_SUBSCRIBES; i++) {
7797*4882a593Smuzhiyun 		svc_info = rng_inst->svc_idx[i];
7798*4882a593Smuzhiyun 		if (svc_info && svc_info->ranging_required) {
7799*4882a593Smuzhiyun 			/* If ranging_result is present, notify the disc result only if
7800*4882a593Smuzhiyun 			* the result satisfies the conditions.
7801*4882a593Smuzhiyun 			* If ranging_result is not present, notify the disc
7802*4882a593Smuzhiyun 			* result without ranging info.
7803*4882a593Smuzhiyun 			*/
7804*4882a593Smuzhiyun 			if (result_present) {
7805*4882a593Smuzhiyun 				notify_svc = wl_cfgnan_check_ranging_cond(svc_info, distance,
7806*4882a593Smuzhiyun 					&ranging_ind, rng_inst->prev_distance_mm);
7807*4882a593Smuzhiyun 				nan_event_data->ranging_ind = ranging_ind;
7808*4882a593Smuzhiyun 			}
7809*4882a593Smuzhiyun 			WL_DBG(("Ranging notify for svc_id %d, notify %d and ind %d"
7810*4882a593Smuzhiyun 				" distance_mm %d result_present %d\n", svc_info->svc_id, notify_svc,
7811*4882a593Smuzhiyun 				ranging_ind, distance, result_present));
7812*4882a593Smuzhiyun 		} else {
7813*4882a593Smuzhiyun 			continue;
7814*4882a593Smuzhiyun 		}
7815*4882a593Smuzhiyun 		if (notify_svc) {
7816*4882a593Smuzhiyun 			for (j = 0; j < NAN_MAX_CACHE_DISC_RESULT; j++) {
7817*4882a593Smuzhiyun 				if (!memcmp(&disc_res[j].peer,
7818*4882a593Smuzhiyun 					&(rng_inst->peer_addr), ETHER_ADDR_LEN) &&
7819*4882a593Smuzhiyun 					(svc_info->svc_id == disc_res[j].sub_id)) {
7820*4882a593Smuzhiyun 					ret = wl_nan_cache_to_event_data(&disc_res[j],
7821*4882a593Smuzhiyun 						nan_event_data, cfg->osh);
7822*4882a593Smuzhiyun 					ret = wl_cfgnan_event_disc_result(cfg, nan_event_data);
7823*4882a593Smuzhiyun 					/* If it is not match-once, clear the cached result,
7824*4882a593Smuzhiyun 					 * as the FW will indicate it again.
7825*4882a593Smuzhiyun 					 */
7826*4882a593Smuzhiyun 					if (!(svc_info->flags & WL_NAN_MATCH_ONCE)) {
7827*4882a593Smuzhiyun 						wl_cfgnan_remove_disc_result(cfg, svc_info->svc_id);
7828*4882a593Smuzhiyun 					}
7829*4882a593Smuzhiyun 				}
7830*4882a593Smuzhiyun 			}
7831*4882a593Smuzhiyun 		}
7832*4882a593Smuzhiyun 	}
7833*4882a593Smuzhiyun 	WL_DBG(("notify_disc_with_ranging done ret %d\n", ret));
7834*4882a593Smuzhiyun 	return ret;
7835*4882a593Smuzhiyun }
7836*4882a593Smuzhiyun 
7837*4882a593Smuzhiyun static int32
7838*4882a593Smuzhiyun wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 *cfg,
7839*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst)
7840*4882a593Smuzhiyun {
7841*4882a593Smuzhiyun 	int ret = BCME_OK;
7842*4882a593Smuzhiyun 	uint32 status;
7843*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7844*4882a593Smuzhiyun 
7845*4882a593Smuzhiyun 	ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
7846*4882a593Smuzhiyun 			&rng_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
7847*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
7848*4882a593Smuzhiyun 		WL_ERR(("nan range cancel failed ret = %d status = %d\n", ret, status));
7849*4882a593Smuzhiyun 	}
7850*4882a593Smuzhiyun 	dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
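	/* Annotation (added note): regardless of the cancel outcome, hand the session end
	 * to the RTT core, update the directed-sessions count and then remove the
	 * ranging instance.
	 */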
7851*4882a593Smuzhiyun 	dhd_rtt_nan_update_directed_sessions_cnt(dhd, FALSE);
7852*4882a593Smuzhiyun 
7853*4882a593Smuzhiyun 	wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
7854*4882a593Smuzhiyun 
7855*4882a593Smuzhiyun 	WL_DBG(("Ongoing ranging session is cancelled \n"));
7856*4882a593Smuzhiyun 	return ret;
7857*4882a593Smuzhiyun }
7858*4882a593Smuzhiyun 
7859*4882a593Smuzhiyun static void
7860*4882a593Smuzhiyun wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
7861*4882a593Smuzhiyun 		nan_ranging_inst_t *rng_inst)
7862*4882a593Smuzhiyun {
7863*4882a593Smuzhiyun 	nan_event_data_t *nan_event_data = NULL;
7864*4882a593Smuzhiyun 
7865*4882a593Smuzhiyun 	nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
7866*4882a593Smuzhiyun 	if (!nan_event_data) {
7867*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
7868*4882a593Smuzhiyun 		goto exit;
7869*4882a593Smuzhiyun 	}
7870*4882a593Smuzhiyun 
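	/* Annotation (added note): nan_event_data is zero-initialized, so
	 * ranging_result_present stays 0 and the cached discovery result is reported
	 * to the host without ranging info.
	 */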
7871*4882a593Smuzhiyun 	wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, nan_event_data, 0);
7872*4882a593Smuzhiyun 
7873*4882a593Smuzhiyun exit:
7874*4882a593Smuzhiyun 	wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
7875*4882a593Smuzhiyun 
7876*4882a593Smuzhiyun 	return;
7877*4882a593Smuzhiyun }
7878*4882a593Smuzhiyun 
7879*4882a593Smuzhiyun void
7880*4882a593Smuzhiyun wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
7881*4882a593Smuzhiyun 		wl_nan_ev_rng_rpt_ind_t *range_res, int status)
7882*4882a593Smuzhiyun {
7883*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst = NULL;
7884*4882a593Smuzhiyun 	nan_event_data_t nan_event_data;
7885*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
7886*4882a593Smuzhiyun 
7887*4882a593Smuzhiyun 	UNUSED_PARAMETER(nan_event_data);
7888*4882a593Smuzhiyun 	rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res->peer_m_addr);
7889*4882a593Smuzhiyun 	if (!rng_inst) {
7890*4882a593Smuzhiyun 		WL_ERR(("No ranging instance but received RNG RPT event..check \n"));
7891*4882a593Smuzhiyun 		goto exit;
7892*4882a593Smuzhiyun 	}
7893*4882a593Smuzhiyun 
7894*4882a593Smuzhiyun 	if (rng_inst->range_status != NAN_RANGING_SESSION_IN_PROGRESS) {
7895*4882a593Smuzhiyun 		WL_ERR(("SSN not in prog but received RNG RPT event..ignore \n"));
7896*4882a593Smuzhiyun 		goto exit;
7897*4882a593Smuzhiyun 	}
7898*4882a593Smuzhiyun 
7899*4882a593Smuzhiyun #ifdef NAN_RTT_DBG
7900*4882a593Smuzhiyun 	DUMP_NAN_RTT_INST(rng_inst);
7901*4882a593Smuzhiyun 	DUMP_NAN_RTT_RPT(range_res);
7902*4882a593Smuzhiyun #endif
7903*4882a593Smuzhiyun 	range_res->rng_id = rng_inst->range_id;
7904*4882a593Smuzhiyun 	bzero(&nan_event_data, sizeof(nan_event_data));
7905*4882a593Smuzhiyun 
7906*4882a593Smuzhiyun 	if (status == BCME_OK) {
7907*4882a593Smuzhiyun 		nan_event_data.ranging_result_present = 1;
7908*4882a593Smuzhiyun 		nan_event_data.range_measurement_cm = range_res->dist_mm;
7909*4882a593Smuzhiyun 		nan_event_data.ranging_ind = range_res->indication;
7910*4882a593Smuzhiyun 	}
7911*4882a593Smuzhiyun 
7912*4882a593Smuzhiyun 	(void)memcpy_s(&nan_event_data.remote_nmi, ETHER_ADDR_LEN,
7913*4882a593Smuzhiyun 			&range_res->peer_m_addr, ETHER_ADDR_LEN);
7914*4882a593Smuzhiyun 
7915*4882a593Smuzhiyun 	if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
7916*4882a593Smuzhiyun 		/* check in cache and event match to host */
7917*4882a593Smuzhiyun 		wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, &nan_event_data,
7918*4882a593Smuzhiyun 				range_res->dist_mm);
7919*4882a593Smuzhiyun 		rng_inst->prev_distance_mm = range_res->dist_mm;
7920*4882a593Smuzhiyun 		/* Reset geof retry count on valid measurement */
7921*4882a593Smuzhiyun 		rng_inst->geof_retry_count = 0;
7922*4882a593Smuzhiyun 		/*
7923*4882a593Smuzhiyun 		 * Suspend and trigger other targets,
7924*4882a593Smuzhiyun 		 * if running sessions maxed out and more
7925*4882a593Smuzhiyun 		 * pending targets waiting for trigger
7926*4882a593Smuzhiyun 		 */
7927*4882a593Smuzhiyun 		if (dhd_rtt_geofence_sessions_maxed_out(dhd) &&
7928*4882a593Smuzhiyun 			(dhd_rtt_get_geofence_target_cnt(dhd) >=
7929*4882a593Smuzhiyun 				dhd_rtt_get_geofence_max_sessions(dhd))) {
7930*4882a593Smuzhiyun 			/*
7931*4882a593Smuzhiyun 			 * Update the target idx first, before suspending current target
7932*4882a593Smuzhiyun 			 * or else current target will become eligible again
7933*4882a593Smuzhiyun 			 * and will get scheduled again on reset ranging
7934*4882a593Smuzhiyun 			 */
7935*4882a593Smuzhiyun 			wl_cfgnan_update_geofence_target_idx(cfg);
7936*4882a593Smuzhiyun 			wl_cfgnan_suspend_geofence_rng_session(bcmcfg_to_prmry_ndev(cfg),
7937*4882a593Smuzhiyun 				&rng_inst->peer_addr, RTT_GEO_SUSPN_RANGE_RES_REPORTED, 0);
7938*4882a593Smuzhiyun 		}
7939*4882a593Smuzhiyun 		wl_cfgnan_reset_geofence_ranging(cfg,
7940*4882a593Smuzhiyun 			rng_inst, RTT_SCHED_RNG_RPT_GEOFENCE, TRUE);
7941*4882a593Smuzhiyun 
7942*4882a593Smuzhiyun 	} else if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
7943*4882a593Smuzhiyun 		wl_cfgnan_handle_directed_rtt_report(cfg, rng_inst);
7944*4882a593Smuzhiyun 	}
7945*4882a593Smuzhiyun 	rng_inst->ftm_ssn_retry_count = 0;
7946*4882a593Smuzhiyun 
7947*4882a593Smuzhiyun exit:
7948*4882a593Smuzhiyun 	return;
7949*4882a593Smuzhiyun }
7950*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
7951*4882a593Smuzhiyun 
7952*4882a593Smuzhiyun static void
7953*4882a593Smuzhiyun wl_nan_print_status(wl_nan_conf_status_t *nstatus)
7954*4882a593Smuzhiyun {
7955*4882a593Smuzhiyun 	WL_INFORM_MEM(("> NMI: " MACDBG " Cluster_ID: " MACDBG "\n",
7956*4882a593Smuzhiyun 		MAC2STRDBG(nstatus->nmi.octet),
7957*4882a593Smuzhiyun 		MAC2STRDBG(nstatus->cid.octet)));
7958*4882a593Smuzhiyun 
7959*4882a593Smuzhiyun 	WL_INFORM_MEM(("> NAN Device Role %s\n", nan_role_to_str(nstatus->role)));
7960*4882a593Smuzhiyun 	WL_INFORM_MEM(("> Social channels: %d, %d\n",
7961*4882a593Smuzhiyun 		nstatus->social_chans[0], nstatus->social_chans[1]));
7962*4882a593Smuzhiyun 
7963*4882a593Smuzhiyun 	WL_INFORM_MEM(("> Master_rank: " NMRSTR " AMR : " NMRSTR " Hop Count : %d, AMBTT : %d\n",
7964*4882a593Smuzhiyun 		NMR2STR(nstatus->mr),
7965*4882a593Smuzhiyun 		NMR2STR(nstatus->amr),
7966*4882a593Smuzhiyun 		nstatus->hop_count,
7967*4882a593Smuzhiyun 		nstatus->ambtt));
7968*4882a593Smuzhiyun 
7969*4882a593Smuzhiyun 	WL_INFORM_MEM(("> Cluster TSF_H: %x , Cluster TSF_L: %x\n",
7970*4882a593Smuzhiyun 		nstatus->cluster_tsf_h, nstatus->cluster_tsf_l));
7971*4882a593Smuzhiyun }
7972*4882a593Smuzhiyun 
7973*4882a593Smuzhiyun static void
7974*4882a593Smuzhiyun wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
7975*4882a593Smuzhiyun 	nan_event_data_t *nan_event_data)
7976*4882a593Smuzhiyun {
7977*4882a593Smuzhiyun 	if (nan_event_data) {
7978*4882a593Smuzhiyun 		if (nan_event_data->tx_match_filter.data) {
7979*4882a593Smuzhiyun 			MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
7980*4882a593Smuzhiyun 					nan_event_data->tx_match_filter.dlen);
7981*4882a593Smuzhiyun 			nan_event_data->tx_match_filter.data = NULL;
7982*4882a593Smuzhiyun 		}
7983*4882a593Smuzhiyun 		if (nan_event_data->rx_match_filter.data) {
7984*4882a593Smuzhiyun 			MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
7985*4882a593Smuzhiyun 					nan_event_data->rx_match_filter.dlen);
7986*4882a593Smuzhiyun 			nan_event_data->rx_match_filter.data = NULL;
7987*4882a593Smuzhiyun 		}
7988*4882a593Smuzhiyun 		if (nan_event_data->svc_info.data) {
7989*4882a593Smuzhiyun 			MFREE(cfg->osh, nan_event_data->svc_info.data,
7990*4882a593Smuzhiyun 					nan_event_data->svc_info.dlen);
7991*4882a593Smuzhiyun 			nan_event_data->svc_info.data = NULL;
7992*4882a593Smuzhiyun 		}
7993*4882a593Smuzhiyun 		if (nan_event_data->sde_svc_info.data) {
7994*4882a593Smuzhiyun 			MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
7995*4882a593Smuzhiyun 					nan_event_data->sde_svc_info.dlen);
7996*4882a593Smuzhiyun 			nan_event_data->sde_svc_info.data = NULL;
7997*4882a593Smuzhiyun 		}
7998*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
7999*4882a593Smuzhiyun 	}
8000*4882a593Smuzhiyun 
8001*4882a593Smuzhiyun }
8002*4882a593Smuzhiyun 
8003*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8004*4882a593Smuzhiyun bool
8005*4882a593Smuzhiyun wl_cfgnan_update_geofence_target_idx(struct bcm_cfg80211 *cfg)
8006*4882a593Smuzhiyun {
8007*4882a593Smuzhiyun 	int8 i = 0, target_cnt = 0;
8008*4882a593Smuzhiyun 	int8 cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
8009*4882a593Smuzhiyun 	rtt_geofence_target_info_t  *geofence_target_info = NULL;
8010*4882a593Smuzhiyun 	bool found = false;
8011*4882a593Smuzhiyun 	nan_ranging_inst_t *rng_inst = NULL;
8012*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
8013*4882a593Smuzhiyun 	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
8014*4882a593Smuzhiyun 
8015*4882a593Smuzhiyun 	target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
8016*4882a593Smuzhiyun 	ASSERT(target_cnt);
8017*4882a593Smuzhiyun 	if (target_cnt == 0) {
8018*4882a593Smuzhiyun 		WL_DBG(("No geofence targets to schedule\n"));
8019*4882a593Smuzhiyun 		dhd_rtt_set_geofence_cur_target_idx(dhd,
8020*4882a593Smuzhiyun 			DHD_RTT_INVALID_TARGET_INDEX);
8021*4882a593Smuzhiyun 		goto exit;
8022*4882a593Smuzhiyun 	}
8023*4882a593Smuzhiyun 
8024*4882a593Smuzhiyun 	/* cur idx is validated too, in the following API */
8025*4882a593Smuzhiyun 	cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
8026*4882a593Smuzhiyun 	if (cur_idx == DHD_RTT_INVALID_TARGET_INDEX) {
8027*4882a593Smuzhiyun 		WL_DBG(("invalid current target index, start looking from first\n"));
8028*4882a593Smuzhiyun 		cur_idx = 0;
8029*4882a593Smuzhiyun 	}
8030*4882a593Smuzhiyun 
8031*4882a593Smuzhiyun 	geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
8032*4882a593Smuzhiyun 
8033*4882a593Smuzhiyun 	/* Loop through to find eligible target idx */
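	/* Annotation (added note): this is a round-robin scan - it starts at cur_idx,
	 * wraps to index 0 after the last target and stops after one full cycle; a target
	 * is eligible only if its ranging instance is neither in progress nor blocked by
	 * role concurrency.
	 */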
8034*4882a593Smuzhiyun 	i = cur_idx;
8035*4882a593Smuzhiyun 	do {
8036*4882a593Smuzhiyun 		if (geofence_target_info[i].valid == TRUE) {
8037*4882a593Smuzhiyun 			rng_inst = wl_cfgnan_check_for_ranging(cfg,
8038*4882a593Smuzhiyun 					&geofence_target_info[i].peer_addr);
8039*4882a593Smuzhiyun 			if (rng_inst &&
8040*4882a593Smuzhiyun 				(!NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) &&
8041*4882a593Smuzhiyun 				(!wl_cfgnan_check_role_concurrency(cfg,
8042*4882a593Smuzhiyun 					&rng_inst->peer_addr))) {
8043*4882a593Smuzhiyun 				found = TRUE;
8044*4882a593Smuzhiyun 				break;
8045*4882a593Smuzhiyun 			}
8046*4882a593Smuzhiyun 		}
8047*4882a593Smuzhiyun 		i++;
8048*4882a593Smuzhiyun 		if (i == target_cnt) {
8049*4882a593Smuzhiyun 			i = 0;
8050*4882a593Smuzhiyun 		}
8051*4882a593Smuzhiyun 	} while (i != cur_idx);
8052*4882a593Smuzhiyun 
8053*4882a593Smuzhiyun 	if (found) {
8054*4882a593Smuzhiyun 		dhd_rtt_set_geofence_cur_target_idx(dhd, i);
8055*4882a593Smuzhiyun 		WL_DBG(("Updated cur index, cur_idx = %d, target_cnt = %d\n",
8056*4882a593Smuzhiyun 			i, target_cnt));
8057*4882a593Smuzhiyun 	} else {
8058*4882a593Smuzhiyun 		dhd_rtt_set_geofence_cur_target_idx(dhd,
8059*4882a593Smuzhiyun 			DHD_RTT_INVALID_TARGET_INDEX);
8060*4882a593Smuzhiyun 		WL_DBG(("Invalidated cur_idx, as either no target present, or all "
8061*4882a593Smuzhiyun 			"targets already running, target_cnt = %d\n", target_cnt));
8062*4882a593Smuzhiyun 
8063*4882a593Smuzhiyun 	}
8064*4882a593Smuzhiyun 
8065*4882a593Smuzhiyun exit:
8066*4882a593Smuzhiyun 	return found;
8067*4882a593Smuzhiyun }
8068*4882a593Smuzhiyun 
8069*4882a593Smuzhiyun /*
8070*4882a593Smuzhiyun  * Triggers the RTT work thread,
8071*4882a593Smuzhiyun  * after setting the next eligible target index,
8072*4882a593Smuzhiyun  * if a set-up is not already in progress
8073*4882a593Smuzhiyun  * and the session count is not maxed out
8074*4882a593Smuzhiyun  */
8075*4882a593Smuzhiyun void
8076*4882a593Smuzhiyun wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
8077*4882a593Smuzhiyun 		nan_ranging_inst_t * rng_inst, int sched_reason,
8078*4882a593Smuzhiyun 		bool need_rtt_mutex)
8079*4882a593Smuzhiyun {
8080*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
8081*4882a593Smuzhiyun 	u8 rtt_invalid_reason = RTT_STATE_VALID;
8082*4882a593Smuzhiyun 	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
8083*4882a593Smuzhiyun 	int8 target_cnt = 0;
8084*4882a593Smuzhiyun 	int reset_req_drop = 0;
8085*4882a593Smuzhiyun 
8086*4882a593Smuzhiyun 	if (need_rtt_mutex == TRUE) {
8087*4882a593Smuzhiyun 		mutex_lock(&rtt_status->rtt_mutex);
8088*4882a593Smuzhiyun 	}
8089*4882a593Smuzhiyun 
8090*4882a593Smuzhiyun 	WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging: "
8091*4882a593Smuzhiyun 		"sched_reason = %d, cur_idx = %d, target_cnt = %d\n",
8092*4882a593Smuzhiyun 		sched_reason, rtt_status->geofence_cfg.cur_target_idx,
8093*4882a593Smuzhiyun 		rtt_status->geofence_cfg.geofence_target_cnt));
8094*4882a593Smuzhiyun 
8095*4882a593Smuzhiyun 	if (rtt_status->rtt_sched == TRUE) {
8096*4882a593Smuzhiyun 		reset_req_drop = 1;
8097*4882a593Smuzhiyun 		goto exit;
8098*4882a593Smuzhiyun 	}
8099*4882a593Smuzhiyun 
8100*4882a593Smuzhiyun 	target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
8101*4882a593Smuzhiyun 	if (target_cnt == 0) {
8102*4882a593Smuzhiyun 		WL_DBG(("No geofence targets to schedule\n"));
8103*4882a593Smuzhiyun 		/*
8104*4882a593Smuzhiyun 		 * FIXME:
8105*4882a593Smuzhiyun 		 * No Geofence target
8106*4882a593Smuzhiyun 		 * Remove all valid ranging inst
8107*4882a593Smuzhiyun 		 */
8108*4882a593Smuzhiyun 		if (rng_inst) {
8109*4882a593Smuzhiyun 			WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
8110*4882a593Smuzhiyun 				MAC2STRDBG(&(rng_inst->peer_addr))));
8111*4882a593Smuzhiyun 			bzero(rng_inst, sizeof(*rng_inst));
8112*4882a593Smuzhiyun 		}
8113*4882a593Smuzhiyun 		/* Cancel pending retry timer if any */
8114*4882a593Smuzhiyun 		if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
8115*4882a593Smuzhiyun 			cancel_delayed_work(&rtt_status->rtt_retry_timer);
8116*4882a593Smuzhiyun 		}
8117*4882a593Smuzhiyun 
8118*4882a593Smuzhiyun 		/* invalidate current index as there are no targets */
8119*4882a593Smuzhiyun 		dhd_rtt_set_geofence_cur_target_idx(dhd,
8120*4882a593Smuzhiyun 			DHD_RTT_INVALID_TARGET_INDEX);
8121*4882a593Smuzhiyun 		reset_req_drop = 2;
8122*4882a593Smuzhiyun 		goto exit;
8123*4882a593Smuzhiyun 	}
8124*4882a593Smuzhiyun 
8125*4882a593Smuzhiyun 	if (dhd_rtt_is_geofence_setup_inprog(dhd)) {
8126*4882a593Smuzhiyun 		/* This will be called again to schedule once the setup lock is removed */
8127*4882a593Smuzhiyun 		reset_req_drop = 3;
8128*4882a593Smuzhiyun 		goto exit;
8129*4882a593Smuzhiyun 	}
8130*4882a593Smuzhiyun 
8131*4882a593Smuzhiyun 	/* Avoid scheduling if
8132*4882a593Smuzhiyun 	 * geofence is already running,
8133*4882a593Smuzhiyun 	 * or a directed RTT is in progress,
8134*4882a593Smuzhiyun 	 * or the RTT state is invalid,
8135*4882a593Smuzhiyun 	 * e.g. NDP with the peer
8136*4882a593Smuzhiyun 	 */
8137*4882a593Smuzhiyun 	if ((!RTT_IS_STOPPED(rtt_status)) ||
8138*4882a593Smuzhiyun 		(rtt_invalid_reason != RTT_STATE_VALID)) {
8139*4882a593Smuzhiyun 		/* Not in valid RTT state, avoid schedule */
8140*4882a593Smuzhiyun 		reset_req_drop = 4;
8141*4882a593Smuzhiyun 		goto exit;
8142*4882a593Smuzhiyun 	}
8143*4882a593Smuzhiyun 
8144*4882a593Smuzhiyun 	if (dhd_rtt_geofence_sessions_maxed_out(dhd)) {
8145*4882a593Smuzhiyun 		reset_req_drop = 5;
8146*4882a593Smuzhiyun 		goto exit;
8147*4882a593Smuzhiyun 	}
8148*4882a593Smuzhiyun 
8149*4882a593Smuzhiyun 	if (!wl_cfgnan_update_geofence_target_idx(cfg)) {
8150*4882a593Smuzhiyun 		reset_req_drop = 6;
8151*4882a593Smuzhiyun 		goto exit;
8152*4882a593Smuzhiyun 	}
8153*4882a593Smuzhiyun 
8154*4882a593Smuzhiyun 	/*
8155*4882a593Smuzhiyun 	 * FixMe: the retry-geofence-target-over-a-timer logic
8156*4882a593Smuzhiyun 	 * is to be brought back later,
8157*4882a593Smuzhiyun 	 * in accordance with the new multipeer implementation
8158*4882a593Smuzhiyun 	 */
8159*4882a593Smuzhiyun 
8160*4882a593Smuzhiyun 	/* schedule RTT */
8161*4882a593Smuzhiyun 	dhd_rtt_schedule_rtt_work_thread(dhd, sched_reason);
8162*4882a593Smuzhiyun 
8163*4882a593Smuzhiyun exit:
8164*4882a593Smuzhiyun 	if (reset_req_drop) {
8165*4882a593Smuzhiyun 		WL_INFORM_MEM(("reset geofence req dropped, reason = %d\n",
8166*4882a593Smuzhiyun 			reset_req_drop));
8167*4882a593Smuzhiyun 	}
8168*4882a593Smuzhiyun 	if (need_rtt_mutex == TRUE) {
8169*4882a593Smuzhiyun 		mutex_unlock(&rtt_status->rtt_mutex);
8170*4882a593Smuzhiyun 	}
8171*4882a593Smuzhiyun 	return;
8172*4882a593Smuzhiyun }
8173*4882a593Smuzhiyun 
8174*4882a593Smuzhiyun void
8175*4882a593Smuzhiyun wl_cfgnan_reset_geofence_ranging_for_cur_target(dhd_pub_t *dhd, int sched_reason)
8176*4882a593Smuzhiyun {
8177*4882a593Smuzhiyun 	struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
8178*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
8179*4882a593Smuzhiyun 	rtt_geofence_target_info_t  *geofence_target = NULL;
8180*4882a593Smuzhiyun 	nan_ranging_inst_t *ranging_inst = NULL;
8181*4882a593Smuzhiyun 
8182*4882a593Smuzhiyun 	geofence_target = dhd_rtt_get_geofence_current_target(dhd);
8183*4882a593Smuzhiyun 	if (!geofence_target) {
8184*4882a593Smuzhiyun 		WL_DBG(("reset ranging request dropped: geofence target null\n"));
8185*4882a593Smuzhiyun 		goto exit;
8186*4882a593Smuzhiyun 	}
8187*4882a593Smuzhiyun 
8188*4882a593Smuzhiyun 	ranging_inst = wl_cfgnan_check_for_ranging(cfg,
8189*4882a593Smuzhiyun 			&geofence_target->peer_addr);
8190*4882a593Smuzhiyun 	if (!ranging_inst) {
8191*4882a593Smuzhiyun 		WL_DBG(("reset ranging request dropped: ranging instance null\n"));
8192*4882a593Smuzhiyun 		goto exit;
8193*4882a593Smuzhiyun 	}
8194*4882a593Smuzhiyun 
8195*4882a593Smuzhiyun 	if (NAN_RANGING_IS_IN_PROG(ranging_inst->range_status) &&
8196*4882a593Smuzhiyun 		(ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE)) {
8197*4882a593Smuzhiyun 		WL_DBG(("Ranging is already in progress for Current target "
8198*4882a593Smuzhiyun 			MACDBG " \n", MAC2STRDBG(&ranging_inst->peer_addr)));
8199*4882a593Smuzhiyun 		goto exit;
8200*4882a593Smuzhiyun 	}
8201*4882a593Smuzhiyun 
8202*4882a593Smuzhiyun 	wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst, sched_reason, TRUE);
8203*4882a593Smuzhiyun 
8204*4882a593Smuzhiyun exit:
8205*4882a593Smuzhiyun 	return;
8206*4882a593Smuzhiyun }
8207*4882a593Smuzhiyun 
8208*4882a593Smuzhiyun static bool
8209*4882a593Smuzhiyun wl_cfgnan_geofence_retry_check(nan_ranging_inst_t *rng_inst, uint8 reason_code)
8210*4882a593Smuzhiyun {
8211*4882a593Smuzhiyun 	bool geof_retry = FALSE;
8212*4882a593Smuzhiyun 
8213*4882a593Smuzhiyun 	switch (reason_code) {
8214*4882a593Smuzhiyun 		case NAN_RNG_TERM_IDLE_TIMEOUT:
8215*4882a593Smuzhiyun 		/* Fallthrough: keep adding more reason codes if needed */
8216*4882a593Smuzhiyun 		case NAN_RNG_TERM_RNG_RESP_TIMEOUT:
8217*4882a593Smuzhiyun 		case NAN_RNG_TERM_RNG_RESP_REJ:
8218*4882a593Smuzhiyun 		case NAN_RNG_TERM_RNG_TXS_FAIL:
8219*4882a593Smuzhiyun 			if (rng_inst->geof_retry_count <
8220*4882a593Smuzhiyun 					NAN_RNG_GEOFENCE_MAX_RETRY_CNT) {
8221*4882a593Smuzhiyun 				rng_inst->geof_retry_count++;
8222*4882a593Smuzhiyun 				geof_retry = TRUE;
8223*4882a593Smuzhiyun 			}
8224*4882a593Smuzhiyun 			break;
8225*4882a593Smuzhiyun 		default:
8226*4882a593Smuzhiyun 			/* FALSE for any other case */
8227*4882a593Smuzhiyun 			break;
8228*4882a593Smuzhiyun 	}
8229*4882a593Smuzhiyun 
8230*4882a593Smuzhiyun 	return geof_retry;
8231*4882a593Smuzhiyun }
8232*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8233*4882a593Smuzhiyun 
8234*4882a593Smuzhiyun s32
8235*4882a593Smuzhiyun wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
8236*4882a593Smuzhiyun 	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *event_data)
8237*4882a593Smuzhiyun {
8238*4882a593Smuzhiyun 	uint16 data_len;
8239*4882a593Smuzhiyun 	uint32 event_num;
8240*4882a593Smuzhiyun 	s32 event_type;
8241*4882a593Smuzhiyun 	int hal_event_id = 0;
8242*4882a593Smuzhiyun 	nan_event_data_t *nan_event_data = NULL;
8243*4882a593Smuzhiyun 	nan_parse_event_ctx_t nan_event_ctx;
8244*4882a593Smuzhiyun 	uint16 tlvs_offset = 0;
8245*4882a593Smuzhiyun 	uint16 nan_opts_len = 0;
8246*4882a593Smuzhiyun 	uint8 *tlv_buf;
8247*4882a593Smuzhiyun 	s32 ret = BCME_OK;
8248*4882a593Smuzhiyun 	bcm_xtlv_opts_t xtlv_opt = BCM_IOV_CMD_OPT_ALIGN32;
8249*4882a593Smuzhiyun 	uint32 status;
8250*4882a593Smuzhiyun 	nan_svc_info_t *svc;
8251*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8252*4882a593Smuzhiyun 	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
8253*4882a593Smuzhiyun 	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
8254*4882a593Smuzhiyun 	UNUSED_PARAMETER(dhd);
8255*4882a593Smuzhiyun 	UNUSED_PARAMETER(rtt_status);
8256*4882a593Smuzhiyun 	if (rtt_status == NULL) {
8257*4882a593Smuzhiyun 		return -EINVAL;
8258*4882a593Smuzhiyun 	}
8259*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8260*4882a593Smuzhiyun 
8261*4882a593Smuzhiyun 	UNUSED_PARAMETER(wl_nan_print_status);
8262*4882a593Smuzhiyun 	UNUSED_PARAMETER(status);
8263*4882a593Smuzhiyun 	NAN_DBG_ENTER();
8264*4882a593Smuzhiyun 
8265*4882a593Smuzhiyun 	if (!event || !event_data) {
8266*4882a593Smuzhiyun 		WL_ERR(("event data is NULL\n"));
8267*4882a593Smuzhiyun 		return -EINVAL;
8268*4882a593Smuzhiyun 	}
8269*4882a593Smuzhiyun 
8270*4882a593Smuzhiyun 	event_type = ntoh32(event->event_type);
8271*4882a593Smuzhiyun 	event_num = ntoh32(event->reason);
8272*4882a593Smuzhiyun 	data_len = ntoh32(event->datalen);
8273*4882a593Smuzhiyun 
8274*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8275*4882a593Smuzhiyun 	if (event_num == WL_NAN_EVENT_RNG_REQ_IND)
8276*4882a593Smuzhiyun 	{
8277*4882a593Smuzhiyun 		/* Flush any pending RTT work to avoid
8278*4882a593Smuzhiyun 		* inconsistencies and ensure the RNG REQ
8279*4882a593Smuzhiyun 		* is handled in a stable RTT state.
8280*4882a593Smuzhiyun 		* Note new RTT work can be enqueued from
8281*4882a593Smuzhiyun 		* a. host command context - synchronized over rtt_mutex & state
8282*4882a593Smuzhiyun 		* b. event context - event processing is synchronized/serialised
8283*4882a593Smuzhiyun 		*/
8284*4882a593Smuzhiyun 		flush_work(&rtt_status->work);
8285*4882a593Smuzhiyun 	}
8286*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8287*4882a593Smuzhiyun 
8288*4882a593Smuzhiyun 	NAN_MUTEX_LOCK();
8289*4882a593Smuzhiyun 
8290*4882a593Smuzhiyun 	if (NAN_INVALID_EVENT(event_num)) {
8291*4882a593Smuzhiyun 		WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num, event_type));
8292*4882a593Smuzhiyun 		ret = -EINVAL;
8293*4882a593Smuzhiyun 		goto exit;
8294*4882a593Smuzhiyun 	}
8295*4882a593Smuzhiyun 
8296*4882a593Smuzhiyun 	WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
8297*4882a593Smuzhiyun 			nan_event_to_str(event_num), event_num, data_len));
8298*4882a593Smuzhiyun 
8299*4882a593Smuzhiyun #ifdef WL_NAN_DEBUG
8300*4882a593Smuzhiyun 	prhex("nan_event_data:", event_data, data_len);
8301*4882a593Smuzhiyun #endif /* WL_NAN_DEBUG */
8302*4882a593Smuzhiyun 
8303*4882a593Smuzhiyun 	if (!cfg->nancfg->nan_init_state) {
8304*4882a593Smuzhiyun 		WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
8305*4882a593Smuzhiyun 		ret = BCME_OK;
8306*4882a593Smuzhiyun 		goto exit;
8307*4882a593Smuzhiyun 	}
8308*4882a593Smuzhiyun 
8309*4882a593Smuzhiyun 	nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
8310*4882a593Smuzhiyun 	if (!nan_event_data) {
8311*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
8312*4882a593Smuzhiyun 		goto exit;
8313*4882a593Smuzhiyun 	}
8314*4882a593Smuzhiyun 
8315*4882a593Smuzhiyun 	nan_event_ctx.cfg = cfg;
8316*4882a593Smuzhiyun 	nan_event_ctx.nan_evt_data = nan_event_data;
8317*4882a593Smuzhiyun 	/*
8318*4882a593Smuzhiyun 	 * send as preformatted hex string
8319*4882a593Smuzhiyun 	 * EVENT_NAN <event_type> <tlv_hex_string>
8320*4882a593Smuzhiyun 	 */
8321*4882a593Smuzhiyun 	switch (event_num) {
8322*4882a593Smuzhiyun 	case WL_NAN_EVENT_START:
8323*4882a593Smuzhiyun 	case WL_NAN_EVENT_MERGE:
8324*4882a593Smuzhiyun 	case WL_NAN_EVENT_ROLE:	{
8325*4882a593Smuzhiyun 		/* get nan status info as-is */
8326*4882a593Smuzhiyun 		bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8327*4882a593Smuzhiyun 		wl_nan_conf_status_t *nstatus = (wl_nan_conf_status_t *)xtlv->data;
8328*4882a593Smuzhiyun 		WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
8329*4882a593Smuzhiyun 			nan_event_to_str(event_num), event_num, data_len));
8330*4882a593Smuzhiyun 		WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
8331*4882a593Smuzhiyun 		/* Mapping to common struct between DHD and HAL */
8332*4882a593Smuzhiyun 		nan_event_data->enabled = nstatus->enabled;
8333*4882a593Smuzhiyun 		ret = memcpy_s(&nan_event_data->local_nmi, ETHER_ADDR_LEN,
8334*4882a593Smuzhiyun 			&nstatus->nmi, ETHER_ADDR_LEN);
8335*4882a593Smuzhiyun 		if (ret != BCME_OK) {
8336*4882a593Smuzhiyun 			WL_ERR(("Failed to copy nmi\n"));
8337*4882a593Smuzhiyun 			goto exit;
8338*4882a593Smuzhiyun 		}
8339*4882a593Smuzhiyun 		ret = memcpy_s(&nan_event_data->clus_id, ETHER_ADDR_LEN,
8340*4882a593Smuzhiyun 			&nstatus->cid, ETHER_ADDR_LEN);
8341*4882a593Smuzhiyun 		if (ret != BCME_OK) {
8342*4882a593Smuzhiyun 			WL_ERR(("Failed to copy cluster id\n"));
8343*4882a593Smuzhiyun 			goto exit;
8344*4882a593Smuzhiyun 		}
8345*4882a593Smuzhiyun 		nan_event_data->nan_de_evt_type = event_num;
8346*4882a593Smuzhiyun 		if (event_num == WL_NAN_EVENT_ROLE) {
8347*4882a593Smuzhiyun 			wl_nan_print_status(nstatus);
8348*4882a593Smuzhiyun 		}
8349*4882a593Smuzhiyun 
8350*4882a593Smuzhiyun 		if (event_num == WL_NAN_EVENT_START) {
8351*4882a593Smuzhiyun 			OSL_SMP_WMB();
8352*4882a593Smuzhiyun 			cfg->nancfg->nan_event_recvd = true;
8353*4882a593Smuzhiyun 			OSL_SMP_WMB();
8354*4882a593Smuzhiyun 			wake_up(&cfg->nancfg->nan_event_wait);
8355*4882a593Smuzhiyun 		}
8356*4882a593Smuzhiyun 		hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
8357*4882a593Smuzhiyun 		break;
8358*4882a593Smuzhiyun 	}
8359*4882a593Smuzhiyun 	case WL_NAN_EVENT_TERMINATED: {
8360*4882a593Smuzhiyun 		bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8361*4882a593Smuzhiyun 		wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
8362*4882a593Smuzhiyun 
8363*4882a593Smuzhiyun 		/* Mapping to common struct between DHD and HAL */
8364*4882a593Smuzhiyun 		WL_TRACE(("Instance ID: %d\n", pev->instance_id));
8365*4882a593Smuzhiyun 		nan_event_data->local_inst_id = pev->instance_id;
8366*4882a593Smuzhiyun 		WL_TRACE(("Service Type: %d\n", pev->svctype));
8367*4882a593Smuzhiyun 
8368*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
8369*4882a593Smuzhiyun 		wl_cfgnan_clear_svc_cache(cfg, pev->instance_id);
8370*4882a593Smuzhiyun 		/* If disc_res has to be retained even after sub_cancel,
8371*4882a593Smuzhiyun 		* do not call the API below; the criteria for expiry still need to be decided
8372*4882a593Smuzhiyun 		*/
8373*4882a593Smuzhiyun 		if (pev->svctype == NAN_SC_SUBSCRIBE) {
8374*4882a593Smuzhiyun 			wl_cfgnan_remove_disc_result(cfg, pev->instance_id);
8375*4882a593Smuzhiyun 		}
8376*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
8377*4882a593Smuzhiyun 		/* Mapping reason code of FW to status code of framework */
8378*4882a593Smuzhiyun 		if (pev->reason == NAN_TERM_REASON_TIMEOUT ||
8379*4882a593Smuzhiyun 				pev->reason == NAN_TERM_REASON_USER_REQ ||
8380*4882a593Smuzhiyun 				pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
8381*4882a593Smuzhiyun 			nan_event_data->status = NAN_STATUS_SUCCESS;
8382*4882a593Smuzhiyun 			ret = memcpy_s(nan_event_data->nan_reason,
8383*4882a593Smuzhiyun 				sizeof(nan_event_data->nan_reason),
8384*4882a593Smuzhiyun 				"NAN_STATUS_SUCCESS",
8385*4882a593Smuzhiyun 				strlen("NAN_STATUS_SUCCESS"));
8386*4882a593Smuzhiyun 			if (ret != BCME_OK) {
8387*4882a593Smuzhiyun 				WL_ERR(("Failed to copy nan_reason\n"));
8388*4882a593Smuzhiyun 				goto exit;
8389*4882a593Smuzhiyun 			}
8390*4882a593Smuzhiyun 		} else {
8391*4882a593Smuzhiyun 			nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
8392*4882a593Smuzhiyun 			ret = memcpy_s(nan_event_data->nan_reason,
8393*4882a593Smuzhiyun 				sizeof(nan_event_data->nan_reason),
8394*4882a593Smuzhiyun 				"NAN_STATUS_INTERNAL_FAILURE",
8395*4882a593Smuzhiyun 				strlen("NAN_STATUS_INTERNAL_FAILURE"));
8396*4882a593Smuzhiyun 			if (ret != BCME_OK) {
8397*4882a593Smuzhiyun 				WL_ERR(("Failed to copy nan_reason\n"));
8398*4882a593Smuzhiyun 				goto exit;
8399*4882a593Smuzhiyun 			}
8400*4882a593Smuzhiyun 		}
8401*4882a593Smuzhiyun 
8402*4882a593Smuzhiyun 		if (pev->svctype == NAN_SC_SUBSCRIBE) {
8403*4882a593Smuzhiyun 			hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED;
8404*4882a593Smuzhiyun 		} else {
8405*4882a593Smuzhiyun 			hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
8406*4882a593Smuzhiyun 		}
8407*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
8408*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8409*4882a593Smuzhiyun 		if (pev->reason != NAN_TERM_REASON_USER_REQ) {
8410*4882a593Smuzhiyun 			wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, pev->instance_id);
8411*4882a593Smuzhiyun 			/* terminate ranging sessions */
8412*4882a593Smuzhiyun 			wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
8413*4882a593Smuzhiyun 		}
8414*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8415*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
8416*4882a593Smuzhiyun 		break;
8417*4882a593Smuzhiyun 	}
8418*4882a593Smuzhiyun 
8419*4882a593Smuzhiyun 	case WL_NAN_EVENT_RECEIVE: {
8420*4882a593Smuzhiyun 		nan_opts_len = data_len;
8421*4882a593Smuzhiyun 		hal_event_id = GOOGLE_NAN_EVENT_FOLLOWUP;
8422*4882a593Smuzhiyun 		xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
8423*4882a593Smuzhiyun 		break;
8424*4882a593Smuzhiyun 	}
8425*4882a593Smuzhiyun 
8426*4882a593Smuzhiyun 	case WL_NAN_EVENT_TXS: {
8427*4882a593Smuzhiyun 		bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8428*4882a593Smuzhiyun 		wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
8429*4882a593Smuzhiyun 		wl_nan_event_sd_txs_t *txs_sd = NULL;
8430*4882a593Smuzhiyun 		if (txs->status == WL_NAN_TXS_SUCCESS) {
8431*4882a593Smuzhiyun 			WL_INFORM_MEM(("TXS success for type %s(%d) token %d\n",
8432*4882a593Smuzhiyun 				nan_frm_type_to_str(txs->type), txs->type, txs->host_seq));
8433*4882a593Smuzhiyun 			nan_event_data->status = NAN_STATUS_SUCCESS;
8434*4882a593Smuzhiyun 			ret = memcpy_s(nan_event_data->nan_reason,
8435*4882a593Smuzhiyun 				sizeof(nan_event_data->nan_reason),
8436*4882a593Smuzhiyun 				"NAN_STATUS_SUCCESS",
8437*4882a593Smuzhiyun 				strlen("NAN_STATUS_SUCCESS"));
8438*4882a593Smuzhiyun 			if (ret != BCME_OK) {
8439*4882a593Smuzhiyun 				WL_ERR(("Failed to copy nan_reason\n"));
8440*4882a593Smuzhiyun 				goto exit;
8441*4882a593Smuzhiyun 			}
8442*4882a593Smuzhiyun 		} else {
8443*4882a593Smuzhiyun 			/* TODO: populate status based on reason codes.
8444*4882a593Smuzhiyun 			For now, report it as no OTA ACK so that the app/framework can retry
8445*4882a593Smuzhiyun 			*/
8446*4882a593Smuzhiyun 			WL_INFORM_MEM(("TXS failed for type %s(%d) status %d token %d\n",
8447*4882a593Smuzhiyun 				nan_frm_type_to_str(txs->type), txs->type, txs->status,
8448*4882a593Smuzhiyun 				txs->host_seq));
8449*4882a593Smuzhiyun 			nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
8450*4882a593Smuzhiyun 			ret = memcpy_s(nan_event_data->nan_reason,
8451*4882a593Smuzhiyun 				sizeof(nan_event_data->nan_reason),
8452*4882a593Smuzhiyun 				"NAN_STATUS_NO_OTA_ACK",
8453*4882a593Smuzhiyun 				strlen("NAN_STATUS_NO_OTA_ACK"));
8454*4882a593Smuzhiyun 			if (ret != BCME_OK) {
8455*4882a593Smuzhiyun 				WL_ERR(("Failed to copy nan_reason\n"));
8456*4882a593Smuzhiyun 				goto exit;
8457*4882a593Smuzhiyun 			}
8458*4882a593Smuzhiyun 		}
8459*4882a593Smuzhiyun 		nan_event_data->reason = txs->reason_code;
8460*4882a593Smuzhiyun 		nan_event_data->token = txs->host_seq;
8461*4882a593Smuzhiyun 		if (txs->type == WL_NAN_FRM_TYPE_FOLLOWUP) {
8462*4882a593Smuzhiyun 			hal_event_id = GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND;
8463*4882a593Smuzhiyun 			xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
8464*4882a593Smuzhiyun 			if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_SD_TXS) {
8465*4882a593Smuzhiyun 				txs_sd = (wl_nan_event_sd_txs_t*)xtlv->data;
8466*4882a593Smuzhiyun 				nan_event_data->local_inst_id = txs_sd->inst_id;
8467*4882a593Smuzhiyun 			} else {
8468*4882a593Smuzhiyun 				WL_ERR(("Invalid params in TX status for transmit followup"));
8469*4882a593Smuzhiyun 				ret = -EINVAL;
8470*4882a593Smuzhiyun 				goto exit;
8471*4882a593Smuzhiyun 			}
8472*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8473*4882a593Smuzhiyun 		} else if (txs->type == WL_NAN_FRM_TYPE_RNG_RESP) {
8474*4882a593Smuzhiyun 			xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
8475*4882a593Smuzhiyun 			if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_RNG_TXS) {
8476*4882a593Smuzhiyun 				wl_nan_range_txs_t* txs_rng_resp = (wl_nan_range_txs_t*)xtlv->data;
8477*4882a593Smuzhiyun 				nan_ranging_inst_t *rng_inst =
8478*4882a593Smuzhiyun 					wl_cfgnan_get_rng_inst_by_id(cfg, txs_rng_resp->range_id);
8479*4882a593Smuzhiyun 				if (rng_inst &&
8480*4882a593Smuzhiyun 					NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status)) {
8481*4882a593Smuzhiyun 					/* Unset ranging set up in progress */
8482*4882a593Smuzhiyun 					dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
8483*4882a593Smuzhiyun 						&rng_inst->peer_addr);
8484*4882a593Smuzhiyun 					if (txs->status == WL_NAN_TXS_SUCCESS) {
8485*4882a593Smuzhiyun 						/* range set up is over, move range in progress */
8486*4882a593Smuzhiyun 						rng_inst->range_status =
8487*4882a593Smuzhiyun 							NAN_RANGING_SESSION_IN_PROGRESS;
8488*4882a593Smuzhiyun 						 /* Increment geofence session count */
8489*4882a593Smuzhiyun 						dhd_rtt_update_geofence_sessions_cnt(dhd,
8490*4882a593Smuzhiyun 							TRUE, NULL);
8491*4882a593Smuzhiyun 						WL_DBG(("Txs for range resp, rng_id = %d\n",
8492*4882a593Smuzhiyun 							rng_inst->range_id));
8493*4882a593Smuzhiyun 					} else {
8494*4882a593Smuzhiyun 						wl_cfgnan_reset_remove_ranging_instance(cfg,
8495*4882a593Smuzhiyun 							rng_inst);
8496*4882a593Smuzhiyun 					}
8497*4882a593Smuzhiyun 				}
8498*4882a593Smuzhiyun 			} else {
8499*4882a593Smuzhiyun 				WL_ERR(("Invalid params in TX status for range response"));
8500*4882a593Smuzhiyun 				ret = -EINVAL;
8501*4882a593Smuzhiyun 				goto exit;
8502*4882a593Smuzhiyun 			}
8503*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8504*4882a593Smuzhiyun 		} else { /* TODO: add for other frame types if required */
8505*4882a593Smuzhiyun 			ret = -EINVAL;
8506*4882a593Smuzhiyun 			goto exit;
8507*4882a593Smuzhiyun 		}
8508*4882a593Smuzhiyun 		break;
8509*4882a593Smuzhiyun 	}
8510*4882a593Smuzhiyun 
8511*4882a593Smuzhiyun 	case WL_NAN_EVENT_DISCOVERY_RESULT: {
8512*4882a593Smuzhiyun 		nan_opts_len = data_len;
8513*4882a593Smuzhiyun 		hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
8514*4882a593Smuzhiyun 		xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
8515*4882a593Smuzhiyun 		break;
8516*4882a593Smuzhiyun 	}
8517*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
8518*4882a593Smuzhiyun 	case WL_NAN_EVENT_DISC_CACHE_TIMEOUT: {
8519*4882a593Smuzhiyun 		bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8520*4882a593Smuzhiyun 		wl_nan_ev_disc_cache_timeout_t *cache_data =
8521*4882a593Smuzhiyun 				(wl_nan_ev_disc_cache_timeout_t *)xtlv->data;
8522*4882a593Smuzhiyun 		wl_nan_disc_expired_cache_entry_t *cache_entry = NULL;
8523*4882a593Smuzhiyun 		uint16 xtlv_len = xtlv->len;
8524*4882a593Smuzhiyun 		uint8 entry_idx = 0;
8525*4882a593Smuzhiyun 
8526*4882a593Smuzhiyun 		if (xtlv->id == WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT) {
8527*4882a593Smuzhiyun 			xtlv_len = xtlv_len -
8528*4882a593Smuzhiyun 					OFFSETOF(wl_nan_ev_disc_cache_timeout_t, cache_exp_list);
8529*4882a593Smuzhiyun 			while ((entry_idx < cache_data->count) &&
8530*4882a593Smuzhiyun 					(xtlv_len >= sizeof(*cache_entry))) {
8531*4882a593Smuzhiyun 				cache_entry = &cache_data->cache_exp_list[entry_idx];
8532*4882a593Smuzhiyun 				/* Handle ranging cases for cache timeout */
8533*4882a593Smuzhiyun 				WL_INFORM_MEM(("WL_NAN_EVENT_DISC_CACHE_TIMEOUT peer: " MACDBG
8534*4882a593Smuzhiyun 					" l_id:%d r_id:%d\n", MAC2STRDBG(&cache_entry->r_nmi_addr),
8535*4882a593Smuzhiyun 					cache_entry->l_sub_id, cache_entry->r_pub_id));
8536*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8537*4882a593Smuzhiyun 				wl_cfgnan_ranging_clear_publish(cfg, &cache_entry->r_nmi_addr,
8538*4882a593Smuzhiyun 					cache_entry->l_sub_id);
8539*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8540*4882a593Smuzhiyun 				/* Invalidate local cache info */
8541*4882a593Smuzhiyun 				wl_cfgnan_remove_disc_result(cfg, cache_entry->l_sub_id);
8542*4882a593Smuzhiyun 				xtlv_len = xtlv_len - sizeof(*cache_entry);
8543*4882a593Smuzhiyun 				entry_idx++;
8544*4882a593Smuzhiyun 			}
8545*4882a593Smuzhiyun 		}
8546*4882a593Smuzhiyun 		break;
8547*4882a593Smuzhiyun 	}
8548*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8549*4882a593Smuzhiyun 	case WL_NAN_EVENT_RNG_REQ_IND: {
8550*4882a593Smuzhiyun 		wl_nan_ev_rng_req_ind_t *rng_ind;
8551*4882a593Smuzhiyun 		bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8552*4882a593Smuzhiyun 
8553*4882a593Smuzhiyun 		nan_opts_len = data_len;
8554*4882a593Smuzhiyun 		rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
8555*4882a593Smuzhiyun 		xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
8556*4882a593Smuzhiyun 		WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d"
8557*4882a593Smuzhiyun 			" peer:" MACDBG "\n", rng_ind->rng_id,
8558*4882a593Smuzhiyun 			MAC2STRDBG(&rng_ind->peer_m_addr)));
8559*4882a593Smuzhiyun 		ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
8560*4882a593Smuzhiyun 		/* no need to send this event to HAL */
8561*4882a593Smuzhiyun 		goto exit;
8562*4882a593Smuzhiyun 	}
8563*4882a593Smuzhiyun 
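	/*
	 * A ranging session with the peer was terminated by firmware.
	 * Update directed/geofence session accounting for the peer and,
	 * for geofence targets that are not eligible for retry, report the
	 * failure and remove the target before rescheduling ranging.
	 */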
8564*4882a593Smuzhiyun 	case WL_NAN_EVENT_RNG_TERM_IND: {
8565*4882a593Smuzhiyun 		bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8566*4882a593Smuzhiyun 		nan_ranging_inst_t *rng_inst;
8567*4882a593Smuzhiyun 		wl_nan_ev_rng_term_ind_t *range_term = (wl_nan_ev_rng_term_ind_t *)xtlv->data;
8568*4882a593Smuzhiyun 		int rng_sched_reason = 0;
8569*4882a593Smuzhiyun 		int8 index = -1;
8570*4882a593Smuzhiyun 		rtt_geofence_target_info_t* geofence_target;
8571*4882a593Smuzhiyun 		BCM_REFERENCE(dhd);
8572*4882a593Smuzhiyun 		WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_TERM_IND peer: " MACDBG ", "
8573*4882a593Smuzhiyun 			" Range ID:%d Reason Code:%d\n", MAC2STRDBG(&range_term->peer_m_addr),
8574*4882a593Smuzhiyun 			range_term->rng_id, range_term->reason_code));
8575*4882a593Smuzhiyun 		rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_term->rng_id);
8576*4882a593Smuzhiyun 		if (rng_inst) {
8577*4882a593Smuzhiyun 			if (!NAN_RANGING_IS_IN_PROG(rng_inst->range_status)) {
8578*4882a593Smuzhiyun 				WL_DBG(("Late or unsynchronized nan term indicator event\n"));
8579*4882a593Smuzhiyun 				break;
8580*4882a593Smuzhiyun 			}
8581*4882a593Smuzhiyun 			rng_sched_reason = RTT_SCHED_RNG_TERM;
8582*4882a593Smuzhiyun 			if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
8583*4882a593Smuzhiyun 				dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
8584*4882a593Smuzhiyun 					&rng_inst->peer_addr);
8585*4882a593Smuzhiyun 				wl_cfgnan_reset_remove_ranging_instance(cfg, rng_inst);
8586*4882a593Smuzhiyun 			} else {
8587*4882a593Smuzhiyun 				if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
8588*4882a593Smuzhiyun 					dhd_rtt_handle_nan_rtt_session_end(dhd,
8589*4882a593Smuzhiyun 						&rng_inst->peer_addr);
8590*4882a593Smuzhiyun 					if (dhd_rtt_nan_is_directed_setup_in_prog_with_peer(dhd,
8591*4882a593Smuzhiyun 						&rng_inst->peer_addr)) {
8592*4882a593Smuzhiyun 						dhd_rtt_nan_update_directed_setup_inprog(dhd,
8593*4882a593Smuzhiyun 							NULL, FALSE);
8594*4882a593Smuzhiyun 					} else {
8595*4882a593Smuzhiyun 						dhd_rtt_nan_update_directed_sessions_cnt(dhd,
8596*4882a593Smuzhiyun 							FALSE);
8597*4882a593Smuzhiyun 					}
8598*4882a593Smuzhiyun 				} else if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
8599*4882a593Smuzhiyun 					rng_inst->range_status = NAN_RANGING_REQUIRED;
8600*4882a593Smuzhiyun 					dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE,
8601*4882a593Smuzhiyun 						&rng_inst->peer_addr);
8602*4882a593Smuzhiyun 					if (!wl_cfgnan_geofence_retry_check(rng_inst,
8603*4882a593Smuzhiyun 							range_term->reason_code)) {
8604*4882a593Smuzhiyun 						/* Report on ranging failure */
8605*4882a593Smuzhiyun 						wl_cfgnan_disc_result_on_geofence_cancel(cfg,
8606*4882a593Smuzhiyun 							rng_inst);
8607*4882a593Smuzhiyun 						WL_TRACE(("Reset the state on terminate\n"));
8608*4882a593Smuzhiyun 						geofence_target = dhd_rtt_get_geofence_target(dhd,
8609*4882a593Smuzhiyun 							&rng_inst->peer_addr, &index);
8610*4882a593Smuzhiyun 						if (geofence_target) {
8611*4882a593Smuzhiyun 							dhd_rtt_remove_geofence_target(dhd,
8612*4882a593Smuzhiyun 								&geofence_target->peer_addr);
8613*4882a593Smuzhiyun 						}
8614*4882a593Smuzhiyun 					}
8615*4882a593Smuzhiyun 				}
8616*4882a593Smuzhiyun 			}
8617*4882a593Smuzhiyun 			/* Reset Ranging Instance and trigger ranging if applicable */
8618*4882a593Smuzhiyun 			wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, rng_sched_reason, TRUE);
8619*4882a593Smuzhiyun 		} else {
8620*4882a593Smuzhiyun 			/*
8621*4882a593Smuzhiyun 			 * This can happen in some scenarios
8622*4882a593Smuzhiyun 			 * like receiving term after a fail txs for range resp
8623*4882a593Smuzhiyun 			 * where ranging instance is already cleared
8624*4882a593Smuzhiyun 			 */
8625*4882a593Smuzhiyun 			WL_DBG(("Term Indication received for a peer without rng inst\n"));
8626*4882a593Smuzhiyun 		}
8627*4882a593Smuzhiyun 		break;
8628*4882a593Smuzhiyun 	}
8629*4882a593Smuzhiyun 
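	/*
	 * The peer responded to our ranging request: setup is complete,
	 * so move the ranging instance to in-progress, update directed or
	 * geofence session accounting and, if more directed targets are
	 * pending and allowed, schedule the next RTT target.
	 */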
8630*4882a593Smuzhiyun 	case WL_NAN_EVENT_RNG_RESP_IND: {
8631*4882a593Smuzhiyun 		bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
8632*4882a593Smuzhiyun 		nan_ranging_inst_t *rng_inst;
8633*4882a593Smuzhiyun 		wl_nan_ev_rng_resp_t *range_resp = (wl_nan_ev_rng_resp_t *)xtlv->data;
8634*4882a593Smuzhiyun 
8635*4882a593Smuzhiyun 		WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_RESP_IND peer: " MACDBG ", "
8636*4882a593Smuzhiyun 			" Range ID:%d Ranging Status:%d\n", MAC2STRDBG(&range_resp->peer_m_addr),
8637*4882a593Smuzhiyun 			range_resp->rng_id, range_resp->status));
8638*4882a593Smuzhiyun 		rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_resp->rng_id);
8639*4882a593Smuzhiyun 		if (!rng_inst) {
8640*4882a593Smuzhiyun 			WL_DBG(("Late or unsynchronized resp indicator event\n"));
8641*4882a593Smuzhiyun 			break;
8642*4882a593Smuzhiyun 		}
8643*4882a593Smuzhiyun 		//ASSERT(NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status));
8644*4882a593Smuzhiyun 		if (!NAN_RANGING_SETUP_IS_IN_PROG(rng_inst->range_status)) {
8645*4882a593Smuzhiyun 			WL_INFORM_MEM(("Resp Indicator received for not in prog range inst\n"));
8646*4882a593Smuzhiyun 			break;
8647*4882a593Smuzhiyun 		}
8648*4882a593Smuzhiyun 		/* range set up is over now, move to range in progress */
8649*4882a593Smuzhiyun 		rng_inst->range_status = NAN_RANGING_SESSION_IN_PROGRESS;
8650*4882a593Smuzhiyun 		if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
8651*4882a593Smuzhiyun 			/* FixMe: Ideally, the updates below (e.g. session cnt)
8652*4882a593Smuzhiyun 			 * should be applicable to nan rtt in general and not
8653*4882a593Smuzhiyun 			 * specific to geofence. To be fixed in next RB
8654*4882a593Smuzhiyun 			 */
8655*4882a593Smuzhiyun 			dhd_rtt_nan_update_directed_setup_inprog(dhd, NULL, FALSE);
8656*4882a593Smuzhiyun 			/*
8657*4882a593Smuzhiyun 			 * Increase session count here,
8658*4882a593Smuzhiyun 			 * failure status is followed by Term Ind
8659*4882a593Smuzhiyun 			 * and handled accordingly
8660*4882a593Smuzhiyun 			 */
8661*4882a593Smuzhiyun 			dhd_rtt_nan_update_directed_sessions_cnt(dhd, TRUE);
8662*4882a593Smuzhiyun 			/*
8663*4882a593Smuzhiyun 			 * If pending targets to be triggered,
8664*4882a593Smuzhiyun 			 * and max sessions, not running already,
8665*4882a593Smuzhiyun 			 * schedule next target for RTT
8666*4882a593Smuzhiyun 			 */
8667*4882a593Smuzhiyun 			if ((!dhd_rtt_nan_all_directed_sessions_triggered(dhd)) &&
8668*4882a593Smuzhiyun 					dhd_rtt_nan_directed_sessions_allowed(dhd)) {
8669*4882a593Smuzhiyun 				/* Find and set next directed target */
8670*4882a593Smuzhiyun 				dhd_rtt_set_next_target_idx(dhd,
8671*4882a593Smuzhiyun 					(dhd_rtt_get_cur_target_idx(dhd) + 1));
8672*4882a593Smuzhiyun 				/* schedule RTT */
8673*4882a593Smuzhiyun 				dhd_rtt_schedule_rtt_work_thread(dhd,
8674*4882a593Smuzhiyun 					RTT_SCHED_RNG_RESP_IND);
8675*4882a593Smuzhiyun 			}
8676*4882a593Smuzhiyun 			break;
8677*4882a593Smuzhiyun 		}
8678*4882a593Smuzhiyun 		/*
8679*4882a593Smuzhiyun 		ASSERT(dhd_rtt_is_geofence_setup_inprog_with_peer(dhd,
8680*4882a593Smuzhiyun 			&rng_inst->peer_addr));
8681*4882a593Smuzhiyun 		*/
8682*4882a593Smuzhiyun 		if (!dhd_rtt_is_geofence_setup_inprog_with_peer(dhd,
8683*4882a593Smuzhiyun 			&rng_inst->peer_addr)) {
8684*4882a593Smuzhiyun 			WL_INFORM_MEM(("Resp Indicator received for not in prog range peer\n"));
8685*4882a593Smuzhiyun 			break;
8686*4882a593Smuzhiyun 		}
8687*4882a593Smuzhiyun 		/* Unset geof ranging setup status */
8688*4882a593Smuzhiyun 		dhd_rtt_update_geofence_sessions_cnt(dhd, FALSE, &rng_inst->peer_addr);
8689*4882a593Smuzhiyun 		/* Increase geofence session count */
8690*4882a593Smuzhiyun 		dhd_rtt_update_geofence_sessions_cnt(dhd, TRUE, NULL);
8691*4882a593Smuzhiyun 		wl_cfgnan_reset_geofence_ranging(cfg,
8692*4882a593Smuzhiyun 			rng_inst, RTT_SCHED_RNG_RESP_IND, TRUE);
8693*4882a593Smuzhiyun 		break;
8694*4882a593Smuzhiyun 	}
8695*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8696*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
8697*4882a593Smuzhiyun 	/*
8698*4882a593Smuzhiyun 	 * Data path event data is received in a common event struct;
8699*4882a593Smuzhiyun 	 * all such events are handled in one case, hence the fall through is intentional
8700*4882a593Smuzhiyun 	 */
8701*4882a593Smuzhiyun 	case WL_NAN_EVENT_PEER_DATAPATH_IND:
8702*4882a593Smuzhiyun 	case WL_NAN_EVENT_DATAPATH_ESTB:
8703*4882a593Smuzhiyun 	case WL_NAN_EVENT_DATAPATH_END: {
8704*4882a593Smuzhiyun 		ret = wl_nan_dp_cmn_event_data(cfg, event_data, data_len,
8705*4882a593Smuzhiyun 				&tlvs_offset, &nan_opts_len,
8706*4882a593Smuzhiyun 				event_num, &hal_event_id, nan_event_data);
8707*4882a593Smuzhiyun 		/* Avoiding optional param parsing for DP END Event */
8708*4882a593Smuzhiyun 		if (event_num == WL_NAN_EVENT_DATAPATH_END) {
8709*4882a593Smuzhiyun 			nan_opts_len = 0;
8710*4882a593Smuzhiyun 			xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
8711*4882a593Smuzhiyun 		}
8712*4882a593Smuzhiyun 		if (unlikely(ret)) {
8713*4882a593Smuzhiyun 			WL_ERR(("nan dp common event data parse failed\n"));
8714*4882a593Smuzhiyun 			goto exit;
8715*4882a593Smuzhiyun 		}
8716*4882a593Smuzhiyun 		break;
8717*4882a593Smuzhiyun 	}
8718*4882a593Smuzhiyun 	case WL_NAN_EVENT_PEER_DATAPATH_RESP:
8719*4882a593Smuzhiyun 	{
8720*4882a593Smuzhiyun 		/* No action - intentionally added to avoid prints when this event is received */
8721*4882a593Smuzhiyun 		break;
8722*4882a593Smuzhiyun 	}
8723*4882a593Smuzhiyun 	default:
8724*4882a593Smuzhiyun 		WL_ERR_RLMT(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
8725*4882a593Smuzhiyun 		ret = BCME_ERROR;
8726*4882a593Smuzhiyun 		goto exit;
8727*4882a593Smuzhiyun 	}
8728*4882a593Smuzhiyun 
8729*4882a593Smuzhiyun 	if (nan_opts_len) {
8730*4882a593Smuzhiyun 		tlv_buf = (uint8 *)event_data + tlvs_offset;
8731*4882a593Smuzhiyun 		/* Extract event data tlvs and pass their resp to cb fn */
8732*4882a593Smuzhiyun 		ret = bcm_unpack_xtlv_buf((void *)&nan_event_ctx, (const uint8*)tlv_buf,
8733*4882a593Smuzhiyun 			nan_opts_len, xtlv_opt, wl_cfgnan_set_vars_cbfn);
8734*4882a593Smuzhiyun 		if (ret != BCME_OK) {
8735*4882a593Smuzhiyun 			WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret));
8736*4882a593Smuzhiyun 		}
8737*4882a593Smuzhiyun 	}
8738*4882a593Smuzhiyun 
8739*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
8740*4882a593Smuzhiyun 	if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
8741*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8742*4882a593Smuzhiyun 		bool send_disc_result;
8743*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8744*4882a593Smuzhiyun 		u16 update_flags = 0;
8745*4882a593Smuzhiyun 
8746*4882a593Smuzhiyun 		WL_TRACE(("Cache disc res\n"));
8747*4882a593Smuzhiyun 		ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data, &update_flags);
8748*4882a593Smuzhiyun 		if (ret) {
8749*4882a593Smuzhiyun 			WL_ERR(("Failed to cache disc result ret %d\n", ret));
8750*4882a593Smuzhiyun 		}
8751*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8752*4882a593Smuzhiyun 		if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
8753*4882a593Smuzhiyun 			ret = wl_cfgnan_check_disc_result_for_ranging(cfg,
8754*4882a593Smuzhiyun 				nan_event_data, &send_disc_result);
8755*4882a593Smuzhiyun 			if ((ret == BCME_OK) && (send_disc_result == FALSE)) {
8756*4882a593Smuzhiyun 				/* Avoid sending disc result instantly and exit */
8757*4882a593Smuzhiyun 				goto exit;
8758*4882a593Smuzhiyun 			} else {
8759*4882a593Smuzhiyun 				/* TODO: should we terminate service if ranging fails ? */
8760*4882a593Smuzhiyun 				WL_INFORM_MEM(("Ranging failed or not required, " MACDBG
8761*4882a593Smuzhiyun 					" sub_id:%d , pub_id:%d, ret = %d, send_disc_result = %d\n",
8762*4882a593Smuzhiyun 					MAC2STRDBG(&nan_event_data->remote_nmi),
8763*4882a593Smuzhiyun 					nan_event_data->sub_id, nan_event_data->pub_id,
8764*4882a593Smuzhiyun 					ret, send_disc_result));
8765*4882a593Smuzhiyun 			}
8766*4882a593Smuzhiyun 		} else {
8767*4882a593Smuzhiyun 			nan_svc_info_t *svc_info = wl_cfgnan_get_svc_inst(cfg,
8768*4882a593Smuzhiyun 				nan_event_data->sub_id, 0);
8769*4882a593Smuzhiyun 			if (svc_info && svc_info->ranging_required &&
8770*4882a593Smuzhiyun 				(update_flags & NAN_DISC_CACHE_PARAM_SDE_CONTROL)) {
8771*4882a593Smuzhiyun 				wl_cfgnan_ranging_clear_publish(cfg,
8772*4882a593Smuzhiyun 					&nan_event_data->remote_nmi, nan_event_data->sub_id);
8773*4882a593Smuzhiyun 			}
8774*4882a593Smuzhiyun 		}
8775*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8776*4882a593Smuzhiyun 
8777*4882a593Smuzhiyun 		/*
8778*4882a593Smuzhiyun 		 * If a tx match filter is present as part of an active subscribe, keep the
8779*4882a593Smuzhiyun 		 * same filter values in the discovery results as well.
8780*4882a593Smuzhiyun 		 */
8781*4882a593Smuzhiyun 		if (nan_event_data->sub_id == nan_event_data->requestor_id) {
8782*4882a593Smuzhiyun 			svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
8783*4882a593Smuzhiyun 			if (svc && svc->tx_match_filter_len) {
8784*4882a593Smuzhiyun 				nan_event_data->tx_match_filter.dlen = svc->tx_match_filter_len;
8785*4882a593Smuzhiyun 				nan_event_data->tx_match_filter.data =
8786*4882a593Smuzhiyun 					MALLOCZ(cfg->osh, svc->tx_match_filter_len);
8787*4882a593Smuzhiyun 				if (!nan_event_data->tx_match_filter.data) {
8788*4882a593Smuzhiyun 					WL_ERR(("%s: tx_match_filter_data alloc failed\n",
8789*4882a593Smuzhiyun 							__FUNCTION__));
8790*4882a593Smuzhiyun 					nan_event_data->tx_match_filter.dlen = 0;
8791*4882a593Smuzhiyun 					ret = -ENOMEM;
8792*4882a593Smuzhiyun 					goto exit;
8793*4882a593Smuzhiyun 				}
8794*4882a593Smuzhiyun 				ret = memcpy_s(nan_event_data->tx_match_filter.data,
8795*4882a593Smuzhiyun 						nan_event_data->tx_match_filter.dlen,
8796*4882a593Smuzhiyun 						svc->tx_match_filter, svc->tx_match_filter_len);
8797*4882a593Smuzhiyun 				if (ret != BCME_OK) {
8798*4882a593Smuzhiyun 					WL_ERR(("Failed to copy tx match filter data\n"));
8799*4882a593Smuzhiyun 					goto exit;
8800*4882a593Smuzhiyun 				}
8801*4882a593Smuzhiyun 			}
8802*4882a593Smuzhiyun 		}
8803*4882a593Smuzhiyun 	}
8804*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
8805*4882a593Smuzhiyun 
8806*4882a593Smuzhiyun 	WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
8807*4882a593Smuzhiyun 			nan_event_to_str(event_num), event_num, hal_event_id));
8808*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
8809*4882a593Smuzhiyun 	ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
8810*4882a593Smuzhiyun 			hal_event_id, nan_event_data);
8811*4882a593Smuzhiyun 	if (ret != BCME_OK) {
8812*4882a593Smuzhiyun 		WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
8813*4882a593Smuzhiyun 				nan_event_to_str(event_num), event_num));
8814*4882a593Smuzhiyun 	}
8815*4882a593Smuzhiyun #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
8816*4882a593Smuzhiyun 
8817*4882a593Smuzhiyun exit:
8818*4882a593Smuzhiyun 	wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
8819*4882a593Smuzhiyun 
8820*4882a593Smuzhiyun 	NAN_MUTEX_UNLOCK();
8821*4882a593Smuzhiyun 	NAN_DBG_EXIT();
8822*4882a593Smuzhiyun 	return ret;
8823*4882a593Smuzhiyun }
8824*4882a593Smuzhiyun 
8825*4882a593Smuzhiyun #ifdef WL_NAN_DISC_CACHE
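/*
 * Cache a discovery result from a subscribe match event.
 * Entries are keyed on the peer NMI and the service hash: if a matching entry
 * already exists, only the SDE control flags are refreshed (reported back via
 * disc_cache_update_flags); otherwise the result, service info and tx match
 * filter are copied into the first free slot of the discovery cache.
 */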
8826*4882a593Smuzhiyun static int
8827*4882a593Smuzhiyun wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
8828*4882a593Smuzhiyun 	u16 *disc_cache_update_flags)
8829*4882a593Smuzhiyun {
8830*4882a593Smuzhiyun 	nan_event_data_t* disc = (nan_event_data_t*)data;
8831*4882a593Smuzhiyun 	int i, add_index = 0;
8832*4882a593Smuzhiyun 	int ret = BCME_OK;
8833*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
8834*4882a593Smuzhiyun 	nan_disc_result_cache *disc_res = nancfg->nan_disc_cache;
8835*4882a593Smuzhiyun 	*disc_cache_update_flags = 0;
8836*4882a593Smuzhiyun 
8837*4882a593Smuzhiyun 	if (!nancfg->nan_enable) {
8838*4882a593Smuzhiyun 		WL_DBG(("nan not enabled"));
8839*4882a593Smuzhiyun 		return BCME_NOTENABLED;
8840*4882a593Smuzhiyun 	}
8841*4882a593Smuzhiyun 	if (nancfg->nan_disc_count == NAN_MAX_CACHE_DISC_RESULT) {
8842*4882a593Smuzhiyun 		WL_DBG(("cache full"));
8843*4882a593Smuzhiyun 		ret = BCME_NORESOURCE;
8844*4882a593Smuzhiyun 		goto done;
8845*4882a593Smuzhiyun 	}
8846*4882a593Smuzhiyun 
8847*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
8848*4882a593Smuzhiyun 		if (!disc_res[i].valid) {
8849*4882a593Smuzhiyun 			add_index = i;
8850*4882a593Smuzhiyun 			continue;
8851*4882a593Smuzhiyun 		}
8852*4882a593Smuzhiyun 		if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
8853*4882a593Smuzhiyun 			!memcmp(disc_res[i].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN)) {
8854*4882a593Smuzhiyun 			WL_DBG(("cache entry already present, i = %d", i));
8855*4882a593Smuzhiyun 			/* Update needed parameters here */
8856*4882a593Smuzhiyun 			if (disc_res[i].sde_control_flag != disc->sde_control_flag) {
8857*4882a593Smuzhiyun 				disc_res[i].sde_control_flag = disc->sde_control_flag;
8858*4882a593Smuzhiyun 				*disc_cache_update_flags |= NAN_DISC_CACHE_PARAM_SDE_CONTROL;
8859*4882a593Smuzhiyun 			}
8860*4882a593Smuzhiyun 			ret = BCME_OK; /* entry already present */
8861*4882a593Smuzhiyun 			goto done;
8862*4882a593Smuzhiyun 		}
8863*4882a593Smuzhiyun 	}
8864*4882a593Smuzhiyun 	WL_DBG(("adding cache entry: add_index = %d\n", add_index));
8865*4882a593Smuzhiyun 	disc_res[add_index].valid = 1;
8866*4882a593Smuzhiyun 	disc_res[add_index].pub_id = disc->pub_id;
8867*4882a593Smuzhiyun 	disc_res[add_index].sub_id = disc->sub_id;
8868*4882a593Smuzhiyun 	disc_res[add_index].publish_rssi = disc->publish_rssi;
8869*4882a593Smuzhiyun 	disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
8870*4882a593Smuzhiyun 	disc_res[add_index].sde_control_flag = disc->sde_control_flag;
8871*4882a593Smuzhiyun 	ret = memcpy_s(&disc_res[add_index].peer, ETHER_ADDR_LEN,
8872*4882a593Smuzhiyun 			&disc->remote_nmi, ETHER_ADDR_LEN);
8873*4882a593Smuzhiyun 	if (ret != BCME_OK) {
8874*4882a593Smuzhiyun 		WL_ERR(("Failed to copy remote nmi\n"));
8875*4882a593Smuzhiyun 		goto done;
8876*4882a593Smuzhiyun 	}
8877*4882a593Smuzhiyun 	ret = memcpy_s(disc_res[add_index].svc_hash, WL_NAN_SVC_HASH_LEN,
8878*4882a593Smuzhiyun 			disc->svc_name, WL_NAN_SVC_HASH_LEN);
8879*4882a593Smuzhiyun 	if (ret != BCME_OK) {
8880*4882a593Smuzhiyun 		WL_ERR(("Failed to copy svc hash\n"));
8881*4882a593Smuzhiyun 		goto done;
8882*4882a593Smuzhiyun 	}
8883*4882a593Smuzhiyun 
8884*4882a593Smuzhiyun 	if (disc->svc_info.dlen && disc->svc_info.data) {
8885*4882a593Smuzhiyun 		disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
8886*4882a593Smuzhiyun 		disc_res[add_index].svc_info.data =
8887*4882a593Smuzhiyun 			MALLOCZ(cfg->osh, disc_res[add_index].svc_info.dlen);
8888*4882a593Smuzhiyun 		if (!disc_res[add_index].svc_info.data) {
8889*4882a593Smuzhiyun 			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
8890*4882a593Smuzhiyun 			disc_res[add_index].svc_info.dlen = 0;
8891*4882a593Smuzhiyun 			ret = BCME_NOMEM;
8892*4882a593Smuzhiyun 			goto done;
8893*4882a593Smuzhiyun 		}
8894*4882a593Smuzhiyun 		ret = memcpy_s(disc_res[add_index].svc_info.data, disc_res[add_index].svc_info.dlen,
8895*4882a593Smuzhiyun 				disc->svc_info.data, disc->svc_info.dlen);
8896*4882a593Smuzhiyun 		if (ret != BCME_OK) {
8897*4882a593Smuzhiyun 			WL_ERR(("Failed to copy svc info\n"));
8898*4882a593Smuzhiyun 			goto done;
8899*4882a593Smuzhiyun 		}
8900*4882a593Smuzhiyun 	}
8901*4882a593Smuzhiyun 	if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
8902*4882a593Smuzhiyun 		disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
8903*4882a593Smuzhiyun 		disc_res[add_index].tx_match_filter.data =
8904*4882a593Smuzhiyun 			MALLOCZ(cfg->osh, disc_res[add_index].tx_match_filter.dlen);
8905*4882a593Smuzhiyun 		if (!disc_res[add_index].tx_match_filter.data) {
8906*4882a593Smuzhiyun 			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
8907*4882a593Smuzhiyun 			disc_res[add_index].tx_match_filter.dlen = 0;
8908*4882a593Smuzhiyun 			ret = BCME_NOMEM;
8909*4882a593Smuzhiyun 			goto done;
8910*4882a593Smuzhiyun 		}
8911*4882a593Smuzhiyun 		ret = memcpy_s(disc_res[add_index].tx_match_filter.data,
8912*4882a593Smuzhiyun 			disc_res[add_index].tx_match_filter.dlen,
8913*4882a593Smuzhiyun 			disc->tx_match_filter.data, disc->tx_match_filter.dlen);
8914*4882a593Smuzhiyun 		if (ret != BCME_OK) {
8915*4882a593Smuzhiyun 			WL_ERR(("Failed to copy tx match filter\n"));
8916*4882a593Smuzhiyun 			goto done;
8917*4882a593Smuzhiyun 		}
8918*4882a593Smuzhiyun 	}
8919*4882a593Smuzhiyun 	nancfg->nan_disc_count++;
8920*4882a593Smuzhiyun 	WL_DBG(("cfg->nan_disc_count = %d\n", nancfg->nan_disc_count));
8921*4882a593Smuzhiyun 
8922*4882a593Smuzhiyun done:
8923*4882a593Smuzhiyun 	return ret;
8924*4882a593Smuzhiyun }
8925*4882a593Smuzhiyun 
8926*4882a593Smuzhiyun #ifdef RTT_SUPPORT
8927*4882a593Smuzhiyun /* Sending command to FW for clearing discovery cache info in FW */
8928*4882a593Smuzhiyun static int
8929*4882a593Smuzhiyun wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id)
8930*4882a593Smuzhiyun {
8931*4882a593Smuzhiyun 	s32 ret = BCME_OK;
8932*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
8933*4882a593Smuzhiyun 	uint32 status;
8934*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
8935*4882a593Smuzhiyun 	uint8 buf[NAN_IOCTL_BUF_SIZE];
8936*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf;
8937*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd;
8938*4882a593Smuzhiyun 	uint16 subcmd_len;
8939*4882a593Smuzhiyun 
8940*4882a593Smuzhiyun 	bzero(buf, sizeof(buf));
8941*4882a593Smuzhiyun 	nan_buf = (bcm_iov_batch_buf_t*)buf;
8942*4882a593Smuzhiyun 
8943*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
8944*4882a593Smuzhiyun 	nan_buf->count = 0;
8945*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
8946*4882a593Smuzhiyun 
8947*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
8948*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
8949*4882a593Smuzhiyun 			sizeof(sub_id), &subcmd_len);
8950*4882a593Smuzhiyun 	if (unlikely(ret)) {
8951*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
8952*4882a593Smuzhiyun 		goto fail;
8953*4882a593Smuzhiyun 	}
8954*4882a593Smuzhiyun 
8955*4882a593Smuzhiyun 	/* Fill the sub_command block */
8956*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_SD_DISC_CACHE_CLEAR);
8957*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(sub_id);
8958*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
8959*4882a593Smuzhiyun 	/* Data size len vs buffer len check is already done above.
8960*4882a593Smuzhiyun 	 * So, short buffer error is impossible.
8961*4882a593Smuzhiyun 	 */
8962*4882a593Smuzhiyun 	(void)memcpy_s(sub_cmd->data, (nan_buf_size - OFFSETOF(bcm_iov_batch_subcmd_t, data)),
8963*4882a593Smuzhiyun 			&sub_id, sizeof(sub_id));
8964*4882a593Smuzhiyun 	/* adjust iov data len to the end of last data record */
8965*4882a593Smuzhiyun 	nan_buf_size -= (subcmd_len);
8966*4882a593Smuzhiyun 
8967*4882a593Smuzhiyun 	nan_buf->count++;
8968*4882a593Smuzhiyun 	nan_buf->is_set = true;
8969*4882a593Smuzhiyun 	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
8970*4882a593Smuzhiyun 	/* Same src and dest len here */
8971*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
8972*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
8973*4882a593Smuzhiyun 			nan_buf, nan_buf_size, &status,
8974*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
8975*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
8976*4882a593Smuzhiyun 		WL_ERR(("Disc cache clear handler failed ret %d status %d\n",
8977*4882a593Smuzhiyun 				ret, status));
8978*4882a593Smuzhiyun 		goto fail;
8979*4882a593Smuzhiyun 	}
8980*4882a593Smuzhiyun 
8981*4882a593Smuzhiyun fail:
8982*4882a593Smuzhiyun 	return ret;
8983*4882a593Smuzhiyun }
8984*4882a593Smuzhiyun #endif /* RTT_SUPPORT */
8985*4882a593Smuzhiyun 
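/*
 * Invalidate every cached discovery result matching the given local
 * subscribe id, freeing the service info and tx match filter buffers and
 * decrementing the cache count for each entry removed.
 */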
8986*4882a593Smuzhiyun static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
8987*4882a593Smuzhiyun 		uint8 local_subid)
8988*4882a593Smuzhiyun {
8989*4882a593Smuzhiyun 	int i;
8990*4882a593Smuzhiyun 	int ret = BCME_NOTFOUND;
8991*4882a593Smuzhiyun 	nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
8992*4882a593Smuzhiyun 	if (!cfg->nancfg->nan_enable) {
8993*4882a593Smuzhiyun 		WL_DBG(("nan not enabled\n"));
8994*4882a593Smuzhiyun 		ret = BCME_NOTENABLED;
8995*4882a593Smuzhiyun 		goto done;
8996*4882a593Smuzhiyun 	}
8997*4882a593Smuzhiyun 	for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
8998*4882a593Smuzhiyun 		if ((disc_res[i].valid) && (disc_res[i].sub_id == local_subid)) {
8999*4882a593Smuzhiyun 			WL_TRACE(("make cache entry invalid\n"));
9000*4882a593Smuzhiyun 			if (disc_res[i].tx_match_filter.data) {
9001*4882a593Smuzhiyun 				MFREE(cfg->osh, disc_res[i].tx_match_filter.data,
9002*4882a593Smuzhiyun 					disc_res[i].tx_match_filter.dlen);
9003*4882a593Smuzhiyun 			}
9004*4882a593Smuzhiyun 			if (disc_res[i].svc_info.data) {
9005*4882a593Smuzhiyun 				MFREE(cfg->osh, disc_res[i].svc_info.data,
9006*4882a593Smuzhiyun 					disc_res[i].svc_info.dlen);
9007*4882a593Smuzhiyun 			}
9008*4882a593Smuzhiyun 			bzero(&disc_res[i], sizeof(disc_res[i]));
9009*4882a593Smuzhiyun 			cfg->nancfg->nan_disc_count--;
9010*4882a593Smuzhiyun 			ret = BCME_OK;
9011*4882a593Smuzhiyun 		}
9012*4882a593Smuzhiyun 	}
9013*4882a593Smuzhiyun 	if (ret == BCME_NOTFOUND) {
		WL_DBG(("couldn't find entry\n"));
	}
9014*4882a593Smuzhiyun done:
9015*4882a593Smuzhiyun 	return ret;
9016*4882a593Smuzhiyun }
9017*4882a593Smuzhiyun 
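/*
 * Look up a cached discovery result by peer address and, if remote_pubid
 * is non-zero, by the remote publish id as well; returns NULL if no
 * entry matches.
 */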
9018*4882a593Smuzhiyun static nan_disc_result_cache *
9019*4882a593Smuzhiyun wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
9020*4882a593Smuzhiyun 	struct ether_addr *peer)
9021*4882a593Smuzhiyun {
9022*4882a593Smuzhiyun 	int i;
9023*4882a593Smuzhiyun 	nan_disc_result_cache *disc_res = cfg->nancfg->nan_disc_cache;
9024*4882a593Smuzhiyun 	if (remote_pubid) {
9025*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
9026*4882a593Smuzhiyun 			if ((disc_res[i].pub_id == remote_pubid) &&
9027*4882a593Smuzhiyun 					!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
9028*4882a593Smuzhiyun 				WL_DBG(("Found entry: i = %d\n", i));
9029*4882a593Smuzhiyun 				return &disc_res[i];
9030*4882a593Smuzhiyun 			}
9031*4882a593Smuzhiyun 		}
9032*4882a593Smuzhiyun 	} else {
9033*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
9034*4882a593Smuzhiyun 			if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
9035*4882a593Smuzhiyun 				WL_DBG(("Found entry: %d\n", i));
9036*4882a593Smuzhiyun 				return &disc_res[i];
9037*4882a593Smuzhiyun 			}
9038*4882a593Smuzhiyun 		}
9039*4882a593Smuzhiyun 	}
9040*4882a593Smuzhiyun 	return NULL;
9041*4882a593Smuzhiyun }
9042*4882a593Smuzhiyun #endif /* WL_NAN_DISC_CACHE */
9043*4882a593Smuzhiyun 
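/*
 * Track NAN data path creation/teardown via a count of active NDP ids.
 * On add, record the NDP id and update concurrent-roam handling; on
 * delete, clear the matching entry and, once the last NDP is gone,
 * restore roam handling and process any pending NAN disable request.
 */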
9044*4882a593Smuzhiyun static void
9045*4882a593Smuzhiyun wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
9046*4882a593Smuzhiyun 	nan_data_path_id ndp_id)
9047*4882a593Smuzhiyun {
9048*4882a593Smuzhiyun 	uint8 i;
9049*4882a593Smuzhiyun 	bool match_found = false;
9050*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
9051*4882a593Smuzhiyun 	/* As of now, we don't see a need to know which ndp is active,
9052*4882a593Smuzhiyun 	 * so we just keep track of ndps via a count. If we need to know
9053*4882a593Smuzhiyun 	 * the status of each ndp based on its ndp id, this implementation
9054*4882a593Smuzhiyun 	 * needs to change to use a bit mask.
9055*4882a593Smuzhiyun 	 */
9056*4882a593Smuzhiyun 
9057*4882a593Smuzhiyun 	if (add) {
9058*4882a593Smuzhiyun 		/* Find the first free ndp_id slot */
9059*4882a593Smuzhiyun 		for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
9060*4882a593Smuzhiyun 			if (!nancfg->ndp_id[i]) {
9061*4882a593Smuzhiyun 				WL_TRACE(("Found empty field\n"));
9062*4882a593Smuzhiyun 				break;
9063*4882a593Smuzhiyun 			}
9064*4882a593Smuzhiyun 		}
9065*4882a593Smuzhiyun 
9066*4882a593Smuzhiyun 		if (i == NAN_MAX_NDP_PEER) {
9067*4882a593Smuzhiyun 			WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
9068*4882a593Smuzhiyun 			return;
9069*4882a593Smuzhiyun 		}
9070*4882a593Smuzhiyun 		if (ndp_id) {
9071*4882a593Smuzhiyun 			nancfg->nan_dp_count++;
9072*4882a593Smuzhiyun 			nancfg->ndp_id[i] = ndp_id;
9073*4882a593Smuzhiyun 			WL_DBG(("%s:Added ndp id = [%d] at i = %d\n",
9074*4882a593Smuzhiyun 					__FUNCTION__, nancfg->ndp_id[i], i));
9075*4882a593Smuzhiyun 			wl_cfg80211_concurrent_roam(cfg, true);
9076*4882a593Smuzhiyun 		}
9077*4882a593Smuzhiyun 	} else {
9078*4882a593Smuzhiyun 		ASSERT(nancfg->nan_dp_count);
9079*4882a593Smuzhiyun 		if (ndp_id) {
9080*4882a593Smuzhiyun 			for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
9081*4882a593Smuzhiyun 				if (nancfg->ndp_id[i] == ndp_id) {
9082*4882a593Smuzhiyun 					nancfg->ndp_id[i] = 0;
9083*4882a593Smuzhiyun 					WL_DBG(("%s:Removed ndp id = [%d] from i = %d\n",
9084*4882a593Smuzhiyun 						__FUNCTION__, ndp_id, i));
9085*4882a593Smuzhiyun 					match_found = true;
9086*4882a593Smuzhiyun 					if (nancfg->nan_dp_count) {
9087*4882a593Smuzhiyun 						nancfg->nan_dp_count--;
9088*4882a593Smuzhiyun 					}
9089*4882a593Smuzhiyun 					break;
9090*4882a593Smuzhiyun 				} else {
9091*4882a593Smuzhiyun 					WL_DBG(("couldn't find entry for ndp id = %d\n",
9092*4882a593Smuzhiyun 						ndp_id));
9093*4882a593Smuzhiyun 				}
9094*4882a593Smuzhiyun 			}
9095*4882a593Smuzhiyun 			if (match_found == false) {
9096*4882a593Smuzhiyun 				WL_ERR(("Received unsaved NDP Id = %d !!\n", ndp_id));
9097*4882a593Smuzhiyun 			} else {
9098*4882a593Smuzhiyun 				if (nancfg->nan_dp_count == 0) {
9099*4882a593Smuzhiyun 					wl_cfg80211_concurrent_roam(cfg, false);
9100*4882a593Smuzhiyun 					wl_cfgnan_immediate_nan_disable_pending(cfg);
9101*4882a593Smuzhiyun 				}
9102*4882a593Smuzhiyun 			}
9103*4882a593Smuzhiyun 
9104*4882a593Smuzhiyun 		}
9105*4882a593Smuzhiyun 	}
9106*4882a593Smuzhiyun 	WL_INFORM_MEM(("NAN_DP_COUNT: %d\n", nancfg->nan_dp_count));
9107*4882a593Smuzhiyun }
9108*4882a593Smuzhiyun 
9109*4882a593Smuzhiyun bool
9110*4882a593Smuzhiyun wl_cfgnan_is_dp_active(struct net_device *ndev)
9111*4882a593Smuzhiyun {
9112*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg;
9113*4882a593Smuzhiyun 	bool nan_dp;
9114*4882a593Smuzhiyun 
9115*4882a593Smuzhiyun 	if (!ndev || !ndev->ieee80211_ptr) {
9116*4882a593Smuzhiyun 		WL_ERR(("ndev/wdev null\n"));
9117*4882a593Smuzhiyun 		return false;
9118*4882a593Smuzhiyun 	}
9119*4882a593Smuzhiyun 
9120*4882a593Smuzhiyun 	cfg =  wiphy_priv(ndev->ieee80211_ptr->wiphy);
9121*4882a593Smuzhiyun 	nan_dp = cfg->nancfg->nan_dp_count ? true : false;
9122*4882a593Smuzhiyun 
9123*4882a593Smuzhiyun 	WL_DBG(("NAN DP status:%d\n", nan_dp));
9124*4882a593Smuzhiyun 	return nan_dp;
9125*4882a593Smuzhiyun }
9126*4882a593Smuzhiyun 
9127*4882a593Smuzhiyun static s32
9128*4882a593Smuzhiyun wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg)
9129*4882a593Smuzhiyun {
9130*4882a593Smuzhiyun 	int i;
9131*4882a593Smuzhiyun 	for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
9132*4882a593Smuzhiyun 		if (!cfg->nancfg->ndi[i].in_use) {
9133*4882a593Smuzhiyun 			/* Free interface, use it */
9134*4882a593Smuzhiyun 			return i;
9135*4882a593Smuzhiyun 		}
9136*4882a593Smuzhiyun 	}
9137*4882a593Smuzhiyun 	/* Don't have a free interface */
9138*4882a593Smuzhiyun 	return WL_INVALID;
9139*4882a593Smuzhiyun }
9140*4882a593Smuzhiyun 
9141*4882a593Smuzhiyun static s32
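/*
 * Record the NDI name at the given free index and mark the slot as in
 * use; the interface itself is not created yet ('created' stays false).
 */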
9142*4882a593Smuzhiyun wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name)
9143*4882a593Smuzhiyun {
9144*4882a593Smuzhiyun 	u16 len;
9145*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
9146*4882a593Smuzhiyun 	if (!name || (idx < 0) || (idx >= cfg->nancfg->max_ndi_supported)) {
9147*4882a593Smuzhiyun 		return -EINVAL;
9148*4882a593Smuzhiyun 	}
9149*4882a593Smuzhiyun 
9150*4882a593Smuzhiyun 	/* Ensure ifname string size <= IFNAMSIZ including null termination */
9151*4882a593Smuzhiyun 	len = MIN(strlen(name), (IFNAMSIZ - 1));
9152*4882a593Smuzhiyun 	strncpy(nancfg->ndi[idx].ifname, name, len);
9153*4882a593Smuzhiyun 	nancfg->ndi[idx].ifname[len] = '\0';
9154*4882a593Smuzhiyun 	nancfg->ndi[idx].in_use = true;
9155*4882a593Smuzhiyun 	nancfg->ndi[idx].created = false;
9156*4882a593Smuzhiyun 
9157*4882a593Smuzhiyun 	/* NDI data slot populated; report success */
9158*4882a593Smuzhiyun 	return BCME_OK;
9159*4882a593Smuzhiyun }
9160*4882a593Smuzhiyun 
9161*4882a593Smuzhiyun static s32
9162*4882a593Smuzhiyun wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name)
9163*4882a593Smuzhiyun {
9164*4882a593Smuzhiyun 	u16 len;
9165*4882a593Smuzhiyun 	int i;
9166*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
9167*4882a593Smuzhiyun 
9168*4882a593Smuzhiyun 	if (!name) {
9169*4882a593Smuzhiyun 		return -EINVAL;
9170*4882a593Smuzhiyun 	}
9171*4882a593Smuzhiyun 
9172*4882a593Smuzhiyun 	len = MIN(strlen(name), IFNAMSIZ);
9173*4882a593Smuzhiyun 	for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
9174*4882a593Smuzhiyun 		if (strncmp(nancfg->ndi[i].ifname, name, len) == 0) {
9175*4882a593Smuzhiyun 			bzero(&nancfg->ndi[i].ifname, IFNAMSIZ);
9176*4882a593Smuzhiyun 			nancfg->ndi[i].in_use = false;
9177*4882a593Smuzhiyun 			nancfg->ndi[i].created = false;
9178*4882a593Smuzhiyun 			nancfg->ndi[i].nan_ndev = NULL;
9179*4882a593Smuzhiyun 			return i;
9180*4882a593Smuzhiyun 		}
9181*4882a593Smuzhiyun 	}
9182*4882a593Smuzhiyun 	return -EINVAL;
9183*4882a593Smuzhiyun }
9184*4882a593Smuzhiyun 
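/*
 * Delete the NAN data interface(s) matching nan_ndev: remove the netdev
 * via cfg80211 and clear the corresponding host-side NDI bookkeeping even
 * if the interface delete itself fails.
 */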
9185*4882a593Smuzhiyun s32
9186*4882a593Smuzhiyun wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg,
9187*4882a593Smuzhiyun 	struct net_device *nan_ndev)
9188*4882a593Smuzhiyun {
9189*4882a593Smuzhiyun 	s32 ret = BCME_OK;
9190*4882a593Smuzhiyun 	uint8 i = 0;
9191*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = cfg->nancfg;
9192*4882a593Smuzhiyun 
9193*4882a593Smuzhiyun 	for (i = 0; i < cfg->nancfg->max_ndi_supported; i++) {
9194*4882a593Smuzhiyun 		if (nancfg->ndi[i].in_use && nancfg->ndi[i].created &&
9195*4882a593Smuzhiyun 			(nancfg->ndi[i].nan_ndev == nan_ndev)) {
9196*4882a593Smuzhiyun 			WL_INFORM_MEM(("iface name: %s, cfg->nancfg->ndi[i].nan_ndev = %p"
9197*4882a593Smuzhiyun 					"  and nan_ndev = %p\n",
9198*4882a593Smuzhiyun 						(char*)nancfg->ndi[i].ifname,
9199*4882a593Smuzhiyun 						nancfg->ndi[i].nan_ndev, nan_ndev));
9200*4882a593Smuzhiyun 			ret = _wl_cfg80211_del_if(cfg, nan_ndev, NULL,
9201*4882a593Smuzhiyun 					(char*)nancfg->ndi[i].ifname);
9202*4882a593Smuzhiyun 			if (ret) {
9203*4882a593Smuzhiyun 				WL_ERR(("failed to del ndi [%d]\n", ret));
9204*4882a593Smuzhiyun 			}
9205*4882a593Smuzhiyun 			/*
9206*4882a593Smuzhiyun 			 * Even if the interface delete fails, continue and
9207*4882a593Smuzhiyun 			 * unconditionally clear the host-side ndi data and states
9208*4882a593Smuzhiyun 			 */
9209*4882a593Smuzhiyun 			if (wl_cfgnan_del_ndi_data(cfg,
9210*4882a593Smuzhiyun 				(char*)nancfg->ndi[i].ifname) < 0) {
9211*4882a593Smuzhiyun 				WL_ERR(("Failed to find matching data for ndi:%s\n",
9212*4882a593Smuzhiyun 					(char*)nancfg->ndi[i].ifname));
9213*4882a593Smuzhiyun 			}
9214*4882a593Smuzhiyun 		}
9215*4882a593Smuzhiyun 	}
9216*4882a593Smuzhiyun 	return ret;
9217*4882a593Smuzhiyun }
9218*4882a593Smuzhiyun 
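/*
 * Query the firmware NAN configuration status (WL_NAN_CMD_CFG_STATUS)
 * using a single-command batched IOV and copy the returned
 * wl_nan_conf_status_t into the caller-provided nan_status.
 */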
9219*4882a593Smuzhiyun int
9220*4882a593Smuzhiyun wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status)
9221*4882a593Smuzhiyun {
9222*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
9223*4882a593Smuzhiyun 	uint16 subcmd_len;
9224*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
9225*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
9226*4882a593Smuzhiyun 	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
9227*4882a593Smuzhiyun 	wl_nan_conf_status_t *nstatus = NULL;
9228*4882a593Smuzhiyun 	uint32 status;
9229*4882a593Smuzhiyun 	s32 ret = BCME_OK;
9230*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
9231*4882a593Smuzhiyun 	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
9232*4882a593Smuzhiyun 	NAN_DBG_ENTER();
9233*4882a593Smuzhiyun 
9234*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
9235*4882a593Smuzhiyun 	if (!nan_buf) {
9236*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
9237*4882a593Smuzhiyun 		ret = BCME_NOMEM;
9238*4882a593Smuzhiyun 		goto fail;
9239*4882a593Smuzhiyun 	}
9240*4882a593Smuzhiyun 
9241*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
9242*4882a593Smuzhiyun 	nan_buf->count = 0;
9243*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
9244*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
9245*4882a593Smuzhiyun 
9246*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
9247*4882a593Smuzhiyun 			sizeof(*nstatus), &subcmd_len);
9248*4882a593Smuzhiyun 	if (unlikely(ret)) {
9249*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
9250*4882a593Smuzhiyun 		goto fail;
9251*4882a593Smuzhiyun 	}
9252*4882a593Smuzhiyun 
9253*4882a593Smuzhiyun 	nstatus = (wl_nan_conf_status_t *)sub_cmd->data;
9254*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_CFG_STATUS);
9255*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nstatus);
9256*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
9257*4882a593Smuzhiyun 	nan_buf_size -= subcmd_len;
9258*4882a593Smuzhiyun 	nan_buf->count = 1;
9259*4882a593Smuzhiyun 	nan_buf->is_set = false;
9260*4882a593Smuzhiyun 
9261*4882a593Smuzhiyun 	bzero(resp_buf, sizeof(resp_buf));
9262*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
9263*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
9264*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
9265*4882a593Smuzhiyun 		WL_ERR(("get nan status failed ret %d status %d \n",
9266*4882a593Smuzhiyun 			ret, status));
9267*4882a593Smuzhiyun 		goto fail;
9268*4882a593Smuzhiyun 	}
9269*4882a593Smuzhiyun 	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
9270*4882a593Smuzhiyun 	/* WL_NAN_CMD_CFG_STATUS return value doesn't use xtlv package */
9271*4882a593Smuzhiyun 	nstatus = ((wl_nan_conf_status_t *)&sub_cmd_resp->data[0]);
9272*4882a593Smuzhiyun 	ret = memcpy_s(nan_status, sizeof(wl_nan_conf_status_t),
9273*4882a593Smuzhiyun 			nstatus, sizeof(wl_nan_conf_status_t));
9274*4882a593Smuzhiyun 	if (ret != BCME_OK) {
9275*4882a593Smuzhiyun 		WL_ERR(("Failed to copy nan status\n"));
9276*4882a593Smuzhiyun 		goto fail;
9277*4882a593Smuzhiyun 	}
9278*4882a593Smuzhiyun 
9279*4882a593Smuzhiyun fail:
9280*4882a593Smuzhiyun 	if (nan_buf) {
9281*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
9282*4882a593Smuzhiyun 	}
9283*4882a593Smuzhiyun 	NAN_DBG_EXIT();
9284*4882a593Smuzhiyun 	return ret;
9285*4882a593Smuzhiyun }
9286*4882a593Smuzhiyun 
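/*
 * Pretty-print one availability schedule record: map id, period, slot
 * duration, slot count and the control channel used in each slot.
 */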
9287*4882a593Smuzhiyun s32
9288*4882a593Smuzhiyun wl_nan_print_avail_stats(const uint8 *data)
9289*4882a593Smuzhiyun {
9290*4882a593Smuzhiyun 	int idx;
9291*4882a593Smuzhiyun 	s32 ret = BCME_OK;
9292*4882a593Smuzhiyun 	int s_chan = 0;
9293*4882a593Smuzhiyun 	char pbuf[NAN_IOCTL_BUF_SIZE_MED];
9294*4882a593Smuzhiyun 	const wl_nan_stats_sched_t *sched = (const wl_nan_stats_sched_t *)data;
9295*4882a593Smuzhiyun #define SLOT_PRINT_SIZE 4
9296*4882a593Smuzhiyun 
9297*4882a593Smuzhiyun 	char *buf = pbuf;
9298*4882a593Smuzhiyun 	int remained_len = 0, bytes_written = 0;
9299*4882a593Smuzhiyun 	bzero(pbuf, sizeof(pbuf));
9300*4882a593Smuzhiyun 
9301*4882a593Smuzhiyun 	if ((sched->num_slot * SLOT_PRINT_SIZE) > (sizeof(pbuf)-1)) {
9302*4882a593Smuzhiyun 		WL_ERR(("overflowed slot number %d detected\n",
9303*4882a593Smuzhiyun 			sched->num_slot));
9304*4882a593Smuzhiyun 		ret = BCME_BUFTOOSHORT;
9305*4882a593Smuzhiyun 		goto exit;
9306*4882a593Smuzhiyun 	}
9307*4882a593Smuzhiyun 
9308*4882a593Smuzhiyun 	remained_len = NAN_IOCTL_BUF_SIZE_MED;
9309*4882a593Smuzhiyun 	bytes_written = snprintf(buf, remained_len, "Map ID:%u, %u/%u, Slot#:%u ",
9310*4882a593Smuzhiyun 		sched->map_id, sched->period, sched->slot_dur, sched->num_slot);
9311*4882a593Smuzhiyun 
9312*4882a593Smuzhiyun 	for (idx = 0; idx < sched->num_slot; idx++) {
9313*4882a593Smuzhiyun 		const wl_nan_stats_sched_slot_t *slot;
9314*4882a593Smuzhiyun 		slot = &sched->slot[idx];
9315*4882a593Smuzhiyun 		s_chan = 0;
9316*4882a593Smuzhiyun 
9317*4882a593Smuzhiyun 		if (!wf_chspec_malformed(slot->chanspec)) {
9318*4882a593Smuzhiyun 			s_chan = wf_chspec_ctlchan(slot->chanspec);
9319*4882a593Smuzhiyun 		}
9320*4882a593Smuzhiyun 
9321*4882a593Smuzhiyun 		buf += bytes_written;
9322*4882a593Smuzhiyun 		remained_len -= bytes_written;
9323*4882a593Smuzhiyun 		bytes_written = snprintf(buf, remained_len, "%03d|", s_chan);
9324*4882a593Smuzhiyun 
9325*4882a593Smuzhiyun 	}
9326*4882a593Smuzhiyun 	WL_INFORM_MEM(("%s\n", pbuf));
9327*4882a593Smuzhiyun exit:
9328*4882a593Smuzhiyun 	return ret;
9329*4882a593Smuzhiyun }
9330*4882a593Smuzhiyun 
9331*4882a593Smuzhiyun static int
9332*4882a593Smuzhiyun wl_nan_print_stats_tlvs(void *ctx, const uint8 *data, uint16 type, uint16 len)
9333*4882a593Smuzhiyun {
9334*4882a593Smuzhiyun 	int err = BCME_OK;
9335*4882a593Smuzhiyun 
9336*4882a593Smuzhiyun 	switch (type) {
9337*4882a593Smuzhiyun 		/* Avail stats xtlvs */
9338*4882a593Smuzhiyun 		case WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED:
9339*4882a593Smuzhiyun 			err = wl_nan_print_avail_stats(data);
9340*4882a593Smuzhiyun 			break;
9341*4882a593Smuzhiyun 		default:
9342*4882a593Smuzhiyun 			err = BCME_BADARG;
9343*4882a593Smuzhiyun 			WL_ERR(("Unknown xtlv type received: %x\n", type));
9344*4882a593Smuzhiyun 			break;
9345*4882a593Smuzhiyun 	}
9346*4882a593Smuzhiyun 
9347*4882a593Smuzhiyun 	return err;
9348*4882a593Smuzhiyun }
9349*4882a593Smuzhiyun 
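/*
 * Fetch NAN counters from firmware (WL_NAN_CMD_GEN_STATS), limited to the
 * availability module, and dump the local availability schedule via the
 * stats xtlv print callback.
 */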
9350*4882a593Smuzhiyun int
9351*4882a593Smuzhiyun wl_cfgnan_get_stats(struct bcm_cfg80211 *cfg)
9352*4882a593Smuzhiyun {
9353*4882a593Smuzhiyun 	bcm_iov_batch_buf_t *nan_buf = NULL;
9354*4882a593Smuzhiyun 	uint16 subcmd_len;
9355*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
9356*4882a593Smuzhiyun 	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
9357*4882a593Smuzhiyun 	uint8 *resp_buf = NULL;
9358*4882a593Smuzhiyun 	wl_nan_cmn_get_stat_t *get_stat = NULL;
9359*4882a593Smuzhiyun 	wl_nan_cmn_stat_t *stats = NULL;
9360*4882a593Smuzhiyun 	uint32 status;
9361*4882a593Smuzhiyun 	s32 ret = BCME_OK;
9362*4882a593Smuzhiyun 	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
9363*4882a593Smuzhiyun 	NAN_DBG_ENTER();
9364*4882a593Smuzhiyun 
9365*4882a593Smuzhiyun 	nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
9366*4882a593Smuzhiyun 	resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
9367*4882a593Smuzhiyun 	if (!nan_buf || !resp_buf) {
9368*4882a593Smuzhiyun 		WL_ERR(("%s: memory allocation failed\n", __func__));
9369*4882a593Smuzhiyun 		ret = BCME_NOMEM;
9370*4882a593Smuzhiyun 		goto fail;
9371*4882a593Smuzhiyun 	}
9372*4882a593Smuzhiyun 
9373*4882a593Smuzhiyun 	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
9374*4882a593Smuzhiyun 	nan_buf->count = 0;
9375*4882a593Smuzhiyun 	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
9376*4882a593Smuzhiyun 	sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
9377*4882a593Smuzhiyun 
9378*4882a593Smuzhiyun 	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
9379*4882a593Smuzhiyun 			sizeof(*get_stat), &subcmd_len);
9380*4882a593Smuzhiyun 	if (unlikely(ret)) {
9381*4882a593Smuzhiyun 		WL_ERR(("nan_sub_cmd check failed\n"));
9382*4882a593Smuzhiyun 		goto fail;
9383*4882a593Smuzhiyun 	}
9384*4882a593Smuzhiyun 
9385*4882a593Smuzhiyun 	get_stat = (wl_nan_cmn_get_stat_t *)sub_cmd->data;
9386*4882a593Smuzhiyun 	/* get only local availability stats */
9387*4882a593Smuzhiyun 	get_stat->modules_btmap = (1 << NAN_AVAIL);
9388*4882a593Smuzhiyun 	get_stat->operation = WLA_NAN_STATS_GET;
9389*4882a593Smuzhiyun 
9390*4882a593Smuzhiyun 	sub_cmd->id = htod16(WL_NAN_CMD_GEN_STATS);
9391*4882a593Smuzhiyun 	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*get_stat);
9392*4882a593Smuzhiyun 	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
9393*4882a593Smuzhiyun 	nan_buf_size -= subcmd_len;
9394*4882a593Smuzhiyun 	nan_buf->count = 1;
9395*4882a593Smuzhiyun 	nan_buf->is_set = false;
9396*4882a593Smuzhiyun 
9397*4882a593Smuzhiyun 	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg),
9398*4882a593Smuzhiyun 			cfg, nan_buf, nan_buf_size, &status,
9399*4882a593Smuzhiyun 			(void*)resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
9400*4882a593Smuzhiyun 	if (unlikely(ret) || unlikely(status)) {
9401*4882a593Smuzhiyun 		WL_ERR(("get nan stats failed ret %d status %d \n",
9402*4882a593Smuzhiyun 			ret, status));
9403*4882a593Smuzhiyun 		goto fail;
9404*4882a593Smuzhiyun 	}
9405*4882a593Smuzhiyun 
9406*4882a593Smuzhiyun 	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
9407*4882a593Smuzhiyun 
9408*4882a593Smuzhiyun 	stats = (wl_nan_cmn_stat_t *)&sub_cmd_resp->data[0];
9409*4882a593Smuzhiyun 
9410*4882a593Smuzhiyun 	if (stats->n_stats) {
9411*4882a593Smuzhiyun 		WL_INFORM_MEM((" == Aware Local Avail Schedule ==\n"));
9412*4882a593Smuzhiyun 		ret = bcm_unpack_xtlv_buf((void *)&stats->n_stats,
9413*4882a593Smuzhiyun 				(const uint8 *)&stats->stats_tlvs,
9414*4882a593Smuzhiyun 				stats->totlen - 8, BCM_IOV_CMD_OPT_ALIGN32,
9415*4882a593Smuzhiyun 				wl_nan_print_stats_tlvs);
9416*4882a593Smuzhiyun 	}
9417*4882a593Smuzhiyun fail:
9418*4882a593Smuzhiyun 	if (nan_buf) {
9419*4882a593Smuzhiyun 		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
9420*4882a593Smuzhiyun 	}
9421*4882a593Smuzhiyun 	if (resp_buf) {
9422*4882a593Smuzhiyun 		MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
9423*4882a593Smuzhiyun 	}
9424*4882a593Smuzhiyun 
9425*4882a593Smuzhiyun 	NAN_DBG_EXIT();
9426*4882a593Smuzhiyun 	return ret;
9427*4882a593Smuzhiyun }
9428*4882a593Smuzhiyun 
9429*4882a593Smuzhiyun int
9430*4882a593Smuzhiyun wl_cfgnan_attach(struct bcm_cfg80211 *cfg)
9431*4882a593Smuzhiyun {
9432*4882a593Smuzhiyun 	int err = BCME_OK;
9433*4882a593Smuzhiyun 	wl_nancfg_t *nancfg = NULL;
9434*4882a593Smuzhiyun 
9435*4882a593Smuzhiyun 	if (cfg) {
9436*4882a593Smuzhiyun 		cfg->nancfg = (wl_nancfg_t *)MALLOCZ(cfg->osh, sizeof(wl_nancfg_t));
9437*4882a593Smuzhiyun 		if (cfg->nancfg == NULL) {
9438*4882a593Smuzhiyun 			err = BCME_NOMEM;
9439*4882a593Smuzhiyun 			goto done;
9440*4882a593Smuzhiyun 		}
9441*4882a593Smuzhiyun 		cfg->nancfg->cfg = cfg;
9442*4882a593Smuzhiyun 	} else {
9443*4882a593Smuzhiyun 		err = BCME_BADARG;
9444*4882a593Smuzhiyun 		goto done;
9445*4882a593Smuzhiyun 	}
9446*4882a593Smuzhiyun 
9447*4882a593Smuzhiyun 	nancfg = cfg->nancfg;
9448*4882a593Smuzhiyun 	mutex_init(&nancfg->nan_sync);
9449*4882a593Smuzhiyun 	init_waitqueue_head(&nancfg->nan_event_wait);
9450*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&nancfg->nan_disable, wl_cfgnan_delayed_disable);
9451*4882a593Smuzhiyun 	nancfg->nan_dp_state = NAN_DP_STATE_DISABLED;
9452*4882a593Smuzhiyun 	init_waitqueue_head(&nancfg->ndp_if_change_event);
9453*4882a593Smuzhiyun 
9454*4882a593Smuzhiyun done:
9455*4882a593Smuzhiyun 	return err;
9456*4882a593Smuzhiyun 
9457*4882a593Smuzhiyun }
9458*4882a593Smuzhiyun 
9459*4882a593Smuzhiyun void
9460*4882a593Smuzhiyun wl_cfgnan_detach(struct bcm_cfg80211 *cfg)
9461*4882a593Smuzhiyun {
9462*4882a593Smuzhiyun 	if (cfg && cfg->nancfg) {
9463*4882a593Smuzhiyun 		if (delayed_work_pending(&cfg->nancfg->nan_disable)) {
9464*4882a593Smuzhiyun 			WL_DBG(("Cancel nan_disable work\n"));
9465*4882a593Smuzhiyun 			DHD_NAN_WAKE_UNLOCK(cfg->pub);
9466*4882a593Smuzhiyun 			cancel_delayed_work_sync(&cfg->nancfg->nan_disable);
9467*4882a593Smuzhiyun 		}
9468*4882a593Smuzhiyun 		MFREE(cfg->osh, cfg->nancfg, sizeof(wl_nancfg_t));
9469*4882a593Smuzhiyun 		cfg->nancfg = NULL;
9470*4882a593Smuzhiyun 	}
9471*4882a593Smuzhiyun 
9472*4882a593Smuzhiyun }
9473*4882a593Smuzhiyun #endif /* WL_NAN */
9474