1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * DHD debugability support
4 *
5 * <<Broadcom-WL-IPTag/Open:>>
6 *
7 * Copyright (C) 1999-2017, Broadcom Corporation
8 *
9 * Unless you and Broadcom execute a separate written software license
10 * agreement governing use of this software, this software is licensed to you
11 * under the terms of the GNU General Public License version 2 (the "GPL"),
12 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13 * following added to such license:
14 *
15 * As a special exception, the copyright holders of this software give you
16 * permission to link this software with independent modules, and to copy and
17 * distribute the resulting executable under terms of your choice, provided that
18 * you also meet, for each linked independent module, the terms and conditions of
19 * the license of that module. An independent module is a module which is not
20 * derived from this software. The special exception does not apply to any
21 * modifications of the software.
22 *
23 * Notwithstanding the above, under no circumstances may you combine this
24 * software in any way with any other Broadcom software provided under a license
25 * other than the GPL, without Broadcom's express prior written consent.
26 *
27 * $Id: dhd_debug.c 711908 2017-07-20 10:37:34Z $
28 */
29
30 #include <typedefs.h>
31 #include <osl.h>
32 #include <bcmutils.h>
33 #include <bcmendian.h>
34 #include <dngl_stats.h>
35 #include <dhd.h>
36 #include <dhd_dbg.h>
37 #include <dhd_debug.h>
38 #include <dhd_mschdbg.h>
39
40 #include <event_log.h>
41 #include <event_trace.h>
42 #include <msgtrace.h>
43
#if defined(DHD_EFI)
/* EFI builds lack the toolchain/kernel definitions of these helpers. */
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif
49
/* Schedule a pull to user space once a ring is one third full. */
#define DBGRING_FLUSH_THRESHOLD(ring)		((ring)->ring_size / 3)

/*
 * Snapshot a ring's statistics into a dhd_dbg_ring_status_t.
 * strncpy() does not guarantee NUL termination when the source fills the
 * destination, so termination is forced explicitly.
 */
#define RING_STAT_TO_STATUS(ring, status) \
	do {               \
		strncpy(status.name, ring->name, \
			sizeof(status.name) - 1); \
		status.name[sizeof(status.name) - 1] = '\0'; \
		status.ring_id = ring->id;     \
		status.ring_buffer_byte_size = ring->ring_size;  \
		status.written_bytes = ring->stat.written_bytes; \
		status.written_records = ring->stat.written_records; \
		status.read_bytes = ring->stat.read_bytes; \
		status.verbose_level = ring->log_level; \
	} while (0)

/* Packet-info logging currently routed through the error log level. */
#define DHD_PKT_INFO DHD_ERROR
64 struct map_table {
65 uint16 fw_id;
66 uint16 host_id;
67 char *desc;
68 };
69
70 struct map_table event_map[] = {
71 {WLC_E_AUTH, WIFI_EVENT_AUTH_COMPLETE, "AUTH_COMPLETE"},
72 {WLC_E_ASSOC, WIFI_EVENT_ASSOC_COMPLETE, "ASSOC_COMPLETE"},
73 {TRACE_FW_AUTH_STARTED, WIFI_EVENT_FW_AUTH_STARTED, "AUTH STARTED"},
74 {TRACE_FW_ASSOC_STARTED, WIFI_EVENT_FW_ASSOC_STARTED, "ASSOC STARTED"},
75 {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_FW_RE_ASSOC_STARTED, "REASSOC STARTED"},
76 {TRACE_G_SCAN_STARTED, WIFI_EVENT_G_SCAN_STARTED, "GSCAN STARTED"},
77 {WLC_E_PFN_SCAN_COMPLETE, WIFI_EVENT_G_SCAN_COMPLETE, "GSCAN COMPLETE"},
78 {WLC_E_DISASSOC, WIFI_EVENT_DISASSOCIATION_REQUESTED, "DIASSOC REQUESTED"},
79 {WLC_E_REASSOC, WIFI_EVENT_RE_ASSOCIATION_REQUESTED, "REASSOC REQUESTED"},
80 {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_REQUESTED, "ROAM REQUESTED"},
81 {WLC_E_BEACON_FRAME_RX, WIFI_EVENT_BEACON_RECEIVED, "BEACON Received"},
82 {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_SCAN_STARTED, "ROAM SCAN STARTED"},
83 {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"},
84 {TRACE_ROAM_AUTH_STARTED, WIFI_EVENT_ROAM_AUTH_STARTED, "ROAM AUTH STARTED"},
85 {WLC_E_AUTH, WIFI_EVENT_ROAM_AUTH_COMPLETE, "ROAM AUTH COMPLETED"},
86 {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_ROAM_ASSOC_STARTED, "ROAM ASSOC STARTED"},
87 {WLC_E_ASSOC, WIFI_EVENT_ROAM_ASSOC_COMPLETE, "ROAM ASSOC COMPLETED"},
88 {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"},
89 {TRACE_BT_COEX_BT_SCO_START, WIFI_EVENT_BT_COEX_BT_SCO_START, "BT SCO START"},
90 {TRACE_BT_COEX_BT_SCO_STOP, WIFI_EVENT_BT_COEX_BT_SCO_STOP, "BT SCO STOP"},
91 {TRACE_BT_COEX_BT_SCAN_START, WIFI_EVENT_BT_COEX_BT_SCAN_START, "BT COEX SCAN START"},
92 {TRACE_BT_COEX_BT_SCAN_STOP, WIFI_EVENT_BT_COEX_BT_SCAN_STOP, "BT COEX SCAN STOP"},
93 {TRACE_BT_COEX_BT_HID_START, WIFI_EVENT_BT_COEX_BT_HID_START, "BT HID START"},
94 {TRACE_BT_COEX_BT_HID_STOP, WIFI_EVENT_BT_COEX_BT_HID_STOP, "BT HID STOP"},
95 {WLC_E_EAPOL_MSG, WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, "FW EAPOL PKT RECEIVED"},
96 {TRACE_FW_EAPOL_FRAME_TRANSMIT_START, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START,
97 "FW EAPOL PKT TRANSMITED"},
98 {TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP,
99 "FW EAPOL PKT TX STOPPED"},
100 {TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE, WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE,
101 "BLOCK ACK NEGO COMPLETED"},
102 };
103
104 struct map_table event_tag_map[] = {
105 {TRACE_TAG_VENDOR_SPECIFIC, WIFI_TAG_VENDOR_SPECIFIC, "VENDOR SPECIFIC DATA"},
106 {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"},
107 {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"},
108 {TRACE_TAG_SSID, WIFI_TAG_SSID, "SSID"},
109 {TRACE_TAG_STATUS, WIFI_TAG_STATUS, "STATUS"},
110 {TRACE_TAG_CHANNEL_SPEC, WIFI_TAG_CHANNEL_SPEC, "CHANSPEC"},
111 {TRACE_TAG_WAKE_LOCK_EVENT, WIFI_TAG_WAKE_LOCK_EVENT, "WAKELOCK EVENT"},
112 {TRACE_TAG_ADDR1, WIFI_TAG_ADDR1, "ADDR_1"},
113 {TRACE_TAG_ADDR2, WIFI_TAG_ADDR2, "ADDR_2"},
114 {TRACE_TAG_ADDR3, WIFI_TAG_ADDR3, "ADDR_3"},
115 {TRACE_TAG_ADDR4, WIFI_TAG_ADDR4, "ADDR_4"},
116 {TRACE_TAG_TSF, WIFI_TAG_TSF, "TSF"},
117 {TRACE_TAG_IE, WIFI_TAG_IE, "802.11 IE"},
118 {TRACE_TAG_INTERFACE, WIFI_TAG_INTERFACE, "INTERFACE"},
119 {TRACE_TAG_REASON_CODE, WIFI_TAG_REASON_CODE, "REASON CODE"},
120 {TRACE_TAG_RATE_MBPS, WIFI_TAG_RATE_MBPS, "RATE"},
121 };
122
123 /* define log level per ring type */
124 struct log_level_table fw_verbose_level_map[] = {
125 {1, EVENT_LOG_TAG_PCI_ERROR, EVENT_LOG_SET_BUS, "PCI_ERROR"},
126 {1, EVENT_LOG_TAG_PCI_WARN, EVENT_LOG_SET_BUS, "PCI_WARN"},
127 {2, EVENT_LOG_TAG_PCI_INFO, EVENT_LOG_SET_BUS, "PCI_INFO"},
128 {3, EVENT_LOG_TAG_PCI_DBG, EVENT_LOG_SET_BUS, "PCI_DEBUG"},
129 {3, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON_LOG"},
130 {2, EVENT_LOG_TAG_WL_ASSOC_LOG, EVENT_LOG_SET_WL, "ASSOC_LOG"},
131 {2, EVENT_LOG_TAG_WL_ROAM_LOG, EVENT_LOG_SET_WL, "ROAM_LOG"},
132 {1, EVENT_LOG_TAG_TRACE_WL_INFO, EVENT_LOG_SET_WL, "WL_INFO"},
133 {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, EVENT_LOG_SET_WL, "BTCOEX_INFO"},
134 #ifdef CUSTOMER_HW4_DEBUG
135 {3, EVENT_LOG_TAG_SCAN_WARN, EVENT_LOG_SET_WL, "SCAN_WARN"},
136 #else
137 {1, EVENT_LOG_TAG_SCAN_WARN, EVENT_LOG_SET_WL, "SCAN_WARN"},
138 #endif /* CUSTOMER_HW4_DEBUG */
139 {1, EVENT_LOG_TAG_SCAN_ERROR, EVENT_LOG_SET_WL, "SCAN_ERROR"},
140 {2, EVENT_LOG_TAG_SCAN_TRACE_LOW, EVENT_LOG_SET_WL, "SCAN_TRACE_LOW"},
141 {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, EVENT_LOG_SET_WL, "SCAN_TRACE_HIGH"}
142 };
143
144 struct log_level_table fw_event_level_map[] = {
145 {1, EVENT_LOG_TAG_TRACE_WL_INFO, EVENT_LOG_SET_WL, "WL_INFO"},
146 {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, EVENT_LOG_SET_WL, "BTCOEX_INFO"},
147 #ifdef CUSTOMER_HW4_DEBUG
148 {3, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON LOG"},
149 #else
150 {2, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON LOG"},
151 #endif /* CUSTOMER_HW4_DEBUG */
152 };
153
154 struct map_table nan_event_map[] = {
155 {TRACE_NAN_CLUSTER_STARTED, NAN_EVENT_CLUSTER_STARTED, "NAN_CLUSTER_STARTED"},
156 {TRACE_NAN_CLUSTER_JOINED, NAN_EVENT_CLUSTER_JOINED, "NAN_CLUSTER_JOINED"},
157 {TRACE_NAN_CLUSTER_MERGED, NAN_EVENT_CLUSTER_MERGED, "NAN_CLUSTER_MERGED"},
158 {TRACE_NAN_ROLE_CHANGED, NAN_EVENT_ROLE_CHANGED, "NAN_ROLE_CHANGED"},
159 {TRACE_NAN_SCAN_COMPLETE, NAN_EVENT_SCAN_COMPLETE, "NAN_SCAN_COMPLETE"},
160 {TRACE_NAN_STATUS_CHNG, NAN_EVENT_STATUS_CHNG, "NAN_STATUS_CHNG"},
161 };
162
163 struct log_level_table nan_event_level_map[] = {
164 {1, EVENT_LOG_TAG_NAN_ERROR, 0, "NAN_ERROR"},
165 {2, EVENT_LOG_TAG_NAN_INFO, 0, "NAN_INFO"},
166 {3, EVENT_LOG_TAG_NAN_DBG, 0, "NAN_DEBUG"},
167 };
168
169 struct map_table nan_evt_tag_map[] = {
170 {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"},
171 {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"},
172 };
173
174 /* reference tab table */
175 uint ref_tag_tbl[EVENT_LOG_TAG_MAX + 1] = {0};
176
177 typedef struct dhddbg_loglist_item {
178 dll_t list;
179 event_log_hdr_t *hdr;
180 } loglist_item_t;
181
182 typedef struct dhbdbg_pending_item {
183 dll_t list;
184 dhd_dbg_ring_status_t ring_status;
185 dhd_dbg_ring_entry_t *ring_entry;
186 } pending_item_t;
187
/* Trace-log entry header consumed by user-space tools. */
struct tracelog_header {
	int magic_num;	/* must equal TRACE_LOG_MAGIC_NUMBER */
	int buf_size;	/* payload size in bytes */
	int seq_num;	/* monotonically increasing sequence number */
};
#define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06
195
196 int
dhd_dbg_ring_pull_single(dhd_pub_t * dhdp,int ring_id,void * data,uint32 buf_len,bool strip_header)197 dhd_dbg_ring_pull_single(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
198 bool strip_header)
199 {
200 dhd_dbg_ring_t *ring;
201 dhd_dbg_ring_entry_t *r_entry;
202 uint32 rlen;
203 char *buf;
204
205 if (!dhdp || !dhdp->dbg) {
206 return 0;
207 }
208
209 ring = &dhdp->dbg->dbg_rings[ring_id];
210
211 if (ring->state != RING_ACTIVE) {
212 return 0;
213 }
214
215 if (ring->rp == ring->wp) {
216 return 0;
217 }
218
219 r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->rp);
220
221 /* Boundary Check */
222 rlen = ENTRY_LENGTH(r_entry);
223 if ((ring->rp + rlen) > ring->ring_size) {
224 DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d,"
225 " current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen,
226 ring->ring_size, ring->id, ring->name, ring->rp));
227 return 0;
228 }
229
230 if (strip_header) {
231 rlen = r_entry->len;
232 buf = (char *)r_entry + DBG_RING_ENTRY_SIZE;
233 } else {
234 rlen = ENTRY_LENGTH(r_entry);
235 buf = (char *)r_entry;
236 }
237 if (rlen > buf_len) {
238 DHD_ERROR(("%s: buf len %d is too small for entry len %d\n",
239 __FUNCTION__, buf_len, rlen));
240 DHD_ERROR(("%s: ring %d[%s] - ring size=%d, wp=%d, rp=%d\n",
241 __FUNCTION__, ring->id, ring->name, ring->ring_size,
242 ring->wp, ring->rp));
243 ASSERT(0);
244 return 0;
245 }
246
247 memcpy(data, buf, rlen);
248 /* update ring context */
249 ring->rp += ENTRY_LENGTH(r_entry);
250 /* skip padding if there is one */
251 if (ring->tail_padded && ((ring->rp + ring->rem_len) == ring->ring_size)) {
252 DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n",
253 __FUNCTION__, ring->id, ring->name, ring->rp, ring->wp));
254 ring->rp = 0;
255 ring->tail_padded = FALSE;
256 ring->rem_len = 0;
257 }
258 if (ring->rp >= ring->ring_size) {
259 DHD_ERROR(("%s: RING%d[%s] rp pointed out of ring boundary,"
260 " rp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
261 ring->name, ring->rp, ring->ring_size));
262 ASSERT(0);
263 }
264 ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
265 DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__,
266 ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp));
267
268 return rlen;
269 }
270
271 int
dhd_dbg_ring_pull(dhd_pub_t * dhdp,int ring_id,void * data,uint32 buf_len)272 dhd_dbg_ring_pull(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len)
273 {
274 int32 r_len, total_r_len = 0;
275 dhd_dbg_ring_t *ring;
276
277 if (!dhdp || !dhdp->dbg)
278 return 0;
279 ring = &dhdp->dbg->dbg_rings[ring_id];
280 if (ring->state != RING_ACTIVE)
281 return 0;
282
283 while (buf_len > 0) {
284 r_len = dhd_dbg_ring_pull_single(dhdp, ring_id, data, buf_len, FALSE);
285 if (r_len == 0)
286 break;
287 data = (uint8 *)data + r_len;
288 buf_len -= r_len;
289 total_r_len += r_len;
290 }
291
292 return total_r_len;
293 }
294
295 int
dhd_dbg_ring_push(dhd_pub_t * dhdp,int ring_id,dhd_dbg_ring_entry_t * hdr,void * data)296 dhd_dbg_ring_push(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data)
297 {
298 unsigned long flags;
299 uint32 pending_len;
300 uint32 w_len;
301 uint32 avail_size;
302 dhd_dbg_ring_t *ring;
303 dhd_dbg_ring_entry_t *w_entry, *r_entry;
304
305 if (!dhdp || !dhdp->dbg) {
306 return BCME_BADADDR;
307 }
308
309 ring = &dhdp->dbg->dbg_rings[ring_id];
310
311 if (ring->state != RING_ACTIVE) {
312 return BCME_OK;
313 }
314
315 flags = dhd_os_spin_lock(ring->lock);
316
317 w_len = ENTRY_LENGTH(hdr);
318
319 if (w_len > ring->ring_size) {
320 dhd_os_spin_unlock(ring->lock, flags);
321 return BCME_ERROR;
322 }
323
324 /* Claim the space */
325 do {
326 avail_size = DBG_RING_CHECK_WRITE_SPACE(ring->rp, ring->wp, ring->ring_size);
327 if (avail_size <= w_len) {
328 /* Prepare the space */
329 if (ring->rp <= ring->wp) {
330 ring->tail_padded = TRUE;
331 ring->rem_len = ring->ring_size - ring->wp;
332 DHD_DBGIF(("%s: RING%d[%s] Insuffient tail space,"
333 " rp=%d, wp=%d, rem_len=%d, ring_size=%d,"
334 " avail_size=%d, w_len=%d\n", __FUNCTION__,
335 ring->id, ring->name, ring->rp, ring->wp,
336 ring->rem_len, ring->ring_size, avail_size,
337 w_len));
338
339 /* 0 pad insufficient tail space */
340 memset((uint8 *)ring->ring_buf + ring->wp, 0, ring->rem_len);
341 if (ring->rp == ring->wp) {
342 ring->rp = 0;
343 }
344 ring->wp = 0;
345 } else {
346 /* Not enough space for new entry, free some up */
347 r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf +
348 ring->rp);
349 ring->rp += ENTRY_LENGTH(r_entry);
350 /* skip padding if there is one */
351 if (ring->tail_padded &&
352 ((ring->rp + ring->rem_len) == ring->ring_size)) {
353 DHD_DBGIF(("%s: RING%d[%s] Found padding,"
354 " avail_size=%d, w_len=%d\n", __FUNCTION__,
355 ring->id, ring->name, avail_size, w_len));
356 ring->rp = 0;
357 ring->tail_padded = FALSE;
358 ring->rem_len = 0;
359 }
360 if (ring->rp >= ring->ring_size) {
361 DHD_ERROR(("%s: RING%d[%s] rp points out of boundary,"
362 " ring->rp = %d, ring->ring_size=%d\n",
363 __FUNCTION__, ring->id, ring->name, ring->rp,
364 ring->ring_size));
365 ASSERT(0);
366 }
367 ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
368 DHD_DBGIF(("%s: RING%d[%s] read_bytes %d, wp=%d, rp=%d\n",
369 __FUNCTION__, ring->id, ring->name, ring->stat.read_bytes,
370 ring->wp, ring->rp));
371 }
372 } else {
373 break;
374 }
375 } while (TRUE);
376
377 w_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->wp);
378 /* header */
379 memcpy(w_entry, hdr, DBG_RING_ENTRY_SIZE);
380 w_entry->len = hdr->len;
381 /* payload */
382 memcpy((char *)w_entry + DBG_RING_ENTRY_SIZE, data, w_entry->len);
383 /* update write pointer */
384 ring->wp += w_len;
385 if (ring->wp >= ring->ring_size) {
386 DHD_ERROR(("%s: RING%d[%s] wp pointed out of ring boundary, "
387 "wp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
388 ring->name, ring->wp, ring->ring_size));
389 ASSERT(0);
390 }
391 /* update statistics */
392 ring->stat.written_records++;
393 ring->stat.written_bytes += w_len;
394 DHD_DBGIF(("%s : RING%d[%s] written_records %d, written_bytes %d, read_bytes=%d,"
395 " ring->threshold=%d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name,
396 ring->stat.written_records, ring->stat.written_bytes, ring->stat.read_bytes,
397 ring->threshold, ring->wp, ring->rp));
398
399 /* Calculate current pending size */
400 if (ring->stat.written_bytes > ring->stat.read_bytes) {
401 pending_len = ring->stat.written_bytes - ring->stat.read_bytes;
402 } else if (ring->stat.written_bytes < ring->stat.read_bytes) {
403 pending_len = 0xFFFFFFFF - ring->stat.read_bytes + ring->stat.written_bytes;
404 } else {
405 pending_len = 0;
406 }
407
408 /* if the current pending size is bigger than threshold */
409 if (ring->threshold > 0 &&
410 (pending_len >= ring->threshold) && ring->sched_pull) {
411 dhdp->dbg->pullreq(dhdp->dbg->private, ring->id);
412 ring->sched_pull = FALSE;
413 }
414 dhd_os_spin_unlock(ring->lock, flags);
415 return BCME_OK;
416 }
417
418 static int
dhd_dbg_msgtrace_seqchk(uint32 * prev,uint32 cur)419 dhd_dbg_msgtrace_seqchk(uint32 *prev, uint32 cur)
420 {
421 /* normal case including wrap around */
422 if ((cur == 0 && *prev == 0xFFFFFFFF) || ((cur - *prev) == 1)) {
423 goto done;
424 } else if (cur == *prev) {
425 DHD_EVENT(("%s duplicate trace\n", __FUNCTION__));
426 return -1;
427 } else if (cur > *prev) {
428 DHD_EVENT(("%s lost %d packets\n", __FUNCTION__, cur - *prev));
429 } else {
430 DHD_EVENT(("%s seq out of order, dhd %d, dongle %d\n",
431 __FUNCTION__, *prev, cur));
432 }
433 done:
434 *prev = cur;
435 return 0;
436 }
437
438 #ifndef MACOSX_DHD
439 static void
dhd_dbg_msgtrace_msg_parser(void * event_data)440 dhd_dbg_msgtrace_msg_parser(void *event_data)
441 {
442 msgtrace_hdr_t *hdr;
443 char *data, *s;
444 static uint32 seqnum_prev = 0;
445
446 hdr = (msgtrace_hdr_t *)event_data;
447 data = (char *)event_data + MSGTRACE_HDRLEN;
448
449 /* There are 2 bytes available at the end of data */
450 data[ntoh16(hdr->len)] = '\0';
451
452 if (ntoh32(hdr->discarded_bytes) || ntoh32(hdr->discarded_printf)) {
453 DHD_DBGIF(("WLC_E_TRACE: [Discarded traces in dongle -->"
454 "discarded_bytes %d discarded_printf %d]\n",
455 ntoh32(hdr->discarded_bytes),
456 ntoh32(hdr->discarded_printf)));
457 }
458
459 if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum)))
460 return;
461
462 /* Display the trace buffer. Advance from
463 * \n to \n to avoid display big
464 * printf (issue with Linux printk )
465 */
466 while (*data != '\0' && (s = strstr(data, "\n")) != NULL) {
467 *s = '\0';
468 DHD_FWLOG(("[FWLOG] %s\n", data));
469 data = s+1;
470 }
471 if (*data)
472 DHD_FWLOG(("[FWLOG] %s", data));
473 }
474 #endif /* MACOSX_DHD */
475 #ifdef SHOW_LOGTRACE
476 static const uint8 *
event_get_tlv(uint16 id,const char * tlvs,uint tlvs_len)477 event_get_tlv(uint16 id, const char* tlvs, uint tlvs_len)
478 {
479 const uint8 *pos = (const uint8 *)tlvs;
480 const uint8 *end = pos + tlvs_len;
481 const tlv_log *tlv;
482 int rest;
483
484 while (pos + 1 < end) {
485 if (pos + 4 + pos[1] > end)
486 break;
487 tlv = (const tlv_log *) pos;
488 if (tlv->tag == id)
489 return pos;
490 rest = tlv->len % 4; /* padding values */
491 pos += 4 + tlv->len + rest;
492 }
493 return NULL;
494 }
495
/* Bytes per event-log count unit (log payload is counted in uint32 words). */
#define DATA_UNIT_FOR_LOG_CNT 4
/* #pragma used as a WAR to fix build failure,
 * ignore dropping of 'const' qualifier in tlv_data assignment
 * this pragma disables the warning only for the following function
 */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
505 static int
dhd_dbg_nan_event_handler(dhd_pub_t * dhdp,event_log_hdr_t * hdr,uint32 * data)506 dhd_dbg_nan_event_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *data)
507 {
508 int ret = BCME_OK;
509 wl_event_log_id_ver_t nan_hdr;
510 log_nan_event_t *evt_payload;
511 uint16 evt_payload_len = 0, tot_payload_len = 0;
512 dhd_dbg_ring_entry_t msg_hdr;
513 bool evt_match = FALSE;
514 event_log_hdr_t *ts_hdr;
515 uint32 *ts_data;
516 char *tlvs, *dest_tlvs;
517 tlv_log *tlv_data;
518 int tlv_len = 0;
519 int i = 0, evt_idx = 0;
520 char eaddr_buf[ETHER_ADDR_STR_LEN];
521
522 BCM_REFERENCE(eaddr_buf);
523
524 nan_hdr.t = *data;
525 DHD_DBGIF(("%s: version %u event %x\n", __FUNCTION__, nan_hdr.version,
526 nan_hdr.event));
527
528 if (nan_hdr.version != DIAG_VERSION) {
529 DHD_ERROR(("Event payload version %u mismatch with current version %u\n",
530 nan_hdr.version, DIAG_VERSION));
531 return BCME_VERSION;
532 }
533
534 /* nan event log should at least contain a wl_event_log_id_ver_t
535 * header and a arm cycle count
536 */
537 if (hdr->count < NAN_EVENT_LOG_MIN_LENGTH) {
538 return BCME_BADLEN;
539 }
540
541 memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
542 ts_hdr = (event_log_hdr_t *)((uint8 *)data - sizeof(event_log_hdr_t));
543 if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
544 ts_data = (uint32 *)ts_hdr - ts_hdr->count;
545 msg_hdr.timestamp = (uint64)ts_data[0];
546 msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
547 }
548 msg_hdr.type = DBG_RING_ENTRY_NAN_EVENT_TYPE;
549 for (i = 0; i < ARRAYSIZE(nan_event_map); i++) {
550 if (nan_event_map[i].fw_id == nan_hdr.event) {
551 evt_match = TRUE;
552 evt_idx = i;
553 break;
554 }
555 }
556 if (evt_match) {
557 DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, nan_event_map[evt_idx].desc));
558 /* payload length for nan event data */
559 evt_payload_len = sizeof(log_nan_event_t) +
560 (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
561 if ((evt_payload = MALLOC(dhdp->osh, evt_payload_len)) == NULL) {
562 DHD_ERROR(("Memory allocation failed for nan evt log (%u)\n",
563 evt_payload_len));
564 return BCME_NOMEM;
565 }
566 evt_payload->version = NAN_EVENT_VERSION;
567 evt_payload->event = nan_event_map[evt_idx].host_id;
568 dest_tlvs = (char *)evt_payload->tlvs;
569 tot_payload_len = sizeof(log_nan_event_t);
570 tlvs = (char *)(&data[1]);
571 tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
572 for (i = 0; i < ARRAYSIZE(nan_evt_tag_map); i++) {
573 tlv_data = (tlv_log *)event_get_tlv(nan_evt_tag_map[i].fw_id,
574 tlvs, tlv_len);
575 if (tlv_data) {
576 DHD_DBGIF(("NAN evt tlv.tag(%s), tlv.len : %d, tlv.data : ",
577 nan_evt_tag_map[i].desc, tlv_data->len));
578 memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len);
579 tot_payload_len += tlv_data->len + sizeof(tlv_log);
580 switch (tlv_data->tag) {
581 case TRACE_TAG_BSSID:
582 case TRACE_TAG_ADDR:
583 DHD_DBGIF(("%s\n",
584 bcm_ether_ntoa(
585 (const struct ether_addr *)tlv_data->value,
586 eaddr_buf)));
587 break;
588 default:
589 if (DHD_DBGIF_ON()) {
590 prhex(NULL, &tlv_data->value[0],
591 tlv_data->len);
592 }
593 break;
594 }
595 dest_tlvs += tlv_data->len + sizeof(tlv_log);
596 }
597 }
598 msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
599 msg_hdr.len = tot_payload_len;
600 dhd_dbg_ring_push(dhdp, NAN_EVENT_RING_ID, &msg_hdr, evt_payload);
601 MFREE(dhdp->osh, evt_payload, evt_payload_len);
602 }
603 return ret;
604 }
605
606 static int
dhd_dbg_custom_evnt_handler(dhd_pub_t * dhdp,event_log_hdr_t * hdr,uint32 * data)607 dhd_dbg_custom_evnt_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *data)
608 {
609 int i = 0, match_idx = 0;
610 int payload_len, tlv_len;
611 uint16 tot_payload_len = 0;
612 int ret = BCME_OK;
613 int log_level;
614 wl_event_log_id_ver_t wl_log_id;
615 dhd_dbg_ring_entry_t msg_hdr;
616 log_conn_event_t *event_data;
617 bool evt_match = FALSE;
618 event_log_hdr_t *ts_hdr;
619 uint32 *ts_data;
620 char *tlvs, *dest_tlvs;
621 tlv_log *tlv_data;
622 static uint64 ts_saved = 0;
623 char eabuf[ETHER_ADDR_STR_LEN];
624 char chanbuf[CHANSPEC_STR_LEN];
625
626 BCM_REFERENCE(eabuf);
627 BCM_REFERENCE(chanbuf);
628 /* get a event type and version */
629 wl_log_id.t = *data;
630 if (wl_log_id.version != DIAG_VERSION)
631 return BCME_VERSION;
632
633 /* custom event log should at least contain a wl_event_log_id_ver_t
634 * header and a arm cycle count
635 */
636 if (hdr->count < NAN_EVENT_LOG_MIN_LENGTH) {
637 return BCME_BADLEN;
638 }
639
640 ts_hdr = (event_log_hdr_t *)((uint8 *)data - sizeof(event_log_hdr_t));
641 if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
642 ts_data = (uint32 *)ts_hdr - ts_hdr->count;
643 ts_saved = (uint64)ts_data[0];
644 }
645 memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
646 msg_hdr.timestamp = ts_saved;
647
648 DHD_DBGIF(("Android Event ver %d, payload %d words, ts %llu\n",
649 (*data >> 16), hdr->count - 1, ts_saved));
650
651 /* Perform endian convertion */
652 for (i = 0; i < hdr->count; i++) {
653 /* *(data + i) = ntoh32(*(data + i)); */
654 DHD_DATA(("%08x ", *(data + i)));
655 }
656 DHD_DATA(("\n"));
657 msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
658 msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
659 msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE;
660
661 /* convert the data to log_conn_event_t format */
662 for (i = 0; i < ARRAYSIZE(event_map); i++) {
663 if (event_map[i].fw_id == wl_log_id.event) {
664 evt_match = TRUE;
665 match_idx = i;
666 break;
667 }
668 }
669 if (evt_match) {
670 log_level = dhdp->dbg->dbg_rings[FW_EVENT_RING_ID].log_level;
671 /* filter the data based on log_level */
672 for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) {
673 if ((fw_event_level_map[i].tag == hdr->tag) &&
674 (fw_event_level_map[i].log_level > log_level)) {
675 return BCME_OK;
676 }
677 }
678 DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, event_map[match_idx].desc));
679 /* get the payload length for event data (skip : log header + timestamp) */
680 payload_len = sizeof(log_conn_event_t) + DATA_UNIT_FOR_LOG_CNT * (hdr->count - 2);
681 event_data = MALLOC(dhdp->osh, payload_len);
682 if (!event_data) {
683 DHD_ERROR(("failed to allocate the log_conn_event_t with length(%d)\n",
684 payload_len));
685 return BCME_NOMEM;
686 }
687 event_data->event = event_map[match_idx].host_id;
688 dest_tlvs = (char *)event_data->tlvs;
689 tot_payload_len = sizeof(log_conn_event_t);
690 tlvs = (char *)(&data[1]);
691 tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
692 for (i = 0; i < ARRAYSIZE(event_tag_map); i++) {
693 tlv_data = (tlv_log *)event_get_tlv(event_tag_map[i].fw_id,
694 tlvs, tlv_len);
695 if (tlv_data) {
696 DHD_DBGIF(("tlv.tag(%s), tlv.len : %d, tlv.data : ",
697 event_tag_map[i].desc, tlv_data->len));
698 memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len);
699 tot_payload_len += tlv_data->len + sizeof(tlv_log);
700 switch (tlv_data->tag) {
701 case TRACE_TAG_BSSID:
702 case TRACE_TAG_ADDR:
703 case TRACE_TAG_ADDR1:
704 case TRACE_TAG_ADDR2:
705 case TRACE_TAG_ADDR3:
706 case TRACE_TAG_ADDR4:
707 DHD_DBGIF(("%s\n",
708 bcm_ether_ntoa((const struct ether_addr *)tlv_data->value,
709 eabuf)));
710 break;
711 case TRACE_TAG_SSID:
712 DHD_DBGIF(("%s\n", tlv_data->value));
713 break;
714 case TRACE_TAG_STATUS:
715 DHD_DBGIF(("%d\n", ltoh32_ua(&tlv_data->value[0])));
716 break;
717 case TRACE_TAG_REASON_CODE:
718 DHD_DBGIF(("%d\n", ltoh16_ua(&tlv_data->value[0])));
719 break;
720 case TRACE_TAG_RATE_MBPS:
721 DHD_DBGIF(("%d Kbps\n",
722 ltoh16_ua(&tlv_data->value[0]) * 500));
723 break;
724 case TRACE_TAG_CHANNEL_SPEC:
725 DHD_DBGIF(("%s\n",
726 wf_chspec_ntoa(
727 ltoh16_ua(&tlv_data->value[0]), chanbuf)));
728 break;
729 default:
730 if (DHD_DBGIF_ON()) {
731 prhex(NULL, &tlv_data->value[0], tlv_data->len);
732 }
733 }
734 dest_tlvs += tlv_data->len + sizeof(tlv_log);
735 }
736 }
737 msg_hdr.len = tot_payload_len;
738 dhd_dbg_ring_push(dhdp, FW_EVENT_RING_ID, &msg_hdr, event_data);
739 MFREE(dhdp->osh, event_data, payload_len);
740 }
741 return ret;
742 }
743 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
744 #pragma GCC diagnostic pop
745 #endif
746
747 /* To identify format of types %Ns where N >= 0 is a number */
748 bool
check_valid_string_format(char * curr_ptr)749 check_valid_string_format(char *curr_ptr)
750 {
751 char *next_ptr;
752 if ((next_ptr = bcmstrstr(curr_ptr, "s")) != NULL) {
753 /* Default %s format */
754 if (curr_ptr == next_ptr) {
755 return TRUE;
756 }
757
758 /* Verify each charater between '%' and 's' is a valid number */
759 while (curr_ptr < next_ptr) {
760 if (bcm_isdigit(*curr_ptr) == FALSE) {
761 return FALSE;
762 }
763 curr_ptr++;
764 }
765
766 return TRUE;
767 } else {
768 return FALSE;
769 }
770 }
771
772 #define MAX_NO_OF_ARG 16
773 #define FMTSTR_SIZE 132
774 #define ROMSTR_SIZE 200
775 #define SIZE_LOC_STR 50
776 static uint64 verboselog_ts_saved = 0;
777 static void
dhd_dbg_verboselog_handler(dhd_pub_t * dhdp,event_log_hdr_t * hdr,void * raw_event_ptr)778 dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
779 void *raw_event_ptr)
780 {
781 event_log_hdr_t *ts_hdr;
782 uint32 *log_ptr = (uint32 *)hdr - hdr->count;
783 char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 };
784 uint32 rom_str_len = 0;
785 uint32 *ts_data;
786
787 if (!raw_event_ptr) {
788 return;
789 }
790
791 /* Get time stamp if it's updated */
792 ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t));
793 if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
794 ts_data = (uint32 *)ts_hdr - ts_hdr->count;
795 verboselog_ts_saved = (uint64)ts_data[0];
796 DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n",
797 ts_data[ts_hdr->count - 1], ts_data[0], ts_data[1]));
798 }
799
800 if (hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) {
801 rom_str_len = (hdr->count - 1) * sizeof(uint32);
802 if (rom_str_len >= (ROMSTR_SIZE -1))
803 rom_str_len = ROMSTR_SIZE - 1;
804
805 /* copy all ascii data for ROM printf to local string */
806 memcpy(fmtstr_loc_buf, log_ptr, rom_str_len);
807 /* add end of line at last */
808 fmtstr_loc_buf[rom_str_len] = '\0';
809
810 DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s",
811 log_ptr[hdr->count - 1], fmtstr_loc_buf));
812
813 /* Add newline if missing */
814 if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n')
815 DHD_MSGTRACE_LOG(("\n"));
816
817 return;
818 }
819
820 if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE || hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) {
821 wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, hdr->tag, log_ptr);
822 return;
823 }
824
825 /* print the message out in a logprint */
826 dhd_dbg_verboselog_printf(dhdp, hdr, raw_event_ptr, log_ptr);
827 }
828
829 void
dhd_dbg_verboselog_printf(dhd_pub_t * dhdp,event_log_hdr_t * hdr,void * raw_event_ptr,uint32 * log_ptr)830 dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
831 void *raw_event_ptr, uint32 *log_ptr)
832 {
833 dhd_event_log_t *raw_event = (dhd_event_log_t *)raw_event_ptr;
834 uint16 count;
835 int log_level, id;
836 char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 };
837 char (*str_buf)[SIZE_LOC_STR] = NULL;
838 char *str_tmpptr = NULL;
839 uint32 addr = 0;
840 typedef union {
841 uint32 val;
842 char * addr;
843 } u_arg;
844 u_arg arg[MAX_NO_OF_ARG] = {{0}};
845 char *c_ptr = NULL;
846
847 BCM_REFERENCE(arg);
848
849 if (!raw_event) {
850 return;
851 }
852
853 /* print the message out in a logprint */
854 if (!(raw_event->fmts) || hdr->fmt_num == 0xffff) {
855 if (dhdp->dbg) {
856 log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level;
857 for (id = 0; id < ARRAYSIZE(fw_verbose_level_map); id++) {
858 if ((fw_verbose_level_map[id].tag == hdr->tag) &&
859 (fw_verbose_level_map[id].log_level > log_level))
860 return;
861 }
862 }
863
864 DHD_EVENT(("%d.%d EL:tag=%d len=%d fmt=0x%x",
865 (uint32)verboselog_ts_saved / 1000,
866 (uint32)verboselog_ts_saved % 1000,
867 hdr->tag,
868 hdr->count,
869 hdr->fmt_num));
870
871 for (count = 0; count < (hdr->count-1); count++) {
872 if (count % 8 == 0)
873 DHD_EVENT(("\n\t%08x", log_ptr[count]));
874 else
875 DHD_EVENT((" %08x", log_ptr[count]));
876 }
877 DHD_EVENT(("\n"));
878
879 return;
880 }
881
882 str_buf = MALLOCZ(dhdp->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR));
883 if (!str_buf) {
884 DHD_ERROR(("%s: malloc failed str_buf\n", __FUNCTION__));
885 return;
886 }
887
888 if ((hdr->fmt_num >> 2) < raw_event->num_fmts) {
889 if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
890 snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "%s",
891 raw_event->fmts[hdr->fmt_num >> 2]);
892 hdr->count++;
893 } else {
894 snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E: %6d.%3d %s",
895 log_ptr[hdr->count-1]/1000, (log_ptr[hdr->count - 1] % 1000),
896 raw_event->fmts[hdr->fmt_num >> 2]);
897 }
898 c_ptr = fmtstr_loc_buf;
899 } else {
900 DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__));
901 goto exit;
902 }
903
904 for (count = 0; count < (hdr->count - 1); count++) {
905 if (c_ptr != NULL)
906 if ((c_ptr = bcmstrstr(c_ptr, "%")) != NULL)
907 c_ptr++;
908
909 if (c_ptr != NULL) {
910 if (check_valid_string_format(c_ptr)) {
911 if ((raw_event->raw_sstr) &&
912 ((log_ptr[count] > raw_event->rodata_start) &&
913 (log_ptr[count] < raw_event->rodata_end))) {
914 /* ram static string */
915 addr = log_ptr[count] - raw_event->rodata_start;
916 str_tmpptr = raw_event->raw_sstr + addr;
917 memcpy(str_buf[count], str_tmpptr,
918 SIZE_LOC_STR);
919 str_buf[count][SIZE_LOC_STR-1] = '\0';
920 arg[count].addr = str_buf[count];
921 } else if ((raw_event->rom_raw_sstr) &&
922 ((log_ptr[count] >
923 raw_event->rom_rodata_start) &&
924 (log_ptr[count] <
925 raw_event->rom_rodata_end))) {
926 /* rom static string */
927 addr = log_ptr[count] - raw_event->rom_rodata_start;
928 str_tmpptr = raw_event->rom_raw_sstr + addr;
929 memcpy(str_buf[count], str_tmpptr,
930 SIZE_LOC_STR);
931 str_buf[count][SIZE_LOC_STR-1] = '\0';
932 arg[count].addr = str_buf[count];
933 } else {
934 /*
935 * Dynamic string OR
936 * No data for static string.
937 * So store all string's address as string.
938 */
939 snprintf(str_buf[count], SIZE_LOC_STR,
940 "(s)0x%x", log_ptr[count]);
941 arg[count].addr = str_buf[count];
942 }
943 } else {
944 /* Other than string */
945 arg[count].val = log_ptr[count];
946 }
947 }
948 }
949
950 /* Print FW logs */
951 DHD_FWLOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
952 arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
953 arg[11], arg[12], arg[13], arg[14], arg[15]));
954
955 exit:
956 MFREE(dhdp->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR));
957 }
958
/*
 * dhd_dbg_msgtrace_log_parser - parse a MSGTRACE_HDR_TYPE_LOG frame from
 * the dongle: push the raw payload into the FW_VERBOSE ring, then walk the
 * embedded event-log records from the tail of the buffer towards the head
 * (records are laid down in reverse, header-last) and dispatch each one to
 * the NAN event handler, the custom event handler, or the verbose-log
 * printer, in that priority order.
 *
 * event_data points at a msgtrace header followed by an event-log block;
 * datalen is the total length of that buffer. Out-of-sequence frames
 * (per dhd_dbg_msgtrace_seqchk) are dropped.
 */
static void
dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data,
	void *raw_event_ptr, uint datalen)
{
	msgtrace_hdr_t *hdr;
	char *data;
	int id;
	uint32 log_hdr_len = sizeof(event_log_hdr_t);
	uint32 log_pyld_len;
	/* persists across calls: last seen firmware sequence number */
	static uint32 seqnum_prev = 0;
	event_log_hdr_t *log_hdr;
	bool msg_processed = FALSE;
	uint32 *log_ptr = NULL;
	dll_t list_head, *cur;
	loglist_item_t *log_item;
	int32 nan_evt_ring_log_level = 0;
	dhd_dbg_ring_entry_t msg_hdr;
	char *logbuf;
	struct tracelog_header *logentry_header;

	/* log trace event consists of:
	 * msgtrace header
	 * event log block header
	 * event log payload
	 */
	if (datalen <= MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_HDRLEN) {
		return;
	}
	hdr = (msgtrace_hdr_t *)event_data;
	data = (char *)event_data + MSGTRACE_HDRLEN;
	datalen -= MSGTRACE_HDRLEN;

	/* drop duplicated / out-of-order frames */
	if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum)))
		return;

	/* Save the whole message to event log ring */
	memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
	logbuf = VMALLOC(dhdp->osh, sizeof(*logentry_header) + datalen);
	if (logbuf == NULL)
		return;
	logentry_header = (struct tracelog_header *)logbuf;
	logentry_header->magic_num = TRACE_LOG_MAGIC_NUMBER;
	logentry_header->buf_size = datalen;
	logentry_header->seq_num = hdr->seqnum;
	msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE;

	/* ring entries are bounded; refuse oversized payloads */
	if ((sizeof(*logentry_header) + datalen) > PAYLOAD_MAX_LEN) {
		DHD_ERROR(("%s:Payload len=%u exceeds max len\n", __FUNCTION__,
			((uint)sizeof(*logentry_header) + datalen)));
		VMFREE(dhdp->osh, logbuf, sizeof(*logentry_header) + datalen);
		return;
	}

	msg_hdr.len = sizeof(*logentry_header) + datalen;
	memcpy(logbuf + sizeof(*logentry_header), data, datalen);
	/* dhd_dbg_ring_push copies the entry, so logbuf can be freed here */
	dhd_dbg_ring_push(dhdp, FW_VERBOSE_RING_ID, &msg_hdr, logbuf);
	VMFREE(dhdp->osh, logbuf, sizeof(*logentry_header) + datalen);

	/* Print sequence number, originating set and length of received
	 * event log buffer. Refer to event log buffer structure in
	 * event_log.h
	 */
	DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n",
		ltoh16(*((uint16 *)(data+2))), ltoh32(*((uint32 *)(data + 4))),
		ltoh16(*((uint16 *)(data)))));
	data += EVENT_LOG_BLOCK_HDRLEN;
	datalen -= EVENT_LOG_BLOCK_HDRLEN;

	/* start parsing from the tail of packet
	 * Sameple format of a meessage
	 * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639
	 * 001d3c54 00000064 00000064 035d6d89 0c580439
	 * 0x0c580439 -- 39 is tag, 04 is count, 580c is format number
	 * all these uint32 values comes in reverse order as group as EL data
	 * while decoding we can only parse from last to first
	 * |<- datalen ->|
	 * |----(payload and maybe more logs)----|event_log_hdr_t|
	 * data log_hdr
	 */
	dll_init(&list_head);
	/* Phase 1: collect record headers tail-to-head into list_head.
	 * Each iteration consumes one record (payload + header) by
	 * shrinking datalen, so (data + datalen) always points just past
	 * the next, earlier record's header.
	 */
	while (datalen > log_hdr_len) {
		log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len);
		/* skip zero padding at end of frame */
		if (log_hdr->tag == EVENT_LOG_TAG_NULL) {
			datalen -= log_hdr_len;
			continue;
		}
		/* Check argument count, any event log should contain at least
		 * one argument (4 bytes) for arm cycle count and up to 16
		 * arguments when the format is valid
		 */
		if (log_hdr->count == 0) {
			break;
		}
		if ((log_hdr->count > MAX_NO_OF_ARG) && (log_hdr->fmt_num != 0xffff)) {
			break;
		}

		log_pyld_len = log_hdr->count * DATA_UNIT_FOR_LOG_CNT;
		/* log data should not cross the event data boundary */
		if ((char *)log_hdr - data < log_pyld_len)
			break;
		/* skip 4 bytes time stamp packet */
		if (log_hdr->tag == EVENT_LOG_TAG_TS) {
			datalen -= log_pyld_len + log_hdr_len;
			continue;
		}
		if (!(log_item = MALLOC(dhdp->osh, sizeof(*log_item)))) {
			DHD_ERROR(("%s allocating log list item failed\n",
				__FUNCTION__));
			break;
		}
		log_item->hdr = log_hdr;
		dll_insert(&log_item->list, &list_head);
		datalen -= (log_pyld_len + log_hdr_len);
	}

	/* Phase 2: drain the list (head insertion above restored original,
	 * in-buffer order) and dispatch each record.
	 */
	while (!dll_empty(&list_head)) {
		msg_processed = FALSE;
		cur = dll_head_p(&list_head);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
		/* recover the enclosing loglist_item_t from its dll_t member */
		log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
		log_hdr = log_item->hdr;
		/* payload (count uint32 words) sits immediately before the header */
		log_ptr = (uint32 *)log_hdr - log_hdr->count;
		dll_delete(cur);
		MFREE(dhdp->osh, log_item, sizeof(*log_item));

		/* Before DHD debugability is implemented WLC_E_TRACE had been
		 * used to carry verbose logging from firmware. We need to
		 * be able to handle those messages even without a initialized
		 * debug layer.
		 */
		if (dhdp->dbg) {
			/* check the data for NAN event ring; keeping first as small table */
			/* process only user configured to log */
			nan_evt_ring_log_level = dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level;
			if (dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level) {
				for (id = 0; id < ARRAYSIZE(nan_event_level_map); id++) {
					if (nan_event_level_map[id].tag == log_hdr->tag) {
						/* dont process if tag log level is greater
						 * than ring log level
						 */
						if (nan_event_level_map[id].log_level >
							nan_evt_ring_log_level) {
							msg_processed = TRUE;
							break;
						}
						/* In case of BCME_VERSION error,
						 * this is not NAN event type data
						 */
						if (dhd_dbg_nan_event_handler(dhdp,
							log_hdr, log_ptr) != BCME_VERSION) {
							msg_processed = TRUE;
						}
						break;
					}
				}
			}
			if (!msg_processed) {
				/* check the data for event ring */
				for (id = 0; id < ARRAYSIZE(fw_event_level_map); id++) {
					if (fw_event_level_map[id].tag == log_hdr->tag) {
						/* In case of BCME_VERSION error,
						 * this is not event type data
						 */
						if (dhd_dbg_custom_evnt_handler(dhdp,
							log_hdr, log_ptr) != BCME_VERSION) {
							msg_processed = TRUE;
						}
						break;
					}
				}
			}
		}
		/* fall back to plain verbose-log printing */
		if (!msg_processed)
			dhd_dbg_verboselog_handler(dhdp, log_hdr, raw_event_ptr);

	}
}
#else /* !SHOW_LOGTRACE */
/* With SHOW_LOGTRACE disabled, firmware verbose-log parsing is compiled
 * out; these empty inline stubs keep the call sites unchanged.
 */
static INLINE void dhd_dbg_verboselog_handler(dhd_pub_t *dhdp,
	event_log_hdr_t *hdr, void *raw_event_ptr) {};
static INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp,
	void *event_data, void *raw_event_ptr, uint datalen) {};
#endif /* SHOW_LOGTRACE */
1150 #ifndef MACOSX_DHD
1151 void
dhd_dbg_trace_evnt_handler(dhd_pub_t * dhdp,void * event_data,void * raw_event_ptr,uint datalen)1152 dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data,
1153 void *raw_event_ptr, uint datalen)
1154 {
1155 msgtrace_hdr_t *hdr;
1156
1157 hdr = (msgtrace_hdr_t *)event_data;
1158
1159 if (hdr->version != MSGTRACE_VERSION) {
1160 DHD_DBGIF(("%s unsupported MSGTRACE version, dhd %d, dongle %d\n",
1161 __FUNCTION__, MSGTRACE_VERSION, hdr->version));
1162 return;
1163 }
1164
1165 if (hdr->trace_type == MSGTRACE_HDR_TYPE_MSG)
1166 dhd_dbg_msgtrace_msg_parser(event_data);
1167 else if (hdr->trace_type == MSGTRACE_HDR_TYPE_LOG)
1168 dhd_dbg_msgtrace_log_parser(dhdp, event_data, raw_event_ptr, datalen);
1169 }
1170 #endif /* MACOSX_DHD */
1171 static int
dhd_dbg_ring_init(dhd_pub_t * dhdp,dhd_dbg_ring_t * ring,uint16 id,uint8 * name,uint32 ring_sz,int section)1172 dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
1173 uint32 ring_sz, int section)
1174 {
1175 void *buf;
1176 unsigned long flags;
1177 #ifdef CONFIG_DHD_USE_STATIC_BUF
1178 buf = DHD_OS_PREALLOC(dhdp, section, ring_sz);
1179 #else
1180 buf = MALLOCZ(dhdp->osh, ring_sz);
1181 #endif
1182 if (!buf)
1183 return BCME_NOMEM;
1184
1185 ring->lock = dhd_os_spin_lock_init(dhdp->osh);
1186
1187 flags = dhd_os_spin_lock(ring->lock);
1188 ring->id = id;
1189 strncpy(ring->name, name, DBGRING_NAME_MAX);
1190 ring->name[DBGRING_NAME_MAX - 1] = 0;
1191 ring->ring_size = ring_sz;
1192 ring->wp = ring->rp = 0;
1193 ring->ring_buf = buf;
1194 ring->threshold = DBGRING_FLUSH_THRESHOLD(ring);
1195 ring->state = RING_SUSPEND;
1196 ring->sched_pull = TRUE;
1197 ring->rem_len = 0;
1198 dhd_os_spin_unlock(ring->lock, flags);
1199
1200 return BCME_OK;
1201 }
1202
1203 static void
dhd_dbg_ring_deinit(dhd_pub_t * dhdp,dhd_dbg_ring_t * ring)1204 dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring)
1205 {
1206 void *buf;
1207 uint32 ring_sz;
1208 unsigned long flags;
1209
1210 if (!ring->ring_buf)
1211 return;
1212
1213 flags = dhd_os_spin_lock(ring->lock);
1214 ring->id = 0;
1215 ring->name[0] = 0;
1216 ring_sz = ring->ring_size;
1217 ring->ring_size = 0;
1218 ring->wp = ring->rp = 0;
1219 buf = ring->ring_buf;
1220 ring->ring_buf = NULL;
1221 memset(&ring->stat, 0, sizeof(ring->stat));
1222 ring->threshold = 0;
1223 ring->state = RING_STOP;
1224 dhd_os_spin_unlock(ring->lock, flags);
1225
1226 dhd_os_spin_lock_deinit(dhdp->osh, ring->lock);
1227 #ifndef CONFIG_DHD_USE_STATIC_BUF
1228 MFREE(dhdp->osh, buf, ring_sz);
1229 #endif
1230 }
1231
1232 uint8
dhd_dbg_find_sets_by_tag(uint16 tag)1233 dhd_dbg_find_sets_by_tag(uint16 tag)
1234 {
1235 uint i;
1236 uint8 sets = 0;
1237
1238 for (i = 0; i < ARRAYSIZE(fw_verbose_level_map); i++) {
1239 if (fw_verbose_level_map[i].tag == tag) {
1240 sets |= fw_verbose_level_map[i].sets;
1241 }
1242 }
1243
1244 for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) {
1245 if (fw_event_level_map[i].tag == tag) {
1246 sets |= fw_event_level_map[i].sets;
1247 }
1248 }
1249
1250 return sets;
1251 }
1252
1253 /*
1254 * dhd_dbg_set_event_log_tag : modify the state of an event log tag
1255 */
1256 void
dhd_dbg_set_event_log_tag(dhd_pub_t * dhdp,uint16 tag,uint8 set)1257 dhd_dbg_set_event_log_tag(dhd_pub_t *dhdp, uint16 tag, uint8 set)
1258 {
1259 wl_el_tag_params_t pars;
1260 char *cmd = "event_log_tag_control";
1261 char iovbuf[WLC_IOCTL_SMLEN] = { 0 };
1262 int ret;
1263
1264 memset(&pars, 0, sizeof(pars));
1265 pars.tag = tag;
1266 pars.set = dhd_dbg_find_sets_by_tag(tag);
1267 pars.flags = set ? EVENT_LOG_TAG_FLAG_LOG : EVENT_LOG_TAG_FLAG_NONE;
1268
1269 if (!bcm_mkiovar(cmd, (char *)&pars, sizeof(pars), iovbuf, sizeof(iovbuf))) {
1270 DHD_ERROR(("%s mkiovar failed\n", __FUNCTION__));
1271 return;
1272 }
1273
1274 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1275 if (ret) {
1276 DHD_ERROR(("%s set log tag iovar failed %d\n", __FUNCTION__, ret));
1277 }
1278 }
1279
1280 int
dhd_dbg_set_configuration(dhd_pub_t * dhdp,int ring_id,int log_level,int flags,uint32 threshold)1281 dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, int log_level, int flags, uint32 threshold)
1282 {
1283 dhd_dbg_ring_t *ring;
1284 uint8 set = 1;
1285 unsigned long lock_flags;
1286 int i, array_len = 0;
1287 struct log_level_table *log_level_tbl = NULL;
1288 if (!dhdp || !dhdp->dbg)
1289 return BCME_BADADDR;
1290
1291 ring = &dhdp->dbg->dbg_rings[ring_id];
1292
1293 if (ring->state == RING_STOP)
1294 return BCME_UNSUPPORTED;
1295
1296 lock_flags = dhd_os_spin_lock(ring->lock);
1297 if (log_level == 0)
1298 ring->state = RING_SUSPEND;
1299 else
1300 ring->state = RING_ACTIVE;
1301 ring->log_level = log_level;
1302
1303 ring->threshold = MIN(threshold, DBGRING_FLUSH_THRESHOLD(ring));
1304 dhd_os_spin_unlock(ring->lock, lock_flags);
1305 if (log_level > 0)
1306 set = TRUE;
1307
1308 if (ring->id == FW_EVENT_RING_ID) {
1309 log_level_tbl = fw_event_level_map;
1310 array_len = ARRAYSIZE(fw_event_level_map);
1311 } else if (ring->id == FW_VERBOSE_RING_ID) {
1312 log_level_tbl = fw_verbose_level_map;
1313 array_len = ARRAYSIZE(fw_verbose_level_map);
1314 } else if (ring->id == NAN_EVENT_RING_ID) {
1315 log_level_tbl = nan_event_level_map;
1316 array_len = ARRAYSIZE(nan_event_level_map);
1317 }
1318
1319 for (i = 0; i < array_len; i++) {
1320 if (log_level == 0 || (log_level_tbl[i].log_level > log_level)) {
1321 /* clear the reference per ring */
1322 ref_tag_tbl[log_level_tbl[i].tag] &= ~(1 << ring_id);
1323 } else {
1324 /* set the reference per ring */
1325 ref_tag_tbl[log_level_tbl[i].tag] |= (1 << ring_id);
1326 }
1327 set = (ref_tag_tbl[log_level_tbl[i].tag])? 1 : 0;
1328 DHD_DBGIF(("%s TAG(%s) is %s for the ring(%s)\n", __FUNCTION__,
1329 log_level_tbl[i].desc, (set)? "SET" : "CLEAR", ring->name));
1330 dhd_dbg_set_event_log_tag(dhdp, log_level_tbl[i].tag, set);
1331 }
1332 return BCME_OK;
1333 }
1334
1335 /*
1336 * dhd_dbg_get_ring_status : get the ring status from the coresponding ring buffer
1337 * Return: An error code or 0 on success.
1338 */
1339
1340 int
dhd_dbg_get_ring_status(dhd_pub_t * dhdp,int ring_id,dhd_dbg_ring_status_t * dbg_ring_status)1341 dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status)
1342 {
1343 int ret = BCME_OK;
1344 int id = 0;
1345 dhd_dbg_t *dbg;
1346 dhd_dbg_ring_t *dbg_ring;
1347 dhd_dbg_ring_status_t ring_status;
1348 if (!dhdp || !dhdp->dbg)
1349 return BCME_BADADDR;
1350 dbg = dhdp->dbg;
1351
1352 memset(&ring_status, 0, sizeof(dhd_dbg_ring_status_t));
1353 for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
1354 dbg_ring = &dbg->dbg_rings[id];
1355 if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) {
1356 RING_STAT_TO_STATUS(dbg_ring, ring_status);
1357 *dbg_ring_status = ring_status;
1358 break;
1359 }
1360 }
1361 if (!VALID_RING(id)) {
1362 DHD_ERROR(("%s : cannot find the ring_id : %d\n", __FUNCTION__, ring_id));
1363 ret = BCME_NOTFOUND;
1364 }
1365 return ret;
1366 }
1367
1368 /*
1369 * dhd_dbg_find_ring_id : return ring_id based on ring_name
1370 * Return: An invalid ring id for failure or valid ring id on success.
1371 */
1372
1373 int
dhd_dbg_find_ring_id(dhd_pub_t * dhdp,char * ring_name)1374 dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name)
1375 {
1376 int id;
1377 dhd_dbg_t *dbg;
1378 dhd_dbg_ring_t *ring;
1379
1380 if (!dhdp || !dhdp->dbg)
1381 return BCME_BADADDR;
1382
1383 dbg = dhdp->dbg;
1384 for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
1385 ring = &dbg->dbg_rings[id];
1386 if (!strncmp((char *)ring->name, ring_name, sizeof(ring->name) - 1))
1387 break;
1388 }
1389 return id;
1390 }
1391
1392 /*
1393 * dhd_dbg_get_priv : get the private data of dhd dbugability module
1394 * Return : An NULL on failure or valid data address
1395 */
1396 void *
dhd_dbg_get_priv(dhd_pub_t * dhdp)1397 dhd_dbg_get_priv(dhd_pub_t *dhdp)
1398 {
1399 if (!dhdp || !dhdp->dbg)
1400 return NULL;
1401 return dhdp->dbg->private;
1402 }
1403
1404 /*
1405 * dhd_dbg_start : start and stop All of Ring buffers
1406 * Return: An error code or 0 on success.
1407 */
1408 int
dhd_dbg_start(dhd_pub_t * dhdp,bool start)1409 dhd_dbg_start(dhd_pub_t *dhdp, bool start)
1410 {
1411 int ret = BCME_OK;
1412 int ring_id;
1413 dhd_dbg_t *dbg;
1414 dhd_dbg_ring_t *dbg_ring;
1415 if (!dhdp)
1416 return BCME_BADARG;
1417 dbg = dhdp->dbg;
1418
1419 for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
1420 dbg_ring = &dbg->dbg_rings[ring_id];
1421 if (!start) {
1422 if (VALID_RING(dbg_ring->id)) {
1423 /* Initialize the information for the ring */
1424 dbg_ring->state = RING_SUSPEND;
1425 dbg_ring->log_level = 0;
1426 dbg_ring->rp = dbg_ring->wp = 0;
1427 dbg_ring->threshold = 0;
1428 memset(&dbg_ring->stat, 0, sizeof(struct ring_statistics));
1429 memset(dbg_ring->ring_buf, 0, dbg_ring->ring_size);
1430 }
1431 }
1432 }
1433 return ret;
1434 }
1435
1436 /*
1437 * dhd_dbg_send_urgent_evt: send the health check evt to Upper layer
1438 *
1439 * Return: An error code or 0 on success.
1440 */
1441
1442 int
dhd_dbg_send_urgent_evt(dhd_pub_t * dhdp,const void * data,const uint32 len)1443 dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len)
1444 {
1445 dhd_dbg_t *dbg;
1446 int ret = BCME_OK;
1447 if (!dhdp || !dhdp->dbg)
1448 return BCME_BADADDR;
1449
1450 dbg = dhdp->dbg;
1451 if (dbg->urgent_notifier) {
1452 dbg->urgent_notifier(dhdp, data, len);
1453 }
1454 return ret;
1455 }
1456
1457 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
1458 uint32
__dhd_dbg_pkt_hash(uintptr_t pkt,uint32 pktid)1459 __dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid)
1460 {
1461 uint32 __pkt;
1462 uint32 __pktid;
1463
1464 __pkt = ((int)pkt) >= 0 ? (2 * pkt) : (-2 * pkt - 1);
1465 __pktid = ((int)pktid) >= 0 ? (2 * pktid) : (-2 * pktid - 1);
1466
1467 return (__pkt >= __pktid ? (__pkt * __pkt + __pkt + __pktid) :
1468 (__pkt + __pktid * __pktid));
1469 }
1470
/* convert an osl_timespec to microseconds (low 32 bits only) */
#define __TIMESPEC_TO_US(ts) \
	(((uint32)(ts).tv_sec * USEC_PER_SEC) + ((ts).tv_nsec / NSEC_PER_USEC))

/*
 * __dhd_dbg_driver_ts_usec - driver-side timestamp in microseconds,
 * taken from the monotonic boot clock. Truncated to 32 bits, so the
 * value wraps; callers use it for relative packet-fate timestamps.
 */
uint32
__dhd_dbg_driver_ts_usec(void)
{
	struct osl_timespec ts;

	osl_get_monotonic_boottime(&ts);
	return ((uint32)(__TIMESPEC_TO_US(ts)));
}
1482
1483 wifi_tx_packet_fate
__dhd_dbg_map_tx_status_to_pkt_fate(uint16 status)1484 __dhd_dbg_map_tx_status_to_pkt_fate(uint16 status)
1485 {
1486 wifi_tx_packet_fate pkt_fate;
1487
1488 switch (status) {
1489 case WLFC_CTL_PKTFLAG_DISCARD:
1490 pkt_fate = TX_PKT_FATE_ACKED;
1491 break;
1492 case WLFC_CTL_PKTFLAG_D11SUPPRESS:
1493 /* intensional fall through */
1494 case WLFC_CTL_PKTFLAG_WLSUPPRESS:
1495 pkt_fate = TX_PKT_FATE_FW_QUEUED;
1496 break;
1497 case WLFC_CTL_PKTFLAG_TOSSED_BYWLC:
1498 pkt_fate = TX_PKT_FATE_FW_DROP_INVALID;
1499 break;
1500 case WLFC_CTL_PKTFLAG_DISCARD_NOACK:
1501 pkt_fate = TX_PKT_FATE_SENT;
1502 break;
1503 default:
1504 pkt_fate = TX_PKT_FATE_FW_DROP_OTHER;
1505 break;
1506 }
1507
1508 return pkt_fate;
1509 }
1510 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
1511
1512 #ifdef DBG_PKT_MON
1513 static int
__dhd_dbg_free_tx_pkts(dhd_pub_t * dhdp,dhd_dbg_tx_info_t * tx_pkts,uint16 pkt_count)1514 __dhd_dbg_free_tx_pkts(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkts,
1515 uint16 pkt_count)
1516 {
1517 uint16 count;
1518
1519 DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
1520 count = 0;
1521 while ((count < pkt_count) && tx_pkts) {
1522 if (tx_pkts->info.pkt)
1523 PKTFREE(dhdp->osh, tx_pkts->info.pkt, TRUE);
1524 tx_pkts++;
1525 count++;
1526 }
1527
1528 return BCME_OK;
1529 }
1530
1531 static int
__dhd_dbg_free_rx_pkts(dhd_pub_t * dhdp,dhd_dbg_rx_info_t * rx_pkts,uint16 pkt_count)1532 __dhd_dbg_free_rx_pkts(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkts,
1533 uint16 pkt_count)
1534 {
1535 uint16 count;
1536
1537 DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
1538 count = 0;
1539 while ((count < pkt_count) && rx_pkts) {
1540 if (rx_pkts->info.pkt)
1541 PKTFREE(dhdp->osh, rx_pkts->info.pkt, TRUE);
1542 rx_pkts++;
1543 count++;
1544 }
1545
1546 return BCME_OK;
1547 }
1548
1549 void
__dhd_dbg_dump_pkt_info(dhd_pub_t * dhdp,dhd_dbg_pkt_info_t * info)1550 __dhd_dbg_dump_pkt_info(dhd_pub_t *dhdp, dhd_dbg_pkt_info_t *info)
1551 {
1552 if (DHD_PKT_MON_DUMP_ON()) {
1553 DHD_PKT_MON(("payload type = %d\n", info->payload_type));
1554 DHD_PKT_MON(("driver ts = %u\n", info->driver_ts));
1555 DHD_PKT_MON(("firmware ts = %u\n", info->firmware_ts));
1556 DHD_PKT_MON(("packet hash = %u\n", info->pkt_hash));
1557 DHD_PKT_MON(("packet length = %zu\n", info->pkt_len));
1558 DHD_PKT_MON(("packet address = %p\n", info->pkt));
1559 DHD_PKT_MON(("packet data = \n"));
1560 if (DHD_PKT_MON_ON()) {
1561 prhex(NULL, PKTDATA(dhdp->osh, info->pkt), info->pkt_len);
1562 }
1563 }
1564 }
1565
1566 void
__dhd_dbg_dump_tx_pkt_info(dhd_pub_t * dhdp,dhd_dbg_tx_info_t * tx_pkt,uint16 count)1567 __dhd_dbg_dump_tx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkt,
1568 uint16 count)
1569 {
1570 if (DHD_PKT_MON_DUMP_ON()) {
1571 DHD_PKT_MON(("\nTX (count: %d)\n", ++count));
1572 DHD_PKT_MON(("packet fate = %d\n", tx_pkt->fate));
1573 __dhd_dbg_dump_pkt_info(dhdp, &tx_pkt->info);
1574 }
1575 }
1576
1577 void
__dhd_dbg_dump_rx_pkt_info(dhd_pub_t * dhdp,dhd_dbg_rx_info_t * rx_pkt,uint16 count)1578 __dhd_dbg_dump_rx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkt,
1579 uint16 count)
1580 {
1581 if (DHD_PKT_MON_DUMP_ON()) {
1582 DHD_PKT_MON(("\nRX (count: %d)\n", ++count));
1583 DHD_PKT_MON(("packet fate = %d\n", rx_pkt->fate));
1584 __dhd_dbg_dump_pkt_info(dhdp, &rx_pkt->info);
1585 }
1586 }
1587
1588 int
dhd_dbg_attach_pkt_monitor(dhd_pub_t * dhdp,dbg_mon_tx_pkts_t tx_pkt_mon,dbg_mon_tx_status_t tx_status_mon,dbg_mon_rx_pkts_t rx_pkt_mon)1589 dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
1590 dbg_mon_tx_pkts_t tx_pkt_mon,
1591 dbg_mon_tx_status_t tx_status_mon,
1592 dbg_mon_rx_pkts_t rx_pkt_mon)
1593 {
1594
1595 dhd_dbg_tx_report_t *tx_report = NULL;
1596 dhd_dbg_rx_report_t *rx_report = NULL;
1597 dhd_dbg_tx_info_t *tx_pkts = NULL;
1598 dhd_dbg_rx_info_t *rx_pkts = NULL;
1599 dhd_dbg_pkt_mon_state_t tx_pkt_state;
1600 dhd_dbg_pkt_mon_state_t tx_status_state;
1601 dhd_dbg_pkt_mon_state_t rx_pkt_state;
1602 gfp_t kflags;
1603 uint32 alloc_len;
1604 int ret = BCME_OK;
1605 unsigned long flags;
1606
1607 DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
1608 if (!dhdp || !dhdp->dbg) {
1609 DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
1610 dhdp, (dhdp ? dhdp->dbg : NULL)));
1611 return -EINVAL;
1612 }
1613
1614 DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
1615 tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
1616 tx_status_state = dhdp->dbg->pkt_mon.tx_pkt_state;
1617 rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
1618
1619 if (PKT_MON_ATTACHED(tx_pkt_state) || PKT_MON_ATTACHED(tx_status_state) ||
1620 PKT_MON_ATTACHED(rx_pkt_state)) {
1621 DHD_PKT_MON(("%s(): packet monitor is already attached, "
1622 "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
1623 __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
1624 DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
1625 /* return success as the intention was to initialize packet monitor */
1626 return BCME_OK;
1627 }
1628
1629 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
1630
1631 /* allocate and initialize tx packet monitoring */
1632 alloc_len = sizeof(*tx_report);
1633 tx_report = (dhd_dbg_tx_report_t *)kzalloc(alloc_len, kflags);
1634 if (unlikely(!tx_report)) {
1635 DHD_ERROR(("%s(): could not allocate memory for - "
1636 "dhd_dbg_tx_report_t\n", __FUNCTION__));
1637 ret = -ENOMEM;
1638 goto fail;
1639 }
1640
1641 alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN);
1642 tx_pkts = (dhd_dbg_tx_info_t *)kzalloc(alloc_len, kflags);
1643 if (unlikely(!tx_pkts)) {
1644 DHD_ERROR(("%s(): could not allocate memory for - "
1645 "dhd_dbg_tx_info_t\n", __FUNCTION__));
1646 ret = -ENOMEM;
1647 goto fail;
1648 }
1649 dhdp->dbg->pkt_mon.tx_report = tx_report;
1650 dhdp->dbg->pkt_mon.tx_report->tx_pkts = tx_pkts;
1651 dhdp->dbg->pkt_mon.tx_pkt_mon = tx_pkt_mon;
1652 dhdp->dbg->pkt_mon.tx_status_mon = tx_status_mon;
1653 dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_ATTACHED;
1654 dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_ATTACHED;
1655
1656 /* allocate and initialze rx packet monitoring */
1657 alloc_len = sizeof(*rx_report);
1658 rx_report = (dhd_dbg_rx_report_t *)kzalloc(alloc_len, kflags);
1659 if (unlikely(!rx_report)) {
1660 DHD_ERROR(("%s(): could not allocate memory for - "
1661 "dhd_dbg_rx_report_t\n", __FUNCTION__));
1662 ret = -ENOMEM;
1663 goto fail;
1664 }
1665
1666 alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN);
1667 rx_pkts = (dhd_dbg_rx_info_t *)kzalloc(alloc_len, kflags);
1668 if (unlikely(!rx_pkts)) {
1669 DHD_ERROR(("%s(): could not allocate memory for - "
1670 "dhd_dbg_rx_info_t\n", __FUNCTION__));
1671 ret = -ENOMEM;
1672 goto fail;
1673 }
1674 dhdp->dbg->pkt_mon.rx_report = rx_report;
1675 dhdp->dbg->pkt_mon.rx_report->rx_pkts = rx_pkts;
1676 dhdp->dbg->pkt_mon.rx_pkt_mon = rx_pkt_mon;
1677 dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_ATTACHED;
1678
1679 DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
1680 DHD_PKT_MON(("%s(): packet monitor attach succeeded\n", __FUNCTION__));
1681 return ret;
1682
1683 fail:
1684 /* tx packet monitoring */
1685 if (tx_pkts) {
1686 kfree(tx_pkts);
1687 }
1688 if (tx_report) {
1689 kfree(tx_report);
1690 }
1691 dhdp->dbg->pkt_mon.tx_report = NULL;
1692 dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL;
1693 dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
1694 dhdp->dbg->pkt_mon.tx_status_mon = NULL;
1695 dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED;
1696 dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED;
1697
1698 /* rx packet monitoring */
1699 if (rx_pkts) {
1700 kfree(rx_pkts);
1701 }
1702 if (rx_report) {
1703 kfree(rx_report);
1704 }
1705 dhdp->dbg->pkt_mon.rx_report = NULL;
1706 dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL;
1707 dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
1708 dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
1709
1710 DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
1711 DHD_ERROR(("%s(): packet monitor attach failed\n", __FUNCTION__));
1712 return ret;
1713 }
1714
/*
 * dhd_dbg_start_pkt_monitor - (re)start packet-fate logging: frees any
 * packets left from a previous run, resets the fate-log positions, and
 * moves all three monitor state machines to PKT_MON_STARTED. Requires a
 * prior successful dhd_dbg_attach_pkt_monitor().
 *
 * Returns BCME_OK on success, -EINVAL on bad handles, detached monitor,
 * or missing report buffers.
 */
int
dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp)
{
	dhd_dbg_tx_report_t *tx_report;
	dhd_dbg_rx_report_t *rx_report;
	dhd_dbg_pkt_mon_state_t tx_pkt_state;
	dhd_dbg_pkt_mon_state_t tx_status_state;
	dhd_dbg_pkt_mon_state_t rx_pkt_state;
	unsigned long flags;

	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;

	/* all three monitors must have been attached first */
	if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
		PKT_MON_DETACHED(rx_pkt_state)) {
		DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
			"tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
			__FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
		return -EINVAL;
	}

	/* move to STARTING before touching the fate logs, so concurrent
	 * monitor hooks see a non-STARTED state and stay out
	 */
	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTING;
	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTING;
	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTING;

	tx_report = dhdp->dbg->pkt_mon.tx_report;
	rx_report = dhdp->dbg->pkt_mon.rx_report;
	if (!tx_report || !rx_report) {
		DHD_PKT_MON(("%s(): tx_report=%p, rx_report=%p\n",
			__FUNCTION__, tx_report, rx_report));
		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
		return -EINVAL;
	}


	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;

	/* Safe to free packets as state pkt_state is STARTING */
	__dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts, tx_report->pkt_pos);

	__dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts, rx_report->pkt_pos);

	/* reset array postion */
	tx_report->pkt_pos = 0;
	tx_report->status_pos = 0;
	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTED;
	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTED;

	rx_report->pkt_pos = 0;
	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTED;
	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);

	DHD_PKT_MON(("%s(): packet monitor started\n", __FUNCTION__));
	return BCME_OK;
}
1782
/*
 * dhd_dbg_monitor_tx_pkts - record one outgoing packet in the tx fate
 * log: clone the packet, stamp it with the driver timestamp and the
 * pkt/pktid hash (used later to match the tx status), and mark it
 * TX_PKT_FATE_DRV_QUEUED. When the log is full, tx monitoring flips to
 * PKT_MON_STOPPED. No-op unless the monitor is in PKT_MON_STARTED.
 *
 * Returns BCME_OK (also when nothing was logged), -EINVAL on bad handles.
 */
int
dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid)
{
	dhd_dbg_tx_report_t *tx_report;
	dhd_dbg_tx_info_t *tx_pkts;
	dhd_dbg_pkt_mon_state_t tx_pkt_state;
	uint32 pkt_hash, driver_ts;
	uint16 pkt_pos;
	unsigned long flags;

	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
	if (PKT_MON_STARTED(tx_pkt_state)) {
		tx_report = dhdp->dbg->pkt_mon.tx_report;
		pkt_pos = tx_report->pkt_pos;

		if (!PKT_MON_PKT_FULL(pkt_pos)) {
			tx_pkts = tx_report->tx_pkts;
			/* hash ties this entry to the later tx-status report */
			pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
			driver_ts = __dhd_dbg_driver_ts_usec();

			/* clone: the original packet continues down the tx path */
			tx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt);
			tx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt);
			tx_pkts[pkt_pos].info.pkt_hash = pkt_hash;
			tx_pkts[pkt_pos].info.driver_ts = driver_ts;
			tx_pkts[pkt_pos].info.firmware_ts = 0U;
			tx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II;
			tx_pkts[pkt_pos].fate = TX_PKT_FATE_DRV_QUEUED;

			tx_report->pkt_pos++;
		} else {
			dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED;
			DHD_PKT_MON(("%s(): tx pkt logging stopped, reached "
				"max limit\n", __FUNCTION__));
		}
	}

	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
	return BCME_OK;
}
1829
/*
 * dhd_dbg_monitor_tx_status - attach a firmware tx-status to a
 * previously logged tx packet. The entry is found by the pkt/pktid hash:
 * first scanned forward from status_pos (the common, in-order completion
 * case), then backward from status_pos-1 (out-of-order completions).
 * On a match, the entry's fate is updated and status_pos advances.
 * No-op unless the status monitor is PKT_MON_STARTED; flips to
 * PKT_MON_STOPPED when the status log is full.
 *
 * Returns BCME_OK (also when no entry matched), -EINVAL on bad handles.
 */
int
dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
	uint16 status)
{
	dhd_dbg_tx_report_t *tx_report;
	dhd_dbg_tx_info_t *tx_pkt;
	dhd_dbg_pkt_mon_state_t tx_status_state;
	wifi_tx_packet_fate pkt_fate;
	uint32 pkt_hash, temp_hash;
	uint16 pkt_pos, status_pos;
	int16 count;	/* signed: the backward scan runs down to index 0 */
	bool found = FALSE;
	unsigned long flags;

	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
	if (PKT_MON_STARTED(tx_status_state)) {
		tx_report = dhdp->dbg->pkt_mon.tx_report;
		pkt_pos = tx_report->pkt_pos;
		status_pos = tx_report->status_pos;

		if (!PKT_MON_STATUS_FULL(pkt_pos, status_pos)) {
			/* same hash function as dhd_dbg_monitor_tx_pkts */
			pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
			pkt_fate = __dhd_dbg_map_tx_status_to_pkt_fate(status);

			/* best bet (in-order tx completion) */
			count = status_pos;
			tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + status_pos);
			while ((count < pkt_pos) && tx_pkt) {
				temp_hash = tx_pkt->info.pkt_hash;
				if (temp_hash == pkt_hash) {
					tx_pkt->fate = pkt_fate;
					tx_report->status_pos++;
					found = TRUE;
					break;
				}
				tx_pkt++;
				count++;
			}

			/* search until beginning (handles out-of-order completion) */
			if (!found) {
				count = status_pos - 1;
				tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + count);
				while ((count >= 0) && tx_pkt) {
					temp_hash = tx_pkt->info.pkt_hash;
					if (temp_hash == pkt_hash) {
						tx_pkt->fate = pkt_fate;
						tx_report->status_pos++;
						found = TRUE;
						break;
					}
					tx_pkt--;
					count--;
				}

				if (!found) {
					/* still couldn't match tx_status */
					DHD_ERROR(("%s(): couldn't match tx_status, pkt_pos=%u, "
						"status_pos=%u, pkt_fate=%u\n", __FUNCTION__,
						pkt_pos, status_pos, pkt_fate));
				}
			}
		} else {
			dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED;
			DHD_PKT_MON(("%s(): tx_status logging stopped, reached "
				"max limit\n", __FUNCTION__));
		}
	}

	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
	return BCME_OK;
}
1909
1910 int
dhd_dbg_monitor_rx_pkts(dhd_pub_t * dhdp,void * pkt)1911 dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt)
1912 {
1913 dhd_dbg_rx_report_t *rx_report;
1914 dhd_dbg_rx_info_t *rx_pkts;
1915 dhd_dbg_pkt_mon_state_t rx_pkt_state;
1916 uint32 driver_ts;
1917 uint16 pkt_pos;
1918 unsigned long flags;
1919
1920 if (!dhdp || !dhdp->dbg) {
1921 DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
1922 dhdp, (dhdp ? dhdp->dbg : NULL)));
1923 return -EINVAL;
1924 }
1925
1926 DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
1927 rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
1928 if (PKT_MON_STARTED(rx_pkt_state)) {
1929 rx_report = dhdp->dbg->pkt_mon.rx_report;
1930 pkt_pos = rx_report->pkt_pos;
1931
1932 if (!PKT_MON_PKT_FULL(pkt_pos)) {
1933 rx_pkts = rx_report->rx_pkts;
1934 driver_ts = __dhd_dbg_driver_ts_usec();
1935
1936 rx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt);
1937 rx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt);
1938 rx_pkts[pkt_pos].info.pkt_hash = 0U;
1939 rx_pkts[pkt_pos].info.driver_ts = driver_ts;
1940 rx_pkts[pkt_pos].info.firmware_ts = 0U;
1941 rx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II;
1942 rx_pkts[pkt_pos].fate = RX_PKT_FATE_SUCCESS;
1943
1944 rx_report->pkt_pos++;
1945 } else {
1946 dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED;
1947 DHD_PKT_MON(("%s(): rx pkt logging stopped, reached "
1948 "max limit\n", __FUNCTION__));
1949 }
1950 }
1951
1952 DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
1953 return BCME_OK;
1954 }
1955
1956 int
dhd_dbg_stop_pkt_monitor(dhd_pub_t * dhdp)1957 dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp)
1958 {
1959 dhd_dbg_pkt_mon_state_t tx_pkt_state;
1960 dhd_dbg_pkt_mon_state_t tx_status_state;
1961 dhd_dbg_pkt_mon_state_t rx_pkt_state;
1962 unsigned long flags;
1963
1964 DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
1965 if (!dhdp || !dhdp->dbg) {
1966 DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
1967 dhdp, (dhdp ? dhdp->dbg : NULL)));
1968 return -EINVAL;
1969 }
1970
1971 DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
1972 tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
1973 tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
1974 rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
1975
1976 if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
1977 PKT_MON_DETACHED(rx_pkt_state)) {
1978 DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
1979 "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
1980 __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
1981 DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
1982 return -EINVAL;
1983 }
1984 dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED;
1985 dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED;
1986 dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED;
1987 DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
1988
1989 DHD_PKT_MON(("%s(): packet monitor stopped\n", __FUNCTION__));
1990 return BCME_OK;
1991 }
1992
/*
 * Copy 'n' bytes from kernel buffer 'from' to user pointer 'to'.
 *
 * WARNING: on failure this macro does a bare 'return __ret;' from the
 * enclosing function.  Callers holding a lock (e.g. pkt_mon_lock) will
 * leak it on the error path -- use only where that is acceptable.
 *
 * BUGFIX: dropped the stray semicolon after 'while (0)'.  With it, the
 * expansion of '__COPY_TO_USER(...);' produced an extra empty statement,
 * which breaks the macro inside an unbraced if/else.
 */
#define __COPY_TO_USER(to, from, n) \
	do { \
		int __ret; \
		__ret = copy_to_user((void __user *)(to), (void *)(from), \
				(unsigned long)(n)); \
		if (unlikely(__ret)) { \
			DHD_ERROR(("%s():%d: copy_to_user failed, ret=%d\n", \
					__FUNCTION__, __LINE__, __ret)); \
			return __ret; \
		} \
	} while (0)
2004
/*
 * dhd_dbg_monitor_get_tx_pkts: copy up to 'req_count' monitored TX packets
 * (fate, packet metadata, raw frame bytes) into 'user_buf' and report the
 * number copied through 'resp_count'.  Only packets that already received
 * a TX status (index below status_pos) are returned.
 *
 * NOTE(review): __COPY_TO_USER() does a bare 'return' on copy failure,
 * which would leave pkt_mon_lock held; copy_to_user() may also fault
 * while the lock is held -- TODO confirm the lock type tolerates this.
 */
int
dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
	uint16 req_count, uint16 *resp_count)
{
	dhd_dbg_tx_report_t *tx_report;
	dhd_dbg_tx_info_t *tx_pkt;
	wifi_tx_report_t *ptr;
	compat_wifi_tx_report_t *cptr;
	dhd_dbg_pkt_mon_state_t tx_pkt_state;
	dhd_dbg_pkt_mon_state_t tx_status_state;
	uint16 pkt_count, count;
	unsigned long flags;

	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
	/* silence "unused" warnings: only one of ptr/cptr is used per build */
	BCM_REFERENCE(ptr);
	BCM_REFERENCE(cptr);

	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
	if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state)) {
		DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
			"tx_pkt_state=%d, tx_status_state=%d\n", __FUNCTION__,
			tx_pkt_state, tx_status_state));
		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
		return -EINVAL;
	}

	count = 0;
	tx_report = dhdp->dbg->pkt_mon.tx_report;
	tx_pkt = tx_report->tx_pkts;
	/* cap at completed entries, not captured ones (status_pos <= pkt_pos) */
	pkt_count = MIN(req_count, tx_report->status_pos);

#ifdef CONFIG_COMPAT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
	if (in_compat_syscall())
#else
	if (is_compat_task())
#endif
	{
		/* 32-bit userspace on a 64-bit kernel: use the compat layout */
		cptr = (compat_wifi_tx_report_t *)user_buf;
		while ((count < pkt_count) && tx_pkt && cptr) {
			compat_wifi_tx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr);
			compat_dhd_dbg_pkt_info_t compat_tx_pkt;
			__dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count);
			__COPY_TO_USER(&comp_ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate));

			compat_tx_pkt.payload_type = tx_pkt->info.payload_type;
			compat_tx_pkt.pkt_len = tx_pkt->info.pkt_len;
			compat_tx_pkt.driver_ts = tx_pkt->info.driver_ts;
			compat_tx_pkt.firmware_ts = tx_pkt->info.firmware_ts;
			compat_tx_pkt.pkt_hash = tx_pkt->info.pkt_hash;
			/* copy metadata up to (excluding) pkt_hash in one shot */
			__COPY_TO_USER(&comp_ptr->frame_inf.payload_type,
				&compat_tx_pkt.payload_type,
				OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash));
			__COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii,
				PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len);

			cptr++;
			tx_pkt++;
			count++;
		}
	} else
#endif /* CONFIG_COMPAT */

	{
		ptr = (wifi_tx_report_t *)user_buf;
		while ((count < pkt_count) && tx_pkt && ptr) {
			__dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count);
			__COPY_TO_USER(&ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate));
			__COPY_TO_USER(&ptr->frame_inf.payload_type,
				&tx_pkt->info.payload_type,
				OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash));
			__COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii,
				PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len);

			ptr++;
			tx_pkt++;
			count++;
		}
	}
	*resp_count = pkt_count;

	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
	if (!pkt_count) {
		/* NOTE(review): format labels this "status_pos" but prints
		 * pkt_count (always 0 on this path)
		 */
		DHD_ERROR(("%s(): no tx_status in tx completion messages, "
			"make sure that 'd11status' is enabled in firmware, "
			"status_pos=%u\n", __FUNCTION__, pkt_count));
	}

	return BCME_OK;
}
2103
/*
 * dhd_dbg_monitor_get_rx_pkts: copy up to 'req_count' monitored RX packets
 * (fate, packet metadata, raw frame bytes) into 'user_buf' and report the
 * number copied through 'resp_count'.
 *
 * NOTE(review): __COPY_TO_USER() does a bare 'return' on copy failure,
 * which would leave pkt_mon_lock held; copy_to_user() may also fault
 * while the lock is held -- TODO confirm the lock type tolerates this.
 */
int
dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
	uint16 req_count, uint16 *resp_count)
{
	dhd_dbg_rx_report_t *rx_report;
	dhd_dbg_rx_info_t *rx_pkt;
	wifi_rx_report_t *ptr;
	compat_wifi_rx_report_t *cptr;
	dhd_dbg_pkt_mon_state_t rx_pkt_state;
	uint16 pkt_count, count;
	unsigned long flags;

	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
	/* silence "unused" warnings: only one of ptr/cptr is used per build */
	BCM_REFERENCE(ptr);
	BCM_REFERENCE(cptr);

	if (!dhdp || !dhdp->dbg) {
		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
			dhdp, (dhdp ? dhdp->dbg : NULL)));
		return -EINVAL;
	}

	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
	if (PKT_MON_DETACHED(rx_pkt_state)) {
		DHD_PKT_MON(("%s(): packet fetch is not allowed , "
			"rx_pkt_state=%d\n", __FUNCTION__, rx_pkt_state));
		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
		return -EINVAL;
	}

	count = 0;
	rx_report = dhdp->dbg->pkt_mon.rx_report;
	rx_pkt = rx_report->rx_pkts;
	pkt_count = MIN(req_count, rx_report->pkt_pos);

#ifdef CONFIG_COMPAT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
	if (in_compat_syscall())
#else
	if (is_compat_task())
#endif
	{
		/* 32-bit userspace on a 64-bit kernel: use the compat layout */
		cptr = (compat_wifi_rx_report_t *)user_buf;
		while ((count < pkt_count) && rx_pkt && cptr) {
			compat_wifi_rx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr);
			compat_dhd_dbg_pkt_info_t compat_rx_pkt;
			__dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count);
			__COPY_TO_USER(&comp_ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate));

			compat_rx_pkt.payload_type = rx_pkt->info.payload_type;
			compat_rx_pkt.pkt_len = rx_pkt->info.pkt_len;
			compat_rx_pkt.driver_ts = rx_pkt->info.driver_ts;
			compat_rx_pkt.firmware_ts = rx_pkt->info.firmware_ts;
			compat_rx_pkt.pkt_hash = rx_pkt->info.pkt_hash;
			/* copy metadata up to (excluding) pkt_hash in one shot */
			__COPY_TO_USER(&comp_ptr->frame_inf.payload_type,
				&compat_rx_pkt.payload_type,
				OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash));
			__COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii,
				PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len);

			cptr++;
			rx_pkt++;
			count++;
		}
	} else
#endif /* CONFIG_COMPAT */
	{
		ptr = (wifi_rx_report_t *)user_buf;
		while ((count < pkt_count) && rx_pkt && ptr) {
			__dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count);

			__COPY_TO_USER(&ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate));
			__COPY_TO_USER(&ptr->frame_inf.payload_type,
				&rx_pkt->info.payload_type,
				OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash));
			__COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii,
				PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len);

			ptr++;
			rx_pkt++;
			count++;
		}
	}

	*resp_count = pkt_count;
	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);

	return BCME_OK;
}
2194
2195 int
dhd_dbg_detach_pkt_monitor(dhd_pub_t * dhdp)2196 dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp)
2197 {
2198 dhd_dbg_tx_report_t *tx_report;
2199 dhd_dbg_rx_report_t *rx_report;
2200 dhd_dbg_pkt_mon_state_t tx_pkt_state;
2201 dhd_dbg_pkt_mon_state_t tx_status_state;
2202 dhd_dbg_pkt_mon_state_t rx_pkt_state;
2203 unsigned long flags;
2204
2205 DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
2206 if (!dhdp || !dhdp->dbg) {
2207 DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
2208 dhdp, (dhdp ? dhdp->dbg : NULL)));
2209 return -EINVAL;
2210 }
2211
2212 DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
2213 tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
2214 tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
2215 rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
2216
2217 if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
2218 PKT_MON_DETACHED(rx_pkt_state)) {
2219 DHD_PKT_MON(("%s(): packet monitor is already detached, "
2220 "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
2221 __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
2222 DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
2223 return -EINVAL;
2224 }
2225
2226 tx_report = dhdp->dbg->pkt_mon.tx_report;
2227 rx_report = dhdp->dbg->pkt_mon.rx_report;
2228
2229 /* free and de-initalize tx packet monitoring */
2230 dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED;
2231 dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED;
2232 if (tx_report) {
2233 if (tx_report->tx_pkts) {
2234 __dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts,
2235 tx_report->pkt_pos);
2236 kfree(tx_report->tx_pkts);
2237 dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL;
2238 }
2239 kfree(tx_report);
2240 dhdp->dbg->pkt_mon.tx_report = NULL;
2241 }
2242 dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
2243 dhdp->dbg->pkt_mon.tx_status_mon = NULL;
2244
2245 /* free and de-initalize rx packet monitoring */
2246 dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
2247 if (rx_report) {
2248 if (rx_report->rx_pkts) {
2249 __dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts,
2250 rx_report->pkt_pos);
2251 kfree(rx_report->rx_pkts);
2252 dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL;
2253 }
2254 kfree(rx_report);
2255 dhdp->dbg->pkt_mon.rx_report = NULL;
2256 }
2257 dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
2258
2259 DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
2260 DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__));
2261 return BCME_OK;
2262 }
2263 #endif /* DBG_PKT_MON */
2264
2265 /*
2266 * dhd_dbg_attach: initialziation of dhd dbugability module
2267 *
2268 * Return: An error code or 0 on success.
2269 */
2270 int
dhd_dbg_attach(dhd_pub_t * dhdp,dbg_pullreq_t os_pullreq,dbg_urgent_noti_t os_urgent_notifier,void * os_priv)2271 dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq,
2272 dbg_urgent_noti_t os_urgent_notifier, void *os_priv)
2273 {
2274 dhd_dbg_t *dbg;
2275 int ret, ring_id;
2276
2277 dbg = MALLOCZ(dhdp->osh, sizeof(dhd_dbg_t));
2278 if (!dbg)
2279 return BCME_NOMEM;
2280
2281 ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID,
2282 (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, DHD_PREALLOC_FW_VERBOSE_RING);
2283 if (ret)
2284 goto error;
2285
2286 ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_EVENT_RING_ID], FW_EVENT_RING_ID,
2287 (uint8 *)FW_EVENT_RING_NAME, FW_EVENT_RING_SIZE, DHD_PREALLOC_FW_EVENT_RING);
2288 if (ret)
2289 goto error;
2290
2291 ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID,
2292 (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, DHD_PREALLOC_DHD_EVENT_RING);
2293 if (ret)
2294 goto error;
2295
2296 ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[NAN_EVENT_RING_ID], NAN_EVENT_RING_ID,
2297 (uint8 *)NAN_EVENT_RING_NAME, NAN_EVENT_RING_SIZE, DHD_PREALLOC_NAN_EVENT_RING);
2298 if (ret)
2299 goto error;
2300
2301 dbg->private = os_priv;
2302 dbg->pullreq = os_pullreq;
2303 dbg->urgent_notifier = os_urgent_notifier;
2304 dhdp->dbg = dbg;
2305
2306 return BCME_OK;
2307
2308 error:
2309 for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
2310 if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
2311 dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]);
2312 }
2313 }
2314 MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t));
2315
2316 return ret;
2317 }
2318
2319 /*
2320 * dhd_dbg_detach: clean up dhd dbugability module
2321 */
2322 void
dhd_dbg_detach(dhd_pub_t * dhdp)2323 dhd_dbg_detach(dhd_pub_t *dhdp)
2324 {
2325 int ring_id;
2326 dhd_dbg_t *dbg;
2327 if (!dhdp->dbg)
2328 return;
2329 dbg = dhdp->dbg;
2330 for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
2331 if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
2332 dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]);
2333 }
2334 }
2335 MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t));
2336 }
2337