2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
7 * Copyright (C) 1999-2017, Broadcom Corporation
28 * <<Broadcom-WL-IPTag/Open:>>
30 * $Id: dhd_linux.c 702611 2017-06-02 06:40:15Z $
121 #include <linux/exynos-pci-ctrl.h>
271 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
273 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
517 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
668 * there is a constant delay of 45us between CLKREQ# and stable REFCLK
670 * there is a chance that the refclk sense triggers on noise.
728 #define DHD_IF_ROLE(pub, idx) ((pub)->info->iflist[idx]->role)
737 dhdp->info->iflist[ifidx]->role = role; in dhd_set_role()
752 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
754 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
756 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
849 /* Use in dongle supplicant for 4-way handshake */
934 #pragma GCC diagnostic ignored "-Wcast-qual" in dhd_pm_callback()
958 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); in dhd_pm_callback()
959 dhd_wlfc_suspend(&dhdinfo->pub); in dhd_pm_callback()
960 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); in dhd_pm_callback()
962 dhd_wlfc_resume(&dhdinfo->pub); in dhd_pm_callback()
1001 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
1002 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
1003 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
1004 #define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
1016 dev_priv->dhd = (dhd_info_t *)NULL; in dhd_dev_priv_clear()
1017 dev_priv->ifp = (dhd_if_t *)NULL; in dhd_dev_priv_clear()
1018 dev_priv->ifidx = DHD_BAD_IF; in dhd_dev_priv_clear()
1019 dev_priv->lkup = (void *)NULL; in dhd_dev_priv_clear()
1030 dev_priv->dhd = dhd; in dhd_dev_priv_save()
1031 dev_priv->ifp = ifp; in dhd_dev_priv_save()
1032 dev_priv->ifidx = ifidx; in dhd_dev_priv_save()
1040 if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS) in dhd_get_ifp()
1043 return dhdp->info->iflist[ifidx]; in dhd_get_ifp()
1099 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1110 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); in dhd_sta_free()
1112 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); in dhd_sta_free()
1120 uint16 flowid = sta->flowid[prio]; in dhd_sta_free()
1135 flow_queue_t *queue = &flow_ring_node->queue; in dhd_sta_free()
1137 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); in dhd_sta_free()
1138 flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; in dhd_sta_free()
1144 PKTFREE(dhdp->osh, pkt, TRUE); in dhd_sta_free()
1148 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); in dhd_sta_free()
1153 sta->flowid[prio] = FLOWID_INVALID; in dhd_sta_free()
1157 id16_map_free(dhdp->staid_allocator, sta->idx); in dhd_sta_free()
1158 DHD_CUMM_CTR_INIT(&sta->cumm_ctr); in dhd_sta_free()
1159 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ in dhd_sta_free()
1160 sta->ifidx = DHD_BAD_IF; in dhd_sta_free()
1161 bzero(sta->ea.octet, ETHER_ADDR_LEN); in dhd_sta_free()
1162 INIT_LIST_HEAD(&sta->list); in dhd_sta_free()
1163 sta->idx = ID16_INVALID; /* implying free */ in dhd_sta_free()
1174 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); in dhd_sta_alloc()
1176 idx = id16_map_alloc(dhdp->staid_allocator); in dhd_sta_alloc()
1182 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool); in dhd_sta_alloc()
1185 ASSERT((sta->idx == ID16_INVALID) && in dhd_sta_alloc()
1186 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); in dhd_sta_alloc()
1188 DHD_CUMM_CTR_INIT(&sta->cumm_ctr); in dhd_sta_alloc()
1190 sta->idx = idx; /* implying allocated */ in dhd_sta_alloc()
1205 #pragma GCC diagnostic ignored "-Wcast-qual" in dhd_if_del_sta_list()
1207 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { in dhd_if_del_sta_list()
1208 list_del(&sta->list); in dhd_if_del_sta_list()
1209 dhd_sta_free(&ifp->info->pub, sta); in dhd_if_del_sta_list()
1238 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL)); in dhd_sta_pool_init()
1241 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1); in dhd_sta_pool_init()
1249 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz); in dhd_sta_pool_init()
1252 id16_map_fini(dhdp->osh, staid_allocator); in dhd_sta_pool_init()
1256 dhdp->sta_pool = sta_pool; in dhd_sta_pool_init()
1257 dhdp->staid_allocator = staid_allocator; in dhd_sta_pool_init()
1259 /* Initialize all sta(s) for the pre-allocated free pool. */ in dhd_sta_pool_init()
1261 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ in dhd_sta_pool_init()
1263 sta->idx = id16_map_alloc(staid_allocator); in dhd_sta_pool_init()
1264 ASSERT(sta->idx <= max_sta); in dhd_sta_pool_init()
1267 /* Now place them into the pre-allocated free pool. */ in dhd_sta_pool_init()
1272 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ in dhd_sta_pool_init()
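dhd_sta_pool_init() above pre-allocates max_sta dhd_sta_t objects (sta_pool[0] is skipped) and seeds an id16 allocator with every index, so dhd_sta_alloc()/dhd_sta_free() later only exchange indices instead of allocating per station. Below is a minimal sketch of that pre-allocated, index-backed pool pattern; MAX_STA, the struct layout and the free-index stack are illustrative stand-ins, not the driver's id16_map API:

        #include <stdint.h>
        #include <string.h>

        #define MAX_STA 32                     /* pool size; entry 0 is reserved, as with sta_pool[0] */

        struct sta {
                uint16_t idx;                  /* 0 here plays the ID16_INVALID "free" role */
                uint8_t  ea[6];                /* peer MAC address */
        };

        struct sta_pool {
                struct sta objs[MAX_STA + 1];  /* pre-allocated objects, index 0 skipped */
                uint16_t free_ids[MAX_STA];    /* stack of free indices (the id16 allocator's job) */
                int free_top;
        };

        static void sta_pool_init(struct sta_pool *p)
        {
                memset(p, 0, sizeof(*p));
                /* Seed the allocator with every index 1..MAX_STA, as the loop above does. */
                for (uint16_t idx = MAX_STA; idx >= 1; idx--)
                        p->free_ids[p->free_top++] = idx;
        }

        static struct sta *sta_alloc(struct sta_pool *p)
        {
                if (p->free_top == 0)
                        return NULL;                   /* pool exhausted */
                uint16_t idx = p->free_ids[--p->free_top];
                p->objs[idx].idx = idx;                /* non-zero idx implies "allocated" */
                return &p->objs[idx];
        }

        static void sta_free(struct sta_pool *p, struct sta *s)
        {
                uint16_t idx = s->idx;
                memset(s, 0, sizeof(*s));              /* clear the entry and mark it free */
                p->free_ids[p->free_top++] = idx;      /* return the index to the allocator */
        }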
1287 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; in dhd_sta_pool_fini()
1296 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz); in dhd_sta_pool_fini()
1297 dhdp->sta_pool = NULL; in dhd_sta_pool_fini()
1300 id16_map_fini(dhdp->osh, dhdp->staid_allocator); in dhd_sta_pool_fini()
1301 dhdp->staid_allocator = NULL; in dhd_sta_pool_fini()
1304 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1321 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; in dhd_sta_pool_clear()
1322 staid_allocator = dhdp->staid_allocator; in dhd_sta_pool_clear()
1341 /* Initialize all sta(s) for the pre-allocated free pool. */ in dhd_sta_pool_clear()
1342 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ in dhd_sta_pool_clear()
1344 sta->idx = id16_map_alloc(staid_allocator); in dhd_sta_pool_clear()
1345 ASSERT(sta->idx <= max_sta); in dhd_sta_pool_clear()
1347 /* Now place them into the pre-allocated free pool. */ in dhd_sta_pool_clear()
1352 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ in dhd_sta_pool_clear()
1375 #pragma GCC diagnostic ignored "-Wcast-qual" in dhd_find_sta()
1377 list_for_each_entry(sta, &ifp->sta_list, list) { in dhd_find_sta()
1378 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { in dhd_find_sta()
1406 if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) { in dhd_add_sta()
1417 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN); in dhd_add_sta()
1420 sta->ifp = ifp; in dhd_add_sta()
1421 sta->ifidx = ifidx; in dhd_add_sta()
1422 INIT_LIST_HEAD(&sta->list); in dhd_add_sta()
1426 list_add_tail(&sta->list, &ifp->sta_list); in dhd_add_sta()
1451 #pragma GCC diagnostic ignored "-Wcast-qual" in dhd_del_all_sta()
1453 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { in dhd_del_all_sta()
1455 list_del(&sta->list); in dhd_del_all_sta()
1456 dhd_sta_free(&ifp->info->pub, sta); in dhd_del_all_sta()
1458 if (ifp->parp_enable) { in dhd_del_all_sta()
1460 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, in dhd_del_all_sta()
1461 ifp->phnd_arp_table, FALSE, in dhd_del_all_sta()
1462 sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt); in dhd_del_all_sta()
1490 #pragma GCC diagnostic ignored "-Wcast-qual" in dhd_del_sta()
1492 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { in dhd_del_sta()
1493 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { in dhd_del_sta()
1495 __FUNCTION__, MAC2STRDBG(sta->ea.octet))); in dhd_del_sta()
1496 list_del(&sta->list); in dhd_del_sta()
1497 dhd_sta_free(&ifp->info->pub, sta); in dhd_del_sta()
1505 if (ifp->parp_enable) { in dhd_del_sta()
1507 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, in dhd_del_sta()
1508 ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); in dhd_del_sta()
1541 list_for_each_entry(sta, &ifp->sta_list, list) { in dhd_sta_list_snapshot()
1543 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); in dhd_sta_list_snapshot()
1549 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); in dhd_sta_list_snapshot()
1551 INIT_LIST_HEAD(&snapshot->list); in dhd_sta_list_snapshot()
1552 list_add_tail(&snapshot->list, snapshot_list); in dhd_sta_list_snapshot()
1566 list_del(&sta->list); in dhd_sta_list_snapshot_free()
1567 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); in dhd_sta_list_snapshot_free()
1576 dhd_info_t *dhd = dhdp->info; in dhd_axi_error_dispatch()
1577 schedule_work(&dhd->axi_error_dispatcher_work); in dhd_axi_error_dispatch()
1584 dhd_axi_error(&dhd->pub); in dhd_axi_error_dispatcher_fn()
1592 dhd_info_t *dhd = dhdp->info; in dhd_bssidx2idx()
1599 ifp = dhd->iflist[i]; in dhd_bssidx2idx()
1600 if (ifp && (ifp->bssidx == bssidx)) { in dhd_bssidx2idx()
1602 ifp->name, bssidx, i)); in dhd_bssidx2idx()
1620 store_idx = dhdp->store_idx; in dhd_rxf_enqueue()
1621 sent_idx = dhdp->sent_idx; in dhd_rxf_enqueue()
1622 if (dhdp->skbbuf[store_idx] != NULL) { in dhd_rxf_enqueue()
1635 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n", in dhd_rxf_enqueue()
1636 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1))); in dhd_rxf_enqueue()
1637 dhdp->skbbuf[store_idx] = skb; in dhd_rxf_enqueue()
1638 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1); in dhd_rxf_enqueue()
1652 store_idx = dhdp->store_idx; in dhd_rxf_dequeue()
1653 sent_idx = dhdp->sent_idx; in dhd_rxf_dequeue()
1654 skb = dhdp->skbbuf[sent_idx]; in dhd_rxf_dequeue()
1663 dhdp->skbbuf[sent_idx] = NULL; in dhd_rxf_dequeue()
1664 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1); in dhd_rxf_dequeue()
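dhd_rxf_enqueue() and dhd_rxf_dequeue() above treat dhdp->skbbuf[] as a power-of-two ring: store_idx advances on enqueue, sent_idx on dequeue, both wrapped with & (MAXSKBPEND - 1), and a non-NULL slot at store_idx means the ring is full. A minimal sketch of the same ring discipline follows; RING_SLOTS and the function names are mine, and the driver additionally serializes the indices with its rxf lock:

        #include <stddef.h>

        #define RING_SLOTS 32                          /* must be a power of two, like MAXSKBPEND */

        struct pkt_ring {
                void *slot[RING_SLOTS];                /* the skbbuf[] role */
                unsigned int store_idx;                /* next slot the producer writes */
                unsigned int sent_idx;                 /* oldest slot the consumer reads */
        };

        /* Returns 0 on success, -1 when the slot is still occupied (ring full). */
        static int ring_enqueue(struct pkt_ring *r, void *pkt)
        {
                if (r->slot[r->store_idx] != NULL)
                        return -1;
                r->slot[r->store_idx] = pkt;
                r->store_idx = (r->store_idx + 1) & (RING_SLOTS - 1);
                return 0;
        }

        /* Returns the oldest queued packet, or NULL when the ring is empty. */
        static void *ring_dequeue(struct pkt_ring *r)
        {
                void *pkt = r->slot[r->sent_idx];

                if (pkt == NULL)
                        return NULL;
                r->slot[r->sent_idx] = NULL;
                r->sent_idx = (r->sent_idx + 1) & (RING_SLOTS - 1);
                return pkt;
        }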
1682 dhd_write_macaddr(&dhdp->mac); in dhd_process_cid_mac()
1728 __FUNCTION__, dhdp->dhd_bus_busy_state)); in dhd_wait_for_file_dump()
1730 &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0); in dhd_wait_for_file_dump()
1731 if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) { in dhd_wait_for_file_dump()
1733 __FUNCTION__, dhdp->dhd_bus_busy_state)); in dhd_wait_for_file_dump()
1780 for (i = 0; i < dhd->pktfilter_count; i++) { in dhd_set_packet_filter()
1781 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); in dhd_set_packet_filter()
1792 if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) { in dhd_enable_packet_filter()
1796 /* 1 - Enable packet filter, only allow unicast packets to be sent up */ in dhd_enable_packet_filter()
1797 /* 0 - Disable packet filter */ in dhd_enable_packet_filter()
1799 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress))) in dhd_enable_packet_filter()
1801 for (i = 0; i < dhd->pktfilter_count; i++) { in dhd_enable_packet_filter()
1804 !_turn_on_arp_filter(dhd, dhd->op_mode)) { in dhd_enable_packet_filter()
1807 value, i, dhd->op_mode)); in dhd_enable_packet_filter()
1811 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], in dhd_enable_packet_filter()
1831 if (dhdp->pktfilter[num] != NULL) { in dhd_packet_filter_add_remove()
1833 dhdp->pktfilter[num] = NULL; in dhd_packet_filter_add_remove()
1846 if (dhdp->pktfilter[num] != NULL) { in dhd_packet_filter_add_remove()
1848 dhdp->pktfilter[num] = NULL; in dhd_packet_filter_add_remove()
1872 return -EINVAL; in dhd_packet_filter_add_remove()
1877 dhdp->pktfilter[num] = filterp; in dhd_packet_filter_add_remove()
1878 dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]); in dhd_packet_filter_add_remove()
1880 if (dhdp->pktfilter[num]) { in dhd_packet_filter_add_remove()
1882 dhdp->pktfilter[num] = NULL; in dhd_packet_filter_add_remove()
1944 return -ENODEV;
1947 dhdinfo = dhd->info;
1951 __FUNCTION__, value, dhd->in_suspend));
1960 if (dhd->up) {
1961 if (value && dhd->in_suspend) {
1963 dhd->early_suspended = 1;
1989 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
2008 if (dhd->tdls_mode) {
2038 * MIN(max_roam_threshold, bcn_timeout -1);
2118 if (dhd->ndo_enable) {
2119 if (!dhd->ndo_host_ip_overflow) {
2175 if (!dhd->sroamed) {
2182 dhd->sroamed = FALSE;
2187 dhd->early_suspended = 0;
2224 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2236 /* restore pre-suspend setting */
2260 /* restore pre-suspend setting for dtim_skip */
2305 if (dhd->ndo_enable) {
2364 dhd_irq_set_affinity(dhd, dhd->info->cpumask_primary);
2375 dhd_pub_t *dhdp = &dhd->pub;
2382 dhdp->in_suspend = val;
2383 if ((force || !dhdp->suspend_disable_flag) &&
2415 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
2429 tmo->limit = usec;
2430 tmo->increment = 0;
2431 tmo->elapsed = 0;
2432 tmo->tick = jiffies_to_usecs(1);
2439 if (tmo->increment == 0) {
2440 tmo->increment = 1;
2444 if (tmo->elapsed >= tmo->limit)
2448 tmo->elapsed += tmo->increment;
2450 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
2451 OSL_DELAY(tmo->increment);
2452 tmo->increment *= 2;
2453 if (tmo->increment > tmo->tick)
2454 tmo->increment = tmo->tick;
2457 * OSL_SLEEP() corresponds to usleep_range(). In non-atomic
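dhd_timeout_start()/dhd_timeout_expired() above poll with an exponentially growing delay: the step starts at 1 us, doubles after every wait, and is capped at one scheduler tick, at which point the code may sleep instead of spinning. Here is a user-space sketch of that back-off loop, with usleep() standing in for OSL_DELAY()/OSL_SLEEP() and a 1 ms tick assumed purely for illustration:

        #include <stdbool.h>
        #include <unistd.h>

        struct timeout {
                unsigned int limit;            /* total budget in microseconds */
                unsigned int increment;        /* current back-off step in microseconds */
                unsigned int elapsed;          /* time accounted so far */
                unsigned int tick;             /* step cap; one scheduler tick in the driver */
        };

        static void timeout_start(struct timeout *tmo, unsigned int usec)
        {
                tmo->limit = usec;
                tmo->increment = 0;
                tmo->elapsed = 0;
                tmo->tick = 1000;              /* assume a 1 ms tick for this sketch */
        }

        /* Returns true once the budget is used up; otherwise waits one step and doubles it. */
        static bool timeout_expired(struct timeout *tmo)
        {
                if (tmo->increment == 0)
                        tmo->increment = 1;
                if (tmo->elapsed >= tmo->limit)
                        return true;
                tmo->elapsed += tmo->increment;
                usleep(tmo->increment);        /* the driver picks OSL_DELAY() or OSL_SLEEP() here */
                if (tmo->increment < tmo->tick) {
                        tmo->increment *= 2;                   /* exponential back-off ... */
                        if (tmo->increment > tmo->tick)
                                tmo->increment = tmo->tick;    /* ... capped at one tick */
                }
                return false;
        }

        /* Typical use: while (!condition_ready() && !timeout_expired(&tmo)) ; */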
2479 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2494 dhd_info = dhd_pub->info;
2495 if (dhd_info && dhd_info->iflist[ifidx])
2496 return dhd_info->iflist[ifidx]->net;
2510 while (--i > 0)
2511 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2516 return i; /* default - the primary interface */
2522 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2531 if (dhd->iflist[ifidx] == NULL) {
2536 if (dhd->iflist[ifidx]->net)
2537 return dhd->iflist[ifidx]->net->name;
2550 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2551 return dhd->iflist[i]->mac_addr;
2575 if (dhd->iflist[i]) {
2576 dev = dhd->iflist[i]->net;
2585 allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2589 if (!dhd->iflist[ifidx]) {
2590 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
2593 dev = dhd->iflist[ifidx]->net;
2601 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2606 if (!dhd->pub.early_suspended)
2614 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
2616 dhd_ifname(&dhd->pub, ifidx), cnt));
2620 strncpy(bufp, "mcast_list", buflen - 1);
2621 bufp[buflen - 1] = '\0';
2630 if (dhd->iflist[i]) {
2632 dev = dhd->iflist[i]->net;
2637 #pragma GCC diagnostic ignored "-Wcast-qual"
2645 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2649 cnt_iface[i], MAC2STRDBG(ha->addr)));
2650 cnt_iface[i]--;
2659 #pragma GCC diagnostic ignored "-Wcast-qual"
2664 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2666 cnt--;
2680 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2683 dhd_ifname(&dhd->pub, ifidx), cnt));
2687 MFREE(dhd->pub.osh, buf, buflen);
2695 ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
2699 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2707 if (dhd->iflist[i]) {
2708 dev = dhd->iflist[i]->net;
2709 allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2713 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2724 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2727 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2736 ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
2739 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
2741 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
2743 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
2758 dhd_info_t *dhd = dhdp->info;
2759 return (int)dhd->psta_mode;
2764 dhd_info_t *dhd = dhdp->info;
2765 dhd->psta_mode = val;
2774 dhd_info_t *dhd = dhdp->info;
2779 ifp = dhd->iflist[idx];
2783 (ifp->block_ping) ||
2786 (dhd->wet_mode) ||
2789 (ifp->mcast_regen_bss_enable) ||
2792 ifp->rx_pkt_chainable = FALSE;
2801 dhd_info_t *dhd = dhdp->info;
2802 return (int)dhd->wet_mode;
2808 dhd_info_t *dhd = dhdp->info;
2809 dhd->wet_mode = val;
2868 DHD_OS_WAKE_LOCK(&dhd->pub);
2869 DHD_PERIM_LOCK(&dhd->pub);
2871 ifidx = if_event->event.ifidx;
2872 bssidx = if_event->event.bssidx;
2876 if (if_event->event.ifidx > 0) {
2881 info.role = if_event->event.role;
2882 strncpy(info.name, if_event->name, IFNAMSIZ);
2883 if (is_valid_ether_addr(if_event->mac)) {
2884 mac_addr = if_event->mac;
2889 if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
2897 /* This path is for non-android case */
2900 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
2901 if_event->mac, bssidx, TRUE, if_event->name);
2907 DHD_PERIM_UNLOCK(&dhd->pub);
2908 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
2909 DHD_PERIM_LOCK(&dhd->pub);
2912 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2919 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
2921 ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
2925 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2931 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2933 DHD_PERIM_UNLOCK(&dhd->pub);
2934 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2961 DHD_OS_WAKE_LOCK(&dhd->pub);
2962 DHD_PERIM_LOCK(&dhd->pub);
2964 ifidx = if_event->event.ifidx;
2967 DHD_PERIM_UNLOCK(&dhd->pub);
2968 if (!dhd->pub.info->iflist[ifidx]) {
2974 if (if_event->event.ifidx > 0) {
2976 if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
2977 true, if_event->event.ifidx) != 0) {
2983 /* For non-cfg80211 drivers */
2984 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2988 DHD_PERIM_LOCK(&dhd->pub);
2989 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2990 DHD_PERIM_UNLOCK(&dhd->pub);
2991 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3012 DHD_OS_WAKE_LOCK(&dhd->pub);
3013 DHD_PERIM_LOCK(&dhd->pub);
3019 DHD_GENERAL_LOCK(&dhd->pub, flags);
3021 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3025 ifp->net->name));
3031 if (ifp == NULL || !dhd->pub.up) {
3037 ifp->set_macaddress = FALSE;
3038 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3044 DHD_PERIM_UNLOCK(&dhd->pub);
3045 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3068 DHD_OS_WAKE_LOCK(&dhd->pub);
3069 DHD_PERIM_LOCK(&dhd->pub);
3071 ifp = dhd->iflist[ifidx];
3073 if (ifp == NULL || !dhd->pub.up) {
3082 DHD_GENERAL_LOCK(&dhd->pub, flags);
3084 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3088 ifp->net->name));
3089 ifp->set_multicast = FALSE;
3095 ifidx = ifp->idx;
3105 DHD_PERIM_UNLOCK(&dhd->pub);
3106 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3122 return -1;
3124 dhdif = dhd->iflist[ifidx];
3127 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3128 dhdif->set_macaddress = TRUE;
3132 ret = _dhd_set_mac_address(dhd, ifidx, dhdif->mac_addr);
3133 dhdif->set_macaddress = FALSE;
3135 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3151 dhd->iflist[ifidx]->set_multicast = TRUE;
3152 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
3161 dhd_info_t *dhd = dhdp->info;
3162 return dhd->uc_path;
3170 dhd_info_t *di = (dhd_info_t *)(pub->info);
3172 spin_lock_bh(&di->wlfc_spinlock);
3179 dhd_info_t *di = (dhd_info_t *)(pub->info);
3182 spin_unlock_bh(&di->wlfc_spinlock);
3198 skb = PKTTONATIVE(dhdp->osh, p);
3200 ifp = dhdp->info->iflist[ifidx];
3201 skb->dev = ifp->net;
3203 skb->protocol = eth_type_trans(skb, skb->dev);
3210 if (dhdp->info->rxthread_enabled) {
3214 PKTSETNEXT(dhdp->osh, skbprev, skb);
3230 if (dhdp->info->rxthread_enabled && skbhead)
3240 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3250 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
3252 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3253 return -ENODEV;
3257 if (dhdp->busstate == DHD_BUS_SUSPEND) {
3259 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3265 if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
3267 dhdp->tx_big_packets++;
3268 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3275 if (ifp->dhcp_unicast) {
3279 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
3285 ehptr = PKTDATA(dhdp->osh, pktbuf);
3291 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3292 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
3293 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3298 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3305 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3311 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
3312 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
3315 if (ETHER_ISMULTI(eh->ether_dhost))
3316 dhdp->tx_multicast++;
3317 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
3322 dhdp->prio_8021x = prio;
3326 atomic_inc(&dhd->pend_8021x_cnt);
3329 pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
3333 (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
3335 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3383 pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
3388 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
3389 atomic_dec(&dhd->pend_8021x_cnt);
3391 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
3402 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
3405 if (ETHER_ISMULTI(eh->ether_dhost))
3421 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
3422 /* non-proptxstatus way */
3424 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3426 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3432 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3434 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3450 if (!ifp || ifp->del_in_progress) {
3452 __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
3454 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3455 return -ENODEV;
3459 __FUNCTION__, dhdp->busstate));
3461 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3462 return -ENODEV;
3471 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3472 ret = -EBUSY;
3480 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3486 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3487 return -ENODEV;
3523 if (dhd_query_bus_erros(&dhd->pub)) {
3527 return -ENODEV;
3531 DHD_GENERAL_LOCK(&dhd->pub, flags);
3532 DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
3533 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3536 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
3537 /* In order to avoid pkt loss, return NETDEV_TX_BUSY until run-time resume is done. */
3539 DHD_GENERAL_LOCK(&dhd->pub, flags);
3540 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3541 dhd_bus_stop_queue(dhd->pub.bus);
3543 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3544 dhd_os_busbusy_wake(&dhd->pub);
3545 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3550 DHD_GENERAL_LOCK(&dhd->pub, flags);
3551 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3553 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
3554 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3557 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3558 dhd_bus_stop_queue(dhd->pub.bus);
3561 dhd_os_busbusy_wake(&dhd->pub);
3562 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3566 DHD_OS_WAKE_LOCK(&dhd->pub);
3570 if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
3572 dhd->pub.busstate = DHD_BUS_DOWN;
3577 if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
3579 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3583 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
3585 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3589 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3590 dhd_os_busbusy_wake(&dhd->pub);
3591 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3593 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3600 ifp->del_in_progress) {
3602 __FUNCTION__, ifidx, ifp, (ifp ? ifp->del_in_progress : 0)));
3604 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3605 dhd_os_busbusy_wake(&dhd->pub);
3606 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3608 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3613 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3616 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
3620 /* re-align socket buffer if "skb->data" is odd address */
3621 if (((unsigned long)(skb->data)) & 0x1) {
3622 unsigned char *data = skb->data;
3623 uint32 length = skb->len;
3624 PKTPUSH(dhd->pub.osh, skb, 1);
3625 memmove(skb->data, data, length);
3626 PKTSETLEN(dhd->pub.osh, skb, length);
3629 datalen = PKTLEN(dhd->pub.osh, skb);
3632 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
3636 dhd_ifname(&dhd->pub, ifidx)));
3637 dhd->pub.tx_realloc++;
3640 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
3645 dhd_ifname(&dhd->pub, ifidx)));
3646 ret = -ENOMEM;
3653 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
3655 dhd_ifname(&dhd->pub, ifidx)));
3658 ret = -ENOMEM;
3666 if (WET_ENABLED(&dhd->pub) &&
3667 (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
3669 __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
3670 PKTFREE(dhd->pub.osh, pktbuf, FALSE);
3671 ret = -EFAULT;
3680 if (PSR_ENABLED(&dhd->pub) &&
3681 (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
3684 dhd_ifname(&dhd->pub, ifidx)));
3689 if (skb->sk) {
3690 sk_pacing_shift_update(skb->sk, 8);
3695 if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
3696 ifp->tsyncack_txed ++;
3701 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
3703 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
3709 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
3723 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
3728 ifp->stats.tx_dropped++;
3729 dhd->pub.tx_dropped++;
3733 if (!dhd_wlfc_is_supported(&dhd->pub))
3736 dhd->pub.tx_packets++;
3737 ifp->stats.tx_packets++;
3738 ifp->stats.tx_bytes += datalen;
3742 DHD_GENERAL_LOCK(&dhd->pub, flags);
3743 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3745 dhd_os_tx_completion_wake(&dhd->pub);
3746 dhd_os_busbusy_wake(&dhd->pub);
3747 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3749 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3762 pub = work->pub;
3766 if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
3771 if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
3774 pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
3775 pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
3790 dhd = DHD_DEV_INFO(work->net);
3792 bus = dhd->pub.bus;
3794 if (atomic_read(&dhd->pub.block_bus)) {
3795 kfree_skb(work->skb);
3802 ret = dhd_start_xmit(work->skb, work->net);
3810 netdev_err(work->net,
3825 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3828 dhd_netif_stop_queue(dhd->pub.bus);
3839 ret = -ENOMEM;
3844 INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
3845 start_xmit_work->skb = skb;
3846 start_xmit_work->net = net;
3847 queue_work(dhd->tx_wq, &start_xmit_work->work);
3854 } else if (dhd->pub.busstate == DHD_BUS_DATA) {
3861 ret = -ENODEV;
3873 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3881 INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
3882 rx_work->pub = dhdp;
3883 queue_work(dhd->rx_wq, &rx_work->work);
3892 if ((state == ON) && (dhdp->txoff == FALSE)) {
3898 if ((state == OFF) && (dhdp->txoff == TRUE)) {
3910 dhd_info_t *dhd = dhdp->info;
3919 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
3926 if (dhd->iflist[i]) {
3927 net = dhd->iflist[i]->net;
3932 if (dhd->iflist[ifidx]) {
3933 net = dhd->iflist[ifidx]->net;
3937 dhdp->txoff = state;
3944 * Input eh - pointer to the ethernet header
3956 if (eh->ether_type != hton16(ETHER_TYPE_IP))
3959 /* Non-IPv4 multicast packets are not handled */
3967 if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
3968 ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
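The fragment above rewrites the destination MAC when an IPv4 multicast packet carries a unicast Ethernet destination: ETHER_FILL_MCAST_ADDR_FROM_IP() derives the standard Ethernet multicast address, i.e. the fixed prefix 01:00:5e plus the low 23 bits of the group address (RFC 1112). A standalone sketch of that mapping; the helper name below is mine, not the macro's definition:

        #include <stdint.h>

        /* Ethernet multicast MAC for an IPv4 multicast group (RFC 1112):
         * fixed prefix 01:00:5e followed by the low 23 bits of the address. */
        static void mcast_mac_from_ipv4(uint8_t mac[6], uint32_t group /* host byte order */)
        {
                mac[0] = 0x01;
                mac[1] = 0x00;
                mac[2] = 0x5e;
                mac[3] = (group >> 16) & 0x7f;     /* bit 23 of the group address is dropped */
                mac[4] = (group >> 8) & 0xff;
                mac[5] = group & 0xff;
        }

        /* Example: 239.1.2.3 (0xef010203) maps to 01:00:5e:01:02:03. */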
3992 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4006 pktlen = skb->len + ETH_HLEN;
4022 dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
4048 dhdp = &dhd->pub;
4055 qlen = skb_queue_len(&dhd->evt_trace_queue);
4059 while (process_len--) {
4061 skb = skb_dequeue(&dhd->evt_trace_queue);
4073 dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
4085 if (dhdp->logtrace_pkt_sendup) {
4091 void *npkt = PKTDUP(dhdp->osh, skb);
4093 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4104 * to send skb to network layer, assign skb->dev with
4108 skb = PKTTONATIVE(dhdp->osh, skb);
4109 skb->dev = dhd->iflist[0]->net;
4117 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4119 PKTFREE(dhdp->osh, skb, FALSE);
4133 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4134 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
4138 dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
4140 dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
4142 if (dhd->pub.dongle_reset == FALSE) {
4145 if (tsk->terminated) {
4151 if (dhd->pub.dongle_edl_support) {
4152 ret = dhd_prot_process_edl_complete(&dhd->pub,
4153 &dhd->event_data);
4178 if (tsk->flush_ind) {
4180 dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
4181 tsk->flush_ind = 0;
4182 complete(&tsk->flushed);
4186 dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
4191 complete_and_exit(&tsk->completed, 0);
4192 dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
4199 /* Ignore compiler warnings due to -Werror=cast-qual */
4202 #pragma GCC diagnostic ignored "-Wcast-qual"
4211 if (dhd->pub.dongle_edl_support) {
4212 ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
4221 schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
4235 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4236 binary_sema_up(&dhd->thr_logtrace_ctl);
4239 dhd->thr_logtrace_ctl.thr_pid));
4242 schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
4251 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4252 PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
4255 dhd->thr_logtrace_ctl.thr_pid));
4258 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
4266 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4267 PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
4270 dhd->thr_logtrace_ctl.thr_pid));
4273 flush_delayed_work(&dhd->event_log_dispatcher_work);
4281 dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
4282 PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
4283 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4288 dhd->thr_logtrace_ctl.thr_pid));
4291 INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
4300 /* Re-init only if PROC_STOP from dhd_stop was called
4303 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4304 PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
4306 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4311 dhd->thr_logtrace_ctl.thr_pid));
4315 /* No need to re-init for WQ as cancel_delayed_work_sync will
4325 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4331 skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
4339 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4342 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
4344 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4346 PKTFREE(dhdp->osh, skb, FALSE);
4359 dhd_info_t *dhd = dhdp->info;
4367 pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
4369 pkt = PKTGET(dhdp->osh, pktsize, FALSE);
4373 PKTSETLEN(dhdp->osh, pkt, pktsize);
4374 pktdata = PKTDATA(dhdp->osh, pkt);
4376 /* For infobuf packets assign skb->dev with
4379 skb = PKTTONATIVE(dhdp->osh, pkt);
4380 skb->dev = dhd->iflist[0]->net;
4392 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4424 pnext = PKTNEXT(dhdp->osh, pktbuf);
4425 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
4449 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4451 PKTFREE(dhdp->osh, pktbuf, FALSE);
4465 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4470 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
4472 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4474 PKTFREE(dhdp->osh, pktbuf, FALSE);
4477 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4482 ifp = dhd->iflist[ifidx];
4486 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
4488 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4490 PKTFREE(dhdp->osh, pktbuf, FALSE);
4493 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4500 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
4501 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4503 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
4504 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4509 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4517 piggy-back on
4519 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4525 if (ifp->block_ping) {
4526 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
4527 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4531 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
4532 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4533 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4537 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4544 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4548 if (ifp->block_tdls) {
4549 if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
4550 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4557 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
4558 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
4562 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
4564 if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
4566 ETHER_ISUCAST(eh->ether_dhost)) {
4569 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
4591 ifp->tsync_rcvd ++;
4592 delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
4593 delta_sec = curr_time - ifp->last_sync;
4597 schedule_work(&ifp->blk_tsfl_work);
4611 skb = PKTTONATIVE(dhdp->osh, pktbuf);
4614 skb->dev = ifp->net;
4619 if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
4634 DHD_TRACE(("\nAp isolate in dhd is %d\n", ifp->ap_isolate));
4635 if (ifidx >= 0 && dhdp != NULL && dhdp->info != NULL &&
4636 dhdp->info->iflist[ifidx] != NULL) {
4638 (!ifp->ap_isolate)) {
4641 MAC2STRDBG(dhdp->info->iflist[ifidx]->mac_addr),
4644 __FUNCTION__, MAC2STRDBG(eh->ether_dhost), ifidx));
4645 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4646 if (ETHER_ISUCAST(eh->ether_dhost)) {
4647 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
4653 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
4664 if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
4665 (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
4666 (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
4669 PKTFREE(dhdp->osh, pktbuf, FALSE);
4675 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
4679 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
4682 eth = skb->data;
4683 len = skb->len;
4684 dump_data = skb->data;
4685 protocol = (skb->data[12] << 8) | skb->data[13];
4690 wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
4693 if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
4694 OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
4707 skb->protocol = eth_type_trans(skb, skb->dev);
4709 if (skb->pkt_type == PACKET_MULTICAST) {
4710 dhd->pub.rx_multicast++;
4711 ifp->stats.multicast++;
4714 skb->data = eth;
4715 skb->len = len;
4727 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
4738 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4740 PKTFREE(dhdp->osh, pktbuf, FALSE);
4788 wcp->rc_event[event.event_type]++;
4789 wcp->rcwake++;
4803 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4805 PKTFREE(dhdp->osh, pktbuf, FALSE);
4815 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
4816 ifp = dhd->iflist[ifidx];
4818 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
4820 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
4821 dhd->pub.up))
4827 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4829 PKTFREE(dhdp->osh, pktbuf, FALSE);
4834 if (dhdp->wl_event_enabled) {
4840 void *npkt = PKTDUP(dhdp->osh, skb);
4842 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4853 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4855 PKTFREE(dhdp->osh, pktbuf, FALSE);
4870 wcp->rxwake++;
4877 if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
4878 wcp->rx_arp++;
4880 wcp->rx_bcast++;
4882 wcp->rx_mcast++;
4883 if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
4884 wcp->rx_multi_ipv6++;
4885 if ((skb->len > ETHER_ICMP6_HEADER) &&
4887 wcp->rx_icmpv6++;
4888 if (skb->len > ETHER_ICMPV6_TYPE) {
4891 wcp->rx_icmpv6_ra++;
4894 wcp->rx_icmpv6_na++;
4897 wcp->rx_icmpv6_ns++;
4903 wcp->rx_multi_ipv4++;
4905 wcp->rx_multi_other++;
4908 wcp->rx_ucast++;
4921 ifp->net->last_rx = jiffies;
4924 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
4925 dhdp->dstats.rx_bytes += skb->len;
4926 dhdp->rx_packets++; /* Local count */
4927 ifp->stats.rx_bytes += skb->len;
4928 ifp->stats.rx_packets++;
4934 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4940 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4942 if (dhd->rxthread_enabled) {
4946 PKTSETNEXT(dhdp->osh, skbprev, skb);
4963 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
4965 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4971 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4976 if (dhd->rxthread_enabled && skbhead)
4995 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
5001 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
5002 type = ntoh16(eh->ether_type);
5005 atomic_dec(&dhd->pend_8021x_cnt);
5009 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
5010 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
5011 uint datalen = PKTLEN(dhd->pub.osh, txp);
5014 dhd->pub.tx_packets++;
5015 ifp->stats.tx_packets++;
5016 ifp->stats.tx_bytes += datalen;
5018 ifp->stats.tx_dropped++;
5038 ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
5045 if (dhd->pub.up) {
5047 dhd_prot_dstats(&dhd->pub);
5049 return &ifp->stats;
5052 memset(&net->stats, 0, sizeof(net->stats));
5053 return &net->stats;
5060 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5061 /* This thread doesn't need any user-level access,
5067 dhd_watchdog_prio:(MAX_RT_PRIO-1);
5072 if (down_interruptible (&tsk->sema) == 0) {
5077 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5081 if (tsk->terminated) {
5083 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5088 if (dhd->pub.dongle_reset == FALSE) {
5090 dhd_bus_watchdog(&dhd->pub);
5092 DHD_GENERAL_LOCK(&dhd->pub, flags);
5094 dhd->pub.tickcnt++;
5096 dhd_l2_filter_watchdog(&dhd->pub);
5098 time_lapse = jiffies - jiffies_at_start;
5101 if (dhd->wd_timer_valid) {
5102 mod_timer(&dhd->timer,
5104 msecs_to_jiffies(dhd_watchdog_ms) -
5107 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5110 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5117 complete_and_exit(&tsk->completed, 0);
5125 if (dhd->pub.dongle_reset) {
5129 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
5130 up(&dhd->thr_wdt_ctl.sema);
5135 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5138 dhd_bus_watchdog(&dhd->pub);
5140 DHD_GENERAL_LOCK(&dhd->pub, flags);
5142 dhd->pub.tickcnt++;
5145 dhd_l2_filter_watchdog(&dhd->pub);
5148 if (dhd->wd_timer_valid)
5149 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
5150 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5152 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5161 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5164 if (down_interruptible (&tsk->sema) == 0) {
5170 if (tsk->terminated) {
5174 if (dhd->pub.dongle_reset == FALSE) {
5176 if (dhd->pub.up) {
5177 dhd_runtimepm_state(&dhd->pub);
5180 DHD_GENERAL_LOCK(&dhd->pub, flags);
5181 time_lapse = jiffies - jiffies_at_start;
5184 if (dhd->rpm_timer_valid) {
5185 mod_timer(&dhd->rpm_timer,
5187 msecs_to_jiffies(dhd_runtimepm_ms) -
5191 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5198 complete_and_exit(&tsk->completed, 0);
5205 if (dhd->pub.dongle_reset) {
5209 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
5210 up(&dhd->thr_rpm_ctl.sema);
5224 if (!(dhdp->op_mode & DHD_FLAG_MFG_MODE)) {
5243 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
5255 if (!dhd->new_freq)
5259 freq->new, freq->cpu));
5260 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
5271 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5273 /* This thread doesn't need any user-level access,
5279 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
5287 dhd->pub.current_dpc = current;
5296 if (tsk->terminated) {
5301 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5305 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
5306 while (dhd_bus_dpc(dhd->pub.bus)) {
5314 dhd->pub.dhd_bug_on = true;
5320 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
5321 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5323 if (dhd->pub.up)
5324 dhd_bus_stop(dhd->pub.bus, TRUE);
5325 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5331 complete_and_exit(&tsk->completed, 0);
5338 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5343 dhd_pub_t *pub = &dhd->pub;
5345 /* This thread doesn't need any user-level access,
5351 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
5356 dhd->pub.current_rxf = current;
5360 if (down_interruptible(&tsk->sema) == 0) {
5368 if (tsk->terminated) {
5377 void *skbnext = PKTNEXT(pub->osh, skb);
5378 PKTSETNEXT(pub->osh, skb, NULL);
5385 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
5396 complete_and_exit(&tsk->completed, 0);
5405 if (!dhdp || !dhdp->info)
5407 dhd = dhdp->info;
5411 __skb_queue_head_init(&dhd->rx_pend_queue);
5415 skb_queue_head_init(&dhd->tx_pend_queue);
5430 dhd = dhdp->info;
5436 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5437 tasklet_kill(&dhd->tasklet);
5443 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
5444 __skb_queue_purge(&dhd->rx_pend_queue);
5447 cancel_work_sync(&dhd->tx_dispatcher_work);
5448 skb_queue_purge(&dhd->tx_pend_queue);
5453 tasklet_kill(&dhd->tx_compl_tasklet);
5456 tasklet_kill(&dhd->rx_compl_tasklet);
5459 tasklet_kill(&dhd->tx_tasklet);
5473 dhd = dhdp->info;
5479 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5480 tasklet_kill(&dhd->tasklet);
5497 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5499 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5501 if (dhd_bus_dpc(dhd->pub.bus)) {
5502 tasklet_schedule(&dhd->tasklet);
5505 dhd_bus_stop(dhd->pub.bus, TRUE);
5512 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5514 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5519 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5525 tasklet_schedule(&dhd->tasklet);
5532 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5541 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5542 up(&dhd->thr_rxf_ctl.sema);
5558 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
5561 if (ret == -EIO) {
5562 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
5564 return -EOPNOTSUPP;
5567 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5582 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
5585 dhd_ifname(&dhd->pub, ifidx), ret));
5591 ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
5593 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5608 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
5635 snprintf(info->driver, sizeof(info->driver), "wl");
5636 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
5659 return -EFAULT;
5665 return -EFAULT;
5667 drvname[sizeof(info.driver)-1] = '\0';
5676 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
5677 info.version[sizeof(info.version) - 1] = '\0';
5681 else if (!dhd->pub.up) {
5683 return -ENODEV;
5687 else if (dhd->pub.iswl)
5692 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
5694 return -EFAULT;
5712 return -EFAULT;
5719 return -EFAULT;
5738 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
5740 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
5747 return -EOPNOTSUPP;
5761 if (!dhdp->up)
5765 if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
5766 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
5771 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
5772 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
5775 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
5776 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
5779 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
5781 if (dhdp->hang_reason == 0) {
5782 if (dhdp->dongle_trap_occured) {
5783 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
5785 } else if (dhdp->d3ackcnt_timeout) {
5786 dhdp->hang_reason = dhdp->is_sched_error ?
5791 dhdp->hang_reason = dhdp->is_sched_error ?
5807 return (dhd->info->monitor_type != 0);
5813 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5815 uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
5820 if (!dhd->monitor_skb) {
5821 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
5825 if (dhd->monitor_type && dhd->monitor_dev)
5826 dhd->monitor_skb->dev = dhd->monitor_dev;
5828 PKTFREE(dhdp->osh, pkt, FALSE);
5829 dhd->monitor_skb = NULL;
5832 dhd->monitor_skb->protocol =
5833 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
5834 dhd->monitor_len = 0;
5838 if (!dhd->monitor_skb) {
5839 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
5842 dhd->monitor_len = 0;
5844 if (dhd->monitor_type && dhd->monitor_dev)
5845 dhd->monitor_skb->dev = dhd->monitor_dev;
5847 PKTFREE(dhdp->osh, pkt, FALSE);
5848 dev_kfree_skb(dhd->monitor_skb);
5851 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
5852 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5853 dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
5854 PKTFREE(dhdp->osh, pkt, FALSE);
5858 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
5859 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5860 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
5861 PKTFREE(dhdp->osh, pkt, FALSE);
5865 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
5866 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5867 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
5868 PKTFREE(dhdp->osh, pkt, FALSE);
5869 skb_put(dhd->monitor_skb, dhd->monitor_len);
5870 dhd->monitor_skb->protocol =
5871 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
5872 dhd->monitor_len = 0;
5880 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5881 netif_rx(dhd->monitor_skb);
5882 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5890 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
5893 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5894 netif_rx_ni(dhd->monitor_skb);
5895 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5898 dhd->monitor_skb = NULL;
5907 #define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
5929 dhdp->info->bus_user_count++;
5935 dhdp->info->bus_user_count--;
5940 * Failure: Returns -1 or errno code
5946 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5949 mutex_lock(&dhd->bus_user_lock);
5950 ++dhd->bus_user_count;
5951 if (dhd->bus_user_count < 0) {
5953 ret = -1;
5957 if (dhd->bus_user_count == 1) {
5959 dhd->pub.hang_was_sent = 0;
5964 if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
5976 dhd_bus_update_fw_nv_path(dhd->pub.bus,
5977 dhd->fw_path, dhd->nv_path);
5983 if (dhd_sync_with_dongle(&dhd->pub) < 0) {
5985 ret = -EFAULT;
5992 __FUNCTION__, dhd->bus_user_count));
5995 mutex_unlock(&dhd->bus_user_lock);
6002 * Failure: Returns -1 or errno code
6008 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6012 mutex_lock(&dhd->bus_user_lock);
6013 --dhd->bus_user_count;
6014 if (dhd->bus_user_count < 0) {
6016 dhd->bus_user_count = 0;
6017 ret = -1;
6021 if (dhd->bus_user_count == 0) {
6026 if (dhd->pub.wlfc_enabled) {
6027 dhd_wlfc_deinit(&dhd->pub);
6031 if (dhd->pub.pno_state) {
6032 dhd_pno_deinit(&dhd->pub);
6036 if (dhd->pub.rtt_state) {
6037 dhd_rtt_deinit(&dhd->pub);
6043 wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
6047 __FUNCTION__, dhd->bus_user_count));
6050 mutex_unlock(&dhd->bus_user_lock);
6059 return dhd_bus_get(&dhd->pub, WLAN_MODULE);
6066 return dhd_bus_put(&dhd->pub, WLAN_MODULE);
6073 * This function is not callable from non-sleepable context
6087 ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
6098 * This function is not callable from non-sleepable context
6112 ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
6122 * This function is not callable from non-sleepable context
6130 dhdsdio_reset_bt_use_count(dhdp->bus);
6138 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
6140 dhdp->hang_was_sent = 0;
6142 dhd_os_send_hang_message(&dhd->pub);
6180 if (dhd->monitor_dev) {
6193 snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
6203 dev->type = ARPHRD_IEEE80211_RADIOTAP;
6205 dev->netdev_ops = &netdev_monitor_ops;
6209 __FUNCTION__, dev->name));
6214 if (FW_SUPPORTED((&dhd->pub), monitor)) {
6217 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
6222 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
6229 dhd->monitor_dev = dev;
6243 if (!dhd->monitor_dev) {
6248 if (FW_SUPPORTED((&dhd->pub), monitor)) {
6251 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
6256 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
6263 if (dhd->monitor_dev) {
6264 if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
6265 free_netdev(dhd->monitor_dev);
6267 unregister_netdevice(dhd->monitor_dev);
6269 dhd->monitor_dev = NULL;
6276 dhd_info_t *dhd = pub->info;
6288 dhd->monitor_type = val;
6300 dhd_info_t *info = dhdp->info;
6303 dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
6325 dhd = &dhd_info->pub;
6355 if (ioc->driver == DHD_IOCTL_MAGIC) {
6358 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
6361 pub->bcmerror = bcmerror;
6367 buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
6370 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
6371 if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
6390 if (!pub->iswl) {
6397 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
6399 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
6400 * prevent disassoc frame being sent before WPS-DONE frame.
6402 if (ioc->cmd == WLC_SET_KEY ||
6403 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
6405 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
6407 ioc->cmd == WLC_DISASSOC)
6410 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
6420 if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
6470 DHD_OS_WAKE_LOCK(&dhd->pub);
6471 DHD_PERIM_LOCK(&dhd->pub);
6475 /* Interface up check for built-in type */
6476 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
6478 DHD_PERIM_UNLOCK(&dhd->pub);
6479 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6490 if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
6491 DHD_PERIM_UNLOCK(&dhd->pub);
6492 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6493 return -1;
6499 DHD_PERIM_UNLOCK(&dhd->pub);
6500 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6501 return -1;
6509 DHD_PERIM_UNLOCK(&dhd->pub);
6510 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6516 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
6517 DHD_PERIM_UNLOCK(&dhd->pub);
6518 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6525 dhd_check_hang(net, &dhd->pub, ret);
6526 DHD_PERIM_UNLOCK(&dhd->pub);
6527 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6534 DHD_PERIM_UNLOCK(&dhd->pub);
6535 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6536 return -EOPNOTSUPP;
6543 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
6549 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
6566 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
6571 DHD_PERIM_UNLOCK(&dhd->pub);
6573 DHD_PERIM_LOCK(&dhd->pub);
6577 DHD_PERIM_LOCK(&dhd->pub);
6590 if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
6592 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
6598 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
6604 DHD_PERIM_UNLOCK(&dhd->pub);
6606 bcmerror = -EFAULT;
6607 DHD_PERIM_LOCK(&dhd->pub);
6612 MFREE(dhd->pub.osh, local_buf, buflen+1);
6614 DHD_PERIM_UNLOCK(&dhd->pub);
6615 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6622 * user set the menu "Keep Wi-Fi on during sleep" to "Never"
6632 mutex_init(&dhd->cpufreq_fix);
6634 dhd->cpufreq_fix_status = FALSE;
6642 mutex_lock(&dhd->cpufreq_fix);
6644 if (dhd && !dhd->cpufreq_fix_status) {
6645 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
6647 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
6651 dhd->cpufreq_fix_status = TRUE;
6654 mutex_unlock(&dhd->cpufreq_fix);
6661 mutex_lock(&dhd->cpufreq_fix);
6663 if (dhd && dhd->cpufreq_fix_status != TRUE) {
6665 mutex_unlock(&dhd->cpufreq_fix);
6670 pm_qos_remove_request(&dhd->dhd_cpu_qos);
6672 pm_qos_remove_request(&dhd->dhd_bus_qos);
6676 dhd->cpufreq_fix_status = FALSE;
6678 mutex_unlock(&dhd->cpufreq_fix);
6690 if (atomic_read(&dhd->pub.block_bus))
6691 return -EHOSTDOWN;
6693 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
6698 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
6699 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
6717 DHD_OS_WAKE_LOCK(&dhd->pub);
6718 DHD_PERIM_LOCK(&dhd->pub);
6720 dhd->pub.rxcnt_timeout = 0;
6721 dhd->pub.txcnt_timeout = 0;
6724 dhd->pub.d3ackcnt_timeout = 0;
6727 mutex_lock(&dhd->pub.ndev_op_sync);
6729 if (dhd->pub.up == 0) {
6733 if (dhd->pub.req_hang_type) {
6735 __FUNCTION__, dhd->pub.req_hang_type));
6736 dhd->pub.req_hang_type = 0;
6775 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
6777 spin_lock_irqsave(&dhd->pub.up_lock, flags);
6778 dhd->pub.up = 0;
6779 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
6781 dhd->pub.up = 0;
6789 ifp = dhd->iflist[0];
6795 DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
6796 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
6797 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
6803 dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
6806 dhd_dump_pkt_clear(&dhd->pub);
6811 dhd_remove_if(&dhd->pub, i, FALSE);
6813 if (ifp && ifp->net) {
6830 cancel_work_sync(dhd->dhd_deferred_wq);
6838 __skb_queue_purge(&dhd->rx_pend_queue);
6842 skb_queue_purge(&dhd->tx_pend_queue);
6850 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6853 if (ifp && ifp->net == dhd->rx_napi_netdev) {
6854 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
6855 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6856 skb_queue_purge(&dhd->rx_napi_queue);
6857 napi_disable(&dhd->rx_napi_struct);
6858 netif_napi_del(&dhd->rx_napi_struct);
6859 dhd->rx_napi_netdev = NULL;
6865 DHD_SSSR_DUMP_DEINIT(&dhd->pub);
6868 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
6873 dhd_event_logtrace_flush_queue(&dhd->pub);
6874 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
6875 if (dhd->event_data.fmts) {
6876 MFREE(dhd->pub.osh, dhd->event_data.fmts,
6877 dhd->event_data.fmts_size);
6878 dhd->event_data.fmts = NULL;
6880 if (dhd->event_data.raw_fmts) {
6881 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
6882 dhd->event_data.raw_fmts_size);
6883 dhd->event_data.raw_fmts = NULL;
6885 if (dhd->event_data.raw_sstr) {
6886 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
6887 dhd->event_data.raw_sstr_size);
6888 dhd->event_data.raw_sstr = NULL;
6890 if (dhd->event_data.rom_raw_sstr) {
6891 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
6892 dhd->event_data.rom_raw_sstr_size);
6893 dhd->event_data.rom_raw_sstr = NULL;
6895 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
6904 dhd_prot_stop(&dhd->pub);
6912 dhd_bus_put(&dhd->pub, WLAN_MODULE);
6922 * "Keep Wi-Fi on during sleep" to "Never"
6931 dhd->pub.hang_was_sent = 0;
6932 dhd->pub.hang_was_pending = 0;
6934 /* Clear country spec for built-in type driver */
6936 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
6937 dhd->pub.dhd_cspec.rev = 0;
6938 dhd->pub.dhd_cspec.ccode[0] = 0x00;
6946 DHD_PERIM_UNLOCK(&dhd->pub);
6947 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6951 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
6954 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
6957 mutex_unlock(&dhd->pub.ndev_op_sync);
6993 if (dhd->pub.hang_was_sent == 1) {
6998 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
7002 return -1;
7007 mutex_lock(&dhd->pub.ndev_op_sync);
7009 if (dhd->pub.up == 1) {
7012 mutex_unlock(&dhd->pub.ndev_op_sync);
7019 mutex_unlock(&dhd->pub.ndev_op_sync);
7020 return -1;
7023 if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
7025 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7029 skb_queue_head_init(&dhd->evt_trace_queue);
7031 if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
7032 ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
7034 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7036 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7038 dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
7053 DHD_OS_WAKE_LOCK(&dhd->pub);
7054 DHD_PERIM_LOCK(&dhd->pub);
7055 dhd->pub.dongle_trap_occured = 0;
7056 dhd->pub.hang_was_sent = 0;
7057 dhd->pub.hang_was_pending = 0;
7058 dhd->pub.hang_reason = 0;
7059 dhd->pub.iovar_timeout_occured = 0;
7061 dhd->pub.d3ack_timeout_occured = 0;
7062 dhd->pub.livelock_occured = 0;
7063 dhd->pub.pktid_audit_failed = 0;
7065 dhd->pub.iface_op_failed = 0;
7066 dhd->pub.scan_timeout_occurred = 0;
7067 dhd->pub.scan_busy_occurred = 0;
7068 dhd->pub.smmu_fault_occurred = 0;
7071 dhd->pub.dequeue_prec_map = ALLPRIO;
7083 ret = -1;
7094 ret = -1;
7098 if (!dhd->iflist[ifidx]) {
7100 ret = -1;
7105 atomic_set(&dhd->pend_8021x_cnt, 0);
7109 DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
7115 * for built-in models. Need to start logtrace kthread before
7125 ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
7133 ret = -1;
7141 * "Keep Wi-Fi on during sleep" to "Never"
7160 if (dhd->pub.busstate != DHD_BUS_DATA) {
7163 DHD_PERIM_UNLOCK(&dhd->pub);
7166 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
7167 ret = dhd_bus_start(&dhd->pub);
7168 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
7169 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
7172 ret = dhd_bus_start(&dhd->pub);
7175 DHD_PERIM_LOCK(&dhd->pub);
7178 ret = -1;
7185 if (dhd->pub.is_bt_recovery_required) {
7189 dhd->pub.is_bt_recovery_required = FALSE;
7193 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
7198 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
7200 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
7205 __skb_queue_head_init(&dhd->rx_pend_queue);
7206 if (dhd->rx_napi_netdev == NULL) {
7207 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
7208 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
7209 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
7211 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
7212 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
7213 napi_enable(&dhd->rx_napi_struct);
7215 skb_queue_head_init(&dhd->rx_napi_queue);
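/*
 * A minimal, self-contained sketch (not taken from this driver) of the NAPI
 * registration pattern used above: a napi_struct is bound to the RX net_device
 * with a poll callback, then enabled before packets are queued to it.  The
 * poll routine, its budget handling, and the weight below are illustrative
 * assumptions; the exact netif_napi_add() signature varies across kernel
 * versions.
 */
#include <linux/netdevice.h>
#include <linux/string.h>

static int example_rx_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... dequeue up to 'budget' packets and hand them to the stack ... */
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}

static void example_napi_setup(struct net_device *ndev, struct napi_struct *napi)
{
	memset(napi, 0, sizeof(*napi));
	netif_napi_add(ndev, napi, example_rx_poll, NAPI_POLL_WEIGHT);
	napi_enable(napi);
}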
7221 skb_queue_head_init(&dhd->tx_pend_queue);
7227 ret = -1;
7232 dhd->pend_ipaddr = 0;
7247 dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
7250 dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
7253 dhd_irq_set_affinity(&dhd->pub, dhd->cpumask_primary);
7260 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7262 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7266 dhd_set_scb_probe(&dhd->pub);
7271 dhd->pub.up = 1;
7275 dhd->pub.wl_event_enabled = true;
7277 dhd->pub.wl_event_enabled = false;
7282 dhd->pub.logtrace_pkt_sendup = true;
7284 dhd->pub.logtrace_pkt_sendup = false;
7290 dhd_dbgfs_init(&dhd->pub);
7294 mutex_unlock(&dhd->pub.ndev_op_sync);
7299 DHD_PERIM_UNLOCK(&dhd->pub);
7300 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7327 DHD_ERROR(("[%s] tx queue started\n", net->name));
7341 DHD_ERROR(("[%s] tx queue stopped\n", net->name));
7368 DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
7373 DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
7400 DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
7407 DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
7413 if (dhd->pub.up == 0) {
7422 if (!(primary_netdev->flags & IFF_UP)) {
7438 return -EINVAL;
7455 if (dhd->pub.busstate == DHD_BUS_DATA) {
7462 return -1;
7473 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7474 ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
7483 if (ifevent->ifidx > 0) {
7484 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
7487 MALLOCED(dhdinfo->pub.osh)));
7491 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
7492 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
7493 strncpy(if_event->name, name, IFNAMSIZ);
7494 if_event->name[IFNAMSIZ - 1] = '\0';
7495 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
7508 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7509 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
7516 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
7519 MALLOCED(dhdinfo->pub.osh)));
7522 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
7523 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
7524 strncpy(if_event->name, name, IFNAMSIZ);
7525 if_event->name[IFNAMSIZ - 1] = '\0';
7526 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
7536 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7537 ifevent->ifidx, name, mac, ifevent->bssidx);
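/*
 * Note on the interface add/del/change handling above: the firmware event is
 * not acted on inline.  Instead the event payload, MAC and name are copied
 * into a small heap-allocated dhd_if_event_t and pushed onto the driver's
 * deferred work queue, presumably because net_device (un)registration must
 * run in process context rather than in the event-dispatch path.
 */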
7549 dhd_nfct_info_t *nfct = dhd->pub.nfct;
7560 if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
7561 (natoe->start_port < natoe->end_port)) {
7563 if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
7567 } else if (!natoe->natoe_active) {
7583 if (dhd->nfct) {
7584 wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
7585 uint8 prev_enable = natoe->natoe_active;
7587 spin_lock_bh(&dhd->nfct_lock);
7589 spin_unlock_bh(&dhd->nfct_lock);
7591 if (prev_enable != event_data->natoe_active) {
7592 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
7619 if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
7631 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
7643 u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
7645 u32 ifidx = (DHD_MAX_IFS - 1);
7648 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
7650 ifp = dhdinfo->iflist[ifidx];
7651 if (ifp && (ifp->net == ndev)) {
7653 ndev->name, ifidx));
7656 } while (ifidx--);
7658 DHD_ERROR(("no entry found for %s\n", ndev->name));
7674 return (ifp && (ifp->static_if == true));
7686 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
7697 return -ENODEV;
7699 cur_idx = ifp->idx;
7703 ifp->static_if = TRUE;
7707 ifp_new = dhdinfo->iflist[ifidx];
7712 dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
7713 net_os_send_hang_message(ifp->net);
7714 return -EINVAL;
7720 dhd_cleanup_if(ifp->net);
7722 dev_priv->ifidx = ifidx;
7726 dhdinfo->iflist[ifidx] = ifp;
7727 dhdinfo->iflist[cur_idx] = NULL;
7730 ifp->idx = ifidx;
7731 ifp->bssidx = bssidx;
7737 strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
7738 } else if (ndev->name[0] != '\0') {
7739 strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
7742 (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
7759 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
7764 ifp = dhdinfo->iflist[ifidx];
7767 if (ifp->net != NULL) {
7769 __FUNCTION__, ifp->net->name, ifidx));
7780 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
7782 /* in unregister_netdev case, the interface gets freed by net->destructor
7785 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
7786 free_netdev(ifp->net);
7788 netif_stop_queue(ifp->net);
7790 unregister_netdev(ifp->net);
7792 unregister_netdevice(ifp->net);
7794 ifp->net = NULL;
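/*
 * Illustrative sketch (not driver code) of the teardown rule described in the
 * comment above: a net_device that was never registered has to be freed
 * directly, while a registered one is torn down via unregister_netdev(), whose
 * destructor/priv_destructor path is expected to free it.
 */
#include <linux/netdevice.h>

static void example_release_netdev(struct net_device *ndev)
{
	if (ndev->reg_state == NETREG_UNINITIALIZED)
		free_netdev(ndev);		/* never registered: free by hand */
	else
		unregister_netdev(ndev);	/* registered: destructor frees it */
}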
7797 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
7799 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
7805 ifp->info = dhdinfo;
7806 ifp->idx = ifidx;
7807 ifp->bssidx = bssidx;
7809 ifp->mcast_regen_bss_enable = FALSE;
7812 ifp->rx_pkt_chainable = TRUE;
7815 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
7818 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
7819 if (ifp->net == NULL) {
7820 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
7825 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
7828 strncpy(ifp->net->name, name, IFNAMSIZ);
7829 ifp->net->name[IFNAMSIZ - 1] = '\0';
7833 #define IFP_NET_DESTRUCTOR ifp->net->priv_destructor
7835 #define IFP_NET_DESTRUCTOR ifp->net->destructor
7847 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
7848 ifp->name[IFNAMSIZ - 1] = '\0';
7849 dhdinfo->iflist[ifidx] = ifp;
7853 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
7855 strncpy(ifp->dngl_name, name, IFNAMSIZ);
7859 INIT_LIST_HEAD(&ifp->sta_list);
7863 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
7864 ifp->parp_allnode = TRUE;
7867 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
7870 INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
7874 ifp->recv_reassoc_evt = FALSE;
7875 ifp->post_roam_evt = FALSE;
7879 INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
7883 return ifp->net;
7887 if (ifp->net != NULL) {
7889 if (ifp->net == dhdinfo->rx_napi_netdev) {
7890 napi_disable(&dhdinfo->rx_napi_struct);
7891 netif_napi_del(&dhdinfo->rx_napi_struct);
7892 skb_queue_purge(&dhdinfo->rx_napi_queue);
7893 dhdinfo->rx_napi_netdev = NULL;
7896 dhd_dev_priv_clear(ifp->net);
7897 free_netdev(ifp->net);
7898 ifp->net = NULL;
7900 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
7904 dhdinfo->iflist[ifidx] = NULL;
7913 if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
7917 if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
7918 DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
7923 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
7924 NULL, FALSE, dhdpub->tickcnt);
7925 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
7926 ifp->phnd_arp_table = NULL;
7932 ifidx = ifp->idx;
7944 dhd_pub_t *dhdp = &dhdinfo->pub;
7948 (ifp->idx >= DHD_MAX_IFS)) {
7949 DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1));
7964 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
7969 ifp = dhdinfo->iflist[ifidx];
7973 cancel_delayed_work_sync(&ifp->m4state_work);
7977 cancel_work_sync(&ifp->blk_tsfl_work);
7982 if (ifp->static_if) {
7987 if (ifp->net != NULL) {
7988 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
7991 ifp->del_in_progress = true;
7997 timeout = wait_event_timeout(dhdpub->tx_completion_wait,
7998 ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
8008 dhdinfo->iflist[ifidx] = NULL;
8009 /* in unregister_netdev case, the interface gets freed by net->destructor
8012 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8013 free_netdev(ifp->net);
8015 netif_tx_disable(ifp->net);
8018 custom_rps_map_clear(ifp->net->_rx);
8026 unregister_netdev(ifp->net);
8028 unregister_netdevice(ifp->net);
8030 ifp->net = NULL;
8032 ifp->del_in_progress = false;
8036 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8038 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8092 return -1;
8095 return -1;
8108 return -1;
8110 return vfs_read(filep, buf, size, &filep->f_pos);
8118 return -1;
8120 /* offset can be negative */
8121 filep->f_pos = filep->f_pos + offset;
8146 error = vfs_getattr(&filep->f_path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
8148 error = vfs_getattr(&filep->f_path, &stat);
8167 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
8190 temp->fmts = NULL;
8247 temp->raw_sstr = NULL;
8249 temp->rom_raw_sstr = NULL;
8267 logstrs_size = rodata_end - rodata_start;
8268 logfilebase = rodata_start - ramstart;
8290 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
8297 temp->raw_sstr = raw_fmts;
8298 temp->raw_sstr_size = logstrs_size;
8299 temp->rodata_start = rodata_start;
8300 temp->rodata_end = rodata_end;
8302 temp->rom_raw_sstr = raw_fmts;
8303 temp->rom_raw_sstr_size = logstrs_size;
8304 temp->rom_rodata_start = rodata_start;
8305 temp->rom_rodata_end = rodata_end;
8326 temp->raw_sstr = NULL;
8328 temp->rom_raw_sstr = NULL;
8344 bool dongle_isolation = dhdp->dongle_isolation;
8351 if (dhdp->memdump_enabled) {
8353 dhdp->collect_sssr = TRUE;
8355 dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
8362 dhd_bus_stop_queue(dhdp->bus);
8365 dhdp->dongle_isolation = TRUE;
8367 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
8375 bool dongle_isolation = dhdp->dongle_isolation;
8377 DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
8379 dhdp->dongle_isolation = TRUE;
8380 dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
8381 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
8383 dhd_bus_start_queue(dhdp->bus);
8397 uint32 bus_type = -1;
8398 uint32 bus_num = -1;
8399 uint32 slot_num = -1;
8427 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
8434 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
8436 dhd->pub.osh = osh;
8438 dll_init(&(dhd->pub.dump_iovlist_head));
8440 dhd->pub.dhd_console_ms = dhd_console_ms; /* assigns default value */
8441 dhd->adapter = adapter;
8443 dhd->pub.is_bt_recovery_required = FALSE;
8444 mutex_init(&dhd->bus_user_lock);
8447 g_dhd_pub = &dhd->pub;
8450 dll_init(&(dhd->pub.mw_list_head));
8454 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
8457 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
8458 dhd->pub.force_country_change = TRUE;
8461 get_customized_country_code(dhd->adapter,
8462 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
8463 dhd->pub.dhd_cflags);
8465 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
8466 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
8468 dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
8471 sema_init(&dhd->sdsem, 1);
8473 dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
8476 dhd->pub.info = dhd;
8479 dhd->pub.bus = bus;
8480 dhd->pub.hdrlen = bus_hdrlen;
8481 dhd->pub.txoff = FALSE;
8494 if_name[IFNAMSIZ - 1] = 0;
8496 ch = if_name[len - 1];
8497 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
8502 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
8506 mutex_init(&dhd->pub.ndev_op_sync);
8511 dhd->pub.l2_filter_cnt = 0;
8513 net->netdev_ops = NULL;
8515 mutex_init(&dhd->dhd_iovar_mutex);
8516 sema_init(&dhd->proto_sem, 1);
8518 if (!(dhd_ulp_init(osh, &dhd->pub)))
8523 dhd->pub.req_hang_type = 0;
8527 spin_lock_init(&dhd->wlfc_spinlock);
8529 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
8530 dhd->pub.plat_init = dhd_wlfc_plat_init;
8531 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
8534 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
8535 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
8536 if (IS_ERR(dhd->pub.wlfc_thread)) {
8540 wake_up_process(dhd->pub.wlfc_thread);
8546 init_waitqueue_head(&dhd->ioctl_resp_wait);
8547 init_waitqueue_head(&dhd->d3ack_wait);
8548 init_waitqueue_head(&dhd->ctrl_wait);
8549 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
8550 init_waitqueue_head(&dhd->dmaxfer_wait);
8551 init_waitqueue_head(&dhd->pub.tx_completion_wait);
8552 dhd->pub.dhd_bus_busy_state = 0;
8554 spin_lock_init(&dhd->sdlock);
8555 spin_lock_init(&dhd->txqlock);
8556 spin_lock_init(&dhd->dhd_lock);
8557 spin_lock_init(&dhd->rxf_lock);
8559 spin_lock_init(&dhd->pub.tdls_lock);
8562 dhd->rxthread_enabled = TRUE;
8566 spin_lock_init(&dhd->tcpack_lock);
8570 spin_lock_init(&dhd->wakelock_spinlock);
8571 spin_lock_init(&dhd->wakelock_evt_spinlock);
8573 dhd->wakelock_counter = 0;
8576 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
8580 mutex_init(&dhd->dhd_net_if_mutex);
8581 mutex_init(&dhd->dhd_suspend_mutex);
8583 mutex_init(&dhd->dhd_apf_mutex);
8589 if (dhd_prot_attach(&dhd->pub) != 0) {
8596 spin_lock_init(&dhd->pub.up_lock);
8598 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
8603 dhd_monitor_init(&dhd->pub);
8610 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
8619 ret = dhd_init_logstrs_array(osh, &dhd->event_data);
8621 dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
8622 dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
8629 if (dhd_os_dbg_attach(&dhd->pub)) {
8636 dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
8640 dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
8642 dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
8648 dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
8650 if (dhd->pub.statlog == NULL) {
8656 dhd_log_dump_init(&dhd->pub);
8659 dhd_dump_pkt_init(&dhd->pub);
8662 dhd_os_attach_pktlog(&dhd->pub);
8666 dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
8667 if (dhd->pub.hang_info == NULL) {
8671 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
8677 dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
8678 if (!dhd->tx_wq) {
8679 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
8682 dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
8683 if (!dhd->rx_wq) {
8684 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
8685 destroy_workqueue(dhd->tx_wq);
8686 dhd->tx_wq = NULL;
8692 init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
8693 dhd->default_wd_interval = dhd_watchdog_ms;
8697 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
8698 if (dhd->thr_wdt_ctl.thr_pid < 0) {
8703 dhd->thr_wdt_ctl.thr_pid = -1;
8708 init_timer_compat(&dhd->rpm_timer, dhd_runtimepm, dhd);
8709 dhd->rpm_timer_valid = FALSE;
8711 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
8712 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
8713 if (dhd->thr_rpm_ctl.thr_pid < 0) {
8719 skb_queue_head_init(&dhd->evt_trace_queue);
8722 dhd_dbg_ring_proc_create(&dhd->pub);
8728 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
8729 if (dhd->thr_dpc_ctl.thr_pid < 0) {
8734 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
8735 dhd->thr_dpc_ctl.thr_pid = -1;
8738 if (dhd->rxthread_enabled) {
8739 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
8741 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
8742 if (dhd->thr_rxf_ctl.thr_pid < 0) {
8752 dhd->pm_notifier.notifier_call = dhd_pm_callback;
8753 dhd->pm_notifier.priority = 10;
8754 register_pm_notifier(&dhd->pm_notifier);
8760 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
8761 dhd->early_suspend.suspend = dhd_early_suspend;
8762 dhd->early_suspend.resume = dhd_late_resume;
8763 register_early_suspend(&dhd->early_suspend);
8768 dhd->pend_ipaddr = 0;
8781 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
8783 INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
8786 dhd->new_freq = alloc_percpu(int);
8787 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
8788 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
8792 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
8794 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
8796 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
8804 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
8810 DHD_LB_STATS_INIT(&dhd->pub);
8817 /* Register the callbacks to the CPU Hotplug sub-system */
8826 dhd->cpu_notifier.notifier_call = NULL;
8834 atomic_set(&dhd->lb_txp_active, 1);
8837 atomic_set(&dhd->lb_txp_active, 0);
8843 atomic_set(&dhd->lb_rxp_active, 1);
8848 tasklet_init(&dhd->tx_compl_tasklet,
8849 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
8850 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
8854 tasklet_init(&dhd->rx_compl_tasklet,
8855 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
8856 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
8861 __skb_queue_head_init(&dhd->rx_pend_queue);
8862 skb_queue_head_init(&dhd->rx_napi_queue);
8864 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
8869 INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
8870 skb_queue_head_init(&dhd->tx_pend_queue);
8872 tasklet_init(&dhd->tx_tasklet,
8881 INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
8885 dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
8886 if (dhd->pub.extended_trap_data == NULL) {
8890 dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
8891 if (dhd->pub.axi_err_dump == NULL) {
8903 DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
8907 if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
8917 dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
8922 dhd->dhd_state = dhd_state;
8927 dhd->pub.dump_file_manage =
8928 (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
8929 if (unlikely(!dhd->pub.dump_file_manage)) {
8930 DHD_ERROR(("%s(): could not allocate memory for - "
8937 dhd->pub.memdump_enabled = DUMP_DISABLED;
8939 dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
8941 dhd->pub.memdump_enabled = DUMP_MEMFILE;
8944 dhd_get_memdump_info(&dhd->pub);
8949 pom_handler = &dhd->pub.pom_wlan_handler;
8950 pom_handler->func_id = WLAN_FUNC_ID;
8951 pom_handler->handler = (void *)g_dhd_pub;
8952 pom_handler->power_off = dhd_wlan_power_off_handler;
8953 pom_handler->power_on = dhd_wlan_power_on_handler;
8955 dhd->pub.pom_func_register = NULL;
8956 dhd->pub.pom_func_deregister = NULL;
8957 dhd->pub.pom_toggle_reg_on = NULL;
8959 dhd->pub.pom_func_register = symbol_get(pom_func_register);
8960 dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
8961 dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
8967 if (!dhd->pub.pom_func_register ||
8968 !dhd->pub.pom_func_deregister ||
8969 !dhd->pub.pom_toggle_reg_on) {
8975 dhd->pub.pom_func_register(pom_handler);
8976 dhd->pub.enable_erpom = TRUE;
8980 return &dhd->pub;
8984 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
8985 __FUNCTION__, dhd_state, &dhd->pub));
8986 dhd->dhd_state = dhd_state;
8987 dhd_detach(&dhd->pub);
8988 dhd_free(&dhd->pub);
8997 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
8999 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
9001 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
9003 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
9011 return dhd_get_fw_mode(dhdp->info);
9079 chip_id = dhd_bus_chip_id(&dhdinfo->pub);
9080 chip_rev = dhd_bus_chiprev_id(&dhdinfo->pub);
9081 chip_module = dhd_bus_chipmodule_id(&dhdinfo->pub);
9113 snprintf(fw_name, (fw_name_size - 1), "%s/%s", DHD_FIRMWARE_DIR_PATH, entry->fw_name);
9117 snprintf(nv_name, (nv_name_size - 1), "%s/%s", DHD_FIRMWARE_DIR_PATH, entry->nv_name);
9134 if ((entry == NULL) || (entry->clm_name == NULL)) {
9139 … snprintf(clm_name, (clm_name_size - 1), "%s/%s", DHD_FIRMWARE_DIR_PATH, entry->clm_name);
9159 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9160 int fw_path_len = sizeof(dhdinfo->fw_path);
9161 int nv_path_len = sizeof(dhdinfo->nv_path);
9173 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
9174 * command may change dhdinfo->fw_path. As such we need to clear the path info in
9179 /* set default firmware and nvram path for built-in type driver */
9190 if (dhdinfo->fw_path[0] == '\0') {
9191 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
9192 fw = adapter->fw_path;
9194 if (dhdinfo->nv_path[0] == '\0') {
9195 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
9196 nv = adapter->nv_path;
9201 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
9245 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
9248 strncpy(dhdinfo->fw_path, fw, fw_path_len);
9249 if (dhdinfo->fw_path[fw_len-1] == '\n')
9250 dhdinfo->fw_path[fw_len-1] = '\0';
9255 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
9258 memset(dhdinfo->nv_path, 0, nv_path_len);
9259 strncpy(dhdinfo->nv_path, nv, nv_path_len);
9260 dhdinfo->nv_path[nv_len] = '\0';
9266 char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
9268 strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
9270 char *sp = sp_nvram + strlen(nvram_tag) - 1;
9271 uint32 padding_size = (uint32)(dhdinfo->nv_path +
9272 nv_path_len - sp);
9274 strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
9275 nv_len = strlen(dhdinfo->nv_path);
9277 __FUNCTION__, dhdinfo->nv_path));
9284 " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
9288 if (dhdinfo->nv_path[nv_len-1] == '\n')
9289 dhdinfo->nv_path[nv_len-1] = '\0';
9294 if (uc_len >= sizeof(dhdinfo->uc_path)) {
9295 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
9298 strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
9299 if (dhdinfo->uc_path[uc_len-1] == '\n')
9300 dhdinfo->uc_path[uc_len-1] = '\0';
9307 strncpy(firmware_path, fw, (MOD_PARAM_PATHLEN - 1));
9311 strncpy(nvram_path, nv, (MOD_PARAM_PATHLEN - 1));
9323 DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
9327 if (dhdinfo->fw_path[0] == '\0') {
9331 if (dhdinfo->nv_path[0] == '\0') {
9344 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9350 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
9351 * command may change dhdinfo->btfw_path. As such we need to clear the path info in
9356 /* set default firmware and nvram path for built-in type driver */
9364 if (dhdinfo->btfw_path[0] == '\0') {
9365 if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
9366 fw = adapter->btfw_path;
9376 if (fw_len >= sizeof(dhdinfo->btfw_path)) {
9377 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
9380 strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
9381 if (dhdinfo->btfw_path[fw_len-1] == '\n')
9382 dhdinfo->btfw_path[fw_len-1] = '\0';
9388 if (dhdinfo->btfw_path[0] == '\0') {
9472 int ret = -1;
9474 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
9477 if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
9478 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
9479 ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
9482 __FUNCTION__, dhd->btfw_path));
9493 int ret = -1;
9494 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
9503 dhdp->dongle_trap_occured = 0;
9506 dhdp->sssr_dump_collected = 0;
9508 dhdp->iovar_timeout_occured = 0;
9510 dhdp->d3ack_timeout_occured = 0;
9511 dhdp->livelock_occured = 0;
9512 dhdp->pktid_audit_failed = 0;
9514 dhd->pub.iface_op_failed = 0;
9515 dhd->pub.scan_timeout_occurred = 0;
9516 dhd->pub.scan_busy_occurred = 0;
9518 dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
9523 dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
9524 dhdp->event_log_max_sets_queried = FALSE;
9525 dhdp->smmu_fault_occurred = 0;
9527 dhdp->axi_error = FALSE;
9532 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
9534 dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
9535 DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
9539 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
9540 dhd->fw_path, dhd->nv_path);
9546 __FUNCTION__, dhd->fw_path));
9551 dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
9553 if (dhd->pub.busstate != DHD_BUS_LOAD) {
9555 return -ENETDOWN;
9563 dhd->pub.tickcnt = 0;
9564 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
9567 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
9577 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
9587 DHD_GENERAL_LOCK(&dhd->pub, flags);
9588 dhd->wd_timer_valid = FALSE;
9589 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9590 del_timer_sync(&dhd->timer);
9593 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9596 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9597 return -ENODEV;
9604 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
9610 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
9614 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
9625 ret = dhd_prot_init(&dhd->pub);
9628 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9633 if (dhd->pub.busstate != DHD_BUS_DATA) {
9634 DHD_GENERAL_LOCK(&dhd->pub, flags);
9635 dhd->wd_timer_valid = FALSE;
9636 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9637 del_timer_sync(&dhd->timer);
9639 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9644 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9645 return -ENODEV;
9656 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
9657 DHD_GENERAL_LOCK(&dhd->pub, flags);
9658 dhd->wd_timer_valid = FALSE;
9659 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9660 del_timer_sync(&dhd->timer);
9662 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9676 (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
9680 if (dhd->pend_ipaddr) {
9682 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
9684 dhd->pend_ipaddr = 0;
9708 if (dhd->tdls_enable == tdls_on)
9715 dhd->tdls_enable = tdls_on;
9767 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
9806 dhd->tdls_mode = mode;
9813 tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
9815 int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
9816 uint8 *da = (uint8 *)&event->addr.octet[0];
9818 uint32 reason = ntoh32(event->reason);
9839 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
9844 cur = cur->next;
9847 new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
9852 memcpy(new->addr, da, ETHER_ADDR_LEN);
9853 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
9854 new->next = dhd_pub->peer_tbl.node;
9855 dhd_pub->peer_tbl.node = new;
9856 dhd_pub->peer_tbl.tdls_peer_count++;
9857 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
9861 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
9863 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
9865 prev->next = cur->next;
9867 dhd_pub->peer_tbl.node = cur->next;
9868 MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
9869 dhd_pub->peer_tbl.tdls_peer_count--;
9870 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
9874 cur = cur->next;
9888 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
9890 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
9908 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
9911 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
10025 dhd->op_mode |= DHD_FLAG_IBSS_MODE;
10041 DHD_ERROR(("%s - not associated\n", __FUNCTION__));
10048 return -ENODEV;
10054 return -EINVAL;
10058 memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
10080 iov_buf = MALLOC(dhd->osh, len);
10082 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
10087 iov_buf->version = WL_ADPS_IOV_VER;
10088 iov_buf->len = sizeof(*data);
10089 iov_buf->id = WL_ADPS_IOV_MODE;
10091 data = (wl_adps_params_v1_t *)iov_buf->data;
10092 data->version = ADPS_SUB_IOV_VERSION_1;
10093 data->length = sizeof(*data);
10094 data->mode = on;
10097 data->band = i;
10121 MFREE(dhd->osh, iov_buf, len);
10309 dhd->apf_set = FALSE;
10312 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
10314 dhd->max_dtim_enable = TRUE;
10316 dhd->max_dtim_enable = FALSE;
10318 dhd->disable_dtim_in_suspend = FALSE;
10320 dhd->ocl_off = FALSE;
10323 dhd->tid_mode = SET_TID_OFF;
10324 dhd->target_uid = 0;
10325 dhd->target_tid = 0;
10328 dhd->op_mode = 0;
10332 dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
10357 fw_version[FW_VER_STR_LEN-1] = '\0';
10372 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
10374 dhd->op_mode = DHD_FLAG_MFG_MODE;
10377 pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
10410 ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
10411 sizeof(dhd->axierror_logbuf_addr), FALSE);
10414 dhd->axierror_logbuf_addr = 0;
10417 dhd->axierror_logbuf_addr));
10433 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
10442 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
10453 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
10461 dhd_map_clm_path_by_chip(dhd->info, clm_path, MOD_PARAM_PATHLEN);
10472 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
10473 memset(dhd->fw_capabilities, 0, cap_buf_size);
10474 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
10482 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
10483 dhd->fw_capabilities[0] = ' ';
10484 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
10485 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
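/*
 * The "cap" response above is shifted right by one byte and wrapped in a
 * leading and a trailing space, presumably so that later capability checks can
 * match whole space-delimited tokens with strstr() without hitting prefix or
 * suffix substrings.
 */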
10501 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
10506 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
10528 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
10543 dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
10545 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
10553 dhd->op_mode = DHD_FLAG_MFG_MODE;
10573 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
10581 dhd->op_mode = DHD_FLAG_P2P_MODE;
10582 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
10584 dhd->op_mode = DHD_FLAG_IBSS_MODE;
10586 dhd->op_mode = DHD_FLAG_STA_MODE;
10588 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
10593 dhd->op_mode |= concurrent_mode;
10597 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
10609 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
10644 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
10647 if (!dhd->is_blob)
10652 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
10653 &dhd->dhd_cspec, dhd->dhd_cflags);
10655 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
10656 &dhd->dhd_cspec);
10662 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
10663 dhd->info->rxthread_enabled = FALSE;
10665 dhd->info->rxthread_enabled = TRUE;
10668 if (dhd->dhd_cspec.ccode[0] != 0) {
10669 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
10687 /* Disable built-in roaming to allow the ext supplicant to take care of roaming */
10712 dhd->roam_env_detection = TRUE;
10714 dhd->roam_env_detection = FALSE;
10734 dhd->tdls_enable = FALSE;
10751 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
10757 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
10820 /* Set Keep Alive : be sure to use FW with -keepalive */
10826 if (!(dhd->op_mode &
10852 dhd_map_clm_path_by_chip(dhd->info, clm_path, MOD_PARAM_PATHLEN);
10862 if (!(dhd->op_mode &
10871 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
10872 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities),
10884 dhd->event_log_max_sets = event_log_max_sets;
10886 dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
10892 dhd->event_log_max_sets_queried = TRUE;
10894 __FUNCTION__, dhd->event_log_max_sets, ret));
10925 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
10949 iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
10972 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
11022 /* Read 4-way handshake requirements */
11027 * in-dongle supplicant.
11030 dhd->fw_4way_handshake = TRUE;
11031 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
11065 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
11127 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
11182 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11228 eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
11235 eventmask_msg->ver = EVENTMSGS_VER;
11236 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11245 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11248 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
11249 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
11250 setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
11251 setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
11253 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11255 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
11258 setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
11261 setbit(eventmask_msg->mask, WLC_E_ULP);
11264 setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT);
11267 setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP);
11270 setbit(eventmask_msg->mask, WLC_E_MBO);
11273 setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED);
11276 setbit(eventmask_msg->mask, WLC_E_ADDTS_IND);
11277 setbit(eventmask_msg->mask, WLC_E_DELTS_IND);
11280 setbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
11283 setbit(eventmask_msg->mask, WLC_E_EXT_AUTH_REQ);
11284 setbit(eventmask_msg->mask, WLC_E_EXT_AUTH_FRAME_RX);
11286 setbit(eventmask_msg->mask, WLC_E_MGMT_FRAME_TXSTATUS);
11287 setbit(eventmask_msg->mask, WLC_E_MGMT_FRAME_OFF_CHAN_COMPLETE);
11291 // setbit(eventmask_msg->mask, WLC_E_IND_DOS_STATUS);
11294 setbit(eventmask_msg->mask, WLC_E_LDF_HOGGER);
11298 eventmask_msg->ver = EVENTMSGS_VER;
11299 eventmask_msg->command = EVENTMSGS_SET_MASK;
11300 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
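/*
 * The extended event mask set up here is a plain byte array holding one bit
 * per WLC_E_* event: ROUNDUP(WLC_E_LAST, NBBY)/NBBY bytes, where NBBY is the
 * number of bits per byte.  Individual events are enabled with setbit() on the
 * mask before it is pushed to the firmware with EVENTMSGS_SET_MASK.
 */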
11319 el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
11326 el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
11327 el_tag->set = 1;
11328 el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
11359 dhd->pktfilter_count = 6;
11360 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
11362 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
11363 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11366 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
11367 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
11370 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
11374 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
11380 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
11384 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
11387 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
11391 dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
11393 dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
11395 dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
11396 dhd->pktfilter_count = 10;
11400 dhd->pktfilter_count = 4;
11403 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
11405 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
11407 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
11408 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
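/*
 * Each packet filter above is a text tuple handed to the firmware's
 * pkt_filter_add machinery; the fields appear to be
 * "<id> <polarity> <type> <offset> <bitmask> <pattern>".  For example,
 * "105 0 0 12 0xFFFF 0x0806" matches the ARP ethertype (0x0806) at byte
 * offset 12 of the Ethernet header.
 */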
11454 MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN - 1));
11470 CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
11502 sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
11512 dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
11513 dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
11562 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
11581 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
11591 if (!dhd->pno_state) {
11596 if (!dhd->rtt_state) {
11605 if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)))
11613 dhd->ndo_enable = FALSE;
11614 dhd->ndo_host_ip_overflow = FALSE;
11615 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
11619 dhd->ndo_version = dhd_ndo_get_version(dhd);
11620 if (dhd->ndo_version > 0) {
11621 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
11633 dhd->wbtext_support = FALSE;
11638 dhd->wbtext_policy = wnm_bsstrans_resp;
11639 if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
11640 dhd->wbtext_support = TRUE;
11644 if (dhd->wbtext_support) {
11655 if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
11684 if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
11698 dhd->monitor_enable = TRUE;
11701 dhd->monitor_enable = FALSE;
11707 dhd->sroam_turn_on = TRUE;
11708 dhd->sroamed = FALSE;
11714 MFREE(dhd->osh, eventmask_msg, msglen);
11718 MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
11723 MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
11753 buf = MALLOCZ(pub->osh, input_len);
11781 buf = MALLOCZ(pub->osh, input_len);
11821 MFREE(pub->osh, buf, input_len);
11859 struct dhd_info *dhd = dhdp->info;
11862 ASSERT(dhd && dhd->iflist[ifidx]);
11863 dev = dhd->iflist[ifidx]->net;
11867 DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
11879 dev->mtu = new_mtu;
11952 if (!ifa || !(ifa->ifa_dev->dev))
11956 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
11957 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
11959 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
11964 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
11968 dhd_pub = &dhd->pub;
11970 if (dhd_pub->arp_version == 1) {
11974 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
11978 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
11979 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
11981 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
11989 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
11995 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
11996 dhd->pub.busstate == DHD_BUS_LOAD) {
11998 __FUNCTION__, dhd->pub.busstate));
11999 if (dhd->pend_ipaddr) {
12001 __FUNCTION__, dhd->pend_ipaddr));
12003 dhd->pend_ipaddr = ifa->ifa_address;
12010 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
12016 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
12017 dhd->pend_ipaddr = 0;
12021 if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
12022 (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
12023 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
12027 dhd_aoe_hostip_clr(&dhd->pub, idx);
12028 dhd_aoe_arp_clr(&dhd->pub, idx);
12034 __func__, ifa->ifa_label, event));
12055 dhdp = &dhd->pub;
12067 switch (ndo_work->event) {
12077 if (dhdp->ndo_version > 0) {
12079 ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
12080 WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
12082 ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
12083 ndo_work->if_idx);
12091 if (dhdp->ndo_version > 0) {
12094 &ndo_work->ipv6_addr[0], ndo_work->if_idx);
12097 ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
12105 if (dhdp->ndo_host_ip_overflow) {
12107 dhd_idx2net(dhdp, ndo_work->if_idx));
12152 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
12156 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
12160 dhdp = &dhd->pub;
12163 idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
12180 ndo_info->event = event;
12181 ndo_info->if_idx = idx;
12182 memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
12185 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
12217 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12225 if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
12230 ASSERT(dhd && dhd->iflist[ifidx]);
12231 ifp = dhd->iflist[ifidx];
12232 net = ifp->net;
12233 ASSERT(net && (ifp->idx == ifidx));
12235 ASSERT(!net->netdev_ops);
12236 net->netdev_ops = &dhd_ops_virt;
12243 net->netdev_ops = &dhd_ops_pri;
12244 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
12245 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
12246 memcpy(dhd->iflist[0]->mac_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
12251 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
12258 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
12261 __func__, net->name));
12263 memcpy(dhd->iflist[ifidx]->mac_addr, temp_addr, ETHER_ADDR_LEN);
12268 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
12269 net->ethtool_ops = &dhd_ethtool_ops;
12273 net->get_wireless_stats = dhd_get_wireless_stats;
12276 net->wireless_handlers = &wl_iw_handler_def;
12280 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
12282 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
12293 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
12297 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
12299 MAC2STRDBG(dhd->pub.mac.octet));
12301 MAC2STRDBG(net->dev_addr));
12320 __skb_queue_purge(&dhd->rx_pend_queue);
12324 skb_queue_purge(&dhd->tx_pend_queue);
12339 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
12341 dhd->bus_user_count--;
12350 net->netdev_ops = NULL;
12359 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12364 ifp = dhd->iflist[0];
12365 net = ifp->net;
12373 viface_name[IFNAMSIZ - 1] = 0;
12375 ch = viface_name[len - 1];
12376 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
12405 dhd = (dhd_info_t *)dhdp->info;
12412 if (dhd->pub.busstate != DHD_BUS_DOWN) {
12414 dhd_prot_stop(&dhd->pub);
12417 dhd_bus_stop(dhd->pub.bus, TRUE);
12439 dhd = (dhd_info_t *)dhdp->info;
12443 dev = dhd->iflist[0]->net;
12447 if (dev->flags & IFF_UP) {
12459 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
12462 dhd->pub.up = 0;
12463 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
12470 dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
12477 if (dhd->pub.wlfc_thread) {
12478 kthread_stop(dhd->pub.wlfc_thread);
12479 dhdp->wlfc_thread_go = TRUE;
12480 wake_up_interruptible(&dhdp->wlfc_wqhead);
12482 dhd->pub.wlfc_thread = NULL;
12491 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
12500 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
12501 dhdpcie_bus_clock_stop(dhdp->bus);
12502 wifi_platform_set_power(dhd_wifi_platdata->adapters,
12510 if (dhdp->prot)
12529 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
12530 if (dhd->early_suspend.suspend)
12531 unregister_early_suspend(&dhd->early_suspend);
12536 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
12543 dhd_ulp_deinit(dhd->pub.osh, dhdp);
12547 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
12554 if (dhd->iflist[i]) {
12555 dhd_remove_if(&dhd->pub, i, TRUE);
12561 ifp = dhd->iflist[0];
12562 if (ifp && ifp->net) {
12565 cfg = wl_get_cfg(ifp->net);
12567 /* in unregister_netdev case, the interface gets freed by net->destructor
12570 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
12571 free_netdev(ifp->net);
12577 custom_rps_map_clear(ifp->net->_rx);
12579 netif_tx_disable(ifp->net);
12580 unregister_netdev(ifp->net);
12583 ifp->net = DHD_NET_DEV_NULL;
12585 ifp->net = NULL;
12590 if (dhdp->prot)
12595 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
12596 NULL, FALSE, dhdp->tickcnt);
12597 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
12598 ifp->phnd_arp_table = NULL;
12603 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
12604 dhd->iflist[0] = NULL;
12609 DHD_GENERAL_LOCK(&dhd->pub, flags);
12610 timer_valid = dhd->wd_timer_valid;
12611 dhd->wd_timer_valid = FALSE;
12612 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
12614 del_timer_sync(&dhd->timer);
12615 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
12617 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
12619 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
12620 PROC_STOP(&dhd->thr_rpm_ctl);
12623 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
12624 PROC_STOP(&dhd->thr_wdt_ctl);
12627 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
12628 PROC_STOP(&dhd->thr_rxf_ctl);
12631 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
12632 PROC_STOP(&dhd->thr_dpc_ctl);
12635 tasklet_kill(&dhd->tasklet);
12640 if (dhd->pub.nfct) {
12641 dhd_ct_close(dhd->pub.nfct);
12646 if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
12648 dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
12652 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
12653 __skb_queue_purge(&dhd->rx_pend_queue);
12656 cancel_work_sync(&dhd->tx_dispatcher_work);
12657 tasklet_kill(&dhd->tx_tasklet);
12658 __skb_queue_purge(&dhd->tx_pend_queue);
12661 cancel_work_sync(&dhd->tx_compl_dispatcher_work);
12662 tasklet_kill(&dhd->tx_compl_tasklet);
12665 tasklet_kill(&dhd->rx_compl_tasklet);
12672 DHD_LB_STATS_DEINIT(&dhd->pub);
12677 cancel_work_sync(&dhd->axi_error_dispatcher_work);
12680 DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
12683 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
12695 destroy_workqueue(dhd->tx_wq);
12696 dhd->tx_wq = NULL;
12697 destroy_workqueue(dhd->rx_wq);
12698 dhd->rx_wq = NULL;
12701 if (dhdp->dbg) {
12704 dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
12708 if (dhdp->dbg) {
12721 if (dhd->pub.hang_info) {
12722 MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
12733 dhd_dbg_ring_proc_destroy(&dhd->pub);
12735 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
12736 if (dhd->event_data.fmts) {
12737 MFREE(dhd->pub.osh, dhd->event_data.fmts,
12738 dhd->event_data.fmts_size);
12739 dhd->event_data.fmts = NULL;
12741 if (dhd->event_data.raw_fmts) {
12742 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
12743 dhd->event_data.raw_fmts_size);
12744 dhd->event_data.raw_fmts = NULL;
12746 if (dhd->event_data.raw_sstr) {
12747 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
12748 dhd->event_data.raw_sstr_size);
12749 dhd->event_data.raw_sstr = NULL;
12751 if (dhd->event_data.rom_raw_sstr) {
12752 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
12753 dhd->event_data.rom_raw_sstr_size);
12754 dhd->event_data.rom_raw_sstr = NULL;
12756 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
12760 if (dhdp->pno_state)
12764 if (dhdp->rtt_state) {
12770 unregister_pm_notifier(&dhd->pm_notifier);
12776 if (dhd->new_freq)
12777 free_percpu(dhd->new_freq);
12778 dhd->new_freq = NULL;
12779 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
12782 dhd->wakelock_wd_counter = 0;
12783 wake_lock_destroy(&dhd->wl_wdwake);
12785 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
12786 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
12792 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
12797 if (dhdp->prot)
12806 dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
12810 dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
12817 if (dhdp->enable_erpom) {
12818 dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
12822 cancel_work_sync(&dhd->dhd_hang_process_work);
12824 /* Prefer adding de-init code above this comment unless necessary.
12827 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
12828 dhd->dhd_deferred_wq = NULL;
12832 dhd_log_dump_deinit(&dhd->pub);
12835 if (dhdp->extended_trap_data)
12837 MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
12838 dhdp->extended_trap_data = NULL;
12841 if (dhdp->axi_err_dump)
12843 MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
12844 dhdp->axi_err_dump = NULL;
12850 if (dhd->pub.dump_file_manage) {
12851 MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
12856 dhd->pub.fw_download_status = FW_UNLOADED;
12859 mutex_destroy(&dhd->bus_user_lock);
12872 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
12873 if (dhdp->reorder_bufs[i]) {
12877 ptr = dhdp->reorder_bufs[i];
12879 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
12881 i, ptr->max_idx, buf_size));
12883 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
12884 dhdp->reorder_bufs[i] = NULL;
12890 dhd = (dhd_info_t *)dhdp->info;
12891 if (dhdp->soc_ram) {
12893 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
12895 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
12897 dhdp->soc_ram = NULL;
12904 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
12921 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
12922 if (dhdp->reorder_bufs[i]) {
12926 ptr = dhdp->reorder_bufs[i];
12928 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
12930 i, ptr->max_idx, buf_size));
12932 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
12933 dhdp->reorder_bufs[i] = NULL;
12939 if (dhdp->soc_ram) {
12941 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
12943 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
12945 dhdp->soc_ram = NULL;
12993 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
12998 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
13010 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
13012 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
13014 } while (retry--);
13077 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13082 down(&dhd->proto_sem);
13094 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13097 up(&dhd->proto_sem);
13107 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13110 mutex_lock(&dhd->dhd_iovar_mutex);
13117 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13120 mutex_unlock(&dhd->dhd_iovar_mutex);
13132 dhd = (dhd_info_t *)(pub->info);
13135 mutex_lock(&dhd->logdump_lock);
13147 dhd = (dhd_info_t *)(pub->info);
13150 mutex_unlock(&dhd->logdump_lock);
13191 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13199 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
13209 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13211 wake_up(&dhd->ioctl_resp_wait);
13218 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13226 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
13236 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13238 wake_up(&dhd->d3ack_wait);
13245 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13255 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
13268 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13274 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
13288 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13294 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
13304 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13310 ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
13320 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13322 wake_up(&dhd->dmaxfer_wait);
13331 wake_up(&dhd->tx_completion_wait);
13338 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13341 wake_up(&dhd->dhd_bus_busy_state_wait);
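/*
 * Minimal sketch (illustrative only; names are assumptions) of the wait/wake
 * pairing used by the helpers above: the waiter sleeps on a waitqueue until a
 * shared condition becomes true or the jiffies timeout expires, and the
 * completer sets the condition before calling wake_up().
 */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_done;

static int example_wait(unsigned int timeout_ms)
{
	long left = wait_event_timeout(example_wq, example_done,
			msecs_to_jiffies(timeout_ms));

	return left ? 0 : -ETIMEDOUT;	/* the macro returns 0 on timeout */
}

static void example_complete(void)
{
	example_done = true;
	wake_up(&example_wq);
}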
13349 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13354 dhd_os_wd_timer(bus, dhd->default_wd_interval);
13361 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13374 if (pub->busstate == DHD_BUS_DOWN) {
13385 if (!wdtick && dhd->wd_timer_valid == TRUE) {
13386 dhd->wd_timer_valid = FALSE;
13388 del_timer_sync(&dhd->timer);
13401 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
13402 dhd->wd_timer_valid = TRUE;
13412 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13430 /* If tick is non-zero, the request is to start the timer */
13433 if (dhd->rpm_timer_valid == FALSE) {
13434 mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
13435 dhd->rpm_timer_valid = TRUE;
13441 if (dhd->rpm_timer_valid == TRUE) {
13442 dhd->rpm_timer_valid = FALSE;
13444 del_timer_sync(&dhd->rpm_timer);
13477 if (!S_ISREG(file_inode(fp)->i_mode)) {
13508 rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size));
13511 return -EIO;
13515 fp->f_pos += rdlen;
13533 rd_len = compat_kernel_read(fp, fp->f_pos, str, len);
13538 str_len = (uint)(str_end - str);
13541 fp->f_pos += str_len + 1;
13542 bzero(str_end, rd_len - str_len);
13576 dhd = (dhd_info_t *)(pub->info);
13579 down(&dhd->sdsem);
13581 spin_lock_bh(&dhd->sdlock);
13589 dhd = (dhd_info_t *)(pub->info);
13592 up(&dhd->sdsem);
13594 spin_unlock_bh(&dhd->sdlock);
13602 dhd = (dhd_info_t *)(pub->info);
13603 spin_lock_bh(&dhd->txqlock);
13611 dhd = (dhd_info_t *)(pub->info);
13612 spin_unlock_bh(&dhd->txqlock);
13630 dhd = (dhd_info_t *)(pub->info);
13631 spin_lock_bh(&dhd->rxf_lock);
13640 dhd = (dhd_info_t *)(pub->info);
13641 spin_unlock_bh(&dhd->rxf_lock);
13651 dhd = (dhd_info_t *)(pub->info);
13655 spin_lock_bh(&dhd->tcpack_lock);
13657 spin_lock_irqsave(&dhd->tcpack_lock, flags);
13673 dhd = (dhd_info_t *)(pub->info);
13677 spin_unlock_bh(&dhd->tcpack_lock);
13679 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
13690 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
13708 if (!dhd->pub.up) {
13712 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
13715 return &dhd->iw.wstats;
13732 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
13733 &dhd->event_data);
13735 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
13742 if (ntoh32(event->event_type) == WLC_E_IF) {
13751 if (event->bsscfgidx == 0) {
13755 ASSERT(dhd->iflist[ifidx] != NULL);
13756 ASSERT(dhd->iflist[ifidx]->net != NULL);
13758 if (dhd->iflist[ifidx]->net) {
13759 wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
13765 if (dhd->iflist[ifidx]->net) {
13766 spin_lock_irqsave(&dhd->pub.up_lock, flags);
13767 if (dhd->pub.up) {
13768 wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
13770 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
13781 switch (ntoh32(event->event_type)) {
13802 dhd = dhdp->info;
13804 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
13805 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
13807 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
13808 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
13812 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
13813 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
13814 skb = PKTTONATIVE(dhdp->osh, p);
13815 skb_data = skb->data;
13816 len = skb->len;
13819 ifp = dhd->iflist[ifidx];
13821 ifp = dhd->iflist[0];
13824 skb->dev = ifp->net;
13825 skb->protocol = eth_type_trans(skb, skb->dev);
13826 skb->data = skb_data;
13827 skb->len = len;
13850 struct dhd_info *dhdinfo = dhd->info;
13855 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
13864 struct dhd_info *dhdinfo = dhd->info;
13865 if (waitqueue_active(&dhdinfo->ctrl_wait))
13866 wake_up(&dhdinfo->ctrl_wait);
13880 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
13886 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
13890 if (dhd->pub.wlfc_enabled) {
13891 dhd_wlfc_deinit(&dhd->pub);
13895 if (dhd->pub.pno_state) {
13896 dhd_pno_deinit(&dhd->pub);
13900 if (dhd->pub.rtt_state) {
13901 dhd_rtt_deinit(&dhd->pub);
13906 dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
13914 dhd_bus_update_fw_nv_path(dhd->pub.bus,
13915 dhd->fw_path, dhd->nv_path);
13919 ret = dhd_bus_devreset(&dhd->pub, flag);
13922 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
13923 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
13928 dhd->pub.dongle_trap_occured = 0;
13929 dhd->pub.iovar_timeout_occured = 0;
13931 dhd->pub.d3ack_timeout_occured = 0;
13932 dhd->pub.livelock_occured = 0;
13933 dhd->pub.pktid_audit_failed = 0;
13935 dhd->pub.iface_op_failed = 0;
13936 dhd->pub.scan_timeout_occurred = 0;
13937 dhd->pub.scan_busy_occurred = 0;
13938 dhd->pub.smmu_fault_occurred = 0;
13953 return dhd_bus_suspend(&dhd->pub);
13960 return dhd_bus_resume(&dhd->pub, stage);
13972 ret = dhd->pub.suspend_disable_flag;
13973 dhd->pub.suspend_disable_flag = val;
13987 ret = dhd_set_suspend(val, &dhd->pub);
14005 dhd->pub.suspend_bcn_li_dtim = val;
14019 dhd->pub.max_dtim_enable = TRUE;
14021 dhd->pub.max_dtim_enable = FALSE;
14024 return -1;
14039 dhd->pub.disable_dtim_in_suspend = TRUE;
14041 dhd->pub.disable_dtim_in_suspend = FALSE;
14044 return -1;
14071 if (num >= dhd->pub.pktfilter_count) {
14072 return -EINVAL;
14075 ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
14086 /* Packet filtering is set only if we are still in early-suspend and
14088 * We can always turn it OFF in case of early-suspend, but we turn it
14091 if (dhdp && dhdp->up) {
14092 if (dhdp->in_suspend) {
14093 if (!val || (val && !dhdp->suspend_disable_flag))
14106 return dhd_os_enable_packet_filter(&dhd->pub, val);
14116 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
14127 dhd_pub_t *dhd = (&ptr->pub);
14145 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
14264 if (dhd->pub.dhd_cflags & WLAN_PLAT_NODFS_FLAG) {
14267 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
14269 if (!(dhd->pub.dhd_cflags & WLAN_PLAT_NODFS_FLAG)) {
14272 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
14274 dhd->pub.force_country_change = TRUE;
14283 dhd_pub_t *dhdp = &dhd->pub;
14288 dhdp->ndo_enable = TRUE;
14298 dhdp->ndo_enable = FALSE;
14315 #pragma GCC diagnostic ignored "-Wcast-qual"
14325 read_lock_bh(&inet6->lock);
14328 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14329 if ((ifa->flags & IFA_F_DADFAILED) == 0) {
14335 acaddr = inet6->ac_list;
14338 acaddr = acaddr->aca_next;
14342 read_unlock_bh(&inet6->lock);
14361 * unicast addr in inet6_dev->addr_list
14362 * anycast addr in inet6_dev->ac_list
14368 inet6 = dev->ip6_ptr;
14379 dhdp = &dhd->pub;
14392 if (cnt > dhdp->ndo_max_host_ip) {
14393 if (!dhdp->ndo_host_ip_overflow) {
14394 dhdp->ndo_host_ip_overflow = TRUE;
14409 ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
14410 sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
14418 read_lock_bh(&inet6->lock);
14419 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14421 if ((ifa->flags & IFA_F_DADFAILED) &&
14422 (cnt < dhdp->ndo_max_host_ip)) {
14423 memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
14427 read_unlock_bh(&inet6->lock);
14448 if (dhdp->ndo_host_ip_overflow) {
14451 read_lock_bh(&inet6->lock);
14452 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14454 if (!(ifa->flags & IFA_F_DADFAILED) &&
14455 (cnt < dhdp->ndo_max_host_ip)) {
14456 memcpy(&ipv6_addr[cnt], &ifa->addr,
14461 read_unlock_bh(&inet6->lock);
14475 read_lock_bh(&inet6->lock);
14476 acaddr = inet6->ac_list;
14478 if (cnt < dhdp->ndo_max_host_ip) {
14479 memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
14482 acaddr = acaddr->aca_next;
14484 read_unlock_bh(&inet6->lock);
14496 if (dhdp->ndo_host_ip_overflow) {
14497 dhdp->ndo_host_ip_overflow = FALSE;
14498 if (dhdp->in_suspend) {
14507 MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
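/*
 * Annotation (not in the original source): as this excerpt suggests, the
 * ND-offload update counts the interface's unicast and anycast IPv6
 * addresses, sets ndo_host_ip_overflow and skips programming when the count
 * exceeds ndo_max_host_ip, and otherwise copies the addresses into a
 * temporary array, pushes them to the dongle, and clears a previous
 * overflow once the table fits again.
 */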
14524 return (dhd_pno_stop_for_ssid(&dhd->pub));
14533 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
14543 return (dhd_pno_enable(&dhd->pub, enable));
14552 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
14559 return (dhd_pno_stop_for_batch(&dhd->pub));
14566 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
14573 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
14584 return (dhd_is_legacy_pno_enabled(&dhd->pub));
14594 return dhd_pno_set_epno(&dhd->pub);
14603 return dhd_pno_flush_fw_epno(&dhd->pub);
14613 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
14622 return (dhd_wait_batch_results_complete(&dhd->pub));
14631 return (dhd_pno_lock_batch_results(&dhd->pub));
14639 return (dhd_pno_unlock_batch_results(&dhd->pub));
14648 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
14657 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
14667 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
14677 return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
14685 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
14695 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
14704 return (dhd_retreive_batch_scan_results(&dhd->pub));
14712 return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
14728 roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
14730 roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
14731 roam_param->cur_bssid_boost));
14733 roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
14738 if (dhd->pub.lazy_roam_enable) {
14741 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
14763 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
14769 dhd->pub.lazy_roam_enable = (enable != 0);
14781 bssid_pref->version = BSSID_PREF_LIST_VERSION;
14783 bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
14785 if (bssid_pref->count) {
14786 len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
14788 err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
14806 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
14815 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
14833 ssid_whitelist->ssid_count = 0;
14839 ssid_whitelist->version = SSID_WHITELIST_VERSION;
14840 ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
14841 err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
14858 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
14876 err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
14892 err = dhd_tcpack_suppress_set(&dhd->pub, enable);
14904 dhd_pub_t *dhdp = &dhd->pub;
14915 uint8 *rand_mac_oui = dhdp->rand_mac_oui;
14917 DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
14928 uint8 *rand_mac_oui = dhd->rand_mac_oui;
14939 DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
14956 return (dhd_rtt_set_cfg(&dhd->pub, buf));
14964 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
14972 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
14980 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
14988 return (dhd_rtt_capability(&dhd->pub, capa));
14995 return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
15002 return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
15008 return (dhd_rtt_cancel_responder(&dhd->pub));
15045 if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
15051 if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
15059 * Get current mkeep-alive status.
15069 if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15079 mkeep_alive_pktp->keep_alive_id,
15080 dtoh32(mkeep_alive_pktp->period_msec),
15081 dtoh16(mkeep_alive_pktp->len_bytes)));
15083 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15084 DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
15137 memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
15142 * Keep-alive attributes are set in a local variable (mkeep_alive_pkt), and
15151 MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
15155 MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
15180 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
15182 if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
15200 mkeep_alive_pktp->keep_alive_id,
15201 dtoh32(mkeep_alive_pktp->period_msec),
15202 dtoh16(mkeep_alive_pktp->len_bytes)));
15204 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15205 DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
15211 if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15229 MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
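/*
 * Annotation (not in the original source): both the add and the remove path
 * first query the existing mkeep_alive entry for the requested ID. A
 * non-zero period_msec in the response means that slot is already
 * programmed; the entry is dumped byte by byte above for debugging before
 * the caller decides how to proceed.
 */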
15240 mutex_lock(&dhd->dhd_apf_mutex);
15247 mutex_unlock(&dhd->dhd_apf_mutex);
15256 dhd_pub_t *dhdp = &dhd->pub;
15267 return -ENODEV;
15278 return -EINVAL;
15283 buf = MALLOCZ(dhdp->osh, buf_len);
15286 return -ENOMEM;
15292 pkt_filterp->id = htod32(filter_id);
15293 pkt_filterp->negate_match = htod32(FALSE);
15294 pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
15296 apf_program = &pkt_filterp->u.apf_program;
15297 apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
15298 apf_program->instr_len = htod16(program_len);
15299 memcpy(apf_program->instrs, program, program_len);
15308 MFREE(dhdp->osh, buf, buf_len);
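/*
 * Annotation (not in the original source): the APF (Android Packet Filter)
 * program is wrapped in a packet-filter descriptor of type
 * WL_PKT_FILTER_TYPE_APF_MATCH, with the program bytes carried in
 * u.apf_program.instrs, and is then handed to the firmware through the
 * packet-filter add path (the iovar call itself is not visible in this
 * excerpt). The helper below toggles the same filter ID on and off.
 */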
15318 dhd_pub_t *dhdp = &dhd->pub;
15328 return -ENODEV;
15334 buf = MALLOCZ(dhdp->osh, buf_len);
15337 return -ENOMEM;
15343 pkt_filterp->id = htod32(filter_id);
15344 pkt_filterp->enable = htod32(enable);
15362 MFREE(dhdp->osh, buf, buf_len);
15371 dhd_pub_t *dhdp = &dhd->pub;
15377 return -ENODEV;
15406 dhd_pub_t *dhdp = &dhd->pub;
15423 return -ENODEV;
15440 dhd_pub_t *dhdp = &dhd->pub;
15452 return -ENODEV;
15470 dhd_pub_t *dhdp = &dhd->pub;
15476 if (dhdp->apf_set) {
15481 dhdp->apf_set = FALSE;
15488 dhdp->apf_set = TRUE;
15490 if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
15505 dhd_pub_t *dhdp = &dhd->pub;
15513 if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
15528 dhd_pub_t *dhdp = &dhd->pub;
15533 if (dhdp->apf_set) {
15547 dhd_pub_t *dhdp = &dhd->pub;
15552 if (dhdp->apf_set) {
15555 dhdp->apf_set = FALSE;
15573 /* Ignore compiler warnings due to -Werror=cast-qual */
15576 #pragma GCC diagnostic ignored "-Wcast-qual"
15584 dev = dhd->iflist[0]->net;
15605 ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
15606 if (ndev && (ndev->flags & IFF_UP)) {
15607 DHD_ERROR(("ndev->name : %s dev close\n",
15608 ndev->name));
15621 link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
15641 return -EINVAL;
15645 dhd_info = (dhd_info_t *)dhdp->info;
15647 if (dhd_info->scheduled_memdump) {
15649 dhdp->hang_was_pending = 1;
15658 return -ENODEV;
15663 return -EINVAL;
15669 return -ENODEV;
15674 if (dhdp->req_hang_type) {
15676 __FUNCTION__, dhdp->req_hang_type));
15677 dhdp->req_hang_type = 0;
15681 if (!dhdp->hang_was_sent) {
15683 dhdp->hang_counts++;
15684 if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
15686 __func__, dhdp->hang_counts));
15694 if (!dhdp->info->duart_execute) {
15695 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
15700 dhdp->hang_was_sent = 1;
15702 dhdp->is_bt_recovery_required = TRUE;
15704 schedule_work(&dhdp->info->dhd_hang_process_work);
15717 if (dhd->pub.hang_report) {
15721 ret = dhd_os_send_hang_message(&dhd->pub);
15724 DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
15743 dhdp = &dhd->pub;
15757 dhdp->hang_reason = reason;
15766 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
15773 if (dhd && dhd->pub.up)
15774 return dhd->pub.force_country_change;
15783 if (!dhd->pub.is_blob)
15787 get_customized_country_code(dhd->adapter, country_iso_code, cspec,
15788 dhd->pub.dhd_cflags);
15790 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
15798 strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
15799 strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
15814 if (dhd && dhd->pub.up) {
15815 dhd->pub.force_country_change = FALSE;
15816 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
15829 if (dhd && dhd->pub.up) {
15841 return -EINVAL;
15843 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
15844 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
15874 mutex_lock(&dhd->dhd_net_if_mutex);
15882 mutex_unlock(&dhd->dhd_net_if_mutex);
15889 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15891 mutex_lock(&dhd->dhd_suspend_mutex);
15898 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15900 mutex_unlock(&dhd->dhd_suspend_mutex);
15906 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15910 spin_lock_irqsave(&dhd->dhd_lock, flags);
15917 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15920 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
15982 return (atomic_read(&dhd->pend_8021x_cnt));
15998 DHD_PERIM_UNLOCK(&dhd->pub);
16000 DHD_PERIM_LOCK(&dhd->pub);
16002 ntimes--;
16008 atomic_set(&dhd->pend_8021x_cnt, 0);
16050 filp_close(fp, current->files);
16179 dhd_pub_t *dhdp = &dhd->pub;
16184 dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, dhdp->debug_dump_subcmd);
16185 clear_debug_dump_time(dhdp->debug_dump_time_str);
16186 get_debug_dump_time(dhdp->debug_dump_time_str);
16188 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
16208 dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
16209 clear_debug_dump_time(dhd->debug_dump_time_str);
16210 get_debug_dump_time(dhd->debug_dump_time_str);
16213 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16217 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16221 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16226 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16230 "/root/", fname, memdump_type, dhd->debug_dump_time_str);
16234 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
16244 "/tmp/", fname, memdump_type, dhd->debug_dump_time_str);
16252 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16278 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16282 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16283 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16284 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
16285 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
16287 if (dhd->wakelock_rx_timeout_enable)
16288 wake_lock_timeout(&dhd->wl_rxwake,
16289 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
16290 if (dhd->wakelock_ctrl_timeout_enable)
16291 wake_lock_timeout(&dhd->wl_ctrlwake,
16292 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
16294 dhd->wakelock_rx_timeout_enable = 0;
16295 dhd->wakelock_ctrl_timeout_enable = 0;
16296 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
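/*
 * Annotation (not in the original source): wakelock_rx_timeout_enable and
 * wakelock_ctrl_timeout_enable hold the longest timeout requested so far
 * (the setters below only ever raise them). This routine arms the rx and
 * ctrl wakelocks for those durations, returns the larger of the two, and
 * then resets both requests.
 */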
16307 ret = dhd_os_wake_lock_timeout(&dhd->pub);
16313 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16316 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16317 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16318 if (val > dhd->wakelock_rx_timeout_enable)
16319 dhd->wakelock_rx_timeout_enable = val;
16320 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16327 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16330 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16331 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16332 if (val > dhd->wakelock_ctrl_timeout_enable)
16333 dhd->wakelock_ctrl_timeout_enable = val;
16334 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16341 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16344 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16345 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16346 dhd->wakelock_ctrl_timeout_enable = 0;
16348 if (wake_lock_active(&dhd->wl_ctrlwake))
16349 wake_unlock(&dhd->wl_ctrlwake);
16351 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16362 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
16372 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
16417 if (wklock_info->addr == addr) {
16444 wklock_info->counter = dhd->wakelock_counter; \
16446 wklock_info->counter++; \
16453 wklock_info->addr = func_addr; \
16454 wklock_info->lock_type = wklock_type; \
16457 wklock_info->counter = dhd->wakelock_counter; \
16459 wklock_info->counter++; \
16461 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
16480 switch (wklock_info->lock_type) {
16483 (void *)wklock_info->addr, wklock_info->counter);
16487 (void *)wklock_info->addr, wklock_info->counter);
16491 (void *)wklock_info->addr, wklock_info->counter);
16495 (void *)wklock_info->addr, wklock_info->counter);
16508 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16515 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16530 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16540 hash_del(&wklock_info->wklock_node);
16542 hlist_del_init(&wklock_info->wklock_node);
16546 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16551 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
16555 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16557 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16566 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16570 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16571 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16572 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
16574 wake_lock(&dhd->wl_wifi);
16584 dhd->wakelock_counter++;
16585 ret = dhd->wakelock_counter;
16586 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16594 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16598 wake_lock(&dhd->wl_evtwake);
16609 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16612 wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
16621 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16624 wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
16635 ret = dhd_os_wake_lock(&dhd->pub);
16641 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16646 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16647 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16649 if (dhd->wakelock_counter > 0) {
16650 dhd->wakelock_counter--;
16656 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
16658 wake_unlock(&dhd->wl_wifi);
16663 ret = dhd->wakelock_counter;
16665 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
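/*
 * Illustrative sketch (not part of the driver; example_* names are
 * hypothetical): dhd_os_wake_lock()/dhd_os_wake_unlock() above keep a
 * reference count under wakelock_spinlock and only touch the underlying
 * wakelock on the 0 -> 1 and 1 -> 0 transitions, unless the lock is
 * currently waived. Stripped of locking and waiving, the counting logic
 * reduces to this:
 */
static int example_wl_count;	/* references, guarded by the caller's lock */
static int example_wl_held;	/* 1 while the real wakelock would be held */

static void example_wl_get(void)
{
	if (example_wl_count++ == 0)
		example_wl_held = 1;	/* wake_lock() in the real code */
}

static void example_wl_put(void)
{
	if (example_wl_count > 0 && --example_wl_count == 0)
		example_wl_held = 0;	/* wake_unlock() in the real code */
}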
16672 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16676 wake_unlock(&dhd->wl_evtwake);
16686 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16690 if (wake_lock_active(&dhd->wl_pmwake)) {
16691 wake_unlock(&dhd->wl_pmwake);
16700 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16704 if (wake_lock_active(&dhd->wl_txflwake)) {
16705 wake_unlock(&dhd->wl_txflwake);
16718 dhd = (dhd_info_t *)(pub->info);
16723 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
16724 (wake_lock_active(&dhd->wl_wdwake))))
16727 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
16747 dhd = (dhd_info_t *)(pub->info);
16754 c = dhd->wakelock_counter;
16755 l1 = wake_lock_active(&dhd->wl_wifi);
16756 l2 = wake_lock_active(&dhd->wl_wdwake);
16757 l3 = wake_lock_active(&dhd->wl_rxwake);
16758 l4 = wake_lock_active(&dhd->wl_ctrlwake);
16759 l7 = wake_lock_active(&dhd->wl_evtwake);
16761 l5 = wake_lock_active(&dhd->wl_intrwake);
16764 l6 = wake_lock_active(&dhd->wl_scanwake);
16766 l8 = wake_lock_active(&dhd->wl_pmwake);
16767 l9 = wake_lock_active(&dhd->wl_txflwake);
16772 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
16773 "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
16778 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
16791 ret = dhd_os_wake_unlock(&dhd->pub);
16797 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16802 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16803 if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
16806 wake_lock(&dhd->wl_wdwake);
16809 dhd->wakelock_wd_counter++;
16810 ret = dhd->wakelock_wd_counter;
16811 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16818 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16823 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16824 if (dhd->wakelock_wd_counter > 0) {
16825 dhd->wakelock_wd_counter = 0;
16826 if (!dhd->waive_wakelock) {
16828 wake_unlock(&dhd->wl_wdwake);
16832 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16842 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16845 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
16854 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16858 if (wake_lock_active(&dhd->wl_intrwake)) {
16859 wake_unlock(&dhd->wl_intrwake);
16871 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16874 wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
16883 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16887 if (wake_lock_active(&dhd->wl_scanwake)) {
16888 wake_unlock(&dhd->wl_scanwake);
16900 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16904 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16905 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16908 if (dhd->waive_wakelock == FALSE) {
16915 dhd->wakelock_before_waive = dhd->wakelock_counter;
16916 dhd->waive_wakelock = TRUE;
16918 ret = dhd->wakelock_wd_counter;
16919 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16926 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16932 if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
16935 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16938 if (!dhd->waive_wakelock)
16941 dhd->waive_wakelock = FALSE;
16952 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
16954 wake_lock(&dhd->wl_wifi);
16956 dhd_bus_dev_pm_stay_awake(&dhd->pub);
16958 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
16960 wake_unlock(&dhd->wl_wifi);
16962 dhd_bus_dev_pm_relax(&dhd->pub);
16965 dhd->wakelock_before_waive = 0;
16967 ret = dhd->wakelock_wd_counter;
16968 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
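/*
 * Annotation (not in the original source): WAIVE snapshots the current
 * wakelock_counter into wakelock_before_waive and suppresses the real
 * wake_lock/wake_unlock calls while waive_wakelock is set. RESTORE then
 * reconciles: if references appeared while waived (before == 0, now > 0)
 * the real wakelock is taken; if all references disappeared (before > 0,
 * now == 0) it is released; finally the snapshot is cleared.
 */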
16975 dhd->wakelock_counter = 0;
16976 dhd->wakelock_rx_timeout_enable = 0;
16977 dhd->wakelock_ctrl_timeout_enable = 0;
16980 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
16981 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
16982 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
16983 wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
16984 wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
16985 wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
16987 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
16990 wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
17002 dhd->wakelock_counter = 0;
17003 dhd->wakelock_rx_timeout_enable = 0;
17004 dhd->wakelock_ctrl_timeout_enable = 0;
17005 wake_lock_destroy(&dhd->wl_wifi);
17006 wake_lock_destroy(&dhd->wl_rxwake);
17007 wake_lock_destroy(&dhd->wl_ctrlwake);
17008 wake_lock_destroy(&dhd->wl_evtwake);
17009 wake_lock_destroy(&dhd->wl_pmwake);
17010 wake_lock_destroy(&dhd->wl_txflwake);
17012 wake_lock_destroy(&dhd->wl_intrwake);
17015 wake_lock_destroy(&dhd->wl_scanwake);
17027 return pub->up;
17042 i = snprintf(&info_string[i], sizeof(info_string) - i,
17056 return -EINVAL;
17061 return -EINVAL;
17066 return -ENODEV;
17069 DHD_OS_WAKE_LOCK(&dhd->pub);
17070 DHD_PERIM_LOCK(&dhd->pub);
17072 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
17073 dhd_check_hang(net, &dhd->pub, ret);
17075 DHD_PERIM_UNLOCK(&dhd->pub);
17076 DHD_OS_WAKE_UNLOCK(&dhd->pub);
17088 return -EINVAL;
17097 return dhdp->info->unit;
17112 dhdp = &dhd->pub;
17224 file->private_data = inode->i_private;
17238 return -EINVAL;
17241 if (count > g_dbgfs.size - pos)
17242 count = g_dbgfs.size - pos;
17245 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
17249 return -EFAULT;
17251 count -= ret;
17266 return -EINVAL;
17269 if (count > g_dbgfs.size - pos)
17270 count = g_dbgfs.size - pos;
17274 return -EFAULT;
17277 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
17285 loff_t pos = -1;
17292 pos = file->f_pos + off;
17295 pos = g_dbgfs.size - off;
17297 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
17345 if (!(dhd->chan_isvht80)) {
17346 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
17353 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17356 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17370 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17373 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17397 dhd_info_t *dhd = dhdp->info;
17402 ifp = dhd->iflist[idx];
17404 return ifp->mcast_regen_bss_enable;
17410 dhd_info_t *dhd = dhdp->info;
17415 ifp = dhd->iflist[idx];
17417 ifp->mcast_regen_bss_enable = val;
17430 dhd_info_t *dhd = dhdp->info;
17435 ifp = dhd->iflist[idx];
17437 return ifp->ap_isolate;
17443 dhd_info_t *dhd = dhdp->info;
17448 ifp = dhd->iflist[idx];
17451 ifp->ap_isolate = val;
17513 ret = vfs_read(fp, (char *)&dhd->rnd_len, sizeof(dhd->rnd_len), &pos);
17519 dhd->rnd_buf = MALLOCZ(dhd->osh, dhd->rnd_len);
17520 if (!dhd->rnd_buf) {
17525 ret = vfs_read(fp, (char *)dhd->rnd_buf, dhd->rnd_len, &pos);
17538 MFREE(dhd->osh, dhd->rnd_buf, dhd->rnd_len);
17539 dhd->rnd_buf = NULL;
17619 dhd_info = (dhd_info_t *)dhdp->info;
17620 dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
17625 dump->buf = buf;
17626 dump->bufsize = size;
17628 dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
17629 (uint32 *)(&dump->hscb_bufsize));
17631 dump->hscb_bufsize = 0;
17642 if (dhdp->memdump_enabled == DUMP_MEMONLY && (!disable_bug_on)) {
17650 (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
17653 (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
17656 (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
17663 dhd_info->scheduled_memdump = FALSE;
17664 (void)dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
17669 flush_type = MALLOCZ(dhdp->osh,
17674 dhd_log_dump(dhdp->info, flush_type, 0);
17681 dhd_info->scheduled_memdump = TRUE;
17689 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
17701 DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
17705 return -ENODEV;
17708 dhdp = &dhd->pub;
17711 return -ENODEV;
17718 ret = -ENODEV;
17724 if (dhdp->sssr_inited && dhdp->collect_sssr) {
17727 dhdp->collect_sssr = FALSE;
17736 ret = -EINVAL;
17748 if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
17751 dhd->pub.memdump_success = FALSE;
17760 * collect debug_dump as it may be called from non-sleepable context.
17763 if (dhd->scheduled_memdump &&
17764 dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
17765 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
17776 copy_debug_dump_time(dhdp->debug_dump_time_pktlog_str, dhdp->debug_dump_time_str);
17778 clear_debug_dump_time(dhdp->debug_dump_time_str);
17784 if (dhd->scheduled_memdump) {
17792 __FUNCTION__, dhdp->dhd_bus_busy_state));
17794 &dhdp->dhd_bus_busy_state, bitmask, 0);
17797 __FUNCTION__, dhdp->dhd_bus_busy_state));
17802 if (dump->hscb_buf && dump->hscb_bufsize) {
17804 if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
17805 dump->hscb_bufsize, "mem_dump_hscb")) {
17808 dhd->pub.memdump_success = FALSE;
17814 DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
17815 if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
17817 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
17819 dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
17821 dhd->pub.memdump_success == TRUE &&
17824 dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
17826 dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
17838 DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type));
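/*
 * Annotation (not in the original source): with memdump_enabled set to
 * DUMP_MEMFILE_BUGON the host apparently raises BUG() after a successful
 * dump (the call itself is not visible in this excerpt), except for the
 * dump types excluded above (sysdump, user-requested, dongle-host-event,
 * vendor-triggered). Otherwise only the "No BUG ON" message is logged.
 */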
17842 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
17845 DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
17848 dhd->scheduled_memdump = FALSE;
17850 if (dhdp->hang_was_pending) {
17853 dhdp->hang_was_pending = 0;
17866 dhd_pub_t *dhdp = &dhd_info->pub;
17870 if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
17871 dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
17872 } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
17873 dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
17874 dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
17877 if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
17878 ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
17888 dhd_pub_t *dhdp = &dhd_info->pub;
17892 if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
17893 dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
17894 } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
17895 dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
17896 dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
17899 if (dhdp->sssr_dig_buf_after) {
17900 ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
17910 dhd_pub_t *dhdp = &dhd_info->pub;
17913 if (dhdp->sssr_d11_before[core] &&
17914 dhdp->sssr_d11_outofreset[core] &&
17915 (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
17916 ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
17926 dhd_pub_t *dhdp = &dhd_info->pub;
17929 if (dhdp->sssr_d11_after[core] &&
17930 dhdp->sssr_d11_outofreset[core]) {
17931 ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
17955 dhdp = &dhd->pub;
17976 if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
17977 (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
17978 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
17979 dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
17984 if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
17985 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
17986 dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
17993 if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
17994 dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
17995 } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
17996 dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
17997 dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
18000 if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
18001 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
18003 DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
18008 if (dhdp->sssr_dig_buf_after) {
18009 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
18011 DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
18026 dhdp->sssr_dump_mode = dump_mode;
18039 * dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
18040 * Without workqueue -
18043 * With workqueue - all other DUMP_TYPEs : dhd_mem_dump is called in workqueue
18047 dhd_sssr_dump_to_file(dhdp->info);
18066 wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
18070 * log dump can be scheduled -
18080 dhd_os_logdump_lock(&dhd->pub);
18081 DHD_OS_WAKE_LOCK(&dhd->pub);
18082 if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
18085 DHD_OS_WAKE_UNLOCK(&dhd->pub);
18086 dhd_os_logdump_unlock(&dhd->pub);
18092 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
18102 (dhdp->memdump_enabled == DUMP_MEMONLY) ||
18103 (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
18105 (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)) {
18107 DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
18110 DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
18130 log_size = (unsigned long)dld_buf->max -
18131 (unsigned long)dld_buf->buffer;
18132 if (dld_buf->wraparound) {
18135 wr_size = (unsigned long)dld_buf->present -
18136 (unsigned long)dld_buf->front;
18141 dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
18143 dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
18145 dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
18152 dhdp->ecntr_dbg_ring) {
18154 ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
18156 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
18162 if (dhdp->statlog) {
18172 dhdp->rtt_dbg_ring) {
18174 ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
18176 dhd_print_buf_addr(dhdp, "rtt_dbg_ring ring_buf", ring->ring_buf,
18182 if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
18183 dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
18190 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18191 dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
18197 if (dhdp->concise_dbg_buf) {
18198 dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
18223 plen = (unsigned long)end - (unsigned long)bufptr;
18242 spin_lock_irqsave(&dld_buf->lock, flags);
18243 flush_ptr1 = dld_buf->present - tail_len;
18244 if (flush_ptr1 >= dld_buf->front) {
18248 } else if (dld_buf->wraparound) {
18250 flush_ptr1 = dld_buf->front;
18251 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
18252 len_flush2 = (unsigned long)tail_len - len_flush1;
18253 flush_ptr2 = (char *)((unsigned long)dld_buf->max -
18257 flush_ptr1 = dld_buf->front;
18259 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
18261 spin_unlock_irqrestore(&dld_buf->lock, flags);
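/*
 * Annotation (not in the original source), with a worked example: suppose
 * tail_len = 100 and present sits 30 bytes past front in a buffer that has
 * already wrapped. present - tail_len would land before front, so the tail
 * is flushed in two pieces: len_flush1 = 30 bytes from front up to present
 * (written after the wrap) and len_flush2 = 70 bytes ending at max (written
 * just before the wrap). Together they are the most recent 100 bytes logged.
 */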
18282 if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
18283 (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
18284 arr_len[SSSR_C0_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
18289 dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
18292 if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
18293 arr_len[SSSR_C0_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
18298 dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
18304 if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
18305 (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
18306 arr_len[SSSR_C1_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
18311 dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
18314 if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
18315 arr_len[SSSR_C1_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
18320 dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
18324 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
18325 arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
18326 arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
18332 if (dhd->sssr_dig_buf_before) {
18334 dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
18336 if (dhd->sssr_dig_buf_after) {
18338 dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
18341 } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
18342 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
18343 arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
18344 arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
18350 if (dhd->sssr_dig_buf_before) {
18352 dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
18354 if (dhd->sssr_dig_buf_after) {
18356 dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
18367 dhd_pub_t *dhdp = &dhd_info->pub;
18369 if (dhdp->sssr_dump_collected) {
18396 dhdp = &dhd_info->pub;
18402 if (dhdp->extended_trap_data) {
18420 dhdp = &dhd_info->pub;
18426 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18444 dhdp = &dhd_info->pub;
18450 if (dhdp->concise_dbg_buf) {
18451 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18458 (CONCISE_DUMP_BUFLEN - remain_len));
18471 dhdp = &dhd_info->pub;
18477 if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
18496 dhdp = &dhd_info->pub;
18502 if (dhdp->concise_dbg_buf) {
18503 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18512 length += CONCISE_DUMP_BUFLEN - remain_len;
18543 dhdp = &dhd_info->pub;
18549 if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
18550 ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
18551 length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
18568 dhdp = &dhd_info->pub;
18574 if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
18575 ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
18576 length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
18595 dhdp = &dhd_info->pub;
18609 len -= (uint32)strlen(dld_hdrs[type].hdr_str);
18610 len -= (uint32)sizeof(sec_hdr);
18616 ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
18635 * we cannot do so, since 'dhdp->osh' is unavailable
18648 dhd_info = (dhd_info_t *)dhdp->info;
18651 if (dhdp->dongle_trap_occured &&
18652 dhdp->extended_trap_data) {
18653 dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
18654 &dhd_info->event_data);
18663 if (dhd_info->pub.dongle_edl_support) {
18668 * wrapped around, only the work items from rd to ring-end are processed.
18703 dhdp = &dhd_info->pub;
18711 switch (dhdp->debug_dump_subcmd) {
18727 if (!dhdp->logdump_periodic_flush) {
18728 get_debug_dump_time(dhdp->debug_dump_time_str);
18730 size - strlen(dump_path),
18731 "_%s", dhdp->debug_dump_time_str);
18747 buf_size = (unsigned long)dld_buf->max -
18748 (unsigned long)dld_buf->buffer;
18750 if (dld_buf->wraparound) {
18754 spin_lock_irqsave(&dld_buf->lock, flags);
18755 wr_size = (unsigned long)dld_buf->present -
18756 (unsigned long)dld_buf->front;
18757 spin_unlock_irqrestore(&dld_buf->lock, flags);
18803 dhdp = &dhd_info->pub;
18811 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18818 len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
18825 len -= (uint32)sizeof(sec_hdr);
18827 ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
18848 dhdp = &dhd_info->pub;
18857 if (dhdp->dongle_trap_occured &&
18858 dhdp->extended_trap_data) {
18865 len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
18872 len -= (uint32)sizeof(sec_hdr);
18874 ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
18894 dhdp = &dhd_info->pub;
18906 len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
18913 len -= (uint32)sizeof(sec_hdr);
18915 if (dhdp->concise_dbg_buf) {
18916 dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18917 ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
18935 dhdp = &dhd_info->pub;
18941 if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
18959 dhdp = &dhd_info->pub;
18967 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18968 memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
18977 ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
18978 (CONCISE_DUMP_BUFLEN - remain_len), pos);
19009 dhdp = &dhd_info->pub;
19018 dhdp->ecntr_dbg_ring) {
19020 ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
19039 dhdp = &dhd_info->pub;
19047 if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
19048 ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
19065 dhdp = &dhd_info->pub;
19083 dhdp = &dhd_info->pub;
19099 sec_hdr->magic = LOG_DUMP_MAGIC;
19100 sec_hdr->timestamp = local_clock();
19152 if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
19164 if (!dhdp->logdump_periodic_flush) {
19166 sizeof(dump_path) - strlen(dump_path),
19167 "_%s", dhdp->debug_dump_time_str);
19184 ret = vfs_getattr(&fp->f_path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
19186 ret = vfs_getattr(&fp->f_path, &stat);
19194 if (dhdp->last_file_posn != 0 &&
19195 stat.size < dhdp->last_file_posn) {
19196 dhdp->last_file_posn = 0;
19199 if (dhdp->logdump_periodic_flush) {
19208 - (unsigned long)g_dld_buf[i].buffer;
19211 log_size += (unsigned long)g_dld_buf[i].present -
19221 ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
19226 pos = fp->f_pos;
19231 fspace_remain = logdump_max_filesize - pos;
19233 fp->f_pos -= pos;
19234 pos = fp->f_pos;
19255 dhdp->ecntr_dbg_ring) {
19256 dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
19263 if (dhdp->statlog) {
19279 dhdp->rtt_dbg_ring) {
19280 dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
19322 if (dhdp->logdump_periodic_flush) {
19324 dhdp->last_file_posn = pos;
19330 DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
19336 MFREE(dhdp->osh, type, sizeof(*type));
19391 dhd_pub_t *dhdp = &dhd->pub;
19393 if (dhdp->busstate == DHD_BUS_DOWN) {
19400 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
19408 *dump_size = dhdp->soc_ram_length;
19422 dhd_pub_t *dhdp = &dhd->pub;
19426 if (dhdp->soc_ram) {
19427 if (orig_len >= dhdp->soc_ram_length) {
19428 *buf = dhdp->soc_ram;
19429 *size = dhdp->soc_ram_length;
19433 " to save the memory dump with %d\n", dhdp->soc_ram_length));
19457 strncpy(*buf, dhd_version, size - 1);
19459 strncpy(*buf, fw_str, size - 1);
19470 dhd_pub_t *dhdp = &dhd->pub;
19489 dhd_pub_t *dhdp = &dhd->pub;
19502 dhd_pub_t *dhdp = &dhd->pub;
19513 dhd_pub_t *dhdp = &dhd->pub;
19520 ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
19533 int size = -1;
19559 dhd_info_t *dhd = dhdp->info;
19564 ifp = dhd->iflist[bssidx];
19565 return ifp->phnd_arp_table;
19570 dhd_info_t *dhd = dhdp->info;
19575 ifp = dhd->iflist[idx];
19578 return ifp->parp_enable;
19586 dhd_info_t *dhd = dhdp->info;
19589 ifp = dhd->iflist[idx];
19597 ifp->parp_enable = val;
19598 ifp->parp_discard = val;
19599 ifp->parp_allnode = val;
19603 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
19604 FALSE, dhdp->tickcnt);
19611 dhd_info_t *dhd = dhdp->info;
19616 ifp = dhd->iflist[idx];
19619 return ifp->parp_discard;
19625 dhd_info_t *dhd = dhdp->info;
19630 ifp = dhd->iflist[idx];
19634 return ifp->parp_allnode;
19639 dhd_info_t *dhd = dhdp->info;
19644 ifp = dhd->iflist[idx];
19648 return ifp->dhcp_unicast;
19653 dhd_info_t *dhd = dhdp->info;
19656 ifp = dhd->iflist[idx];
19660 ifp->dhcp_unicast = val;
19666 dhd_info_t *dhd = dhdp->info;
19671 ifp = dhd->iflist[idx];
19675 return ifp->block_ping;
19680 dhd_info_t *dhd = dhdp->info;
19683 ifp = dhd->iflist[idx];
19687 ifp->block_ping = val;
19697 dhd_info_t *dhd = dhdp->info;
19702 ifp = dhd->iflist[idx];
19706 return ifp->grat_arp;
19711 dhd_info_t *dhd = dhdp->info;
19714 ifp = dhd->iflist[idx];
19718 ifp->grat_arp = val;
19725 dhd_info_t *dhd = dhdp->info;
19730 ifp = dhd->iflist[idx];
19734 return ifp->block_tdls;
19739 dhd_info_t *dhd = dhdp->info;
19742 ifp = dhd->iflist[idx];
19746 ifp->block_tdls = val;
19763 return -ENODEV;
19767 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
19779 return -EINVAL;
19782 ifp = dhd->iflist[ifidx];
19786 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
19788 custom_rps_map_clear(ifp->net->_rx);
19792 return -ENODEV;
19808 return -ENOMEM;
19824 return -ENOMEM;
19829 map->cpus[i++] = cpu;
19833 map->len = i;
19839 return -1;
19843 old_map = rcu_dereference_protected(queue->rps_map,
19845 rcu_assign_pointer(queue->rps_map, map);
19857 DHD_INFO(("%s : Done. mapping cpu number : %d\n", __FUNCTION__, map->len));
19858 return map->len;
19867 map = rcu_dereference_protected(queue->rps_map, 1);
19869 RCU_INIT_POINTER(queue->rps_map, NULL);
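/*
 * Annotation (not in the original source): custom_rps_map_set() parses the
 * requested CPU list, allocates an rps_map sized for that many CPUs, fills
 * map->cpus[] and publishes it with rcu_assign_pointer(); the clear path
 * above detaches the map with RCU_INIT_POINTER() so readers still
 * dereferencing the old map under RCU stay safe until it is freed
 * (presumably after a grace period, not visible in this excerpt).
 */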
19932 return -1;
19935 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19974 dhdp = &dhd->pub;
19975 if (dhdp == NULL || !dhdp->up) {
19989 argos_rps_ctrl_data.wlan_primary_netdev->_rx,
19995 err = -1;
20005 if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
20021 if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) {
20031 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
20067 dhdp->memdump_enabled = DUMP_MEMONLY;
20068 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
20085 if (dhdp->memdump_enabled == DUMP_DISABLED) {
20086 dhdp->memdump_enabled = DUMP_MEMFILE;
20088 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
20092 dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
20093 dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
20102 dhd_info_t *dhd = dhdp->info;
20104 if (dhd->iflist[0] && dhd->iflist[0]->net)
20105 return dhd->iflist[0]->net;
20113 return dhd_pub->fw_download_status;
20134 return -1;
20153 nlh = (struct nlmsghdr *)skb->data;
20155 if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
20156 sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
20241 dhd_info_t *dhd_info = dhd->info;
20270 mutex_init(&dhd_info->logdump_lock);
20282 /* pre-alloc the memory for the log buffers & 'special' buffer */
20288 dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
20291 prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
20292 dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20295 DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
20298 if (!dld_buf_special->buffer) {
20299 DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
20306 dld_buf->dhd_pub = dhd;
20307 spin_lock_init(&dld_buf->lock);
20308 dld_buf->wraparound = 0;
20310 dld_buf->buffer = bufptr;
20311 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
20312 bufptr = (uint8 *)dld_buf->max;
20314 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
20316 dld_buf->present = dld_buf->front = dld_buf->buffer;
20317 dld_buf->remain = dld_buf_size[i];
20318 dld_buf->enable = 1;
20322 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
20323 dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
20324 if (!dhd->ecntr_dbg_ring)
20327 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20336 DHD_DBG_RING_LOCK(ring->lock, flags);
20337 ring->state = RING_ACTIVE;
20338 ring->threshold = 0;
20339 DHD_DBG_RING_UNLOCK(ring->lock, flags);
20345 /* now use the rest of the pre-alloc'd memory for the rtt debug ring */
20346 dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
20347 if (!dhd->rtt_dbg_ring)
20350 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
20359 DHD_DBG_RING_LOCK(ring->lock, flags);
20360 ring->state = RING_ACTIVE;
20361 ring->threshold = 0;
20362 DHD_DBG_RING_UNLOCK(ring->lock, flags);
20374 dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
20375 if (!dhd->concise_dbg_buf) {
20390 cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
20398 MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
20405 if (dhd->logdump_cookie) {
20407 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
20408 dhd->logdump_cookie = NULL;
20411 if (dhd->event_log_filter) {
20416 if (dhd->concise_dbg_buf) {
20417 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20421 if (dhd->ecntr_dbg_ring) {
20422 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20424 ring->ring_buf = NULL;
20425 ring->ring_size = 0;
20426 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20427 dhd->ecntr_dbg_ring = NULL;
20432 if (dhd->rtt_dbg_ring) {
20433 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
20435 ring->ring_buf = NULL;
20436 ring->ring_size = 0;
20437 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20438 dhd->rtt_dbg_ring = NULL;
20446 if (dld_buf_special->buffer) {
20447 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
20452 MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
20454 if (dld_buf_special->buffer) {
20455 MFREE(dhd->osh, dld_buf_special->buffer,
20461 dld_buf->enable = 0;
20462 dld_buf->buffer = NULL;
20465 mutex_destroy(&dhd_info->logdump_lock);
20473 dhd_info_t *dhd_info = dhd->info;
20478 if (dhd->concise_dbg_buf) {
20479 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20480 dhd->concise_dbg_buf = NULL;
20483 if (dhd->logdump_cookie) {
20485 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
20486 dhd->logdump_cookie = NULL;
20490 if (dhd->event_log_filter) {
20496 if (dhd->ecntr_dbg_ring) {
20497 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20499 ring->ring_buf = NULL;
20500 ring->ring_size = 0;
20501 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20502 dhd->ecntr_dbg_ring = NULL;
20507 if (dhd->rtt_dbg_ring) {
20508 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
20510 ring->ring_buf = NULL;
20511 ring->ring_size = 0;
20512 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20513 dhd->rtt_dbg_ring = NULL;
20517 /* 'general' buffer points to start of the pre-alloc'd memory */
20521 if (dld_buf->buffer) {
20522 DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
20524 if (dld_buf_special->buffer) {
20525 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
20529 if (dld_buf->buffer) {
20530 MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
20532 if (dld_buf_special->buffer) {
20533 MFREE(dhd->osh, dld_buf_special->buffer,
20539 dld_buf->enable = 0;
20540 dld_buf->buffer = NULL;
20543 mutex_destroy(&dhd_info->logdump_lock);
20565 if (dld_buf->enable != 1) {
20571 /* Non ANSI C99 compliant returns -1,
20580 len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
20585 spin_lock_irqsave(&dld_buf->lock, flags);
20586 if (dld_buf->remain < len) {
20587 dld_buf->wraparound = 1;
20588 dld_buf->present = dld_buf->front;
20589 dld_buf->remain = dld_buf_size[type];
20594 memcpy(dld_buf->present, tmp_buf, len);
20595 dld_buf->remain -= len;
20596 dld_buf->present += len;
20597 spin_unlock_irqrestore(&dld_buf->lock, flags);
20600 ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
20602 if (dld_buf->dhd_pub) {
20603 dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
20604 dhdp->logdump_periodic_flush =
20607 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
20611 dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
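/*
 * Annotation (not in the original source): each formatted entry is capped at
 * DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1 bytes. If it no longer fits in
 * dld_buf->remain, the write position wraps back to front, remain is reset
 * to the full buffer size and the wraparound flag is set, so the flush code
 * knows the bytes between the new write position and max still hold older,
 * valid log data. When periodic flushing is enabled, a deferred log dump is
 * scheduled from here (the triggering condition is not visible in this
 * excerpt).
 */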
20640 dhd = dhdp->info;
20642 flush_workqueue(dhd->tx_wq);
20643 flush_workqueue(dhd->rx_wq);
20657 if (dhd->duart_execute) {
20680 if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
20683 if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
20684 dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
20686 dhdp->memdump_success == FALSE ||
20689 dhdp->info->duart_execute = TRUE;
20690 DHD_ERROR(("DHD: %s - execute %s %s\n",
20693 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
20695 dhdp->info->duart_execute = FALSE;
20698 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
20716 DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
20718 dhdp->is_blob = FALSE;
20720 DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__, filepath));
20721 dhdp->is_blob = TRUE;
20752 dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
20758 dhd_info_t *dhd_info = dhdp->info;
20760 dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
20764 /* ---------------------------- End of sysfs implementation ------------------------------------- */
20777 if (!dhdp->bus) {
20778 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
20784 if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
20791 If dedicated CPU core is not on-line,
20799 irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
20800 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
20802 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
20837 if (fp->f_mode & FMODE_WRITE) {
20838 ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
20926 iflist = dhd->info->iflist[idx];
20931 if (iflist->net != NULL) {
20932 if (iflist->net->ieee80211_ptr != NULL) {
20934 (iflist->net->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) ||
20935 (iflist->net->ieee80211_ptr->iftype == NL80211_IFTYPE_AP)) {
20940 bsd_ifp->ndev = iflist->net;
20941 bsd_ifp->bssidx = iflist->bssidx;
20957 dhd_bandsteer_context_t *dhd_bandsteer_cntx = dhd_bandsteer_mac->dhd_bandsteer_cntx;
20958 dhd_pub_t *dhd = (dhd_pub_t *) dhd_bandsteer_cntx->dhd_pub;
20960 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
20974 buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
20987 nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
21000 MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
21052 msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
21053 lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
21054 'A' + 10 : inbuf[i + 1] - '0';
21096 p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
21104 p_filter_iov->version = WL_FILTER_IE_VERSION;
21105 p_filter_iov->len = filter_iovsize;
21106 p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
21107 p_filter_iov->pktflag = FC_PROBE_REQ;
21108 p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
21110 bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
21111 p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
21206 all_tlvsize = (bufsize - buf_space_left);
21207 p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
21209 p_filter_iov->len, NULL, 0, TRUE);
21216 MFREE(dhd->osh, p_filter_iov, filter_iovsize);
21258 dhdp = &dhd->pub;
21269 if (dhdp->req_hang_type) {
21271 __FUNCTION__, dhdp->req_hang_type));
21272 dhdp->req_hang_type = 0;
21283 if (dhdp->req_hang_type != 0) {
21291 dhdp->req_hang_type = reason;
21298 dhdp->req_hang_type = reason;
21305 dhdp->req_hang_type = reason;
21309 dhdp->req_hang_type = reason;
21314 dhdp->req_hang_type = 0;
21318 dhdp->req_hang_type = 0;
21322 dhdp->req_hang_type = 0;
21327 dhdp->req_hang_type = reason;
21330 dhdp->req_hang_type = 0;
21350 dhdp = &dhd->pub;
21352 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
21358 ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
21363 dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
21371 if (dhdp->enable_erpom) {
21372 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
21389 if (dhd_pktlog_dump_write_file(&dhd->pub)) {
21398 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21417 dhdp = (dhd_pub_t *)cfg->pub;
21419 if (!dhdp || !cfg->ap_sta_info) {
21420 WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info));
21424 p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t));
21426 DHD_ERROR(("%s(): could not allocate memory for - "
21431 mutex_lock(&cfg->ap_sta_info->wq_data_sync);
21433 memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t));
21434 p_wq_data->dhdp = dhdp;
21435 p_wq_data->bcm_cfg = cfg;
21436 p_wq_data->ndev = (struct net_device *)ndev;
21438 mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
21440 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21456 local_time = (u32)(curtime.tv_sec -
21465 tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
21493 dhdinfo = dhd->info;
21500 DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
21506 #define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
21507 #define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
21523 uint32 write_idx; /* next write index, -1 : not started */
21524 uint32 read_idx; /* next read index, -1 : not started */
21537 uint32 idx; /* -1 : not started */
21587 ret_ring->type = type;
21588 ret_ring->ring_sync = DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
21589 ret_ring->magic = DHD_RING_MAGIC;
21592 ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
21593 ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
21594 ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
21595 ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
21596 ret_ring->fixed.elem_size = elem_size;
21597 ret_ring->fixed.elem_cnt = elem_cnt;
21599 ret_ring->single.idx = DHD_RING_IDX_INVALID;
21600 atomic_set(&ret_ring->single.ring_locked, 0);
21601 ret_ring->single.ring_overwrited = 0;
21602 ret_ring->single.rsvd = 0;
21603 ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
21604 ret_ring->single.elem_size = elem_size;
21605 ret_ring->single.elem_cnt = elem_cnt;
21619 if (ring->magic != DHD_RING_MAGIC) {
21623 if (ring->type != DHD_RING_TYPE_FIXED &&
21624 ring->type != DHD_RING_TYPE_SINGLE_IDX) {
21628 DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
21629 ring->ring_sync = NULL;
21630 if (ring->type == DHD_RING_TYPE_FIXED) {
21631 dhd_fixed_ring_info_t *fixed = &ring->fixed;
21632 memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
21633 fixed->elem_size = fixed->elem_cnt = 0;
21635 dhd_singleidx_ring_info_t *single = &ring->single;
21636 memset(single->elem, 0, single->elem_size * single->elem_cnt);
21637 single->elem_size = single->elem_cnt = 0;
21639 ring->type = 0;
21640 ring->magic = 0;
21653 elem_size = fixed->elem_size;
21654 elem_cnt = fixed->elem_cnt;
21655 elem = fixed->elem;
21658 elem_size = single->elem_size;
21659 elem_cnt = single->elem_cnt;
21660 elem = single->elem;
21667 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
21670 diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
21672 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
21691 return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
21697 return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
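/*
 * Illustrative sketch (not part of the driver): the count above is inclusive
 * of both end indices. With elem_cnt = 8, read_idx = 6 and write_idx = 1 the
 * occupied slots are 6, 7, 0 and 1, and (8 + 1 - 6) % 8 + 1 = 4. The same
 * arithmetic, in isolation:
 */
static unsigned int
example_ring_occupancy(unsigned int elem_cnt, unsigned int start, unsigned int end)
{
	/* Number of slots in use, counting both start and end. */
	return (elem_cnt + end - start) % elem_cnt + 1;
}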
21703 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21706 return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
21714 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21719 next_idx = (ring->read_idx + 1) % ring->elem_cnt;
21720 if (ring->read_idx == ring->write_idx) {
21722 ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
21726 ring->read_idx = next_idx;
21733 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21736 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
21744 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21745 ring->read_idx = ring->write_idx = 0;
21746 return (uint8 *)ring->elem;
21750 tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
21751 if (ring->lock_idx == tmp_idx) {
21755 ring->write_idx = tmp_idx;
21756 if (ring->write_idx == ring->read_idx) {
21758 ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
21761 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
21769 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21775 if (cur_idx >= ring->elem_cnt) {
21779 if (cur_idx == ring->write_idx) {
21784 cur_idx = (cur_idx + 1) % ring->elem_cnt;
21785 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21793 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21798 if (cur_idx >= ring->elem_cnt) {
21801 if (cur_idx == ring->read_idx) {
21806 cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
21807 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21818 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21825 if (first_idx >= ring->elem_cnt) {
21829 first_idx = ring->read_idx;
21834 if (last_idx >= ring->elem_cnt) {
21838 last_idx = ring->write_idx;
21841 ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
21842 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
21845 ring->write_idx, ring->read_idx, first_idx));
21849 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
21852 ring->write_idx, ring->read_idx, last_idx));
21856 ring->lock_idx = first_idx;
21857 ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
21864 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21869 ring->lock_idx = DHD_RING_IDX_INVALID;
21870 ring->lock_count = 0;
21876 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21880 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21884 return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
21891 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21895 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21900 lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
21901 return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
21907 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21911 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21915 return ring->lock_count;
21921 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21925 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21930 ring->lock_count--;
21931 if (ring->lock_count <= 0) {
21932 ring->lock_idx = DHD_RING_IDX_INVALID;
21934 ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
21942 ring->read_idx = idx;
21948 ring->write_idx = idx;
21954 return ring->read_idx;
21960 return ring->write_idx;
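/* Editor's note (added): summary of the fixed-ring lock window used above.
 * __dhd_fixed_ring_lock() pins the range [first_idx, last_idx] by recording
 * lock_idx and lock_count; __dhd_fixed_ring_get_empty() then refuses to step
 * write_idx onto lock_idx (the tmp_idx check earlier), so pinned entries are
 * not overwritten until they are released one at a time via lock_free_first()
 * or all at once via lock_free(). The read/write index accessors just above
 * presumably let callers save and restore ring positions around such
 * operations.
 */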
21969 if (ring->idx == DHD_RING_IDX_INVALID) {
21973 if (ring->ring_overwrited) {
21974 tmp_idx = (ring->idx + 1) % ring->elem_cnt;
21977 return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
21983 if (ring->idx == DHD_RING_IDX_INVALID) {
21987 return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
21993 if (ring->idx == DHD_RING_IDX_INVALID) {
21994 ring->idx = 0;
21995 return (uint8 *)ring->elem;
21999 if (atomic_read(&ring->ring_locked)) {
22004 if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
22005 ring->ring_overwrited = 1;
22008 ring->idx = (ring->idx + 1) % ring->elem_cnt;
22010 return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
22018 if (ring->idx == DHD_RING_IDX_INVALID) {
22024 if (cur_idx >= ring->elem_cnt) {
22028 if (cur_idx == ring->idx) {
22033 cur_idx = (cur_idx + 1) % ring->elem_cnt;
22035 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
22043 if (ring->idx == DHD_RING_IDX_INVALID) {
22048 if (cur_idx >= ring->elem_cnt) {
22052 if (!ring->ring_overwrited && cur_idx == 0) {
22057 cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
22058 if (ring->ring_overwrited && cur_idx == ring->idx) {
22063 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
22069 if (!atomic_read(&ring->ring_locked)) {
22070 atomic_set(&ring->ring_locked, 1);
22077 if (atomic_read(&ring->ring_locked)) {
22078 atomic_set(&ring->ring_locked, 0);
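/* Editor's note (added): the single-index ring above keeps one cursor
 * (ring->idx) plus a ring_overwrited flag set once the buffer wraps, so
 * get_first() starts at idx + 1 after a wrap and at element 0 otherwise.
 * Writers appear to bail out of get_empty() while the atomic ring_locked flag
 * is set (the elided branch after the atomic_read check), which is what the
 * whole_lock()/whole_unlock() helpers toggle so a reader can walk a stable
 * snapshot.
 */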
22090 if (!ring || ring->magic != DHD_RING_MAGIC) {
22095 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22096 if (ring->type == DHD_RING_TYPE_FIXED) {
22097 ret = __dhd_fixed_ring_get_first(&ring->fixed);
22099 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22100 ret = __dhd_singleidx_ring_get_first(&ring->single);
22102 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22113 if (!ring || ring->magic != DHD_RING_MAGIC) {
22118 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22119 if (ring->type == DHD_RING_TYPE_FIXED) {
22120 __dhd_fixed_ring_free_first(&ring->fixed);
22122 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22131 if (!ring || ring->magic != DHD_RING_MAGIC) {
22136 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22137 if (ring->type == DHD_RING_TYPE_FIXED) {
22138 __dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
22140 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22149 if (!ring || ring->magic != DHD_RING_MAGIC) {
22154 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22155 if (ring->type == DHD_RING_TYPE_FIXED) {
22156 __dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
22158 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22168 if (!ring || ring->magic != DHD_RING_MAGIC) {
22173 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22174 if (ring->type == DHD_RING_TYPE_FIXED) {
22175 read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
22177 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22189 if (!ring || ring->magic != DHD_RING_MAGIC) {
22194 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22195 if (ring->type == DHD_RING_TYPE_FIXED) {
22196 write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
22198 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22211 if (!ring || ring->magic != DHD_RING_MAGIC) {
22216 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22217 if (ring->type == DHD_RING_TYPE_FIXED) {
22218 ret = __dhd_fixed_ring_get_last(&ring->fixed);
22220 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22221 ret = __dhd_singleidx_ring_get_last(&ring->single);
22223 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22238 if (!ring || ring->magic != DHD_RING_MAGIC) {
22243 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22244 if (ring->type == DHD_RING_TYPE_FIXED) {
22245 ret = __dhd_fixed_ring_get_empty(&ring->fixed);
22247 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22248 ret = __dhd_singleidx_ring_get_empty(&ring->single);
22250 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22261 if (!ring || ring->magic != DHD_RING_MAGIC) {
22266 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22267 if (ring->type == DHD_RING_TYPE_FIXED) {
22268 ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
22270 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22271 ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
22273 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
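/* Editor's note (added): a minimal usage sketch of the public wrappers above,
 * assuming their exported names follow the __dhd_* helpers they call
 * (dhd_ring_get_empty() returns the next writable slot, dhd_ring_get_first()/
 * dhd_ring_get_next() iterate oldest to newest, dhd_ring_free_first() retires
 * the oldest entry) and that they take/return void * element pointers. The
 * element type and function below are illustrative, not part of the driver.
 */
struct example_entry {
	int seq;
};

static void example_ring_produce_consume(void *ring)
{
	struct example_entry *slot;
	struct example_entry *cur;

	/* producer side: reserve the next writable slot and fill it */
	slot = (struct example_entry *)dhd_ring_get_empty(ring);
	if (slot) {
		slot->seq = 0;
	}

	/* consumer side: walk oldest -> newest under the ring's own locking */
	for (cur = (struct example_entry *)dhd_ring_get_first(ring);
		cur != NULL;
		cur = (struct example_entry *)dhd_ring_get_next(ring, cur)) {
		/* read cur->seq here */
	}

	/* retire the oldest entry once it has been handled */
	dhd_ring_free_first(ring);
}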
22284 if (!ring || ring->magic != DHD_RING_MAGIC) {
22289 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22290 if (ring->type == DHD_RING_TYPE_FIXED) {
22291 ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
22293 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22294 ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
22296 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22307 if (!ring || ring->magic != DHD_RING_MAGIC) {
22312 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22313 if (ring->type == DHD_RING_TYPE_FIXED) {
22314 cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
22316 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22327 if (!ring || ring->magic != DHD_RING_MAGIC) {
22332 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22333 if (ring->type == DHD_RING_TYPE_FIXED) {
22334 __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
22336 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22346 if (!ring || ring->magic != DHD_RING_MAGIC) {
22351 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22352 if (ring->type == DHD_RING_TYPE_FIXED) {
22353 __dhd_fixed_ring_lock_free(&ring->fixed);
22355 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22365 if (!ring || ring->magic != DHD_RING_MAGIC) {
22370 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22371 if (ring->type == DHD_RING_TYPE_FIXED) {
22372 ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
22374 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22385 if (!ring || ring->magic != DHD_RING_MAGIC) {
22390 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22391 if (ring->type == DHD_RING_TYPE_FIXED) {
22392 ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
22394 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22405 if (!ring || ring->magic != DHD_RING_MAGIC) {
22410 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22411 if (ring->type == DHD_RING_TYPE_FIXED) {
22412 ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
22414 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22425 if (!ring || ring->magic != DHD_RING_MAGIC) {
22430 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22431 if (ring->type == DHD_RING_TYPE_FIXED) {
22432 __dhd_fixed_ring_lock_free_first(&ring->fixed);
22434 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22443 if (!ring || ring->magic != DHD_RING_MAGIC) {
22448 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22449 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22450 __dhd_singleidx_ring_whole_lock(&ring->single);
22452 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22461 if (!ring || ring->magic != DHD_RING_MAGIC) {
22466 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22467 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22468 __dhd_singleidx_ring_whole_unlock(&ring->single);
22470 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
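/* Editor's note (added): sketch of snapshotting a single-index ring, assuming
 * the two wrappers above are exported as dhd_ring_whole_lock() and
 * dhd_ring_whole_unlock() (matching the __dhd_singleidx_* helpers they call).
 * The lock only stops writers from advancing the index; iteration reuses the
 * generic get_first/get_next wrappers.
 */
static void example_ring_snapshot(void *ring)
{
	void *cur;

	dhd_ring_whole_lock(ring);	/* freeze writers on a single-idx ring */
	for (cur = dhd_ring_get_first(ring); cur != NULL;
		cur = dhd_ring_get_next(ring, cur)) {
		/* copy out the element pointed to by cur */
	}
	dhd_ring_whole_unlock(ring);	/* allow writers to advance again */
}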
22474 #define DHD_VFS_INODE(dir) (dir->d_inode)
22494 DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err));
22506 err = -EINVAL;
22531 int fm_idx = -1;
22534 if (strlen(fm_ptr->elems[i].type_name) == 0) {
22538 if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
22544 if (fm_idx == -1) {
22548 if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
22549 strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
22550 fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
22551 fm_ptr->elems[fm_idx].file_idx = 0;
22558 * dhd_dump_file_manage_enqueue - enqueue dump file path
22569 if (!dhd || !dhd->dump_file_manage) {
22571 __FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
22575 fm_ptr = dhd->dump_file_manage;
22585 elem = &fm_ptr->elems[fm_idx];
22586 fp_idx = elem->file_idx;
22588 __FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
22591 if (strlen(elem->file_path[fp_idx]) != 0) {
22592 if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
22594 __FUNCTION__, elem->file_path[fp_idx]));
22597 __FUNCTION__, elem->file_path[fp_idx]));
22602 strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
22603 elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
22606 elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
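/* Editor's note (added): net effect of the enqueue path above. Each dump type
 * keeps DHD_DUMP_FILE_COUNT_MAX file-path slots and file_idx points at the
 * oldest one. On enqueue the file recorded there (if any) is deleted from the
 * filesystem, the new dump path is stored in its place, and file_idx advances
 * modulo DHD_DUMP_FILE_COUNT_MAX, so with a maximum of 5 the 6th dump of a
 * given type deletes and replaces the 1st.
 */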
22616 uint32 irq = (uint32)-1;
22620 dhdp->smmu_fault_occurred = TRUE;
22622 dhdp->axi_error = TRUE;
22623 dhdp->axi_err_dump->axid = axid;
22624 dhdp->axi_err_dump->fault_address = fault_addr;
22628 dhdpcie_get_pcieirq(dhdp->bus, &irq);
22629 if (irq != (uint32)-1) {
22661 dhd_info = (dhd_info_t *)dhd_pub->info;
22662 dhd_if = dhd_info->iflist[0];
22664 ASSERT(dhd_if && dhd_if->net);
22665 if (dhd_if && dhd_if->net) {
22666 dhd_stop(dhd_if->net);
22691 /* Ignore compiler warnings due to -Werror=cast-qual */
22694 #pragma GCC diagnostic ignored "-Wcast-qual"
22701 dhdp = &ifp->info->pub;
22703 if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE) ||
22704 (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
22706 wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
22707 } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE) ||
22708 (dhdp->op_mode & DHD_FLAG_STA_MODE)) {
22710 wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
22717 ifp->tsync_rcvd = 0;
22718 ifp->tsyncack_txed = 0;
22719 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
22728 ifp->tsync_rcvd = 0;
22729 ifp->tsyncack_txed = 0;
22730 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
22739 /* Ignore compiler warnings due to -Werror=cast-qual */
22742 #pragma GCC diagnostic ignored "-Wcast-qual"
22750 if (ifp && ifp->net &&
22751 (OSL_ATOMIC_READ(ifp->info->pub.osh, &ifp->m4state) == M4_TXFAILED)) {
22753 ifp->net->name));
22754 wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
22761 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
22768 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
22769 type = ntoh16(eh->ether_type);
22773 ifp = dhd->iflist[ifidx];
22774 if (!ifp || !ifp->net) {
22781 OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
22782 schedule_delayed_work(&ifp->m4state_work,
22800 dhdinfo = (dhd_info_t *)(dhdp->info);
22806 ifp = dhdinfo->iflist[ifidx];
22808 cancel_delayed_work_sync(&ifp->m4state_work);
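/* Editor's note (added): summary of the M4 handling above. When transmission
 * of the 4-way handshake frame fails (dhd_eap_txcomplete path; the EAPOL/M4
 * classification itself is in code elided from this listing), ifp->m4state is
 * set to M4_TXFAILED and m4state_work is scheduled. If the state is still
 * M4_TXFAILED when the delayed work runs, the handler above disassociates
 * with WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT; the cleanup path cancels any
 * pending work with cancel_delayed_work_sync() before the interface goes
 * away.
 */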
22820 dhd = (dhd_info_t *)(pub->info);
22823 spin_lock_irqsave(&dhd->hp2p_lock, flags);
22834 dhd = (dhd_info_t *)(pub->info);
22837 spin_unlock_irqrestore(&dhd->hp2p_lock, flags);
22853 dhdp = &dhd->pub;
22865 if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
22867 __FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
22873 dhdp->collect_sssr = TRUE;
22876 dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
22888 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
22907 dhdp = &dhd->pub;
22915 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
22941 if (dhdp->tid_mode == SET_TID_OFF) {
22945 pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
22954 prio = dhdp->target_tid;
22955 uid = dhdp->target_uid;
22962 sk = ((struct sk_buff *)(pkt))->sk;
22964 if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||