/* kernel/drivers/net/wireless/rockchip_wlan/ssv6xxx/hci/ssv_hci.c */
/*
 * Copyright (c) 2015 South Silicon Valley Microelectronics Inc.
 * Copyright (c) 2015 iComm Corporation
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <ssv6200.h>
#include "hctrl.h"
MODULE_AUTHOR("iComm Semiconductor Co., Ltd");
MODULE_DESCRIPTION("HCI driver for SSV6xxx 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("SSV6xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
static struct ssv6xxx_hci_ctrl *ctrl_hci = NULL;
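/*
 * Allocate an skb sized len + SSV6200_ALLOC_RSVD and reserve
 * SSV_SKB_info_size bytes of headroom for the driver's per-skb info block.
 */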
struct sk_buff *ssv_skb_alloc(s32 len)
{
    struct sk_buff *skb;
    skb = __dev_alloc_skb(len + SSV6200_ALLOC_RSVD, GFP_KERNEL);
    if (skb != NULL) {
        skb_reserve(skb, SSV_SKB_info_size);
    }
    return skb;
}
void ssv_skb_free(struct sk_buff *skb)
{
    dev_kfree_skb_any(skb);
}
static int ssv6xxx_hci_irq_enable(void)
{
    HCI_IRQ_SET_MASK(ctrl_hci, ~(ctrl_hci->int_mask));
    HCI_IRQ_ENABLE(ctrl_hci);
    return 0;
}
static int ssv6xxx_hci_irq_disable(void)
{
    HCI_IRQ_SET_MASK(ctrl_hci, 0xffffffff);
    HCI_IRQ_DISABLE(ctrl_hci);
    return 0;
}
static void ssv6xxx_hci_irq_register(u32 irq_mask)
{
    unsigned long flags;
    u32 regval;
    mutex_lock(&ctrl_hci->hci_mutex);
    spin_lock_irqsave(&ctrl_hci->int_lock, flags);
    ctrl_hci->int_mask |= irq_mask;
    regval = ~ctrl_hci->int_mask;
    spin_unlock_irqrestore(&ctrl_hci->int_lock, flags);
    smp_mb();
    HCI_IRQ_SET_MASK(ctrl_hci, regval);
    mutex_unlock(&ctrl_hci->hci_mutex);
}
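/*
 * Map a software TX queue index to its interrupt bit: the last queue
 * (SSV_HW_TXQ_NUM - 1, the manager queue) uses bit 1, while the EDCA
 * queues 0..3 use bits 3..6.
 */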
static inline u32 ssv6xxx_hci_get_int_bitno(int txqid)
{
    if (txqid == SSV_HW_TXQ_NUM - 1)
        return 1;
    else
        return txqid + 3;
}
static int ssv6xxx_hci_start(void)
{
    ssv6xxx_hci_irq_enable();
    ctrl_hci->hci_start = true;
    HCI_IRQ_TRIGGER(ctrl_hci);
    return 0;
}
static int ssv6xxx_hci_stop(void)
{
    ssv6xxx_hci_irq_disable();
    ctrl_hci->hci_start = false;
    return 0;
}
static int ssv6xxx_hci_read_word(u32 addr, u32 *regval)
{
    int ret = HCI_REG_READ(ctrl_hci, addr, regval);
    return ret;
}
static int ssv6xxx_hci_write_word(u32 addr, u32 regval)
{
    return HCI_REG_WRITE(ctrl_hci, addr, regval);
}
static int ssv6xxx_hci_load_fw(u8 *firmware_name, u8 openfile)
{
    return HCI_LOAD_FW(ctrl_hci, firmware_name, openfile);
}
static int ssv6xxx_hci_write_sram(u32 addr, u8 *data, u32 size)
{
    return HCI_SRAM_WRITE(ctrl_hci, addr, data, size);
}
static int ssv6xxx_hci_pmu_wakeup(void)
{
    HCI_PMU_WAKEUP(ctrl_hci);
    return 0;
}
static int ssv6xxx_hci_interface_reset(void)
{
    HCI_IFC_RESET(ctrl_hci);
    return 0;
}
static int ssv6xxx_hci_send_cmd(struct sk_buff *skb)
{
    int ret;
    ret = IF_SEND(ctrl_hci, (void *)skb->data, skb->len, 0);
    if (ret < 0) {
        printk("ssv6xxx_hci_send_cmd fail......\n");
    }
    return ret;
}
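/*
 * Queue an skb on the software TX queue txqid and kick the TX path.
 * When the queue reaches max_qsize (and HCI_FLAGS_NO_FLOWCTRL is not set),
 * the registered flow-control callback is invoked to pause the upper layer.
 * Depending on the interface, TX is started either by scheduling the HCI
 * work queue or by calling the interface's trigger_tx_rx hook.
 */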
static int ssv6xxx_hci_enqueue(struct sk_buff *skb, int txqid, u32 tx_flags)
{
    struct ssv_hw_txq *hw_txq;
    unsigned long flags;
    u32 status;
    int qlen = 0;
    BUG_ON(txqid >= SSV_HW_TXQ_NUM || txqid < 0);
    if (txqid >= SSV_HW_TXQ_NUM || txqid < 0)
        return -1;
    hw_txq = &ctrl_hci->hw_txq[txqid];
    hw_txq->tx_flags = tx_flags;
    if (tx_flags & HCI_FLAGS_ENQUEUE_HEAD)
        skb_queue_head(&hw_txq->qhead, skb);
    else
        skb_queue_tail(&hw_txq->qhead, skb);
    qlen = (int)skb_queue_len(&hw_txq->qhead);
    if (!(tx_flags & HCI_FLAGS_NO_FLOWCTRL)) {
        if (skb_queue_len(&hw_txq->qhead) >= hw_txq->max_qsize) {
            ctrl_hci->shi->hci_tx_flow_ctrl_cb(
                ctrl_hci->shi->tx_fctrl_cb_args,
                hw_txq->txq_no,
                true, 2000
            );
        }
    }
#ifdef CONFIG_SSV_TX_LOWTHRESHOLD
    mutex_lock(&ctrl_hci->hci_mutex);
#endif
    spin_lock_irqsave(&ctrl_hci->int_lock, flags);
    status = ctrl_hci->int_mask;
#ifdef CONFIG_SSV_TX_LOWTHRESHOLD
    if ((ctrl_hci->int_mask & SSV6XXX_INT_RESOURCE_LOW) == 0)
    {
        if (ctrl_hci->shi->if_ops->trigger_tx_rx == NULL)
        {
            u32 regval;
            ctrl_hci->int_mask |= SSV6XXX_INT_RESOURCE_LOW;
            regval = ~ctrl_hci->int_mask;
            spin_unlock_irqrestore(&ctrl_hci->int_lock, flags);
            HCI_IRQ_SET_MASK(ctrl_hci, regval);
            mutex_unlock(&ctrl_hci->hci_mutex);
        }
        else
        {
            ctrl_hci->int_status |= SSV6XXX_INT_RESOURCE_LOW;
            smp_mb();
            spin_unlock_irqrestore(&ctrl_hci->int_lock, flags);
            mutex_unlock(&ctrl_hci->hci_mutex);
            ctrl_hci->shi->if_ops->trigger_tx_rx(ctrl_hci->shi->dev);
        }
    }
    else
    {
        spin_unlock_irqrestore(&ctrl_hci->int_lock, flags);
        mutex_unlock(&ctrl_hci->hci_mutex);
    }
#else
    {
        u32 bitno;
        bitno = ssv6xxx_hci_get_int_bitno(txqid);
        if ((ctrl_hci->int_mask & BIT(bitno)) == 0)
        {
            if (ctrl_hci->shi->if_ops->trigger_tx_rx == NULL)
            {
                queue_work(ctrl_hci->hci_work_queue, &ctrl_hci->hci_tx_work[txqid]);
            }
            else
            {
                ctrl_hci->int_status |= BIT(bitno);
                smp_mb();
                ctrl_hci->shi->if_ops->trigger_tx_rx(ctrl_hci->shi->dev);
            }
        }
    }
    spin_unlock_irqrestore(&ctrl_hci->int_lock, flags);
#endif
    return qlen;
}
static bool ssv6xxx_hci_is_txq_empty(int txqid)
{
    struct ssv_hw_txq *hw_txq;
    BUG_ON(txqid >= SSV_HW_TXQ_NUM);
    if (txqid >= SSV_HW_TXQ_NUM)
        return false;
    hw_txq = &ctrl_hci->hw_txq[txqid];
    if (skb_queue_len(&hw_txq->qhead) <= 0)
        return true;
    return false;
}
static int ssv6xxx_hci_txq_flush(u32 txq_mask)
{
    struct ssv_hw_txq *hw_txq;
    struct sk_buff *skb = NULL;
    int txqid;
    for (txqid = 0; txqid < SSV_HW_TXQ_NUM; txqid++) {
        if ((txq_mask & (1 << txqid)) != 0)
            continue;
        hw_txq = &ctrl_hci->hw_txq[txqid];
        while ((skb = skb_dequeue(&hw_txq->qhead))) {
            ctrl_hci->shi->hci_tx_buf_free_cb(skb,
                        ctrl_hci->shi->tx_buf_free_args);
        }
    }
    return 0;
}
static int ssv6xxx_hci_txq_flush_by_sta(int aid)
{
    return 0;
}
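/*
 * Pause/resume of hardware TX queues: bits 0..4 of txq_mask select the
 * queues; the accumulated mask is marked on the software queues and
 * mirrored to bits 16..20 of ADR_MTX_MISC_EN, which presumably halts or
 * restarts the corresponding queues in the MAC TX engine.
 */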
static int ssv6xxx_hci_txq_pause(u32 txq_mask)
{
    struct ssv_hw_txq *hw_txq;
    int txqid;
    mutex_lock(&ctrl_hci->txq_mask_lock);
    ctrl_hci->txq_mask |= (txq_mask & 0x1F);
    for (txqid = 0; txqid < SSV_HW_TXQ_NUM; txqid++) {
        if ((ctrl_hci->txq_mask & (1 << txqid)) == 0)
            continue;
        hw_txq = &ctrl_hci->hw_txq[txqid];
        hw_txq->paused = true;
    }
    HCI_REG_SET_BITS(ctrl_hci, ADR_MTX_MISC_EN,
        (ctrl_hci->txq_mask << 16), (0x1F << 16));
    mutex_unlock(&ctrl_hci->txq_mask_lock);
    return 0;
}
static int ssv6xxx_hci_txq_resume(u32 txq_mask)
{
    struct ssv_hw_txq *hw_txq;
    int txqid;
    mutex_lock(&ctrl_hci->txq_mask_lock);
    ctrl_hci->txq_mask &= ~(txq_mask & 0x1F);
    for (txqid = 0; txqid < SSV_HW_TXQ_NUM; txqid++) {
        if ((ctrl_hci->txq_mask & (1 << txqid)) != 0)
            continue;
        hw_txq = &ctrl_hci->hw_txq[txqid];
        hw_txq->paused = false;
    }
    HCI_REG_SET_BITS(ctrl_hci, ADR_MTX_MISC_EN,
        (ctrl_hci->txq_mask << 16), (0x1F << 16));
    mutex_unlock(&ctrl_hci->txq_mask_lock);
    return 0;
}
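/*
 * Drain up to max_count frames from one software TX queue to the interface.
 * Each frame is charged against the hardware page/ID budget in
 * phw_resource; when the budget is exhausted the frame is requeued and the
 * loop stops. Frames sent successfully (except ID_TRAP_SW_TXTPUT test
 * frames) are collected and handed back through hci_tx_cb.
 */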
static int ssv6xxx_hci_xmit(struct ssv_hw_txq *hw_txq, int max_count, struct ssv6xxx_hw_resource *phw_resource)
{
    struct sk_buff_head tx_cb_list;
    struct sk_buff *skb = NULL;
    int tx_count, ret, page_count;
    struct ssv6200_tx_desc *tx_desc = NULL;
    ctrl_hci->xmit_running = 1;
    skb_queue_head_init(&tx_cb_list);
    for (tx_count = 0; tx_count < max_count; tx_count++) {
        if (ctrl_hci->hci_start == false) {
            printk("ssv6xxx_hci_xmit - hci_start = false\n");
            goto xmit_out;
        }
        skb = skb_dequeue(&hw_txq->qhead);
        if (!skb) {
            printk("ssv6xxx_hci_xmit - queue empty\n");
            goto xmit_out;
        }
        page_count = (skb->len + SSV6200_ALLOC_RSVD);
        if (page_count & HW_MMU_PAGE_MASK)
            page_count = (page_count >> HW_MMU_PAGE_SHIFT) + 1;
        else
            page_count = page_count >> HW_MMU_PAGE_SHIFT;
        if (page_count > (SSV6200_PAGE_TX_THRESHOLD / 2))
            printk(KERN_ERR "Asking page %d(%d) exceeds resource limit %d.\n",
                   page_count, skb->len, (SSV6200_PAGE_TX_THRESHOLD / 2));
        if ((phw_resource->free_tx_page < page_count) || (phw_resource->free_tx_id <= 0) || (phw_resource->max_tx_frame[hw_txq->txq_no] <= 0))
        {
            skb_queue_head(&hw_txq->qhead, skb);
            break;
        }
        phw_resource->free_tx_page -= page_count;
        phw_resource->free_tx_id--;
        phw_resource->max_tx_frame[hw_txq->txq_no]--;
        tx_desc = (struct ssv6200_tx_desc *)skb->data;
#if 1
        if (ctrl_hci->shi->hci_skb_update_cb != NULL && tx_desc->reason != ID_TRAP_SW_TXTPUT)
        {
            ctrl_hci->shi->hci_skb_update_cb(skb, ctrl_hci->shi->skb_update_args);
        }
#endif
        ret = IF_SEND(ctrl_hci, (void *)skb->data, skb->len, hw_txq->txq_no);
        if (ret < 0) {
            printk(KERN_ALERT "ssv6xxx_hci_xmit fail......\n");
            skb_queue_head(&hw_txq->qhead, skb);
            break;
        }
        if (tx_desc->reason != ID_TRAP_SW_TXTPUT)
            skb_queue_tail(&tx_cb_list, skb);
        else
            ssv_skb_free(skb);
        hw_txq->tx_pkt++;
#ifdef CONFIG_IRQ_DEBUG_COUNT
        if (ctrl_hci->irq_enable)
            ctrl_hci->irq_tx_pkt_count++;
#endif
        if (!(hw_txq->tx_flags & HCI_FLAGS_NO_FLOWCTRL)) {
            if (skb_queue_len(&hw_txq->qhead) < hw_txq->resum_thres) {
                ctrl_hci->shi->hci_tx_flow_ctrl_cb(
                    ctrl_hci->shi->tx_fctrl_cb_args,
                    hw_txq->txq_no, false, 2000);
            }
        }
    }
xmit_out:
    if (ctrl_hci->shi->hci_tx_cb && tx_desc && tx_desc->reason != ID_TRAP_SW_TXTPUT) {
        ctrl_hci->shi->hci_tx_cb(&tx_cb_list,
            ctrl_hci->shi->tx_cb_args);
    }
    ctrl_hci->xmit_running = 0;
    return tx_count;
}
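/*
 * TX scheduler for one software queue: read the hardware TX resource
 * registers (ADR_TX_ID_ALL_INFO2 for the manager queue, ADR_TX_ID_ALL_INFO
 * for the four EDCA queues), convert them into a free page/ID/frame budget,
 * then hand the queue to ssv6xxx_hci_xmit(). Notifies the upper layer when
 * the queue drains.
 */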
static int ssv6xxx_hci_tx_handler(void *dev, int max_count)
{
    struct ssv6xxx_hci_txq_info txq_info;
    struct ssv6xxx_hci_txq_info2 txq_info2;
    struct ssv6xxx_hw_resource hw_resource;
    struct ssv_hw_txq *hw_txq = dev;
    int ret, tx_count = 0;
    max_count = skb_queue_len(&hw_txq->qhead);
    if (max_count == 0)
        return 0;
    if (hw_txq->txq_no == 4)
    {
        ret = HCI_REG_READ(ctrl_hci, ADR_TX_ID_ALL_INFO2, (u32 *)&txq_info2);
        if (ret < 0) {
            ctrl_hci->read_rs1_info_fail++;
            return 0;
        }
        //BUG_ON(SSV6200_PAGE_TX_THRESHOLD < txq_info2.tx_use_page);
        //BUG_ON(SSV6200_ID_TX_THRESHOLD < txq_info2.tx_use_id);
        if (SSV6200_PAGE_TX_THRESHOLD < txq_info2.tx_use_page)
            return 0;
        if (SSV6200_ID_TX_THRESHOLD < txq_info2.tx_use_id)
            return 0;
        hw_resource.free_tx_page =
            SSV6200_PAGE_TX_THRESHOLD - txq_info2.tx_use_page;
        hw_resource.free_tx_id = SSV6200_ID_TX_THRESHOLD - txq_info2.tx_use_id;
        hw_resource.max_tx_frame[4] = SSV6200_ID_MANAGER_QUEUE - txq_info2.txq4_size;
    }
    else
    {
        ret = HCI_REG_READ(ctrl_hci, ADR_TX_ID_ALL_INFO, (u32 *)&txq_info);
        if (ret < 0) {
            ctrl_hci->read_rs0_info_fail++;
            return 0;
        }
        //BUG_ON(SSV6200_PAGE_TX_THRESHOLD < txq_info.tx_use_page);
        //BUG_ON(SSV6200_ID_TX_THRESHOLD < txq_info.tx_use_id);
        if (SSV6200_PAGE_TX_THRESHOLD < txq_info.tx_use_page)
            return 0;
        if (SSV6200_ID_TX_THRESHOLD < txq_info.tx_use_id)
            return 0;
        hw_resource.free_tx_page = SSV6200_PAGE_TX_THRESHOLD - txq_info.tx_use_page;
        hw_resource.free_tx_id = SSV6200_ID_TX_THRESHOLD - txq_info.tx_use_id;
        hw_resource.max_tx_frame[0] =
            SSV6200_ID_AC_BK_OUT_QUEUE - txq_info.txq0_size;
        hw_resource.max_tx_frame[1] =
            SSV6200_ID_AC_BE_OUT_QUEUE - txq_info.txq1_size;
        hw_resource.max_tx_frame[2] =
            SSV6200_ID_AC_VI_OUT_QUEUE - txq_info.txq2_size;
        hw_resource.max_tx_frame[3] =
            SSV6200_ID_AC_VO_OUT_QUEUE - txq_info.txq3_size;
        BUG_ON(hw_resource.max_tx_frame[3] < 0);
        BUG_ON(hw_resource.max_tx_frame[2] < 0);
        BUG_ON(hw_resource.max_tx_frame[1] < 0);
        BUG_ON(hw_resource.max_tx_frame[0] < 0);
    }
    {
#ifdef CONFIG_IRQ_DEBUG_COUNT
        if (ctrl_hci->irq_enable)
            ctrl_hci->real_tx_irq_count++;
#endif
        tx_count = ssv6xxx_hci_xmit(hw_txq, max_count, &hw_resource);
    }
    if ((ctrl_hci->shi->hci_tx_q_empty_cb != NULL)
        && (skb_queue_len(&hw_txq->qhead) == 0))
    {
        ctrl_hci->shi->hci_tx_q_empty_cb(hw_txq->txq_no, ctrl_hci->shi->tx_q_empty_args);
    }
    return tx_count;
}
void ssv6xxx_hci_tx_work(struct work_struct *work)
{
#ifdef CONFIG_SSV_TX_LOWTHRESHOLD
    ssv6xxx_hci_irq_register(SSV6XXX_INT_RESOURCE_LOW);
#else
    int txqid;
    for (txqid = SSV_HW_TXQ_NUM - 1; txqid >= 0; txqid--) {
        u32 bitno;
        if (&ctrl_hci->hci_tx_work[txqid] != work)
            continue;
        bitno = ssv6xxx_hci_get_int_bitno(txqid);
        ssv6xxx_hci_irq_register(1 << (bitno));
        break;
    }
#endif
}
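/*
 * RX path shared by the ISR variants: while the RX interrupt is asserted
 * (at most 32 iterations), pull one frame from the interface into the
 * preallocated rx_buf, replace rx_buf with a fresh skb, and pass the frame
 * to the upper layer, either batched in a local list or one at a time
 * depending on USE_THREAD_RX/USE_BATCH_RX.
 */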
static int _do_rx(struct ssv6xxx_hci_ctrl *hctl, u32 isr_status)
{
#if !defined(USE_THREAD_RX) || defined(USE_BATCH_RX)
    struct sk_buff_head rx_list;
#endif
    struct sk_buff *rx_mpdu;
    int rx_cnt, ret = 0;
    size_t dlen;
    u32 status = isr_status;
#ifdef CONFIG_SSV6XXX_DEBUGFS
    struct timespec rx_io_start_time, rx_io_end_time, rx_io_diff_time;
    struct timespec rx_proc_start_time, rx_proc_end_time, rx_proc_diff_time;
#endif
#if !defined(USE_THREAD_RX) || defined(USE_BATCH_RX)
    skb_queue_head_init(&rx_list);
#endif
    for (rx_cnt = 0; (status & SSV6XXX_INT_RX) && (rx_cnt < 32); rx_cnt++)
    {
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (hctl->isr_mib_enable)
            getnstimeofday(&rx_io_start_time);
#endif
        ret = IF_RECV(hctl, hctl->rx_buf->data, &dlen);
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (hctl->isr_mib_enable)
            getnstimeofday(&rx_io_end_time);
#endif
        if (ret < 0 || dlen <= 0)
        {
            printk("%s(): IF_RECV() returns %d (dlen=%d)\n", __FUNCTION__,
                   ret, (int)dlen);
            if (ret != -84 || dlen > MAX_FRAME_SIZE)
                break;
        }
        rx_mpdu = hctl->rx_buf;
        hctl->rx_buf = ssv_skb_alloc(MAX_FRAME_SIZE);
        if (hctl->rx_buf == NULL)
        {
            printk(KERN_ERR "RX buffer allocation failure!\n");
            hctl->rx_buf = rx_mpdu;
            break;
        }
        hctl->rx_pkt++;
#ifdef CONFIG_IRQ_DEBUG_COUNT
        if (hctl->irq_enable)
            hctl->irq_rx_pkt_count++;
#endif
        skb_put(rx_mpdu, dlen);
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (hctl->isr_mib_enable)
            getnstimeofday(&rx_proc_start_time);
#endif
#if !defined(USE_THREAD_RX) || defined(USE_BATCH_RX)
        __skb_queue_tail(&rx_list, rx_mpdu);
#else
        hctl->shi->hci_rx_cb(rx_mpdu, hctl->shi->rx_cb_args);
#endif
        HCI_IRQ_STATUS(hctl, &status);
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (hctl->isr_mib_enable)
        {
            getnstimeofday(&rx_proc_end_time);
            hctl->isr_rx_io_count++;
            rx_io_diff_time = timespec_sub(rx_io_end_time, rx_io_start_time);
            hctl->isr_rx_io_time += timespec_to_ns(&rx_io_diff_time);
            rx_proc_diff_time = timespec_sub(rx_proc_end_time, rx_proc_start_time);
            hctl->isr_rx_proc_time += timespec_to_ns(&rx_proc_diff_time);
        }
#endif
    }
#if !defined(USE_THREAD_RX) || defined(USE_BATCH_RX)
#ifdef CONFIG_SSV6XXX_DEBUGFS
    if (hctl->isr_mib_enable)
        getnstimeofday(&rx_proc_start_time);
#endif
    hctl->shi->hci_rx_cb(&rx_list, hctl->shi->rx_cb_args);
#ifdef CONFIG_SSV6XXX_DEBUGFS
    if (hctl->isr_mib_enable)
    {
        getnstimeofday(&rx_proc_end_time);
        rx_proc_diff_time = timespec_sub(rx_proc_end_time, rx_proc_start_time);
        hctl->isr_rx_proc_time += timespec_to_ns(&rx_proc_diff_time);
    }
#endif
#endif
    return ret;
}
static void ssv6xxx_hci_rx_work(struct work_struct *work)
{
#if !defined(USE_THREAD_RX) || defined(USE_BATCH_RX)
    struct sk_buff_head rx_list;
#endif
    struct sk_buff *rx_mpdu;
    int rx_cnt, ret;
    size_t dlen;
    u32 status;
#ifdef CONFIG_SSV6XXX_DEBUGFS
    struct timespec rx_io_start_time, rx_io_end_time, rx_io_diff_time;
    struct timespec rx_proc_start_time, rx_proc_end_time, rx_proc_diff_time;
#endif
    ctrl_hci->rx_work_running = 1;
#if !defined(USE_THREAD_RX) || defined(USE_BATCH_RX)
    skb_queue_head_init(&rx_list);
#endif
    status = SSV6XXX_INT_RX;
    for (rx_cnt = 0; (status & SSV6XXX_INT_RX) && (rx_cnt < 32); rx_cnt++) {
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (ctrl_hci->isr_mib_enable)
            getnstimeofday(&rx_io_start_time);
#endif
        ret = IF_RECV(ctrl_hci, ctrl_hci->rx_buf->data, &dlen);
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (ctrl_hci->isr_mib_enable)
            getnstimeofday(&rx_io_end_time);
#endif
        if (ret < 0 || dlen <= 0) {
            printk("%s(): IF_RECV() returns %d (dlen=%d)\n", __FUNCTION__,
                ret, (int)dlen);
            if (ret != -84 || dlen > MAX_FRAME_SIZE)
                break;
        }
        rx_mpdu = ctrl_hci->rx_buf;
        ctrl_hci->rx_buf = ssv_skb_alloc(MAX_FRAME_SIZE);
        if (ctrl_hci->rx_buf == NULL) {
            printk(KERN_ERR "RX buffer allocation failure!\n");
            ctrl_hci->rx_buf = rx_mpdu;
            break;
        }
        ctrl_hci->rx_pkt++;
#ifdef CONFIG_IRQ_DEBUG_COUNT
        if (ctrl_hci->irq_enable)
            ctrl_hci->irq_rx_pkt_count++;
#endif
        skb_put(rx_mpdu, dlen);
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (ctrl_hci->isr_mib_enable)
            getnstimeofday(&rx_proc_start_time);
#endif
#if !defined(USE_THREAD_RX) || defined(USE_BATCH_RX)
        __skb_queue_tail(&rx_list, rx_mpdu);
#else
        ctrl_hci->shi->hci_rx_cb(rx_mpdu, ctrl_hci->shi->rx_cb_args);
#endif
        HCI_IRQ_STATUS(ctrl_hci, &status);
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (ctrl_hci->isr_mib_enable)
        {
            getnstimeofday(&rx_proc_end_time);
            ctrl_hci->isr_rx_io_count++;
            rx_io_diff_time = timespec_sub(rx_io_end_time, rx_io_start_time);
            ctrl_hci->isr_rx_io_time += timespec_to_ns(&rx_io_diff_time);
            rx_proc_diff_time = timespec_sub(rx_proc_end_time, rx_proc_start_time);
            ctrl_hci->isr_rx_proc_time += timespec_to_ns(&rx_proc_diff_time);
        }
#endif
    }
#if !defined(USE_THREAD_RX) || defined(USE_BATCH_RX)
#ifdef CONFIG_SSV6XXX_DEBUGFS
    if (ctrl_hci->isr_mib_enable)
        getnstimeofday(&rx_proc_start_time);
#endif
    ctrl_hci->shi->hci_rx_cb(&rx_list, ctrl_hci->shi->rx_cb_args);
#ifdef CONFIG_SSV6XXX_DEBUGFS
    if (ctrl_hci->isr_mib_enable)
    {
        getnstimeofday(&rx_proc_end_time);
        rx_proc_diff_time = timespec_sub(rx_proc_end_time, rx_proc_start_time);
        ctrl_hci->isr_rx_proc_time += timespec_to_ns(&rx_proc_diff_time);
    }
#endif
#endif
    ctrl_hci->rx_work_running = 0;
}
#ifdef CONFIG_SSV6XXX_DEBUGFS
static void ssv6xxx_isr_mib_reset(void)
{
    ctrl_hci->isr_mib_reset = 0;
    ctrl_hci->isr_total_time = 0;
    ctrl_hci->isr_rx_io_time = 0;
    ctrl_hci->isr_tx_io_time = 0;
    ctrl_hci->isr_rx_io_count = 0;
    ctrl_hci->isr_tx_io_count = 0;
    ctrl_hci->isr_rx_proc_time = 0;
}
static int hw_txq_len_open(struct inode *inode, struct file *filp)
{
    filp->private_data = inode->i_private;
    return 0;
}
static ssize_t hw_txq_len_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos)
{
    ssize_t ret;
    struct ssv6xxx_hci_ctrl *hctl = (struct ssv6xxx_hci_ctrl *)filp->private_data;
    char *summary_buf = kzalloc(1024, GFP_KERNEL);
    char *prn_ptr = summary_buf;
    int prt_size;
    int buf_size = 1024;
    int i = 0;
    if (!summary_buf)
        return -ENOMEM;
    for (i = 0; i < SSV_HW_TXQ_NUM; i++)
    {
        prt_size = snprintf(prn_ptr, buf_size, "\n\rhw_txq%d_len: %d", i,
                            skb_queue_len(&hctl->hw_txq[i].qhead));
        prn_ptr += prt_size;
        buf_size -= prt_size;
    }
    buf_size = 1024 - buf_size;
    ret = simple_read_from_buffer(buffer, count, ppos, summary_buf, buf_size);
    kfree(summary_buf);
    return ret;
}
#if 0
static ssize_t hw_txq_len_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos)
{
    return 0;
}
#endif
struct file_operations hw_txq_len_fops = {
    .owner = THIS_MODULE,
    .open = hw_txq_len_open,
    .read = hw_txq_len_read,
};
bool ssv6xxx_hci_init_debugfs(struct dentry *dev_debugfs_dir)
{
    ctrl_hci->debugfs_dir = debugfs_create_dir("hci", dev_debugfs_dir);
    if (ctrl_hci->debugfs_dir == NULL)
    {
        dev_err(ctrl_hci->shi->dev, "Failed to create HCI debugfs directory.\n");
        return false;
    }
    debugfs_create_u32("TXQ_mask", 00444, ctrl_hci->debugfs_dir, &ctrl_hci->txq_mask);
    debugfs_create_u32("hci_isr_mib_enable", 00644, ctrl_hci->debugfs_dir, &ctrl_hci->isr_mib_enable);
    debugfs_create_u32("hci_isr_mib_reset", 00644, ctrl_hci->debugfs_dir, &ctrl_hci->isr_mib_reset);
    debugfs_create_u64("isr_total_time", 00444, ctrl_hci->debugfs_dir, &ctrl_hci->isr_total_time);
    debugfs_create_u64("tx_io_time", 00444, ctrl_hci->debugfs_dir, &ctrl_hci->isr_tx_io_time);
    debugfs_create_u64("rx_io_time", 00444, ctrl_hci->debugfs_dir, &ctrl_hci->isr_rx_io_time);
    debugfs_create_u32("tx_io_count", 00444, ctrl_hci->debugfs_dir, &ctrl_hci->isr_tx_io_count);
    debugfs_create_u32("rx_io_count", 00444, ctrl_hci->debugfs_dir, &ctrl_hci->isr_rx_io_count);
    debugfs_create_u64("rx_proc_time", 00444, ctrl_hci->debugfs_dir, &ctrl_hci->isr_rx_proc_time);
    debugfs_create_file("hw_txq_len", 00444, ctrl_hci->debugfs_dir, ctrl_hci, &hw_txq_len_fops);
    return true;
}
void ssv6xxx_hci_deinit_debugfs(void)
{
    if (ctrl_hci->debugfs_dir == NULL)
        return;
    ctrl_hci->debugfs_dir = NULL;
}
#endif
static int _isr_do_rx(struct ssv6xxx_hci_ctrl *hctl, u32 isr_status)
{
    int status;
    u32 before = jiffies;
#ifdef CONFIG_IRQ_DEBUG_COUNT
    if (hctl->irq_enable)
        hctl->rx_irq_count++;
#endif
    if (hctl->isr_summary_eable
        && hctl->prev_rx_isr_jiffes) {
        if (hctl->isr_rx_idle_time) {
            hctl->isr_rx_idle_time += (jiffies - hctl->prev_rx_isr_jiffes);
            hctl->isr_rx_idle_time = hctl->isr_rx_idle_time >> 1;
        }
        else {
            hctl->isr_rx_idle_time += (jiffies - hctl->prev_rx_isr_jiffes);
        }
    }
    status = _do_rx(hctl, isr_status);
    if (hctl->isr_summary_eable) {
        if (hctl->isr_rx_time) {
            hctl->isr_rx_time += (jiffies - before);
            hctl->isr_rx_time = hctl->isr_rx_time >> 1;
        }
        else {
            hctl->isr_rx_time += (jiffies - before);
        }
        hctl->prev_rx_isr_jiffes = jiffies;
    }
    return status;
}
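/*
 * TX service routines called from the ISR. The CONFIG_SSV_TX_LOWTHRESHOLD
 * variant services every queue when the "resource low" interrupt fires and
 * masks that interrupt again once all software queues are empty; the
 * per-queue variant services only the queues whose interrupt bit is set in
 * "status" and masks a queue's bit once that queue has drained.
 */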
#ifdef CONFIG_SSV_TX_LOWTHRESHOLD
static int _do_tx(struct ssv6xxx_hci_ctrl *hctl, u32 status)
{
    int q_num;
    int tx_count = 0;
    u32 to_disable_int = 1;
    unsigned long flags;
    struct ssv_hw_txq *hw_txq;
#ifdef CONFIG_SSV6XXX_DEBUGFS
    struct timespec tx_io_start_time, tx_io_end_time, tx_io_diff_time;
#endif
#ifdef CONFIG_IRQ_DEBUG_COUNT
    if ((!(status & SSV6XXX_INT_RX)) && hctl->irq_enable)
        hctl->tx_irq_count++;
#endif
    if ((status & SSV6XXX_INT_RESOURCE_LOW) == 0)
        return 0;
    for (q_num = (SSV_HW_TXQ_NUM - 1); q_num >= 0; q_num--)
    {
        u32 before = jiffies;
        hw_txq = &hctl->hw_txq[q_num];
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (hctl->isr_mib_enable)
            getnstimeofday(&tx_io_start_time);
#endif
        tx_count += ssv6xxx_hci_tx_handler(hw_txq, 999);
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (hctl->isr_mib_enable)
        {
            getnstimeofday(&tx_io_end_time);
            tx_io_diff_time = timespec_sub(tx_io_end_time, tx_io_start_time);
            hctl->isr_tx_io_time += timespec_to_ns(&tx_io_diff_time);
        }
#endif
        if (hctl->isr_summary_eable)
        {
            if (hctl->isr_tx_time)
            {
                hctl->isr_tx_time += (jiffies - before);
                hctl->isr_tx_time = hctl->isr_tx_time >> 1;
            }
            else
            {
                hctl->isr_tx_time += (jiffies - before);
            }
        }
    }
    mutex_lock(&hctl->hci_mutex);
    spin_lock_irqsave(&hctl->int_lock, flags);
    for (q_num = (SSV_HW_TXQ_NUM - 1); q_num >= 0; q_num--)
    {
        hw_txq = &hctl->hw_txq[q_num];
        if (skb_queue_len(&hw_txq->qhead) > 0)
        {
            to_disable_int = 0;
            break;
        }
    }
    if (to_disable_int)
    {
        u32 reg_val;
#ifdef CONFIG_TRIGGER_LOW_SDIO_LOADING
        hctl->int_mask &= ~(SSV6XXX_INT_RESOURCE_LOW);
#else
        hctl->int_mask &= ~(SSV6XXX_INT_RESOURCE_LOW | SSV6XXX_INT_TX);
#endif
        reg_val = ~hctl->int_mask;
        spin_unlock_irqrestore(&hctl->int_lock, flags);
        HCI_IRQ_SET_MASK(hctl, reg_val);
    }
    else
    {
        spin_unlock_irqrestore(&hctl->int_lock, flags);
    }
    mutex_unlock(&hctl->hci_mutex);
    return tx_count;
}
#else
static int _do_tx(struct ssv6xxx_hci_ctrl *hctl, u32 status)
{
    int q_num;
    int tx_count = 0;
#ifdef CONFIG_SSV6XXX_DEBUGFS
    struct timespec tx_io_start_time, tx_io_end_time, tx_io_diff_time;
#endif
#ifdef CONFIG_IRQ_DEBUG_COUNT
    if ((!(status & SSV6XXX_INT_RX)) && hctl->irq_enable)
        hctl->tx_irq_count++;
#endif
    for (q_num = (SSV_HW_TXQ_NUM - 1); q_num >= 0; q_num--)
    {
        int bitno;
        struct ssv_hw_txq *hw_txq;
        unsigned long flags;
        u32 before = jiffies;
        hw_txq = &hctl->hw_txq[q_num];
        bitno = ssv6xxx_hci_get_int_bitno(hw_txq->txq_no);
        if ((status & BIT(bitno)) == 0)
            continue;
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (hctl->isr_mib_enable)
        {
            getnstimeofday(&tx_io_start_time);
        }
#endif
        tx_count += ssv6xxx_hci_tx_handler(hw_txq, 999);
        mutex_lock(&hctl->hci_mutex);
        spin_lock_irqsave(&hctl->int_lock, flags);
        if (skb_queue_len(&hw_txq->qhead) <= 0)
        {
            u32 reg_val;
            hctl->int_mask &= ~(1 << bitno);
            reg_val = ~hctl->int_mask;
            spin_unlock_irqrestore(&hctl->int_lock, flags);
            HCI_IRQ_SET_MASK(hctl, reg_val);
        }
        else
        {
            spin_unlock_irqrestore(&hctl->int_lock, flags);
        }
        mutex_unlock(&hctl->hci_mutex);
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (hctl->isr_mib_enable)
        {
            getnstimeofday(&tx_io_end_time);
            tx_io_diff_time = timespec_sub(tx_io_end_time, tx_io_start_time);
            hctl->isr_tx_io_time += timespec_to_ns(&tx_io_diff_time);
        }
#endif
        if (hctl->isr_summary_eable)
        {
            if (hctl->isr_tx_time)
            {
                hctl->isr_tx_time += (jiffies - before);
                hctl->isr_tx_time = hctl->isr_tx_time >> 1;
            }
            else
            {
                hctl->isr_tx_time += (jiffies - before);
            }
        }
    }
    return tx_count;
}
#endif
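/*
 * Top-level interrupt handler registered through HCI_IRQ_REQUEST().
 * It dispatches RX work via _isr_do_rx() and TX work via _do_tx(), and
 * maintains the optional ISR timing/miss statistics. Two builds exist:
 * the CONFIG_TRIGGER_LOW_SDIO_LOADING variant handles a single pass per
 * invocation, while the default variant loops on the interrupt status
 * register until no enabled interrupt remains pending.
 */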
#ifdef CONFIG_TRIGGER_LOW_SDIO_LOADING
irqreturn_t ssv6xxx_hci_isr(int irq, void *args)
{
    struct ssv6xxx_hci_ctrl *hctl = args;
    u32 status;
    int ret = IRQ_HANDLED;
    bool dbg_isr_miss = true;
#ifdef CONFIG_SSV6XXX_DEBUGFS
    struct timespec start_time, end_time, diff_time;
#endif
    ctrl_hci->isr_running = 1;
    if (ctrl_hci->isr_summary_eable && ctrl_hci->prev_isr_jiffes) {
        if (ctrl_hci->isr_idle_time) {
            ctrl_hci->isr_idle_time +=
                (jiffies - ctrl_hci->prev_isr_jiffes);
            ctrl_hci->isr_idle_time = ctrl_hci->isr_idle_time >> 1;
        } else {
            ctrl_hci->isr_idle_time +=
                (jiffies - ctrl_hci->prev_isr_jiffes);
        }
    }
    BUG_ON(!args);
#ifdef CONFIG_SSV6XXX_DEBUGFS
    if (hctl->isr_mib_reset)
        ssv6xxx_isr_mib_reset();
    if (hctl->isr_mib_enable)
        getnstimeofday(&start_time);
#endif
#ifdef CONFIG_IRQ_DEBUG_COUNT
    if (ctrl_hci->irq_enable)
        ctrl_hci->irq_count++;
#endif
    if ((hctl->int_mask & SSV6XXX_INT_RESOURCE_LOW) == 0) {
        ret = _isr_do_rx(hctl, SSV6XXX_INT_RX);
        if (ret < 0) {
            printk("do_rx failed\n");
            goto out;
        }
    } else {
        HCI_IRQ_STATUS(hctl, &status);
        if (status & SSV6XXX_INT_RX) {
            ret = _isr_do_rx(hctl, SSV6XXX_INT_RX);
            if (ret < 0) {
                printk("do_rx failed\n");
                goto out;
            }
        }
        if (hctl->int_mask & SSV6XXX_INT_RESOURCE_LOW) {
            ret = _do_tx(hctl, SSV6XXX_INT_RESOURCE_LOW);
            if (ret < 0) {
                goto out;
            }
        }
    }
#ifdef CONFIG_SSV6XXX_DEBUGFS
    if (ctrl_hci->isr_mib_enable) {
        getnstimeofday(&end_time);
        diff_time = timespec_sub(end_time, start_time);
        ctrl_hci->isr_total_time += timespec_to_ns(&diff_time);
    }
#endif
    if (ctrl_hci->isr_summary_eable) {
        if (dbg_isr_miss)
            ctrl_hci->isr_miss_cnt++;
        ctrl_hci->prev_isr_jiffes = jiffies;
    }
out:
    ctrl_hci->isr_running = 0;
    return IRQ_NONE;
}
#else
irqreturn_t ssv6xxx_hci_isr(int irq, void *args)
{
    struct ssv6xxx_hci_ctrl *hctl = args;
    u32 status;
    unsigned long flags;
    int ret = IRQ_HANDLED;
    bool dbg_isr_miss = true;
    if (ctrl_hci->isr_summary_eable
        && ctrl_hci->prev_isr_jiffes) {
        if (ctrl_hci->isr_idle_time) {
            ctrl_hci->isr_idle_time += (jiffies - ctrl_hci->prev_isr_jiffes);
            ctrl_hci->isr_idle_time = ctrl_hci->isr_idle_time >> 1;
        }
        else {
            ctrl_hci->isr_idle_time += (jiffies - ctrl_hci->prev_isr_jiffes);
        }
    }
    BUG_ON(!args);
    do {
#ifdef CONFIG_SSV6XXX_DEBUGFS
        struct timespec start_time, end_time, diff_time;
        if (hctl->isr_mib_reset)
            ssv6xxx_isr_mib_reset();
        if (hctl->isr_mib_enable)
            getnstimeofday(&start_time);
#endif
#ifdef CONFIG_IRQ_DEBUG_COUNT
        if (ctrl_hci->irq_enable)
            ctrl_hci->irq_count++;
#endif
        mutex_lock(&hctl->hci_mutex);
        if (hctl->int_status)
        {
            u32 regval;
            spin_lock_irqsave(&hctl->int_lock, flags);
            hctl->int_mask |= hctl->int_status;
            hctl->int_status = 0;
            regval = ~ctrl_hci->int_mask;
            smp_mb();
            spin_unlock_irqrestore(&hctl->int_lock, flags);
            HCI_IRQ_SET_MASK(hctl, regval);
        }
        ret = HCI_IRQ_STATUS(hctl, &status);
        if ((ret < 0) || ((status & hctl->int_mask) == 0)) {
#ifdef CONFIG_IRQ_DEBUG_COUNT
            if (ctrl_hci->irq_enable)
                ctrl_hci->invalid_irq_count++;
#endif
            mutex_unlock(&hctl->hci_mutex);
            ret = IRQ_NONE;
            break;
        }
        spin_lock_irqsave(&hctl->int_lock, flags);
        status &= hctl->int_mask;
        spin_unlock_irqrestore(&hctl->int_lock, flags);
        mutex_unlock(&hctl->hci_mutex);
        ctrl_hci->isr_running = 1;
        if (status & SSV6XXX_INT_RX) {
            ret = _isr_do_rx(hctl, status);
            if (ret < 0) {
                ret = IRQ_NONE;
                break;
            }
            dbg_isr_miss = false;
        }
        if (_do_tx(hctl, status))
        {
            dbg_isr_miss = false;
        }
        ctrl_hci->isr_running = 0;
#ifdef CONFIG_SSV6XXX_DEBUGFS
        if (ctrl_hci->isr_mib_enable)
        {
            getnstimeofday(&end_time);
            diff_time = timespec_sub(end_time, start_time);
            ctrl_hci->isr_total_time += timespec_to_ns(&diff_time);
        }
#endif
    } while (1);
    if (ctrl_hci->isr_summary_eable) {
        if (dbg_isr_miss)
            ctrl_hci->isr_miss_cnt++;
        ctrl_hci->prev_isr_jiffes = jiffies;
    }
    return ret;
}
#endif
static struct ssv6xxx_hci_ops hci_ops =
{
    .hci_start = ssv6xxx_hci_start,
    .hci_stop = ssv6xxx_hci_stop,
    .hci_read_word = ssv6xxx_hci_read_word,
    .hci_write_word = ssv6xxx_hci_write_word,
    .hci_tx = ssv6xxx_hci_enqueue,
    .hci_tx_pause = ssv6xxx_hci_txq_pause,
    .hci_tx_resume = ssv6xxx_hci_txq_resume,
    .hci_txq_flush = ssv6xxx_hci_txq_flush,
    .hci_txq_flush_by_sta = ssv6xxx_hci_txq_flush_by_sta,
    .hci_txq_empty = ssv6xxx_hci_is_txq_empty,
    .hci_load_fw = ssv6xxx_hci_load_fw,
    .hci_pmu_wakeup = ssv6xxx_hci_pmu_wakeup,
    .hci_send_cmd = ssv6xxx_hci_send_cmd,
    .hci_write_sram = ssv6xxx_hci_write_sram,
#ifdef CONFIG_SSV6XXX_DEBUGFS
    .hci_init_debugfs = ssv6xxx_hci_init_debugfs,
    .hci_deinit_debugfs = ssv6xxx_hci_deinit_debugfs,
#endif
    .hci_interface_reset = ssv6xxx_hci_interface_reset,
};
int ssv6xxx_hci_deregister(void)
{
    u32 regval;
    printk("%s(): \n", __FUNCTION__);
    if (ctrl_hci->shi == NULL)
        return -1;
    regval = 1;
    ssv6xxx_hci_irq_disable();
    flush_workqueue(ctrl_hci->hci_work_queue);
    destroy_workqueue(ctrl_hci->hci_work_queue);
    ctrl_hci->shi = NULL;
    return 0;
}
EXPORT_SYMBOL(ssv6xxx_hci_deregister);
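/*
 * Attach a glue-layer ssv6xxx_hci_info to this HCI instance: publish the
 * hci_ops table, initialize the locks, software TX queues and work items,
 * program the initial interrupt mask (left disabled until hci_start), and
 * install ssv6xxx_hci_isr through HCI_IRQ_REQUEST().
 *
 * A typical caller (sketch only, not taken from this tree) would do
 * roughly:
 *
 *     struct ssv6xxx_hci_info *shi = ...; // dev, if_ops and callbacks filled in
 *     ssv6xxx_hci_register(shi);
 *     shi->hci_ops->hci_start();
 *     ...
 *     shi->hci_ops->hci_stop();
 *     ssv6xxx_hci_deregister();
 */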
int ssv6xxx_hci_register(struct ssv6xxx_hci_info *shi)
{
    int i;
    if (shi == NULL || ctrl_hci->shi)
        return -1;
    shi->hci_ops = &hci_ops;
    ctrl_hci->shi = shi;
    ctrl_hci->txq_mask = 0;
    mutex_init(&ctrl_hci->txq_mask_lock);
    mutex_init(&ctrl_hci->hci_mutex);
    spin_lock_init(&ctrl_hci->int_lock);
#ifdef CONFIG_IRQ_DEBUG_COUNT
    ctrl_hci->irq_enable = false;
    ctrl_hci->irq_count = 0;
    ctrl_hci->invalid_irq_count = 0;
    ctrl_hci->tx_irq_count = 0;
    ctrl_hci->real_tx_irq_count = 0;
    ctrl_hci->rx_irq_count = 0;
    ctrl_hci->irq_rx_pkt_count = 0;
    ctrl_hci->irq_tx_pkt_count = 0;
#endif
    for (i = 0; i < SSV_HW_TXQ_NUM; i++) {
        memset(&ctrl_hci->hw_txq[i], 0, sizeof(struct ssv_hw_txq));
        skb_queue_head_init(&ctrl_hci->hw_txq[i].qhead);
        ctrl_hci->hw_txq[i].txq_no = (u32)i;
        ctrl_hci->hw_txq[i].max_qsize = SSV_HW_TXQ_MAX_SIZE;
        ctrl_hci->hw_txq[i].resum_thres = SSV_HW_TXQ_RESUME_THRES;
    }
    ctrl_hci->hci_work_queue = create_singlethread_workqueue("ssv6xxx_hci_wq");
    INIT_WORK(&ctrl_hci->hci_rx_work, ssv6xxx_hci_rx_work);
#ifdef CONFIG_SSV_TX_LOWTHRESHOLD
    INIT_WORK(&ctrl_hci->hci_tx_work, ssv6xxx_hci_tx_work);
    ctrl_hci->int_mask = SSV6XXX_INT_RX | SSV6XXX_INT_RESOURCE_LOW;
#else
    for (i = 0; i < SSV_HW_TXQ_NUM; i++)
        INIT_WORK(&ctrl_hci->hci_tx_work[i], ssv6xxx_hci_tx_work);
    ctrl_hci->int_mask = SSV6XXX_INT_RX | SSV6XXX_INT_TX | SSV6XXX_INT_LOW_EDCA_0 |
        SSV6XXX_INT_LOW_EDCA_1 | SSV6XXX_INT_LOW_EDCA_2 | SSV6XXX_INT_LOW_EDCA_3;
#endif
    ctrl_hci->int_status = 0;
    HCI_IRQ_SET_MASK(ctrl_hci, 0xFFFFFFFF);
    ssv6xxx_hci_irq_disable();
    HCI_IRQ_REQUEST(ctrl_hci, ssv6xxx_hci_isr);
#ifdef CONFIG_SSV6XXX_DEBUGFS
    ctrl_hci->debugfs_dir = NULL;
    ctrl_hci->isr_mib_enable = false;
    ctrl_hci->isr_mib_reset = 0;
    ctrl_hci->isr_total_time = 0;
    ctrl_hci->isr_rx_io_time = 0;
    ctrl_hci->isr_tx_io_time = 0;
    ctrl_hci->isr_rx_io_count = 0;
    ctrl_hci->isr_tx_io_count = 0;
    ctrl_hci->isr_rx_proc_time = 0;
#endif
    return 0;
}
EXPORT_SYMBOL(ssv6xxx_hci_register);
#if (defined(CONFIG_SSV_SUPPORT_ANDROID) || defined(CONFIG_SSV_BUILD_AS_ONE_KO))
int ssv6xxx_hci_init(void)
#else
static int __init ssv6xxx_hci_init(void)
#endif
{
#ifdef CONFIG_SSV6200_CLI_ENABLE
    extern struct ssv6xxx_hci_ctrl *ssv_dbg_ctrl_hci;
#endif
    ctrl_hci = kzalloc(sizeof(*ctrl_hci), GFP_KERNEL);
    if (ctrl_hci == NULL)
        return -ENOMEM;
    memset((void *)ctrl_hci, 0, sizeof(*ctrl_hci));
    ctrl_hci->rx_buf = ssv_skb_alloc(MAX_FRAME_SIZE);
    if (ctrl_hci->rx_buf == NULL) {
        kfree(ctrl_hci);
        return -ENOMEM;
    }
#ifdef CONFIG_SSV6200_CLI_ENABLE
    ssv_dbg_ctrl_hci = ctrl_hci;
#endif
    return 0;
}
#if (defined(CONFIG_SSV_SUPPORT_ANDROID) || defined(CONFIG_SSV_BUILD_AS_ONE_KO))
void ssv6xxx_hci_exit(void)
#else
static void __exit ssv6xxx_hci_exit(void)
#endif
{
#ifdef CONFIG_SSV6200_CLI_ENABLE
    extern struct ssv6xxx_hci_ctrl *ssv_dbg_ctrl_hci;
#endif
    if (ctrl_hci->rx_buf) {
        ssv_skb_free(ctrl_hci->rx_buf);
        ctrl_hci->rx_buf = NULL;
    }
    kfree(ctrl_hci);
    ctrl_hci = NULL;
#ifdef CONFIG_SSV6200_CLI_ENABLE
    ssv_dbg_ctrl_hci = NULL;
#endif
}
#if (defined(CONFIG_SSV_SUPPORT_ANDROID) || defined(CONFIG_SSV_BUILD_AS_ONE_KO))
EXPORT_SYMBOL(ssv6xxx_hci_init);
EXPORT_SYMBOL(ssv6xxx_hci_exit);
#else
module_init(ssv6xxx_hci_init);
module_exit(ssv6xxx_hci_exit);
#endif