/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/tx.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>

/*
 * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
{
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
		if (!trans->txqs.txq[txq_id])
			continue;
		iwl_txq_gen2_unmap(trans, txq_id);
	}
}

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

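	/*
	 * Worked example of the math above: with 3 TBs, filled_tfd_size
	 * is 32, so DIV_ROUND_UP(32, 64) - 1 = 0, i.e. a single 64-byte
	 * chunk; with 7 TBs it is 72, so DIV_ROUND_UP(72, 64) - 1 = 1,
	 * i.e. two chunks.
	 */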
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

		/* Starting from AX210, the HW expects bytes */
		WARN_ON(trans->txqs.bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
	} else {
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

		/* Before AX210, the HW expects DW */
		WARN_ON(!trans->txqs.bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}

/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

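/*
 * Only the low 5 bits of the TFD's num_tbs field carry the TB count,
 * hence the 0x1f mask below.
 */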
static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
				   struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	lockdep_assert_held(&txq->lock);

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

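/*
 * Append one TB (addr/len pair) to @tfd. Returns the index of the new
 * TB on success, or -EINVAL when the TFD already holds the maximum
 * number of TBs.
 */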
int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	/*
	 * Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

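/*
 * Workaround pages are chained through a pointer stored in the last
 * sizeof(void *) bytes of each page and hung off the skb's cb via
 * trans->txqs.page_offs; the whole chain is torn down later by
 * iwl_txq_free_tso_page().
 */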
static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *ret;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	/* set the chaining pointer to the previous page if there */
	*(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
	*page_ptr = ret;

	return ret;
}

/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
				       struct sk_buff *skb,
				       struct iwl_tfh_tfd *tfd,
				       dma_addr_t phys, void *virt,
				       u16 len, struct iwl_cmd_meta *meta)
{
	dma_addr_t oldphys = phys;
	struct page *page;
	int ret;

	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;

	if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
		ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);

		if (ret < 0)
			goto unmap;

		if (meta)
			meta->tbs |= BIT(ret);

		ret = 0;
		goto trace;
	}

	/*
	 * Work around a hardware bug. If (as expressed in the
	 * condition above) the TB crosses a 2^32 address boundary,
	 * then the next TB may be accessed with the wrong
	 * address.
	 * To work around it, copy the data elsewhere and make
	 * a new mapping for it so the device will not fail.
	 */

	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
		ret = -ENOBUFS;
		goto unmap;
	}

	page = get_workaround_page(trans, skb);
	if (!page) {
		ret = -ENOMEM;
		goto unmap;
	}

	memcpy(page_address(page), virt, len);

	phys = dma_map_single(trans->dev, page_address(page), len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, phys))) {
		/* don't leak the original mapping */
		ret = -ENOMEM;
		goto unmap;
	}
	ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
	if (ret < 0) {
		/* unmap the new allocation as single */
		oldphys = phys;
		meta = NULL;
		goto unmap;
	}
	IWL_WARN(trans,
		 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
		 len, (unsigned long long)oldphys, (unsigned long long)phys);

	ret = 0;
unmap:
	if (meta)
		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

	return ret;
}

#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb)
{
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	if (WARN_ON(*page_ptr))
		return NULL;

	if (!p->page)
		goto alloc;

	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() in tx-gen2.c)
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	*page_ptr = p->page;
	get_page(p->page);
	return p;
}
#endif

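/*
 * Each A-MSDU subframe built below carries, in the per-CPU TSO header
 * page: [pad (0-3)][DA][SA][length][SNAP + IP + TCP header], while the
 * payload TBs point straight into the original skb data.
 */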
static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
				    struct sk_buff *skb,
				    struct iwl_tfh_tfd *tfd, int start_len,
				    u8 hdr_len,
				    struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well, which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		/*
		 * No need for _with_wa, this is from the TSO page and
		 * we leave some space at the end of it so can't hit
		 * the buggy scenario.
		 */
		iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					tb_phys, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			int ret;

			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
							  tb_phys, tso.data,
							  tb_len, NULL);
			if (ret) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}

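/*
 * TFD layout for the A-MSDU path: TB0 holds the first IWL_FIRST_TB_SIZE
 * bytes of the command (the bi-directional DMA data), TB1 the remainder
 * of the TX command plus the 802.11 header, and the following TBs the
 * per-subframe headers and payload set up by iwl_txq_gen2_build_amsdu().
 */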
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
					 struct iwl_txq *txq,
					 struct iwl_device_tx_cmd *dev_cmd,
					 struct sk_buff *skb,
					 struct iwl_cmd_meta *out_meta,
					 int hdr_len,
					 int tx_cmd_len)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
				     hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd,
				     struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		unsigned int fragsz = skb_frag_size(frag);
		int ret;

		if (!fragsz)
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   fragsz, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb_frag_address(frag),
						  fragsz, out_meta);
		if (ret)
			return ret;
	}

	return 0;
}

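/*
 * TFD layout for the non-A-MSDU path: TB0 is the bi-directional DMA
 * data, TB1 the command remainder plus the 802.11 header (dword-padded
 * unless the caller asked not to), TB2 the rest of the skb head, then
 * one TB per page fragment and per skb in the frag list.
 */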
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
				   struct iwl_txq *txq,
				   struct iwl_device_tx_cmd *dev_cmd,
				   struct sk_buff *skb,
				   struct iwl_cmd_meta *out_meta,
				   int hdr_len,
				   int tx_cmd_len,
				   bool pad)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL);
		if (ret)
			goto out_err;
	}

	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static
struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
					   struct iwl_txq *txq,
					   struct iwl_device_tx_cmd *dev_cmd,
					   struct sk_buff *skb,
					   struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						   out_meta, hdr_len, len);
	return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				     hdr_len, len, !amsdu);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

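	/*
	 * Worked example of the computation below: with
	 * max_tfd_queue_size = 256, write_ptr = 5 and read_ptr = 250,
	 * used = (5 - 250) & 255 = 11 entries in flight.
	 */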
	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (!WARN_ON_ONCE(!skb))
				iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	if (txq->bc_tbl.addr)
		dma_pool_free(trans->txqs.bc_pool,
			      txq->bc_tbl.addr, txq->bc_tbl.dma);
	kfree(txq);
}

/*
 * iwl_txq_gen2_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq;
	int i;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ON(!txq))
		return;

	iwl_txq_gen2_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_txq_gen2_free_memory(trans, txq);

	trans->txqs.txq[txq_id] = NULL;

	clear_bit(txq_id, trans->txqs.queue_used);
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_txq_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

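/*
 * Free the chain of TSO/workaround pages attached to @skb: the head of
 * the chain sits in skb->cb at trans->txqs.page_offs, and each page
 * stores a pointer to the next one in its last sizeof(void *) bytes
 * (see get_page_hdr() and get_workaround_page()).
 */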
void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		next = *(void **)(page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->trans_cfg->use_tfh) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

static void iwl_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_txq_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue)
{
	size_t tfd_sz = trans->txqs.tfd.size *
		trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	if (trans->trans_cfg->use_tfh)
		tfd_sz = trans->txqs.tfd.size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

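	/* Per-slot bounce buffers for the first transmit buffer (TB0):
	 * the opening bytes of each frame/command are copied here so
	 * they sit in a single coherent allocation the device can DMA
	 * from directly.
	 */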
	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;

err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
	txq->tfds = NULL;
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

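/*
 * iwl_txq_dyn_alloc_dma - allocate DMA resources for a dynamic TX queue
 *
 * Allocates the queue structure, its byte-count table from the shared
 * DMA pool and the queue's descriptors, then initializes the queue. The
 * caller still has to configure the queue in the firmware.
 */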
static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
				 struct iwl_txq **intxq, int size,
				 unsigned int timeout)
{
	size_t bc_tbl_size, bc_tbl_entries;
	struct iwl_txq *txq;
	int ret;

	WARN_ON(!trans->txqs.bc_tbl_size);

	bc_tbl_size = trans->txqs.bc_tbl_size;
	bc_tbl_entries = bc_tbl_size / sizeof(u16);

	if (WARN_ON(size > bc_tbl_entries))
		return -EINVAL;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;

	txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
					  &txq->bc_tbl.dma);
	if (!txq->bc_tbl.addr) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}

	ret = iwl_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	*intxq = txq;
	return 0;

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

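/*
 * iwl_txq_alloc_response - complete queue allocation from the FW response
 *
 * Parses the TX queue configuration response, validates the queue id
 * the firmware picked, marks it used and syncs the software read/write
 * pointers to the hardware write pointer. Returns the queue id on
 * success; on failure the queue memory is freed.
 */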
static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
				  struct iwl_host_cmd *hcmd)
{
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans->txqs.queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans->txqs.txq[qid] = txq;
	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

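/*
 * iwl_txq_dyn_alloc - dynamically allocate a TX queue
 *
 * Allocates the queue's DMA resources, asks the firmware to configure
 * the queue via a TX queue configuration command, and returns the queue
 * id the firmware assigned, or a negative error code.
 */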
int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size, unsigned int timeout)
{
	struct iwl_txq *txq = NULL;
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = flags,
		.sta_id = sta_id,
		.tid = tid,
	};
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(cmd) },
		.data = { &cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
	if (ret)
		return ret;

	cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	return iwl_txq_alloc_response(trans, txq, &hcmd);

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

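/*
 * iwl_txq_dyn_free - deactivate, unmap and free a dynamic TX queue
 */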
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
{
	if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
		 "queue %d out of range", queue))
		return;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_txq_gen2_unmap(trans, queue);

	iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]);

	trans->txqs.txq[queue] = NULL;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

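/*
 * iwl_txq_gen2_tx_free - free all allocated gen2 TX queues
 */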
void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
{
	int i;

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
		if (!trans->txqs.txq[i])
			continue;

		iwl_txq_gen2_free(trans, i);
	}
}

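/*
 * iwl_txq_gen2_init - allocate (on first use) and initialize a gen2 TX queue
 *
 * The queue is treated as the command queue when txq_id matches
 * trans->txqs.cmd.q_id.
 */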
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
	struct iwl_txq *queue;
	int ret;

	/* alloc and init the tx queue */
	if (!trans->txqs.txq[txq_id]) {
		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
		if (!queue) {
			IWL_ERR(trans, "Not enough memory for tx queue\n");
			return -ENOMEM;
		}
		trans->txqs.txq[txq_id] = queue;
		ret = iwl_txq_alloc(trans, queue, queue_size, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		queue = trans->txqs.txq[txq_id];
	}

	ret = iwl_txq_init(trans, queue, queue_size,
			   (txq_id == trans->txqs.cmd.q_id));
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans->txqs.txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans->txqs.queue_used);

	return 0;

error:
	iwl_txq_gen2_tx_free(trans);
	return ret;
}

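/*
 * iwl_txq_gen1_tfd_tb_get_addr - extract the DMA address of one TB
 *
 * TFH TFDs carry a plain 64-bit address; legacy TFDs split it into a
 * 32-bit low part plus four high bits packed into hi_n_len.
 */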
static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
						      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;
	dma_addr_t addr;
	dma_addr_t hi_len;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	}

	tfd = _tfd;
	tb = &tfd->tbs[idx];
	addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) <= sizeof(u32))
		return addr;

	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

	/*
	 * shift by 16 twice to avoid warnings on 32-bit
	 * (where this code never runs anyway due to the
	 * if statement above)
	 */
	return addr | ((hi_len << 16) << 16);
}

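/*
 * iwl_txq_gen1_tfd_unmap - unmap all DMA mappings of one TFD
 *
 * TB1 onwards is unmapped as a page or single mapping depending on the
 * per-TB bit in meta->tbs; TB0 points into the queue's first_tb_bufs
 * bounce buffer and must never be unmapped.
 */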
void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index)
{
	int i, num_tbs;
	void *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(trans,
								    tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(trans,
								      tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans->txqs.bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

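	/* Mirror the first TFD_QUEUE_SIZE_BC_DUP entries past the end of
	 * the table, so a scheduler fetch that wraps around the circular
	 * buffer still reads a valid byte count.
	 */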
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
			bc_ent;
}

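/*
 * iwl_txq_gen1_inval_byte_cnt_tbl - invalidate a Tx byte-count entry
 *
 * Called on reclaim: rewrites the entry at the queue's read pointer with
 * a length of 1, keeping the sta_id for non-command queues.
 */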
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->txqs.cmd.q_id)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
			bc_ent;
}