/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

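/* Return the DMA address of the first-TB buffer for queue entry @idx */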
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

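/* Map a TFD queue index into the command window (n_window is a power of two) */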
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);

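/*
 * If the queue was marked stopped, clear that state and notify the op mode
 * that the queue is no longer full.
 */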
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

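/*
 * Return a pointer to the TFD at @idx; on TFH-based (use_tfh) hardware the
 * index is first mapped into the command window.
 */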
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return txq->tfds + trans->txqs.tfd.size * idx;
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

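/*
 * Mark the queue stopped and tell the op mode it is full; if it was already
 * stopped, only log that fact.
 */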
static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

/**
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

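/*
 * Return true if entry @i lies between the read and write pointers
 * (i.e. is currently in use), taking pointer wraparound into account.
 */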
static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans,
		      __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size,
		      unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif
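/*
 * Return the number of TBs referenced by a TFD; handles both the legacy
 * TFD format and the newer TFH format.
 */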
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
					      void *_tfd)
{
	struct iwl_tfd *tfd;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;

		return le16_to_cpu(tfd->num_tbs) & 0x1f;
	}

	tfd = (struct iwl_tfd *)_tfd;
	return tfd->num_tbs & 0x1f;
}

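/*
 * Return the byte length of TB @idx within a TFD, for both the legacy TFD
 * format and the newer TFH format.
 */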
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
#endif /* __iwl_trans_queue_tx_h__ */