/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_fh_h__
#define __iwl_fh_h__

#include <linux/types.h>
#include <linux/bitfield.h>

#include "iwl-trans.h"

/****************************/
/* Flow Handler Definitions */
/****************************/

/**
 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
 * Addresses are offsets from device's PCI hardware base address.
 */
#define FH_MEM_LOWER_BOUND                   (0x1000)
#define FH_MEM_UPPER_BOUND                   (0x2000)
#define FH_MEM_LOWER_BOUND_GEN2              (0xa06000)
#define FH_MEM_UPPER_BOUND_GEN2              (0xa08000)

/**
 * Keep-Warm (KW) buffer base address.
 *
 * Driver must allocate a 4KByte buffer that is for keeping the
 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
 * DRAM access when doing Txing or Rxing.  The dummy accesses prevent host
 * from going into a power-savings mode that would cause higher DRAM latency,
 * and possible data over/under-runs, before all Tx/Rx is complete.
 *
 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
 * of the buffer, which must be 4K aligned.  Once this is set up, the device
 * automatically invokes keep-warm accesses when normal accesses might not
 * be sufficient to maintain fast DRAM response.
 *
 * Bit fields:
 *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
 */
#define FH_KW_MEM_ADDR_REG		     (FH_MEM_LOWER_BOUND + 0x97C)
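/*
 * Illustrative sketch (not part of the register map): a driver would
 * typically program the keep-warm buffer by writing the 4 KB-aligned DMA
 * address, shifted right by 4 bits, into FH_KW_MEM_ADDR_REG.  The
 * iwl_write_direct32() helper (from iwl-io.h) and the kw_dma_addr name are
 * assumed here purely for illustration:
 *
 *	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, kw_dma_addr >> 4);
 */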


/**
 * TFD Circular Buffers Base (CBBC) addresses
 *
 * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
 * (see struct iwl_tfd).  These 16 pointer registers are offset by 0x04
 * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
 * aligned (address bits 0-7 must be 0).
 * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
 * for them are in different places.
 *
 * Bit fields in each pointer register:
 *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
 */
#define FH_MEM_CBBC_0_15_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0x9D0)
#define FH_MEM_CBBC_0_15_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xA10)
#define FH_MEM_CBBC_16_19_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xBF0)
#define FH_MEM_CBBC_16_19_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_CBBC_20_31_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xB80)
/* 22000 TFD table address, 64 bit */
#define TFH_TFDQ_CBB_TABLE			(0x1C00)

/* Find TFD CB base pointer for given queue */
static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
					     unsigned int chnl)
{
	if (trans->trans_cfg->use_tfh) {
		WARN_ON_ONCE(chnl >= 64);
		return TFH_TFDQ_CBB_TABLE + 8 * chnl;
	}
	if (chnl < 16)
		return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
	if (chnl < 20)
		return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
	WARN_ON_ONCE(chnl >= 32);
	return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
}
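/*
 * Illustrative sketch: telling the device where a Tx queue's TFD circular
 * buffer lives.  Because the register holds address bits [35:8], the
 * 256-byte-aligned DMA address is shifted right by 8 before being written.
 * iwl_write_direct32() (from iwl-io.h) and txq_dma_addr are assumed here
 * only as an example:
 *
 *	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
 *			   txq_dma_addr >> 8);
 */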

/* 22000 configuration registers */

/*
 * TFH Configuration register.
 *
 * BIT fields:
 *
 * Bits 3:0:
 * Define the maximum number of pending read requests.
 * Maximum configuration value allowed is 0xC
 * Bits 9:8:
 * Define the maximum transfer size. (64 / 128 / 256)
 * Bit 10:
 * When bit is set and transfer size is set to 128B, the TFH will enable
 * reading chunks of more than 64B only if the read address is aligned to 128B.
 * In case of DRAM read address which is not aligned to 128B, the TFH will
 * enable transfer size which doesn't cross 64B DRAM address boundary.
 */
#define TFH_TRANSFER_MODE		(0x1F40)
#define TFH_TRANSFER_MAX_PENDING_REQ	0xc
#define TFH_CHUNK_SIZE_128		BIT(8)
#define TFH_CHUNK_SPLIT_MODE		BIT(10)
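/*
 * Illustrative sketch: one plausible TFH_TRANSFER_MODE configuration, with
 * the maximum number of pending read requests, 128 B chunks and split mode
 * enabled.  The iwl_write_direct32() helper from iwl-io.h is assumed here:
 *
 *	iwl_write_direct32(trans, TFH_TRANSFER_MODE,
 *			   TFH_TRANSFER_MAX_PENDING_REQ |
 *			   TFH_CHUNK_SIZE_128 | TFH_CHUNK_SPLIT_MODE);
 */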
/*
 * Defines the offset address in dwords referring from the beginning of the
 * Tx CMD which will be updated in DRAM.
 * Note that the TFH offset address for Tx CMD update is always referring to
 * the start of the TFD first TB.
 * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
 */
#define TFH_TXCMD_UPDATE_CFG		(0x1F48)
/*
 * Controls TX DMA operation
 *
 * BIT fields:
 *
 * Bits 31:30: Enable the SRAM DMA channel.
 * Turning on bit 31 will kick the SRAM2DRAM DMA.
 * Note that the sram2dram may be enabled only after configuring the DRAM and
 * SRAM addresses registers and the byte count register.
 * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When
 * set to 1 - interrupt is sent to the driver
 * Bit 0: Indicates the snoop configuration
 */
#define TFH_SRV_DMA_CHNL0_CTRL	(0x1F60)
#define TFH_SRV_DMA_SNOOP	BIT(0)
#define TFH_SRV_DMA_TO_DRIVER	BIT(24)
#define TFH_SRV_DMA_START	BIT(31)

/* Defines the DMA SRAM write start address to transfer a data block */
#define TFH_SRV_DMA_CHNL0_SRAM_ADDR	(0x1F64)

/* Defines the 64bits DRAM start address to read the DMA data block from */
#define TFH_SRV_DMA_CHNL0_DRAM_ADDR	(0x1F68)

/*
 * Defines the number of bytes to transfer from DRAM to SRAM.
 * Note that this register may be configured with non-dword aligned size.
 */
#define TFH_SRV_DMA_CHNL0_BC	(0x1F70)
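/*
 * Illustrative sketch: kicking a DRAM-to-SRAM transfer on service DMA
 * channel 0.  Per the description above, the SRAM/DRAM addresses and the
 * byte count must be programmed before the start bit is set.
 * iwl_write_prph() is assumed from iwl-io.h, and writing the 64-bit DRAM
 * address may in practice require two dword writes (only the low dword is
 * shown):
 *
 *	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR, sram_addr);
 *	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, dram_addr);
 *	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);
 *	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_CTRL,
 *		       TFH_SRV_DMA_SNOOP | TFH_SRV_DMA_TO_DRIVER |
 *		       TFH_SRV_DMA_START);
 */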

/**
 * Rx SRAM Control and Status Registers (RSCSR)
 *
 * These registers provide handshake between driver and device for the Rx queue
 * (this queue handles *all* command responses, notifications, Rx data, etc.
 * sent from uCode to host driver).  Unlike Tx, there is only one Rx
 * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
 * mapping between RBDs and RBs.
 *
 * Driver must allocate host DRAM memory for the following, and set the
 * physical address of each into device registers:
 *
 * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
 *     entries (although any power of 2, up to 4096, is selectable by driver).
 *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
 *     (typically 4K, although 8K or 16K are also selectable by driver).
 *     Driver sets up RB size and number of RBDs in the CB via Rx config
 *     register FH_MEM_RCSR_CHNL0_CONFIG_REG.
 *
 *     Bit fields within one RBD:
 *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
 *
 *     Driver sets physical address [35:8] of base of RBD circular buffer
 *     into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
 *
 * 2)  Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers
 *     (RBs) have been filled, via a "write pointer", actually the index of
 *     the RB's corresponding RBD within the circular buffer.  Driver sets
 *     physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
 *
 *     Bit fields in lower dword of Rx status buffer (upper dword not used
 *     by driver):
 *     31-12:  Not used by driver
 *     11- 0:  Index of last filled Rx buffer descriptor
 *             (device writes, driver reads this value)
 *
 * As the driver prepares Receive Buffers (RBs) for device to fill, driver must
 * enter pointers to these RBs into contiguous RBD circular buffer entries,
 * and update the device's "write" index register,
 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
 *
 * This "write" index corresponds to the *next* RBD that the driver will make
 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
 * the circular buffer.  This value should initially be 0 (before preparing any
 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
 * wrap back to 0 at the end of the circular buffer (but don't wrap before
 * "read" index has advanced past 1!  See below).
 * NOTE:  DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
 *
 * As the device fills RBs (referenced from contiguous RBDs within the circular
 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
 * to tell the driver the index of the latest filled RBD.  The driver must
 * read this "read" index from DRAM after receiving an Rx interrupt from device
 *
 * The driver must also internally keep track of a third index, which is the
 * next RBD to process.  When receiving an Rx interrupt, driver should process
 * all filled but unprocessed RBs up to, but not including, the RB
 * corresponding to the "read" index.  For example, if "read" index becomes "1",
 * driver may process the RB pointed to by RBD 0.  Depending on volume of
 * traffic, there may be many RBs to process.
 *
 * If read index == write index, device thinks there is no room to put new data.
 * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
 * and "read" indexes; that is, make sure that there are no more than 254
 * buffers waiting to be filled.
 */
#define FH_MEM_RSCSR_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0xBC0)
#define FH_MEM_RSCSR_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RSCSR_CHNL0		(FH_MEM_RSCSR_LOWER_BOUND)
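/*
 * Illustrative sketch: advancing the Rx "write" index.  The device expects
 * the index in multiples of 8, and it must wrap at the size of the RBD
 * circular buffer.  rxq_write is a hypothetical driver-side counter (already
 * wrapped) and iwl_write32() is assumed from iwl-io.h:
 *
 *	u32 write_actual = round_down(rxq_write, 8);
 *
 *	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, write_actual);
 */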

/**
 * Physical base address of 8-byte Rx Status buffer.
 * Bit fields:
 *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
 */
#define FH_RSCSR_CHNL0_STTS_WPTR_REG	(FH_MEM_RSCSR_CHNL0)

/**
 * Physical base address of Rx Buffer Descriptor Circular Buffer.
 * Bit fields:
 *  27-0:  RBD CB physical base address [35:8], must be 256-byte aligned.
 */
#define FH_RSCSR_CHNL0_RBDCB_BASE_REG	(FH_MEM_RSCSR_CHNL0 + 0x004)

/**
 * Rx write pointer (index, really!).
 * Bit fields:
 *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
 *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
 */
#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x008)
#define FH_RSCSR_CHNL0_WPTR        (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)

#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x00c)
#define FH_RSCSR_CHNL0_RDPTR		FW_RSCSR_CHNL0_RXDCB_RDPTR_REG

/**
 * Rx Config/Status Registers (RCSR)
 * Rx Config Reg for channel 0 (only channel used)
 *
 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
 * normal operation (see bit fields).
 *
 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG	for
 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
 *
 * Bit fields:
 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
 *        '10' operate normally
 * 29-24: reserved
 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
 *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
 * 19-18: reserved
 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
 *        '10' 12K, '11' 16K.
 * 15-14: reserved
 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
 *        typical value 0x10 (about 1/2 msec)
 *  3- 0: reserved
 */
#define FH_MEM_RCSR_LOWER_BOUND      (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RCSR_UPPER_BOUND      (FH_MEM_LOWER_BOUND + 0xCC0)
#define FH_MEM_RCSR_CHNL0            (FH_MEM_RCSR_LOWER_BOUND)

#define FH_MEM_RCSR_CHNL0_CONFIG_REG	(FH_MEM_RCSR_CHNL0)
#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR	(FH_MEM_RCSR_CHNL0 + 0x8)
#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ	(FH_MEM_RCSR_CHNL0 + 0x10)

#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000) /* bit 12 */
#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000) /* bits 16-17 */
#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */

#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
#define RX_RB_TIMEOUT	(0x11)

#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)

#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)

#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
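/*
 * Illustrative sketch: a typical channel 0 Rx configuration built from the
 * values above, i.e. enable DMA, interrupt the host, 4 KB RBs, 256 RBDs
 * (2^8) and the usual RB timeout.  iwl_write32() is assumed from iwl-io.h:
 *
 *	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
 *		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
 *		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
 *		    FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
 *		    (RX_QUEUE_SIZE_LOG << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
 *		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));
 */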

/**
 * Rx Shared Status Registers (RSSR)
 *
 * After stopping Rx DMA channel (writing 0 to
 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
 *
 * Bit fields:
 *  24:  1 = Channel 0 is idle
 *
 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
 * contain default values that should not be altered by the driver.
 */
#define FH_MEM_RSSR_LOWER_BOUND           (FH_MEM_LOWER_BOUND + 0xC40)
#define FH_MEM_RSSR_UPPER_BOUND           (FH_MEM_LOWER_BOUND + 0xD00)

#define FH_MEM_RSSR_SHARED_CTRL_REG       (FH_MEM_RSSR_LOWER_BOUND)
#define FH_MEM_RSSR_RX_STATUS_REG	(FH_MEM_RSSR_LOWER_BOUND + 0x004)
#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV \
					(FH_MEM_RSSR_LOWER_BOUND + 0x008)

#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE	(0x01000000)
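/*
 * Illustrative sketch: after clearing FH_MEM_RCSR_CHNL0_CONFIG_REG, wait
 * for the Rx channel to report idle.  iwl_poll_direct_bit() (assumed from
 * iwl-io.h) polls a register until the given bits are set or the timeout,
 * in microseconds, expires; a negative return value would mean the channel
 * did not become idle in time:
 *
 *	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 *	ret = iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
 *				  FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
 */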

#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT	28
#define FH_MEM_TB_MAX_LENGTH			(0x00020000)

/* 9000 rx series registers */

#define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */
#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
/* Write index table */
#define RFH_Q0_FRBDCB_WIDX 0xA08080
#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
/* Write index table - shadow registers */
#define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
/* Read index table */
#define RFH_Q0_FRBDCB_RIDX 0xA080C0
#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
/* Used list table */
#define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */
#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8)
/* Write index table */
#define RFH_Q0_URBDCB_WIDX 0xA08180
#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4)
#define RFH_Q0_URBDCB_VAID 0xA081C0
#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4)
/* stts */
#define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /* 64 bit address */
#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)

#define RFH_Q0_ORB_WPTR_LSB 0xA08280
#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8)
#define RFH_RBDBUF_RBD0_LSB 0xA08300
#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)

/**
 * RFH Status Register
 *
 * Bit fields:
 *
 * Bit 29: RBD_FETCH_IDLE
 * This status flag is set by the RFH when there is no active RBD fetch from
 * DRAM.
 * Once the RFH RBD controller starts fetching (or when there is a pending
 * RBD read response from DRAM), this flag is immediately turned off.
 *
 * Bit 30: SRAM_DMA_IDLE
 * This status flag is set by the RFH when there is no active transaction from
 * SRAM to DRAM.
 * Once the SRAM to DRAM DMA is active, this flag is immediately turned off.
 *
 * Bit 31: RXF_DMA_IDLE
 * This status flag is set by the RFH when there is no active transaction from
 * RXF to DRAM.
 * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
 */
#define RFH_GEN_STATUS		0xA09808
#define RFH_GEN_STATUS_GEN3	0xA07824
#define RBD_FETCH_IDLE	BIT(29)
#define SRAM_DMA_IDLE	BIT(30)
#define RXF_DMA_IDLE	BIT(31)
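/*
 * Illustrative sketch: on multi-queue (9000 and later) devices, waiting for
 * the RFH to fully drain after its DMA has been disabled can be done by
 * polling RFH_GEN_STATUS until all three idle flags are set.
 * iwl_poll_prph_bit() is assumed from iwl-io.h:
 *
 *	iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
 *			  RXF_DMA_IDLE | SRAM_DMA_IDLE | RBD_FETCH_IDLE,
 *			  RXF_DMA_IDLE | SRAM_DMA_IDLE | RBD_FETCH_IDLE,
 *			  1000);
 */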

/* DMA configuration */
#define RFH_RXF_DMA_CFG		0xA09820
#define RFH_RXF_DMA_CFG_GEN3	0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS 16
#define RFH_RXF_DMA_RB_SIZE_1K	(0x1 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_2K	(0x2 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_4K	(0x4 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_8K	(0x8 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_12K	(0x9 << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_16K	(0xA << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_20K	(0xB << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_24K	(0xC << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_28K	(0xD << RFH_RXF_DMA_RB_SIZE_POS)
#define RFH_RXF_DMA_RB_SIZE_32K	(0xE << RFH_RXF_DMA_RB_SIZE_POS)
/* RB Circular Buffer size: defines the table sizes in RBD units */
#define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */
#define RFH_RXF_DMA_RBDCB_SIZE_POS 20
#define RFH_RXF_DMA_RBDCB_SIZE_8	(0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_16	(0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_32	(0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_64	(0x6 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_128	(0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_256	(0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_512	(0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_1024	(0xA << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_RBDCB_SIZE_2048	(0xB << RFH_RXF_DMA_RBDCB_SIZE_POS)
#define RFH_RXF_DMA_MIN_RB_SIZE_MASK	(0x03000000) /* bits 24-25 */
#define RFH_RXF_DMA_MIN_RB_SIZE_POS	24
#define RFH_RXF_DMA_MIN_RB_4_8		(3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK	(0x04000000) /* bit 26 */
#define RFH_RXF_DMA_SINGLE_FRAME_MASK	(0x20000000) /* bit 29 */
#define RFH_DMA_EN_MASK			(0xC0000000) /* bits 30-31 */
#define RFH_DMA_EN_ENABLE_VAL		BIT(31)
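/*
 * Illustrative sketch: a plausible RFH_RXF_DMA_CFG value enabling the RX
 * DMA with 4 KB RBs and a 512-entry RBD circular buffer; the exact
 * combination a driver uses depends on the device and is only an example
 * here.  iwl_write_prph() is assumed from iwl-io.h:
 *
 *	iwl_write_prph(trans, RFH_RXF_DMA_CFG,
 *		       RFH_DMA_EN_ENABLE_VAL |
 *		       RFH_RXF_DMA_RB_SIZE_4K |
 *		       RFH_RXF_DMA_MIN_RB_4_8 |
 *		       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
 *		       RFH_RXF_DMA_RBDCB_SIZE_512);
 */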

#define RFH_RXF_RXQ_ACTIVE 0xA0980C

#define RFH_GEN_CFG	0xA09800
#define RFH_GEN_CFG_SERVICE_DMA_SNOOP	BIT(0)
#define RFH_GEN_CFG_RFH_DMA_SNOOP	BIT(1)
#define RFH_GEN_CFG_RB_CHUNK_SIZE	BIT(4)
#define RFH_GEN_CFG_RB_CHUNK_SIZE_128	1
#define RFH_GEN_CFG_RB_CHUNK_SIZE_64	0
/* the driver assumes everywhere that the default RXQ is 0 */
#define RFH_GEN_CFG_DEFAULT_RXQ_NUM	0xF00
#define RFH_GEN_CFG_VAL(_n, _v)		FIELD_PREP(RFH_GEN_CFG_ ## _n, _v)

/* end of 9000 rx series registers */

/* TFDB Area - TFDs buffer table */
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
#define FH_TFDIB_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0x900)
#define FH_TFDIB_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0x958)
#define FH_TFDIB_CTRL0_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
#define FH_TFDIB_CTRL1_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)

/**
 * Transmit DMA Channel Control/Status Registers (TCSR)
 *
 * Device has one configuration register for each of 8 Tx DMA/FIFO channels
 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
 *
 * To use a Tx DMA channel, driver must initialize its
 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
 *
 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
 *
 * All other bits should be 0.
 *
 * Bit fields:
 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
 *        '10' operate normally
 * 29- 4: Reserved, set to "0"
 *     3: Enable internal DMA requests (1, normal operation), disable (0)
 *  2- 0: Reserved, set to "0"
 */
#define FH_TCSR_LOWER_BOUND  (FH_MEM_LOWER_BOUND + 0xD00)
#define FH_TCSR_UPPER_BOUND  (FH_MEM_LOWER_BOUND + 0xE60)

/* Find Control/Status reg for given Tx DMA/FIFO channel */
#define FH_TCSR_CHNL_NUM                            (8)

/* TCSR: tx_config register values */
#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl)	\
		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl)	\
		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl)	\
		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)

#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF		(0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV		(0x00000001)

#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	(0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE	(0x00000008)

#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT	(0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD	(0x00100000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD	(0x00200000)

#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT	(0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD	(0x00400000)
#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD	(0x00800000)

#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE	(0x00000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF	(0x40000000)
#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	(0x80000000)

#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY	(0x00000000)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT	(0x00002000)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID	(0x00000003)

#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM		(20)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX		(12)
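/*
 * Illustrative sketch: enabling a Tx DMA/FIFO channel as described in the
 * TCSR comment above, i.e. DMA channel enable plus credit enable, all other
 * bits zero.  iwl_write_direct32() is assumed from iwl-io.h:
 *
 *	iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
 *			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
 *			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
 */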

/**
 * Tx Shared Status Registers (TSSR)
 *
 * After stopping Tx DMA channel (writing 0 to
 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
 * (channel's buffers empty | no pending requests).
 *
 * Bit fields:
 * 31-24:  1 = Channel buffers empty (channel 7:0)
 * 23-16:  1 = No pending requests (channel 7:0)
 */
#define FH_TSSR_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xEA0)
#define FH_TSSR_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xEC0)

#define FH_TSSR_TX_STATUS_REG		(FH_TSSR_LOWER_BOUND + 0x010)

/**
 * Bit fields for TSSR (Tx Shared Status & Control) error status register:
 * 31:  Indicates an address error when accessing internal memory
 *	uCode/driver must write "1" in order to clear this flag
 * 30:  Indicates that Host did not send the expected number of dwords to FH
 *	uCode/driver must write "1" in order to clear this flag
 * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
 *	command was received from the scheduler while the TRB was already full
 *	with the previous command
 *	uCode/driver must write "1" in order to clear this flag
 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
 *	bit is set, it indicates that the FH has received a full indication
 *	from the RTC TxFIFO and the current value of the TxCredit counter was
 *	not equal to zero. This means that the credit mechanism was not
 *	synchronized to the TxFIFO status
 *	uCode/driver must write "1" in order to clear this flag
 */
#define FH_TSSR_TX_ERROR_REG		(FH_TSSR_LOWER_BOUND + 0x018)
#define FH_TSSR_TX_MSG_CONFIG_REG	(FH_TSSR_LOWER_BOUND + 0x008)

#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
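/*
 * Illustrative sketch: after clearing FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
 * wait for that channel to report idle in FH_TSSR_TX_STATUS_REG.
 * iwl_poll_direct_bit() is assumed from iwl-io.h; the timeout is in
 * microseconds:
 *
 *	iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
 *	ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
 *				  FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
 *				  5000);
 */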

/* Tx service channels */
#define FH_SRVC_CHNL		(9)
#define FH_SRVC_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0x9C8)
#define FH_SRVC_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0x9D0)
#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
		(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)

#define FH_TX_CHICKEN_BITS_REG	(FH_MEM_LOWER_BOUND + 0xE98)
#define FH_TX_TRB_REG(_chan)	(FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)

/* Instruct FH to increment the retry count of a packet when
 * it is brought from the memory to TX-FIFO
 */
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN	(0x00000002)

#define RX_POOL_SIZE(rbds)	((rbds) - 1 +	\
				 IWL_MAX_RX_HW_QUEUES *	\
				 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
/* cb size is the exponent */
#define RX_QUEUE_CB_SIZE(x)	ilog2(x)

#define RX_QUEUE_SIZE                         256
#define RX_QUEUE_MASK                         255
#define RX_QUEUE_SIZE_LOG                     8

/**
 * struct iwl_rb_status - receive buffer status
 * 	host memory mapped FH registers
 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
 * @finished_rb_num [0:11] - Indicates the index of the current RB
 * 	in which the last frame was written to
 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
 * 	which was transferred
 */
struct iwl_rb_status {
	__le16 closed_rb_num;
	__le16 closed_fr_num;
	__le16 finished_rb_num;
	__le16 finished_fr_nam;
	__le32 __unused;
} __packed;


#define TFD_QUEUE_SIZE_MAX      (256)
#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x)	(ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP	(64)
#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3	1024
#define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS		20
#define IWL_TFH_NUM_TBS		25

static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
	return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
}

/**
 * enum iwl_tfd_tb_hi_n_len - TB hi_n_len bits
 * @TB_HI_N_LEN_ADDR_HI_MSK: high 4 bits (to make it 36) of DMA address
 * @TB_HI_N_LEN_LEN_MSK: length of the TB
 */
enum iwl_tfd_tb_hi_n_len {
	TB_HI_N_LEN_ADDR_HI_MSK	= 0xf,
	TB_HI_N_LEN_LEN_MSK	= 0xfff0,
};

/**
 * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
 *
 * This structure contains the DMA address and length of a Tx buffer
 *
 * @lo: low [31:0] portion of the dma address of TX buffer
 * 	every even entry is unaligned on a 16 bit boundary
 * @hi_n_len: &enum iwl_tfd_tb_hi_n_len
 */
struct iwl_tfd_tb {
	__le32 lo;
	__le16 hi_n_len;
} __packed;
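/*
 * Illustrative sketch: filling one TB with a DMA address and length.  The
 * low 32 address bits go into @lo, while @hi_n_len packs the upper 4
 * address bits (TB_HI_N_LEN_ADDR_HI_MSK) together with the length shifted
 * into TB_HI_N_LEN_LEN_MSK.  An unaligned store is used for @lo since it
 * may not be naturally aligned (see the note above); put_unaligned_le32()
 * is a standard kernel helper assumed here for illustration:
 *
 *	u16 hi_n_len = len << 4;
 *
 *	put_unaligned_le32(addr, &tb->lo);
 *	hi_n_len |= iwl_get_dma_hi_addr(addr);
 *	tb->hi_n_len = cpu_to_le16(hi_n_len);
 */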

/**
 * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
 *
 * This structure contains the DMA address and length of a Tx buffer
 *
 * @tb_len: length of the tx buffer
 * @addr: 64 bit dma address
 */
struct iwl_tfh_tb {
	__le16 tb_len;
	__le64 addr;
} __packed;

/**
 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
 * Both driver and device share these circular buffers, each of which must be
 * contiguous 256 TFDs.
 * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
 * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
 *
 * Driver must indicate the physical address of the base of each
 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
 *
 * Each TFD contains pointer/size information for up to 20 / 25 data buffers
 * in host DRAM.  These buffers collectively contain the (one) frame described
 * by the TFD.  Each buffer must be a single contiguous block of memory within
 * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
 * of (4K - 4).  The device concatenates all of a TFD's buffers into a single
 * Tx frame, up to 8 KBytes in size.
 *
 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
 */

/**
 * struct iwl_tfd - Transmit Frame Descriptor (TFD)
 * @__reserved1: reserved
 * @num_tbs: bits 0-4 number of active tbs
 *	     bit  5   reserved
 *	     bits 6-7 padding (not used)
 * @tbs:	transmit frame buffer descriptors (20 entries)
 * @__pad:	padding
 */
struct iwl_tfd {
	u8 __reserved1[3];
	u8 num_tbs;
	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
	__le32 __pad;
} __packed;

/**
 * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
 * @num_tbs: bits 0-4  number of active tbs
 *	     bits 5-15 reserved
 * @tbs:	transmit frame buffer descriptors (25 entries)
 * @__pad:	padding
 */
struct iwl_tfh_tfd {
	__le16 num_tbs;
	struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
	__le32 __pad;
} __packed;

/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000	/* 4k */

/* Fixed (non-configurable) rx data from phy */

/**
 * struct iwlagn_scd_bc_tbl - scheduler byte count table
 *	base physical address provided by SCD_DRAM_BASE_ADDR
 * For devices up to 22000:
 * @tfd_offset:	0-12 - tx command byte count
 *		12-16 - station index
 * For 22000:
 * @tfd_offset:	0-12 - tx command byte count
 *		12-13 - number of 64 byte chunks
 *		14-16 - reserved
 */
struct iwlagn_scd_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;

/**
 * struct iwl_gen3_bc_tbl - scheduler byte count table gen3
 * For AX210 and on:
 * @tfd_offset: 0-12 - tx command byte count
 *		12-13 - number of 64 byte chunks
 *		14-16 - reserved
 */
struct iwl_gen3_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
} __packed;

#endif /* !__iwl_fh_h__ */