/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
 */

#ifndef _SDIO_H_
#define _SDIO_H_

#define ATH10K_HIF_MBOX_BLOCK_SIZE              256

#define ATH10K_SDIO_MAX_BUFFER_SIZE             4096 /* Unsure of this constant */

/* Mailbox address in SDIO address space */
#define ATH10K_HIF_MBOX_BASE_ADDR               0x1000
#define ATH10K_HIF_MBOX_WIDTH                   0x800

#define ATH10K_HIF_MBOX_TOT_WIDTH \
        (ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH)

#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR          0x5000
#define ATH10K_HIF_MBOX0_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0     (56 * 1024)
#define ATH10K_HIF_MBOX1_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE        (2 * 1024)

#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \
        (ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))

#define ATH10K_HIF_MBOX_NUM_MAX                 4
#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM         1024

#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)

/* HTC runs over mailbox 0 */
#define ATH10K_HTC_MAILBOX                      0
#define ATH10K_HTC_MAILBOX_MASK                 BIT(ATH10K_HTC_MAILBOX)

/* GMBOX addresses */
#define ATH10K_HIF_GMBOX_BASE_ADDR              0x7000
#define ATH10K_HIF_GMBOX_WIDTH                  0x4000

/* Modified versions of the sdio.h macros.
 * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET}
 * macros in bitfield.h, so we define our own macros here.
 */
#define ATH10K_SDIO_DRIVE_DTSX_MASK \
        (SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT)

#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B           0
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A           1
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C           2
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D           3
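
/* Illustrative only (not part of the driver): with the pre-shifted mask
 * above, the bitfield.h helpers can be used directly, e.g.
 *
 *   u8 dtsx = FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
 *                        ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
 *
 * This would not work with SDIO_DRIVE_DTSx_MASK from sdio.h, since
 * FIELD_PREP expects the mask to already be shifted into position.
 */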

/* SDIO CCCR register definitions */
#define CCCR_SDIO_IRQ_MODE_REG                  0xF0
#define CCCR_SDIO_IRQ_MODE_REG_SDIO3            0x16

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR   0xF2

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A      0x02
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C      0x04
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D      0x08

#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS       0xF0
#define CCCR_SDIO_ASYNC_INT_DELAY_MASK          0xC0

/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ            BIT(0)
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3      BIT(1)

#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK      0x01

/* The theoretical maximum number of RX messages that can be fetched
 * from the mbox interrupt handler in one loop is derived in the
 * following way:
 *
 * Let's assume that each packet in a bundle of the maximum bundle size
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
 * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
 *
 * In this case the driver must allocate
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skbs.
 */
#define ATH10K_SDIO_MAX_RX_MSGS \
        (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)
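
/* For example, assuming HTC_HOST_MAX_MSG_PER_RX_BUNDLE is 32 (its value
 * in htc.h at the time of writing), this works out to 64 rx packet slots
 * per interrupt handler iteration.
 */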

#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL                    0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF  0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON   0x10000
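
/* Illustrative read-modify-write usage (a sketch of the assumed sleep
 * handling pattern, not a quote from sdio.c): the _OFF value is an
 * AND-mask that clears bit 16 (allowing the chip to sleep), while the
 * _ON value is an OR-mask that sets it (keeping the chip awake):
 *
 *   val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
 *   val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
 */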

enum sdio_mbox_state {
        SDIO_MBOX_UNKNOWN_STATE = 0,
        SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
        SDIO_MBOX_SLEEP_STATE = 2,
        SDIO_MBOX_AWAKE_STATE = 3,
};
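
/* A rough sketch of the expected transitions (an assumption inferred
 * from the state names, not a statement about sdio.c): the mailbox is
 * AWAKE while there is traffic, moves to REQUEST_TO_SLEEP when the
 * inactivity timer fires, then to SLEEP once the sleep request has been
 * written to the target, and back to AWAKE on the next mailbox access.
 */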

#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US  125
#define ATH10K_CIS_RTC_STATE_ADDR               0x1138
#define ATH10K_CIS_RTC_STATE_ON                 0x01
#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US   1500
#define ATH10K_CIS_READ_RETRY                   10
#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS     50

/* TODO: remove this and use skb->cb instead, much cleaner approach */
struct ath10k_sdio_bus_request {
        struct list_head list;

        /* sdio address */
        u32 address;

        struct sk_buff *skb;
        enum ath10k_htc_ep_id eid;
        int status;
        /* Specifies if the current request is an HTC message.
         * If not, the eid is not applicable and the TX completion handler
         * associated with the endpoint will not be invoked.
         */
        bool htc_msg;
        /* Completion that (if set) will be invoked for non-HTC requests
         * (htc_msg == false) when the request has been processed.
         */
        struct completion *comp;
};
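
/* Illustrative only: a non-HTC request would typically be submitted and
 * waited for along these lines (a sketch meant to clarify the
 * htc_msg/comp semantics above, not actual driver code):
 *
 *   DECLARE_COMPLETION_ONSTACK(done);
 *
 *   req->htc_msg = false;
 *   req->comp = &done;
 *   ... queue req and schedule the async write worker ...
 *   wait_for_completion(&done);
 */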

struct ath10k_sdio_rx_data {
        struct sk_buff *skb;
        size_t alloc_len;
        size_t act_len;
        enum ath10k_htc_ep_id eid;
        bool part_of_bundle;
        bool last_in_bundle;
        bool trailer_only;
};

struct ath10k_sdio_irq_proc_regs {
        u8 host_int_status;
        u8 cpu_int_status;
        u8 error_int_status;
        u8 counter_int_status;
        u8 mbox_frame;
        u8 rx_lookahead_valid;
        u8 host_int_status2;
        u8 gmbox_rx_avail;
        __le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
        __le32 int_status_enable;
};

struct ath10k_sdio_irq_enable_regs {
        u8 int_status_en;
        u8 cpu_int_status_en;
        u8 err_int_status_en;
        u8 cntr_int_status_en;
};

struct ath10k_sdio_irq_data {
        /* protects irq_proc_reg and irq_en_reg below.
         * We use a mutex here and not a spinlock since we will have the
         * mutex locked while calling the sdio_memcpy_ functions.
         * These functions require non-atomic context, and hence, spinlocks
         * cannot be held while calling these functions.
         */
        struct mutex mtx;
        struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
        struct ath10k_sdio_irq_enable_regs *irq_en_reg;
};
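
/* Illustrative locking pattern implied by the comment above (a sketch,
 * with a placeholder register address):
 *
 *   mutex_lock(&irq_data->mtx);
 *   ret = sdio_memcpy_fromio(func, irq_data->irq_proc_reg, addr,
 *                            sizeof(*irq_data->irq_proc_reg));
 *   mutex_unlock(&irq_data->mtx);
 *
 * sdio_memcpy_fromio() may sleep, which is why a mutex rather than a
 * spinlock guards these buffers.
 */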

struct ath10k_mbox_ext_info {
        u32 htc_ext_addr;
        u32 htc_ext_sz;
};

struct ath10k_mbox_info {
        u32 htc_addr;
        struct ath10k_mbox_ext_info ext_info[2];
        u32 block_size;
        u32 block_mask;
        u32 gmbox_addr;
        u32 gmbox_sz;
};

struct ath10k_sdio {
        struct sdio_func *func;

        struct ath10k_mbox_info mbox_info;
        bool swap_mbox;
        u32 mbox_addr[ATH10K_HTC_EP_COUNT];
        u32 mbox_size[ATH10K_HTC_EP_COUNT];

        /* available bus requests */
        struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
        /* free list of bus requests */
        struct list_head bus_req_freeq;

        struct sk_buff_head rx_head;

        /* protects access to bus_req_freeq */
        spinlock_t lock;

        struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
        size_t n_rx_pkts;

        struct ath10k *ar;
        struct ath10k_sdio_irq_data irq_data;

        /* temporary buffer for sdio reads.
         * It is allocated at probe time and used for receiving bundled
         * packets. Reads of bundled packets are never done in parallel,
         * so the buffer does not need to be protected by a lock.
         */
        u8 *vsg_buffer;

        /* temporary buffer for BMI requests */
        u8 *bmi_buf;

        bool is_disabled;

        struct workqueue_struct *workqueue;
        struct work_struct wr_async_work;
        struct list_head wr_asyncq;
        /* protects access to wr_asyncq */
        spinlock_t wr_async_lock;

        struct work_struct async_work_rx;
        struct timer_list sleep_timer;
        enum sdio_mbox_state mbox_state;
};

static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
{
        return (struct ath10k_sdio *)ar->drv_priv;
}
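
/* Typical usage (illustrative): bus code retrieves the SDIO-specific
 * state from the common ath10k structure before touching the SDIO
 * function, e.g.
 *
 *   struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 *
 *   sdio_claim_host(ar_sdio->func);
 *   ...
 *   sdio_release_host(ar_sdio->func);
 */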

#endif