xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/infineon/bcmdhd/bcmsdh_sdmmc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
3  *
4  * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Proprietary,Open:>>
28  *
29  * $Id: bcmsdh_sdmmc.c 690631 2017-03-17 04:27:33Z $
30  */
31 #include <typedefs.h>
32 
33 #include <bcmdevs.h>
34 #include <bcmendian.h>
35 #include <bcmutils.h>
36 #include <osl.h>
37 #include <sdio.h>	/* SDIO Device and Protocol Specs */
38 #include <sdioh.h>	/* Standard SDIO Host Controller Specification */
39 #include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
40 #include <sdiovar.h>	/* ioctl/iovars */
41 
42 #include <linux/mmc/core.h>
43 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
44 #include <drivers/mmc/core/host.h>
45 #else
46 #include <linux/mmc/host.h>
47 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8)) */
48 #include <linux/mmc/card.h>
49 #include <linux/mmc/sdio_func.h>
50 #include <linux/mmc/sdio_ids.h>
51 
52 #include <dngl_stats.h>
53 #include <dhd.h>
54 #include <dhd_dbg.h>
55 
56 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
57 #include <linux/suspend.h>
58 extern volatile bool dhd_mmc_suspend;
59 #endif // endif
60 #include "bcmsdh_sdmmc.h"
61 
62 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= \
63 	KERNEL_VERSION(4, 4, 0))
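/* Stub out the mmc_host_clk_* helpers for kernel ranges where the MMC core
 * does not provide them; the current rate is then read directly from ios.clock.
 */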
64 static inline void
65 mmc_host_clk_hold(struct mmc_host *host)
66 {
67 	BCM_REFERENCE(host);
68 	return;
69 }
70 
71 static inline void
72 mmc_host_clk_release(struct mmc_host *host)
73 {
74 	BCM_REFERENCE(host);
75 	return;
76 }
77 
78 static inline unsigned int
79 mmc_host_clk_rate(struct mmc_host *host)
80 {
81 	return host->ios.clock;
82 }
83 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */
84 
85 #ifndef BCMSDH_MODULE
86 extern int sdio_function_init(void);
87 extern void sdio_function_cleanup(void);
88 #endif /* BCMSDH_MODULE */
89 
90 #if !defined(OOB_INTR_ONLY)
91 static void IRQHandler(struct sdio_func *func);
92 static void IRQHandlerF2(struct sdio_func *func);
93 #endif /* !defined(OOB_INTR_ONLY) */
94 static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
95 #if defined(OEM_ANDROID) && !defined(CONFIG_SOC_S5E5515)
96 static int sdio_reset_comm(struct mmc_card *card)
97 {
98 	return 0;
99 }
100 #else
101 extern int sdio_reset_comm(struct mmc_card *card);
102 #endif /* OEM_ANDROID && !CONFIG_SOC_S5E5515 */
103 
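/* Default block sizes: F1 carries register/backplane accesses and uses a small
 * block size, while F2 carries the data path and uses full 512-byte blocks.
 */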
104 #define DEFAULT_SDIO_F2_BLKSIZE		512
105 #ifndef CUSTOM_SDIO_F2_BLKSIZE
106 #define CUSTOM_SDIO_F2_BLKSIZE		DEFAULT_SDIO_F2_BLKSIZE
107 #endif // endif
108 
109 #define DEFAULT_SDIO_F1_BLKSIZE		64
110 #ifndef CUSTOM_SDIO_F1_BLKSIZE
111 #define CUSTOM_SDIO_F1_BLKSIZE		DEFAULT_SDIO_F1_BLKSIZE
112 #endif // endif
113 
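/* CMD53 (IO_RW_EXTENDED) carries a 9-bit block count, so a single request can
 * transfer at most 511 blocks.
 */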
114 #define MAX_IO_RW_EXTENDED_BLK		511
115 
116 uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
117 uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
118 uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE;
119 
120 #if defined(BT_OVER_SDIO)
121 uint sd_f3_blocksize = 64;
122 #endif /* defined (BT_OVER_SDIO) */
123 
124 uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
125 
126 uint sd_power = 1;		/* Default to SD Slot powered ON */
127 uint sd_clock = 1;		/* Default to SD Clock turned ON */
128 uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
129 uint sd_msglevel = SDH_ERROR_VAL;
130 uint sd_use_dma = TRUE;
131 
132 #ifndef CUSTOM_RXCHAIN
133 #define CUSTOM_RXCHAIN 0
134 #endif // endif
135 
136 DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
137 DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
138 DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
139 DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
140 
141 #if !defined(ARCH_DMA_MINALIGN)
142 #define ARCH_DMA_MINALIGN 128
143 #endif /* !defined(ARCH_DMA_MINALIGN) */
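/* Zero-copy transfers in sdioh_request_buffer() require the buffer start to be
 * ARCH_DMA_MINALIGN (cache line) aligned and the length to be a multiple of
 * 4 bytes (DMA_ALIGN_MASK); unaligned requests fall back to a bounce packet.
 */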
144 #define DMA_ALIGN_MASK	0x03
145 #define MMC_SDIO_ABORT_RETRY_LIMIT 5
146 
147 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
148 
149 #if defined(BT_OVER_SDIO)
150 extern
151 void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
152 {
153 	sd->func[3] = func;
154 	sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3]));
155 }
156 #endif /* defined (BT_OVER_SDIO) */
157 
158 void  sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
159 uint  sdmmc_get_clock_rate(sdioh_info_t *sd);
160 void  sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
161 
162 static int
163 sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
164 {
165 	int err_ret;
166 	uint32 fbraddr;
167 	uint8 func;
168 
169 	sd_trace(("%s\n", __FUNCTION__));
170 
171 	/* Get the Card's common CIS address */
172 	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
173 	sd->func_cis_ptr[0] = sd->com_cis_ptr;
174 	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
175 
176 	/* Get the Card's function CIS (for each function) */
177 	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
178 	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
179 		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
180 		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
181 		         __FUNCTION__, func, sd->func_cis_ptr[func]));
182 	}
183 
184 	sd->func_cis_ptr[0] = sd->com_cis_ptr;
185 	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
186 
187 	/* Enable Function 1 */
188 	sdio_claim_host(sd->func[1]);
189 	err_ret = sdio_enable_func(sd->func[1]);
190 	sdio_release_host(sd->func[1]);
191 	if (err_ret) {
192 		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
193 	}
194 
195 	return FALSE;
196 }
197 
198 /*
199  *	Public entry points & extern's
200  */
201 extern sdioh_info_t *
202 sdioh_attach(osl_t *osh, struct sdio_func *func)
203 {
204 	sdioh_info_t *sd = NULL;
205 	int err_ret;
206 
207 	sd_trace(("%s\n", __FUNCTION__));
208 
209 	if (func == NULL) {
210 		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
211 		return NULL;
212 	}
213 
214 	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
215 		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
216 		return NULL;
217 	}
218 	bzero((char *)sd, sizeof(sdioh_info_t));
219 	sd->osh = osh;
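	/* The MMC core exposes no struct sdio_func for function 0, so a local stub
	 * (fake_func0) stands in for F0 CCCR accesses; card->sdio_func[] is
	 * zero-based, i.e. sdio_func[0] is F1 and sdio_func[1] is F2.
	 */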
220 	sd->fake_func0.num = 0;
221 	sd->fake_func0.card = func->card;
222 	sd->func[0] = &sd->fake_func0;
223 	sd->func[1] = func->card->sdio_func[0];
224 	sd->func[2] = func->card->sdio_func[1];
225 
226 #if defined(BT_OVER_SDIO)
227 	sd->func[3] = NULL;
228 #endif /* defined (BT_OVER_SDIO) */
229 
230 	sd->num_funcs = 2;
231 	sd->sd_blockmode = TRUE;
232 	sd->use_client_ints = TRUE;
233 	sd->client_block_size[0] = 64;
234 	sd->use_rxchain = CUSTOM_RXCHAIN;
235 	if (sd->func[1] == NULL || sd->func[2] == NULL) {
236 		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
237 		goto fail;
238 	}
239 	sdio_set_drvdata(sd->func[1], sd);
240 
241 	sdio_claim_host(sd->func[1]);
242 	sd->client_block_size[1] = sd_f1_blocksize;
243 	err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize);
244 	sdio_release_host(sd->func[1]);
245 	if (err_ret) {
246 		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
247 		goto fail;
248 	}
249 
250 	sdio_claim_host(sd->func[2]);
251 	sd->client_block_size[2] = sd_f2_blocksize;
252 	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
253 	sdio_release_host(sd->func[2]);
254 	if (err_ret) {
255 		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
256 			sd_f2_blocksize, err_ret));
257 		goto fail;
258 	}
259 
260 	sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
261 	DHD_ERROR(("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate));
262 
263 	sdioh_sdmmc_card_enablefuncs(sd);
264 
265 	sd_trace(("%s: Done\n", __FUNCTION__));
266 	return sd;
267 
268 fail:
269 	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
270 	return NULL;
271 }
272 
273 extern SDIOH_API_RC
274 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
275 {
276 	sd_trace(("%s\n", __FUNCTION__));
277 
278 	if (sd) {
279 
280 		/* Disable Function 2 */
281 		if (sd->func[2]) {
282 			sdio_claim_host(sd->func[2]);
283 			sdio_disable_func(sd->func[2]);
284 			sdio_release_host(sd->func[2]);
285 		}
286 
287 		/* Disable Function 1 */
288 		if (sd->func[1]) {
289 			sdio_claim_host(sd->func[1]);
290 			sdio_disable_func(sd->func[1]);
291 			sdio_release_host(sd->func[1]);
292 		}
293 
294 		sd->func[1] = NULL;
295 		sd->func[2] = NULL;
296 
297 		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
298 	}
299 	return SDIOH_API_RC_SUCCESS;
300 }
301 
302 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
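/* With a hardware out-of-band interrupt line the in-band SDIO IRQ path is not
 * used, so the per-function interrupt enable bits in the CCCR are managed
 * directly here rather than through sdio_claim_irq().
 */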
303 
304 extern SDIOH_API_RC
305 sdioh_enable_func_intr(sdioh_info_t *sd)
306 {
307 	uint8 reg;
308 	int err;
309 
310 	if (sd->func[0] == NULL) {
311 		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
312 		return SDIOH_API_RC_FAIL;
313 	}
314 
315 	sdio_claim_host(sd->func[0]);
316 	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
317 	if (err) {
318 		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
319 		sdio_release_host(sd->func[0]);
320 		return SDIOH_API_RC_FAIL;
321 	}
322 	/* Enable F1 and F2 interrupts, clear master enable */
323 	reg &= ~INTR_CTL_MASTER_EN;
324 	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
325 #if defined(BT_OVER_SDIO)
326 	reg |= (INTR_CTL_FUNC3_EN);
327 #endif /* defined (BT_OVER_SDIO) */
328 	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
329 	sdio_release_host(sd->func[0]);
330 
331 	if (err) {
332 		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
333 		return SDIOH_API_RC_FAIL;
334 	}
335 
336 	return SDIOH_API_RC_SUCCESS;
337 }
338 
339 extern SDIOH_API_RC
340 sdioh_disable_func_intr(sdioh_info_t *sd)
341 {
342 	uint8 reg;
343 	int err;
344 
345 	if (sd->func[0] == NULL) {
346 		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
347 		return SDIOH_API_RC_FAIL;
348 	}
349 
350 	sdio_claim_host(sd->func[0]);
351 	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
352 	if (err) {
353 		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
354 		sdio_release_host(sd->func[0]);
355 		return SDIOH_API_RC_FAIL;
356 	}
357 	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
358 #if defined(BT_OVER_SDIO)
359 	reg &= ~INTR_CTL_FUNC3_EN;
360 #endif // endif
361 	/* Disable master interrupt with the last function interrupt */
362 	if (!(reg & 0xFE))
363 		reg = 0;
364 	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
365 	sdio_release_host(sd->func[0]);
366 
367 	if (err) {
368 		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
369 		return SDIOH_API_RC_FAIL;
370 	}
371 
372 	return SDIOH_API_RC_SUCCESS;
373 }
374 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
375 
376 /* Configure callback to client when we receive client interrupt */
377 extern SDIOH_API_RC
378 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
379 {
380 	sd_trace(("%s: Entering\n", __FUNCTION__));
381 	if (fn == NULL) {
382 		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
383 		return SDIOH_API_RC_FAIL;
384 	}
385 #if !defined(OOB_INTR_ONLY)
386 	sd->intr_handler = fn;
387 	sd->intr_handler_arg = argh;
388 	sd->intr_handler_valid = TRUE;
389 
390 	/* register and unmask irq */
391 	if (sd->func[2]) {
392 		sdio_claim_host(sd->func[2]);
393 		sdio_claim_irq(sd->func[2], IRQHandlerF2);
394 		sdio_release_host(sd->func[2]);
395 	}
396 
397 	if (sd->func[1]) {
398 		sdio_claim_host(sd->func[1]);
399 		sdio_claim_irq(sd->func[1], IRQHandler);
400 		sdio_release_host(sd->func[1]);
401 	}
402 #elif defined(HW_OOB)
403 	sdioh_enable_func_intr(sd);
404 #endif /* !defined(OOB_INTR_ONLY) */
405 
406 	return SDIOH_API_RC_SUCCESS;
407 }
408 
409 extern SDIOH_API_RC
410 sdioh_interrupt_deregister(sdioh_info_t *sd)
411 {
412 	sd_trace(("%s: Entering\n", __FUNCTION__));
413 
414 #if !defined(OOB_INTR_ONLY)
415 	if (sd->func[1]) {
416 		/* register and unmask irq */
417 		sdio_claim_host(sd->func[1]);
418 		sdio_release_irq(sd->func[1]);
419 		sdio_release_host(sd->func[1]);
420 	}
421 
422 	if (sd->func[2]) {
423 		/* Claim host controller F2 */
424 		sdio_claim_host(sd->func[2]);
425 		sdio_release_irq(sd->func[2]);
426 		/* Release host controller F2 */
427 		sdio_release_host(sd->func[2]);
428 	}
429 
430 	sd->intr_handler_valid = FALSE;
431 	sd->intr_handler = NULL;
432 	sd->intr_handler_arg = NULL;
433 #elif defined(HW_OOB)
434 	sdioh_disable_func_intr(sd);
435 #endif /* !defined(OOB_INTR_ONLY) */
436 	return SDIOH_API_RC_SUCCESS;
437 }
438 
439 extern SDIOH_API_RC
440 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
441 {
442 	sd_trace(("%s: Entering\n", __FUNCTION__));
443 	*onoff = sd->client_intr_enabled;
444 	return SDIOH_API_RC_SUCCESS;
445 }
446 
447 #if defined(DHD_DEBUG)
448 extern bool
449 sdioh_interrupt_pending(sdioh_info_t *sd)
450 {
451 	return (0);
452 }
453 #endif // endif
454 
455 uint
456 sdioh_query_iofnum(sdioh_info_t *sd)
457 {
458 	return sd->num_funcs;
459 }
460 
461 /* IOVar table */
462 enum {
463 	IOV_MSGLEVEL = 1,
464 	IOV_BLOCKMODE,
465 	IOV_BLOCKSIZE,
466 	IOV_DMA,
467 	IOV_USEINTS,
468 	IOV_NUMINTS,
469 	IOV_NUMLOCALINTS,
470 	IOV_HOSTREG,
471 	IOV_DEVREG,
472 	IOV_DIVISOR,
473 	IOV_SDMODE,
474 	IOV_HISPEED,
475 	IOV_HCIREGS,
476 	IOV_POWER,
477 	IOV_CLOCK,
478 	IOV_RXCHAIN
479 };
480 
481 const bcm_iovar_t sdioh_iovars[] = {
482 	{"sd_msglevel", IOV_MSGLEVEL,	0, 0,	IOVT_UINT32,	0 },
483 	{"sd_blockmode", IOV_BLOCKMODE, 0, 0,	IOVT_BOOL,	0 },
484 	{"sd_blocksize", IOV_BLOCKSIZE, 0, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
485 	{"sd_dma",	IOV_DMA,	0, 0,	IOVT_BOOL,	0 },
486 	{"sd_ints",	IOV_USEINTS,	0, 0,	IOVT_BOOL,	0 },
487 	{"sd_numints",	IOV_NUMINTS,	0, 0,	IOVT_UINT32,	0 },
488 	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32,	0 },
489 	{"sd_divisor",	IOV_DIVISOR,	0, 0,	IOVT_UINT32,	0 },
490 	{"sd_power",	IOV_POWER,	0, 0,	IOVT_UINT32,	0 },
491 	{"sd_clock",	IOV_CLOCK,	0, 0,	IOVT_UINT32,	0 },
492 	{"sd_mode",	IOV_SDMODE,	0, 0,	IOVT_UINT32,	100},
493 	{"sd_highspeed", IOV_HISPEED,	0, 0,	IOVT_UINT32,	0 },
494 	{"sd_rxchain",  IOV_RXCHAIN,    0, 0, 	IOVT_BOOL,	0 },
495 	{NULL, 0, 0, 0, 0, 0 }
496 };
497 
498 int
499 sdioh_iovar_op(sdioh_info_t *si, const char *name,
500                            void *params, int plen, void *arg, int len, bool set)
501 {
502 	const bcm_iovar_t *vi = NULL;
503 	int bcmerror = 0;
504 	int val_size;
505 	int32 int_val = 0;
506 	bool bool_val;
507 	uint32 actionid;
508 
509 	ASSERT(name);
510 	ASSERT(len >= 0);
511 
512 	/* Get must have return space; Set does not take qualifiers */
513 	ASSERT(set || (arg && len));
514 	ASSERT(!set || (!params && !plen));
515 
516 	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
517 
518 	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
519 		bcmerror = BCME_UNSUPPORTED;
520 		goto exit;
521 	}
522 
523 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
524 		goto exit;
525 
526 	/* Set up params so get and set can share the convenience variables */
527 	if (params == NULL) {
528 		params = arg;
529 		plen = len;
530 	}
531 
532 	if (vi->type == IOVT_VOID)
533 		val_size = 0;
534 	else if (vi->type == IOVT_BUFFER)
535 		val_size = len;
536 	else
537 		val_size = sizeof(int);
538 
539 	if (plen >= (int)sizeof(int_val))
540 		bcopy(params, &int_val, sizeof(int_val));
541 
542 	bool_val = (int_val != 0) ? TRUE : FALSE;
543 	BCM_REFERENCE(bool_val);
544 
545 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
546 	switch (actionid) {
547 	case IOV_GVAL(IOV_MSGLEVEL):
548 		int_val = (int32)sd_msglevel;
549 		bcopy(&int_val, arg, val_size);
550 		break;
551 
552 	case IOV_SVAL(IOV_MSGLEVEL):
553 		sd_msglevel = int_val;
554 		break;
555 
556 	case IOV_GVAL(IOV_BLOCKMODE):
557 		int_val = (int32)si->sd_blockmode;
558 		bcopy(&int_val, arg, val_size);
559 		break;
560 
561 	case IOV_SVAL(IOV_BLOCKMODE):
562 		si->sd_blockmode = (bool)int_val;
563 		/* Haven't figured out how to make non-block mode with DMA */
564 		break;
565 
566 	case IOV_GVAL(IOV_BLOCKSIZE):
567 		if ((uint32)int_val > si->num_funcs) {
568 			bcmerror = BCME_BADARG;
569 			break;
570 		}
571 		int_val = (int32)si->client_block_size[int_val];
572 		bcopy(&int_val, arg, val_size);
573 		break;
574 
575 	case IOV_SVAL(IOV_BLOCKSIZE):
576 	{
577 		uint func = ((uint32)int_val >> 16);
578 		uint blksize = (uint16)int_val;
579 		uint maxsize;
580 
581 		if (func > si->num_funcs) {
582 			bcmerror = BCME_BADARG;
583 			break;
584 		}
585 
586 		switch (func) {
587 		case 0: maxsize = 32; break;
588 		case 1: maxsize = BLOCK_SIZE_4318; break;
589 		case 2: maxsize = BLOCK_SIZE_4328; break;
590 		default: maxsize = 0;
591 		}
592 		if (blksize > maxsize) {
593 			bcmerror = BCME_BADARG;
594 			break;
595 		}
596 		if (!blksize) {
597 			blksize = maxsize;
598 		}
599 
600 		/* Now set it */
601 		si->client_block_size[func] = blksize;
602 
603 #ifdef USE_DYNAMIC_F2_BLKSIZE
604 		if (si->func[func] == NULL) {
605 			sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
606 			bcmerror = BCME_NORESOURCE;
607 			break;
608 		}
609 		sdio_claim_host(si->func[func]);
610 		bcmerror = sdio_set_block_size(si->func[func], blksize);
611 		if (bcmerror)
612 			sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
613 				__FUNCTION__, func, blksize, bcmerror));
614 		sdio_release_host(si->func[func]);
615 #endif /* USE_DYNAMIC_F2_BLKSIZE */
616 		break;
617 	}
618 
619 	case IOV_GVAL(IOV_RXCHAIN):
620 		int_val = (int32)si->use_rxchain;
621 		bcopy(&int_val, arg, val_size);
622 		break;
623 
624 	case IOV_GVAL(IOV_DMA):
625 		int_val = (int32)si->sd_use_dma;
626 		bcopy(&int_val, arg, val_size);
627 		break;
628 
629 	case IOV_SVAL(IOV_DMA):
630 		si->sd_use_dma = (bool)int_val;
631 		break;
632 
633 	case IOV_GVAL(IOV_USEINTS):
634 		int_val = (int32)si->use_client_ints;
635 		bcopy(&int_val, arg, val_size);
636 		break;
637 
638 	case IOV_SVAL(IOV_USEINTS):
639 		si->use_client_ints = (bool)int_val;
640 		if (si->use_client_ints)
641 			si->intmask |= CLIENT_INTR;
642 		else
643 			si->intmask &= ~CLIENT_INTR;
644 
645 		break;
646 
647 	case IOV_GVAL(IOV_DIVISOR):
648 		int_val = (uint32)sd_divisor;
649 		bcopy(&int_val, arg, val_size);
650 		break;
651 
652 	case IOV_SVAL(IOV_DIVISOR):
653 		/* set the clock to divisor, if value is non-zero & power of 2 */
654 		if (int_val && !(int_val & (int_val - 1))) {
655 			sd_divisor = int_val;
656 			sdmmc_set_clock_divisor(si, sd_divisor);
657 		} else {
658 			DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n",
659 				__FUNCTION__));
660 		}
661 		break;
662 
663 	case IOV_GVAL(IOV_POWER):
664 		int_val = (uint32)sd_power;
665 		bcopy(&int_val, arg, val_size);
666 		break;
667 
668 	case IOV_SVAL(IOV_POWER):
669 		sd_power = int_val;
670 		break;
671 
672 	case IOV_GVAL(IOV_CLOCK):
673 		int_val = (uint32)sd_clock;
674 		bcopy(&int_val, arg, val_size);
675 		break;
676 
677 	case IOV_SVAL(IOV_CLOCK):
678 		sd_clock = int_val;
679 		break;
680 
681 	case IOV_GVAL(IOV_SDMODE):
682 		int_val = (uint32)sd_sdmode;
683 		bcopy(&int_val, arg, val_size);
684 		break;
685 
686 	case IOV_SVAL(IOV_SDMODE):
687 		sd_sdmode = int_val;
688 		break;
689 
690 	case IOV_GVAL(IOV_HISPEED):
691 		int_val = (uint32)sd_hiok;
692 		bcopy(&int_val, arg, val_size);
693 		break;
694 
695 	case IOV_SVAL(IOV_HISPEED):
696 		sd_hiok = int_val;
697 		break;
698 
699 	case IOV_GVAL(IOV_NUMINTS):
700 		int_val = (int32)si->intrcount;
701 		bcopy(&int_val, arg, val_size);
702 		break;
703 
704 	case IOV_GVAL(IOV_NUMLOCALINTS):
705 		int_val = (int32)0;
706 		bcopy(&int_val, arg, val_size);
707 		break;
708 	default:
709 		bcmerror = BCME_UNSUPPORTED;
710 		break;
711 	}
712 exit:
713 
714 	return bcmerror;
715 }
716 
717 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
718 
719 SDIOH_API_RC
720 sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
721 {
722 	SDIOH_API_RC status;
723 	uint8 data;
724 
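	/* SDIOD_CCCR_BRCM_SEPINT fields: SDIO_SEPINT_MASK enables the OOB interrupt
	 * line, SDIO_SEPINT_OE drives it as an output, and SDIO_SEPINT_ACT_HI
	 * selects active-high polarity.
	 */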
725 	if (enable)
726 		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
727 	else
728 		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
729 
730 	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
731 	return status;
732 }
733 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
734 
735 extern SDIOH_API_RC
736 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
737 {
738 	SDIOH_API_RC status;
739 	/* No lock needed since sdioh_request_byte does locking */
740 	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
741 	return status;
742 }
743 
744 extern SDIOH_API_RC
745 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
746 {
747 	/* No lock needed since sdioh_request_byte does locking */
748 	SDIOH_API_RC status;
749 	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
750 	return status;
751 }
752 
753 static int
754 sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
755 {
756 	/* read 24 bits and return valid 17 bit addr */
757 	int i;
758 	uint32 scratch, regdata;
759 	uint8 *ptr = (uint8 *)&scratch;
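	/* Assemble the three CIS pointer bytes little-endian into scratch; the
	 * unwritten top byte is discarded by the 17-bit mask below.
	 */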
760 	for (i = 0; i < 3; i++) {
761 		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
762 			sd_err(("%s: Can't read!\n", __FUNCTION__));
763 
764 		*ptr++ = (uint8) regdata;
765 		regaddr++;
766 	}
767 
768 	/* Only the lower 17-bits are valid */
769 	scratch = ltoh32(scratch);
770 	scratch &= 0x0001FFFF;
771 	return (scratch);
772 }
773 
774 extern SDIOH_API_RC
775 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
776 {
777 	uint32 count;
778 	int offset;
779 	uint32 foo;
780 	uint8 *cis = cisd;
781 
782 	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
783 
784 	if (!sd->func_cis_ptr[func]) {
785 		bzero(cis, length);
786 		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
787 		return SDIOH_API_RC_FAIL;
788 	}
789 
790 	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
791 
792 	for (count = 0; count < length; count++) {
793 		offset =  sd->func_cis_ptr[func] + count;
794 		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
795 			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
796 			return SDIOH_API_RC_FAIL;
797 		}
798 
799 		*cis = (uint8)(foo & 0xff);
800 		cis++;
801 	}
802 
803 	return SDIOH_API_RC_SUCCESS;
804 }
805 
806 extern SDIOH_API_RC
807 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
808 {
809 	int err_ret = 0;
810 #if defined(MMC_SDIO_ABORT)
811 	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
812 #endif // endif
813 
814 	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
815 
816 	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
817 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
818 	if(rw) { /* CMD52 Write */
819 		if (func == 0) {
820 			/* Can only directly write to some F0 registers.  Handle F2 enable
821 			 * as a special case.
822 			 */
823 			if (regaddr == SDIOD_CCCR_IOEN) {
824 #if defined(BT_OVER_SDIO)
825 				do {
826 				if (sd->func[3]) {
827 					sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
828 
829 					if (*byte & SDIO_FUNC_ENABLE_3) {
830 						sdio_claim_host(sd->func[3]);
831 
832 						/* Set Function 3 Block Size */
833 						err_ret = sdio_set_block_size(sd->func[3],
834 						sd_f3_blocksize);
835 						if (err_ret) {
836 							sd_err(("F3 blocksize set err%d\n",
837 								err_ret));
838 						}
839 
840 						/* Enable Function 3 */
841 						sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
842 						sd->func[3]));
843 						err_ret = sdio_enable_func(sd->func[3]);
844 						if (err_ret) {
845 							sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
846 								err_ret));
847 						}
848 
849 						sdio_release_host(sd->func[3]);
850 
851 						break;
852 					} else if (*byte & SDIO_FUNC_DISABLE_3) {
853 						sdio_claim_host(sd->func[3]);
854 
855 						/* Disable Function 3 */
856 						sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
857 						sd->func[3]));
858 						err_ret = sdio_disable_func(sd->func[3]);
859 						if (err_ret) {
860 							sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
861 								err_ret));
862 						}
863 						sdio_release_host(sd->func[3]);
864 						sd->func[3] = NULL;
865 
866 						break;
867 					}
868 				}
869 #endif /* defined (BT_OVER_SDIO) */
870 				if (sd->func[2]) {
871 					sdio_claim_host(sd->func[2]);
872 					if (*byte & SDIO_FUNC_ENABLE_2) {
873 						/* Enable Function 2 */
874 						err_ret = sdio_enable_func(sd->func[2]);
875 						if (err_ret) {
876 							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
877 								err_ret));
878 						}
879 					} else {
880 						/* Disable Function 2 */
881 						err_ret = sdio_disable_func(sd->func[2]);
882 						if (err_ret) {
883 							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
884 								err_ret));
885 						}
886 					}
887 					sdio_release_host(sd->func[2]);
888 				}
889 #if defined(BT_OVER_SDIO)
890 			} while (0);
891 #endif /* defined (BT_OVER_SDIO) */
892 		}
893 #if defined(MMC_SDIO_ABORT)
894 			/* to allow abort command through F1 */
895 			else if (regaddr == SDIOD_CCCR_IOABORT) {
896 				while (sdio_abort_retry--) {
897 					if (sd->func[func]) {
898 						sdio_claim_host(sd->func[func]);
899 						/*
900 						 * This sdio_writeb() could be replaced with
901 						 * another API depending on MMC driver changes.
902 						 * For now it is a temporary workaround.
903 						 */
904 						sdio_writeb(sd->func[func],
905 							*byte, regaddr, &err_ret);
906 						sdio_release_host(sd->func[func]);
907 					}
908 					if (!err_ret)
909 						break;
910 				}
911 			}
912 #endif /* MMC_SDIO_ABORT */
913 			else if (regaddr < 0xF0) {
914 				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
915 			} else {
916 				/* Claim host controller, perform F0 write, and release */
917 				if (sd->func[func]) {
918 					sdio_claim_host(sd->func[func]);
919 					sdio_f0_writeb(sd->func[func],
920 						*byte, regaddr, &err_ret);
921 					sdio_release_host(sd->func[func]);
922 				}
923 			}
924 		} else {
925 			/* Claim host controller, perform Fn write, and release */
926 			if (sd->func[func]) {
927 				sdio_claim_host(sd->func[func]);
928 				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
929 				sdio_release_host(sd->func[func]);
930 			}
931 		}
932 	} else { /* CMD52 Read */
933 		/* Claim host controller, perform Fn read, and release */
934 		if (sd->func[func]) {
935 			sdio_claim_host(sd->func[func]);
936 			if (func == 0) {
937 				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
938 			} else {
939 				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
940 			}
941 			sdio_release_host(sd->func[func]);
942 		}
943 	}
944 
945 	if (err_ret) {
946 		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
947 		} else {
948 			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
949 				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
950 		}
951 	}
952 
953 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
954 }
955 
956 extern SDIOH_API_RC
957 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
958                                    uint32 *word, uint nbytes)
959 {
960 	int err_ret = SDIOH_API_RC_FAIL;
961 #if defined(MMC_SDIO_ABORT)
962 	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
963 #endif // endif
964 
965 	if (func == 0) {
966 		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
967 		return SDIOH_API_RC_FAIL;
968 	}
969 
970 	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
971 	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
972 
973 	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
974 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
975 	/* Claim host controller */
976 	sdio_claim_host(sd->func[func]);
977 
978 	if(rw) { /* CMD52 Write */
979 		if (nbytes == 4) {
980 			sdio_writel(sd->func[func], *word, addr, &err_ret);
981 		} else if (nbytes == 2) {
982 			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
983 		} else {
984 			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
985 		}
986 	} else { /* CMD52 Read */
987 		if (nbytes == 4) {
988 			*word = sdio_readl(sd->func[func], addr, &err_ret);
989 		} else if (nbytes == 2) {
990 			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
991 		} else {
992 			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
993 		}
994 	}
995 
996 	/* Release host controller */
997 	sdio_release_host(sd->func[func]);
998 
999 	if (err_ret) {
1000 #if defined(MMC_SDIO_ABORT)
1001 		/* Any error on CMD53 transaction should abort that function using function 0. */
1002 		while (sdio_abort_retry--) {
1003 			if (sd->func[0]) {
1004 				sdio_claim_host(sd->func[0]);
1005 				/*
1006 				 * This sdio_writeb() could be replaced with another API
1007 				 * depending on MMC driver changes.
1008 				 * For now it is a temporary workaround.
1009 				 */
1010 				sdio_writeb(sd->func[0],
1011 					func, SDIOD_CCCR_IOABORT, &err_ret);
1012 				sdio_release_host(sd->func[0]);
1013 			}
1014 			if (!err_ret)
1015 				break;
1016 		}
1017 		if (err_ret)
1018 #endif /* MMC_SDIO_ABORT */
1019 		{
1020 			sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
1021 				rw ? "Write" : "Read", err_ret));
1022 		}
1023 	}
1024 
1025 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1026 }
1027 
1028 #ifdef BCMSDIOH_TXGLOM
1029 static SDIOH_API_RC
1030 sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1031                      uint addr, void *pkt)
1032 {
1033 	bool fifo = (fix_inc == SDIOH_DATA_FIX);
1034 	int err_ret = 0;
1035 	void *pnext;
1036 	uint ttl_len, pkt_offset;
1037 	uint blk_num;
1038 	uint blk_size;
1039 	uint max_blk_count;
1040 	uint max_req_size;
1041 	struct mmc_request mmc_req;
1042 	struct mmc_command mmc_cmd;
1043 	struct mmc_data mmc_dat;
1044 	uint32 sg_count;
1045 	struct sdio_func *sdio_func = sd->func[func];
1046 	struct mmc_host *host = sdio_func->card->host;
1047 
1048 	sd_trace(("%s: Enter\n", __FUNCTION__));
1049 	ASSERT(pkt);
1050 	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
1051 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1052 
1053 	blk_size = sd->client_block_size[func];
1054 	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
1055 	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
1056 
1057 	pkt_offset = 0;
1058 	pnext = pkt;
1059 
1060 	while (pnext != NULL) {
1061 		ttl_len = 0;
1062 		sg_count = 0;
1063 		memset(&mmc_req, 0, sizeof(struct mmc_request));
1064 		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
1065 		memset(&mmc_dat, 0, sizeof(struct mmc_data));
1066 		sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
1067 
1068 		/* Set up scatter-gather DMA descriptors. This loop finds the maximum
1069 		 * data we can transfer with one CMD53. Blocks per command are limited by
1070 		 * the host max_req_size and the 9-bit block count. When the total length
1071 		 * of the packet chain exceeds max_req_size, multiple SD_IO_RW_EXTENDED
1072 		 * commands are used (each transfer is still block aligned)
1073 		 */
1074 		while (pnext != NULL && ttl_len < max_req_size) {
1075 			int pkt_len;
1076 			int sg_data_size;
1077 			uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
1078 
1079 			ASSERT(pdata != NULL);
1080 			pkt_len = PKTLEN(sd->osh, pnext);
1081 			sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
1082 			/* sg_count is unlikely to be larger than the array size, and this is
1083 			 * NOT something we can handle here, but in case it happens, PLEASE put
1084 			 * a restriction on max tx/glom count (based on host->max_segs).
1085 			 */
1086 			if (sg_count >= ARRAYSIZE(sd->sg_list)) {
1087 				sd_err(("%s: sg list entries(%u) exceed limit(%u),"
1088 					" sd blk_size=%u\n",
1089 					__FUNCTION__, sg_count, ARRAYSIZE(sd->sg_list), blk_size));
1090 				return (SDIOH_API_RC_FAIL);
1091 			}
1092 			pdata += pkt_offset;
1093 
1094 			sg_data_size = pkt_len - pkt_offset;
1095 			if (sg_data_size > max_req_size - ttl_len)
1096 				sg_data_size = max_req_size - ttl_len;
1097 			/* some platforms put a restriction on the data size of each scatter-gather
1098 			 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
1099 			 * max_seg_size
1100 			 */
1101 			if (sg_data_size > host->max_seg_size) {
1102 				sg_data_size = host->max_seg_size;
1103 			}
1104 			sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
1105 
1106 			ttl_len += sg_data_size;
1107 			pkt_offset += sg_data_size;
1108 			if (pkt_offset == pkt_len) {
1109 				pnext = PKTNEXT(sd->osh, pnext);
1110 				pkt_offset = 0;
1111 			}
1112 		}
1113 
1114 		if (ttl_len % blk_size != 0) {
1115 			sd_err(("%s, data length %d not aligned to block size %d\n",
1116 				__FUNCTION__,  ttl_len, blk_size));
1117 			return SDIOH_API_RC_FAIL;
1118 		}
1119 		blk_num = ttl_len / blk_size;
1120 		mmc_dat.sg = sd->sg_list;
1121 		mmc_dat.sg_len = sg_count;
1122 		mmc_dat.blksz = blk_size;
1123 		mmc_dat.blocks = blk_num;
1124 		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
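		/* CMD53 argument: bit 31 R/W flag, bits 30:28 function number, bit 27
		 * block mode, bit 26 OP code (incrementing address), bits 25:9 register
		 * address, bits 8:0 block count.
		 */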
1125 		mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
1126 		mmc_cmd.arg = write ? 1<<31 : 0;
1127 		mmc_cmd.arg |= (func & 0x7) << 28;
1128 		mmc_cmd.arg |= 1<<27;
1129 		mmc_cmd.arg |= fifo ? 0 : 1<<26;
1130 		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
1131 		mmc_cmd.arg |= blk_num & 0x1FF;
1132 		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
1133 		mmc_req.cmd = &mmc_cmd;
1134 		mmc_req.data = &mmc_dat;
1135 		if (!fifo)
1136 			addr += ttl_len;
1137 
1138 		sdio_claim_host(sdio_func);
1139 		mmc_set_data_timeout(&mmc_dat, sdio_func->card);
1140 		mmc_wait_for_req(host, &mmc_req);
1141 		sdio_release_host(sdio_func);
1142 
1143 		err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
1144 		if (0 != err_ret) {
1145 			sd_err(("%s:CMD53 %s failed with code %d\n",
1146 				__FUNCTION__, write ? "write" : "read", err_ret));
1147 			return SDIOH_API_RC_FAIL;
1148 		}
1149 	}
1150 
1151 	sd_trace(("%s: Exit\n", __FUNCTION__));
1152 	return SDIOH_API_RC_SUCCESS;
1153 }
1154 #endif /* BCMSDIOH_TXGLOM */
1155 
1156 static SDIOH_API_RC
1157 sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1158                      uint addr, uint8 *buf, uint len)
1159 {
1160 	bool fifo = (fix_inc == SDIOH_DATA_FIX);
1161 	int err_ret = 0;
1162 
1163 	sd_trace(("%s: Enter\n", __FUNCTION__));
1164 	ASSERT(buf);
1165 
1166 	/* NOTE:
1167 	 * For all writes, each packet length is aligned to 32 (or 4)
1168 	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
1169 	 * is aligned to block boundary. If you want to align each packet to
1170 	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
1171 	 *
1172 	 * For reads, the alignment is done in sdioh_request_buffer.
1173 	 *
1174 	 */
1175 	sdio_claim_host(sd->func[func]);
1176 
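	/* Note: both write branches use sdio_memcpy_toio() (incrementing address);
	 * fixed-address (FIFO) writes are not treated specially here.
	 */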
1177 	if ((write) && (!fifo))
1178 		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1179 	else if (write)
1180 		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1181 	else if (fifo)
1182 		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
1183 	else
1184 		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
1185 
1186 	sdio_release_host(sd->func[func]);
1187 
1188 	if (err_ret)
1189 		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
1190 		       (write) ? "TX" : "RX", buf, addr, len, err_ret));
1191 	else
1192 		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
1193 			(write) ? "TX" : "RX", buf, addr, len));
1194 
1195 	sd_trace(("%s: Exit\n", __FUNCTION__));
1196 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1197 }
1198 
1199 /*
1200  * This function takes a buffer or packet, and fixes everything up so that in the
1201  * end, a DMA-able packet is created.
1202  *
1203  * A buffer does not have an associated packet pointer, and may or may not be aligned.
1204  * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
1205  * then all the packets in the chain must be properly aligned.  If the packet data is not
1206  * aligned, then there may only be one packet, and in this case, it is copied to a new
1207  * aligned packet.
1208  *
1209  */
1210 extern SDIOH_API_RC
1211 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
1212 	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
1213 {
1214 	SDIOH_API_RC status;
1215 	void *tmppkt;
1216 
1217 	sd_trace(("%s: Enter\n", __FUNCTION__));
1218 	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
1219 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1220 
1221 	if (pkt) {
1222 #ifdef BCMSDIOH_TXGLOM
1223 		/* packet chain, only used for tx/rx glom, all packets length
1224 		 * are aligned, total length is a block multiple
1225 		 */
1226 		if (PKTNEXT(sd->osh, pkt))
1227 			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
1228 #endif /* BCMSDIOH_TXGLOM */
1229 		/* non-glom mode, ignore the buffer parameter and use the packet pointer
1230 		 * (this shouldn't happen)
1231 		 */
1232 		buffer = PKTDATA(sd->osh, pkt);
1233 		buf_len = PKTLEN(sd->osh, pkt);
1234 	}
1235 
1236 	ASSERT(buffer);
1237 
1238 	/* buffer and length are aligned, use it directly so we can avoid memory copy */
1239 	if (((ulong)buffer & (ARCH_DMA_MINALIGN - 1)) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
1240 		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
1241 
1242 	sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
1243 		__FUNCTION__, write, buffer, buf_len));
1244 
1245 	/* otherwise, a memory copy is needed as the input buffer is not aligned */
1246 	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
1247 	if (tmppkt == NULL) {
1248 		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
1249 		return SDIOH_API_RC_FAIL;
1250 	}
1251 
1252 	if (write)
1253 		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
1254 
1255 	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
1256 		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
1257 
1258 	if (!write)
1259 		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
1260 
1261 	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
1262 
1263 	return status;
1264 }
1265 
1266 /* this function performs "abort" for both of host & device */
1267 extern int
1268 sdioh_abort(sdioh_info_t *sd, uint func)
1269 {
1270 #if defined(MMC_SDIO_ABORT)
1271 	char t_func = (char) func;
1272 #endif /* defined(MMC_SDIO_ABORT) */
1273 	sd_trace(("%s: Enter\n", __FUNCTION__));
1274 
1275 #if defined(MMC_SDIO_ABORT)
1276 	/* issue abort cmd52 command through F1 */
1277 	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
1278 #endif /* defined(MMC_SDIO_ABORT) */
1279 
1280 	sd_trace(("%s: Exit\n", __FUNCTION__));
1281 	return SDIOH_API_RC_SUCCESS;
1282 }
1283 
1284 /* Reset and re-initialize the device */
1285 int sdioh_sdio_reset(sdioh_info_t *si)
1286 {
1287 	sd_trace(("%s: Enter\n", __FUNCTION__));
1288 	sd_trace(("%s: Exit\n", __FUNCTION__));
1289 	return SDIOH_API_RC_SUCCESS;
1290 }
1291 
1292 /* Disable device interrupt */
1293 void
1294 sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
1295 {
1296 	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1297 	sd->intmask &= ~CLIENT_INTR;
1298 }
1299 
1300 /* Enable device interrupt */
1301 void
1302 sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
1303 {
1304 	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1305 	sd->intmask |= CLIENT_INTR;
1306 }
1307 
1308 /* Read client card reg */
1309 int
1310 sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
1311 {
1312 
1313 	if ((func == 0) || (regsize == 1)) {
1314 		uint8 temp = 0;
1315 
1316 		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1317 		*data = temp;
1318 		*data &= 0xff;
1319 		sd_data(("%s: byte read data=0x%02x\n",
1320 		         __FUNCTION__, *data));
1321 	} else {
1322 		if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
1323 			return BCME_SDIO_ERROR;
1324 		}
1325 		if (regsize == 2)
1326 			*data &= 0xffff;
1327 
1328 		sd_data(("%s: word read data=0x%08x\n",
1329 		         __FUNCTION__, *data));
1330 	}
1331 
1332 	return SUCCESS;
1333 }
1334 
1335 #if !defined(OOB_INTR_ONLY)
1336 /* bcmsdh_sdmmc interrupt handler */
1337 static void IRQHandler(struct sdio_func *func)
1338 {
1339 	sdioh_info_t *sd;
1340 
1341 	sd = sdio_get_drvdata(func);
1342 
1343 	ASSERT(sd != NULL);
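	/* The MMC core calls SDIO interrupt handlers with the host already claimed;
	 * release it here so the DHD handler can perform its own claims, and
	 * re-claim before returning to the core.
	 */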
1344 	sdio_release_host(sd->func[0]);
1345 
1346 	if (sd->use_client_ints) {
1347 		sd->intrcount++;
1348 		ASSERT(sd->intr_handler);
1349 		ASSERT(sd->intr_handler_arg);
1350 		(sd->intr_handler)(sd->intr_handler_arg);
1351 	} else {
1352 		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
1353 
1354 		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
1355 		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
1356 	}
1357 
1358 	sdio_claim_host(sd->func[0]);
1359 }
1360 
1361 /* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
1362 static void IRQHandlerF2(struct sdio_func *func)
1363 {
1364 	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
1365 }
1366 #endif /* !defined(OOB_INTR_ONLY) */
1367 
1368 #ifdef NOTUSED
1369 /* Write client card reg */
1370 static int
1371 sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
1372 {
1373 
1374 	if ((func == 0) || (regsize == 1)) {
1375 		uint8 temp;
1376 
1377 		temp = data & 0xff;
1378 		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1379 		sd_data(("%s: byte write data=0x%02x\n",
1380 		         __FUNCTION__, data));
1381 	} else {
1382 		if (regsize == 2)
1383 			data &= 0xffff;
1384 
1385 		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize);
1386 
1387 		sd_data(("%s: word write data=0x%08x\n",
1388 		         __FUNCTION__, data));
1389 	}
1390 
1391 	return SUCCESS;
1392 }
1393 #endif /* NOTUSED */
1394 
1395 int
1396 sdioh_start(sdioh_info_t *sd, int stage)
1397 {
1398 #if defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)
1399 	int ret;
1400 
1401 	if (!sd) {
1402 		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
1403 		return (0);
1404 	}
1405 
1406 	/* This must be done in stages because the interrupt cannot be enabled
1407 		until firmware download is complete; otherwise polled SDIO access
1408 		would get in the way.
1409 	*/
1410 	if (sd->func[0]) {
1411 			if (stage == 0) {
1412 		/* Since power to the chip was cut, the device has to be
1413 			re-enumerated. Set the block size and enable
1414 			function 1 in preparation for downloading
1415 			the firmware.
1416 		*/
1417 		/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
1418 		   2.6.27. The implementation prior to that is buggy and needs Broadcom's
1419 		   patch.
1420 		*/
1421 		if ((ret = sdio_reset_comm(sd->func[0]->card))) {
1422 			sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
1423 			return ret;
1424 		}
1425 		else {
1426 			sd->num_funcs = 2;
1427 			sd->sd_blockmode = TRUE;
1428 			sd->use_client_ints = TRUE;
1429 			sd->client_block_size[0] = 64;
1430 
1431 			if (sd->func[1]) {
1432 				/* Claim host controller */
1433 				sdio_claim_host(sd->func[1]);
1434 
1435 				sd->client_block_size[1] = 64;
1436 				ret = sdio_set_block_size(sd->func[1], 64);
1437 				if (ret) {
1438 					sd_err(("bcmsdh_sdmmc: Failed to set F1 "
1439 						"blocksize(%d)\n", ret));
1440 				}
1441 
1442 				/* Release host controller F1 */
1443 				sdio_release_host(sd->func[1]);
1444 			}
1445 
1446 			if (sd->func[2]) {
1447 				/* Claim host controller F2 */
1448 				sdio_claim_host(sd->func[2]);
1449 
1450 				sd->client_block_size[2] = sd_f2_blocksize;
1451 				ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
1452 				if (ret) {
1453 					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
1454 						"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
1455 				}
1456 
1457 				/* Release host controller F2 */
1458 				sdio_release_host(sd->func[2]);
1459 			}
1460 
1461 			sdioh_sdmmc_card_enablefuncs(sd);
1462 			}
1463 		} else {
1464 #if !defined(OOB_INTR_ONLY)
1465 			sdio_claim_host(sd->func[0]);
1466 			if (sd->func[2])
1467 				sdio_claim_irq(sd->func[2], IRQHandlerF2);
1468 			if (sd->func[1])
1469 				sdio_claim_irq(sd->func[1], IRQHandler);
1470 			sdio_release_host(sd->func[0]);
1471 #else /* defined(OOB_INTR_ONLY) */
1472 #if defined(HW_OOB)
1473 			sdioh_enable_func_intr(sd);
1474 #endif // endif
1475 			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
1476 #endif /* !defined(OOB_INTR_ONLY) */
1477 		}
1478 	}
1479 	else
1480 		sd_err(("%s Failed\n", __FUNCTION__));
1481 #endif /* defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX) */
1482 
1483 	return (0);
1484 }
1485 
1486 int
1487 sdioh_stop(sdioh_info_t *sd)
1488 {
1489 #if defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)
1490 	/* The MSM7201A Android SDIO stack has an interrupt bug:
1491 		it polls internally within the SDIO stack, which causes
1492 		problems when the device is turned off. So unregister
1493 		the interrupt with the SDIO stack to stop the
1494 		polling.
1495 	*/
1496 	if (sd->func[0]) {
1497 #if !defined(OOB_INTR_ONLY)
1498 		sdio_claim_host(sd->func[0]);
1499 		if (sd->func[1])
1500 			sdio_release_irq(sd->func[1]);
1501 		if (sd->func[2])
1502 			sdio_release_irq(sd->func[2]);
1503 		sdio_release_host(sd->func[0]);
1504 #else /* defined(OOB_INTR_ONLY) */
1505 #if defined(HW_OOB)
1506 		sdioh_disable_func_intr(sd);
1507 #endif // endif
1508 		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
1509 #endif /* !defined(OOB_INTR_ONLY) */
1510 	}
1511 	else
1512 		sd_err(("%s Failed\n", __FUNCTION__));
1513 #endif /* defined(OEM_ANDROID) ||  defined(OEM_EMBEDDED_LINUX) */
1514 	return (0);
1515 }
1516 
1517 int
1518 sdioh_waitlockfree(sdioh_info_t *sd)
1519 {
1520 	return (1);
1521 }
1522 
1523 SDIOH_API_RC
1524 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1525 {
1526 	return SDIOH_API_RC_FAIL;
1527 }
1528 
1529 SDIOH_API_RC
1530 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1531 {
1532 	return SDIOH_API_RC_FAIL;
1533 }
1534 
1535 bool
1536 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1537 {
1538 	return FALSE;
1539 }
1540 
1541 SDIOH_API_RC
1542 sdioh_gpio_init(sdioh_info_t *sd)
1543 {
1544 	return SDIOH_API_RC_FAIL;
1545 }
1546 
1547 uint
1548 sdmmc_get_clock_rate(sdioh_info_t *sd)
1549 {
1550 	struct sdio_func *sdio_func = sd->func[0];
1551 	struct mmc_host *host = sdio_func->card->host;
1552 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1553 	return mmc_host_clk_rate(host);
1554 #else
1555 	return host->ios.clock;
1556 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6)) */
1557 }
1558 
1559 void
1560 sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
1561 {
1562 	struct sdio_func *sdio_func = sd->func[0];
1563 	struct mmc_host *host = sdio_func->card->host;
1564 	struct mmc_ios *ios = &host->ios;
1565 
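	/* This programs the host controller ios directly (bypassing the MMC core's
	 * clock management), clamping the requested rate to the host's f_min/f_max.
	 */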
1566 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1567 	mmc_host_clk_hold(host);
1568 #endif // endif
1569 	DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
1570 	if (hz < host->f_min) {
1571 		DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
1572 		hz = host->f_min;
1573 	}
1574 
1575 	if (hz > host->f_max) {
1576 		DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
1577 		hz = host->f_max;
1578 	}
1579 	ios->clock = hz;
1580 	host->ops->set_ios(host, ios);
1581 	DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
1582 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1583 	mmc_host_clk_release(host);
1584 #endif // endif
1585 }
1586 
1587 void
1588 sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
1589 {
1590 	uint hz;
1591 	uint old_div = sdmmc_get_clock_rate(sd);
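	/* Note: sdmmc_get_clock_rate() returns the current clock in Hz, so this
	 * early return only triggers if that rate happens to equal the requested
	 * divisor value.
	 */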
1592 	if (old_div == sd_div) {
1593 		return;
1594 	}
1595 
1596 	hz = sd->sd_clk_rate / sd_div;
1597 	sdmmc_set_clock_rate(sd, hz);
1598 }
1599