xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/bcmsdh_sdmmc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
3  *
4  * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Proprietary,Open:>>
28  *
29  * $Id: bcmsdh_sdmmc.c 690631 2017-03-17 04:27:33Z $
30  */
31 #include <typedefs.h>
32 
33 #include <bcmdevs.h>
34 #include <bcmendian.h>
35 #include <bcmutils.h>
36 #include <osl.h>
37 #include <sdio.h>	/* SDIO Device and Protocol Specs */
38 #include <sdioh.h>	/* Standard SDIO Host Controller Specification */
39 #include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
40 #include <sdiovar.h>	/* ioctl/iovars */
41 
42 #include <linux/mmc/core.h>
43 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
44 #include <drivers/mmc/core/host.h>
45 #else
46 #include <linux/mmc/host.h>
47 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8)) */
48 #include <linux/mmc/card.h>
49 #include <linux/mmc/sdio_func.h>
50 #include <linux/mmc/sdio_ids.h>
51 
52 #include <dngl_stats.h>
53 #include <dhd.h>
54 #include <dhd_dbg.h>
55 
56 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
57 #include <linux/suspend.h>
58 extern volatile bool dhd_mmc_suspend;
59 #endif // endif
60 #include "bcmsdh_sdmmc.h"
61 
62 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= \
63 	KERNEL_VERSION(4, 4, 0))
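/*
 * On kernels where the MMC core does not provide the mmc_host_clk_hold/
 * mmc_host_clk_release/mmc_host_clk_rate clock-gating helpers, supply no-op
 * stand-ins; the current clock rate is read directly from host->ios.clock.
 */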
64 static inline void
65 mmc_host_clk_hold(struct mmc_host *host)
66 {
67 	BCM_REFERENCE(host);
68 	return;
69 }
70 
71 static inline void
72 mmc_host_clk_release(struct mmc_host *host)
73 {
74 	BCM_REFERENCE(host);
75 	return;
76 }
77 
78 static inline unsigned int
79 mmc_host_clk_rate(struct mmc_host *host)
80 {
81 	return host->ios.clock;
82 }
83 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0) || >= KERNEL_VERSION(4, 4, 0) */
84 
85 #ifndef BCMSDH_MODULE
86 extern int sdio_function_init(void);
87 extern void sdio_function_cleanup(void);
88 #endif /* BCMSDH_MODULE */
89 
90 #if !defined(OOB_INTR_ONLY)
91 static void IRQHandler(struct sdio_func *func);
92 static void IRQHandlerF2(struct sdio_func *func);
93 #endif /* !defined(OOB_INTR_ONLY) */
94 static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
95 #if defined(OEM_ANDROID) && !defined(CONFIG_SOC_S5E5515)
96 static int sdio_reset_comm(struct mmc_card *card)
97 {
98 	return 0;
99 }
100 #else
101 extern int sdio_reset_comm(struct mmc_card *card);
102 #endif /* OEM_ANDROID && !CONFIG_SOC_S5E5515 */
103 
104 #define DEFAULT_SDIO_F2_BLKSIZE		512
105 #ifndef CUSTOM_SDIO_F2_BLKSIZE
106 #define CUSTOM_SDIO_F2_BLKSIZE		DEFAULT_SDIO_F2_BLKSIZE
107 #endif // endif
108 
109 #define DEFAULT_SDIO_F1_BLKSIZE		64
110 #ifndef CUSTOM_SDIO_F1_BLKSIZE
111 #define CUSTOM_SDIO_F1_BLKSIZE		DEFAULT_SDIO_F1_BLKSIZE
112 #endif // endif
113 
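/* CMD53 (SD_IO_RW_EXTENDED) carries a 9-bit block count, so a single command can transfer at most 511 blocks */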
114 #define MAX_IO_RW_EXTENDED_BLK		511
115 
116 uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
117 uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
118 uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE;
119 
120 #if defined(BT_OVER_SDIO)
121 uint sd_f3_blocksize = 64;
122 #endif /* defined (BT_OVER_SDIO) */
123 
124 uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
125 
126 uint sd_power = 1;		/* Default to SD Slot powered ON */
127 uint sd_clock = 1;		/* Default to SD Clock turned ON */
128 uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
129 uint sd_msglevel = SDH_ERROR_VAL;
130 uint sd_use_dma = TRUE;
131 
132 
133 
134 #ifndef CUSTOM_RXCHAIN
135 #define CUSTOM_RXCHAIN 0
136 #endif // endif
137 
138 DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
139 DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
140 DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
141 DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
142 
143 #if !defined(ARCH_DMA_MINALIGN)
144 #define ARCH_DMA_MINALIGN 128
145 #endif /* !defined(ARCH_DMA_MINALIGN) */
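/*
 * sdioh_request_buffer() transfers a caller buffer directly only when its
 * address is ARCH_DMA_MINALIGN-aligned and its length is a multiple of
 * (DMA_ALIGN_MASK + 1); otherwise the data is bounced through an aligned
 * temporary packet.
 */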
146 #define DMA_ALIGN_MASK	0x03
147 #define MMC_SDIO_ABORT_RETRY_LIMIT 5
148 
149 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
150 
151 #if defined(BT_OVER_SDIO)
152 extern
153 void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
154 {
155 	sd->func[3] = func;
156 	sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3]));
157 }
158 #endif /* defined (BT_OVER_SDIO) */
159 
160 void  sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
161 uint  sdmmc_get_clock_rate(sdioh_info_t *sd);
162 void  sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
163 
164 static int
165 sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
166 {
167 	int err_ret;
168 	uint32 fbraddr;
169 	uint8 func;
170 
171 	sd_trace(("%s\n", __FUNCTION__));
172 
173 	/* Get the Card's common CIS address */
174 	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
175 	sd->func_cis_ptr[0] = sd->com_cis_ptr;
176 	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
177 
178 	/* Get the Card's function CIS (for each function) */
179 	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
180 	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
181 		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
182 		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
183 		         __FUNCTION__, func, sd->func_cis_ptr[func]));
184 	}
185 
186 	sd->func_cis_ptr[0] = sd->com_cis_ptr;
187 	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
188 
189 	/* Enable Function 1 */
190 	sdio_claim_host(sd->func[1]);
191 	err_ret = sdio_enable_func(sd->func[1]);
192 	sdio_release_host(sd->func[1]);
193 	if (err_ret) {
194 		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
195 	}
196 
197 	return FALSE;
198 }
199 
200 /*
201  *	Public entry points & extern's
202  */
203 extern sdioh_info_t *
204 sdioh_attach(osl_t *osh, struct sdio_func *func)
205 {
206 	sdioh_info_t *sd = NULL;
207 	int err_ret;
208 
209 	sd_trace(("%s\n", __FUNCTION__));
210 
211 	if (func == NULL) {
212 		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
213 		return NULL;
214 	}
215 
216 	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
217 		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
218 		return NULL;
219 	}
220 	bzero((char *)sd, sizeof(sdioh_info_t));
221 	sd->osh = osh;
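	/*
	 * The MMC core enumerates SDIO functions from sdio_func[0] (= F1);
	 * map them to this driver's 1-based indexing, with fake_func0 standing
	 * in for F0, which the core does not expose as a struct sdio_func.
	 */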
222 	sd->fake_func0.num = 0;
223 	sd->fake_func0.card = func->card;
224 	sd->func[0] = &sd->fake_func0;
225 	sd->func[1] = func->card->sdio_func[0];
226 	sd->func[2] = func->card->sdio_func[1];
227 
228 #if defined(BT_OVER_SDIO)
229 	sd->func[3] = NULL;
230 #endif /* defined (BT_OVER_SDIO) */
231 
232 
233 	sd->num_funcs = 2;
234 	sd->sd_blockmode = TRUE;
235 	sd->use_client_ints = TRUE;
236 	sd->client_block_size[0] = 64;
237 	sd->use_rxchain = CUSTOM_RXCHAIN;
238 	if (sd->func[1] == NULL || sd->func[2] == NULL) {
239 		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
240 		goto fail;
241 	}
242 	sdio_set_drvdata(sd->func[1], sd);
243 
244 	sdio_claim_host(sd->func[1]);
245 	sd->client_block_size[1] = sd_f1_blocksize;
246 	err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize);
247 	sdio_release_host(sd->func[1]);
248 	if (err_ret) {
249 		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
250 		goto fail;
251 	}
252 
253 	sdio_claim_host(sd->func[2]);
254 	sd->client_block_size[2] = sd_f2_blocksize;
255 	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
256 	sdio_release_host(sd->func[2]);
257 	if (err_ret) {
258 		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
259 			sd_f2_blocksize, err_ret));
260 		goto fail;
261 	}
262 
263 	sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
264 	DHD_ERROR(("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate));
265 
266 	sdioh_sdmmc_card_enablefuncs(sd);
267 
268 	sd_trace(("%s: Done\n", __FUNCTION__));
269 	return sd;
270 
271 fail:
272 	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
273 	return NULL;
274 }
275 
276 extern SDIOH_API_RC
277 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
278 {
279 	sd_trace(("%s\n", __FUNCTION__));
280 
281 	if (sd) {
282 
283 		/* Disable Function 2 */
284 		if (sd->func[2]) {
285 			sdio_claim_host(sd->func[2]);
286 			sdio_disable_func(sd->func[2]);
287 			sdio_release_host(sd->func[2]);
288 		}
289 
290 		/* Disable Function 1 */
291 		if (sd->func[1]) {
292 			sdio_claim_host(sd->func[1]);
293 			sdio_disable_func(sd->func[1]);
294 			sdio_release_host(sd->func[1]);
295 		}
296 
297 		sd->func[1] = NULL;
298 		sd->func[2] = NULL;
299 
300 		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
301 	}
302 	return SDIOH_API_RC_SUCCESS;
303 }
304 
305 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
306 
307 extern SDIOH_API_RC
308 sdioh_enable_func_intr(sdioh_info_t *sd)
309 {
310 	uint8 reg;
311 	int err;
312 
313 	if (sd->func[0] == NULL) {
314 		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
315 		return SDIOH_API_RC_FAIL;
316 	}
317 
318 	sdio_claim_host(sd->func[0]);
319 	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
320 	if (err) {
321 		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
322 		sdio_release_host(sd->func[0]);
323 		return SDIOH_API_RC_FAIL;
324 	}
325 	/* Enable F1 and F2 interrupts, clear master enable */
326 	reg &= ~INTR_CTL_MASTER_EN;
327 	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
328 #if defined(BT_OVER_SDIO)
329 	reg |= (INTR_CTL_FUNC3_EN);
330 #endif /* defined (BT_OVER_SDIO) */
331 	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
332 	sdio_release_host(sd->func[0]);
333 
334 	if (err) {
335 		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
336 		return SDIOH_API_RC_FAIL;
337 	}
338 
339 	return SDIOH_API_RC_SUCCESS;
340 }
341 
342 extern SDIOH_API_RC
343 sdioh_disable_func_intr(sdioh_info_t *sd)
344 {
345 	uint8 reg;
346 	int err;
347 
348 	if (sd->func[0] == NULL) {
349 		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
350 		return SDIOH_API_RC_FAIL;
351 	}
352 
353 	sdio_claim_host(sd->func[0]);
354 	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
355 	if (err) {
356 		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
357 		sdio_release_host(sd->func[0]);
358 		return SDIOH_API_RC_FAIL;
359 	}
360 	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
361 #if defined(BT_OVER_SDIO)
362 	reg &= ~INTR_CTL_FUNC3_EN;
363 #endif // endif
364 	/* Disable master interrupt with the last function interrupt */
365 	if (!(reg & 0xFE))
366 		reg = 0;
367 	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
368 	sdio_release_host(sd->func[0]);
369 
370 	if (err) {
371 		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
372 		return SDIOH_API_RC_FAIL;
373 	}
374 
375 	return SDIOH_API_RC_SUCCESS;
376 }
377 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
378 
379 /* Configure callback to client when we receive a client interrupt */
380 extern SDIOH_API_RC
381 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
382 {
383 	sd_trace(("%s: Entering\n", __FUNCTION__));
384 	if (fn == NULL) {
385 		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
386 		return SDIOH_API_RC_FAIL;
387 	}
388 #if !defined(OOB_INTR_ONLY)
389 	sd->intr_handler = fn;
390 	sd->intr_handler_arg = argh;
391 	sd->intr_handler_valid = TRUE;
392 
393 	/* register and unmask irq */
394 	if (sd->func[2]) {
395 		sdio_claim_host(sd->func[2]);
396 		sdio_claim_irq(sd->func[2], IRQHandlerF2);
397 		sdio_release_host(sd->func[2]);
398 	}
399 
400 	if (sd->func[1]) {
401 		sdio_claim_host(sd->func[1]);
402 		sdio_claim_irq(sd->func[1], IRQHandler);
403 		sdio_release_host(sd->func[1]);
404 	}
405 #elif defined(HW_OOB)
406 	sdioh_enable_func_intr(sd);
407 #endif /* !defined(OOB_INTR_ONLY) */
408 
409 	return SDIOH_API_RC_SUCCESS;
410 }
411 
412 extern SDIOH_API_RC
413 sdioh_interrupt_deregister(sdioh_info_t *sd)
414 {
415 	sd_trace(("%s: Entering\n", __FUNCTION__));
416 
417 #if !defined(OOB_INTR_ONLY)
418 	if (sd->func[1]) {
419 		/* release irq */
420 		sdio_claim_host(sd->func[1]);
421 		sdio_release_irq(sd->func[1]);
422 		sdio_release_host(sd->func[1]);
423 	}
424 
425 	if (sd->func[2]) {
426 		/* Claim host controller F2 */
427 		sdio_claim_host(sd->func[2]);
428 		sdio_release_irq(sd->func[2]);
429 		/* Release host controller F2 */
430 		sdio_release_host(sd->func[2]);
431 	}
432 
433 	sd->intr_handler_valid = FALSE;
434 	sd->intr_handler = NULL;
435 	sd->intr_handler_arg = NULL;
436 #elif defined(HW_OOB)
437 	sdioh_disable_func_intr(sd);
438 #endif /* !defined(OOB_INTR_ONLY) */
439 	return SDIOH_API_RC_SUCCESS;
440 }
441 
442 extern SDIOH_API_RC
443 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
444 {
445 	sd_trace(("%s: Entering\n", __FUNCTION__));
446 	*onoff = sd->client_intr_enabled;
447 	return SDIOH_API_RC_SUCCESS;
448 }
449 
450 #if defined(DHD_DEBUG)
451 extern bool
452 sdioh_interrupt_pending(sdioh_info_t *sd)
453 {
454 	return (0);
455 }
456 #endif // endif
457 
458 uint
459 sdioh_query_iofnum(sdioh_info_t *sd)
460 {
461 	return sd->num_funcs;
462 }
463 
464 /* IOVar table */
465 enum {
466 	IOV_MSGLEVEL = 1,
467 	IOV_BLOCKMODE,
468 	IOV_BLOCKSIZE,
469 	IOV_DMA,
470 	IOV_USEINTS,
471 	IOV_NUMINTS,
472 	IOV_NUMLOCALINTS,
473 	IOV_HOSTREG,
474 	IOV_DEVREG,
475 	IOV_DIVISOR,
476 	IOV_SDMODE,
477 	IOV_HISPEED,
478 	IOV_HCIREGS,
479 	IOV_POWER,
480 	IOV_CLOCK,
481 	IOV_RXCHAIN
482 };
483 
484 const bcm_iovar_t sdioh_iovars[] = {
485 	{"sd_msglevel", IOV_MSGLEVEL,	0, 0,	IOVT_UINT32,	0 },
486 	{"sd_blockmode", IOV_BLOCKMODE, 0, 0,	IOVT_BOOL,	0 },
487 	{"sd_blocksize", IOV_BLOCKSIZE, 0, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
488 	{"sd_dma",	IOV_DMA,	0, 0,	IOVT_BOOL,	0 },
489 	{"sd_ints",	IOV_USEINTS,	0, 0,	IOVT_BOOL,	0 },
490 	{"sd_numints",	IOV_NUMINTS,	0, 0,	IOVT_UINT32,	0 },
491 	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32,	0 },
492 	{"sd_divisor",	IOV_DIVISOR,	0, 0,	IOVT_UINT32,	0 },
493 	{"sd_power",	IOV_POWER,	0, 0,	IOVT_UINT32,	0 },
494 	{"sd_clock",	IOV_CLOCK,	0, 0,	IOVT_UINT32,	0 },
495 	{"sd_mode",	IOV_SDMODE,	0, 0,	IOVT_UINT32,	100},
496 	{"sd_highspeed", IOV_HISPEED,	0, 0,	IOVT_UINT32,	0 },
497 	{"sd_rxchain",  IOV_RXCHAIN,    0, 0, 	IOVT_BOOL,	0 },
498 	{NULL, 0, 0, 0, 0, 0 }
499 };
500 
501 int
502 sdioh_iovar_op(sdioh_info_t *si, const char *name,
503                            void *params, int plen, void *arg, int len, bool set)
504 {
505 	const bcm_iovar_t *vi = NULL;
506 	int bcmerror = 0;
507 	int val_size;
508 	int32 int_val = 0;
509 	bool bool_val;
510 	uint32 actionid;
511 
512 	ASSERT(name);
513 	ASSERT(len >= 0);
514 
515 	/* Get must have return space; Set does not take qualifiers */
516 	ASSERT(set || (arg && len));
517 	ASSERT(!set || (!params && !plen));
518 
519 	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
520 
521 	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
522 		bcmerror = BCME_UNSUPPORTED;
523 		goto exit;
524 	}
525 
526 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
527 		goto exit;
528 
529 	/* Set up params so get and set can share the convenience variables */
530 	if (params == NULL) {
531 		params = arg;
532 		plen = len;
533 	}
534 
535 	if (vi->type == IOVT_VOID)
536 		val_size = 0;
537 	else if (vi->type == IOVT_BUFFER)
538 		val_size = len;
539 	else
540 		val_size = sizeof(int);
541 
542 	if (plen >= (int)sizeof(int_val))
543 		bcopy(params, &int_val, sizeof(int_val));
544 
545 	bool_val = (int_val != 0) ? TRUE : FALSE;
546 	BCM_REFERENCE(bool_val);
547 
548 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
549 	switch (actionid) {
550 	case IOV_GVAL(IOV_MSGLEVEL):
551 		int_val = (int32)sd_msglevel;
552 		bcopy(&int_val, arg, val_size);
553 		break;
554 
555 	case IOV_SVAL(IOV_MSGLEVEL):
556 		sd_msglevel = int_val;
557 		break;
558 
559 	case IOV_GVAL(IOV_BLOCKMODE):
560 		int_val = (int32)si->sd_blockmode;
561 		bcopy(&int_val, arg, val_size);
562 		break;
563 
564 	case IOV_SVAL(IOV_BLOCKMODE):
565 		si->sd_blockmode = (bool)int_val;
566 		/* Haven't figured out how to make non-block mode with DMA */
567 		break;
568 
569 	case IOV_GVAL(IOV_BLOCKSIZE):
570 		if ((uint32)int_val > si->num_funcs) {
571 			bcmerror = BCME_BADARG;
572 			break;
573 		}
574 		int_val = (int32)si->client_block_size[int_val];
575 		bcopy(&int_val, arg, val_size);
576 		break;
577 
578 	case IOV_SVAL(IOV_BLOCKSIZE):
579 	{
580 		uint func = ((uint32)int_val >> 16);
581 		uint blksize = (uint16)int_val;
582 		uint maxsize;
583 
584 		if (func > si->num_funcs) {
585 			bcmerror = BCME_BADARG;
586 			break;
587 		}
588 
589 		switch (func) {
590 		case 0: maxsize = 32; break;
591 		case 1: maxsize = BLOCK_SIZE_4318; break;
592 		case 2: maxsize = BLOCK_SIZE_4328; break;
593 		default: maxsize = 0;
594 		}
595 		if (blksize > maxsize) {
596 			bcmerror = BCME_BADARG;
597 			break;
598 		}
599 		if (!blksize) {
600 			blksize = maxsize;
601 		}
602 
603 		/* Now set it */
604 		si->client_block_size[func] = blksize;
605 
606 #ifdef USE_DYNAMIC_F2_BLKSIZE
607 		if (si->func[func] == NULL) {
608 			sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
609 			bcmerror = BCME_NORESOURCE;
610 			break;
611 		}
612 		sdio_claim_host(si->func[func]);
613 		bcmerror = sdio_set_block_size(si->func[func], blksize);
614 		if (bcmerror)
615 			sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
616 				__FUNCTION__, func, blksize, bcmerror));
617 		sdio_release_host(si->func[func]);
618 #endif /* USE_DYNAMIC_F2_BLKSIZE */
619 		break;
620 	}
621 
622 	case IOV_GVAL(IOV_RXCHAIN):
623 		int_val = (int32)si->use_rxchain;
624 		bcopy(&int_val, arg, val_size);
625 		break;
626 
627 	case IOV_GVAL(IOV_DMA):
628 		int_val = (int32)si->sd_use_dma;
629 		bcopy(&int_val, arg, val_size);
630 		break;
631 
632 	case IOV_SVAL(IOV_DMA):
633 		si->sd_use_dma = (bool)int_val;
634 		break;
635 
636 	case IOV_GVAL(IOV_USEINTS):
637 		int_val = (int32)si->use_client_ints;
638 		bcopy(&int_val, arg, val_size);
639 		break;
640 
641 	case IOV_SVAL(IOV_USEINTS):
642 		si->use_client_ints = (bool)int_val;
643 		if (si->use_client_ints)
644 			si->intmask |= CLIENT_INTR;
645 		else
646 			si->intmask &= ~CLIENT_INTR;
647 
648 		break;
649 
650 	case IOV_GVAL(IOV_DIVISOR):
651 		int_val = (uint32)sd_divisor;
652 		bcopy(&int_val, arg, val_size);
653 		break;
654 
655 	case IOV_SVAL(IOV_DIVISOR):
656 		/* set the clock to divisor, if value is non-zero & power of 2 */
657 		if (int_val && !(int_val & (int_val - 1))) {
658 			sd_divisor = int_val;
659 			sdmmc_set_clock_divisor(si, sd_divisor);
660 		} else {
661 			DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n",
662 				__FUNCTION__));
663 		}
664 		break;
665 
666 	case IOV_GVAL(IOV_POWER):
667 		int_val = (uint32)sd_power;
668 		bcopy(&int_val, arg, val_size);
669 		break;
670 
671 	case IOV_SVAL(IOV_POWER):
672 		sd_power = int_val;
673 		break;
674 
675 	case IOV_GVAL(IOV_CLOCK):
676 		int_val = (uint32)sd_clock;
677 		bcopy(&int_val, arg, val_size);
678 		break;
679 
680 	case IOV_SVAL(IOV_CLOCK):
681 		sd_clock = int_val;
682 		break;
683 
684 	case IOV_GVAL(IOV_SDMODE):
685 		int_val = (uint32)sd_sdmode;
686 		bcopy(&int_val, arg, val_size);
687 		break;
688 
689 	case IOV_SVAL(IOV_SDMODE):
690 		sd_sdmode = int_val;
691 		break;
692 
693 	case IOV_GVAL(IOV_HISPEED):
694 		int_val = (uint32)sd_hiok;
695 		bcopy(&int_val, arg, val_size);
696 		break;
697 
698 	case IOV_SVAL(IOV_HISPEED):
699 		sd_hiok = int_val;
700 		break;
701 
702 	case IOV_GVAL(IOV_NUMINTS):
703 		int_val = (int32)si->intrcount;
704 		bcopy(&int_val, arg, val_size);
705 		break;
706 
707 	case IOV_GVAL(IOV_NUMLOCALINTS):
708 		int_val = (int32)0;
709 		bcopy(&int_val, arg, val_size);
710 		break;
711 	default:
712 		bcmerror = BCME_UNSUPPORTED;
713 		break;
714 	}
715 exit:
716 
717 	return bcmerror;
718 }
719 
720 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
721 
722 SDIOH_API_RC
723 sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
724 {
725 	SDIOH_API_RC status;
726 	uint8 data;
727 
728 	if (enable)
729 		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
730 	else
731 		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
732 
733 	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
734 	return status;
735 }
736 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
737 
738 extern SDIOH_API_RC
739 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
740 {
741 	SDIOH_API_RC status;
742 	/* No lock needed since sdioh_request_byte does locking */
743 	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
744 	return status;
745 }
746 
747 extern SDIOH_API_RC
748 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
749 {
750 	/* No lock needed since sdioh_request_byte does locking */
751 	SDIOH_API_RC status;
752 	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
753 	return status;
754 }
755 
756 static int
757 sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
758 {
759 	/* read 24 bits and return valid 17 bit addr */
760 	int i;
761 	uint32 scratch, regdata;
762 	uint8 *ptr = (uint8 *)&scratch;
763 	for (i = 0; i < 3; i++) {
764 		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
765 			sd_err(("%s: Can't read!\n", __FUNCTION__));
766 
767 		*ptr++ = (uint8) regdata;
768 		regaddr++;
769 	}
770 
771 	/* Only the lower 17-bits are valid */
772 	scratch = ltoh32(scratch);
773 	scratch &= 0x0001FFFF;
774 	return (scratch);
775 }
776 
777 extern SDIOH_API_RC
778 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
779 {
780 	uint32 count;
781 	int offset;
782 	uint32 foo;
783 	uint8 *cis = cisd;
784 
785 	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
786 
787 	if (!sd->func_cis_ptr[func]) {
788 		bzero(cis, length);
789 		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
790 		return SDIOH_API_RC_FAIL;
791 	}
792 
793 	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
794 
795 	for (count = 0; count < length; count++) {
796 		offset =  sd->func_cis_ptr[func] + count;
797 		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
798 			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
799 			return SDIOH_API_RC_FAIL;
800 		}
801 
802 		*cis = (uint8)(foo & 0xff);
803 		cis++;
804 	}
805 
806 	return SDIOH_API_RC_SUCCESS;
807 }
808 
809 extern SDIOH_API_RC
810 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
811 {
812 	int err_ret = 0;
813 #if defined(MMC_SDIO_ABORT)
814 	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
815 #endif // endif
816 
817 	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
818 
819 	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
820 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
821 	if(rw) { /* CMD52 Write */
822 		if (func == 0) {
823 			/* Can only directly write to some F0 registers.  Handle F2 enable
824 			 * as a special case.
825 			 */
826 			if (regaddr == SDIOD_CCCR_IOEN) {
827 #if defined(BT_OVER_SDIO)
828 				do {
829 				if (sd->func[3]) {
830 					sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
831 
832 					if (*byte & SDIO_FUNC_ENABLE_3) {
833 						sdio_claim_host(sd->func[3]);
834 
835 						/* Set Function 3 Block Size */
836 						err_ret = sdio_set_block_size(sd->func[3],
837 						sd_f3_blocksize);
838 						if (err_ret) {
839 							sd_err(("F3 blocksize set err%d\n",
840 								err_ret));
841 						}
842 
843 						/* Enable Function 3 */
844 						sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
845 						sd->func[3]));
846 						err_ret = sdio_enable_func(sd->func[3]);
847 						if (err_ret) {
848 							sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
849 								err_ret));
850 						}
851 
852 						sdio_release_host(sd->func[3]);
853 
854 						break;
855 					} else if (*byte & SDIO_FUNC_DISABLE_3) {
856 						sdio_claim_host(sd->func[3]);
857 
858 						/* Disable Function 3 */
859 						sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
860 						sd->func[3]));
861 						err_ret = sdio_disable_func(sd->func[3]);
862 						if (err_ret) {
863 							sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
864 								err_ret));
865 						}
866 						sdio_release_host(sd->func[3]);
867 						sd->func[3] = NULL;
868 
869 						break;
870 					}
871 				}
872 #endif /* defined (BT_OVER_SDIO) */
873 				if (sd->func[2]) {
874 					sdio_claim_host(sd->func[2]);
875 					if (*byte & SDIO_FUNC_ENABLE_2) {
876 						/* Enable Function 2 */
877 						err_ret = sdio_enable_func(sd->func[2]);
878 						if (err_ret) {
879 							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
880 								err_ret));
881 						}
882 					} else {
883 						/* Disable Function 2 */
884 						err_ret = sdio_disable_func(sd->func[2]);
885 						if (err_ret) {
886 							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
887 								err_ret));
888 						}
889 					}
890 					sdio_release_host(sd->func[2]);
891 				}
892 #if defined(BT_OVER_SDIO)
893 			} while (0);
894 #endif /* defined (BT_OVER_SDIO) */
895 		}
896 #if defined(MMC_SDIO_ABORT)
897 			/* to allow abort command through F1 */
898 			else if (regaddr == SDIOD_CCCR_IOABORT) {
899 				while (sdio_abort_retry--) {
900 					if (sd->func[func]) {
901 						sdio_claim_host(sd->func[func]);
902 						/*
903 						 * this sdio_writeb() call may be replaced with
904 						 * another API depending upon MMC driver changes.
905 						 * As of this time, this is a temporary one.
906 						 */
907 						sdio_writeb(sd->func[func],
908 							*byte, regaddr, &err_ret);
909 						sdio_release_host(sd->func[func]);
910 					}
911 					if (!err_ret)
912 						break;
913 				}
914 			}
915 #endif /* MMC_SDIO_ABORT */
916 			else if (regaddr < 0xF0) {
917 				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
918 			} else {
919 				/* Claim host controller, perform F0 write, and release */
920 				if (sd->func[func]) {
921 					sdio_claim_host(sd->func[func]);
922 					sdio_f0_writeb(sd->func[func],
923 						*byte, regaddr, &err_ret);
924 					sdio_release_host(sd->func[func]);
925 				}
926 			}
927 		} else {
928 			/* Claim host controller, perform Fn write, and release */
929 			if (sd->func[func]) {
930 				sdio_claim_host(sd->func[func]);
931 				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
932 				sdio_release_host(sd->func[func]);
933 			}
934 		}
935 	} else { /* CMD52 Read */
936 		/* Claim host controller, perform Fn read, and release */
937 		if (sd->func[func]) {
938 			sdio_claim_host(sd->func[func]);
939 			if (func == 0) {
940 				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
941 			} else {
942 				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
943 			}
944 			sdio_release_host(sd->func[func]);
945 		}
946 	}
947 
948 	if (err_ret) {
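		/* Accesses to SBSDIO_FUNC1_SLEEPCSR (0x1001F) may fail with -ETIMEDOUT
		 * or -EILSEQ while the device is asleep; suppress the error log for
		 * that case.
		 */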
949 		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
950 		} else {
951 			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
952 				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
953 		}
954 	}
955 
956 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
957 }
958 
959 extern SDIOH_API_RC
960 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
961                                    uint32 *word, uint nbytes)
962 {
963 	int err_ret = SDIOH_API_RC_FAIL;
964 #if defined(MMC_SDIO_ABORT)
965 	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
966 #endif // endif
967 
968 	if (func == 0) {
969 		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
970 		return SDIOH_API_RC_FAIL;
971 	}
972 
973 	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
974 	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
975 
976 	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
977 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
978 	/* Claim host controller */
979 	sdio_claim_host(sd->func[func]);
980 
981 	if(rw) { /* CMD53 Write */
982 		if (nbytes == 4) {
983 			sdio_writel(sd->func[func], *word, addr, &err_ret);
984 		} else if (nbytes == 2) {
985 			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
986 		} else {
987 			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
988 		}
989 	} else { /* CMD53 Read */
990 		if (nbytes == 4) {
991 			*word = sdio_readl(sd->func[func], addr, &err_ret);
992 		} else if (nbytes == 2) {
993 			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
994 		} else {
995 			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
996 		}
997 	}
998 
999 	/* Release host controller */
1000 	sdio_release_host(sd->func[func]);
1001 
1002 	if (err_ret) {
1003 #if defined(MMC_SDIO_ABORT)
1004 		/* Any error on CMD53 transaction should abort that function using function 0. */
1005 		while (sdio_abort_retry--) {
1006 			if (sd->func[0]) {
1007 				sdio_claim_host(sd->func[0]);
1008 				/*
1009 				 * this sdio_writeb() call may be replaced with another API
1010 				 * depending upon MMC driver changes.
1011 				 * As of this time, this is a temporary one.
1012 				 */
1013 				sdio_writeb(sd->func[0],
1014 					func, SDIOD_CCCR_IOABORT, &err_ret);
1015 				sdio_release_host(sd->func[0]);
1016 			}
1017 			if (!err_ret)
1018 				break;
1019 		}
1020 		if (err_ret)
1021 #endif /* MMC_SDIO_ABORT */
1022 		{
1023 			sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
1024 				rw ? "Write" : "Read", err_ret));
1025 		}
1026 	}
1027 
1028 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1029 }
1030 
1031 #ifdef BCMSDIOH_TXGLOM
1032 static SDIOH_API_RC
1033 sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1034                      uint addr, void *pkt)
1035 {
1036 	bool fifo = (fix_inc == SDIOH_DATA_FIX);
1037 	int err_ret = 0;
1038 	void *pnext;
1039 	uint ttl_len, pkt_offset;
1040 	uint blk_num;
1041 	uint blk_size;
1042 	uint max_blk_count;
1043 	uint max_req_size;
1044 	struct mmc_request mmc_req;
1045 	struct mmc_command mmc_cmd;
1046 	struct mmc_data mmc_dat;
1047 	uint32 sg_count;
1048 	struct sdio_func *sdio_func = sd->func[func];
1049 	struct mmc_host *host = sdio_func->card->host;
1050 
1051 	sd_trace(("%s: Enter\n", __FUNCTION__));
1052 	ASSERT(pkt);
1053 	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
1054 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1055 
1056 	blk_size = sd->client_block_size[func];
1057 	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
1058 	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
1059 
1060 	pkt_offset = 0;
1061 	pnext = pkt;
1062 
1063 	while (pnext != NULL) {
1064 		ttl_len = 0;
1065 		sg_count = 0;
1066 		memset(&mmc_req, 0, sizeof(struct mmc_request));
1067 		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
1068 		memset(&mmc_dat, 0, sizeof(struct mmc_data));
1069 		sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
1070 
1071 		/* Set up scatter-gather DMA descriptors. This loop finds the maximum amount of
1072 		 * data we can transfer with one CMD53. Blocks per command are limited by the
1073 		 * host's max_req_size and the 9-bit maximum block count. When the total length of
1074 		 * this packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED
1075 		 * commands (each transfer is still block aligned)
1076 		 */
1077 		while (pnext != NULL && ttl_len < max_req_size) {
1078 			int pkt_len;
1079 			int sg_data_size;
1080 			uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
1081 
1082 			ASSERT(pdata != NULL);
1083 			pkt_len = PKTLEN(sd->osh, pnext);
1084 			sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
1085 			/* sg_count is unlikely larger than the array size, and this is
1086 			 * NOT something we can handle here, but in case it happens, PLEASE put
1087 			 * a restriction on max tx/glom count (based on host->max_segs).
1088 			 */
1089 			if (sg_count >= ARRAYSIZE(sd->sg_list)) {
1090 				sd_err(("%s: sg list entries(%u) exceed limit(%u),"
1091 					" sd blk_size=%u\n",
1092 					__FUNCTION__, sg_count, ARRAYSIZE(sd->sg_list), blk_size));
1093 				return (SDIOH_API_RC_FAIL);
1094 			}
1095 			pdata += pkt_offset;
1096 
1097 			sg_data_size = pkt_len - pkt_offset;
1098 			if (sg_data_size > max_req_size - ttl_len)
1099 				sg_data_size = max_req_size - ttl_len;
1100 			/* some platforms put a restriction on the data size of each scatter-gather
1101 			 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
1102 			 * max_seg_size
1103 			 */
1104 			if (sg_data_size > host->max_seg_size) {
1105 				sg_data_size = host->max_seg_size;
1106 			}
1107 			sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
1108 
1109 			ttl_len += sg_data_size;
1110 			pkt_offset += sg_data_size;
1111 			if (pkt_offset == pkt_len) {
1112 				pnext = PKTNEXT(sd->osh, pnext);
1113 				pkt_offset = 0;
1114 			}
1115 		}
1116 
1117 		if (ttl_len % blk_size != 0) {
1118 			sd_err(("%s, data length %d not aligned to block size %d\n",
1119 				__FUNCTION__,  ttl_len, blk_size));
1120 			return SDIOH_API_RC_FAIL;
1121 		}
1122 		blk_num = ttl_len / blk_size;
1123 		mmc_dat.sg = sd->sg_list;
1124 		mmc_dat.sg_len = sg_count;
1125 		mmc_dat.blksz = blk_size;
1126 		mmc_dat.blocks = blk_num;
1127 		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
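		/*
		 * Build the CMD53 (SD_IO_RW_EXTENDED) argument by hand:
		 * bit 31 = R/W flag, bits 30:28 = function number, bit 27 = block mode,
		 * bit 26 = OP code (incrementing address), bits 25:9 = register address,
		 * bits 8:0 = block count.
		 */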
1128 		mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
1129 		mmc_cmd.arg = write ? 1<<31 : 0;
1130 		mmc_cmd.arg |= (func & 0x7) << 28;
1131 		mmc_cmd.arg |= 1<<27;
1132 		mmc_cmd.arg |= fifo ? 0 : 1<<26;
1133 		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
1134 		mmc_cmd.arg |= blk_num & 0x1FF;
1135 		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
1136 		mmc_req.cmd = &mmc_cmd;
1137 		mmc_req.data = &mmc_dat;
1138 		if (!fifo)
1139 			addr += ttl_len;
1140 
1141 		sdio_claim_host(sdio_func);
1142 		mmc_set_data_timeout(&mmc_dat, sdio_func->card);
1143 		mmc_wait_for_req(host, &mmc_req);
1144 		sdio_release_host(sdio_func);
1145 
1146 		err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
1147 		if (0 != err_ret) {
1148 			sd_err(("%s:CMD53 %s failed with code %d\n",
1149 				__FUNCTION__, write ? "write" : "read", err_ret));
1150 			return SDIOH_API_RC_FAIL;
1151 		}
1152 	}
1153 
1154 	sd_trace(("%s: Exit\n", __FUNCTION__));
1155 	return SDIOH_API_RC_SUCCESS;
1156 }
1157 #endif /* BCMSDIOH_TXGLOM */
1158 
1159 static SDIOH_API_RC
1160 sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1161                      uint addr, uint8 *buf, uint len)
1162 {
1163 	bool fifo = (fix_inc == SDIOH_DATA_FIX);
1164 	int err_ret = 0;
1165 
1166 	sd_trace(("%s: Enter\n", __FUNCTION__));
1167 	ASSERT(buf);
1168 
1169 	/* NOTE:
1170 	 * For all writes, each packet length is aligned to 32 (or 4)
1171 	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
1172 	 * is aligned to block boundary. If you want to align each packet to
1173 	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
1174 	 *
1175 	 * For reads, the alignment is done in sdioh_request_buffer.
1176 	 *
1177 	 */
1178 	sdio_claim_host(sd->func[func]);
1179 
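	/* Note: both write branches below issue sdio_memcpy_toio(); fixed-address
	 * (FIFO) writes are not special-cased here, only FIFO reads use sdio_readsb().
	 */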
1180 	if ((write) && (!fifo))
1181 		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1182 	else if (write)
1183 		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1184 	else if (fifo)
1185 		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
1186 	else
1187 		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
1188 
1189 	sdio_release_host(sd->func[func]);
1190 
1191 	if (err_ret)
1192 		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
1193 		       (write) ? "TX" : "RX", buf, addr, len, err_ret));
1194 	else
1195 		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
1196 			(write) ? "TX" : "RX", buf, addr, len));
1197 
1198 	sd_trace(("%s: Exit\n", __FUNCTION__));
1199 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1200 }
1201 
1202 /*
1203  * This function takes a buffer or packet, and fixes everything up so that in the
1204  * end, a DMA-able packet is created.
1205  *
1206  * A buffer does not have an associated packet pointer, and may or may not be aligned.
1207  * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
1208  * then all the packets in the chain must be properly aligned.  If the packet data is not
1209  * aligned, then there may only be one packet, and in this case, it is copied to a new
1210  * aligned packet.
1211  *
1212  */
1213 extern SDIOH_API_RC
1214 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
1215 	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
1216 {
1217 	SDIOH_API_RC status;
1218 	void *tmppkt;
1219 
1220 	sd_trace(("%s: Enter\n", __FUNCTION__));
1221 	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
1222 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1223 
1224 	if (pkt) {
1225 #ifdef BCMSDIOH_TXGLOM
1226 		/* packet chain, only used for tx/rx glom, all packets length
1227 		 * are aligned, total length is a block multiple
1228 		 */
1229 		if (PKTNEXT(sd->osh, pkt))
1230 			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
1231 #endif /* BCMSDIOH_TXGLOM */
1232 		/* non-glom mode, ignore the buffer parameter and use the packet pointer
1233 		 * (this shouldn't happen)
1234 		 */
1235 		buffer = PKTDATA(sd->osh, pkt);
1236 		buf_len = PKTLEN(sd->osh, pkt);
1237 	}
1238 
1239 	ASSERT(buffer);
1240 
1241 	/* buffer and length are aligned, use it directly so we can avoid memory copy */
1242 	if (((ulong)buffer & (ARCH_DMA_MINALIGN - 1)) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
1243 		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
1244 
1245 	sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
1246 		__FUNCTION__, write, buffer, buf_len));
1247 
1248 	/* otherwise, a memory copy is needed as the input buffer is not aligned */
1249 	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
1250 	if (tmppkt == NULL) {
1251 		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
1252 		return SDIOH_API_RC_FAIL;
1253 	}
1254 
1255 	if (write)
1256 		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
1257 
1258 	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
1259 		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
1260 
1261 	if (!write)
1262 		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
1263 
1264 	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
1265 
1266 	return status;
1267 }
1268 
1269 /* this function performs "abort" for both of host & device */
1270 extern int
1271 sdioh_abort(sdioh_info_t *sd, uint func)
1272 {
1273 #if defined(MMC_SDIO_ABORT)
1274 	char t_func = (char) func;
1275 #endif /* defined(MMC_SDIO_ABORT) */
1276 	sd_trace(("%s: Enter\n", __FUNCTION__));
1277 
1278 #if defined(MMC_SDIO_ABORT)
1279 	/* issue an abort by writing the function number to the F0 CCCR I/O Abort register via CMD52 */
1280 	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
1281 #endif /* defined(MMC_SDIO_ABORT) */
1282 
1283 	sd_trace(("%s: Exit\n", __FUNCTION__));
1284 	return SDIOH_API_RC_SUCCESS;
1285 }
1286 
1287 /* Reset and re-initialize the device */
1288 int sdioh_sdio_reset(sdioh_info_t *si)
1289 {
1290 	sd_trace(("%s: Enter\n", __FUNCTION__));
1291 	sd_trace(("%s: Exit\n", __FUNCTION__));
1292 	return SDIOH_API_RC_SUCCESS;
1293 }
1294 
1295 /* Disable device interrupt */
1296 void
1297 sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
1298 {
1299 	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1300 	sd->intmask &= ~CLIENT_INTR;
1301 }
1302 
1303 /* Enable device interrupt */
1304 void
1305 sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
1306 {
1307 	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1308 	sd->intmask |= CLIENT_INTR;
1309 }
1310 
1311 /* Read client card reg */
1312 int
1313 sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
1314 {
1315 
1316 	if ((func == 0) || (regsize == 1)) {
1317 		uint8 temp = 0;
1318 
1319 		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1320 		*data = temp;
1321 		*data &= 0xff;
1322 		sd_data(("%s: byte read data=0x%02x\n",
1323 		         __FUNCTION__, *data));
1324 	} else {
1325 		if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
1326 			return BCME_SDIO_ERROR;
1327 		}
1328 		if (regsize == 2)
1329 			*data &= 0xffff;
1330 
1331 		sd_data(("%s: word read data=0x%08x\n",
1332 		         __FUNCTION__, *data));
1333 	}
1334 
1335 	return SUCCESS;
1336 }
1337 
1338 #if !defined(OOB_INTR_ONLY)
1339 /* bcmsdh_sdmmc interrupt handler */
1340 static void IRQHandler(struct sdio_func *func)
1341 {
1342 	sdioh_info_t *sd;
1343 
1344 	sd = sdio_get_drvdata(func);
1345 
1346 	ASSERT(sd != NULL);
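	/*
	 * The MMC core calls SDIO interrupt handlers with the host claimed;
	 * drop the claim here so the DHD handler path can claim the host
	 * itself, and re-claim it before returning.
	 */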
1347 	sdio_release_host(sd->func[0]);
1348 
1349 	if (sd->use_client_ints) {
1350 		sd->intrcount++;
1351 		ASSERT(sd->intr_handler);
1352 		ASSERT(sd->intr_handler_arg);
1353 		(sd->intr_handler)(sd->intr_handler_arg);
1354 	} else {
1355 		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
1356 
1357 		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
1358 		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
1359 	}
1360 
1361 	sdio_claim_host(sd->func[0]);
1362 }
1363 
1364 /* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
1365 static void IRQHandlerF2(struct sdio_func *func)
1366 {
1367 	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
1368 }
1369 #endif /* !defined(OOB_INTR_ONLY) */
1370 
1371 #ifdef NOTUSED
1372 /* Write client card reg */
1373 static int
1374 sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
1375 {
1376 
1377 	if ((func == 0) || (regsize == 1)) {
1378 		uint8 temp;
1379 
1380 		temp = data & 0xff;
1381 		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
1382 		sd_data(("%s: byte write data=0x%02x\n",
1383 		         __FUNCTION__, data));
1384 	} else {
1385 		if (regsize == 2)
1386 			data &= 0xffff;
1387 
1388 		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
1389 
1390 		sd_data(("%s: word write data=0x%08x\n",
1391 		         __FUNCTION__, data));
1392 	}
1393 
1394 	return SUCCESS;
1395 }
1396 #endif /* NOTUSED */
1397 
1398 #if defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)
1399 static int sdio_sw_reset(sdioh_info_t *sd)
1400 {
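	/*
	 * Kernels v4.18+ provide mmc_sw_reset() to re-initialize the card;
	 * older kernels fall back to the out-of-tree sdio_reset_comm()
	 * (stubbed to a no-op earlier in this file for OEM_ANDROID builds).
	 */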
1401 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
1402 	struct mmc_host *host = sd->func[0]->card->host;
1403 #endif
1404 	int err = 0;
1405 
1406 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
1407 	printf("%s: Enter\n", __FUNCTION__);
1408 	sdio_claim_host(sd->func[0]);
1409 	err = mmc_sw_reset(host);
1410 	sdio_release_host(sd->func[0]);
1411 #else
1412 	err = sdio_reset_comm(sd->func[0]->card);
1413 #endif
1414 
1415 	if (err)
1416 		sd_err(("%s Failed, error = %d\n", __FUNCTION__, err));
1417 
1418 	return err;
1419 }
1420 #endif
1421 
1422 int
1423 sdioh_start(sdioh_info_t *sd, int stage)
1424 {
1425 #if defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)
1426 	int ret;
1427 
1428 	if (!sd) {
1429 		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
1430 		return (0);
1431 	}
1432 
1433 	/* Need to do this in stages as we can't enable the interrupt until
1434 		downloading of the firmware is complete; otherwise polled
1435 		sdio access will get in the way
1436 	*/
1437 	if (sd->func[0]) {
1438 			if (stage == 0) {
1439 		/* Since the power to the chip is killed, we will have to
1440 			re-enumerate the device again. Set the block size
1441 			and enable function 1 in preparation for
1442 			downloading the code
1443 		*/
1444 		/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
1445 		   2.6.27. The implementation prior to that is buggy, and needs broadcom's
1446 		   patch for it
1447 		*/
1448 		if ((ret = sdio_sw_reset(sd))) {
1449 			sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
1450 			return ret;
1451 		}
1452 		else {
1453 			sd->num_funcs = 2;
1454 			sd->sd_blockmode = TRUE;
1455 			sd->use_client_ints = TRUE;
1456 			sd->client_block_size[0] = 64;
1457 
1458 			if (sd->func[1]) {
1459 				/* Claim host controller */
1460 				sdio_claim_host(sd->func[1]);
1461 
1462 				sd->client_block_size[1] = 64;
1463 				ret = sdio_set_block_size(sd->func[1], 64);
1464 				if (ret) {
1465 					sd_err(("bcmsdh_sdmmc: Failed to set F1 "
1466 						"blocksize(%d)\n", ret));
1467 				}
1468 
1469 				/* Release host controller F1 */
1470 				sdio_release_host(sd->func[1]);
1471 			}
1472 
1473 			if (sd->func[2]) {
1474 				/* Claim host controller F2 */
1475 				sdio_claim_host(sd->func[2]);
1476 
1477 				sd->client_block_size[2] = sd_f2_blocksize;
1478 				ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
1479 				if (ret) {
1480 					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
1481 						"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
1482 				}
1483 
1484 				/* Release host controller F2 */
1485 				sdio_release_host(sd->func[2]);
1486 			}
1487 
1488 			sdioh_sdmmc_card_enablefuncs(sd);
1489 			}
1490 		} else {
1491 #if !defined(OOB_INTR_ONLY)
1492 			sdio_claim_host(sd->func[0]);
1493 			if (sd->func[2])
1494 				sdio_claim_irq(sd->func[2], IRQHandlerF2);
1495 			if (sd->func[1])
1496 				sdio_claim_irq(sd->func[1], IRQHandler);
1497 			sdio_release_host(sd->func[0]);
1498 #else /* defined(OOB_INTR_ONLY) */
1499 #if defined(HW_OOB)
1500 			sdioh_enable_func_intr(sd);
1501 #endif // endif
1502 			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
1503 #endif /* !defined(OOB_INTR_ONLY) */
1504 		}
1505 	}
1506 	else
1507 		sd_err(("%s Failed\n", __FUNCTION__));
1508 #endif /* defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX) */
1509 
1510 	return (0);
1511 }
1512 
1513 int
1514 sdioh_stop(sdioh_info_t *sd)
1515 {
1516 #if defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)
1517 	/* The MSM7201A Android sdio stack has a bug with interrupts:
1518 		internally the SDIO stack polls, which causes issues
1519 		when the device is turned off. So unregister the
1520 		interrupt with the SDIO stack to stop the
1521 		polling
1522 	*/
1523 	if (sd->func[0]) {
1524 #if !defined(OOB_INTR_ONLY)
1525 		sdio_claim_host(sd->func[0]);
1526 		if (sd->func[1])
1527 			sdio_release_irq(sd->func[1]);
1528 		if (sd->func[2])
1529 			sdio_release_irq(sd->func[2]);
1530 		sdio_release_host(sd->func[0]);
1531 #else /* defined(OOB_INTR_ONLY) */
1532 #if defined(HW_OOB)
1533 		sdioh_disable_func_intr(sd);
1534 #endif // endif
1535 		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
1536 #endif /* !defined(OOB_INTR_ONLY) */
1537 	}
1538 	else
1539 		sd_err(("%s Failed\n", __FUNCTION__));
1540 #endif /* defined(OEM_ANDROID) ||  defined(OEM_EMBEDDED_LINUX) */
1541 	return (0);
1542 }
1543 
1544 int
1545 sdioh_waitlockfree(sdioh_info_t *sd)
1546 {
1547 	return (1);
1548 }
1549 
1550 SDIOH_API_RC
1551 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1552 {
1553 	return SDIOH_API_RC_FAIL;
1554 }
1555 
1556 SDIOH_API_RC
1557 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1558 {
1559 	return SDIOH_API_RC_FAIL;
1560 }
1561 
1562 bool
1563 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1564 {
1565 	return FALSE;
1566 }
1567 
1568 SDIOH_API_RC
1569 sdioh_gpio_init(sdioh_info_t *sd)
1570 {
1571 	return SDIOH_API_RC_FAIL;
1572 }
1573 
1574 uint
1575 sdmmc_get_clock_rate(sdioh_info_t *sd)
1576 {
1577 	struct sdio_func *sdio_func = sd->func[0];
1578 	struct mmc_host *host = sdio_func->card->host;
1579 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1580 	return mmc_host_clk_rate(host);
1581 #else
1582 	return host->ios.clock;
1583 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6)) */
1584 }
1585 
1586 void
1587 sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
1588 {
1589 	struct sdio_func *sdio_func = sd->func[0];
1590 	struct mmc_host *host = sdio_func->card->host;
1591 	struct mmc_ios *ios = &host->ios;
1592 
1593 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1594 	mmc_host_clk_hold(host);
1595 #endif // endif
1596 	DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
1597 	if (hz < host->f_min) {
1598 		DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
1599 		hz = host->f_min;
1600 	}
1601 
1602 	if (hz > host->f_max) {
1603 		DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
1604 		hz = host->f_max;
1605 	}
1606 	ios->clock = hz;
1607 	host->ops->set_ios(host, ios);
1608 	DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
1609 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1610 	mmc_host_clk_release(host);
1611 #endif // endif
1612 }
1613 
1614 void
1615 sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
1616 {
1617 	uint hz;
1618 	uint old_div = sdmmc_get_clock_rate(sd);
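	/*
	 * Note: sdmmc_get_clock_rate() returns the current clock rate in Hz,
	 * not a divisor, so this early return only fires if the raw rate
	 * happens to equal sd_div; otherwise the divisor is applied to the
	 * rate cached in sd->sd_clk_rate at attach time.
	 */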
1619 	if (old_div == sd_div) {
1620 		return;
1621 	}
1622 
1623 	hz = sd->sd_clk_rate / sd_div;
1624 	sdmmc_set_clock_rate(sd, hz);
1625 }
1626