xref: /OK3568_Linux_fs/external/rkwifibt/drivers/infineon/bcmsdh_sdmmc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
3  *
4  * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Proprietary,Open:>>
28  *
29  * $Id: bcmsdh_sdmmc.c 690631 2017-03-17 04:27:33Z $
30  */
31 #include <typedefs.h>
32 
33 #include <bcmdevs.h>
34 #include <bcmendian.h>
35 #include <bcmutils.h>
36 #include <osl.h>
37 #include <sdio.h>	/* SDIO Device and Protocol Specs */
38 #include <sdioh.h>	/* Standard SDIO Host Controller Specification */
39 #include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
40 #include <sdiovar.h>	/* ioctl/iovars */
41 
42 #include <linux/mmc/core.h>
43 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
44 #include <drivers/mmc/core/host.h>
45 #else
46 #include <linux/mmc/host.h>
47 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8)) */
48 #include <linux/mmc/card.h>
49 #include <linux/mmc/sdio_func.h>
50 #include <linux/mmc/sdio_ids.h>
51 
52 #include <dngl_stats.h>
53 #include <dhd.h>
54 #include <dhd_dbg.h>
55 
56 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
57 #include <linux/suspend.h>
58 extern volatile bool dhd_mmc_suspend;
59 #endif // endif
60 #include "bcmsdh_sdmmc.h"
61 
62 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= \
63 	KERNEL_VERSION(4, 4, 0))
64 static inline void
65 mmc_host_clk_hold(struct mmc_host *host)
66 {
67 	BCM_REFERENCE(host);
68 	return;
69 }
70 
71 static inline void
72 mmc_host_clk_release(struct mmc_host *host)
73 {
74 	BCM_REFERENCE(host);
75 	return;
76 }
77 
78 static inline unsigned int
79 mmc_host_clk_rate(struct mmc_host *host)
80 {
81 	return host->ios.clock;
82 }
83 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */
84 
85 #ifndef BCMSDH_MODULE
86 extern int sdio_function_init(void);
87 extern void sdio_function_cleanup(void);
88 #endif /* BCMSDH_MODULE */
89 
90 #if !defined(OOB_INTR_ONLY)
91 static void IRQHandler(struct sdio_func *func);
92 static void IRQHandlerF2(struct sdio_func *func);
93 #endif /* !defined(OOB_INTR_ONLY) */
94 static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
95 #if defined(ANDROID_SDIO_RESET)
96 extern int sdio_reset_comm(struct mmc_card *card);
97 #else
98 static int sdio_reset_comm(struct mmc_card *card)
99 {
100 	return 0;
101 }
102 #endif /* ANDROID_SDIO_RESET */
103 
104 #define DEFAULT_SDIO_F2_BLKSIZE		512
105 #ifndef CUSTOM_SDIO_F2_BLKSIZE
106 #define CUSTOM_SDIO_F2_BLKSIZE		DEFAULT_SDIO_F2_BLKSIZE
107 #endif // endif
108 
109 #define DEFAULT_SDIO_F1_BLKSIZE		64
110 #ifndef CUSTOM_SDIO_F1_BLKSIZE
111 #define CUSTOM_SDIO_F1_BLKSIZE		DEFAULT_SDIO_F1_BLKSIZE
112 #endif // endif
113 
114 #define MAX_IO_RW_EXTENDED_BLK		511
115 
116 uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
117 uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
118 uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE;
119 
120 #if defined(BT_OVER_SDIO)
121 uint sd_f3_blocksize = 64;
122 #endif /* defined (BT_OVER_SDIO) */
123 
124 uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
125 
126 uint sd_power = 1;		/* Default to SD Slot powered ON */
127 uint sd_clock = 1;		/* Default to SD Clock turned ON */
128 uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
129 uint sd_msglevel = SDH_ERROR_VAL;
130 uint sd_use_dma = TRUE;
131 
132 #ifdef DHD_MAP_CHIP_FIRMWARE_PATH
133 uint sd_chip_module = 0;
134 #endif /* DHD_MAP_CHIP_FIRMWARE_PATH */
135 
136 #ifndef CUSTOM_RXCHAIN
137 #define CUSTOM_RXCHAIN 0
138 #endif // endif
139 
140 DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
141 DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
142 DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
143 DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
144 
145 #if !defined(ARCH_DMA_MINALIGN)
146 #define ARCH_DMA_MINALIGN 128
147 #endif /* !defined(ARCH_DMA_MINALIGN) */
148 #define DMA_ALIGN_MASK	0x03
149 #define MMC_SDIO_ABORT_RETRY_LIMIT 5
150 
151 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
152 
153 #if defined(BT_OVER_SDIO)
154 extern
155 void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
156 {
157 	sd->func[3] = func;
158 	sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3]));
159 }
160 #endif /* defined (BT_OVER_SDIO) */
161 
162 void  sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
163 uint  sdmmc_get_clock_rate(sdioh_info_t *sd);
164 void  sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
165 
166 static int
167 sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
168 {
169 	int err_ret;
170 	uint32 fbraddr;
171 	uint8 func;
172 
173 	sd_trace(("%s\n", __FUNCTION__));
174 
175 	/* Get the Card's common CIS address */
176 	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
177 	sd->func_cis_ptr[0] = sd->com_cis_ptr;
178 	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
179 
180 	/* Get the Card's function CIS (for each function) */
181 	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
182 	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
183 		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
184 		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
185 		         __FUNCTION__, func, sd->func_cis_ptr[func]));
186 	}
187 
188 	sd->func_cis_ptr[0] = sd->com_cis_ptr;
189 	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
190 
191 	/* Enable Function 1 */
192 	sdio_claim_host(sd->func[1]);
193 	err_ret = sdio_enable_func(sd->func[1]);
194 	sdio_release_host(sd->func[1]);
195 	if (err_ret) {
196 		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
197 	}
198 
199 	return FALSE;
200 }
201 
202 /*
203  *	Public entry points & extern's
204  */
205 extern sdioh_info_t *
206 sdioh_attach(osl_t *osh, struct sdio_func *func)
207 {
208 	sdioh_info_t *sd = NULL;
209 	int err_ret;
210 
211 	sd_trace(("%s\n", __FUNCTION__));
212 
213 	if (func == NULL) {
214 		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
215 		return NULL;
216 	}
217 
218 	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
219 		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
220 		return NULL;
221 	}
222 	bzero((char *)sd, sizeof(sdioh_info_t));
223 	sd->osh = osh;
224 	sd->fake_func0.num = 0;
225 	sd->fake_func0.card = func->card;
226 	sd->func[0] = &sd->fake_func0;
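	/* Assumed rationale, noted for clarity: the MMC core does not expose an sdio_func for
	 * function 0, so fake_func0 (num == 0, same card) acts as a stand-in that lets the F0
	 * CCCR accesses elsewhere in this file reuse the ordinary sdio_readb()/sdio_f0_writeb()
	 * claim/release paths.
	 */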
227 	sd->func[1] = func->card->sdio_func[0];
228 	sd->func[2] = func->card->sdio_func[1];
229 
230 #if defined(BT_OVER_SDIO)
231 	sd->func[3] = NULL;
232 #endif /* defined (BT_OVER_SDIO) */
233 
234 #ifdef DHD_MAP_CHIP_FIRMWARE_PATH
235     if (func->num == 2) {
236         struct sdio_func_tuple *tuple = func->card->tuples;
237 
238         while (tuple != NULL) {
239             if ((tuple->code == 0x81) && (tuple->size == 0x01) && (tuple->data[0] == 0x01)) {
240                 sd_err(("%s: Got the chip vendor, tuple code=0x81\n", __FUNCTION__));
241                 sd_chip_module = 0x81;
242                 break;
243             }
244             tuple = tuple->next;
245         }
246     }
247 #endif /* DHD_MAP_CHIP_FIRMWARE_PATH */
248 
249 	sd->num_funcs = 2;
250 	sd->sd_blockmode = TRUE;
251 	sd->use_client_ints = TRUE;
252 	sd->client_block_size[0] = 64;
253 	sd->use_rxchain = CUSTOM_RXCHAIN;
254 	if (sd->func[1] == NULL || sd->func[2] == NULL) {
255 		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
256 		goto fail;
257 	}
258 	sdio_set_drvdata(sd->func[1], sd);
259 
260 	sdio_claim_host(sd->func[1]);
261 	sd->client_block_size[1] = sd_f1_blocksize;
262 	err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize);
263 	sdio_release_host(sd->func[1]);
264 	if (err_ret) {
265 		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
266 		goto fail;
267 	}
268 
269 	sdio_claim_host(sd->func[2]);
270 	sd->client_block_size[2] = sd_f2_blocksize;
271 	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
272 	sdio_release_host(sd->func[2]);
273 	if (err_ret) {
274 		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
275 			sd_f2_blocksize, err_ret));
276 		goto fail;
277 	}
278 
279 	sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
280 	DHD_ERROR(("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate));
281 
282 	sdioh_sdmmc_card_enablefuncs(sd);
283 
284 	sd_trace(("%s: Done\n", __FUNCTION__));
285 	return sd;
286 
287 fail:
288 	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
289 	return NULL;
290 }
291 
292 extern SDIOH_API_RC
293 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
294 {
295 	sd_trace(("%s\n", __FUNCTION__));
296 
297 	if (sd) {
298 
299 		/* Disable Function 2 */
300 		if (sd->func[2]) {
301 			sdio_claim_host(sd->func[2]);
302 			sdio_disable_func(sd->func[2]);
303 			sdio_release_host(sd->func[2]);
304 		}
305 
306 		/* Disable Function 1 */
307 		if (sd->func[1]) {
308 			sdio_claim_host(sd->func[1]);
309 			sdio_disable_func(sd->func[1]);
310 			sdio_release_host(sd->func[1]);
311 		}
312 
313 		sd->func[1] = NULL;
314 		sd->func[2] = NULL;
315 
316 		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
317 	}
318 	return SDIOH_API_RC_SUCCESS;
319 }
320 
321 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
322 
323 extern SDIOH_API_RC
324 sdioh_enable_func_intr(sdioh_info_t *sd)
325 {
326 	uint8 reg;
327 	int err;
328 
329 	if (sd->func[0] == NULL) {
330 		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
331 		return SDIOH_API_RC_FAIL;
332 	}
333 
334 	sdio_claim_host(sd->func[0]);
335 	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
336 	if (err) {
337 		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
338 		sdio_release_host(sd->func[0]);
339 		return SDIOH_API_RC_FAIL;
340 	}
341 	/* Enable F1 and F2 interrupts, clear master enable */
342 	reg &= ~INTR_CTL_MASTER_EN;
343 	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
344 #if defined(BT_OVER_SDIO)
345 	reg |= (INTR_CTL_FUNC3_EN);
346 #endif /* defined (BT_OVER_SDIO) */
347 	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
348 	sdio_release_host(sd->func[0]);
349 
350 	if (err) {
351 		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
352 		return SDIOH_API_RC_FAIL;
353 	}
354 
355 	return SDIOH_API_RC_SUCCESS;
356 }
357 
358 extern SDIOH_API_RC
359 sdioh_disable_func_intr(sdioh_info_t *sd)
360 {
361 	uint8 reg;
362 	int err;
363 
364 	if (sd->func[0] == NULL) {
365 		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
366 		return SDIOH_API_RC_FAIL;
367 	}
368 
369 	sdio_claim_host(sd->func[0]);
370 	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
371 	if (err) {
372 		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
373 		sdio_release_host(sd->func[0]);
374 		return SDIOH_API_RC_FAIL;
375 	}
376 	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
377 #if defined(BT_OVER_SDIO)
378 	reg &= ~INTR_CTL_FUNC3_EN;
379 #endif // endif
380 	/* Disable master interrupt with the last function interrupt */
381 	if (!(reg & 0xFE))
382 		reg = 0;
383 	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
384 	sdio_release_host(sd->func[0]);
385 
386 	if (err) {
387 		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
388 		return SDIOH_API_RC_FAIL;
389 	}
390 
391 	return SDIOH_API_RC_SUCCESS;
392 }
393 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
394 
395 /* Configure callback to client when we receive a client interrupt */
396 extern SDIOH_API_RC
397 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
398 {
399 	sd_trace(("%s: Entering\n", __FUNCTION__));
400 	if (fn == NULL) {
401 		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
402 		return SDIOH_API_RC_FAIL;
403 	}
404 #if !defined(OOB_INTR_ONLY)
405 	sd->intr_handler = fn;
406 	sd->intr_handler_arg = argh;
407 	sd->intr_handler_valid = TRUE;
408 
409 	/* register and unmask irq */
410 	if (sd->func[2]) {
411 		sdio_claim_host(sd->func[2]);
412 		sdio_claim_irq(sd->func[2], IRQHandlerF2);
413 		sdio_release_host(sd->func[2]);
414 	}
415 
416 	if (sd->func[1]) {
417 		sdio_claim_host(sd->func[1]);
418 		sdio_claim_irq(sd->func[1], IRQHandler);
419 		sdio_release_host(sd->func[1]);
420 	}
421 #elif defined(HW_OOB)
422 	sdioh_enable_func_intr(sd);
423 #endif /* !defined(OOB_INTR_ONLY) */
424 
425 	return SDIOH_API_RC_SUCCESS;
426 }
427 
428 extern SDIOH_API_RC
429 sdioh_interrupt_deregister(sdioh_info_t *sd)
430 {
431 	sd_trace(("%s: Entering\n", __FUNCTION__));
432 
433 #if !defined(OOB_INTR_ONLY)
434 	if (sd->func[1]) {
435 		/* register and unmask irq */
436 		sdio_claim_host(sd->func[1]);
437 		sdio_release_irq(sd->func[1]);
438 		sdio_release_host(sd->func[1]);
439 	}
440 
441 	if (sd->func[2]) {
442 		/* Claim host controller F2 */
443 		sdio_claim_host(sd->func[2]);
444 		sdio_release_irq(sd->func[2]);
445 		/* Release host controller F2 */
446 		sdio_release_host(sd->func[2]);
447 	}
448 
449 	sd->intr_handler_valid = FALSE;
450 	sd->intr_handler = NULL;
451 	sd->intr_handler_arg = NULL;
452 #elif defined(HW_OOB)
453 	sdioh_disable_func_intr(sd);
454 #endif /* !defined(OOB_INTR_ONLY) */
455 	return SDIOH_API_RC_SUCCESS;
456 }
457 
458 extern SDIOH_API_RC
459 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
460 {
461 	sd_trace(("%s: Entering\n", __FUNCTION__));
462 	*onoff = sd->client_intr_enabled;
463 	return SDIOH_API_RC_SUCCESS;
464 }
465 
466 #if defined(DHD_DEBUG)
467 extern bool
468 sdioh_interrupt_pending(sdioh_info_t *sd)
469 {
470 	return (0);
471 }
472 #endif // endif
473 
474 uint
475 sdioh_query_iofnum(sdioh_info_t *sd)
476 {
477 	return sd->num_funcs;
478 }
479 
480 /* IOVar table */
481 enum {
482 	IOV_MSGLEVEL = 1,
483 	IOV_BLOCKMODE,
484 	IOV_BLOCKSIZE,
485 	IOV_DMA,
486 	IOV_USEINTS,
487 	IOV_NUMINTS,
488 	IOV_NUMLOCALINTS,
489 	IOV_HOSTREG,
490 	IOV_DEVREG,
491 	IOV_DIVISOR,
492 	IOV_SDMODE,
493 	IOV_HISPEED,
494 	IOV_HCIREGS,
495 	IOV_POWER,
496 	IOV_CLOCK,
497 	IOV_RXCHAIN
498 };
499 
500 const bcm_iovar_t sdioh_iovars[] = {
501 	{"sd_msglevel", IOV_MSGLEVEL,	0, 0,	IOVT_UINT32,	0 },
502 	{"sd_blockmode", IOV_BLOCKMODE, 0, 0,	IOVT_BOOL,	0 },
503 	{"sd_blocksize", IOV_BLOCKSIZE, 0, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
504 	{"sd_dma",	IOV_DMA,	0, 0,	IOVT_BOOL,	0 },
505 	{"sd_ints",	IOV_USEINTS,	0, 0,	IOVT_BOOL,	0 },
506 	{"sd_numints",	IOV_NUMINTS,	0, 0,	IOVT_UINT32,	0 },
507 	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32,	0 },
508 	{"sd_divisor",	IOV_DIVISOR,	0, 0,	IOVT_UINT32,	0 },
509 	{"sd_power",	IOV_POWER,	0, 0,	IOVT_UINT32,	0 },
510 	{"sd_clock",	IOV_CLOCK,	0, 0,	IOVT_UINT32,	0 },
511 	{"sd_mode",	IOV_SDMODE,	0, 0,	IOVT_UINT32,	100},
512 	{"sd_highspeed", IOV_HISPEED,	0, 0,	IOVT_UINT32,	0 },
513 	{"sd_rxchain",  IOV_RXCHAIN,    0, 0, 	IOVT_BOOL,	0 },
514 	{NULL, 0, 0, 0, 0, 0 }
515 };
516 
517 int
518 sdioh_iovar_op(sdioh_info_t *si, const char *name,
519                            void *params, int plen, void *arg, int len, bool set)
520 {
521 	const bcm_iovar_t *vi = NULL;
522 	int bcmerror = 0;
523 	int val_size;
524 	int32 int_val = 0;
525 	bool bool_val;
526 	uint32 actionid;
527 
528 	ASSERT(name);
529 	ASSERT(len >= 0);
530 
531 	/* Get must have return space; Set does not take qualifiers */
532 	ASSERT(set || (arg && len));
533 	ASSERT(!set || (!params && !plen));
534 
535 	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
536 
537 	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
538 		bcmerror = BCME_UNSUPPORTED;
539 		goto exit;
540 	}
541 
542 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
543 		goto exit;
544 
545 	/* Set up params so get and set can share the convenience variables */
546 	if (params == NULL) {
547 		params = arg;
548 		plen = len;
549 	}
550 
551 	if (vi->type == IOVT_VOID)
552 		val_size = 0;
553 	else if (vi->type == IOVT_BUFFER)
554 		val_size = len;
555 	else
556 		val_size = sizeof(int);
557 
558 	if (plen >= (int)sizeof(int_val))
559 		bcopy(params, &int_val, sizeof(int_val));
560 
561 	bool_val = (int_val != 0) ? TRUE : FALSE;
562 	BCM_REFERENCE(bool_val);
563 
564 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
565 	switch (actionid) {
566 	case IOV_GVAL(IOV_MSGLEVEL):
567 		int_val = (int32)sd_msglevel;
568 		bcopy(&int_val, arg, val_size);
569 		break;
570 
571 	case IOV_SVAL(IOV_MSGLEVEL):
572 		sd_msglevel = int_val;
573 		break;
574 
575 	case IOV_GVAL(IOV_BLOCKMODE):
576 		int_val = (int32)si->sd_blockmode;
577 		bcopy(&int_val, arg, val_size);
578 		break;
579 
580 	case IOV_SVAL(IOV_BLOCKMODE):
581 		si->sd_blockmode = (bool)int_val;
582 		/* Haven't figured out how to make non-block mode with DMA */
583 		break;
584 
585 	case IOV_GVAL(IOV_BLOCKSIZE):
586 		if ((uint32)int_val > si->num_funcs) {
587 			bcmerror = BCME_BADARG;
588 			break;
589 		}
590 		int_val = (int32)si->client_block_size[int_val];
591 		bcopy(&int_val, arg, val_size);
592 		break;
593 
594 	case IOV_SVAL(IOV_BLOCKSIZE):
595 	{
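		/* sd_blocksize packs its argument as ((fn << 16) | size); e.g. an int_val of
		 * ((2 << 16) | 512) selects function 2 with a 512-byte block size.
		 */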
596 		uint func = ((uint32)int_val >> 16);
597 		uint blksize = (uint16)int_val;
598 		uint maxsize;
599 
600 		if (func > si->num_funcs) {
601 			bcmerror = BCME_BADARG;
602 			break;
603 		}
604 
605 		switch (func) {
606 		case 0: maxsize = 32; break;
607 		case 1: maxsize = BLOCK_SIZE_4318; break;
608 		case 2: maxsize = BLOCK_SIZE_4328; break;
609 		default: maxsize = 0;
610 		}
611 		if (blksize > maxsize) {
612 			bcmerror = BCME_BADARG;
613 			break;
614 		}
615 		if (!blksize) {
616 			blksize = maxsize;
617 		}
618 
619 		/* Now set it */
620 		si->client_block_size[func] = blksize;
621 
622 #ifdef USE_DYNAMIC_F2_BLKSIZE
623 		if (si->func[func] == NULL) {
624 			sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
625 			bcmerror = BCME_NORESOURCE;
626 			break;
627 		}
628 		sdio_claim_host(si->func[func]);
629 		bcmerror = sdio_set_block_size(si->func[func], blksize);
630 		if (bcmerror)
631 			sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
632 				__FUNCTION__, func, blksize, bcmerror));
633 		sdio_release_host(si->func[func]);
634 #endif /* USE_DYNAMIC_F2_BLKSIZE */
635 		break;
636 	}
637 
638 	case IOV_GVAL(IOV_RXCHAIN):
639 		int_val = (int32)si->use_rxchain;
640 		bcopy(&int_val, arg, val_size);
641 		break;
642 
643 	case IOV_GVAL(IOV_DMA):
644 		int_val = (int32)si->sd_use_dma;
645 		bcopy(&int_val, arg, val_size);
646 		break;
647 
648 	case IOV_SVAL(IOV_DMA):
649 		si->sd_use_dma = (bool)int_val;
650 		break;
651 
652 	case IOV_GVAL(IOV_USEINTS):
653 		int_val = (int32)si->use_client_ints;
654 		bcopy(&int_val, arg, val_size);
655 		break;
656 
657 	case IOV_SVAL(IOV_USEINTS):
658 		si->use_client_ints = (bool)int_val;
659 		if (si->use_client_ints)
660 			si->intmask |= CLIENT_INTR;
661 		else
662 			si->intmask &= ~CLIENT_INTR;
663 
664 		break;
665 
666 	case IOV_GVAL(IOV_DIVISOR):
667 		int_val = (uint32)sd_divisor;
668 		bcopy(&int_val, arg, val_size);
669 		break;
670 
671 	case IOV_SVAL(IOV_DIVISOR):
672 		/* set the clock to divisor, if value is non-zero & power of 2 */
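		/* (int_val & (int_val - 1)) is zero only for powers of two. For example, an
		 * sd_divisor of 2 on a 50 MHz base clock (sd->sd_clk_rate; rate chosen here
		 * purely for illustration) asks sdmmc_set_clock_divisor() for 25 MHz.
		 */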
673 		if (int_val && !(int_val & (int_val - 1))) {
674 			sd_divisor = int_val;
675 			sdmmc_set_clock_divisor(si, sd_divisor);
676 		} else {
677 			DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n",
678 				__FUNCTION__));
679 		}
680 		break;
681 
682 	case IOV_GVAL(IOV_POWER):
683 		int_val = (uint32)sd_power;
684 		bcopy(&int_val, arg, val_size);
685 		break;
686 
687 	case IOV_SVAL(IOV_POWER):
688 		sd_power = int_val;
689 		break;
690 
691 	case IOV_GVAL(IOV_CLOCK):
692 		int_val = (uint32)sd_clock;
693 		bcopy(&int_val, arg, val_size);
694 		break;
695 
696 	case IOV_SVAL(IOV_CLOCK):
697 		sd_clock = int_val;
698 		break;
699 
700 	case IOV_GVAL(IOV_SDMODE):
701 		int_val = (uint32)sd_sdmode;
702 		bcopy(&int_val, arg, val_size);
703 		break;
704 
705 	case IOV_SVAL(IOV_SDMODE):
706 		sd_sdmode = int_val;
707 		break;
708 
709 	case IOV_GVAL(IOV_HISPEED):
710 		int_val = (uint32)sd_hiok;
711 		bcopy(&int_val, arg, val_size);
712 		break;
713 
714 	case IOV_SVAL(IOV_HISPEED):
715 		sd_hiok = int_val;
716 		break;
717 
718 	case IOV_GVAL(IOV_NUMINTS):
719 		int_val = (int32)si->intrcount;
720 		bcopy(&int_val, arg, val_size);
721 		break;
722 
723 	case IOV_GVAL(IOV_NUMLOCALINTS):
724 		int_val = (int32)0;
725 		bcopy(&int_val, arg, val_size);
726 		break;
727 	default:
728 		bcmerror = BCME_UNSUPPORTED;
729 		break;
730 	}
731 exit:
732 
733 	return bcmerror;
734 }
735 
736 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
737 
738 SDIOH_API_RC
739 sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
740 {
741 	SDIOH_API_RC status;
742 	uint8 data;
743 
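	/* SDIOD_CCCR_BRCM_SEPINT is a Broadcom vendor register in CCCR space; the bits used
	 * here are understood as: SDIO_SEPINT_MASK routes the device interrupt to the dedicated
	 * OOB pin, SDIO_SEPINT_OE drives the pin as an output, and SDIO_SEPINT_ACT_HI selects
	 * active-high polarity.
	 */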
744 	if (enable)
745 		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
746 	else
747 		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
748 
749 	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
750 	return status;
751 }
752 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
753 
754 extern SDIOH_API_RC
755 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
756 {
757 	SDIOH_API_RC status;
758 	/* No lock needed since sdioh_request_byte does locking */
759 	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
760 	return status;
761 }
762 
763 extern SDIOH_API_RC
764 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
765 {
766 	/* No lock needed since sdioh_request_byte does locking */
767 	SDIOH_API_RC status;
768 	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
769 	return status;
770 }
771 
772 static int
773 sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
774 {
775 	/* read 24 bits and return valid 17 bit addr */
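	/* Example: the common CIS pointer occupies CCCR 0x09..0x0B (little-endian), so byte
	 * reads of 0x10, 0x00, 0x01 assemble to 0x00010010, which the 17-bit mask below
	 * leaves as CIS address 0x10010.
	 */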
776 	int i;
777 	uint32 scratch, regdata;
778 	uint8 *ptr = (uint8 *)&scratch;
779 	for (i = 0; i < 3; i++) {
780 		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
781 			sd_err(("%s: Can't read!\n", __FUNCTION__));
782 
783 		*ptr++ = (uint8) regdata;
784 		regaddr++;
785 	}
786 
787 	/* Only the lower 17-bits are valid */
788 	scratch = ltoh32(scratch);
789 	scratch &= 0x0001FFFF;
790 	return (scratch);
791 }
792 
793 extern SDIOH_API_RC
794 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
795 {
796 	uint32 count;
797 	int offset;
798 	uint32 foo;
799 	uint8 *cis = cisd;
800 
801 	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
802 
803 	if (!sd->func_cis_ptr[func]) {
804 		bzero(cis, length);
805 		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
806 		return SDIOH_API_RC_FAIL;
807 	}
808 
809 	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
810 
811 	for (count = 0; count < length; count++) {
812 		offset =  sd->func_cis_ptr[func] + count;
813 		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
814 			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
815 			return SDIOH_API_RC_FAIL;
816 		}
817 
818 		*cis = (uint8)(foo & 0xff);
819 		cis++;
820 	}
821 
822 	return SDIOH_API_RC_SUCCESS;
823 }
824 
825 extern SDIOH_API_RC
826 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
827 {
828 	int err_ret = 0;
829 #if defined(MMC_SDIO_ABORT)
830 	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
831 #endif // endif
832 
833 	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
834 
835 	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
836 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
837 	if(rw) { /* CMD52 Write */
838 		if (func == 0) {
839 			/* Can only directly write to some F0 registers.  Handle F2 enable
840 			 * as a special case.
841 			 */
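			/* Assumed rationale: a raw CMD52 write to IOEn would bypass the MMC core's
			 * bookkeeping, so the IOEn case below is translated into
			 * sdio_enable_func()/sdio_disable_func() calls, keeping the core's view of
			 * enabled functions consistent.
			 */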
842 			if (regaddr == SDIOD_CCCR_IOEN) {
843 #if defined(BT_OVER_SDIO)
844 				do {
845 				if (sd->func[3]) {
846 					sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
847 
848 					if (*byte & SDIO_FUNC_ENABLE_3) {
849 						sdio_claim_host(sd->func[3]);
850 
851 						/* Set Function 3 Block Size */
852 						err_ret = sdio_set_block_size(sd->func[3],
853 						sd_f3_blocksize);
854 						if (err_ret) {
855 							sd_err(("F3 blocksize set err%d\n",
856 								err_ret));
857 						}
858 
859 						/* Enable Function 3 */
860 						sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
861 						sd->func[3]));
862 						err_ret = sdio_enable_func(sd->func[3]);
863 						if (err_ret) {
864 							sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
865 								err_ret));
866 						}
867 
868 						sdio_release_host(sd->func[3]);
869 
870 						break;
871 					} else if (*byte & SDIO_FUNC_DISABLE_3) {
872 						sdio_claim_host(sd->func[3]);
873 
874 						/* Disable Function 3 */
875 						sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
876 						sd->func[3]));
877 						err_ret = sdio_disable_func(sd->func[3]);
878 						if (err_ret) {
879 							sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
880 								err_ret));
881 						}
882 						sdio_release_host(sd->func[3]);
883 						sd->func[3] = NULL;
884 
885 						break;
886 					}
887 				}
888 #endif /* defined (BT_OVER_SDIO) */
889 				if (sd->func[2]) {
890 					sdio_claim_host(sd->func[2]);
891 					if (*byte & SDIO_FUNC_ENABLE_2) {
892 						/* Enable Function 2 */
893 						err_ret = sdio_enable_func(sd->func[2]);
894 						if (err_ret) {
895 							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
896 								err_ret));
897 						}
898 					} else {
899 						/* Disable Function 2 */
900 						err_ret = sdio_disable_func(sd->func[2]);
901 						if (err_ret) {
902 							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
903 								err_ret));
904 						}
905 					}
906 					sdio_release_host(sd->func[2]);
907 				}
908 #if defined(BT_OVER_SDIO)
909 			} while (0);
910 #endif /* defined (BT_OVER_SDIO) */
911 		}
912 #if defined(MMC_SDIO_ABORT)
913 			/* to allow abort command through F1 */
914 			else if (regaddr == SDIOD_CCCR_IOABORT) {
915 				while (sdio_abort_retry--) {
916 					if (sd->func[func]) {
917 						sdio_claim_host(sd->func[func]);
918 						/*
919 						 * this sdio_writeb() can be replaced with
920 						 * another api depending upon MMC driver change.
921 						 * As of this time, this is a temporary one.
922 						 */
923 						sdio_writeb(sd->func[func],
924 							*byte, regaddr, &err_ret);
925 						sdio_release_host(sd->func[func]);
926 					}
927 					if (!err_ret)
928 						break;
929 				}
930 			}
931 #endif /* MMC_SDIO_ABORT */
932 			else if (regaddr < 0xF0) {
933 				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
934 			} else {
935 				/* Claim host controller, perform F0 write, and release */
936 				if (sd->func[func]) {
937 					sdio_claim_host(sd->func[func]);
938 					sdio_f0_writeb(sd->func[func],
939 						*byte, regaddr, &err_ret);
940 					sdio_release_host(sd->func[func]);
941 				}
942 			}
943 		} else {
944 			/* Claim host controller, perform Fn write, and release */
945 			if (sd->func[func]) {
946 				sdio_claim_host(sd->func[func]);
947 				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
948 				sdio_release_host(sd->func[func]);
949 			}
950 		}
951 	} else { /* CMD52 Read */
952 		/* Claim host controller, perform Fn read, and release */
953 		if (sd->func[func]) {
954 			sdio_claim_host(sd->func[func]);
955 			if (func == 0) {
956 				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
957 			} else {
958 				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
959 			}
960 			sdio_release_host(sd->func[func]);
961 		}
962 	}
963 
964 	if (err_ret) {
965 		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
966 		} else {
967 			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
968 				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
969 		}
970 	}
971 
972 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
973 }
974 
975 extern SDIOH_API_RC
976 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
977                                    uint32 *word, uint nbytes)
978 {
979 	int err_ret = SDIOH_API_RC_FAIL;
980 #if defined(MMC_SDIO_ABORT)
981 	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
982 #endif // endif
983 
984 	if (func == 0) {
985 		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
986 		return SDIOH_API_RC_FAIL;
987 	}
988 
989 	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
990 	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
991 
992 	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
993 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
994 	/* Claim host controller */
995 	sdio_claim_host(sd->func[func]);
996 
997 	if(rw) { /* CMD52 Write */
998 		if (nbytes == 4) {
999 			sdio_writel(sd->func[func], *word, addr, &err_ret);
1000 		} else if (nbytes == 2) {
1001 			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
1002 		} else {
1003 			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
1004 		}
1005 	} else { /* CMD52 Read */
1006 		if (nbytes == 4) {
1007 			*word = sdio_readl(sd->func[func], addr, &err_ret);
1008 		} else if (nbytes == 2) {
1009 			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
1010 		} else {
1011 			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
1012 		}
1013 	}
1014 
1015 	/* Release host controller */
1016 	sdio_release_host(sd->func[func]);
1017 
1018 	if (err_ret) {
1019 #if defined(MMC_SDIO_ABORT)
1020 		/* Any error on CMD53 transaction should abort that function using function 0. */
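		/* Per the SDIO spec, writing the failing function's number to the CCCR I/O Abort
		 * register (SDIOD_CCCR_IOABORT) through function 0 aborts that function's
		 * outstanding CMD53 transfer, which is what the retry loop below does.
		 */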
1021 		while (sdio_abort_retry--) {
1022 			if (sd->func[0]) {
1023 				sdio_claim_host(sd->func[0]);
1024 				/*
1025 				 * this sdio_writeb() can be replaced with another api
1026 				 * depending upon MMC driver change.
1027 				 * As of this time, this is a temporary one.
1028 				 */
1029 				sdio_writeb(sd->func[0],
1030 					func, SDIOD_CCCR_IOABORT, &err_ret);
1031 				sdio_release_host(sd->func[0]);
1032 			}
1033 			if (!err_ret)
1034 				break;
1035 		}
1036 		if (err_ret)
1037 #endif /* MMC_SDIO_ABORT */
1038 		{
1039 			sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
1040 				rw ? "Write" : "Read", err_ret));
1041 		}
1042 	}
1043 
1044 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1045 }
1046 
1047 #ifdef BCMSDIOH_TXGLOM
1048 static SDIOH_API_RC
1049 sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1050                      uint addr, void *pkt)
1051 {
1052 	bool fifo = (fix_inc == SDIOH_DATA_FIX);
1053 	int err_ret = 0;
1054 	void *pnext;
1055 	uint ttl_len, pkt_offset;
1056 	uint blk_num;
1057 	uint blk_size;
1058 	uint max_blk_count;
1059 	uint max_req_size;
1060 	struct mmc_request mmc_req;
1061 	struct mmc_command mmc_cmd;
1062 	struct mmc_data mmc_dat;
1063 	uint32 sg_count;
1064 	struct sdio_func *sdio_func = sd->func[func];
1065 	struct mmc_host *host = sdio_func->card->host;
1066 
1067 	sd_trace(("%s: Enter\n", __FUNCTION__));
1068 	ASSERT(pkt);
1069 	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
1070 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1071 
1072 	blk_size = sd->client_block_size[func];
1073 	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
1074 	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
1075 
1076 	pkt_offset = 0;
1077 	pnext = pkt;
1078 
1079 	while (pnext != NULL) {
1080 		ttl_len = 0;
1081 		sg_count = 0;
1082 		memset(&mmc_req, 0, sizeof(struct mmc_request));
1083 		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
1084 		memset(&mmc_dat, 0, sizeof(struct mmc_data));
1085 		sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
1086 
1087 		/* Set up scatter-gather DMA descriptors. This loop finds the maximum amount of
1088 		 * data we can transfer with one CMD53. Blocks per command are limited by the
1089 		 * host max_req_size and the 9-bit block count. When the total length of this
1090 		 * packet chain exceeds max_req_size, multiple SD_IO_RW_EXTENDED commands are
1091 		 * issued (each transfer is still block aligned).
1092 		 */
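		/* Worked example, assuming the defaults in this file: with blk_size = 512
		 * (CUSTOM_SDIO_F2_BLKSIZE) and MAX_IO_RW_EXTENDED_BLK = 511, one CMD53 can carry
		 * at most min(511 * 512, host->max_req_size) = 261632 bytes, less if the host
		 * reports a smaller max_blk_count or max_req_size.
		 */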
1093 		while (pnext != NULL && ttl_len < max_req_size) {
1094 			int pkt_len;
1095 			int sg_data_size;
1096 			uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
1097 
1098 			ASSERT(pdata != NULL);
1099 			pkt_len = PKTLEN(sd->osh, pnext);
1100 			sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
1101 			/* sg_count is unlikely larger than the array size, and this is
1102 			 * NOT something we can handle here, but in case it happens, PLEASE put
1103 			 * a restriction on max tx/glom count (based on host->max_segs).
1104 			 */
1105 			if (sg_count >= ARRAYSIZE(sd->sg_list)) {
1106 				sd_err(("%s: sg list entries(%u) exceed limit(%u),"
1107 					" sd blk_size=%u\n",
1108 					__FUNCTION__, sg_count, ARRAYSIZE(sd->sg_list), blk_size));
1109 				return (SDIOH_API_RC_FAIL);
1110 			}
1111 			pdata += pkt_offset;
1112 
1113 			sg_data_size = pkt_len - pkt_offset;
1114 			if (sg_data_size > max_req_size - ttl_len)
1115 				sg_data_size = max_req_size - ttl_len;
1116 			/* some platforms put a restriction on the data size of each scatter-gather
1117 			 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
1118 			 * max_seg_size
1119 			 */
1120 			if (sg_data_size > host->max_seg_size) {
1121 				sg_data_size = host->max_seg_size;
1122 			}
1123 			sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
1124 
1125 			ttl_len += sg_data_size;
1126 			pkt_offset += sg_data_size;
1127 			if (pkt_offset == pkt_len) {
1128 				pnext = PKTNEXT(sd->osh, pnext);
1129 				pkt_offset = 0;
1130 			}
1131 		}
1132 
1133 		if (ttl_len % blk_size != 0) {
1134 			sd_err(("%s, data length %d not aligned to block size %d\n",
1135 				__FUNCTION__,  ttl_len, blk_size));
1136 			return SDIOH_API_RC_FAIL;
1137 		}
1138 		blk_num = ttl_len / blk_size;
1139 		mmc_dat.sg = sd->sg_list;
1140 		mmc_dat.sg_len = sg_count;
1141 		mmc_dat.blksz = blk_size;
1142 		mmc_dat.blocks = blk_num;
1143 		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
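		/* SD_IO_RW_EXTENDED (CMD53) argument layout per the SDIO spec, matching the
		 * shifts below: bit 31 = R/W flag, bits 30:28 = function number, bit 27 = block
		 * mode, bit 26 = OP code (incrementing address when set, fixed/FIFO when clear),
		 * bits 25:9 = register address, bits 8:0 = block count.
		 */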
1144 		mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
1145 		mmc_cmd.arg = write ? 1<<31 : 0;
1146 		mmc_cmd.arg |= (func & 0x7) << 28;
1147 		mmc_cmd.arg |= 1<<27;
1148 		mmc_cmd.arg |= fifo ? 0 : 1<<26;
1149 		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
1150 		mmc_cmd.arg |= blk_num & 0x1FF;
1151 		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
1152 		mmc_req.cmd = &mmc_cmd;
1153 		mmc_req.data = &mmc_dat;
1154 		if (!fifo)
1155 			addr += ttl_len;
1156 
1157 		sdio_claim_host(sdio_func);
1158 		mmc_set_data_timeout(&mmc_dat, sdio_func->card);
1159 		mmc_wait_for_req(host, &mmc_req);
1160 		sdio_release_host(sdio_func);
1161 
1162 		err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
1163 		if (0 != err_ret) {
1164 			sd_err(("%s:CMD53 %s failed with code %d\n",
1165 				__FUNCTION__, write ? "write" : "read", err_ret));
1166 			return SDIOH_API_RC_FAIL;
1167 		}
1168 	}
1169 
1170 	sd_trace(("%s: Exit\n", __FUNCTION__));
1171 	return SDIOH_API_RC_SUCCESS;
1172 }
1173 #endif /* BCMSDIOH_TXGLOM */
1174 
1175 static SDIOH_API_RC
1176 sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1177                      uint addr, uint8 *buf, uint len)
1178 {
1179 	bool fifo = (fix_inc == SDIOH_DATA_FIX);
1180 	int err_ret = 0;
1181 
1182 	sd_trace(("%s: Enter\n", __FUNCTION__));
1183 	ASSERT(buf);
1184 
1185 	/* NOTE:
1186 	 * For all writes, each packet length is aligned to 32 (or 4)
1187 	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
1188 	 * is aligned to block boundary. If you want to align each packet to
1189 	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
1190 	 *
1191 	 * For reads, the alignment is done in sdioh_request_buffer.
1192 	 *
1193 	 */
1194 	sdio_claim_host(sd->func[func]);
1195 
1196 	if ((write) && (!fifo))
1197 		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1198 	else if (write)
1199 		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1200 	else if (fifo)
1201 		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
1202 	else
1203 		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
1204 
1205 	sdio_release_host(sd->func[func]);
1206 
1207 	if (err_ret)
1208 		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
1209 		       (write) ? "TX" : "RX", buf, addr, len, err_ret));
1210 	else
1211 		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
1212 			(write) ? "TX" : "RX", buf, addr, len));
1213 
1214 	sd_trace(("%s: Exit\n", __FUNCTION__));
1215 	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1216 }
1217 
1218 /*
1219  * This function takes a buffer or packet, and fixes everything up so that in the
1220  * end, a DMA-able packet is created.
1221  *
1222  * A buffer does not have an associated packet pointer, and may or may not be aligned.
1223  * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
1224  * then all the packets in the chain must be properly aligned.  If the packet data is not
1225  * aligned, then there may only be one packet, and in this case, it is copied to a new
1226  * aligned packet.
1227  *
1228  */
1229 extern SDIOH_API_RC
1230 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
1231 	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
1232 {
1233 	SDIOH_API_RC status;
1234 	void *tmppkt;
1235 
1236 	sd_trace(("%s: Enter\n", __FUNCTION__));
1237 	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
1238 	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1239 
1240 	if (pkt) {
1241 #ifdef BCMSDIOH_TXGLOM
1242 		/* packet chain, only used for tx/rx glom, all packets length
1243 		 * are aligned, total length is a block multiple
1244 		 */
1245 		if (PKTNEXT(sd->osh, pkt))
1246 			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
1247 #endif /* BCMSDIOH_TXGLOM */
1248 		/* non-glom mode, ignore the buffer parameter and use the packet pointer
1249 		 * (this shouldn't happen)
1250 		 */
1251 		buffer = PKTDATA(sd->osh, pkt);
1252 		buf_len = PKTLEN(sd->osh, pkt);
1253 	}
1254 
1255 	ASSERT(buffer);
1256 
1257 	/* buffer and length are aligned, use it directly so we can avoid memory copy */
1258 	if (((ulong)buffer & (ARCH_DMA_MINALIGN - 1)) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
1259 		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
1260 
1261 	sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
1262 		__FUNCTION__, write, buffer, buf_len));
1263 
1264 	/* otherwise, a memory copy is needed as the input buffer is not aligned */
1265 	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
1266 	if (tmppkt == NULL) {
1267 		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
1268 		return SDIOH_API_RC_FAIL;
1269 	}
1270 
1271 	if (write)
1272 		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
1273 
1274 	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
1275 		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
1276 
1277 	if (!write)
1278 		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
1279 
1280 	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
1281 
1282 	return status;
1283 }
1284 
1285 /* this function performs "abort" for both of host & device */
1286 extern int
1287 sdioh_abort(sdioh_info_t *sd, uint func)
1288 {
1289 #if defined(MMC_SDIO_ABORT)
1290 	char t_func = (char) func;
1291 #endif /* defined(MMC_SDIO_ABORT) */
1292 	sd_trace(("%s: Enter\n", __FUNCTION__));
1293 
1294 #if defined(MMC_SDIO_ABORT)
1295 	/* issue abort cmd52 command through F1 */
1296 	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
1297 #endif /* defined(MMC_SDIO_ABORT) */
1298 
1299 	sd_trace(("%s: Exit\n", __FUNCTION__));
1300 	return SDIOH_API_RC_SUCCESS;
1301 }
1302 
1303 /* Reset and re-initialize the device */
1304 int sdioh_sdio_reset(sdioh_info_t *si)
1305 {
1306 	sd_trace(("%s: Enter\n", __FUNCTION__));
1307 	sd_trace(("%s: Exit\n", __FUNCTION__));
1308 	return SDIOH_API_RC_SUCCESS;
1309 }
1310 
1311 /* Disable device interrupt */
1312 void
1313 sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
1314 {
1315 	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1316 	sd->intmask &= ~CLIENT_INTR;
1317 }
1318 
1319 /* Enable device interrupt */
1320 void
1321 sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
1322 {
1323 	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1324 	sd->intmask |= CLIENT_INTR;
1325 }
1326 
1327 /* Read client card reg */
1328 int
1329 sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
1330 {
1331 
1332 	if ((func == 0) || (regsize == 1)) {
1333 		uint8 temp = 0;
1334 
1335 		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1336 		*data = temp;
1337 		*data &= 0xff;
1338 		sd_data(("%s: byte read data=0x%02x\n",
1339 		         __FUNCTION__, *data));
1340 	} else {
1341 		if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
1342 			return BCME_SDIO_ERROR;
1343 		}
1344 		if (regsize == 2)
1345 			*data &= 0xffff;
1346 
1347 		sd_data(("%s: word read data=0x%08x\n",
1348 		         __FUNCTION__, *data));
1349 	}
1350 
1351 	return SUCCESS;
1352 }
1353 
1354 #if !defined(OOB_INTR_ONLY)
1355 /* bcmsdh_sdmmc interrupt handler */
1356 static void IRQHandler(struct sdio_func *func)
1357 {
1358 	sdioh_info_t *sd;
1359 
1360 	sd = sdio_get_drvdata(func);
1361 
1362 	ASSERT(sd != NULL);
1363 	sdio_release_host(sd->func[0]);
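	/* The SDIO IRQ thread enters this handler with the host claimed. It is released here,
	 * presumably because the registered DHD handler defers the real work to a thread that
	 * claims the host itself; it is re-claimed below before returning to the core.
	 */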
1364 
1365 	if (sd->use_client_ints) {
1366 		sd->intrcount++;
1367 		ASSERT(sd->intr_handler);
1368 		ASSERT(sd->intr_handler_arg);
1369 		(sd->intr_handler)(sd->intr_handler_arg);
1370 	} else {
1371 		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
1372 
1373 		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
1374 		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
1375 	}
1376 
1377 	sdio_claim_host(sd->func[0]);
1378 }
1379 
1380 /* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
1381 static void IRQHandlerF2(struct sdio_func *func)
1382 {
1383 	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
1384 }
1385 #endif /* !defined(OOB_INTR_ONLY) */
1386 
1387 #ifdef NOTUSED
1388 /* Write client card reg */
1389 static int
1390 sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
1391 {
1392 
1393 	if ((func == 0) || (regsize == 1)) {
1394 		uint8 temp;
1395 
1396 		temp = data & 0xff;
1397 		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1398 		sd_data(("%s: byte write data=0x%02x\n",
1399 		         __FUNCTION__, data));
1400 	} else {
1401 		if (regsize == 2)
1402 			data &= 0xffff;
1403 
1404 		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize);
1405 
1406 		sd_data(("%s: word write data=0x%08x\n",
1407 		         __FUNCTION__, data));
1408 	}
1409 
1410 	return SUCCESS;
1411 }
1412 #endif /* NOTUSED */
1413 
1414 int
1415 sdioh_start(sdioh_info_t *sd, int stage)
1416 {
1417 #if defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)
1418 	int ret;
1419 
1420 	if (!sd) {
1421 		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
1422 		return (0);
1423 	}
1424 
1425 	/* Need to do this in stages as we can't enable the interrupt till
1426 		downloading of the firmware is complete, otherwise polling
1427 		SDIO access will get in the way
1428 	*/
1429 	if (sd->func[0]) {
1430 			if (stage == 0) {
1431 		/* Since the power to the chip is killed, we will have to
1432 			re-enumerate the device again. Set the block size
1433 			and enable function 1 in preparation for
1434 			downloading the code
1435 		*/
1436 		/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
1437 		   2.6.27. The implementation prior to that is buggy, and needs broadcom's
1438 		   patch for it
1439 		*/
1440 		if ((ret = sdio_reset_comm(sd->func[0]->card))) {
1441 			sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
1442 			return ret;
1443 		}
1444 		else {
1445 			sd->num_funcs = 2;
1446 			sd->sd_blockmode = TRUE;
1447 			sd->use_client_ints = TRUE;
1448 			sd->client_block_size[0] = 64;
1449 
1450 			if (sd->func[1]) {
1451 				/* Claim host controller */
1452 				sdio_claim_host(sd->func[1]);
1453 
1454 				sd->client_block_size[1] = 64;
1455 				ret = sdio_set_block_size(sd->func[1], 64);
1456 				if (ret) {
1457 					sd_err(("bcmsdh_sdmmc: Failed to set F1 "
1458 						"blocksize(%d)\n", ret));
1459 				}
1460 
1461 				/* Release host controller F1 */
1462 				sdio_release_host(sd->func[1]);
1463 			}
1464 
1465 			if (sd->func[2]) {
1466 				/* Claim host controller F2 */
1467 				sdio_claim_host(sd->func[2]);
1468 
1469 				sd->client_block_size[2] = sd_f2_blocksize;
1470 				ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
1471 				if (ret) {
1472 					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
1473 						"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
1474 				}
1475 
1476 				/* Release host controller F2 */
1477 				sdio_release_host(sd->func[2]);
1478 			}
1479 
1480 			sdioh_sdmmc_card_enablefuncs(sd);
1481 			}
1482 		} else {
1483 #if !defined(OOB_INTR_ONLY)
1484 			sdio_claim_host(sd->func[0]);
1485 			if (sd->func[2])
1486 				sdio_claim_irq(sd->func[2], IRQHandlerF2);
1487 			if (sd->func[1])
1488 				sdio_claim_irq(sd->func[1], IRQHandler);
1489 			sdio_release_host(sd->func[0]);
1490 #else /* defined(OOB_INTR_ONLY) */
1491 #if defined(HW_OOB)
1492 			sdioh_enable_func_intr(sd);
1493 #endif // endif
1494 			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
1495 #endif /* !defined(OOB_INTR_ONLY) */
1496 		}
1497 	}
1498 	else
1499 		sd_err(("%s Failed\n", __FUNCTION__));
1500 #endif /* defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX) */
1501 
1502 	return (0);
1503 }
1504 
1505 int
1506 sdioh_stop(sdioh_info_t *sd)
1507 {
1508 #if defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)
1509 	/* The MSM7201A Android SDIO stack has a bug with interrupts,
1510 		so internally the SDIO stack polls instead, which causes
1511 		issues when the device is turned off. So unregister the
1512 		interrupt with the SDIO stack to stop the
1513 		polling
1514 	*/
1515 	if (sd->func[0]) {
1516 #if !defined(OOB_INTR_ONLY)
1517 		sdio_claim_host(sd->func[0]);
1518 		if (sd->func[1])
1519 			sdio_release_irq(sd->func[1]);
1520 		if (sd->func[2])
1521 			sdio_release_irq(sd->func[2]);
1522 		sdio_release_host(sd->func[0]);
1523 #else /* defined(OOB_INTR_ONLY) */
1524 #if defined(HW_OOB)
1525 		sdioh_disable_func_intr(sd);
1526 #endif // endif
1527 		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
1528 #endif /* !defined(OOB_INTR_ONLY) */
1529 	}
1530 	else
1531 		sd_err(("%s Failed\n", __FUNCTION__));
1532 #endif /* defined(OEM_ANDROID) ||  defined(OEM_EMBEDDED_LINUX) */
1533 	return (0);
1534 }
1535 
1536 int
1537 sdioh_waitlockfree(sdioh_info_t *sd)
1538 {
1539 	return (1);
1540 }
1541 
1542 SDIOH_API_RC
1543 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1544 {
1545 	return SDIOH_API_RC_FAIL;
1546 }
1547 
1548 SDIOH_API_RC
1549 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1550 {
1551 	return SDIOH_API_RC_FAIL;
1552 }
1553 
1554 bool
1555 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1556 {
1557 	return FALSE;
1558 }
1559 
1560 SDIOH_API_RC
1561 sdioh_gpio_init(sdioh_info_t *sd)
1562 {
1563 	return SDIOH_API_RC_FAIL;
1564 }
1565 
1566 uint
1567 sdmmc_get_clock_rate(sdioh_info_t *sd)
1568 {
1569 	struct sdio_func *sdio_func = sd->func[0];
1570 	struct mmc_host *host = sdio_func->card->host;
1571 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1572 	return mmc_host_clk_rate(host);
1573 #else
1574 	return host->ios.clock;
1575 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6)) */
1576 }
1577 
1578 void
1579 sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
1580 {
1581 	struct sdio_func *sdio_func = sd->func[0];
1582 	struct mmc_host *host = sdio_func->card->host;
1583 	struct mmc_ios *ios = &host->ios;
1584 
1585 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1586 	mmc_host_clk_hold(host);
1587 #endif // endif
1588 	DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
1589 	if (hz < host->f_min) {
1590 		DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
1591 		hz = host->f_min;
1592 	}
1593 
1594 	if (hz > host->f_max) {
1595 		DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
1596 		hz = host->f_max;
1597 	}
1598 	ios->clock = hz;
1599 	host->ops->set_ios(host, ios);
1600 	DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
1601 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 3, 6))
1602 	mmc_host_clk_release(host);
1603 #endif // endif
1604 }
1605 
1606 void
1607 sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
1608 {
1609 	uint hz;
1610 	uint old_div = sdmmc_get_clock_rate(sd);
1611 	if (old_div == sd_div) {
1612 		return;
1613 	}
1614 
1615 	hz = sd->sd_clk_rate / sd_div;
1616 	sdmmc_set_clock_rate(sd, hz);
1617 }
1618