1 /*
2 * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
3 *
4 * Copyright (C) 2020, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 *
21 * <<Broadcom-WL-IPTag/Open:>>
22 *
23 * $Id$
24 */
25 #include <typedefs.h>
26
27 #include <bcmdevs.h>
28 #include <bcmendian.h>
29 #include <bcmutils.h>
30 #include <osl.h>
31 #include <sdio.h> /* SDIO Device and Protocol Specs */
32 #include <sdioh.h> /* Standard SDIO Host Controller Specification */
33 #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
34 #include <sdiovar.h> /* ioctl/iovars */
35
36 #include <linux/mmc/core.h>
37 #include <linux/mmc/host.h>
38 #include <linux/mmc/card.h>
39 #include <linux/mmc/sdio_func.h>
40 #include <linux/mmc/sdio_ids.h>
41
42 #include <dngl_stats.h>
43 #include <dhd.h>
44 #include <dhd_dbg.h>
45
46 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_PM_SLEEP)
47 #include <linux/suspend.h>
48 extern volatile bool dhd_mmc_suspend;
49 #endif
50 #include "bcmsdh_sdmmc.h"
51
52 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || \
53 (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
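/*
 * On kernels where the MMC clock-gating helpers (mmc_host_clk_hold/release/rate)
 * are unavailable, provide no-op fallbacks; the clock rate is read straight
 * from host->ios.clock.
 */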
54 static inline void
55 mmc_host_clk_hold(struct mmc_host *host)
56 {
57 BCM_REFERENCE(host);
58 return;
59 }
60
61 static inline void
62 mmc_host_clk_release(struct mmc_host *host)
63 {
64 BCM_REFERENCE(host);
65 return;
66 }
67
68 static inline unsigned int
69 mmc_host_clk_rate(struct mmc_host *host)
70 {
71 return host->ios.clock;
72 }
73 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */
74
75 #ifndef BCMSDH_MODULE
76 extern int sdio_function_init(void);
77 extern void sdio_function_cleanup(void);
78 #endif /* BCMSDH_MODULE */
79
80 #if !defined(OOB_INTR_ONLY)
81 static void IRQHandler(struct sdio_func *func);
82 static void IRQHandlerF2(struct sdio_func *func);
83 #endif /* !defined(OOB_INTR_ONLY) */
84 static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
85 #if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
86 #if defined(MMC_SW_RESET) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
87 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
88 extern int mmc_sw_reset(struct mmc_card *card);
89 #else
90 extern int mmc_sw_reset(struct mmc_host *host);
91 #endif
92 #elif defined(MMC_HW_RESET) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
93 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
94 extern int mmc_hw_reset(struct mmc_card *card);
95 #else
96 extern int mmc_hw_reset(struct mmc_host *host);
97 #endif
98 #else
99 extern int sdio_reset_comm(struct mmc_card *card);
100 #endif
101 #endif
102 #ifdef GLOBAL_SDMMC_INSTANCE
103 extern PBCMSDH_SDMMC_INSTANCE gInstance;
104 #endif
105
106 #define DEFAULT_SDIO_F2_BLKSIZE 512
107 #ifndef CUSTOM_SDIO_F2_BLKSIZE
108 #define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
109 #endif
110
111 #define DEFAULT_SDIO_F1_BLKSIZE 64
112 #ifndef CUSTOM_SDIO_F1_BLKSIZE
113 #define CUSTOM_SDIO_F1_BLKSIZE DEFAULT_SDIO_F1_BLKSIZE
114 #endif
115
116 #define COPY_BUF_SIZE (SDPCM_MAXGLOM_SIZE * 1600)
117
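/* CMD53 (IO_RW_EXTENDED) carries a 9-bit block count, so a single request is capped at 511 blocks */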
118 #define MAX_IO_RW_EXTENDED_BLK 511
119
120 uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
121 uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
122 uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE;
123
124 #if defined (BT_OVER_SDIO)
125 uint sd_f3_blocksize = 64;
126 #endif /* defined (BT_OVER_SDIO) */
127
128 uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
129
130 uint sd_power = 1; /* Default to SD Slot powered ON */
131 uint sd_clock = 1; /* Default to SD Clock turned ON */
132 uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
133 uint sd_msglevel = SDH_ERROR_VAL;
134 uint sd_use_dma = TRUE;
135
136 #ifndef CUSTOM_RXCHAIN
137 #define CUSTOM_RXCHAIN 0
138 #endif
139
140 DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
141 DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
142 DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
143 DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
144
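/*
 * Buffers handed to the MMC stack are expected to be 4-byte aligned;
 * sdioh_request_buffer() bounces unaligned (or vmalloc'ed) buffers through a copy.
 */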
145 #define DMA_ALIGN_MASK 0x03
146 #define MMC_SDIO_ABORT_RETRY_LIMIT 5
147
148 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
149 #ifdef NOTYET
150 static int
151 sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data);
152 #endif /* NOTYET */
153
154 #if defined (BT_OVER_SDIO)
155 extern
156 void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
157 {
158 sd->func[3] = func;
159 sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3]));
160 }
161 #endif /* defined (BT_OVER_SDIO) */
162
163 void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
164 uint sdmmc_get_clock_rate(sdioh_info_t *sd);
165 void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
166
167 static int
168 sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
169 {
170 int err_ret;
171 uint32 fbraddr;
172 uint8 func;
173
174 sd_trace(("%s\n", __FUNCTION__));
175
176 /* Get the Card's common CIS address */
177 sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
178 sd->func_cis_ptr[0] = sd->com_cis_ptr;
179 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
180
181 /* Get the Card's function CIS (for each function) */
182 for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
183 func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
184 sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
185 sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
186 __FUNCTION__, func, sd->func_cis_ptr[func]));
187 }
188
189 sd->func_cis_ptr[0] = sd->com_cis_ptr;
190 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
191
192 /* Enable Function 1 */
193 sdio_claim_host(sd->func[1]);
194 err_ret = sdio_enable_func(sd->func[1]);
195 sdio_release_host(sd->func[1]);
196 if (err_ret) {
197 sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x\n", err_ret));
198 }
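/* Function 2 is enabled later via sdioh_request_byte() writes to SDIOD_CCCR_IOEN, once firmware download completes */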
199
200 return FALSE;
201 }
202
203 /*
204 * Public entry points & extern's
205 */
206 extern sdioh_info_t *
207 sdioh_attach(osl_t *osh, struct sdio_func *func)
208 {
209 sdioh_info_t *sd = NULL;
210 int err_ret;
211
212 sd_trace(("%s\n", __FUNCTION__));
213
214 if (func == NULL) {
215 sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
216 return NULL;
217 }
218
219 if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
220 sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
221 return NULL;
222 }
223 bzero((char *)sd, sizeof(sdioh_info_t));
224 #if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STATIC_COPY_BUF)
225 sd->copy_buf = MALLOC(osh, COPY_BUF_SIZE);
226 if (sd->copy_buf == NULL) {
227 sd_err(("%s: MALLOC of %d-byte copy_buf failed\n",
228 __FUNCTION__, COPY_BUF_SIZE));
229 goto fail;
230 }
231 #endif
232 sd->osh = osh;
233 sd->fake_func0.num = 0;
234 sd->fake_func0.card = func->card;
235 sd->func[0] = &sd->fake_func0;
236 #ifdef GLOBAL_SDMMC_INSTANCE
237 if (func->num == 2)
238 sd->func[1] = gInstance->func[1];
239 #else
240 sd->func[1] = func->card->sdio_func[0];
241 #endif
242 sd->func[2] = func->card->sdio_func[1];
243 #ifdef GLOBAL_SDMMC_INSTANCE
244 sd->func[func->num] = func;
245 #endif
246
247 #if defined (BT_OVER_SDIO)
248 sd->func[3] = NULL;
249 #endif /* defined (BT_OVER_SDIO) */
250
251 sd->num_funcs = 2;
252 sd->sd_blockmode = TRUE;
253 sd->use_client_ints = TRUE;
254 sd->client_block_size[0] = 64;
255 sd->use_rxchain = CUSTOM_RXCHAIN;
256 if (sd->func[1] == NULL || sd->func[2] == NULL) {
257 sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
258 goto fail;
259 }
260 sdio_set_drvdata(sd->func[1], sd);
261
262 sdio_claim_host(sd->func[1]);
263 sd->client_block_size[1] = sd_f1_blocksize;
264 err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize);
265 sdio_release_host(sd->func[1]);
266 if (err_ret) {
267 sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
268 goto fail;
269 }
270
271 sdio_claim_host(sd->func[2]);
272 if ((func->device == BCM43362_CHIP_ID || func->device == BCM4330_CHIP_ID) &&
273 sd_f2_blocksize > 128)
274 sd_f2_blocksize = 128;
275 sd->client_block_size[2] = sd_f2_blocksize;
276 printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
277 err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
278 sdio_release_host(sd->func[2]);
279 if (err_ret) {
280 sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
281 sd_f2_blocksize, err_ret));
282 goto fail;
283 }
284
285 sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
286 printf("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate);
287 sdioh_sdmmc_card_enablefuncs(sd);
288 #if !defined(OOB_INTR_ONLY)
289 mutex_init(&sd->claim_host_mutex); // terence 20140926: fix for claim host issue
290 #endif
291
292 sd_trace(("%s: Done\n", __FUNCTION__));
293 return sd;
294
295 fail:
296 #if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STATIC_COPY_BUF)
297 MFREE(sd->osh, sd->copy_buf, COPY_BUF_SIZE);
298 #endif
299 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
300 return NULL;
301 }
302
303 extern SDIOH_API_RC
304 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
305 {
306 sd_trace(("%s\n", __FUNCTION__));
307
308 if (sd) {
309
310 /* Disable Function 2 */
311 if (sd->func[2]) {
312 sdio_claim_host(sd->func[2]);
313 sdio_disable_func(sd->func[2]);
314 sdio_release_host(sd->func[2]);
315 }
316
317 /* Disable Function 1 */
318 if (sd->func[1]) {
319 sdio_claim_host(sd->func[1]);
320 sdio_disable_func(sd->func[1]);
321 sdio_release_host(sd->func[1]);
322 }
323
324 sd->func[1] = NULL;
325 sd->func[2] = NULL;
326
327 #if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STATIC_COPY_BUF)
328 MFREE(sd->osh, sd->copy_buf, COPY_BUF_SIZE);
329 #endif
330 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
331 }
332 return SDIOH_API_RC_SUCCESS;
333 }
334
335 #if defined(OOB_INTR_ONLY) && defined(HW_OOB)
336
337 extern SDIOH_API_RC
338 sdioh_enable_func_intr(sdioh_info_t *sd)
339 {
340 uint8 reg;
341 int err;
342
343 if (sd->func[0] == NULL) {
344 sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
345 return SDIOH_API_RC_FAIL;
346 }
347
348 sdio_claim_host(sd->func[0]);
349 reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
350 if (err) {
351 sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
352 sdio_release_host(sd->func[0]);
353 return SDIOH_API_RC_FAIL;
354 }
355 /* Enable F1 and F2 interrupts, clear master enable */
356 reg &= ~INTR_CTL_MASTER_EN;
357 reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
358 #if defined (BT_OVER_SDIO)
359 reg |= (INTR_CTL_FUNC3_EN);
360 #endif /* defined (BT_OVER_SDIO) */
361 sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
362 sdio_release_host(sd->func[0]);
363
364 if (err) {
365 sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
366 return SDIOH_API_RC_FAIL;
367 }
368
369 return SDIOH_API_RC_SUCCESS;
370 }
371
372 extern SDIOH_API_RC
373 sdioh_disable_func_intr(sdioh_info_t *sd)
374 {
375 uint8 reg;
376 int err;
377
378 if (sd->func[0] == NULL) {
379 sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
380 return SDIOH_API_RC_FAIL;
381 }
382
383 sdio_claim_host(sd->func[0]);
384 reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
385 if (err) {
386 sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
387 sdio_release_host(sd->func[0]);
388 return SDIOH_API_RC_FAIL;
389 }
390 reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
391 #if defined(BT_OVER_SDIO)
392 reg &= ~INTR_CTL_FUNC3_EN;
393 #endif
394 /* Disable master interrupt with the last function interrupt */
395 if (!(reg & 0xFE))
396 reg = 0;
397 sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
398 sdio_release_host(sd->func[0]);
399
400 if (err) {
401 sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
402 return SDIOH_API_RC_FAIL;
403 }
404
405 return SDIOH_API_RC_SUCCESS;
406 }
407 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
408
409 /* Configure callback to client when we receive a client interrupt */
410 extern SDIOH_API_RC
411 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
412 {
413 sd_trace(("%s: Entering\n", __FUNCTION__));
414 if (fn == NULL) {
415 sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
416 return SDIOH_API_RC_FAIL;
417 }
418 #if !defined(OOB_INTR_ONLY)
419 sd->intr_handler = fn;
420 sd->intr_handler_arg = argh;
421 sd->intr_handler_valid = TRUE;
422
423 /* register and unmask irq */
424 if (sd->func[2]) {
425 sdio_claim_host(sd->func[2]);
426 sdio_claim_irq(sd->func[2], IRQHandlerF2);
427 sdio_release_host(sd->func[2]);
428 }
429
430 if (sd->func[1]) {
431 sdio_claim_host(sd->func[1]);
432 sdio_claim_irq(sd->func[1], IRQHandler);
433 sdio_release_host(sd->func[1]);
434 }
435 #elif defined(HW_OOB)
436 sdioh_enable_func_intr(sd);
437 #endif /* !defined(OOB_INTR_ONLY) */
438
439 return SDIOH_API_RC_SUCCESS;
440 }
441
442 extern SDIOH_API_RC
443 sdioh_interrupt_deregister(sdioh_info_t *sd)
444 {
445 sd_trace(("%s: Entering\n", __FUNCTION__));
446
447 #if !defined(OOB_INTR_ONLY)
448 if (sd->func[1]) {
449 /* register and unmask irq */
450 sdio_claim_host(sd->func[1]);
451 sdio_release_irq(sd->func[1]);
452 sdio_release_host(sd->func[1]);
453 }
454
455 if (sd->func[2]) {
456 /* Claim host controller F2 */
457 sdio_claim_host(sd->func[2]);
458 sdio_release_irq(sd->func[2]);
459 /* Release host controller F2 */
460 sdio_release_host(sd->func[2]);
461 }
462
463 sd->intr_handler_valid = FALSE;
464 sd->intr_handler = NULL;
465 sd->intr_handler_arg = NULL;
466 #elif defined(HW_OOB)
467 if (dhd_download_fw_on_driverload)
468 sdioh_disable_func_intr(sd);
469 #endif /* !defined(OOB_INTR_ONLY) */
470 return SDIOH_API_RC_SUCCESS;
471 }
472
473 extern SDIOH_API_RC
474 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
475 {
476 sd_trace(("%s: Entering\n", __FUNCTION__));
477 *onoff = sd->client_intr_enabled;
478 return SDIOH_API_RC_SUCCESS;
479 }
480
481 #if defined(DHD_DEBUG) || defined(BCMDBG)
482 extern bool
483 sdioh_interrupt_pending(sdioh_info_t *sd)
484 {
485 return (0);
486 }
487 #endif
488
489 uint
490 sdioh_query_iofnum(sdioh_info_t *sd)
491 {
492 return sd->num_funcs;
493 }
494
495 /* IOVar table */
496 enum {
497 IOV_MSGLEVEL = 1,
498 IOV_BLOCKMODE,
499 IOV_BLOCKSIZE,
500 IOV_DMA,
501 IOV_USEINTS,
502 IOV_NUMINTS,
503 IOV_NUMLOCALINTS,
504 IOV_HOSTREG,
505 IOV_DEVREG,
506 IOV_DIVISOR,
507 IOV_SDMODE,
508 IOV_HISPEED,
509 IOV_HCIREGS,
510 IOV_POWER,
511 IOV_CLOCK,
512 IOV_RXCHAIN
513 };
514
515 const bcm_iovar_t sdioh_iovars[] = {
516 {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
517 {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 },
518 {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
519 {"sd_dma", IOV_DMA, 0, 0, IOVT_BOOL, 0 },
520 {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
521 {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
522 {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
523 #ifdef BCMINTERNAL
524 {"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
525 {"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
526 #endif /* BCMINTERNAL */
527 {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
528 {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
529 {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
530 {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
531 {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0 },
532 {"sd_rxchain", IOV_RXCHAIN, 0, 0, IOVT_BOOL, 0 },
533 #ifdef BCMDBG
534 {"sd_hciregs", IOV_HCIREGS, 0, 0, IOVT_BUFFER, 0 },
535 #endif
536 {NULL, 0, 0, 0, 0, 0 }
537 };
538
539 int
540 sdioh_iovar_op(sdioh_info_t *si, const char *name,
541 void *params, int plen, void *arg, uint len, bool set)
542 {
543 const bcm_iovar_t *vi = NULL;
544 int bcmerror = 0;
545 uint val_size;
546 int32 int_val = 0;
547 bool bool_val;
548 uint32 actionid;
549
550 ASSERT(name);
551
552 /* Get must have return space; Set does not take qualifiers */
553 ASSERT(set || (arg && len));
554 ASSERT(!set || (!params && !plen));
555
556 sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
557
558 if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
559 bcmerror = BCME_UNSUPPORTED;
560 goto exit;
561 }
562
563 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
564 goto exit;
565
566 /* XXX Copied from dhd, copied from wl; certainly overkill here? */
567 /* Set up params so get and set can share the convenience variables */
568 if (params == NULL) {
569 params = arg;
570 plen = len;
571 }
572
573 if (vi->type == IOVT_VOID)
574 val_size = 0;
575 else if (vi->type == IOVT_BUFFER)
576 val_size = len;
577 else
578 val_size = sizeof(int);
579
580 if (plen >= (int)sizeof(int_val))
581 bcopy(params, &int_val, sizeof(int_val));
582
583 bool_val = (int_val != 0) ? TRUE : FALSE;
584 BCM_REFERENCE(bool_val);
585
586 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
587 switch (actionid) {
588 case IOV_GVAL(IOV_MSGLEVEL):
589 int_val = (int32)sd_msglevel;
590 bcopy(&int_val, arg, val_size);
591 break;
592
593 case IOV_SVAL(IOV_MSGLEVEL):
594 sd_msglevel = int_val;
595 break;
596
597 case IOV_GVAL(IOV_BLOCKMODE):
598 int_val = (int32)si->sd_blockmode;
599 bcopy(&int_val, arg, val_size);
600 break;
601
602 case IOV_SVAL(IOV_BLOCKMODE):
603 si->sd_blockmode = (bool)int_val;
604 /* Haven't figured out how to make non-block mode with DMA */
605 break;
606
607 case IOV_GVAL(IOV_BLOCKSIZE):
608 if ((uint32)int_val > si->num_funcs) {
609 bcmerror = BCME_BADARG;
610 break;
611 }
612 int_val = (int32)si->client_block_size[int_val];
613 bcopy(&int_val, arg, val_size);
614 break;
615
616 case IOV_SVAL(IOV_BLOCKSIZE):
617 {
618 uint func = ((uint32)int_val >> 16);
619 uint blksize = (uint16)int_val;
620 uint maxsize;
621
622 if (func > si->num_funcs) {
623 bcmerror = BCME_BADARG;
624 break;
625 }
626
627 /* XXX These hardcoded sizes are a hack, remove after proper CIS parsing. */
628 switch (func) {
629 case 0: maxsize = 32; break;
630 case 1: maxsize = BLOCK_SIZE_4318; break;
631 case 2: maxsize = BLOCK_SIZE_4328; break;
632 default: maxsize = 0;
633 }
634 if (blksize > maxsize) {
635 bcmerror = BCME_BADARG;
636 break;
637 }
638 if (!blksize) {
639 blksize = maxsize;
640 }
641
642 /* Now set it */
643 si->client_block_size[func] = blksize;
644
645 #ifdef USE_DYNAMIC_F2_BLKSIZE
646 if (si->func[func] == NULL) {
647 sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
648 bcmerror = BCME_NORESOURCE;
649 break;
650 }
651 sdio_claim_host(si->func[func]);
652 bcmerror = sdio_set_block_size(si->func[func], blksize);
653 if (bcmerror)
654 sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
655 __FUNCTION__, func, blksize, bcmerror));
656 sdio_release_host(si->func[func]);
657 #endif /* USE_DYNAMIC_F2_BLKSIZE */
658 break;
659 }
660
661 case IOV_GVAL(IOV_RXCHAIN):
662 int_val = (int32)si->use_rxchain;
663 bcopy(&int_val, arg, val_size);
664 break;
665
666 case IOV_GVAL(IOV_DMA):
667 int_val = (int32)si->sd_use_dma;
668 bcopy(&int_val, arg, val_size);
669 break;
670
671 case IOV_SVAL(IOV_DMA):
672 si->sd_use_dma = (bool)int_val;
673 break;
674
675 case IOV_GVAL(IOV_USEINTS):
676 int_val = (int32)si->use_client_ints;
677 bcopy(&int_val, arg, val_size);
678 break;
679
680 case IOV_SVAL(IOV_USEINTS):
681 si->use_client_ints = (bool)int_val;
682 if (si->use_client_ints)
683 si->intmask |= CLIENT_INTR;
684 else
685 si->intmask &= ~CLIENT_INTR;
686
687 break;
688
689 case IOV_GVAL(IOV_DIVISOR):
690 int_val = (uint32)sd_divisor;
691 bcopy(&int_val, arg, val_size);
692 break;
693
694 case IOV_SVAL(IOV_DIVISOR):
695 /* set the clock to divisor, if value is non-zero & power of 2 */
696 if (int_val && !(int_val & (int_val - 1))) {
697 sd_divisor = int_val;
698 sdmmc_set_clock_divisor(si, sd_divisor);
699 } else {
700 DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n",
701 __FUNCTION__));
702 }
703 break;
704
705 case IOV_GVAL(IOV_POWER):
706 int_val = (uint32)sd_power;
707 bcopy(&int_val, arg, val_size);
708 break;
709
710 case IOV_SVAL(IOV_POWER):
711 sd_power = int_val;
712 break;
713
714 case IOV_GVAL(IOV_CLOCK):
715 int_val = (uint32)sd_clock;
716 bcopy(&int_val, arg, val_size);
717 break;
718
719 case IOV_SVAL(IOV_CLOCK):
720 sd_clock = int_val;
721 break;
722
723 case IOV_GVAL(IOV_SDMODE):
724 int_val = (uint32)sd_sdmode;
725 bcopy(&int_val, arg, val_size);
726 break;
727
728 case IOV_SVAL(IOV_SDMODE):
729 sd_sdmode = int_val;
730 break;
731
732 case IOV_GVAL(IOV_HISPEED):
733 int_val = (uint32)sd_hiok;
734 bcopy(&int_val, arg, val_size);
735 break;
736
737 case IOV_SVAL(IOV_HISPEED):
738 sd_hiok = int_val;
739 break;
740
741 case IOV_GVAL(IOV_NUMINTS):
742 int_val = (int32)si->intrcount;
743 bcopy(&int_val, arg, val_size);
744 break;
745
746 case IOV_GVAL(IOV_NUMLOCALINTS):
747 int_val = (int32)0;
748 bcopy(&int_val, arg, val_size);
749 break;
750 #ifdef BCMINTERNAL
751 case IOV_GVAL(IOV_HOSTREG):
752 {
753 /* XXX Should copy for alignment reasons */
754 sdreg_t *sd_ptr = (sdreg_t *)params;
755
756 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
757 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
758 bcmerror = BCME_BADARG;
759 break;
760 }
761
762 sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
763 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
764 sd_ptr->offset));
765 if (sd_ptr->offset & 1)
766 int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
767 else if (sd_ptr->offset & 2)
768 int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
769 else
770 int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
771
772 bcopy(&int_val, arg, sizeof(int_val));
773 break;
774 }
775
776 case IOV_SVAL(IOV_HOSTREG):
777 {
778 /* XXX Should copy for alignment reasons */
779 sdreg_t *sd_ptr = (sdreg_t *)params;
780
781 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
782 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
783 bcmerror = BCME_BADARG;
784 break;
785 }
786
787 sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
788 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
789 sd_ptr->offset));
790 break;
791 }
792
793 case IOV_GVAL(IOV_DEVREG):
794 {
795 /* XXX Should copy for alignment reasons */
796 sdreg_t *sd_ptr = (sdreg_t *)params;
797 uint8 data = 0;
798
799 if ((uint)sd_ptr->func > si->num_funcs) {
800 bcmerror = BCME_BADARG;
801 break;
802 }
803
804 if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
805 bcmerror = BCME_SDIO_ERROR;
806 break;
807 }
808
809 int_val = (int)data;
810 bcopy(&int_val, arg, sizeof(int_val));
811 break;
812 }
813
814 case IOV_SVAL(IOV_DEVREG):
815 {
816 /* XXX Should copy for alignment reasons */
817 sdreg_t *sd_ptr = (sdreg_t *)params;
818 uint8 data = (uint8)sd_ptr->value;
819
820 if ((uint)sd_ptr->func > si->num_funcs) {
821 bcmerror = BCME_BADARG;
822 break;
823 }
824
825 if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
826 bcmerror = BCME_SDIO_ERROR;
827 break;
828 }
829 break;
830 }
831 #endif /* BCMINTERNAL */
832 default:
833 bcmerror = BCME_UNSUPPORTED;
834 break;
835 }
836 exit:
837
838 /* XXX Remove protective lock after clients all clean... */
839 return bcmerror;
840 }
841
842 #if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
843 /*
844 * XXX dhd -i eth0 sd_devreg 0 0xf2 0x3
845 */
846
847 SDIOH_API_RC
848 sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
849 {
850 SDIOH_API_RC status;
851 uint8 data;
852
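/*
 * SDIOD_CCCR_BRCM_SEPINT controls the dedicated out-of-band interrupt pin:
 * per the bit names, SDIO_SEPINT_MASK gates the interrupt, SDIO_SEPINT_OE
 * enables the output driver, and SDIO_SEPINT_ACT_HI selects active-high polarity.
 */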
853 if (enable)
854 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
855 else
856 data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
857
858 status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
859 return status;
860 }
861 #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
862
863 extern SDIOH_API_RC
864 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
865 {
866 SDIOH_API_RC status;
867 /* No lock needed since sdioh_request_byte does locking */
868 status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
869 return status;
870 }
871
872 extern SDIOH_API_RC
873 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
874 {
875 /* No lock needed since sdioh_request_byte does locking */
876 SDIOH_API_RC status;
877 status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
878 return status;
879 }
880
881 static int
882 sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
883 {
884 /* read 24 bits and return valid 17 bit addr */
885 int i;
886 uint32 scratch, regdata;
887 uint8 *ptr = (uint8 *)&scratch;
888 for (i = 0; i < 3; i++) {
889 if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
890 sd_err(("%s: Can't read!\n", __FUNCTION__));
891
892 *ptr++ = (uint8) regdata;
893 regaddr++;
894 }
895
896 /* Only the lower 17-bits are valid */
897 scratch = ltoh32(scratch);
898 scratch &= 0x0001FFFF;
899 return (scratch);
900 }
901
902 extern SDIOH_API_RC
903 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
904 {
905 uint32 count;
906 int offset;
907 uint32 foo;
908 uint8 *cis = cisd;
909
910 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
911
912 if (!sd->func_cis_ptr[func]) {
913 bzero(cis, length);
914 sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
915 return SDIOH_API_RC_FAIL;
916 }
917
918 sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
919
920 for (count = 0; count < length; count++) {
921 offset = sd->func_cis_ptr[func] + count;
922 if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
923 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
924 return SDIOH_API_RC_FAIL;
925 }
926
927 *cis = (uint8)(foo & 0xff);
928 cis++;
929 }
930
931 return SDIOH_API_RC_SUCCESS;
932 }
933
934 extern SDIOH_API_RC
935 sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset)
936 {
937 uint32 foo;
938
939 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
940
941 if (!sd->func_cis_ptr[func]) {
942 sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
943 return SDIOH_API_RC_FAIL;
944 }
945
946 if (sdioh_sdmmc_card_regread (sd, 0, sd->func_cis_ptr[func]+offset, 1, &foo) < 0) {
947 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
948 return SDIOH_API_RC_FAIL;
949 }
950
951 *cisd = (uint8)(foo & 0xff);
952
953 return SDIOH_API_RC_SUCCESS;
954 }
955
956 extern SDIOH_API_RC
957 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
958 {
959 int err_ret = 0;
960 #if defined(MMC_SDIO_ABORT)
961 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
962 #endif
963 struct osl_timespec now, before;
964
965 if (sd_msglevel & SDH_COST_VAL)
966 osl_do_gettimeofday(&before);
967
968 sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
969
970 DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
971 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
972 if(rw) { /* CMD52 Write */
973 if (func == 0) {
974 /* Can only directly write to some F0 registers. Handle F2 enable
975 * as a special case.
976 */
977 if (regaddr == SDIOD_CCCR_IOEN) {
978 #if defined (BT_OVER_SDIO)
979 do {
980 if (sd->func[3]) {
981 sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
982
983 if (*byte & SDIO_FUNC_ENABLE_3) {
984 sdio_claim_host(sd->func[3]);
985
986 /* Set Function 3 Block Size */
987 err_ret = sdio_set_block_size(sd->func[3],
988 sd_f3_blocksize);
989 if (err_ret) {
990 sd_err(("F3 blocksize set err%d\n",
991 err_ret));
992 }
993
994 /* Enable Function 3 */
995 sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
996 sd->func[3]));
997 err_ret = sdio_enable_func(sd->func[3]);
998 if (err_ret) {
999 sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
1000 err_ret));
1001 }
1002
1003 sdio_release_host(sd->func[3]);
1004
1005 break;
1006 } else if (*byte & SDIO_FUNC_DISABLE_3) {
1007 sdio_claim_host(sd->func[3]);
1008
1009 /* Disable Function 3 */
1010 sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
1011 sd->func[3]));
1012 err_ret = sdio_disable_func(sd->func[3]);
1013 if (err_ret) {
1014 sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
1015 err_ret));
1016 }
1017 sdio_release_host(sd->func[3]);
1018 sd->func[3] = NULL;
1019
1020 break;
1021 }
1022 }
1023 #endif /* defined (BT_OVER_SDIO) */
1024 if (sd->func[2]) {
1025 sdio_claim_host(sd->func[2]);
1026 if (*byte & SDIO_FUNC_ENABLE_2) {
1027 /* Enable Function 2 */
1028 err_ret = sdio_enable_func(sd->func[2]);
1029 if (err_ret) {
1030 sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n",
1031 err_ret));
1032 }
1033 } else {
1034 /* Disable Function 2 */
1035 err_ret = sdio_disable_func(sd->func[2]);
1036 if (err_ret) {
1037 sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n",
1038 err_ret));
1039 }
1040 }
1041 sdio_release_host(sd->func[2]);
1042 }
1043 #if defined (BT_OVER_SDIO)
1044 } while (0);
1045 #endif /* defined (BT_OVER_SDIO) */
1046 }
1047 #if defined(MMC_SDIO_ABORT)
1048 /* to allow abort command through F1 */
1049 else if (regaddr == SDIOD_CCCR_IOABORT) {
1050 /* XXX Because of SDIO3.0 host issue on Manta,
1051 * sometimes the abort fails.
1052 * Retrying will fix this issue.
1053 */
1054 while (sdio_abort_retry--) {
1055 if (sd->func[func]) {
1056 sdio_claim_host(sd->func[func]);
1057 /*
1058 * this sdio_f0_writeb() can be replaced with
1059 * another api depending upon MMC driver change.
1060 * As of this time, this is a temporary one
1061 */
1062 sdio_writeb(sd->func[func],
1063 *byte, regaddr, &err_ret);
1064 sdio_release_host(sd->func[func]);
1065 }
1066 if (!err_ret)
1067 break;
1068 }
1069 }
1070 #endif /* MMC_SDIO_ABORT */
1071 #if defined(SDIO_ISR_THREAD)
1072 else if (regaddr == SDIOD_CCCR_INTR_EXTN) {
1073 while (sdio_abort_retry--) {
1074 if (sd->func[func]) {
1075 sdio_claim_host(sd->func[func]);
1076 /*
1077 * this sdio_f0_writeb() can be replaced with
1078 * another api depending upon MMC driver change.
1079 * As of this time, this is a temporary one
1080 */
1081 sdio_writeb(sd->func[func],
1082 *byte, regaddr, &err_ret);
1083 sdio_release_host(sd->func[func]);
1084 }
1085 if (!err_ret)
1086 break;
1087 }
1088 }
1089 #endif
1090 else if (regaddr < 0xF0) {
1091 sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
1092 } else {
1093 /* Claim host controller, perform F0 write, and release */
1094 if (sd->func[func]) {
1095 sdio_claim_host(sd->func[func]);
1096 sdio_f0_writeb(sd->func[func],
1097 *byte, regaddr, &err_ret);
1098 sdio_release_host(sd->func[func]);
1099 }
1100 }
1101 } else {
1102 /* Claim host controller, perform Fn write, and release */
1103 if (sd->func[func]) {
1104 sdio_claim_host(sd->func[func]);
1105 sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
1106 sdio_release_host(sd->func[func]);
1107 }
1108 }
1109 } else { /* CMD52 Read */
1110 /* Claim host controller, perform Fn read, and release */
1111 if (sd->func[func]) {
1112 sdio_claim_host(sd->func[func]);
1113 if (func == 0) {
1114 *byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
1115 } else {
1116 *byte = sdio_readb(sd->func[func], regaddr, &err_ret);
1117 }
1118 sdio_release_host(sd->func[func]);
1119 }
1120 }
1121
1122 if (err_ret) {
1123 if (regaddr == 0x1001F) {
1124 /* XXX: Reads/writes to SBSDIO_FUNC1_SLEEPCSR can return a -110 (timeout)
1125 * or -84 (CRC) error when the host tries to wake the device up.
1126 * Skip the error log message if the error code is -110 or -84 when accessing
1127 * SBSDIO_FUNC1_SLEEPCSR, to avoid QA misunderstanding; DHD should
1128 * print an error log message if the retry count exceeds MAX_KSO_ATTEMPTS.
1129 */
1130 } else {
1131 sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
1132 rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
1133 }
1134 }
1135
1136 if (sd_msglevel & SDH_COST_VAL) {
1137 uint32 diff_us;
1138 osl_do_gettimeofday(&now);
1139 diff_us = osl_do_gettimediff(&now, &before);
1140 sd_cost(("%s: rw=%d len=1 cost = %3dms %3dus\n", __FUNCTION__,
1141 rw, diff_us/1000, diff_us%1000));
1142 }
1143
1144 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1145 }
1146
1147 uint
1148 sdioh_set_mode(sdioh_info_t *sd, uint mode)
1149 {
1150 if (mode == SDPCM_TXGLOM_CPY)
1151 sd->txglom_mode = mode;
1152 else if (mode == SDPCM_TXGLOM_MDESC)
1153 sd->txglom_mode = mode;
1154
1155 return (sd->txglom_mode);
1156 }
1157
1158 #ifdef PKT_STATICS
1159 uint32
1160 sdioh_get_spend_time(sdioh_info_t *sd)
1161 {
1162 return (sd->sdio_spent_time_us);
1163 }
1164 #endif
1165
1166 extern SDIOH_API_RC
1167 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
1168 uint32 *word, uint nbytes)
1169 {
1170 int err_ret = SDIOH_API_RC_FAIL;
1171 int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc deadlock
1172 #if defined(MMC_SDIO_ABORT)
1173 int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
1174 #endif
1175 struct osl_timespec now, before;
1176
1177 if (sd_msglevel & SDH_COST_VAL)
1178 osl_do_gettimeofday(&before);
1179
1180 if (func == 0) {
1181 sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
1182 return SDIOH_API_RC_FAIL;
1183 }
1184
1185 sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
1186 __FUNCTION__, cmd_type, rw, func, addr, nbytes));
1187
1188 DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
1189 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1190 /* Claim host controller */
1191 sdio_claim_host(sd->func[func]);
1192
1193 if(rw) { /* CMD52 Write */
1194 if (nbytes == 4) {
1195 sdio_writel(sd->func[func], *word, addr, &err_ret);
1196 } else if (nbytes == 2) {
1197 sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
1198 } else {
1199 sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
1200 }
1201 } else { /* CMD52 Read */
1202 if (nbytes == 4) {
1203 *word = sdio_readl(sd->func[func], addr, &err_ret);
1204 } else if (nbytes == 2) {
1205 *word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
1206 } else {
1207 sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
1208 }
1209 }
1210
1211 /* Release host controller */
1212 sdio_release_host(sd->func[func]);
1213
1214 if (err_ret) {
1215 #if defined(MMC_SDIO_ABORT)
1216 /* Any error on CMD53 transaction should abort that function using function 0. */
1217 while (sdio_abort_retry--) {
1218 if (sd->func[0]) {
1219 sdio_claim_host(sd->func[0]);
1220 /*
1221 * this sdio_f0_writeb() can be replaced with another api
1222 * depending upon MMC driver change.
1223 * As of this time, this is a temporary one
1224 */
1225 sdio_writeb(sd->func[0],
1226 func, SDIOD_CCCR_IOABORT, &err_ret2);
1227 sdio_release_host(sd->func[0]);
1228 }
1229 if (!err_ret2)
1230 break;
1231 }
1232 if (err_ret)
1233 #endif /* MMC_SDIO_ABORT */
1234 {
1235 sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n",
1236 rw ? "Write" : "Read", func, addr, *word, err_ret));
1237 }
1238 }
1239
1240 if (sd_msglevel & SDH_COST_VAL) {
1241 uint32 diff_us;
1242 osl_do_gettimeofday(&now);
1243 diff_us = osl_do_gettimediff(&now, &before);
1244 sd_cost(("%s: rw=%d, len=%d cost = %3dms %3dus\n", __FUNCTION__,
1245 rw, nbytes, diff_us/1000, diff_us%1000));
1246 }
1247
1248 return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1249 }
1250
1251 #ifdef BCMSDIOH_TXGLOM
1252 static SDIOH_API_RC
1253 sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1254 uint addr, void *pkt)
1255 {
1256 bool fifo = (fix_inc == SDIOH_DATA_FIX);
1257 int err_ret = 0;
1258 void *pnext;
1259 uint ttl_len, pkt_offset;
1260 uint blk_num;
1261 uint blk_size;
1262 uint max_blk_count;
1263 uint max_req_size;
1264 struct mmc_request mmc_req;
1265 struct mmc_command mmc_cmd;
1266 struct mmc_data mmc_dat;
1267 uint32 sg_count;
1268 struct sdio_func *sdio_func = sd->func[func];
1269 struct mmc_host *host = sdio_func->card->host;
1270 uint8 *localbuf = NULL;
1271 uint local_plen = 0;
1272 uint pkt_len = 0;
1273 struct osl_timespec now, before;
1274
1275 sd_trace(("%s: Enter\n", __FUNCTION__));
1276 ASSERT(pkt);
1277 DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
1278 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1279
1280 #ifndef PKT_STATICS
1281 if (sd_msglevel & SDH_COST_VAL)
1282 #endif
1283 osl_do_gettimeofday(&before);
1284
1285 blk_size = sd->client_block_size[func];
1286 max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
1287 max_req_size = min(max_blk_count * blk_size, host->max_req_size);
1288
1289 pkt_offset = 0;
1290 pnext = pkt;
1291
1292 ttl_len = 0;
1293 sg_count = 0;
1294 if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
1295 while (pnext != NULL) {
1296 ttl_len = 0;
1297 sg_count = 0;
1298 memset(&mmc_req, 0, sizeof(struct mmc_request));
1299 memset(&mmc_cmd, 0, sizeof(struct mmc_command));
1300 memset(&mmc_dat, 0, sizeof(struct mmc_data));
1301 sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
1302
1303 /* Set up scatter-gather DMA descriptors. this loop is to find out the max
1304 * data we can transfer with one command 53. blocks per command is limited by
1305 * host max_req_size and 9-bit max block number. when the total length of this
1306 * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED
1307 * commands (each transfer is still block aligned)
1308 */
1309 while (pnext != NULL && ttl_len < max_req_size) {
1310 int pkt_len;
1311 int sg_data_size;
1312 uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
1313
1314 ASSERT(pdata != NULL);
1315 pkt_len = PKTLEN(sd->osh, pnext);
1316 sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
1317 /* sg_count is unlikely larger than the array size, and this is
1318 * NOT something we can handle here, but in case it happens, PLEASE put
1319 * a restriction on max tx/glom count (based on host->max_segs).
1320 */
1321 if (sg_count >= ARRAYSIZE(sd->sg_list)) {
1322 sd_err(("%s: sg list entries(%u) exceed limit(%zu),"
1323 " sd blk_size=%u\n",
1324 __FUNCTION__, sg_count, (size_t)ARRAYSIZE(sd->sg_list), blk_size));
1325 return (SDIOH_API_RC_FAIL);
1326 }
1327 pdata += pkt_offset;
1328
1329 sg_data_size = pkt_len - pkt_offset;
1330 if (sg_data_size > max_req_size - ttl_len)
1331 sg_data_size = max_req_size - ttl_len;
1332 /* some platforms put a restriction on the data size of each scatter-gather
1333 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
1334 * max_seg_size
1335 */
1336 if (sg_data_size > host->max_seg_size) {
1337 sg_data_size = host->max_seg_size;
1338 }
1339 sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
1340
1341 ttl_len += sg_data_size;
1342 pkt_offset += sg_data_size;
1343 if (pkt_offset == pkt_len) {
1344 pnext = PKTNEXT(sd->osh, pnext);
1345 pkt_offset = 0;
1346 }
1347 }
1348
1349 if (ttl_len % blk_size != 0) {
1350 sd_err(("%s, data length %d not aligned to block size %d\n",
1351 __FUNCTION__, ttl_len, blk_size));
1352 return SDIOH_API_RC_FAIL;
1353 }
1354 blk_num = ttl_len / blk_size;
1355 mmc_dat.sg = sd->sg_list;
1356 mmc_dat.sg_len = sg_count;
1357 mmc_dat.blksz = blk_size;
1358 mmc_dat.blocks = blk_num;
1359 mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
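/*
 * Build the CMD53 (IO_RW_EXTENDED) argument by hand:
 * bit 31 = R/W flag, bits 30:28 = function number, bit 27 = block mode,
 * bit 26 = OP code (incrementing address unless FIFO), bits 25:9 = register
 * address, bits 8:0 = block count.
 */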
1360 mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
1361 mmc_cmd.arg = write ? 1<<31 : 0;
1362 mmc_cmd.arg |= (func & 0x7) << 28;
1363 mmc_cmd.arg |= 1<<27;
1364 mmc_cmd.arg |= fifo ? 0 : 1<<26;
1365 mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
1366 mmc_cmd.arg |= blk_num & 0x1FF;
1367 mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
1368 mmc_req.cmd = &mmc_cmd;
1369 mmc_req.data = &mmc_dat;
1370 if (!fifo)
1371 addr += ttl_len;
1372
1373 sdio_claim_host(sdio_func);
1374 mmc_set_data_timeout(&mmc_dat, sdio_func->card);
1375 mmc_wait_for_req(host, &mmc_req);
1376 sdio_release_host(sdio_func);
1377
1378 err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
1379 if (0 != err_ret) {
1380 sd_err(("%s:CMD53 %s failed with code %d\n",
1381 __FUNCTION__, write ? "write" : "read", err_ret));
1382 return SDIOH_API_RC_FAIL;
1383 }
1384 }
1385 }
1386 else if(sd->txglom_mode == SDPCM_TXGLOM_CPY) {
1387 for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
1388 ttl_len += PKTLEN(sd->osh, pnext);
1389 }
1390 /* Claim host controller */
1391 sdio_claim_host(sd->func[func]);
1392 for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
1393 uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext);
1394 pkt_len = PKTLEN(sd->osh, pnext);
1395 if (!localbuf) {
1396 #ifdef BCMSDIOH_STATIC_COPY_BUF
1397 if (ttl_len <= COPY_BUF_SIZE)
1398 localbuf = sd->copy_buf;
1399 #else
1400 localbuf = (uint8 *)MALLOC(sd->osh, ttl_len);
1401 #endif
1402 if (localbuf == NULL) {
1403 sd_err(("%s: %s localbuf malloc FAILED ttl_len=%d\n",
1404 __FUNCTION__, (write) ? "TX" : "RX", ttl_len));
1405 ttl_len -= pkt_len;
1406 goto txglomfail;
1407 }
1408 }
1409 bcopy(buf, (localbuf + local_plen), pkt_len);
1410 local_plen += pkt_len;
1411 if (PKTNEXT(sd->osh, pnext))
1412 continue;
1413
1414 buf = localbuf;
1415 pkt_len = local_plen;
1416 txglomfail:
1417 /* Align Patch */
1418 if (!write || pkt_len < 32)
1419 pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
1420 else if (pkt_len % blk_size)
1421 pkt_len += blk_size - (pkt_len % blk_size);
1422
1423 if ((write) && (!fifo))
1424 err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, pkt_len);
1425 else if (write)
1426 err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, pkt_len);
1427 else if (fifo)
1428 err_ret = sdio_readsb(sd->func[func], buf, addr, pkt_len);
1429 else
1430 err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, pkt_len);
1431
1432 if (err_ret)
1433 sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
1434 __FUNCTION__,
1435 (write) ? "TX" : "RX",
1436 pnext, sg_count, addr, pkt_len, err_ret));
1437 else
1438 sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
1439 __FUNCTION__,
1440 (write) ? "TX" : "RX",
1441 pnext, sg_count, addr, pkt_len));
1442
1443 if (!fifo)
1444 addr += pkt_len;
1445 sg_count ++;
1446 }
1447 sdio_release_host(sd->func[func]);
1448 } else {
1449 sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode));
1450 return SDIOH_API_RC_FAIL;
1451 }
1452
1453 #ifndef BCMSDIOH_STATIC_COPY_BUF
1454 if (localbuf)
1455 MFREE(sd->osh, localbuf, ttl_len);
1456 #endif
1457
1458 #ifndef PKT_STATICS
1459 if (sd_msglevel & SDH_COST_VAL)
1460 #endif
1461 {
1462 uint32 diff_us;
1463 osl_do_gettimeofday(&now);
1464 diff_us = osl_do_gettimediff(&now, &before);
1465 sd_cost(("%s: rw=%d, ttl_len=%4d cost = %3dms %3dus\n", __FUNCTION__,
1466 write, ttl_len, diff_us/1000, diff_us%1000));
1467 #ifdef PKT_STATICS
1468 if (write && (func == 2))
1469 sd->sdio_spent_time_us = diff_us;
1470 #endif
1471 }
1472
1473 sd_trace(("%s: Exit\n", __FUNCTION__));
1474 return SDIOH_API_RC_SUCCESS;
1475 }
1476 #endif /* BCMSDIOH_TXGLOM */
1477
1478 static SDIOH_API_RC
1479 sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
1480 uint addr, uint8 *buf, uint len)
1481 {
1482 bool fifo = (fix_inc == SDIOH_DATA_FIX);
1483 int err_ret = 0;
1484 struct osl_timespec now, before;
1485
1486 sd_trace(("%s: Enter\n", __FUNCTION__));
1487 ASSERT(buf);
1488
1489 if (sd_msglevel & SDH_COST_VAL)
1490 osl_do_gettimeofday(&before);
1491
1492 /* NOTE:
1493 * For all writes, each packet length is aligned to 32 (or 4)
1494 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
1495 * is aligned to block boundary. If you want to align each packet to
1496 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
1497 *
1498 * For reads, the alignment is done in sdioh_request_buffer.
1499 *
1500 */
1501 sdio_claim_host(sd->func[func]);
1502
1503 if ((write) && (!fifo))
1504 err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1505 else if (write)
1506 err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
1507 else if (fifo)
1508 err_ret = sdio_readsb(sd->func[func], buf, addr, len);
1509 else
1510 err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
1511
1512 sdio_release_host(sd->func[func]);
1513
1514 if (err_ret)
1515 sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
1516 (write) ? "TX" : "RX", buf, addr, len, err_ret));
1517 else
1518 sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
1519 (write) ? "TX" : "RX", buf, addr, len));
1520
1521 sd_trace(("%s: Exit\n", __FUNCTION__));
1522
1523 if (sd_msglevel & SDH_COST_VAL) {
1524 uint32 diff_us;
1525 osl_do_gettimeofday(&now);
1526 diff_us = osl_do_gettimediff(&now, &before);
1527 sd_cost(("%s: rw=%d, len=%4d cost = %3dms %3dus\n", __FUNCTION__,
1528 write, len, diff_us/1000, diff_us%1000));
1529 }
1530
1531 return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1532 }
1533
1534 /*
1535 * This function takes a buffer or packet, and fixes everything up so that in the
1536 * end, a DMA-able packet is created.
1537 *
1538 * A buffer does not have an associated packet pointer, and may or may not be aligned.
1539 * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
1540 * then all the packets in the chain must be properly aligned. If the packet data is not
1541 * aligned, then there may only be one packet, and in this case, it is copied to a new
1542 * aligned packet.
1543 *
1544 */
1545 extern SDIOH_API_RC
1546 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
1547 uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
1548 {
1549 SDIOH_API_RC status;
1550 void *tmppkt;
1551 int is_vmalloc = FALSE;
1552 struct osl_timespec now, before;
1553
1554 sd_trace(("%s: Enter\n", __FUNCTION__));
1555 DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
1556 DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
1557
1558 if (sd_msglevel & SDH_COST_VAL)
1559 osl_do_gettimeofday(&before);
1560
1561 if (pkt) {
1562 #ifdef BCMSDIOH_TXGLOM
1563 /* packet chain, only used for tx/rx glom, all packets length
1564 * are aligned, total length is a block multiple
1565 */
1566 if (PKTNEXT(sd->osh, pkt))
1567 return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
1568 #endif /* BCMSDIOH_TXGLOM */
1569 /* non-glom mode, ignore the buffer parameter and use the packet pointer
1570 * (this shouldn't happen)
1571 */
1572 buffer = PKTDATA(sd->osh, pkt);
1573 buf_len = PKTLEN(sd->osh, pkt);
1574 }
1575
1576 ASSERT(buffer);
1577 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24)
1578 is_vmalloc = is_vmalloc_addr(buffer);
1579 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24) */
1580
1581 /* buffer and length are aligned, use it directly so we can avoid memory copy */
1582 if ((((ulong)buffer & DMA_ALIGN_MASK) == 0) && ((buf_len & DMA_ALIGN_MASK) == 0) &&
1583 (!is_vmalloc)) {
1584 return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
1585 }
1586
1587 if (is_vmalloc) {
1588 sd_trace(("%s: Need to memory copy due to virtual memory address.\n",
1589 __FUNCTION__));
1590 }
1591
1592 sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
1593 __FUNCTION__, write, buffer, buf_len));
1594
1595 /* otherwise, a memory copy is needed as the input buffer is not aligned */
1596 tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
1597 if (tmppkt == NULL) {
1598 sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
1599 return SDIOH_API_RC_FAIL;
1600 }
1601
1602 if (write)
1603 bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
1604
1605 status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
1606 PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
1607
1608 if (!write)
1609 bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
1610
1611 PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
1612
1613 if (sd_msglevel & SDH_COST_VAL) {
1614 uint32 diff_us;
1615 osl_do_gettimeofday(&now);
1616 diff_us = osl_do_gettimediff(&now, &before);
1617 sd_cost(("%s: rw=%d, len=%d cost = %3dms %3dus\n", __FUNCTION__,
1618 write, buf_len, diff_us/1000, diff_us%1000));
1619 }
1620
1621 return status;
1622 }
1623
1624 /* this function performs "abort" for both of host & device */
1625 extern int
1626 sdioh_abort(sdioh_info_t *sd, uint func)
1627 {
1628 #if defined(MMC_SDIO_ABORT)
1629 char t_func = (char) func;
1630 #endif /* defined(MMC_SDIO_ABORT) */
1631 sd_trace(("%s: Enter\n", __FUNCTION__));
1632
1633 /* XXX Standard Linux SDIO Stack cannot perform an abort. */
1634 #if defined(MMC_SDIO_ABORT)
1635 /* issue abort cmd52 command through F1 */
1636 sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
1637 #endif /* defined(MMC_SDIO_ABORT) */
1638
1639 sd_trace(("%s: Exit\n", __FUNCTION__));
1640 return SDIOH_API_RC_SUCCESS;
1641 }
1642
1643 /* Reset and re-initialize the device */
1644 int sdioh_sdio_reset(sdioh_info_t *si)
1645 {
1646 sd_trace(("%s: Enter\n", __FUNCTION__));
1647 sd_trace(("%s: Exit\n", __FUNCTION__));
1648 return SDIOH_API_RC_SUCCESS;
1649 }
1650
1651 /* Disable device interrupt */
1652 void
1653 sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
1654 {
1655 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1656 sd->intmask &= ~CLIENT_INTR;
1657 }
1658
1659 /* Enable device interrupt */
1660 void
1661 sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
1662 {
1663 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1664 sd->intmask |= CLIENT_INTR;
1665 }
1666
1667 /* Read client card reg */
1668 int
1669 sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
1670 {
1671
1672 if ((func == 0) || (regsize == 1)) {
1673 uint8 temp = 0;
1674
1675 sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1676 *data = temp;
1677 *data &= 0xff;
1678 sd_data(("%s: byte read data=0x%02x\n",
1679 __FUNCTION__, *data));
1680 } else {
1681 if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
1682 return BCME_SDIO_ERROR;
1683 }
1684 if (regsize == 2)
1685 *data &= 0xffff;
1686
1687 sd_data(("%s: word read data=0x%08x\n",
1688 __FUNCTION__, *data));
1689 }
1690
1691 return SUCCESS;
1692 }
1693
1694 #if !defined(OOB_INTR_ONLY)
1695 void sdio_claim_host_lock_local(sdioh_info_t *sd) // terence 20140926: fix for claim host issue
1696 {
1697 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
1698 if (sd)
1699 mutex_lock(&sd->claim_host_mutex);
1700 #endif
1701 }
1702
1703 void sdio_claim_host_unlock_local(sdioh_info_t *sd) // terence 20140926: fix for claim host issue
1704 {
1705 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
1706 if (sd)
1707 mutex_unlock(&sd->claim_host_mutex);
1708 #endif
1709 }
1710
1711 /* bcmsdh_sdmmc interrupt handler */
1712 static void IRQHandler(struct sdio_func *func)
1713 {
1714 sdioh_info_t *sd;
1715
1716 sd = sdio_get_drvdata(func);
1717
1718 ASSERT(sd != NULL);
1719 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
1720 if (mutex_is_locked(&sd->claim_host_mutex)) {
1721 printf("%s: muxtex is locked and return\n", __FUNCTION__);
1722 return;
1723 }
1724 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
1725
1726 sdio_claim_host_lock_local(sd);
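/*
 * The MMC core invokes SDIO interrupt handlers with the host already claimed,
 * so release it here to let the DHD handler issue its own claimed transfers,
 * then re-claim it before returning to the core.
 */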
1727 sdio_release_host(sd->func[0]);
1728
1729 if (sd->use_client_ints) {
1730 sd->intrcount++;
1731 ASSERT(sd->intr_handler);
1732 ASSERT(sd->intr_handler_arg);
1733 (sd->intr_handler)(sd->intr_handler_arg);
1734 } else { /* XXX - Do not remove these sd_err messages. Need to figure
1735 out how to keep interrupts disabled until DHD registers
1736 a handler.
1737 */
1738 sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
1739
1740 sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
1741 __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
1742 }
1743
1744 sdio_claim_host(sd->func[0]);
1745 sdio_claim_host_unlock_local(sd);
1746 }
1747
1748 /* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
1749 static void IRQHandlerF2(struct sdio_func *func)
1750 {
1751 sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
1752 }
1753 #endif /* !defined(OOB_INTR_ONLY) */
1754
1755 #ifdef NOTUSED
1756 /* Write client card reg */
1757 static int
1758 sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
1759 {
1760
1761 if ((func == 0) || (regsize == 1)) {
1762 uint8 temp;
1763
1764 temp = data & 0xff;
1765 sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
1766 sd_data(("%s: byte write data=0x%02x\n",
1767 __FUNCTION__, data));
1768 } else {
1769 if (regsize == 2)
1770 data &= 0xffff;
1771
1772 sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize);
1773
1774 sd_data(("%s: word write data=0x%08x\n",
1775 __FUNCTION__, data));
1776 }
1777
1778 return SUCCESS;
1779 }
1780 #endif /* NOTUSED */
1781
1782 #if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
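/*
 * Reset the SDIO card using whichever API this kernel offers: mmc_sw_reset()
 * or mmc_hw_reset() on 4.18+ kernels, otherwise the vendor-provided
 * sdio_reset_comm().
 */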
1783 static int sdio_sw_reset(sdioh_info_t *sd)
1784 {
1785 struct mmc_card *card = sd->func[0]->card;
1786 int err = 0;
1787
1788 #if defined(MMC_SW_RESET) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
1789 /* MMC_SW_RESET */
1790 printf("%s: call mmc_sw_reset\n", __FUNCTION__);
1791 sdio_claim_host(sd->func[0]);
1792 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
1793 err = mmc_sw_reset(card);
1794 #else
1795 err = mmc_sw_reset(card->host);
1796 #endif
1797 sdio_release_host(sd->func[0]);
1798 #elif defined(MMC_HW_RESET) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
1799 /* MMC_HW_RESET */
1800 printf("%s: call mmc_hw_reset\n", __FUNCTION__);
1801 sdio_claim_host(sd->func[0]);
1802 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
1803 while (atomic_read(&card->sdio_funcs_probed) > 1) {
1804 atomic_dec(&card->sdio_funcs_probed);
1805 }
1806 #endif
1807 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
1808 err = mmc_hw_reset(card);
1809 #else
1810 err = mmc_hw_reset(card->host);
1811 #endif
1812 sdio_release_host(sd->func[0]);
1813 #else
1814 /* sdio_reset_comm */
1815 err = sdio_reset_comm(card);
1816 #endif
1817
1818 if (err)
1819 sd_err(("%s Failed, error = %d\n", __FUNCTION__, err));
1820
1821 return err;
1822 }
1823 #endif
1824
1825 int
1826 sdioh_start(sdioh_info_t *sd, int stage)
1827 {
1828 #if defined(OEM_ANDROID)
1829 int ret;
1830
1831 if (!sd) {
1832 sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
1833 return (0);
1834 }
1835
1836 /* Need to do this in stages as we can't enable the interrupt until
1837 firmware download is complete; otherwise polled
1838 SDIO access will get in the way
1839 */
1840 if (sd->func[0]) {
1841 if (stage == 0) {
1842 /* Since the power to the chip is killed, we will have to
1843 re-enumerate the device. Set the block size
1844 and enable function 1 in preparation for
1845 downloading the code
1846 */
1847 /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
1848 2.6.27. The implementation prior to that is buggy, and needs Broadcom's
1849 patch for it
1850 */
1851 #if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
1852 if ((ret = sdio_sw_reset(sd))) {
1853 sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
1854 return ret;
1855 } else
1856 #endif
1857 {
1858 sd->num_funcs = 2;
1859 sd->sd_blockmode = TRUE;
1860 sd->use_client_ints = TRUE;
1861 sd->client_block_size[0] = 64;
1862
1863 if (sd->func[1]) {
1864 /* Claim host controller */
1865 sdio_claim_host(sd->func[1]);
1866
1867 sd->client_block_size[1] = 64;
1868 ret = sdio_set_block_size(sd->func[1], 64);
1869 if (ret) {
1870 sd_err(("bcmsdh_sdmmc: Failed to set F1 "
1871 "blocksize(%d)\n", ret));
1872 }
1873
1874 /* Release host controller F1 */
1875 sdio_release_host(sd->func[1]);
1876 }
1877
1878 if (sd->func[2]) {
1879 /* Claim host controller F2 */
1880 sdio_claim_host(sd->func[2]);
1881
1882 sd->client_block_size[2] = sd_f2_blocksize;
1883 printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
1884 ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
1885 if (ret) {
1886 sd_err(("bcmsdh_sdmmc: Failed to set F2 "
1887 "blocksize to %d(%d)\n", sd_f2_blocksize, ret));
1888 }
1889
1890 /* Release host controller F2 */
1891 sdio_release_host(sd->func[2]);
1892 }
1893
1894 sdioh_sdmmc_card_enablefuncs(sd);
1895 }
1896 } else {
1897 #if !defined(OOB_INTR_ONLY)
1898 sdio_claim_host(sd->func[0]);
1899 if (sd->func[2])
1900 sdio_claim_irq(sd->func[2], IRQHandlerF2);
1901 if (sd->func[1])
1902 sdio_claim_irq(sd->func[1], IRQHandler);
1903 sdio_release_host(sd->func[0]);
1904 #else /* defined(OOB_INTR_ONLY) */
1905 #if defined(HW_OOB)
1906 sdioh_enable_func_intr(sd);
1907 #endif
1908 bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
1909 #endif /* !defined(OOB_INTR_ONLY) */
1910 }
1911 }
1912 else
1913 sd_err(("%s Failed\n", __FUNCTION__));
1914 #endif /* defined(OEM_ANDROID) */
1915
1916 return (0);
1917 }
1918
1919 int
1920 sdioh_stop(sdioh_info_t *sd)
1921 {
1922 #if defined(OEM_ANDROID)
1923 /* The MSM7201A Android SDIO stack has a bug with interrupts:
1924 internally the SDIO stack polls, which causes issues
1925 when the device is turned off. So unregister the
1926 interrupt with the SDIO stack to stop the
1927 polling
1928 */
1929 #if !defined(OOB_INTR_ONLY)
1930 sdio_claim_host_lock_local(sd);
1931 #endif
1932 if (sd->func[0]) {
1933 #if !defined(OOB_INTR_ONLY)
1934 sdio_claim_host(sd->func[0]);
1935 if (sd->func[1])
1936 sdio_release_irq(sd->func[1]);
1937 if (sd->func[2])
1938 sdio_release_irq(sd->func[2]);
1939 sdio_release_host(sd->func[0]);
1940 #else /* defined(OOB_INTR_ONLY) */
1941 #if defined(HW_OOB)
1942 sdioh_disable_func_intr(sd);
1943 #endif
1944 bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
1945 #endif /* !defined(OOB_INTR_ONLY) */
1946 }
1947 else
1948 sd_err(("%s Failed\n", __FUNCTION__));
1949 #endif /* defined(OEM_ANDROID) */
1950 #if !defined(OOB_INTR_ONLY)
1951 sdio_claim_host_unlock_local(sd);
1952 #endif
1953 return (0);
1954 }
1955
1956 int
1957 sdioh_waitlockfree(sdioh_info_t *sd)
1958 {
1959 return (1);
1960 }
1961
1962 #ifdef BCMINTERNAL
1963 extern SDIOH_API_RC
1964 sdioh_test_diag(sdioh_info_t *sd)
1965 {
1966 sd_trace(("%s: Enter\n", __FUNCTION__));
1967 sd_trace(("%s: Exit\n", __FUNCTION__));
1968 return (0);
1969 }
1970 #endif /* BCMINTERNAL */
1971
1972 SDIOH_API_RC
1973 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1974 {
1975 return SDIOH_API_RC_FAIL;
1976 }
1977
1978 SDIOH_API_RC
1979 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1980 {
1981 return SDIOH_API_RC_FAIL;
1982 }
1983
1984 bool
1985 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1986 {
1987 return FALSE;
1988 }
1989
1990 SDIOH_API_RC
1991 sdioh_gpio_init(sdioh_info_t *sd)
1992 {
1993 return SDIOH_API_RC_FAIL;
1994 }
1995
1996 uint
1997 sdmmc_get_clock_rate(sdioh_info_t *sd)
1998 {
1999 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
2000 struct sdio_func *sdio_func = sd->func[0];
2001 struct mmc_host *host = sdio_func->card->host;
2002 return mmc_host_clk_rate(host);
2003 #else
2004 return 0;
2005 #endif
2006 }
2007
2008 void
2009 sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
2010 {
2011 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
2012 struct sdio_func *sdio_func = sd->func[0];
2013 struct mmc_host *host = sdio_func->card->host;
2014 struct mmc_ios *ios = &host->ios;
2015
2016 mmc_host_clk_hold(host);
2017 DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
2018 if (hz < host->f_min) {
2019 DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
2020 hz = host->f_min;
2021 }
2022
2023 if (hz > host->f_max) {
2024 DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
2025 hz = host->f_max;
2026 }
2027 ios->clock = hz;
2028 host->ops->set_ios(host, ios);
2029 DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
2030 mmc_host_clk_release(host);
2031 #else
2032 return;
2033 #endif
2034 }
2035
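/*
 * Program the SDIO clock as the base rate captured at attach time
 * (sd->sd_clk_rate) divided by the requested divisor.
 */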
2036 void
2037 sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
2038 {
2039 uint hz;
2040 uint old_div = sdmmc_get_clock_rate(sd);
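/* NOTE: sdmmc_get_clock_rate() returns the current clock in Hz, not a divisor,
 * so this early-return only fires if that rate happens to equal sd_div.
 */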
2041 if (old_div == sd_div) {
2042 return;
2043 }
2044
2045 hz = sd->sd_clk_rate / sd_div;
2046 sdmmc_set_clock_rate(sd, hz);
2047 }
2048