1 /*
2 * 'Standard' SDIO HOST CONTROLLER driver
3 *
4 * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
5 *
6 * Copyright (C) 1999-2017, Broadcom Corporation
7 *
8 * Unless you and Broadcom execute a separate written software license
9 * agreement governing use of this software, this software is licensed to you
10 * under the terms of the GNU General Public License version 2 (the "GPL"),
11 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12 * following added to such license:
13 *
14 * As a special exception, the copyright holders of this software give you
15 * permission to link this software with independent modules, and to copy and
16 * distribute the resulting executable under terms of your choice, provided that
17 * you also meet, for each linked independent module, the terms and conditions of
18 * the license of that module. An independent module is a module which is not
19 * derived from this software. The special exception does not apply to any
20 * modifications of the software.
21 *
22 * Notwithstanding the above, under no circumstances may you combine this
23 * software in any way with any other Broadcom software provided under a license
24 * other than the GPL, without Broadcom's express prior written consent.
25 *
26 *
27 * <<Broadcom-WL-IPTag/Open:>>
28 *
29 * $Id: bcmsdstd.c 700323 2017-05-18 16:12:11Z $
30 */
31
32 #include <typedefs.h>
33
34 #include <bcmdevs.h>
35 #include <bcmendian.h>
36 #include <bcmutils.h>
37 #include <osl.h>
38 #include <siutils.h>
39 #include <sdio.h> /* SDIO Device and Protocol Specs */
40 #include <sdioh.h> /* Standard SDIO Host Controller Specification */
41 #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
42 #include <sdiovar.h> /* ioctl/iovars */
43 #include <pcicfg.h>
44 #include <bcmsdstd.h>
45
46 #define SD_PAGE_BITS 12
47 #define SD_PAGE (1 << SD_PAGE_BITS)
48 #define SDSTD_MAX_TUNING_PHASE 5
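/* Note: SD_PAGE works out to 4096 bytes (1 << SD_PAGE_BITS); it is used below in
 * sdioh_request_buffer() as the maximum length of a single block-mode transfer.
 */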
49
50 /*
51 * Upper GPIO 16 - 31 are available on J22
52 * J22.pin3 == gpio16, J22.pin5 == gpio17, etc.
53 * Lower GPIO 0 - 15 are available on J15 (WL_GPIO)
54 */
55 #define SDH_GPIO16 16
56 #define SDH_GPIO_ENABLE 0xffff
57 #define DEFAULT_F2_BLKSIZE 256
58
59 #include <bcmsdstd.h>
60 #include <sbsdio.h> /* SDIOH (host controller) core hardware definitions */
61
62 /* Globals */
63 uint sd_msglevel = SDH_ERROR_VAL;
64
65 uint sd_hiok = TRUE; /* Use hi-speed mode if available? */
66 uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
67 uint sd_f2_blocksize; /* Default blocksize */
68 uint sd_f1_blocksize = BLOCK_SIZE_4318; /* Default blocksize */
69
70 #define sd3_trace(x)
71
72 /* sd3ClkMode: 0-SDR12 [25MHz]
73 * 1-SDR25 [50MHz]+SHS=1
74 * 2-SDR50 [100MHz]+SSDR50=1
75 * 3-SDR104 [208MHz]+SSDR104=1
76 * 4-DDR50 [50MHz]+SDDR50=1
77 */
78 #define SD3CLKMODE_0_SDR12 (0)
79 #define SD3CLKMODE_1_SDR25 (1)
80 #define SD3CLKMODE_2_SDR50 (2)
81 #define SD3CLKMODE_3_SDR104 (3)
82 #define SD3CLKMODE_4_DDR50 (4)
83 #define SD3CLKMODE_DISABLED (-1)
84 #define SD3CLKMODE_AUTO (99)
85
86 /* Values for global_UHSI_Supp: indicate how far host and card UHS-I caps match. */
87 #define HOST_SDR_UNSUPP (0)
88 #define HOST_SDR_12_25 (1)
89 #define HOST_SDR_50_104_DDR (2)
90
91 /* depends-on/affects sd3_autoselect_uhsi_max.
92 * see sd3_autoselect_uhsi_max
93 */
94 int sd_uhsimode = SD3CLKMODE_DISABLED;
95 uint sd_tuning_period = CAP3_RETUNING_TC_OTHER;
96 uint sd_delay_value = 500000;
97 /* Enables host to dongle glomming. Also increases the
98 * dma buffer size. This will increase the rx throughput
99 * as there will be fewer CMD53 transactions
100 */
101 #ifdef BCMSDIOH_TXGLOM
102 uint sd_txglom;
103 module_param(sd_txglom, uint, 0);
104 #endif /* BCMSDIOH_TXGLOM */
105
106 char dhd_sdiod_uhsi_ds_override[2] = {' '};
107
108 #define MAX_DTS_INDEX (3)
109 #define DRVSTRN_MAX_CHAR ('D')
110 #define DRVSTRN_IGNORE_CHAR (' ')
111
112 char DTS_vals[MAX_DTS_INDEX + 1] = {
113 0x1, /* Driver Strength Type-A */
114 0x0, /* Driver Strength Type-B */
115 0x2, /* Driver Strength Type-C */
116 0x3, /* Driver Strength Type-D */
117 };
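/* Illustrative note (assumption; the lookup itself is not in this file): the
 * override character in dhd_sdiod_uhsi_ds_override[] is expected to be 'A'..'D'
 * (bounded by DRVSTRN_MAX_CHAR) and would index this table roughly as
 *
 *	uint32 dts = DTS_vals[ch - 'A'];	// 'A'->0x1, 'B'->0x0, 'C'->0x2, 'D'->0x3
 *
 * matching the driver strength types listed above.
 */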
118
119 uint32 sd3_autoselect_uhsi_max = 0;
120
121 #define MAX_TUNING_ITERS (40)
122 /* (150+10) ms total time budget; divided across the per-loop delay */
123 #define PER_TRY_TUNING_DELAY_MS (160/MAX_TUNING_ITERS)
124 #define CLKTUNING_MAX_BRR_RETRIES (1000) /* 1 ms: 1000 retries with 1 us delay per loop */
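/* With the values above, PER_TRY_TUNING_DELAY_MS works out to 160/40 = 4 ms per
 * tuning iteration, and the buffer-read-ready poll budget is 1000 * 1 us = 1 ms.
 */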
125
126 /* Table analogous to the preset value register.
127 * This is because the current HC doesn't have preset value register support.
128 * All entries have DrvStr as 'B' [val:0] and CLKGEN as 0.
129 */
130 static unsigned short presetval_sw_table[] = {
131 0x0520, /* initialization: DrvStr:'B' [0]; CLKGen:0;
132 * SDCLKFreqSel: 520 [division: 320*2 = 640: ~400 KHz]
133 */
134 0x0008, /* default speed:DrvStr:'B' [0]; CLKGen:0;
135 * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
136 */
137 0x0004, /* High speed: DrvStr:'B' [0]; CLKGen:0;
138 * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
139 */
140 0x0008, /* SDR12: DrvStr:'B' [0]; CLKGen:0;
141 * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
142 */
143 0x0004, /* SDR25: DrvStr:'B' [0]; CLKGen:0;
144 * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
145 */
146 0x0002, /* SDR50: DrvStr:'B' [0]; CLKGen:0;
147 * SDCLKFreqSel: 2 [division: 1*2 = 2: ~100 MHz]
148 */
149 0x0001, /* SDR104: DrvStr:'B' [0]; CLKGen:0;
150 SDCLKFreqSel: 1 [no division: ~255/~208 MHz]
151 */
152 0x0004 /* DDR50: DrvStr:'B' [0]; CLKGen:0;
153 SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
154 */
155 };
156
157 /* Software overrides for the hardware capabilities. Info follows:
158 For override [1]: Preset registers: not supported
159 Voltage switch: not supported
160 Clock Tuning: not supported
161 */
162 #ifdef BCMQT
163 bool sd3_sw_override1 = TRUE;
164 #else
165 bool sd3_sw_override1 = FALSE;
166 #endif // endif
167 bool sd3_sw_read_magic_bytes = FALSE;
168
169 #define SD3_TUNING_REQD(sd, sd_uhsimode) ((sd_uhsimode != SD3CLKMODE_DISABLED) && \
170 (sd->version == HOST_CONTR_VER_3) && \
171 ((sd_uhsimode == SD3CLKMODE_3_SDR104) || \
172 ((sd_uhsimode == SD3CLKMODE_2_SDR50) && \
173 (GFIELD(sd->caps3, CAP3_TUNING_SDR50)))))
174
175 /* find next power of 2 */
176 #define NEXT_POW2(n) {n--; n |= n>>1; n |= n>>2; n |= n>>4; n++;}
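/* Worked example (illustrative only): NEXT_POW2 rounds a value up to the next
 * power of two by smearing the top set bit downward, e.g.
 *
 *	uint blksz = 100;
 *	NEXT_POW2(blksz);	// blksz is now 128
 *
 * Note the shift chain stops at >>4, so the result is only guaranteed for
 * inputs up to 256; larger values would need additional >>8 and >>16 stages.
 */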
177
178 #ifdef BCMSDYIELD
179 bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */
180 uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */
181 bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */
182 #endif // endif
183
184 #define F1_SLEEPCSR_ADDR 0x1001F
185
186 #ifdef BCMQT
187 #define SDSTD_WAIT_TIME_MS 1
188 #endif /* BCMQT */
189
190 uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz
191 might get changed in code for 208 MHz (SDR104)
192 */
193
194 uint sd_power = 1; /* Default to SD Slot powered ON */
195 uint sd_3_power_save = 1; /* Default to SDIO 3.0 power save */
196 uint sd_clock = 1; /* Default to SD Clock turned ON */
197 uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
198 uint8 sd_dma_mode = DMA_MODE_AUTO; /* Default to AUTO & program based on capability */
199
200 uint sd_toctl = 7;
201 static bool trap_errs = FALSE;
202
203 static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" };
204
205 /* Prototypes */
206 static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor);
207 static uint16 sdstd_start_power(sdioh_info_t *sd, int volts_req);
208 static bool sdstd_bus_width(sdioh_info_t *sd, int width);
209 static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
210 static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode);
211 static int sdstd_card_enablefuncs(sdioh_info_t *sd);
212 static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
213 static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg);
214 static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
215 int regsize, uint32 *data);
216 static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
217 int regsize, uint32 data);
218 static int sdstd_driver_init(sdioh_info_t *sd);
219 static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
220 static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
221 uint32 addr, int nbytes, uint32 *data);
222 static int sdstd_abort(sdioh_info_t *sd, uint func);
223 static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg);
224 static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
225 static void sd_map_dma(sdioh_info_t * sd);
226 static void sd_unmap_dma(sdioh_info_t * sd);
227 static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
228 static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
229 static void sd_create_adma_descriptor(sdioh_info_t *sd,
230 uint32 index, uint32 addr_phys,
231 uint16 length, uint16 flags);
232 static void sd_dump_adma_dscr(sdioh_info_t *sd);
233 static void sdstd_dumpregs(sdioh_info_t *sd);
234
235 static int sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode);
236 static int sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd);
237 static int sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd,
238 int sd3_requested_clkmode);
239 static bool sdstd_3_get_matching_drvstrn(sdioh_info_t *sd,
240 int sd3_requested_clkmode, uint32 *drvstrn, uint16 *presetval);
241 static int sdstd_3_clock_wrapper(sdioh_info_t *sd);
242 static int sdstd_clock_wrapper(sdioh_info_t *sd);
243
244 /*
245 * Private register access routines.
246 */
247
248 /* 16 bit PCI regs */
249
250 extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
251 uint16
252 sdstd_rreg16(sdioh_info_t *sd, uint reg)
253 {
254
255 volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
256 sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
257 return data;
258 }
259
260 extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
261 void
262 sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
263 {
264 *(volatile uint16 *)(sd->mem_space + reg) = (uint16) data;
265 sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
266 }
267
268 static void
269 sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
270 {
271 volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
272 sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
273 data |= val;
274 *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
275
276 }
277 static void
278 sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
279 {
280
281 volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
282 sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
283 data &= ~mask;
284 data |= (val & mask);
285 *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
286 }
287
288 /* 32 bit PCI regs */
289 static uint32
290 sdstd_rreg(sdioh_info_t *sd, uint reg)
291 {
292 volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
293 sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
294 return data;
295 }
296 static inline void
297 sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
298 {
299 *(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
300 sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
301
302 }
303
304 /* 8 bit PCI regs */
305 static inline void
306 sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data)
307 {
308 *(volatile uint8 *)(sd->mem_space + reg) = (uint8)data;
309 sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data));
310 }
311 static uint8
312 sdstd_rreg8(sdioh_info_t *sd, uint reg)
313 {
314 volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg);
315 sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data));
316 return data;
317 }
318
319 /*
320 * Private work routines
321 */
322
323 sdioh_info_t *glob_sd;
324
325 /*
326 * Public entry points & extern's
327 */
328 extern sdioh_info_t *
329 sdioh_attach(osl_t *osh, void *bar0, uint irq)
330 {
331 sdioh_info_t *sd;
332
333 sd_trace(("%s\n", __FUNCTION__));
334 if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
335 sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
336 return NULL;
337 }
338 bzero((char *)sd, sizeof(sdioh_info_t));
339 glob_sd = sd;
340 sd->osh = osh;
341 if (sdstd_osinit(sd) != 0) {
342 sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__));
343 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
344 return NULL;
345 }
346 sd->mem_space = (volatile char *)sdstd_reg_map(osh, (ulong)bar0, SDIOH_REG_WINSZ);
347 sd_init_dma(sd);
348 sd->irq = irq;
349 if (sd->mem_space == NULL) {
350 sd_err(("%s:ioremap() failed\n", __FUNCTION__));
351 sdstd_osfree(sd);
352 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
353 return NULL;
354 }
355 sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
356 sd->intr_handler = NULL;
357 sd->intr_handler_arg = NULL;
358 sd->intr_handler_valid = FALSE;
359
360 /* Set defaults */
361 sd->sd_blockmode = TRUE;
362 sd->use_client_ints = TRUE;
363 sd->sd_dma_mode = sd_dma_mode;
364
365 if (!sd->sd_blockmode)
366 sd->sd_dma_mode = DMA_MODE_NONE;
367
368 if (sdstd_driver_init(sd) != SUCCESS) {
369 /* If the host CPU was reset without resetting the SD bus or
370 SD device, the device will still have its RCA but the
371 driver no longer knows what it is (since the driver has been restarted).
372 Go through once to clear the RCA and then reassign it.
373 */
374 sd_info(("driver_init failed - Reset RCA and try again\n"));
375 if (sdstd_driver_init(sd) != SUCCESS) {
376 sd_err(("%s:driver_init() failed()\n", __FUNCTION__));
377 if (sd->mem_space) {
378 sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
379 sd->mem_space = NULL;
380 }
381 sdstd_osfree(sd);
382 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
383 return (NULL);
384 }
385 }
386
387 OSL_DMADDRWIDTH(osh, 32);
388
389 /* Always map DMA buffers, so we can switch between DMA modes. */
390 sd_map_dma(sd);
391
392 if (sdstd_register_irq(sd, irq) != SUCCESS) {
393 sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
394 sdstd_free_irq(sd->irq, sd);
395 if (sd->mem_space) {
396 sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
397 sd->mem_space = NULL;
398 }
399
400 sdstd_osfree(sd);
401 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
402 return (NULL);
403 }
404
405 sd_trace(("%s: Done\n", __FUNCTION__));
406 return sd;
407 }
408
409 extern SDIOH_API_RC
410 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
411 {
412 sd_trace(("%s\n", __FUNCTION__));
413 if (sd) {
414 sd_unmap_dma(sd);
415 sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
416 if (sd->sd3_tuning_reqd == TRUE) {
417 sdstd_3_osclean_tuning(sd);
418 sd->sd3_tuning_reqd = FALSE;
419 }
420 sd->sd3_tuning_disable = FALSE;
421 sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
422 sdstd_free_irq(sd->irq, sd);
423 if (sd->card_init_done)
424 sdstd_reset(sd, 1, 1);
425 if (sd->mem_space) {
426 sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
427 sd->mem_space = NULL;
428 }
429
430 sdstd_osfree(sd);
431 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
432 }
433 return SDIOH_API_RC_SUCCESS;
434 }
435
436 /* Configure callback to client when we receive client interrupt */
437 extern SDIOH_API_RC
438 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
439 {
440 sd_trace(("%s: Entering\n", __FUNCTION__));
441 sd->intr_handler = fn;
442 sd->intr_handler_arg = argh;
443 sd->intr_handler_valid = TRUE;
444 return SDIOH_API_RC_SUCCESS;
445 }
446
447 extern SDIOH_API_RC
448 sdioh_interrupt_deregister(sdioh_info_t *sd)
449 {
450 sd_trace(("%s: Entering\n", __FUNCTION__));
451 sd->intr_handler_valid = FALSE;
452 sd->intr_handler = NULL;
453 sd->intr_handler_arg = NULL;
454 return SDIOH_API_RC_SUCCESS;
455 }
456
457 extern SDIOH_API_RC
458 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
459 {
460 sd_trace(("%s: Entering\n", __FUNCTION__));
461 *onoff = sd->client_intr_enabled;
462 return SDIOH_API_RC_SUCCESS;
463 }
464
465 #if defined(DHD_DEBUG)
466 extern bool
467 sdioh_interrupt_pending(sdioh_info_t *sd)
468 {
469 uint16 intrstatus;
470 intrstatus = sdstd_rreg16(sd, SD_IntrStatus);
471 return !!(intrstatus & CLIENT_INTR);
472 }
473 #endif // endif
474
475 uint
476 sdioh_query_iofnum(sdioh_info_t *sd)
477 {
478 return sd->num_funcs;
479 }
480
481 /* IOVar table */
482 enum {
483 IOV_MSGLEVEL = 1,
484 IOV_BLOCKMODE,
485 IOV_BLOCKSIZE,
486 IOV_DMA,
487 IOV_USEINTS,
488 IOV_NUMINTS,
489 IOV_NUMLOCALINTS,
490 IOV_HOSTREG,
491 IOV_DEVREG,
492 IOV_DIVISOR,
493 IOV_SDMODE,
494 IOV_HISPEED,
495 IOV_HCIREGS,
496 IOV_POWER,
497 IOV_POWER_SAVE,
498 IOV_YIELDCPU,
499 IOV_MINYIELD,
500 IOV_FORCERB,
501 IOV_CLOCK,
502 IOV_UHSIMOD,
503 IOV_TUNEMOD,
504 IOV_TUNEDIS
505 };
506
507 const bcm_iovar_t sdioh_iovars[] = {
508 {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
509 {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 },
510 {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
511 {"sd_dma", IOV_DMA, 0, 0, IOVT_UINT32, 0 },
512 #ifdef BCMSDYIELD
513 {"sd_yieldcpu", IOV_YIELDCPU, 0, 0, IOVT_BOOL, 0 },
514 {"sd_minyield", IOV_MINYIELD, 0, 0, IOVT_UINT32, 0 },
515 {"sd_forcerb", IOV_FORCERB, 0, 0, IOVT_BOOL, 0 },
516 #endif // endif
517 {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
518 {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
519 {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
520 {"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
521 {"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
522 {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
523 {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
524 {"sd_power_save", IOV_POWER_SAVE, 0, 0, IOVT_UINT32, 0 },
525 {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
526 {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
527 {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0},
528 {"sd_uhsimode", IOV_UHSIMOD, 0, 0, IOVT_UINT32, 0},
529 {"tuning_mode", IOV_TUNEMOD, 0, 0, IOVT_UINT32, 0},
530 {"sd3_tuning_disable", IOV_TUNEDIS, 0, 0, IOVT_BOOL, 0},
531
532 {NULL, 0, 0, 0, 0, 0 }
533 };
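/* Illustrative sketch (not driver code): per the table above, the "sd_blocksize"
 * iovar packs the function number and block size into one 32-bit set-value as
 * ((func << 16) | size), e.g.
 *
 *	uint32 arg = (2 << 16) | 512;	// function 2, 512-byte blocks
 *
 * which IOV_SVAL(IOV_BLOCKSIZE) below unpacks with (arg >> 16) and (uint16)arg.
 */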
534 uint8 sdstd_turn_on_clock(sdioh_info_t *sd)
535 {
536 sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
537 return 0;
538 }
539
540 uint8 sdstd_turn_off_clock(sdioh_info_t *sd)
541 {
542 sdstd_wreg16(sd, SD_ClockCntrl, sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
543 return 0;
544 }
545
546 int
547 sdioh_iovar_op(sdioh_info_t *si, const char *name,
548 void *params, int plen, void *arg, int len, bool set)
549 {
550 const bcm_iovar_t *vi = NULL;
551 int bcmerror = 0;
552 int val_size;
553 int32 int_val = 0;
554 bool bool_val;
555 uint32 actionid;
556
557 ASSERT(name);
558 ASSERT(len >= 0);
559
560 /* Get must have return space; Set does not take qualifiers */
561 ASSERT(set || (arg && len));
562 ASSERT(!set || (!params && !plen));
563
564 sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
565
566 if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
567 bcmerror = BCME_UNSUPPORTED;
568 goto exit;
569 }
570
571 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
572 goto exit;
573
574 /* Set up params so get and set can share the convenience variables */
575 if (params == NULL) {
576 params = arg;
577 plen = len;
578 }
579
580 if (vi->type == IOVT_VOID)
581 val_size = 0;
582 else if (vi->type == IOVT_BUFFER)
583 val_size = len;
584 else
585 val_size = sizeof(int);
586
587 if (plen >= (int)sizeof(int_val))
588 bcopy(params, &int_val, sizeof(int_val));
589
590 bool_val = (int_val != 0) ? TRUE : FALSE;
591 BCM_REFERENCE(bool_val);
592
593 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
594 switch (actionid) {
595 case IOV_GVAL(IOV_MSGLEVEL):
596 int_val = (int32)sd_msglevel;
597 bcopy(&int_val, arg, val_size);
598 break;
599
600 case IOV_SVAL(IOV_MSGLEVEL):
601 sd_msglevel = int_val;
602 break;
603
604 case IOV_GVAL(IOV_BLOCKMODE):
605 int_val = (int32)si->sd_blockmode;
606 bcopy(&int_val, arg, val_size);
607 break;
608
609 case IOV_SVAL(IOV_BLOCKMODE):
610 si->sd_blockmode = (bool)int_val;
611 /* Haven't figured out how to make non-block mode with DMA */
612 if (!si->sd_blockmode)
613 si->sd_dma_mode = DMA_MODE_NONE;
614 break;
615
616 #ifdef BCMSDYIELD
617 case IOV_GVAL(IOV_YIELDCPU):
618 int_val = sd_yieldcpu;
619 bcopy(&int_val, arg, val_size);
620 break;
621
622 case IOV_SVAL(IOV_YIELDCPU):
623 sd_yieldcpu = (bool)int_val;
624 break;
625
626 case IOV_GVAL(IOV_MINYIELD):
627 int_val = sd_minyield;
628 bcopy(&int_val, arg, val_size);
629 break;
630
631 case IOV_SVAL(IOV_MINYIELD):
632 sd_minyield = (bool)int_val;
633 break;
634
635 case IOV_GVAL(IOV_FORCERB):
636 int_val = sd_forcerb;
637 bcopy(&int_val, arg, val_size);
638 break;
639
640 case IOV_SVAL(IOV_FORCERB):
641 sd_forcerb = (bool)int_val;
642 break;
643 #endif /* BCMSDYIELD */
644
645 case IOV_GVAL(IOV_BLOCKSIZE):
646 if ((uint32)int_val > si->num_funcs) {
647 bcmerror = BCME_BADARG;
648 break;
649 }
650 int_val = (int32)si->client_block_size[int_val];
651 bcopy(&int_val, arg, val_size);
652 break;
653
654 case IOV_SVAL(IOV_BLOCKSIZE):
655 {
656 uint func = ((uint32)int_val >> 16);
657 uint blksize = (uint16)int_val;
658
659 if (func > si->num_funcs) {
660 bcmerror = BCME_BADARG;
661 break;
662 }
663
664 /* Now set it */
665 sdstd_lock(si);
666 bcmerror = set_client_block_size(si, func, blksize);
667 sdstd_unlock(si);
668 break;
669 }
670
671 case IOV_GVAL(IOV_DMA):
672 int_val = (int32)si->sd_dma_mode;
673 bcopy(&int_val, arg, val_size);
674 break;
675
676 case IOV_SVAL(IOV_DMA):
677 si->sd_dma_mode = (char)int_val;
678 sdstd_set_dma_mode(si, si->sd_dma_mode);
679 break;
680
681 case IOV_GVAL(IOV_USEINTS):
682 int_val = (int32)si->use_client_ints;
683 bcopy(&int_val, arg, val_size);
684 break;
685
686 case IOV_SVAL(IOV_USEINTS):
687 si->use_client_ints = (bool)int_val;
688 if (si->use_client_ints)
689 si->intmask |= CLIENT_INTR;
690 else
691 si->intmask &= ~CLIENT_INTR;
692 break;
693
694 case IOV_GVAL(IOV_DIVISOR):
695 int_val = (uint32)sd_divisor;
696 bcopy(&int_val, arg, val_size);
697 break;
698
699 case IOV_SVAL(IOV_DIVISOR):
700 sd_divisor = int_val;
701 if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
702 sd_err(("set clock failed!\n"));
703 bcmerror = BCME_ERROR;
704 }
705 break;
706
707 case IOV_GVAL(IOV_POWER):
708 int_val = (uint32)sd_power;
709 bcopy(&int_val, arg, val_size);
710 break;
711
712 case IOV_GVAL(IOV_POWER_SAVE):
713 int_val = (uint32)sd_3_power_save;
714 bcopy(&int_val, arg, val_size);
715 break;
716
717 case IOV_SVAL(IOV_POWER):
718 sd_power = int_val;
719 if (sd_power == 1) {
720 if (sdstd_driver_init(si) != SUCCESS) {
721 sd_err(("set SD Slot power failed!\n"));
722 bcmerror = BCME_ERROR;
723 } else {
724 sd_err(("SD Slot Powered ON.\n"));
725 }
726 } else {
727 uint8 pwr = 0;
728
729 pwr = SFIELD(pwr, PWR_BUS_EN, 0);
730 sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */
731 sd_err(("SD Slot Powered OFF.\n"));
732 }
733 break;
734
735 case IOV_SVAL(IOV_POWER_SAVE):
736 sd_3_power_save = int_val;
737 break;
738
739 case IOV_GVAL(IOV_CLOCK):
740 int_val = (uint32)sd_clock;
741 bcopy(&int_val, arg, val_size);
742 break;
743
744 case IOV_SVAL(IOV_CLOCK):
745 sd_clock = int_val;
746 if (sd_clock == 1) {
747 sd_info(("SD Clock turned ON.\n"));
748 if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
749 sd_err(("sdstd_start_clock failed\n"));
750 bcmerror = BCME_ERROR;
751 }
752 } else {
753 /* turn off HC clock */
754 sdstd_wreg16(si, SD_ClockCntrl,
755 sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4));
756
757 sd_info(("SD Clock turned OFF.\n"));
758 }
759 break;
760
761 case IOV_GVAL(IOV_SDMODE):
762 int_val = (uint32)sd_sdmode;
763 bcopy(&int_val, arg, val_size);
764 break;
765
766 case IOV_SVAL(IOV_SDMODE):
767 sd_sdmode = int_val;
768
769 if (!sdstd_bus_width(si, sd_sdmode)) {
770 sd_err(("sdstd_bus_width failed\n"));
771 bcmerror = BCME_ERROR;
772 }
773 break;
774
775 case IOV_GVAL(IOV_HISPEED):
776 int_val = (uint32)sd_hiok;
777 bcopy(&int_val, arg, val_size);
778 break;
779
780 case IOV_SVAL(IOV_HISPEED):
781 sd_hiok = int_val;
782 bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok);
783 break;
784
785 case IOV_GVAL(IOV_UHSIMOD):
786 sd3_trace(("%s: Get UHSI: \n", __FUNCTION__));
787 int_val = (int)sd_uhsimode;
788 bcopy(&int_val, arg, val_size);
789 break;
790
791 case IOV_SVAL(IOV_UHSIMOD):
792 {
793 int oldval = sd_uhsimode; /* save old, working value */
794 sd3_trace(("%s: Set UHSI: \n", __FUNCTION__));
795 /* check if UHSI is supported by card/host */
796 if (!(si->card_UHSI_voltage_Supported && si->host_UHSISupported)) {
797 sd_err(("%s:UHSI not suppoted!\n", __FUNCTION__));
798 bcmerror = BCME_UNSUPPORTED;
799 break;
800 }
801 /* check for valid values */
802 if (!((int_val == SD3CLKMODE_AUTO) ||
803 (int_val == SD3CLKMODE_DISABLED) ||
804 ((int_val >= SD3CLKMODE_0_SDR12) &&
805 (int_val <= SD3CLKMODE_4_DDR50)))) {
806 sd_err(("%s:CLK: bad arg!\n", __FUNCTION__));
807 bcmerror = BCME_BADARG;
808 break;
809 }
810
811 sd_uhsimode = int_val;
812 if (SUCCESS != sdstd_3_clock_wrapper(si)) {
813 sd_err(("%s:Error in setting uhsi clkmode:%d,"
814 "restoring back to %d\n", __FUNCTION__,
815 sd_uhsimode, oldval));
816 /* try to set back the old one */
817 sd_uhsimode = oldval;
818 if (SUCCESS != sdstd_3_clock_wrapper(si)) {
819 sd_err(("%s:Error in setting uhsi to old mode;"
820 "ignoring:\n", __FUNCTION__));
821 }
822 }
823 break;
824 }
825 #ifdef DHD_DEBUG
826 case IOV_SVAL(IOV_TUNEMOD):
827 {
828
829 if (int_val == SD_DHD_DISABLE_PERIODIC_TUNING) { /* run tuning a single time */
830 sd3_trace(("Start tuning from Iovar\n"));
831 si->sd3_tuning_reqd = TRUE;
832 sdstd_enable_disable_periodic_timer(si, int_val);
833 sdstd_lock(si);
834 sdstd_3_clk_tuning(si, sdstd_3_get_uhsi_clkmode(si));
835 sdstd_unlock(si);
836 si->sd3_tuning_reqd = FALSE;
837 }
838 if (int_val == SD_DHD_ENABLE_PERIODIC_TUNING) {
839 sd3_trace(("Enabling automatic tuning\n"));
840 si->sd3_tuning_reqd = TRUE;
841 sdstd_enable_disable_periodic_timer(si, int_val);
842 }
843 break;
844 }
845 #endif /* DHD_DEBUG */
846 case IOV_GVAL(IOV_NUMINTS):
847 int_val = (int32)si->intrcount;
848 bcopy(&int_val, arg, val_size);
849 break;
850
851 case IOV_GVAL(IOV_NUMLOCALINTS):
852 int_val = (int32)si->local_intrcount;
853 bcopy(&int_val, arg, val_size);
854 break;
855
856 case IOV_GVAL(IOV_HOSTREG):
857 {
858 sdreg_t *sd_ptr = (sdreg_t *)params;
859
860 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
861 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
862 bcmerror = BCME_BADARG;
863 break;
864 }
865
866 sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
867 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
868 sd_ptr->offset));
869 if (sd_ptr->offset & 1)
870 int_val = sdstd_rreg8(si, sd_ptr->offset);
871 else if (sd_ptr->offset & 2)
872 int_val = sdstd_rreg16(si, sd_ptr->offset);
873 else
874 int_val = sdstd_rreg(si, sd_ptr->offset);
875
876 bcopy(&int_val, arg, sizeof(int_val));
877 break;
878 }
879
880 case IOV_SVAL(IOV_HOSTREG):
881 {
882 sdreg_t *sd_ptr = (sdreg_t *)params;
883
884 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
885 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
886 bcmerror = BCME_BADARG;
887 break;
888 }
889
890 sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
891 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
892 sd_ptr->offset));
893 if (sd_ptr->offset & 1)
894 sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
895 else if (sd_ptr->offset & 2)
896 sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
897 else
898 sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);
899
900 break;
901 }
902
903 case IOV_GVAL(IOV_DEVREG):
904 {
905 sdreg_t *sd_ptr = (sdreg_t *)params;
906 uint8 data;
907
908 if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
909 bcmerror = BCME_SDIO_ERROR;
910 break;
911 }
912
913 int_val = (int)data;
914 bcopy(&int_val, arg, sizeof(int_val));
915 break;
916 }
917
918 case IOV_SVAL(IOV_DEVREG):
919 {
920 sdreg_t *sd_ptr = (sdreg_t *)params;
921 uint8 data = (uint8)sd_ptr->value;
922
923 if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
924 bcmerror = BCME_SDIO_ERROR;
925 break;
926 }
927 break;
928 }
929
930 case IOV_SVAL(IOV_TUNEDIS):
931 si->sd3_tuning_disable = (bool)int_val;
932 break;
933
934 default:
935 bcmerror = BCME_UNSUPPORTED;
936 break;
937 }
938 exit:
939
940 return bcmerror;
941 }
942
943 extern SDIOH_API_RC
944 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
945 {
946 SDIOH_API_RC status;
947 /* No lock needed since sdioh_request_byte does locking */
948 status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
949 return status;
950 }
951
952 extern SDIOH_API_RC
953 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
954 {
955 /* No lock needed since sdioh_request_byte does locking */
956 SDIOH_API_RC status;
957 status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
958 return status;
959 }
960
961 extern SDIOH_API_RC
962 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
963 {
964 uint32 count;
965 int offset;
966 uint32 foo;
967 uint8 *cis = cisd;
968
969 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
970
971 if (!sd->func_cis_ptr[func]) {
972 bzero(cis, length);
973 return SDIOH_API_RC_FAIL;
974 }
975
976 sdstd_lock(sd);
977 *cis = 0;
978 for (count = 0; count < length; count++) {
979 offset = sd->func_cis_ptr[func] + count;
980 if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
981 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
982 sdstd_unlock(sd);
983 return SDIOH_API_RC_FAIL;
984 }
985 *cis = (uint8)(foo & 0xff);
986 cis++;
987 }
988 sdstd_unlock(sd);
989 return SDIOH_API_RC_SUCCESS;
990 }
991
992 extern SDIOH_API_RC
993 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
994 {
995 int status = SDIOH_API_RC_SUCCESS;
996 uint32 cmd_arg;
997 uint32 rsp5;
998
999 sdstd_lock(sd);
1000 if (rw == SDIOH_READ)
1001 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
1002
1003 /* Change to DATA_TRANSFER_ONGOING; protects against the tuning tasklet */
1004 sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
1005
1006 cmd_arg = 0;
1007 cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
1008 cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
1009 cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
1010 cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
1011 cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);
1012
1013 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
1014 /* Change to DATA_TRANSFER_IDLE */
1015 sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
1016 sdstd_unlock(sd);
1017 return status;
1018 }
1019
1020 sdstd_cmd_getrsp(sd, &rsp5, 1);
1021 if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
1022 sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
1023 __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
1024 status = SDIOH_API_RC_FAIL;
1025 }
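/* R5 response flags of 0x10 mean IO_CURRENT_STATE = CMD with no error bits set
 * (per the SDIO R5 format); any other value is flagged as a failure below, with
 * the error print suppressed for the F1 sleep CSR address.
 */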
1026 if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) {
1027 if (GFIELD(cmd_arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR) {
1028 sd_err(("%s: rsp5 flags is 0x%x\t %d \n",
1029 __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
1030 }
1031 status = SDIOH_API_RC_FAIL;
1032 }
1033
1034 if (GFIELD(rsp5, RSP5_STUFF)) {
1035 sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
1036 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
1037 status = SDIOH_API_RC_FAIL;
1038 }
1039
1040 if (rw == SDIOH_READ)
1041 *byte = GFIELD(rsp5, RSP5_DATA);
1042
1043 /* Change to DATA_TRANSFER_IDLE */
1044 sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
1045
1046 /* check if we have to do tuning; if so, start */
1047 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
1048
1049 sdstd_unlock(sd);
1050 return status;
1051 }
1052
1053 extern SDIOH_API_RC
1054 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
1055 uint32 *word, uint nbytes)
1056 {
1057 int status;
1058
1059 sdstd_lock(sd);
1060
1061 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
1062
1063 /* Change to DATA_TRANSFER_ONGOING; protects against the tuning tasklet */
1064 sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
1065
1066 if (rw == SDIOH_READ) {
1067 status = sdstd_card_regread(sd, func, addr, nbytes, word);
1068 } else {
1069 status = sdstd_card_regwrite(sd, func, addr, nbytes, *word);
1070 }
1071
1072 /* Change to DATA_TRANSFER_IDLE */
1073 sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
1074
1075 /* check if we have to do tuning; if so, start */
1076 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
1077
1078 sdstd_unlock(sd);
1079 return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1080 }
1081
1082 #ifdef BCMSDIOH_TXGLOM
1083 void
1084 sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
1085 {
1086 BCM_REFERENCE(pkt);
1087 sd->glom_info.dma_buf_arr[sd->glom_info.count] = frame;
1088 sd->glom_info.nbytes[sd->glom_info.count] = len;
1089 /* Convert the frame address to a physical address for DMA on host controller version 3 */
1090 if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
1091 sd->glom_info.dma_phys_arr[sd->glom_info.count] = DMA_MAP(sd->osh,
1092 frame,
1093 len,
1094 DMA_TX, 0, 0);
1095 }
1096 sd->glom_info.count++;
1097 }
1098
1099 void
1100 sdioh_glom_clear(sdioh_info_t *sd)
1101 {
1102 int i;
1103 /* DMA_MAP is done per frame only if host controller version is 3 */
1104 if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
1105 for (i = 0; i < sd->glom_info.count; i++) {
1106 DMA_UNMAP(sd->osh,
1107 sd->glom_info.dma_phys_arr[i],
1108 sd->glom_info.nbytes[i],
1109 DMA_TX, 0, 0);
1110 }
1111 }
1112 sd->glom_info.count = 0;
1113 }
1114
1115 uint
1116 sdioh_set_mode(sdioh_info_t *sd, uint mode)
1117 {
1118 if (mode == SDPCM_TXGLOM_CPY)
1119 sd->txglom_mode = mode;
1120 else if ((mode == SDPCM_TXGLOM_MDESC) && (sd->version == HOST_CONTR_VER_3))
1121 sd->txglom_mode = mode;
1122
1123 return (sd->txglom_mode);
1124 }
1125
1126 bool
1127 sdioh_glom_enabled(void)
1128 {
1129 return sd_txglom;
1130 }
1131 #endif /* BCMSDIOH_TXGLOM */
1132
1133 extern SDIOH_API_RC
1134 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
1135 uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
1136 {
1137 uint8 is_ddr50 = FALSE;
1138 int len;
1139 int buflen = (int)buflen_u;
1140 bool fifo = (fix_inc == SDIOH_DATA_FIX);
1141 uint8 *localbuf = NULL, *tmpbuf = NULL;
1142 bool local_blockmode = sd->sd_blockmode;
1143 SDIOH_API_RC status = SDIOH_API_RC_SUCCESS;
1144
1145 sdstd_lock(sd);
1146
1147 is_ddr50 = (sd_uhsimode == SD3CLKMODE_4_DDR50) ? TRUE : FALSE;
1148
1149 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
1150
1151 /* Change to DATA_TRANSFER_ONGOING; protects against the tuning tasklet */
1152 sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
1153
1154 ASSERT(reg_width == 4);
1155 ASSERT(buflen_u < (1 << 30));
1156 ASSERT(sd->client_block_size[func]);
1157
1158 #ifdef BCMSDIOH_TXGLOM
1159 if (sd_txglom) {
1160 while (pkt) {
1161 sdioh_glom_post(sd, PKTDATA(sd->osh, pkt), pkt, PKTLEN(sd->osh, pkt));
1162 pkt = PKTNEXT(sd->osh, pkt);
1163 }
1164 }
1165 #endif // endif
1166 sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
1167 __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
1168 buflen_u, sd->r_cnt, sd->t_cnt, pkt));
1169
1170 /* Break buffer down into blocksize chunks:
1171 * Bytemode: 1 block at a time.
1172 * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
1173 * Both: leftovers are handled last (will be sent via bytemode).
1174 */
1175 while (buflen > 0) {
1176 if (local_blockmode) {
1177 int max_tran_size = SD_PAGE;
1178 #ifdef BCMSDIOH_TXGLOM
1179 /* There is no alignment requirement for HC3 */
1180 if ((sd->version == HOST_CONTR_VER_3) && sd_txglom)
1181 max_tran_size = SD_PAGE * 4;
1182 #endif // endif
1183 /* Max xfer is Page size */
1184 len = MIN(max_tran_size, buflen);
1185
1186 /* Round down to a block boundary */
1187 if (buflen > sd->client_block_size[func])
1188 len = (len/sd->client_block_size[func]) *
1189 sd->client_block_size[func];
1190 if ((func == SDIO_FUNC_1) && (((len % 4) == 3) || (((len % 2) == 1) &&
1191 (is_ddr50))) && ((rw == SDIOH_WRITE) || (rw == SDIOH_READ))) {
1192 sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
1193 len++;
1194 tmpbuf = buffer;
1195 if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
1196 sd_err(("out of memory, malloced %d bytes\n",
1197 MALLOCED(sd->osh)));
1198 status = SDIOH_API_RC_FAIL;
1199 goto done;
1200 }
1201 bcopy(buffer, localbuf, len);
1202 buffer = localbuf;
1203 }
1204 } else {
1205 /* Byte mode: One block at a time */
1206 len = MIN(sd->client_block_size[func], buflen);
1207 }
1208
1209 if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
1210 status = SDIOH_API_RC_FAIL;
1211 }
1212
1213 if (local_blockmode && localbuf) {
1214 MFREE(sd->osh, localbuf, len);
1215 localbuf = NULL;
1216 len--;
1217 buffer = tmpbuf;
1218 sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__));
1219 }
1220
1221 if (status == SDIOH_API_RC_FAIL) {
1222 goto done;
1223 }
1224
1225 buffer += len;
1226 buflen -= len;
1227 if (!fifo)
1228 addr += len;
1229 #ifdef BCMSDIOH_TXGLOM
1230 /* This loop should not repeat for glommed pkts, since they are sent either
1231 * in multiples of the block size or as a total payload smaller than a block
1232 */
1233 if (sd->glom_info.count != 0)
1234 buflen = 0;
1235 #endif // endif
1236 }
1237 done:
1238
1239 /* Change to DATA_TRANSFER_IDLE */
1240 sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
1241
1242 /* check if we have to do tuning; if so, start */
1243 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
1244
1245 sdstd_unlock(sd);
1246
1247 #ifdef BCMSDIOH_TXGLOM
1248 if (sd_txglom)
1249 sdioh_glom_clear(sd);
1250 #endif // endif
1251
1252 return status;
1253 }
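/* Worked example of the chunking above (illustrative numbers only): with a
 * 512-byte client block size and a 6000-byte block-mode transfer, the loop
 * issues 4096 bytes (8 blocks, capped at SD_PAGE), then 1536 bytes (3 blocks,
 * rounded down to a block boundary), and finally the 368-byte remainder as a
 * byte-mode transfer.
 */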
1254
1255 extern SDIOH_API_RC
1256 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1257 {
1258 uint offset = 0;
1259 uint16 val;
1260
1261 /* check if upper bank */
1262 if (gpio >= SDH_GPIO16) {
1263 gpio -= SDH_GPIO16;
1264 offset = 2;
1265 }
1266
1267 val = sdstd_rreg16(sd, SD_GPIO_OE + offset);
1268 val |= (1 << gpio);
1269 sdstd_wreg16(sd, SD_GPIO_OE + offset, val);
1270
1271 return SDIOH_API_RC_SUCCESS;
1272 }
1273
1274 extern SDIOH_API_RC
1275 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1276 {
1277 uint offset = 0;
1278 uint16 val;
1279
1280 /* check if upper bank */
1281 if (gpio >= SDH_GPIO16) {
1282 gpio -= SDH_GPIO16;
1283 offset = 2;
1284 }
1285
1286 val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
1287 if (enab == TRUE)
1288 val |= (1 << gpio);
1289 else
1290 val &= ~(1 << gpio);
1291 sdstd_wreg16(sd, SD_GPIO_Reg + offset, val);
1292
1293 return SDIOH_API_RC_SUCCESS;
1294 }
1295
1296 extern bool
1297 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1298 {
1299 uint offset = 0;
1300 uint16 val;
1301
1302 /* check if upper bank */
1303 if (gpio >= SDH_GPIO16) {
1304 gpio -= SDH_GPIO16;
1305 offset = 2;
1306 }
1307
1308 val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
1309 val = (val >> gpio) & 1;
1310
1311 return (val == 1);
1312 }
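/* Worked example for the three GPIO routines above (illustrative only): gpio 20
 * falls in the upper bank (>= SDH_GPIO16), so it is remapped to bit (20 - 16) = 4
 * and accessed through the registers at offset + 2, i.e. SD_GPIO_OE + 2 and
 * SD_GPIO_Reg + 2.
 */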
1313
1314 extern SDIOH_API_RC
1315 sdioh_gpio_init(sdioh_info_t *sd)
1316 {
1317 uint rev;
1318
1319 rev = sdstd_rreg16(sd, SD_HostControllerVersion) >> 8;
1320
1321 /* Only P206 (fpga rev >= 16) supports gpio */
1322 if (rev < 16) {
1323 sd_err(("%s: gpio not supported in rev %d \n", __FUNCTION__, rev));
1324 return SDIOH_API_RC_FAIL;
1325 }
1326
1327 sdstd_wreg16(sd, SD_GPIO_Enable, SDH_GPIO_ENABLE);
1328 sdstd_wreg16(sd, SD_GPIO_Enable + 2, SDH_GPIO_ENABLE);
1329
1330 /* Default to input */
1331 sdstd_wreg16(sd, SD_GPIO_OE, 0);
1332 sdstd_wreg16(sd, SD_GPIO_OE + 2, 0);
1333
1334 return SDIOH_API_RC_SUCCESS;
1335 }
1336
1337 extern SDIOH_API_RC
1338 sdioh_sleep(sdioh_info_t *sd, bool enab)
1339 {
1340 SDIOH_API_RC status;
1341 uint32 cmd_arg = 0, rsp1 = 0;
1342 int retry = 100;
1343
1344 sdstd_lock(sd);
1345
1346 cmd_arg = SFIELD(cmd_arg, CMD14_RCA, sd->card_rca);
1347 cmd_arg = SFIELD(cmd_arg, CMD14_SLEEP, enab);
1348
1349 /*
1350 * For ExitSleep:
1351 * 1) Repeat CMD14 until R1 is received
1352 * 2) Send CMD7
1353 */
1354 status = SDIOH_API_RC_FAIL;
1355 while (retry-- > 0) {
1356 if ((sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_14, cmd_arg)) == SUCCESS) {
1357 status = SDIOH_API_RC_SUCCESS;
1358 break;
1359 }
1360 OSL_DELAY(1400);
1361 }
1362
1363 if (status == SDIOH_API_RC_FAIL) {
1364 sd_err(("%s: CMD14: failed! enable:%d\n", __FUNCTION__, enab));
1365 goto exit;
1366 }
1367
1368 sdstd_cmd_getrsp(sd, &rsp1, 1);
1369 sd_info(("%s: CMD14 OK: cmd_resp:0x%x\n", __FUNCTION__, rsp1));
1370
1371 /* ExitSleep: Send CMD7 After R1 */
1372 if (enab == FALSE) {
1373 /* Select the card */
1374 cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
1375 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg)) != SUCCESS) {
1376 sd_err(("%s: CMD14 send CMD7 failed!\n", __FUNCTION__));
1377 status = SDIOH_API_RC_FAIL;
1378 goto exit;
1379 }
1380
1381 sdstd_cmd_getrsp(sd, &rsp1, 1);
1382 if (rsp1 != SDIOH_CMD7_EXP_STATUS) {
1383 sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
1384 __FUNCTION__, rsp1));
1385 status = SDIOH_API_RC_FAIL;
1386 goto exit;
1387 }
1388 }
1389
1390 exit:
1391 sdstd_unlock(sd);
1392
1393 return status;
1394 }
1395
1396 static int
1397 sdstd_abort(sdioh_info_t *sd, uint func)
1398 {
1399 int err = 0;
1400 int retries;
1401
1402 uint16 cmd_reg;
1403 uint32 cmd_arg;
1404 uint32 rsp5;
1405 uint8 rflags;
1406
1407 uint16 int_reg = 0;
1408 uint16 plain_intstatus;
1409
1410 /* Argument is write to F0 (CCCR) IOAbort with function number */
1411 cmd_arg = 0;
1412 cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0);
1413 cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT);
1414 cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE);
1415 cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
1416 cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func);
1417
1418 /* Command is CMD52 write */
1419 cmd_reg = 0;
1420 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY);
1421 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
1422 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
1423 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
1424 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT);
1425 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52);
1426
1427 if (sd->sd_mode == SDIOH_MODE_SPI) {
1428 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
1429 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
1430 }
1431
1432 /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */
1433 retries = RETRIES_SMALL;
1434 while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) {
1435 if (retries == RETRIES_SMALL)
1436 sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n",
1437 __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
1438 if (!--retries) {
1439 sd_err(("%s: Command Inhibit timeout, state 0x%08x\n",
1440 __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
1441 if (trap_errs)
1442 ASSERT(0);
1443 err = BCME_SDIO_ERROR;
1444 goto done;
1445 }
1446 }
1447
1448 /* Clear errors from any previous commands */
1449 if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) {
1450 sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus));
1451 sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
1452 }
1453 plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus);
1454 if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) {
1455 sd_err(("abort: intstatus 0x%04x\n", plain_intstatus));
1456 if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) {
1457 sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n"));
1458 }
1459 if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) {
1460 sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n"));
1461 err = BCME_NODEVICE;
1462 goto done;
1463 }
1464 }
1465
1466 /* Issue the command */
1467 sdstd_wreg(sd, SD_Arg0, cmd_arg);
1468 sdstd_wreg16(sd, SD_Command, cmd_reg);
1469
1470 /* In interrupt mode return, expect later CMD_COMPLETE interrupt */
1471 if (!sd->polled_mode)
1472 return err;
1473
1474 /* Otherwise, wait for the command to complete */
1475 retries = RETRIES_LARGE;
1476 do {
1477 #ifdef BCMQT
1478 if (retries != RETRIES_LARGE) {
1479 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
1480 }
1481 #endif /* BCMQT */
1482 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
1483 } while (--retries &&
1484 (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
1485 (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
1486
1487 /* If command completion fails, do a cmd reset and note the error */
1488 if (!retries) {
1489 sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n",
1490 __FUNCTION__, int_reg,
1491 sdstd_rreg16(sd, SD_ErrorIntrStatus),
1492 sdstd_rreg(sd, SD_PresentState)));
1493
1494 sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
1495 retries = RETRIES_LARGE;
1496 do {
1497 #ifdef BCMQT
1498 if (retries != RETRIES_LARGE) {
1499 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
1500 }
1501 #endif /* BCMQT */
1502 sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
1503 } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
1504 SW_RESET_CMD)) && retries--);
1505
1506 if (!retries) {
1507 sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
1508 }
1509
1510 if (trap_errs)
1511 ASSERT(0);
1512
1513 err = BCME_SDIO_ERROR;
1514 }
1515
1516 /* Clear Command Complete interrupt */
1517 int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
1518 sdstd_wreg16(sd, SD_IntrStatus, int_reg);
1519
1520 /* Check for Errors */
1521 if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) {
1522 sd_err(("%s: ErrorintrStatus: 0x%x, "
1523 "(intrstatus = 0x%x, present state 0x%x) clearing\n",
1524 __FUNCTION__, plain_intstatus,
1525 sdstd_rreg16(sd, SD_IntrStatus),
1526 sdstd_rreg(sd, SD_PresentState)));
1527
1528 sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
1529
1530 sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
1531 retries = RETRIES_LARGE;
1532 do {
1533 #ifdef BCMQT
1534 if (retries != RETRIES_LARGE) {
1535 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
1536 }
1537 #endif /* BCMQT */
1538 sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
1539 } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
1540 SW_RESET_DAT)) && retries--);
1541
1542 if (!retries) {
1543 sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
1544 }
1545
1546 if (trap_errs)
1547 ASSERT(0);
1548
1549 /* ABORT is dataless, only cmd errs count */
1550 if (plain_intstatus & ERRINT_CMD_ERRS)
1551 err = BCME_SDIO_ERROR;
1552 }
1553
1554 /* If command failed don't bother looking at response */
1555 if (err)
1556 goto done;
1557
1558 /* Otherwise, check the response */
1559 sdstd_cmd_getrsp(sd, &rsp5, 1);
1560 rflags = GFIELD(rsp5, RSP5_FLAGS);
1561
1562 if (rflags & SD_RSP_R5_ERRBITS) {
1563 sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags));
1564
1565 /* The CRC error flag applies to the previous command */
1566 if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) {
1567 err = BCME_SDIO_ERROR;
1568 goto done;
1569 }
1570 }
1571
1572 if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) &&
1573 ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) {
1574 sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags));
1575 err = BCME_SDIO_ERROR;
1576 goto done;
1577 }
1578
1579 if (GFIELD(rsp5, RSP5_STUFF)) {
1580 sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
1581 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
1582 err = BCME_SDIO_ERROR;
1583 goto done;
1584 }
1585
1586 done:
1587 if (err == BCME_NODEVICE)
1588 return err;
1589
1590 sdstd_wreg8(sd, SD_SoftwareReset,
1591 SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1));
1592
1593 retries = RETRIES_LARGE;
1594 do {
1595 rflags = sdstd_rreg8(sd, SD_SoftwareReset);
1596 if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
1597 break;
1598 #ifdef BCMQT
1599 if (retries != RETRIES_LARGE) {
1600 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
1601 }
1602 #endif /* BCMQT */
1603 } while (--retries);
1604
1605 if (!retries) {
1606 sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n",
1607 __FUNCTION__, rflags));
1608 err = BCME_SDIO_ERROR;
1609 }
1610
1611 return err;
1612 }
1613
1614 extern int
1615 sdioh_abort(sdioh_info_t *sd, uint fnum)
1616 {
1617 int ret;
1618
1619 sdstd_lock(sd);
1620 ret = sdstd_abort(sd, fnum);
1621 sdstd_unlock(sd);
1622
1623 return ret;
1624 }
1625
1626 int
1627 sdioh_start(sdioh_info_t *sd, int stage)
1628 {
1629 return SUCCESS;
1630 }
1631
1632 int
1633 sdioh_stop(sdioh_info_t *sd)
1634 {
1635 return SUCCESS;
1636 }
1637
1638 int
1639 sdioh_waitlockfree(sdioh_info_t *sd)
1640 {
1641 sdstd_waitlockfree(sd);
1642 return SUCCESS;
1643 }
1644
1645 static int
1646 sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg)
1647 {
1648 uint16 regval;
1649 uint retries;
1650 uint function = 0;
1651
1652 /* If no errors, we're done */
1653 if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
1654 return SUCCESS;
1655
1656 #ifdef BCMQT
1657 if (regval == 0xFFFF) {
1658 sd_err(("%s: Bogus SD_ErrorIntrStatus: 0x%x????\n", __FUNCTION__, regval));
1659 sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
1660 return SUCCESS;
1661 }
1662 #endif // endif
1663
1664 sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
1665 __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
1666 sdstd_rreg(sdioh_info, SD_PresentState)));
1667 sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
1668
1669 if (cmd == SDIOH_CMD_14) {
1670 if (regval & ERRINT_CMD_TIMEOUT_BIT) {
1671 regval &= ~ERRINT_CMD_TIMEOUT_BIT;
1672 }
1673 }
1674
1675 /* On command error, issue CMD reset */
1676 if (regval & ERRINT_CMD_ERRS) {
1677 sd_trace(("%s: issuing CMD reset\n", __FUNCTION__));
1678 sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
1679 for (retries = RETRIES_LARGE; retries; retries--) {
1680 if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
1681 break;
1682 #ifdef BCMQT
1683 if (retries != RETRIES_LARGE) {
1684 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
1685 }
1686 #endif /* BCMQT */
1687 }
1688 if (!retries) {
1689 sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
1690 }
1691 }
1692
1693 /* On data error, issue DAT reset */
1694 if (regval & ERRINT_DATA_ERRS) {
1695 if (regval & ERRINT_ADMA_BIT)
1696 sd_err(("%s:ADMAError: status:0x%x\n",
1697 __FUNCTION__, sdstd_rreg(sdioh_info, SD_ADMA_ErrStatus)));
1698 sd_trace(("%s: issuing DAT reset\n", __FUNCTION__));
1699 sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
1700 for (retries = RETRIES_LARGE; retries; retries--) {
1701 if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
1702 break;
1703 #ifdef BCMQT
1704 if (retries != RETRIES_LARGE) {
1705 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
1706 }
1707 #endif /* BCMQT */
1708 }
1709 if (!retries) {
1710 sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
1711 }
1712 }
1713
1714 /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */
1715 if (cmd == SDIOH_CMD_53)
1716 function = GFIELD(arg, CMD53_FUNCTION);
1717 else if (cmd == SDIOH_CMD_52) {
1718 if (GFIELD(arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR)
1719 function = GFIELD(arg, CMD52_FUNCTION);
1720 }
1721 if (function) {
1722 sd_trace(("%s: requesting abort for function %d after cmd %d\n",
1723 __FUNCTION__, function, cmd));
1724 sdstd_abort(sdioh_info, function);
1725 }
1726
1727 if (trap_errs)
1728 ASSERT(0);
1729
1730 return ERROR;
1731 }
1732
1733 /*
1734 * Private/Static work routines
1735 */
1736 static bool
1737 sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
1738 {
1739 int retries = RETRIES_LARGE;
1740 uchar regval;
1741
1742 if (!sd)
1743 return TRUE;
1744
1745 sdstd_lock(sd);
1746 /* Reset client card */
1747 if (client_reset && (sd->adapter_slot != -1)) {
1748 if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
1749 sd_err(("%s: Cannot write to card reg 0x%x\n",
1750 __FUNCTION__, SDIOD_CCCR_IOABORT));
1751 else
1752 sd->card_rca = 0;
1753 }
1754
1755 /* Reset host controller */
1756 if (host_reset) {
1757 regval = SFIELD(0, SW_RESET_ALL, 1);
1758 sdstd_wreg8(sd, SD_SoftwareReset, regval);
1759 do {
1760 sd_trace(("%s: waiting for reset\n", __FUNCTION__));
1761 #ifdef BCMQT
1762 if (retries != RETRIES_LARGE) {
1763 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
1764 }
1765 #endif /* BCMQT */
1766 } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
1767
1768 if (!retries) {
1769 sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
1770 sdstd_unlock(sd);
1771 return (FALSE);
1772 }
1773
1774 /* A reset should reset bus back to 1 bit mode */
1775 sd->sd_mode = SDIOH_MODE_SD1;
1776 sdstd_set_dma_mode(sd, sd->sd_dma_mode);
1777 }
1778 sdstd_unlock(sd);
1779 return TRUE;
1780 }
1781
1782 /* Disable device interrupt */
1783 void
1784 sdstd_devintr_off(sdioh_info_t *sd)
1785 {
1786 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1787 if (sd->use_client_ints) {
1788 sd->intmask &= ~CLIENT_INTR;
1789 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
1790 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
1791 }
1792 }
1793
1794 /* Enable device interrupt */
1795 void
1796 sdstd_devintr_on(sdioh_info_t *sd)
1797 {
1798 ASSERT(sd->lockcount == 0);
1799 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1800 if (sd->use_client_ints) {
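/* On pre-3.0 controllers, clearing and then restoring INTSTAT_CARD_INT in the
 * status-enable register below appears intended to re-arm the card interrupt
 * before it is unmasked (assumption; rationale inferred from the code).
 */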
1801 if (sd->version < HOST_CONTR_VER_3) {
1802 uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
1803 sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
1804 sdstd_wreg16(sd, SD_IntrStatusEnable, status);
1805 }
1806
1807 sd->intmask |= CLIENT_INTR;
1808 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
1809 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
1810 }
1811 }
1812
1813 #ifdef BCMSDYIELD
1814 /* Enable/disable other interrupts */
1815 void
1816 sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
1817 {
1818 if (err) {
1819 norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
1820 sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
1821 }
1822
1823 sd->intmask |= norm;
1824 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
1825 if (sd_forcerb)
1826 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
1827 }
1828
1829 void
1830 sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
1831 {
1832 if (err) {
1833 norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
1834 sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
1835 }
1836
1837 sd->intmask &= ~norm;
1838 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
1839 if (sd_forcerb)
1840 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
1841 }
1842 #endif /* BCMSDYIELD */
1843
1844 static int
1845 sdstd_host_init(sdioh_info_t *sd)
1846 {
1847 int num_slots, full_slot;
1848 uint8 reg8;
1849 uint32 card_ins;
1850 int slot, first_bar = 0;
1851 bool detect_slots = FALSE;
1852 uint bar;
1853
1854 /* Check for Arasan ID */
1855 if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) {
1856 sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__));
1857 sd->controller_type = SDIOH_TYPE_ARASAN_HDK;
1858 detect_slots = TRUE;
1859 /* Controller supports SDMA, so turn it on here. */
1860 sd->sd_dma_mode = DMA_MODE_SDMA;
1861 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_CYPRESS) {
1862 sd_info(("%s: Found Cypress 27xx Standard SDIO Host Controller\n", __FUNCTION__));
1863 sd->controller_type = SDIOH_TYPE_BCM27XX;
1864 detect_slots = FALSE;
1865 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
1866 sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
1867 sd->controller_type = SDIOH_TYPE_BCM27XX;
1868 detect_slots = FALSE;
1869 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) {
1870 sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__));
1871 sd->controller_type = SDIOH_TYPE_TI_PCIXX21;
1872 detect_slots = TRUE;
1873 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) {
1874 sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n",
1875 __FUNCTION__));
1876 sd->controller_type = SDIOH_TYPE_RICOH_R5C822;
1877 detect_slots = TRUE;
1878 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) {
1879 sd_info(("%s: JMicron Standard SDIO Host Controller\n",
1880 __FUNCTION__));
1881 sd->controller_type = SDIOH_TYPE_JMICRON;
1882 detect_slots = TRUE;
1883 } else {
1884 return ERROR;
1885 }
1886
1887 /*
1888 * Determine num of slots
1889 * Search each slot
1890 */
1891
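/* Slot Information register layout (SD Host Controller spec): bits [2:0] hold
 * the first BAR number and bits [6:4] hold the number of slots minus one,
 * hence the mask, shift and increment below.
 */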
1892 first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7;
1893 num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4;
1894 num_slots &= 7;
1895 num_slots++; /* map bits to num slots according to spec */
1896
1897 if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
1898 ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
1899 sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__));
1900 /* Set BAR0 Window to SDIOSTH core */
1901 OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
1902
1903 /* Set defaults particular to this controller. */
1904 detect_slots = TRUE;
1905 num_slots = 1;
1906 first_bar = 0;
1907
1908 /* Controller supports ADMA2, so turn it on here. */
1909 sd->sd_dma_mode = DMA_MODE_ADMA2;
1910 } else if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
1911 ((SDIOH_FPGA_ID << 16) | VENDOR_CYPRESS)) {
1912 sd_err(("%s: Found Cypress Standard SDIO Host Controller FPGA\n", __FUNCTION__));
1913 /* Set BAR0 Window to SDIOSTH core */
1914 OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
1915
1916 /* Set defaults particular to this controller. */
1917 detect_slots = TRUE;
1918 num_slots = 1;
1919 first_bar = 0;
1920
1921 /* Controller supports ADMA2, so turn it on here. */
1922 sd->sd_dma_mode = DMA_MODE_ADMA2;
1923 }
1924
1925 /* Map in each slot on the board and query it to see if a
1926 * card is inserted. Use the first populated slot found.
1927 */
1928 if (sd->mem_space) {
1929 sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
1930 sd->mem_space = NULL;
1931 }
1932
1933 full_slot = -1;
1934
1935 for (slot = 0; slot < num_slots; slot++) {
1936 bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
1937 #ifdef BCMQT
1938 /* PCIe 64-bit alignment */
1939 bar &= 0xfffffff8;
1940 #endif // endif
1941 sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
1942 (uintptr)bar, SDIOH_REG_WINSZ);
1943
1944 sd->adapter_slot = -1;
1945
1946 if (detect_slots) {
1947 card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
1948 } else {
1949 card_ins = TRUE;
1950 }
1951
1952 if (card_ins) {
1953 sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
1954 if (full_slot < 0)
1955 full_slot = slot;
1956 } else {
1957 sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
1958 }
1959
1960 if (sd->mem_space) {
1961 sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
1962 sd->mem_space = NULL;
1963 }
1964 }
1965
1966 if (full_slot < 0) {
1967 sd_err(("No slots on SDIO controller are populated\n"));
1968 return -1;
1969 }
1970
1971 bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
1972 #ifdef BCMQT
1973 /* PCIe 64-bit alignment */
1974 bar &= 0xfffffff8;
1975 #endif // endif
1976 sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
1977
1978 #ifdef BCMQT
1979 sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
1980 full_slot,
1981 (full_slot + first_bar),
1982 OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4) &
1983 0xfffffff8,
1984 sd->mem_space));
1985 #else
1986 sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
1987 full_slot,
1988 (full_slot + first_bar),
1989 OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
1990 sd->mem_space));
1991 #endif /* BCMQT */
1992
1993 sd->adapter_slot = full_slot;
1994
1995 sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
1996 switch (sd->version) {
1997 case 0:
1998 sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
1999 sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
2000 break;
2001 case 1:
2002 sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
2003 sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
2004 break;
2005 case 2:
2006 sd_err(("Host Controller version 3.0, Vendor Revision: 0x%02x\n",
2007 sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
2008 break;
2009 default:
2010 sd_err(("%s: Host Controller version 0x%02x not supported.\n",
2011 __FUNCTION__, sd->version));
2012 break;
2013 }
2014
2015 sd->caps = sdstd_rreg(sd, SD_Capabilities); /* Cache this for later use */
2016 /* MSB 32 bits of caps supported in sdio 3.0 */
2017 sd->caps3 = sdstd_rreg(sd, SD_Capabilities3); /* Cache this for later use */
2018 sd3_trace(("sd3: %s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
2019 sd3_trace(("sd3: %s: caps3: 0x%x\n", __FUNCTION__, sd->caps3));
2020 sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);
2021
2022 sd_info(("%s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
2023
2024 sdstd_set_dma_mode(sd, sd->sd_dma_mode);
2025
2026 sdstd_reset(sd, 1, 0);
2027
2028 /* Read SD4/SD1 mode */
2029 if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
2030 if (reg8 & SD4_MODE) {
2031 sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
2032 __FUNCTION__, reg8));
2033 }
2034 }
2035
2036 /* Default power on mode is SD1 */
2037 sd->sd_mode = SDIOH_MODE_SD1;
2038 sd->polled_mode = TRUE;
2039 sd->host_init_done = TRUE;
2040 sd->card_init_done = FALSE;
2041 sd->adapter_slot = full_slot;
2042
2043 if (sd_uhsimode == SD3CLKMODE_DISABLED) {
2044 sd->version = HOST_CONTR_VER_2;
2045 sd3_trace(("%s:forcing to SDIO HC 2.0\n", __FUNCTION__));
2046 }
2047
2048 if (sd->version == HOST_CONTR_VER_3) {
2049 /* read host ctrl 2 */
2050 uint16 reg16 = 0;
2051 sd3_trace(("sd3: %s: HC3: reading additional regs\n", __FUNCTION__));
2052
2053 reg16 = sdstd_rreg16(sd, SD3_HostCntrl2);
2054
2055 sd_info(("%s: HCtrl: 0x%x; HCtrl2:0x%x\n", __FUNCTION__, reg8, reg16));
2056 BCM_REFERENCE(reg16);
2057
2058 /* if HC supports 1.8V and one of the SDR/DDR modes, hc uhci support is PRESENT */
2059 if ((GFIELD(sd->caps, CAP_VOLT_1_8)) &&
2060 (GFIELD(sd->caps3, CAP3_SDR50_SUP) ||
2061 GFIELD(sd->caps3, CAP3_SDR104_SUP) ||
2062 GFIELD(sd->caps3, CAP3_DDR50_SUP)))
2063 sd->host_UHSISupported = 1;
2064 }
2065
2066 #ifdef BCMQT
2067 {
2068 uint32 intmask;
2069
2070 /* FIX: force interrupts with QT sdio20 host */
2071 /* pci cw [expr $def(configbase) +0x95] 1 2 */
2072 intmask = OSL_PCI_READ_CONFIG(sd->osh, PCI_INT_MASK, 4);
2073 intmask |= 0x0200;
2074 OSL_PCI_WRITE_CONFIG(sd->osh, PCI_INT_MASK, 4, intmask);
2075 }
2076 #endif // endif
2077 return (SUCCESS);
2078 }
2079 #define CMD5_RETRIES 1000
2080 static int
2081 get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
2082 {
2083 int retries, status;
2084
2085 /* Get the Card's Operation Condition. Occasionally the board
2086 * takes a while to become ready
2087 */
2088 retries = CMD5_RETRIES;
2089 do {
2090 *cmd_rsp = 0;
2091 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
2092 != SUCCESS) {
2093 sd_err(("%s: CMD5 failed\n", __FUNCTION__));
2094 return status;
2095 }
2096 sdstd_cmd_getrsp(sd, cmd_rsp, 1);
2097 if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
2098 sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
2099 #ifdef BCMQT
2100 if (retries != CMD5_RETRIES) {
2101 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
2102 }
2103 #endif /* BCMQT */
2104 } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
2105 if (!retries)
2106 return ERROR;
2107
2108 return (SUCCESS);
2109 }
2110
2111 static int
2112 sdstd_client_init(sdioh_info_t *sd)
2113 {
2114 uint32 cmd_arg, cmd_rsp;
2115 int status;
2116 uint8 fn_ints;
2117 uint32 regdata;
2118 uint16 powerstat = 0;
2119
2120 sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
2121
2122 /* Clear any pending ints */
2123 sdstd_wreg16(sd, SD_IntrStatus, 0x1fff);
2124 sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);
2125
2126 /* Enable both Normal and Error Status. This does not enable
2127 * interrupts, it only enables the status bits to
2128 * become 'live'
2129 */
2130
2131 if (!sd->host_UHSISupported)
2132 sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
2133 else
2134 {
2135 /* Enable status for the INT_x interrupts as well, but DO NOT enable
2136 * signalling yet [retuning enable happens later]
2137 */
2138 sdstd_wreg16(sd, SD_IntrStatusEnable, 0x0fff);
2139 }
2140 sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);
2141
2142 sdstd_wreg16(sd, SD_IntrSignalEnable, 0); /* Disable ints for now. */
2143
2144 if (sd->host_UHSISupported) {
2145 /* when HC is started for SDIO 3.0 mode, start in lowest voltage mode first. */
2146 powerstat = sdstd_start_power(sd, 1);
2147 if (SDIO_OCR_READ_FAIL == powerstat) {
2148 /* This could be because the device is 3.3V and possibly does
2149 * not have sdio3.0 support, so try the highest voltage
2150 */
2151 sd_err(("sdstd_start_power: legacy device: trying highest voltage\n"));
2152 sd_err(("%s failed\n", __FUNCTION__));
2153 return ERROR;
2154 } else if (TRUE != powerstat) {
2155 sd_err(("sdstd_start_power failed\n"));
2156 return ERROR;
2157 }
2158 } else
2159 if (TRUE != sdstd_start_power(sd, 0)) {
2160 sd_err(("sdstd_start_power failed\n"));
2161 return ERROR;
2162 }
2163
2164 if (sd->num_funcs == 0) {
2165 sd_err(("%s: No IO funcs!\n", __FUNCTION__));
2166 return ERROR;
2167 }
2168
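/* Enumeration sequence from here: CMD0 (SPI mode only), then CMD3 to obtain
 * the card's relative address (RCA), then CMD7 to select the card using that RCA.
 */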
2169 /* In SPI mode, issue CMD0 first */
2170 if (sd->sd_mode == SDIOH_MODE_SPI) {
2171 cmd_arg = 0;
2172 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
2173 != SUCCESS) {
2174 sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
2175 return status;
2176 }
2177 }
2178
2179 if (sd->sd_mode != SDIOH_MODE_SPI) {
2180 uint16 rsp6_status;
2181
2182 /* Card is operational. Ask it to send an RCA */
2183 cmd_arg = 0;
2184 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
2185 != SUCCESS) {
2186 sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
2187 return status;
2188 }
2189
2190 /* Verify the card status returned with the cmd response */
2191 sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
2192 rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
2193 if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
2194 GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
2195 GFIELD(rsp6_status, RSP6STAT_ERROR)) {
2196 sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
2197 __FUNCTION__, rsp6_status));
2198 return ERROR;
2199 }
2200
2201 /* Save the Card's RCA */
2202 sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
2203 sd_info(("RCA is 0x%x\n", sd->card_rca));
2204
2205 if (rsp6_status)
2206 sd_err(("raw status is 0x%x\n", rsp6_status));
2207
2208 /* Select the card */
2209 cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
2210 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
2211 != SUCCESS) {
2212 sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
2213 return status;
2214 }
2215 sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
2216 if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
2217 sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
2218 __FUNCTION__, cmd_rsp));
2219 return ERROR;
2220 }
2221 }
2222
2223 /* Disable default/power-up device Card Detect (CD) pull up resistor on DAT3
2224 * via CCCR bus interface control register. Set CD disable bit while leaving
2225 * others alone.
2226 */
2227 if (sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata) != SUCCESS) {
2228 sd_err(("Disabling card detect: read of device CCCR BICTRL register failed\n"));
2229 return ERROR;
2230 }
2231 regdata |= BUS_CARD_DETECT_DIS;
2232
2233 if (sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata) != SUCCESS) {
2234 sd_err(("Disabling card detect: write of device CCCR BICTRL register failed\n"));
2235 return ERROR;
2236 }
2237
2238 sdstd_card_enablefuncs(sd);
2239
2240 if (!sdstd_bus_width(sd, sd_sdmode)) {
2241 sd_err(("sdstd_bus_width failed\n"));
2242 return ERROR;
2243 }
2244
2245 set_client_block_size(sd, 1, sd_f1_blocksize);
2246 fn_ints = INTR_CTL_FUNC1_EN;
2247
2248 if (sd->num_funcs >= 2) {
2249 set_client_block_size(sd, 2, DEFAULT_F2_BLKSIZE /* BLOCK_SIZE_4328 */);
2250 fn_ints |= INTR_CTL_FUNC2_EN;
2251 }
2252
2253 /* Enable/Disable Client interrupts */
2254 /* Turn on here but disable at host controller? */
2255 if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
2256 (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
2257 sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
2258 return ERROR;
2259 }
2260
2261 if (sd_uhsimode != SD3CLKMODE_DISABLED) {
2262 /* Switch to a UHS-I clocking mode if both host and device support it */
2263 if (sdstd_3_clock_wrapper(sd) != SUCCESS) {
2264 sd_err(("sdstd_3_clock_wrapper failed\n"));
2265 return ERROR;
2266 }
2267 } else
2268 {
2269 if (sdstd_clock_wrapper(sd)) {
2270 sd_err(("sdstd_start_clock failed\n"));
2271 return ERROR;
2272 }
2273 }
2274 sd->card_init_done = TRUE;
2275
2276 return SUCCESS;
2277 }
2278
2279 static int
2280 sdstd_clock_wrapper(sdioh_info_t *sd)
2281 {
2282 sd_trace(("%s:Enter\n", __FUNCTION__));
2283 /* After configuring for High-Speed mode, set the desired clock rate. */
2284 sdstd_set_highspeed_mode(sd, (bool)sd_hiok);
2285
2286 if (FALSE == sdstd_start_clock(sd, (uint16)sd_divisor)) {
2287 sd_err(("sdstd_start_clock failed\n"));
2288 return ERROR;
2289 }
2290 return SUCCESS;
2291 }
2292
2293 static int
2294 sdstd_3_clock_wrapper(sdioh_info_t *sd)
2295 {
2296 int retclk = 0;
2297 sd_info(("%s: Enter\n", __FUNCTION__));
2298 if (sd->card_UHSI_voltage_Supported) {
2299 /* check if clk config requested is supported by both host and target. */
2300 retclk = sdstd_3_get_matching_uhsi_clkmode(sd, sd_uhsimode);
2301
2302 /* if no match for requested caps, try to get the max match possible */
2303 if (retclk == -1) {
2304 /* if auto enabled */
2305 if (sd3_autoselect_uhsi_max == 1) {
2306 retclk = sdstd_3_get_matching_uhsi_clkmode(sd, SD3CLKMODE_AUTO);
2307 /* still NO match */
2308 if (retclk == -1) {
2309 /* NO match with HC and card capabilities. Now try the
2310 * High speed/legacy mode if possible.
2311 */
2312
2313 sd_err(("%s: Not able to set requested clock\n",
2314 __FUNCTION__));
2315 return ERROR;
2316 }
2317 } else {
2318 /* means user doesn't want auto clock. So return ERROR */
2319 sd_err(("%s: Not able to set requested clock, Try "
2320 "auto mode\n", __FUNCTION__));
2321 return ERROR;
2322 }
2323 }
2324
2325 if (retclk != -1) {
2326 /* set the current clk to be selected clock */
2327 sd_uhsimode = retclk;
2328
2329 if (BCME_OK != sdstd_3_set_highspeed_uhsi_mode(sd, sd_uhsimode)) {
2330 sd_err(("%s: Not able to set requested clock\n", __FUNCTION__));
2331 return ERROR;
2332 }
2333 } else {
2334 /* try legacy mode */
2335 if (SUCCESS != sdstd_clock_wrapper(sd)) {
2336 sd_err(("sdstd_start_clock failed\n"));
2337 return ERROR;
2338 }
2339 }
2340 } else {
2341 sd_info(("%s: Legacy Mode Clock\n", __FUNCTION__));
2342 /* try legacy mode */
2343 if (SUCCESS != sdstd_clock_wrapper(sd)) {
2344 sd_err(("%s sdstd_clock_wrapper failed\n", __FUNCTION__));
2345 return ERROR;
2346 }
2347 }
2348 return SUCCESS;
2349 }
2350
2351 int
2352 sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode)
2353 {
2354 int status, lcount = 0, brr_count = 0;
2355 uint16 val1 = 0, bufready = 0;
2356 uint32 val2 = 0;
2357 uint8 phase_info_local = 0;
2358
2359 sd3_trace(("sd3: %s: Enter\n", __FUNCTION__));
2360 /* if (NOT SDR104) OR
2361 * (SDR_50 AND sdr50_tuning_reqd is NOT enabled)
2362 * return success, as tuning not reqd.
2363 */
2364 if (!sd->sd3_tuning_reqd) {
2365 sd_info(("%s: Tuning NOT reqd!\n", __FUNCTION__));
2366 return SUCCESS;
2367 }
2368
2369 /* execute tuning procedure */
2370
2371 /* enable Buffer ready status. [do not enable the interrupt right now] */
2372 /* Execute tuning */
2373 sd_trace(("sd3: %s: Execute tuning\n", __FUNCTION__));
2374 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2375 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 1);
2376 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2377
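/* Tuning loop: issue CMD19, wait for Buffer Read Ready, drain the tuning
 * block if the hardware does not auto-clear it, and repeat until the
 * controller clears Execute Tuning or a retry limit is hit.
 */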
2378 do {
2379 sd3_trace(("sd3: %s: cmd19 issue\n", __FUNCTION__));
2380 /* Issue cmd19 */
2381 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_19, 0))
2382 != SUCCESS) {
2383 sd_err(("%s: CMD19 failed\n", __FUNCTION__));
2384 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2385 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
2386 val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
2387 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2388 return status;
2389 }
2390
2391 /* wait for buffer read ready */
2392 brr_count = 0;
2393 do {
2394 bufready = sdstd_rreg16(sd, SD_IntrStatus);
2395
2396 if (GFIELD(bufready, INTSTAT_BUF_READ_READY))
2397 break;
2398
2399 /* delay after checking bufready because INTSTAT_BUF_READ_READY
2400 is most likely already set by the first check
2401 */
2402 OSL_DELAY(1);
2403 } while (++brr_count < CLKTUNING_MAX_BRR_RETRIES);
2404
2405 /* buffer read ready timedout */
2406 if (brr_count == CLKTUNING_MAX_BRR_RETRIES) {
2407 sd_err(("%s: TUNINGFAILED: BRR response timedout!\n",
2408 __FUNCTION__));
2409 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2410 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
2411 val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
2412 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2413 return ERROR;
2414 }
2415
2416 /* In response to CMD19 the card sends a 64-byte tuning block.
2417 * Current Aizyc HC h/w doesn't auto-clear those bytes,
2418 * so read the 64 bytes sent by the card.
2419 * Aizyc needs to implement an auto clear in hw.
2420 */
2421 if (sd3_sw_read_magic_bytes == TRUE)
2422 {
2423 uint8 l_cnt_1 = 0;
2424 uint32 l_val_1 = 0;
2425 for (l_cnt_1 = 0; l_cnt_1 < 16; l_cnt_1++) {
2426 l_val_1 = sdstd_rreg(sd, SD_BufferDataPort0);
2427 sd_trace(("%s:l_val_1 = 0x%x", __FUNCTION__, l_val_1));
2428 }
2429 BCM_REFERENCE(l_val_1);
2430 }
2431
2432 /* clear BuffReadReady int */
2433 bufready = SFIELD(bufready, INTSTAT_BUF_READ_READY, 1);
2434 sdstd_wreg16(sd, SD_IntrStatus, bufready);
2435
2436 /* wait before continuing */
2437 /* OSL_DELAY(PER_TRY_TUNING_DELAY_MS * 1000); */ /* Not required */
2438
2439 /* check execute tuning bit */
2440 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2441 if (!GFIELD(val1, HOSTCtrl2_EXEC_TUNING)) {
2442 /* done tuning, break from loop */
2443 break;
2444 }
2445
2446 /* max tuning iterations exceeded */
2447 if (lcount++ > MAX_TUNING_ITERS) {
2448 sd_err(("%s: TUNINGFAILED: Max tuning iterations "
2449 "exceeded!\n", __FUNCTION__));
2450 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2451 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
2452 val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
2453 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2454 return ERROR;
2455 }
2456 } while (1);
2457
2458 val2 = sdstd_rreg(sd, SD3_Tuning_Info_Register);
2459 phase_info_local = ((val2>>15)& 0x7);
2460 sd_info(("Phase passed info: 0x%x\n", (val2>>8)& 0x3F));
2461 sd_info(("Phase selected post tune: 0x%x\n", phase_info_local));
2462
2463 if (phase_info_local > SDSTD_MAX_TUNING_PHASE) {
2464 sd_err(("!!Phase selected:%x\n", phase_info_local));
2465 }
2466
2467 /* check sampling clk select */
2468 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2469 if (!GFIELD(val1, HOSTCtrl2_SAMPCLK_SEL)) {
2470 /* error in selecting clk */
2471 sd_err(("%s: TUNINGFAILED: SamplClkSel failed!\n", __FUNCTION__));
2472 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2473 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
2474 val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
2475 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2476 return ERROR;
2477 }
2478 /* done: */
2479 sd_info(("%s: TUNING Success!\n", __FUNCTION__));
2480 return SUCCESS;
2481 }
2482
2483 void
2484 sdstd_3_enable_retuning_int(sdioh_info_t *sd)
2485 {
2486 uint16 raw_int;
2487 unsigned long flags;
2488
2489 sdstd_os_lock_irqsave(sd, &flags);
2490 raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
2491 sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int | HC_INTR_RETUNING));
2492 /* Enable retuning status */
2493 raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
2494 sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int | HC_INTR_RETUNING));
2495 sdstd_os_unlock_irqrestore(sd, &flags);
2496 }
2497
2498 void
2499 sdstd_3_disable_retuning_int(sdioh_info_t *sd)
2500 {
2501 uint16 raw_int;
2502 unsigned long flags;
2503
2504 sdstd_os_lock_irqsave(sd, &flags);
2505 sd->intmask &= ~HC_INTR_RETUNING;
2506 raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
2507 sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int & (~HC_INTR_RETUNING)));
2508 /* Disable retuning status */
2509 raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
2510 sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int & (~HC_INTR_RETUNING)));
2511 sdstd_os_unlock_irqrestore(sd, &flags);
2512 }
2513
2514 bool
2515 sdstd_3_is_retuning_int_set(sdioh_info_t *sd)
2516 {
2517 uint16 raw_int;
2518
2519 raw_int = sdstd_rreg16(sd, SD_IntrStatus);
2520
2521 if (GFIELD(raw_int, INTSTAT_RETUNING_INT))
2522 return TRUE;
2523
2524 return FALSE;
2525 }
2526
2527 /*
2528 Assumption: sd3ClkMode has already been checked against both host and card
2529 capabilities before entering this function; use
2530 sdstd_3_get_matching_uhsi_clkmode() for that. VALID values for sd3ClkMode
2531 here: SD3CLKMODE_2, 3, 4 [0 and 1 are NOT supported as
2532 they are legacy].
2533 */
2534 static int
2535 sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode)
2536 {
2537 uint32 drvstrn;
2538 int status;
2539 uint8 hc_reg8;
2540 uint16 val1 = 0, presetval = 0;
2541 uint32 regdata;
2542
2543 sd3_trace(("sd3: %s:enter:clkmode:%d\n", __FUNCTION__, sd3ClkMode));
2544
2545 hc_reg8 = sdstd_rreg8(sd, SD_HostCntrl);
2546
2547 if (HOST_SDR_UNSUPP == sd->global_UHSI_Supp) {
2548 sd_err(("%s:Trying to set clk with unsupported global support\n", __FUNCTION__));
2549 return BCME_ERROR;
2550 }
2551
2552 /* get drvstrn [double check, as this was already done in
2553 sdstd_3_get_matching_uhsi_clkmode]
2554 */
2555 if (!sdstd_3_get_matching_drvstrn(sd, sd3ClkMode, &drvstrn, &presetval)) {
2556 sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset "
2557 "val:0x%x\n", __FUNCTION__, drvstrn, presetval));
2558 return BCME_SDIO_ERROR;
2559 }
2560
2561 /* also set driver type select in CCCR */
2562 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
2563 1, drvstrn)) != BCME_OK) {
2564 sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in card Failed!\n", __FUNCTION__));
2565 return BCME_SDIO_ERROR;
2566 }
2567
2568 /* ********** change Bus speed select in device */
2569 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
2570 1, &regdata)) != SUCCESS) {
2571 sd_err(("%s:FAILED 1\n", __FUNCTION__));
2572 return BCME_SDIO_ERROR;
2573 }
2574 sd_info(("Attempting to change BSS.current val:0x%x\n", regdata));
2575
2576 if (regdata & SDIO_SPEED_SHS) {
2577 sd_info(("Device supports High-Speed mode.\n"));
2578 /* clear existing BSS */
2579 regdata &= ~0xE;
2580
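/* In SDIO 3.0 the Bus Speed Select (BSS) field occupies bits [3:1] of this
 * CCCR register, so the requested UHS-I mode (SDR12=0 ... DDR50=4) is shifted
 * into place here.
 */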
2581 regdata |= (sd3ClkMode << 1);
2582
2583 sd_info(("Writing %08x to Card at %08x\n",
2584 regdata, SDIOD_CCCR_SPEED_CONTROL));
2585 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
2586 1, regdata)) != BCME_OK) {
2587 sd_err(("%s:FAILED 2\n", __FUNCTION__));
2588 return BCME_SDIO_ERROR;
2589 }
2590
2591 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
2592 1, &regdata)) != BCME_OK) {
2593 sd_err(("%s:FAILED 3\n", __FUNCTION__));
2594 return BCME_SDIO_ERROR;
2595 }
2596
2597 sd_info(("Read %08x from Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
2598 }
2599 else {
2600 sd_err(("Device does not support High-Speed Mode.\n"));
2601 }
2602
2603 /* SD Clock Enable = 0 */
2604 sdstd_wreg16(sd, SD_ClockCntrl,
2605 sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
2606
2607 /* set to HighSpeed mode */
2608 /* TBD: is this change to SD_HostCntrl reqd for UHSI? */
2609 hc_reg8 = SFIELD(hc_reg8, HOST_HI_SPEED_EN, 1);
2610 sdstd_wreg8(sd, SD_HostCntrl, hc_reg8);
2611
2612 /* set UHS Mode select in HC2 and also set preset */
2613 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2614 val1 = SFIELD(val1, HOSTCtrl2_UHSMODE_SEL, sd3ClkMode);
2615 if (TRUE != sd3_sw_override1) {
2616 val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 1);
2617 } else {
2618 /* set HC registers manually using the retrieved values */
2619 /* set drvstrn */
2620 val1 = SFIELD(val1, HOSTCtrl2_DRIVSTRENGTH_SEL,
2621 GFIELD(presetval, PRESET_DRIVR_SELECT));
2622 val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 0);
2623 }
2624
2625 /* finally write Hcontrol2 */
2626 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2627
2628 sd_err(("%s:HostCtrl2 final value:0x%x\n", __FUNCTION__, val1));
2629
2630 /* start clock : clk will be enabled inside. */
2631 if (FALSE == sdstd_start_clock(sd, GFIELD(presetval, PRESET_CLK_DIV))) {
2632 sd_err(("sdstd_start_clock failed\n"));
2633 return ERROR;
2634 }
2635
2636 /* execute first tuning procedure */
2637 if (!sd3_sw_override1) {
2638 if (SD3_TUNING_REQD(sd, sd3ClkMode)) {
2639 sd_err(("%s: Tuning start..\n", __FUNCTION__));
2640 sd->sd3_tuning_reqd = TRUE;
2641 /* TBD: first time: enabling INT's could be problem? */
2642 sdstd_3_start_tuning(sd);
2643 }
2644 else
2645 sd->sd3_tuning_reqd = FALSE;
2646 }
2647
2648 return BCME_OK;
2649 }
2650
2651 /* Check & do tuning if required */
2652 void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param)
2653 {
2654 int retries = 0;
2655
2656 if (!sd->sd3_tuning_disable && sd->sd3_tuning_reqd) {
2657 sd3_trace(("sd3: %s: tuning reqd\n", __FUNCTION__));
2658 if (tuning_param == CHECK_TUNING_PRE_DATA) {
2659 if (sd->sd3_tun_state == TUNING_ONGOING) {
2660 retries = RETRIES_SMALL;
2661 /* check if tuning is already going on */
2662 while ((GFIELD(sdstd_rreg(sd, SD3_HostCntrl2),
2663 HOSTCtrl2_EXEC_TUNING)) && retries--) {
2664 if (retries == RETRIES_SMALL)
2665 sd_err(("%s: Waiting for Tuning to complete\n",
2666 __FUNCTION__));
2667 }
2668
2669 if (!retries) {
2670 sd_err(("%s: Tuning wait timeout\n", __FUNCTION__));
2671 if (trap_errs)
2672 ASSERT(0);
2673 }
2674 } else if (sd->sd3_tun_state == TUNING_START) {
2675 /* check and start tuning if required. */
2676 sd3_trace(("sd3 : %s : Doing Tuning before Data Transfer\n",
2677 __FUNCTION__));
2678 sdstd_3_start_tuning(sd);
2679 }
2680 } else if (tuning_param == CHECK_TUNING_POST_DATA) {
2681 if (sd->sd3_tun_state == TUNING_START_AFTER_DAT) {
2682 sd3_trace(("sd3: %s: tuning start\n", __FUNCTION__));
2683 /* check and start tuning if required. */
2684 sdstd_3_start_tuning(sd);
2685 }
2686 }
2687 }
2688 }
2689 /* Need to run this function in interrupt-disabled context */
2690 bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd)
2691 {
2692 sd3_trace(("sd3: %s:\n", __FUNCTION__));
2693
2694 /* if already initiated, just return without anything */
2695 if ((sd->sd3_tun_state == TUNING_START) ||
2696 (sd->sd3_tun_state == TUNING_ONGOING) ||
2697 (sd->sd3_tun_state == TUNING_START_AFTER_DAT)) {
2698 /* do nothing */
2699 return FALSE;
2700 }
2701
2702 if (sd->sd3_dat_state == DATA_TRANSFER_IDLE) {
2703 sd->sd3_tun_state = TUNING_START; /* tuning to be started by the tasklet */
2704 return TRUE;
2705 } else {
2706 /* tuning to be started after finishing the existing data transfer */
2707 sd->sd3_tun_state = TUNING_START_AFTER_DAT;
2708 }
2709 return FALSE;
2710 }
2711
2712 int sdstd_3_get_data_state(sdioh_info_t *sd)
2713 {
2714 return sd->sd3_dat_state;
2715 }
2716
2717 void sdstd_3_set_data_state(sdioh_info_t *sd, int state)
2718 {
2719 sd->sd3_dat_state = state;
2720 }
2721
2722 int sdstd_3_get_tune_state(sdioh_info_t *sd)
2723 {
2724 return sd->sd3_tun_state;
2725 }
2726
2727 void sdstd_3_set_tune_state(sdioh_info_t *sd, int state)
2728 {
2729 sd->sd3_tun_state = state;
2730 }
2731
2732 uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd)
2733 {
2734 if (sd_tuning_period == CAP3_RETUNING_TC_OTHER) {
2735 return GFIELD(sd->caps3, CAP3_RETUNING_TC);
2736 } else {
2737 return (uint8)sd_tuning_period;
2738 }
2739 }
2740
2741 uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd)
2742 {
2743 return sd_uhsimode;
2744 }
2745
2746 /* check whether the card supports the driver_type corresponding to the driver_type
2747 in the preset value that will be selected by the requested UHSI mode
2748 input:
2749 clk mode: valid values: SD3CLKMODE_2_SDR50, SD3CLKMODE_3_SDR104,
2750 SD3CLKMODE_4_DDR50, SD3CLKMODE_AUTO
2751 outputs:
2752 return_val: TRUE; if a matching drvstrn for the given clkmode is
2753 found in both HC and card. otherwise, FALSE.
2754 [other outputs below valid ONLY if return_val is TRUE]
2755 drvstrn : driver strength read from CCCR.
2756 presetval: value of preset reg, corr to the clkmode.
2757 */
2758 static bool
2759 sdstd_3_get_matching_drvstrn(sdioh_info_t *sd, int sd3_requested_clkmode,
2760 uint32 *drvstrn, uint16 *presetval)
2761 {
2762 int status;
2763 uint8 presetreg;
2764 uint8 cccr_reqd_dtype_mask = 1;
2765
2766 sd3_trace(("sd3: %s:\n", __FUNCTION__));
2767
2768 if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
2769 /* CARD: get the card driver strength from cccr */
2770 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
2771 1, drvstrn)) != BCME_OK) {
2772 sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card "
2773 "Failed!\n", __FUNCTION__));
2774 return FALSE;
2775 }
2776 if (TRUE != sd3_sw_override1) {
2777 /* HOSTC: get the addr of preset register indexed by the clkmode */
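/* Assuming the standard SDHC 3.0 layout of 16-bit preset registers, the
 * SDR12/SDR25/SDR50/SDR104/DDR50 entries follow the init, default-speed and
 * high-speed entries, so the mode maps to SD3_PresetValStart + 6 + 2*mode.
 */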
2778 presetreg = SD3_PresetValStart +
2779 (2*sd3_requested_clkmode + 6);
2780 *presetval = sdstd_rreg16(sd, presetreg);
2781 } else {
2782 /* Note: +3 for mapping between SD3CLKMODE_xxx and presetval_sw_table */
2783 *presetval = presetval_sw_table[sd3_requested_clkmode + 3];
2784 }
2785 sd_err(("%s:reqCLK: %d, presetval: 0x%x\n",
2786 __FUNCTION__, sd3_requested_clkmode, *presetval));
2787
2788 cccr_reqd_dtype_mask <<= GFIELD(*presetval, PRESET_DRIVR_SELECT);
2789
2790 /* compare/match */
2791 if (!(cccr_reqd_dtype_mask & GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP))) {
2792 sd_err(("%s:cccr_reqd_dtype_mask and SDIO_BUS_DRVR_TYPE_CAP "
2793 "not matching!:reqd:0x%x, cap:0x%x\n", __FUNCTION__,
2794 cccr_reqd_dtype_mask, GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP)));
2795 return FALSE;
2796 } else {
2797 /* check if drive strength override is required. If so, set it first */
2798 if (*dhd_sdiod_uhsi_ds_override != DRVSTRN_IGNORE_CHAR) {
2799 int ds_offset = 0;
2800 uint32 temp = 0;
2801
2802 /* drvstrn to reflect the preset val: this is default */
2803 *drvstrn = GFIELD(*presetval, PRESET_DRIVR_SELECT);
2804
2805 /* now check override */
2806 ds_offset = (((int)DRVSTRN_MAX_CHAR -
2807 (int)(*dhd_sdiod_uhsi_ds_override)));
2808 if ((ds_offset >= 0) && (ds_offset <= MAX_DTS_INDEX)) {
2809 ds_offset = MAX_DTS_INDEX - ds_offset;
2810 sd_err(("%s:Drive strength override: %c, offset: "
2811 "%d, val: %d\n", __FUNCTION__,
2812 *dhd_sdiod_uhsi_ds_override,
2813 ds_offset, DTS_vals[ds_offset]));
2814 temp = SFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_SEL,
2815 DTS_vals[ds_offset]);
2816 sd_err(("%s:DrvStrn orig: 0x%x, modif: 0x%x\n",
2817 __FUNCTION__, *drvstrn, temp));
2818 *drvstrn = temp;
2819 } else {
2820 /* else case is default: use preset val */
2821 sd_err(("%s:override invalid: DrvStrn is from "
2822 "preset: 0x%x\n",
2823 __FUNCTION__, *drvstrn));
2824 }
2825 } else {
2826 sd_err(("%s:DrvStrn is from preset: 0x%x\n",
2827 __FUNCTION__, *drvstrn));
2828 }
2829 }
2830 } else {
2831 /* TBD check for sd3_requested_clkmode : -1 also. */
2832 sd_err(("%s: Automode not supported!\n", __FUNCTION__));
2833 return FALSE;
2834 }
2835 return TRUE;
2836 }
2837
2838 /* Returns a matching UHSI clk speed if one is found. If not, returns -1.
2839 Also, if sd3_requested_clkmode is -1, finds the closest maximum matching clk and returns it.
2840 */
2841 static int
2842 sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd, int sd3_requested_clkmode)
2843 {
2844 uint32 card_val_uhsisupp;
2845 uint8 speedmask = 1;
2846 uint32 drvstrn;
2847 uint16 presetval;
2848 int status;
2849
2850 sd3_trace(("sd3: %s:\n", __FUNCTION__));
2851
2852 sd->global_UHSI_Supp = HOST_SDR_UNSUPP;
2853
2854 /* for legacy/25MHz/50MHz bus speeds, no checks done here */
2855 if ((sd3_requested_clkmode == SD3CLKMODE_0_SDR12) ||
2856 (sd3_requested_clkmode == SD3CLKMODE_1_SDR25)) {
2857 sd->global_UHSI_Supp = HOST_SDR_12_25;
2858 return sd3_requested_clkmode;
2859 }
2860 /* get cap of card */
2861 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_UHSI_SUPPORT,
2862 1, &card_val_uhsisupp)) != BCME_OK) {
2863 sd_err(("%s:SDIOD_CCCR_UHSI_SUPPORT query failed!\n", __FUNCTION__));
2864 return -1;
2865 }
2866 sd_info(("%s:Read %08x from Card at %08x\n", __FUNCTION__,
2867 card_val_uhsisupp, SDIOD_CCCR_UHSI_SUPPORT));
2868
2869 if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
2870 /* Note: it is assumed that, following are executed when (sd3ClkMode >= 2) */
2871 speedmask <<= (sd3_requested_clkmode - SD3CLKMODE_2_SDR50);
2872
2873 /* check first about 3.0 HS CLK modes */
2874 if (!(GFIELD(sd->caps3, CAP3_30CLKCAP) & speedmask)) {
2875 sd_err(("%s:HC does not support req 3.0 UHSI mode. "
2876 "requested:%d; capable:0x%x\n", __FUNCTION__,
2877 sd3_requested_clkmode, GFIELD(sd->caps3, CAP3_30CLKCAP)));
2878 return -1;
2879 }
2880
2881 /* check first about 3.0 CARD CLK modes */
2882 if (!(GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP) & speedmask)) {
2883 sd_err(("%s:Card does not support req 3.0 UHSI mode. requested:%d; "
2884 "capable:0x%x\n", __FUNCTION__, sd3_requested_clkmode,
2885 GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP)));
2886 return -1;
2887 }
2888
2889 /* check whether the card supports the driver_type corresponding to the
2890 driver_type in the preset value that will be selected by the
2891 requested UHSI mode
2892 */
2893 if (!sdstd_3_get_matching_drvstrn(sd, sd3_requested_clkmode,
2894 &drvstrn, &presetval)) {
2895 sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset "
2896 "val:0x%x\n", __FUNCTION__, drvstrn, presetval));
2897 return -1;
2898 }
2899 /* success path. change the support variable accordingly */
2900 sd->global_UHSI_Supp = HOST_SDR_50_104_DDR;
2901 return sd3_requested_clkmode;
2902 } else {
2903 /* auto clk selection: get the highest clock capable by both card and HC */
2904 /* TBD TOBE DONE */
2905 /* sd->global_UHSI_Supp = TRUE; on success */
2906 return -1;
2907 }
2908 }
2909
2910 static int
2911 sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd)
2912 {
2913 int status;
2914 uint32 cmd_rsp = 0, presst;
2915 uint16 val1 = 0;
2916
2917 sd3_trace(("sd3: %s:\n", __FUNCTION__));
2918
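/* Signal voltage switch sequence (SD 3.0): issue CMD11, stop the SD clock,
 * check that DAT[3:0] are driven low, enable 1.8V signalling, wait, then
 * re-enable the clock and confirm DAT[3:0] return high.
 */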
2919 /* Issue cmd11 */
2920 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_11, 0))
2921 != SUCCESS) {
2922 sd_err(("%s: CMD11 failed\n", __FUNCTION__));
2923 return status;
2924 }
2925
2926 /* check response */
2927 sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
2928 if (
2929 GFIELD(cmd_rsp, RSP1_ERROR) || /* bit 19 */
2930 GFIELD(cmd_rsp, RSP1_ILLEGAL_CMD) || /* bit 22 */
2931 GFIELD(cmd_rsp, RSP1_COM_CRC_ERROR) || /* bit 23 */
2932 GFIELD(cmd_rsp, RSP1_CARD_LOCKED) /* bit 25 */ ) {
2933 sd_err(("%s: FAIL:CMD11: cmd_resp:0x%x\n", __FUNCTION__, cmd_rsp));
2934 return ERROR;
2935 }
2936
2937 /* SD Clock Enable = 0 */
2938 sdstd_wreg16(sd, SD_ClockCntrl,
2939 sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
2940
2941 /* check DAT[3..0] using Present State Reg. If not 0, error */
2942 presst = sdstd_rreg(sd, SD_PresentState);
2943 if (0 != GFIELD(presst, PRES_DAT_SIGNAL)) {
2944 sd_err(("%s: FAIL: PRESTT:0x%x\n", __FUNCTION__, presst));
2945 return ERROR;
2946 }
2947
2948 /* turn 1.8V sig enable in HC2 */
2949 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2950 val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
2951 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2952
2953 #ifdef BCMQT
2954 /* wait 10s for Zebu */
2955 OSL_DELAY(10 * 1000 * 1000);
2956 #else
2957 /* wait 5ms */
2958 OSL_DELAY(5 * 1000);
2959 #endif /* BCMQT */
2960
2961 /* check 1.8V sig enable in HC2. if cleared, error */
2962 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2963 if (!GFIELD(val1, HOSTCtrl2_1_8SIG_EN)) {
2964 sd_err(("%s: FAIL: HC2:1.8V_En:0x%x\n", __FUNCTION__, val1));
2965 return ERROR;
2966 }
2967
2968 /* SD Clock Enable = 1 */
2969 val1 = sdstd_rreg16(sd, SD_ClockCntrl);
2970 sdstd_wreg16(sd, SD_ClockCntrl, val1 | 0x4);
2971
2972 #ifdef BCMQT
2973 /* wait 5s for Zebu */
2974 OSL_DELAY(5 * 1000 * 1000);
2975 #else
2976 /* wait 1ms */
2977 OSL_DELAY(1 * 1000);
2978 #endif /* BCMQT */
2979
2980 /* check DAT[3..0] using Present State Reg. If not 0b1111, error */
2981 presst = sdstd_rreg(sd, SD_PresentState);
2982 if (0xf != GFIELD(presst, PRES_DAT_SIGNAL)) {
2983 sd_err(("%s: FAIL: PRESTT_FINAL:0x%x\n", __FUNCTION__, presst));
2984 return ERROR;
2985 }
2986
2987 return (SUCCESS);
2988 }
2989
2990 static int
2991 sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
2992 {
2993 uint32 regdata;
2994 int status;
2995 uint8 reg8;
2996
2997 uint32 drvstrn;
2998
2999 reg8 = sdstd_rreg8(sd, SD_HostCntrl);
3000
3001 if (HSMode == TRUE) {
3002 if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
3003 sd_err(("Host Controller does not support hi-speed mode.\n"));
3004 return BCME_ERROR;
3005 }
3006
3007 sd_info(("Attempting to enable High-Speed mode.\n"));
3008
3009 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3010 1, &regdata)) != SUCCESS) {
3011 return BCME_SDIO_ERROR;
3012 }
3013 if (regdata & SDIO_SPEED_SHS) {
3014 sd_info(("Device supports High-Speed mode.\n"));
3015
3016 regdata |= SDIO_SPEED_EHS;
3017
3018 sd_info(("Writing %08x to Card at %08x\n",
3019 regdata, SDIOD_CCCR_SPEED_CONTROL));
3020 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3021 1, regdata)) != BCME_OK) {
3022 return BCME_SDIO_ERROR;
3023 }
3024
3025 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3026 1, &regdata)) != BCME_OK) {
3027 return BCME_SDIO_ERROR;
3028 }
3029
3030 sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
3031
3032 reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);
3033
3034 sd_err(("High-speed clocking mode enabled.\n"));
3035 }
3036 else {
3037 sd_err(("Device does not support High-Speed Mode.\n"));
3038 reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
3039 }
3040 } else {
3041 /* Force off device bit */
3042 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3043 1, &regdata)) != BCME_OK) {
3044 return status;
3045 }
3046 if (regdata & SDIO_SPEED_EHS) {
3047 regdata &= ~SDIO_SPEED_EHS;
3048 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3049 1, regdata)) != BCME_OK) {
3050 return status;
3051 }
3052 }
3053
3054 sd_err(("High-speed clocking mode disabled.\n"));
3055 reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
3056 }
3057
3058 if ((sd->host_UHSISupported) && (sd->card_UHSI_voltage_Supported)) {
3059 /* also set the default driver strength in the card/HC [this is reqd because,
3060 if earlier we selected any other drv_strn, we need to reset it]
3061 */
3062 /* get the card driver strength from cccr */
3063 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
3064 1, &drvstrn)) != BCME_OK) {
3065 sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card "
3066 "Failed!\n", __FUNCTION__));
3067 return BCME_SDIO_ERROR;
3068 }
3069
3070 /* reset card drv strn */
3071 drvstrn = SFIELD(drvstrn, SDIO_BUS_DRVR_TYPE_SEL, 0);
3072
3073 /* set card drv strn */
3074 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
3075 1, drvstrn)) != BCME_OK) {
3076 sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in "
3077 "card Failed!\n", __FUNCTION__));
3078 return BCME_SDIO_ERROR;
3079 }
3080 }
3081
3082 sdstd_wreg8(sd, SD_HostCntrl, reg8);
3083
3084 return BCME_OK;
3085 }
3086
3087 /* Select DMA Mode:
3088 * If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
3089 * Otherwise, pick the selected mode if supported.
3090 * If not supported, use PIO mode.
3091 */
3092 static int
3093 sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
3094 {
3095 uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
3096 int8 prev_dma_mode = sd->sd_dma_mode;
3097
3098 switch (prev_dma_mode) {
3099 case DMA_MODE_AUTO:
3100 sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
3101 __FUNCTION__));
3102 if (GFIELD(sd->caps, CAP_ADMA2)) {
3103 sd->sd_dma_mode = DMA_MODE_ADMA2;
3104 dma_sel_bits = SDIOH_ADMA2_MODE;
3105 } else if (GFIELD(sd->caps, CAP_ADMA1)) {
3106 sd->sd_dma_mode = DMA_MODE_ADMA1;
3107 dma_sel_bits = SDIOH_ADMA1_MODE;
3108 } else if (GFIELD(sd->caps, CAP_DMA)) {
3109 sd->sd_dma_mode = DMA_MODE_SDMA;
3110 } else {
3111 sd->sd_dma_mode = DMA_MODE_NONE;
3112 }
3113 break;
3114 case DMA_MODE_NONE:
3115 sd->sd_dma_mode = DMA_MODE_NONE;
3116 break;
3117 case DMA_MODE_SDMA:
3118 if (GFIELD(sd->caps, CAP_DMA)) {
3119 sd->sd_dma_mode = DMA_MODE_SDMA;
3120 } else {
3121 sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
3122 sd->sd_dma_mode = DMA_MODE_NONE;
3123 }
3124 break;
3125 case DMA_MODE_ADMA1:
3126 if (GFIELD(sd->caps, CAP_ADMA1)) {
3127 sd->sd_dma_mode = DMA_MODE_ADMA1;
3128 dma_sel_bits = SDIOH_ADMA1_MODE;
3129 } else {
3130 sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
3131 sd->sd_dma_mode = DMA_MODE_NONE;
3132 }
3133 break;
3134 case DMA_MODE_ADMA2:
3135 if (GFIELD(sd->caps, CAP_ADMA2)) {
3136 sd->sd_dma_mode = DMA_MODE_ADMA2;
3137 dma_sel_bits = SDIOH_ADMA2_MODE;
3138 } else {
3139 sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
3140 sd->sd_dma_mode = DMA_MODE_NONE;
3141 }
3142 break;
3143 case DMA_MODE_ADMA2_64:
3144 sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
3145 sd->sd_dma_mode = DMA_MODE_NONE;
3146 break;
3147 default:
3148 sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__,
3149 prev_dma_mode));
3150 sd->sd_dma_mode = DMA_MODE_NONE;
3151 break;
3152 }
3153
3154 /* clear SysAddr, only used for SDMA */
3155 sdstd_wreg(sd, SD_SysAddr, 0);
3156
3157 sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode]));
3158
3159 reg8 = sdstd_rreg8(sd, SD_HostCntrl);
3160 reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
3161 sdstd_wreg8(sd, SD_HostCntrl, reg8);
3162 sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));
3163
3164 return BCME_OK;
3165 }
3166
3167 bool
3168 sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
3169 {
3170 uint rc, count;
3171 uint16 divisor;
3172 uint16 regdata;
3173 uint16 val1;
3174
3175 sd3_trace(("%s: starting clk\n", __FUNCTION__));
3176 /* turn off HC clock */
3177 sdstd_wreg16(sd, SD_ClockCntrl,
3178 sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */
3179
3180 /* Set divisor */
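/* SDHC 2.0 hosts take an 8-bit divided-clock value in clock control bits
 * [15:8]; 3.0 (UHS-I capable) hosts use a 10-bit divider, which is why the
 * value is positioned differently below.
 */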
3181 if (sd->host_UHSISupported) {
3182 divisor = (new_sd_divisor >> 1);
3183 } else
3184 {
3185 /* new logic: if divisor > 256, restrict to 256 */
3186 if (new_sd_divisor > 256)
3187 new_sd_divisor = 256;
3188 divisor = (new_sd_divisor >> 1) << 8;
3189 }
3190
3191 sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
3192 if (sd->host_UHSISupported) {
3193 /* get preset value and shift it so that
3194 * divisor bits 7-0 land in clkctrl bits 15-8 and bits 9-8 land in bits 7-6
3195 */
3196 val1 = divisor << 2;
3197 val1 &= 0x0ffc;
3198 val1 |= divisor >> 8;
3199 val1 <<= 6;
3200 sd_info(("divisor:%x;val1:%x\n", divisor, val1));
3201 sdstd_mod_reg16(sd, SD_ClockCntrl, 0xffC0, val1);
3202 } else
3203 {
3204 sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
3205 }
3206
3207 sd_err(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
3208 new_sd_divisor, divisor));
3209 if (new_sd_divisor > 0)
3210 sd_err(("%s:now, divided clk is: %d Hz\n",
3211 __FUNCTION__, GFIELD(sd->caps, CAP_BASECLK)*1000000/new_sd_divisor));
3212 else
3213 sd_err(("Using Primary Clock Freq of %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
3214 sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
3215 if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
3216 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3217 ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
3218 ((50 % new_sd_divisor) ? "KHz" : "MHz")));
3219 } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
3220 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3221 ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
3222 ((48 % new_sd_divisor) ? "KHz" : "MHz")));
3223 } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
3224 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3225 ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
3226 ((33 % new_sd_divisor) ? "KHz" : "MHz")));
3227 } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 31) {
3228 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3229 ((31 % new_sd_divisor) ? (31000 / new_sd_divisor) : (31 / new_sd_divisor)),
3230 ((31 % new_sd_divisor) ? "KHz" : "MHz")));
3231 } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 8) {
3232 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3233 ((8 % new_sd_divisor) ? (8000 / new_sd_divisor) : (8 / new_sd_divisor)),
3234 ((8 % new_sd_divisor) ? "KHz" : "MHz")));
3235 } else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
3236 } else {
3237 sd_err(("Need to determine divisor for %d MHz clocks\n",
3238 GFIELD(sd->caps, CAP_BASECLK)));
3239 sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
3240 return (FALSE);
3241 }
3242
3243 sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */
3244
3245 /* Wait for clock to stabilize */
3246 rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
3247 count = 0;
3248 while (!rc) {
3249 OSL_DELAY(1);
3250 sd_info(("Waiting for clock to become stable 0x%x\n", rc));
3251 rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
3252 count++;
3253 if (count > 10000) {
3254 sd_err(("%s:Clocks failed to stabilize after %u attempts\n",
3255 __FUNCTION__, count));
3256 return (FALSE);
3257 }
3258 }
3259 /* Turn on clock */
3260 sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
3261
3262 OSL_DELAY(20);
3263
3264 /* Set timeout control (adjust default value based on divisor).
3265 * Disabling timeout interrupts during setting is advised by host spec.
3266 */
3267 #ifdef BCMQT
3268 if (GFIELD(sd->caps, CAP_BASECLK) < 50)
3269 #endif // endif
3270 {
3271 uint toval;
3272
3273 toval = sd_toctl;
3274 divisor = new_sd_divisor;
3275
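/* The timeout exponent is reduced by one for each factor of two in the
 * divisor, i.e. the data timeout count is scaled down in step with the SD
 * clock division.
 */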
3276 while (toval && !(divisor & 1)) {
3277 toval -= 1;
3278 divisor >>= 1;
3279 }
3280
3281 regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
3282 sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
3283 sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
3284 sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
3285 }
3286 #ifdef BCMQT
3287 else {
3288 sd_info(("%s: Resetting err int control\n", __FUNCTION__));
3289 regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
3290 sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
3291 }
3292 #endif // endif
3293 OSL_DELAY(2);
3294
3295 sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
3296
3297 return TRUE;
3298 }
3299
3300 uint16
3301 sdstd_start_power(sdioh_info_t *sd, int volts_req)
3302 {
3303 char *s;
3304 uint32 cmd_arg;
3305 uint32 cmd_rsp;
3306 uint8 pwr = 0;
3307 int volts = 0;
3308 uint16 val1;
3309 uint16 init_divider = 0;
3310 uint8 baseclk = 0;
3311 bool selhighest = (volts_req == 0) ? TRUE : FALSE;
3312
3313 /* reset the card uhsi volt support to false */
3314 sd->card_UHSI_voltage_Supported = FALSE;
3315
3316 /* Ensure a power on reset by turning off bus power in case it happened to
3317 * be on already. (This might happen if driver doesn't unload/clean up correctly,
3318 * crash, etc.) Leave off for 100ms to make sure the power off isn't
3319 * ignored/filtered by the device. Note we can't skip this step if the power is
3320 * off already since we don't know how long it has been off before starting
3321 * the driver.
3322 */
3323 sdstd_wreg8(sd, SD_PwrCntrl, 0);
3324 sd_info(("Turning off VDD/bus power briefly (100ms) to ensure reset\n"));
3325 OSL_DELAY(100000);
3326
3327 /* For selecting highest available voltage, start from lowest and iterate */
3328 if (!volts_req)
3329 volts_req = 1;
3330
3331 s = NULL;
3332
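/* 'volts' below is the SD Bus Voltage Select encoding of the host Power
 * Control register: 5 (0b101) = 1.8V, 6 (0b110) = 3.0V, 7 (0b111) = 3.3V.
 */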
3333 if (volts_req == 1) {
3334 if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
3335 volts = 5;
3336 s = "1.8";
3337 if (FALSE == selhighest)
3338 goto voltsel;
3339 else
3340 volts_req++;
3341 } else {
3342 sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts));
3343 volts_req++;
3344 }
3345 }
3346
3347 if (volts_req == 2) {
3348 if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
3349 volts = 6;
3350 s = "3.0";
3351 if (FALSE == selhighest)
3352 goto voltsel;
3353 else volts_req++;
3354 } else {
3355 sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts));
3356 volts_req++;
3357 }
3358 }
3359
3360 if (volts_req == 3) {
3361 if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
3362 volts = 7;
3363 s = "3.3";
3364 } else {
3365 if ((FALSE == selhighest) || (volts == 0)) {
3366 sd_err(("HC doesn't support any voltage! error!\n"));
3367 return FALSE;
3368 }
3369 }
3370 }
3371
3372 voltsel:
3373 pwr = SFIELD(pwr, PWR_VOLTS, volts);
3374 pwr = SFIELD(pwr, PWR_BUS_EN, 1);
3375 sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
3376 sd_info(("Setting Bus Power to %s Volts\n", s));
3377 BCM_REFERENCE(s);
3378
3379 if ((sd->version == HOST_CONTR_VER_3) && (volts == 5)) {
3380 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
3381 val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
3382 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
3383 }
3384
3385 /* Wait for 500ms for power to stabilize. Some designs have reset IC's
3386 * which can hold reset low for close to 300ms. In addition there can
3387 * be ramp time for VDD and/or VDDIO which might be provided from a LDO.
3388 * For these reasons we need a pretty conservative delay here to have
3389 * predictable reset behavior in the face of an unknown design.
3390 */
3391 OSL_DELAY(500000);
3392
3393 baseclk = GFIELD(sd->caps, CAP_BASECLK);
3394 sd_info(("%s:baseclk: %d MHz\n", __FUNCTION__, baseclk));
3395 /* for 3.0, find divisor */
3396 if (sd->host_UHSISupported) {
3397 /* ToDo : Dynamic modification of preset value table based on base clk */
3398 sd3_trace(("sd3: %s: checking divisor\n", __FUNCTION__));
3399 if (GFIELD(sd->caps3, CAP3_CLK_MULT) != 0) {
3400 sd_err(("%s:Possible error: CLK Mul 1 CLOCKING NOT supported!\n",
3401 __FUNCTION__));
3402 return FALSE;
3403 } else {
3404 /* calculate the divisor that yields ~400 KHz. */
3405 init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
3406 /* make it a multiple of 2. */
3407 init_divider += (init_divider & 0x1);
3408 sd_err(("%s:divider used for init:%d\n",
3409 __FUNCTION__, init_divider));
3410 }
3411 } else {
3412 /* Note: sd_divisor assumes that SDIO Base CLK is 50MHz. */
3413 int final_freq_based_on_div = 50/sd_divisor;
3414 if (baseclk > 50)
3415 sd_divisor = baseclk/final_freq_based_on_div;
3416 /* TBD: merge both SDIO 2.0 and 3.0 to share same divider logic */
3417 init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
3418 /* find next power of 2 */
3419 NEXT_POW2(init_divider);
3420 sd_err(("%s:NONUHSI: divider used for init:%d\n",
3421 __FUNCTION__, init_divider));
3422 }
3423
3424 /* Start at ~400KHz clock rate for initialization */
3425 if (!sdstd_start_clock(sd, init_divider)) {
3426 sd_err(("%s: sdstd_start_clock failed\n", __FUNCTION__));
3427 return FALSE;
3428 }
3429
3430 /* Get the Card's Operation Condition. Occasionally the board
3431 * takes a while to become ready
3432 */
3433 cmd_arg = 0;
3434 cmd_rsp = 0;
3435 if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
3436 sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__));
3437 /* No need to reset as not sure in what state the card is. */
3438 return SDIO_OCR_READ_FAIL;
3439 }
3440
3441 sd_info(("cmd_rsp = 0x%x\n", cmd_rsp));
3442 sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
3443 sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
3444 sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
3445 sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
3446
3447 /* Verify that the card supports I/O mode */
3448 if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
3449 sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
3450 return ERROR;
3451 }
3452 sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
3453
3454 /* Examine voltage: Arasan only supports 3.3 volts,
3455 * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
3456 */
3457
3458 if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
3459 sd_err(("This client does not support 3.3 volts!\n"));
3460 return ERROR;
3461 }
3462 sd_info(("Leaving bus power at 3.3 Volts\n"));
3463
3464 cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
3465 /* if HC uhsi supported and card voltage set is 3.3V then switch to 1.8V */
3466 if ((sd->host_UHSISupported) && (volts == 5)) {
3467 /* set S18R also */
3468 cmd_arg = SFIELD(cmd_arg, CMD5_S18R, 1);
3469 }
3470 cmd_rsp = 0;
3471 get_ocr(sd, &cmd_arg, &cmd_rsp);
3472 sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
3473
3474 if ((sd->host_UHSISupported)) {
3475 /* card responded with s18A => card supports sdio3.0,do tuning proc */
3476 if (GFIELD(cmd_rsp, RSP4_S18A) == 1) {
3477 if (sdstd_3_sigvoltswitch_proc(sd)) {
3478 /* continue with legacy way of working */
3479 sd_err(("%s: voltage switch not done. error, stopping\n",
3480 __FUNCTION__));
3481 /* How to gracefully proceed here? */
3482 return FALSE;
3483 } else {
3484 sd->card_UHSI_voltage_Supported = TRUE;
3485 sd_err(("%s: voltage switch SUCCESS!\n", __FUNCTION__));
3486 }
3487 } else {
3488 /* This could happen in two cases:
3489 * 1) The card is NOT SDIO 3.0. Note that
3490 * card_UHSI_voltage_Supported is already false.
3491 * 2) The card is SDIO 3.0 but is already running at 1.8V.
3492 * In that case the host controller's signalling voltage
3493 * would also need to be switched, i.e.:
3494 * sd->card_UHSI_voltage_Supported = TRUE;
3495 * turn on 1.8V signalling enable in HC2:
3496 * val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
3497 * val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
3498 * sdstd_wreg16(sd, SD3_HostCntrl2, val1);
3499 */
3500 sd_info(("%s: Not sdio3.0: host_UHSISupported: %d; HC volts=%d\n",
3501 __FUNCTION__, sd->host_UHSISupported, volts));
3502 }
3503 } else {
3504 sd_info(("%s: Legacy [non sdio3.0] HC\n", __FUNCTION__));
3505 }
3506
3507 return TRUE;
3508 }
3509
3510 bool
3511 sdstd_bus_width(sdioh_info_t *sd, int new_mode)
3512 {
3513 uint32 regdata;
3514 int status;
3515 uint8 reg8;
3516
3517 sd_trace(("%s\n", __FUNCTION__));
3518 if (sd->sd_mode == new_mode) {
3519 sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
3520 /* Could exit, but continue just in case... */
3521 }
3522
3523 /* Set client side via reg 0x7 in CCCR */
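/* Per the SDIO spec, CCCR Bus Interface Control (0x07) bits [1:0] select the
 * bus width (00b = 1-bit, 10b = 4-bit); BUS_SD_DATA_WIDTH_MASK/SD4_MODE below
 * are assumed to encode exactly those bits.
 */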
3524 if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
3525 return (bool)status;
3526 regdata &= ~BUS_SD_DATA_WIDTH_MASK;
3527 if (new_mode == SDIOH_MODE_SD4) {
3528 sd_info(("Changing to SD4 Mode\n"));
3529 regdata |= SD4_MODE;
3530 } else if (new_mode == SDIOH_MODE_SD1) {
3531 sd_info(("Changing to SD1 Mode\n"));
3532 } else {
3533 sd_err(("SPI Mode not supported by Standard Host Controller\n"));
3534 }
3535
3536 if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
3537 return (bool)status;
3538
3539 if (sd->host_UHSISupported) {
3540 uint32 card_asyncint = 0;
3541 uint16 host_asyncint = 0;
3542
3543 if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_INTR_EXTN, 1,
3544 &card_asyncint)) != SUCCESS) {
3545 sd_err(("%s:INTR EXT getting failed!, ignoring\n", __FUNCTION__));
3546 } else {
3547 host_asyncint = sdstd_rreg16(sd, SD3_HostCntrl2);
3548
3549 /* check if supported by host and card */
3550 if ((regdata & SD4_MODE) &&
3551 (GFIELD(card_asyncint, SDIO_BUS_ASYNCINT_CAP)) &&
3552 (GFIELD(sd->caps, CAP_ASYNCINT_SUP))) {
3553 /* set enable async int in card */
3554 card_asyncint = SFIELD(card_asyncint, SDIO_BUS_ASYNCINT_SEL, 1);
3555
3556 if ((status = sdstd_card_regwrite (sd, 0,
3557 SDIOD_CCCR_INTR_EXTN, 1, card_asyncint)) != SUCCESS)
3558 sd_err(("%s:INTR EXT setting failed!, ignoring\n",
3559 __FUNCTION__));
3560 else {
3561 /* set enable async int in host */
3562 host_asyncint = SFIELD(host_asyncint,
3563 HOSTCtrl2_ASYINT_EN, 1);
3564 sdstd_wreg16(sd, SD3_HostCntrl2, host_asyncint);
3565 }
3566 } else {
3567 sd_err(("%s:INTR EXT NOT supported by either host or"
3568 "card!, ignoring\n", __FUNCTION__));
3569 }
3570 }
3571 }
3572
3573 /* Set host side via Host reg */
3574 reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
3575 if (new_mode == SDIOH_MODE_SD4)
3576 reg8 |= SD4_MODE;
3577 sdstd_wreg8(sd, SD_HostCntrl, reg8);
3578
3579 sd->sd_mode = new_mode;
3580
3581 return TRUE;
3582 }
3583
3584 static int
3585 sdstd_driver_init(sdioh_info_t *sd)
3586 {
3587 sd_trace(("%s\n", __FUNCTION__));
3588 sd->sd3_tuning_reqd = FALSE;
3589 sd->sd3_tuning_disable = FALSE;
3590 if ((sdstd_host_init(sd)) != SUCCESS) {
3591 return ERROR;
3592 }
3593
3594 /* Give WL_reset before sending CMD5 to dongle for Revx SDIO3 HC's */
3595 if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && (sd->version == HOST_CONTR_VER_3))
3596 {
3597 sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x8);
3598 OSL_DELAY(sd_delay_value);
3599 sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x0);
3600 OSL_DELAY(500000);
3601 }
3602
3603 if (sdstd_client_init(sd) != SUCCESS) {
3604 return ERROR;
3605 }
3606
3607 /* if the global cap matched and is SDR 104/50 [if 50 it is reqd] enable tuning. */
3608 if ((TRUE != sd3_sw_override1) && SD3_TUNING_REQD(sd, sd_uhsimode)) {
3609 sd->sd3_tuning_reqd = TRUE;
3610
3611 /* init OS structs for tuning */
3612 sdstd_3_osinit_tuning(sd);
3613
3614 /* enable HC tuning interrupt OR timer based on tuning method */
3615 if (GFIELD(sd->caps3, CAP3_RETUNING_MODES)) {
3616 /* enable both RTReq and timer */
3617 sd->intmask |= HC_INTR_RETUNING;
3618 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
3619 #ifdef BCMSDYIELD
3620 if (sd_forcerb)
3621 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
3622 #endif /* BCMSDYIELD */
3623 }
3624 }
3625
3626 return SUCCESS;
3627 }
3628
3629 static int
3630 sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
3631 {
3632 /* read 24 bits and return valid 17 bit addr */
3633 int i;
3634 uint32 scratch, regdata;
3635 uint8 *ptr = (uint8 *)&scratch;
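/* The CIS pointer is stored little-endian in three consecutive byte
 * registers and is assembled a byte at a time below; the CIS area spans
 * 0x001000-0x017FFF, hence only the lower 17 bits are significant.
 */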
3636 for (i = 0; i < 3; i++) {
3637 if ((sdstd_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
3638 sd_err(("%s: Can't read!\n", __FUNCTION__));
3639
3640 *ptr++ = (uint8) regdata;
3641 regaddr++;
3642 }
3643 /* Only the lower 17-bits are valid */
3644 scratch = ltoh32(scratch);
3645 scratch &= 0x0001FFFF;
3646 return (scratch);
3647 }
3648
3649 static int
3650 sdstd_card_enablefuncs(sdioh_info_t *sd)
3651 {
3652 int status;
3653 uint32 regdata;
3654 uint32 fbraddr;
3655 uint8 func;
3656
3657 sd_trace(("%s\n", __FUNCTION__));
3658
3659 /* Get the Card's common CIS address */
3660 sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
3661 sd->func_cis_ptr[0] = sd->com_cis_ptr;
3662 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
3663
3664 /* Get the Card's function CIS (for each function) */
3665 for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
3666 func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
3667 sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
3668 sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
3669 __FUNCTION__, func, sd->func_cis_ptr[func]));
3670 }
3671
3672 /* Enable function 1 on the card */
3673 regdata = SDIO_FUNC_ENABLE_1;
3674 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
3675 return status;
3676
3677 return SUCCESS;
3678 }
3679
3680 /* Read client card reg */
3681 static int
3682 sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
3683 {
3684 int status;
3685 uint32 cmd_arg;
3686 uint32 rsp5;
3687
3688 cmd_arg = 0;
3689
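/* Single-byte accesses (and any function-0 access) use CMD52, which returns
 * the data directly in the R5 response; larger reads fall through to CMD53
 * byte mode and are drained from the buffer data port below.
 */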
3690 if ((func == 0) || (regsize == 1)) {
3691 cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
3692 cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
3693 cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
3694 cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
3695 cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
3696
3697 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
3698 != SUCCESS)
3699 return status;
3700
3701 sdstd_cmd_getrsp(sd, &rsp5, 1);
3702 if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
3703 sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
3704 __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
3705 }
3706
3707 if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
3708 sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
3709 __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
3710
3711 if (GFIELD(rsp5, RSP5_STUFF))
3712 sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
3713 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
3714 *data = GFIELD(rsp5, RSP5_DATA);
3715
3716 sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
3717 } else {
3718 cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
3719 cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
3720 cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
3721 cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
3722 cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
3723 cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
3724
3725 sd->data_xfer_count = regsize;
3726
3727 /* sdstd_cmd_issue() returns with the command complete bit
3728 * in the ISR already cleared
3729 */
3730 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
3731 != SUCCESS)
3732 return status;
3733
3734 sdstd_cmd_getrsp(sd, &rsp5, 1);
3735
3736 if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
3737 sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
3738 __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
3739
3740 if (GFIELD(rsp5, RSP5_STUFF))
3741 sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
3742 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
3743
3744 if (sd->polled_mode) {
3745 volatile uint16 int_reg;
3746 int retries = RETRIES_LARGE;
3747
3748 /* Wait for Read Buffer to become ready */
3749 do {
3750 sdstd_os_yield(sd);
3751 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
3752 #ifdef BCMQT
3753 if (retries != RETRIES_LARGE) {
3754 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
3755 }
3756 #endif /* BCMQT */
3757 } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
3758
3759 if (!retries) {
3760 sd_err(("%s: Timeout on Buf_Read_Ready: "
3761 "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
3762 __FUNCTION__, int_reg,
3763 sdstd_rreg16(sd, SD_ErrorIntrStatus),
3764 sdstd_rreg(sd, SD_PresentState)));
3765 sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
3766 return (ERROR);
3767 }
3768
3769 /* Have Buffer Ready, so clear it and read the data */
3770 sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
3771 if (regsize == 2)
3772 *data = sdstd_rreg16(sd, SD_BufferDataPort0);
3773 else
3774 *data = sdstd_rreg(sd, SD_BufferDataPort0);
3775
3776 sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
3777 /* Check Status.
3778 * After the data is read, the Transfer Complete bit should be on
3779 */
3780 retries = RETRIES_LARGE;
3781 do {
3782 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
3783 #ifdef BCMQT
3784 if (retries != RETRIES_LARGE) {
3785 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
3786 }
3787 #endif /* BCMQT */
3788 } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
3789
3790 /* Check for any errors from the data phase */
3791 if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
3792 return ERROR;
3793
3794 if (!retries) {
3795 sd_err(("%s: Timeout on xfer complete: "
3796 "intr 0x%04x err 0x%04x state 0x%08x\n",
3797 __FUNCTION__, int_reg,
3798 sdstd_rreg16(sd, SD_ErrorIntrStatus),
3799 sdstd_rreg(sd, SD_PresentState)));
3800 return (ERROR);
3801 }
3802
3803 sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
3804 }
3805 }
3806 if (sd->polled_mode) {
3807 if (regsize == 2)
3808 *data &= 0xffff;
3809 }
3810 return SUCCESS;
3811 }
3812
3813 bool
3814 check_client_intr(sdioh_info_t *sd)
3815 {
3816 uint16 raw_int, cur_int, old_int;
3817
3818 raw_int = sdstd_rreg16(sd, SD_IntrStatus);
3819 cur_int = raw_int & sd->intmask;
3820
3821 if (!cur_int) {
3822 /* Not an error -- might share interrupts... */
3823 return FALSE;
3824 }
3825
3826 if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
3827 unsigned long flags;
3828
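/* The card interrupt is level-sensitive, so mask it at the host while the
 * client handler runs (the handler is expected to clear the source on the
 * card); it is re-enabled below once the handler returns.
 */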
3829 sdstd_os_lock_irqsave(sd, &flags);
3830 old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
3831 sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));
3832 sdstd_os_unlock_irqrestore(sd, &flags);
3833
3834 if (sd->client_intr_enabled && sd->use_client_ints) {
3835 sd->intrcount++;
3836 ASSERT(sd->intr_handler);
3837 ASSERT(sd->intr_handler_arg);
3838 (sd->intr_handler)(sd->intr_handler_arg);
3839 } else {
3840 sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
3841 __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
3842 }
3843 sdstd_os_lock_irqsave(sd, &flags);
3844 old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
3845 sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 1));
3846 sdstd_os_unlock_irqrestore(sd, &flags);
3847 } else {
3848 /* Local interrupt: disable, set flag, and save intrstatus */
3849 sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
3850 sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
3851 sd->local_intrcount++;
3852 sd->got_hcint = TRUE;
3853 sd->last_intrstatus = cur_int;
3854 }
3855
3856 return TRUE;
3857 }
3858
3859 void
3860 sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
3861 {
3862 uint16 int_reg, err_reg;
3863 int retries = RETRIES_LARGE;
3864
3865 do {
3866 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
3867 err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
3868 #ifdef BCMQT
3869 if (retries != RETRIES_LARGE) {
3870 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
3871 }
3872 #endif /* BCMQT */
3873 } while (--retries && !(int_reg & norm) && !(err_reg & err));
3874
3875 norm |= sd->intmask;
3876 if (err_reg & err)
3877 norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
3878 sd->last_intrstatus = int_reg & norm;
3879 }
3880
3881 /* write a client register */
3882 static int
3883 sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
3884 {
3885 int status;
3886 uint32 cmd_arg, rsp5, flags;
3887
3888 cmd_arg = 0;
3889
3890 if ((func == 0) || (regsize == 1)) {
3891 cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
3892 cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
3893 cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
3894 cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
3895 cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
3896 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
3897 != SUCCESS)
3898 return status;
3899
3900 sdstd_cmd_getrsp(sd, &rsp5, 1);
3901 flags = GFIELD(rsp5, RSP5_FLAGS);
3902 if (flags && (flags != 0x10))
3903 sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n",
3904 __FUNCTION__, flags));
3905 }
3906 else {
3907 cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
3908 cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
3909 cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
3910 cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
3911 cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
3912 cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
3913
3914 sd->data_xfer_count = regsize;
3915
3916 /* sdstd_cmd_issue() returns with the command complete bit
3917 * in the ISR already cleared
3918 */
3919 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
3920 != SUCCESS)
3921 return status;
3922
3923 sdstd_cmd_getrsp(sd, &rsp5, 1);
3924
3925 if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
3926 sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
3927 __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS)));
3928 if (GFIELD(rsp5, RSP5_STUFF))
3929 sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
3930 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
3931
3932 if (sd->polled_mode) {
3933 uint16 int_reg;
3934 int retries = RETRIES_LARGE;
3935
3936 /* Wait for Write Buffer to become ready */
3937 do {
3938 #ifdef BCMQT
3939 if (retries != RETRIES_LARGE) {
3940 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
3941 }
3942 #endif /* BCMQT */
3943 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
3944 } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
3945
3946 if (!retries) {
3947 sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
3948 "errint: 0x%x PresentState 0x%x\n",
3949 __FUNCTION__, int_reg,
3950 sdstd_rreg16(sd, SD_ErrorIntrStatus),
3951 sdstd_rreg(sd, SD_PresentState)));
3952 sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
3953 return (ERROR);
3954 }
3955 /* Clear Write Buf Ready bit */
3956 int_reg = 0;
3957 int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
3958 sdstd_wreg16(sd, SD_IntrStatus, int_reg);
3959
3960 /* At this point we have Buffer Ready, so write the data */
3961 if (regsize == 2)
3962 sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
3963 else
3964 sdstd_wreg(sd, SD_BufferDataPort0, data);
3965
3966 /* Wait for Transfer Complete */
3967 retries = RETRIES_LARGE;
3968 do {
3969 #ifdef BCMQT
3970 if (retries != RETRIES_LARGE) {
3971 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
3972 }
3973 #endif /* BCMQT */
3974 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
3975 } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
3976
3977 /* Check for any errors from the data phase */
3978 if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
3979 return ERROR;
3980
3981 if (retries == 0) {
3982 sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
3983 "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
3984 __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
3985 int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
3986 sd->r_cnt, sd->t_cnt));
3987 }
3988 /* Clear the status bits */
3989 sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
3990 }
3991 }
3992 return SUCCESS;
3993 }
3994
3995 void
3996 sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
3997 {
3998 int rsp_count;
3999 int respaddr = SD_Response0;
4000
4001 if (count > 4)
4002 count = 4;
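/* Responses occupy at most four 32-bit registers (SD_Response0..3), spaced
 * 4 bytes apart, hence the clamp above.
 */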
4003
4004 for (rsp_count = 0; rsp_count < count; rsp_count++) {
4005 *rsp_buffer++ = sdstd_rreg(sd, respaddr);
4006 respaddr += 4;
4007 }
4008 }
4009
4010 /*
4011 Note: options: 0 - default
4012 1 - tuning option: this command is issued as part of the
4013 tuning procedure, so there is no need to check the start-tuning function.
4014 */
4015 static int
4016 sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
4017 {
4018 uint16 cmd_reg;
4019 int retries;
4020 uint32 cmd_arg;
4021 uint16 xfer_reg = 0;
4022
4023 if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
4024 ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
4025 sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
4026 return ERROR;
4027 }
4028
4029 retries = RETRIES_SMALL;
4030 while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) {
4031 if (retries == RETRIES_SMALL)
4032 sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
4033 __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
4034 #ifdef BCMQT
4035 else {
4036 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
4037 }
4038 #endif /* BCMQT */
4039 }
4040 if (!retries) {
4041 sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
4042 if (trap_errs)
4043 ASSERT(0);
4044 return ERROR;
4045 }
4046
4047 cmd_reg = 0;
4048 switch (cmd) {
4049 case SDIOH_CMD_0: /* Set Card to Idle State - No Response */
4050 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4051 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
4052 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4053 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4054 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4055 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4056 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4057 break;
4058
4059 case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */
4060 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4061 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4062 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4063 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4064 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4065 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4066 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4067 break;
4068
4069 case SDIOH_CMD_5: /* Send Operation condition - Response R4 */
4070 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4071 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4072 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4073 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4074 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4075 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4076 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4077 break;
4078
4079 case SDIOH_CMD_7: /* Select card - Response R1 */
4080 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4081 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4082 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4083 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4084 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4085 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4086 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4087 break;
4088
4089 case SDIOH_CMD_14: /* eSD Sleep - Response R1 */
4090 case SDIOH_CMD_11: /* Select card - Response R1 */
4091 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4092 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4093 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4094 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4095 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4096 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4097 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4098 break;
4099
4100 case SDIOH_CMD_15: /* Set card to inactive state - Response None */
4101 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4102 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
4103 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4104 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4105 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4106 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4107 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4108 break;
4109
4110 case SDIOH_CMD_19: /* clock tuning - Response R1 */
4111 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4112 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4113 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4114 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4115 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
4116 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4117 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4118 /* Host controller reads 64 byte magic pattern from card
4119 * Hence Direction = 1 ( READ )
4120 */
4121 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
4122 break;
4123
4124 case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */
4125
4126 sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
4127 __FUNCTION__,
4128 GFIELD(arg, CMD52_FUNCTION),
4129 GFIELD(arg, CMD52_REG_ADDR),
4130 GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
4131 GFIELD(arg, CMD52_DATA)));
4132
4133 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4134 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4135 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4136 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4137 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4138 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4139 break;
4140
4141 case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */
4142
4143 sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
4144 __FUNCTION__,
4145 GFIELD(arg, CMD53_FUNCTION),
4146 GFIELD(arg, CMD53_REG_ADDR),
4147 GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
4148 GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
4149 GFIELD(arg, CMD53_BYTE_BLK_CNT),
4150 GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));
4151
4152 cmd_arg = arg;
4153 xfer_reg = 0;
4154
4155 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4156 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4157 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4158 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
4159 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4160 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4161
4162 use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);
4163
4164 if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
4165 uint16 blocksize;
4166 uint16 blockcount;
4167 int func;
4168
4169 ASSERT(sdioh_info->sd_blockmode);
4170
4171 func = GFIELD(cmd_arg, CMD53_FUNCTION);
4172 blocksize = MIN((int)sdioh_info->data_xfer_count,
4173 sdioh_info->client_block_size[func]);
4174 blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
4175
4176 /* data_xfer_cnt is already setup so that for multiblock mode,
4177 * it is the entire buffer length. For non-block or single block,
4178 * it is < 64 bytes
4179 */
4180 if (use_dma) {
4181 switch (sdioh_info->sd_dma_mode) {
4182 case DMA_MODE_SDMA:
4183 sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
4184 __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr),
4185 (uint32)sdioh_info->dma_phys));
4186 sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
4187 break;
4188 case DMA_MODE_ADMA1:
4189 case DMA_MODE_ADMA2:
4190 sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
4191 #ifdef BCMSDIOH_TXGLOM
4192 /* multi-descriptor is currently used only for hc3 */
4193 if ((sdioh_info->glom_info.count != 0) &&
4194 (sdioh_info->txglom_mode == SDPCM_TXGLOM_MDESC)) {
4195 uint32 i = 0;
4196 for (i = 0;
4197 i < sdioh_info->glom_info.count-1;
4198 i++) {
4199 glom_buf_t *glom_info;
4200 glom_info = &(sdioh_info->glom_info);
4201 sd_create_adma_descriptor(sdioh_info,
4202 i,
4203 glom_info->dma_phys_arr[i],
4204 glom_info->nbytes[i],
4205 ADMA2_ATTRIBUTE_VALID |
4206 ADMA2_ATTRIBUTE_ACT_TRAN);
4207 }
4208
4209 sd_create_adma_descriptor(sdioh_info,
4210 i,
4211 sdioh_info->glom_info.dma_phys_arr[i],
4212 sdioh_info->glom_info.nbytes[i],
4213 ADMA2_ATTRIBUTE_VALID |
4214 ADMA2_ATTRIBUTE_END |
4215 ADMA2_ATTRIBUTE_INT |
4216 ADMA2_ATTRIBUTE_ACT_TRAN);
4217 } else
4218 #endif /* BCMSDIOH_TXGLOM */
4219 {
4220 sd_create_adma_descriptor(sdioh_info, 0,
4221 sdioh_info->dma_phys, blockcount*blocksize,
4222 ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
4223 ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
4224 }
4225 /* Dump descriptor if DMA debugging is enabled. */
4226 if (sd_msglevel & SDH_DMA_VAL) {
4227 sd_dump_adma_dscr(sdioh_info);
4228 }
4229
4230 sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
4231 sdioh_info->adma2_dscr_phys);
4232 break;
4233 default:
4234 sd_err(("%s: unsupported DMA mode %d.\n",
4235 __FUNCTION__, sdioh_info->sd_dma_mode));
4236 break;
4237 }
4238 }
4239
4240 sd_trace(("%s: Setting block count %d, block size %d bytes\n",
4241 __FUNCTION__, blockcount, blocksize));
4242 sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
4243 sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);
4244
4245 xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
4246
4247 if (sdioh_info->client_block_size[func] != blocksize)
4248 set_client_block_size(sdioh_info, func, blocksize);
4249
4250 if (blockcount > 1) {
4251 xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
4252 xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
4253 xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
4254 } else {
4255 xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
4256 xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
4257 xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
4258 }
4259
4260 if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
4261 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
4262 else
4263 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
4264
4265 retries = RETRIES_SMALL;
4266 while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
4267 PRES_DAT_INHIBIT) && --retries) {
4268 sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
4269 __FUNCTION__, cmd));
4270 #ifdef BCMQT
4271 if (retries != RETRIES_SMALL) {
4272 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
4273 }
4274 #endif /* BCMQT */
4275 }
4276 if (!retries) {
4277 sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
4278 if (trap_errs)
4279 ASSERT(0);
4280 return ERROR;
4281 }
4282
4283 /* Consider deferring this write to the comment below "Deferred Write" */
4284 sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
4285
4286 } else { /* Non block mode */
4287 uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
4288 /* The byte/block count field only has 9 bits,
4289 * so, to do a 512-byte bytemode transfer, this
4290 * field will contain 0, but we need to tell the
4291 * controller we're transferring 512 bytes.
4292 */
4293 if (bytes == 0) bytes = 512;
4294
4295 if (use_dma)
4296 sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
4297
4298 /* PCI: Transfer Mode register 0x0c */
4299 xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
4300 xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
4301 if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
4302 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
4303 else
4304 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
4305 /* See table 2-8 Host Controller spec ver 1.00 */
4306 xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Dont care */
4307 xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
4308
4309 sdstd_wreg16(sdioh_info, SD_BlockSize, bytes);
4310
4311 sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
4312
4313 retries = RETRIES_SMALL;
4314 while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
4315 PRES_DAT_INHIBIT) && --retries)
4316 sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
4317 __FUNCTION__, cmd));
4318 if (!retries) {
4319 sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
4320 if (trap_errs)
4321 ASSERT(0);
4322 return ERROR;
4323 }
4324
4325 /* Consider deferring this write to the comment below "Deferred Write" */
4326 sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
4327 }
4328 break;
4329
4330 default:
4331 sd_err(("%s: Unknown command\n", __FUNCTION__));
4332 return ERROR;
4333 }
4334
4335 if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
4336 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4337 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4338 }
4339
4340 /* Setup and issue the SDIO command */
4341 sdstd_wreg(sdioh_info, SD_Arg0, arg);
4342
4343 /* Deferred Write
4344 * Consider deferring the two writes above until this point in the code.
4345 * The following would do one 32 bit write.
4346 *
4347 * {
4348 * uint32 tmp32 = cmd_reg << 16;
4349 * tmp32 |= xfer_reg;
4350 * sdstd_wreg(sdioh_info, SD_TransferMode, tmp32);
4351 * }
4352 */
4353
4354 /* Alternate to Deferred Write START */
4355
4356 /* In response to CMD19 card sends 64 byte magic pattern.
4357 * So SD_BlockSize = 64 & SD_BlockCount = 1
4358 */
4359 if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19) {
4360 sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
4361 sdstd_wreg16(sdioh_info, SD_BlockSize, 64);
4362 sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
4363 }
4364 sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);
4365
4366 /* Alternate to Deferred Write END */
4367
4368 /* If we are in polled mode, wait for the command to complete.
4369 * In interrupt mode, return immediately. The calling function will
4370 * know that the command has completed when the CMDATDONE interrupt
4371 * is asserted
4372 */
4373 if (sdioh_info->polled_mode) {
4374 uint16 int_reg = 0;
4375 retries = RETRIES_LARGE;
4376
4377 /* For CMD19 no need to wait for cmd completion */
4378 if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19)
4379 return SUCCESS;
4380
4381 do {
4382 int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
4383 sdstd_os_yield(sdioh_info);
4384 #ifdef BCMQT
4385 if (retries != RETRIES_LARGE) {
4386 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
4387 }
4388 #endif /* BCMQT */
4389 } while (--retries &&
4390 (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
4391 (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
4392
4393 if (!retries) {
4394 sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
4395 "error stat 0x%x state 0x%x\n",
4396 __FUNCTION__, int_reg,
4397 sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
4398 sdstd_rreg(sdioh_info, SD_PresentState)));
4399
4400 /* Attempt to reset CMD line when we get a CMD timeout */
4401 sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
4402 retries = RETRIES_LARGE;
4403 do {
4404 sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
4405 #ifdef BCMQT
4406 if (retries != RETRIES_LARGE) {
4407 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
4408 }
4409 #endif /* BCMQT */
4410 } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
4411 SW_RESET_CMD)) && retries--);
4412
4413 if (!retries) {
4414 sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
4415 }
4416
4417 if (trap_errs)
4418 ASSERT(0);
4419 return (ERROR);
4420 }
4421
4422 /* Clear Command Complete interrupt */
4423 int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
4424 sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);
4425
4426 /* Check for Errors */
4427 if (sdstd_check_errs(sdioh_info, cmd, arg)) {
4428 if (trap_errs)
4429 ASSERT(0);
4430 return ERROR;
4431 }
4432 }
4433 return SUCCESS;
4434 }
4435
4436 static int
4437 sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
4438 {
4439 int status;
4440 uint32 cmd_arg;
4441 uint32 rsp5;
4442 uint16 int_reg, int_bit;
4443 uint flags;
4444 int num_blocks, blocksize;
4445 bool local_blockmode, local_dma;
4446 bool read = rw == SDIOH_READ ? 1 : 0;
4447 bool local_yield = FALSE;
4448 #ifdef BCMSDIOH_TXGLOM
4449 uint32 i;
4450 uint8 *localbuf = NULL;
4451 #endif // endif
4452 #ifdef BCMQT
4453 int retries;
4454 #endif // endif
4455
4456 ASSERT(nbytes);
4457
4458 cmd_arg = 0;
4459
4460 sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
4461 __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
4462
4463 if (read) sd->r_cnt++; else sd->t_cnt++;
4464
4465 local_blockmode = sd->sd_blockmode;
4466 local_dma = USE_DMA(sd);
4467
4468 #ifdef BCMSDIOH_TXGLOM
4469 /* If multiple buffers are there, then calculate the nbytes from that */
4470 if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
4471 uint32 ii;
4472 nbytes = 0;
4473 for (ii = 0; ii < sd->glom_info.count; ii++) {
4474 nbytes += sd->glom_info.nbytes[ii];
4475 }
4476 ASSERT(nbytes <= sd->alloced_dma_size);
4477 }
4478 #endif // endif
4479
4480 /* Don't bother with block mode on small xfers */
4481 if (nbytes < sd->client_block_size[func]) {
4482 sd_data(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n",
4483 nbytes, sd->client_block_size[func]));
4484 local_blockmode = FALSE;
4485 local_dma = FALSE;
4486 #ifdef BCMSDIOH_TXGLOM
4487 /* In glommed case, create a single pkt from multiple pkts */
4488 if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
4489 uint32 offset = 0;
4490 localbuf = (uint8 *)MALLOC(sd->osh, nbytes);
4491 data = (uint32 *)localbuf;
4492 for (i = 0; i < sd->glom_info.count; i++) {
4493 bcopy(sd->glom_info.dma_buf_arr[i],
4494 ((uint8 *)data + offset),
4495 sd->glom_info.nbytes[i]);
4496 offset += sd->glom_info.nbytes[i];
4497 }
4498 }
4499 #endif // endif
4500 }
4501
4502 if (local_blockmode) {
4503 blocksize = MIN(sd->client_block_size[func], nbytes);
4504 num_blocks = nbytes/blocksize;
4505 cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
4506 cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
4507 } else {
4508 num_blocks = 1;
4509 blocksize = nbytes;
4510 cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
4511 cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
4512 }
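/* Note: in block mode nbytes is expected to be a whole multiple of blocksize;
 * any remainder would be dropped by the integer division above.
 */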
4513
4514 if (local_dma && !read) {
4515 #ifdef BCMSDIOH_TXGLOM
4516 if ((func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
4517 /* In case of hc ver 2 DMA_MAP may not work properly due to 4K alignment
4518 * requirements. So copying pkt to 4K aligned pre-allocated pkt.
4519 * Total length should not cross the pre-alloced memory size
4520 */
4521 if (sd->txglom_mode == SDPCM_TXGLOM_CPY) {
4522 uint32 total_bytes = 0;
4523 for (i = 0; i < sd->glom_info.count; i++) {
4524 bcopy(sd->glom_info.dma_buf_arr[i],
4525 (uint8 *)sd->dma_buf + total_bytes,
4526 sd->glom_info.nbytes[i]);
4527 total_bytes += sd->glom_info.nbytes[i];
4528 }
4529 sd_sync_dma(sd, read, total_bytes);
4530 }
4531 } else
4532 #endif /* BCMSDIOH_TXGLOM */
4533 {
4534 bcopy(data, sd->dma_buf, nbytes);
4535 sd_sync_dma(sd, read, nbytes);
4536 }
4537 }
4538
4539 if (fifo)
4540 cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);
4541 else
4542 cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
4543
4544 cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
4545 cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
4546 if (read)
4547 cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
4548 else
4549 cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
4550
4551 sd->data_xfer_count = nbytes;
4552
4553 /* sdstd_cmd_issue() returns with the command complete bit
4554 * in the ISR already cleared
4555 */
4556 if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
4557 sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
4558 return status;
4559 }
4560
4561 sdstd_cmd_getrsp(sd, &rsp5, 1);
4562
4563 if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
4564 sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
4565 "numblocks %d, blocksize %d\n",
4566 __FUNCTION__, nbytes, local_dma, local_blockmode, read, num_blocks, blocksize));
4567
4568 if (flags & 1)
4569 sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
4570 "bytes %d dma %d\n",
4571 __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
4572 GFIELD(cmd_arg, CMD53_BLK_MODE)));
4573 if (flags & 0x8)
4574 sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
4575
4576 sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
4577 __FUNCTION__, flags));
4578 if (trap_errs)
4579 ASSERT(0);
4580 return ERROR;
4581 }
4582
4583 if (GFIELD(rsp5, RSP5_STUFF))
4584 sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
4585 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
4586
4587 #ifdef BCMSDYIELD
4588 local_yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
4589 #endif // endif
4590
4591 if (!local_dma) {
4592 int bytes, ii;
4593 uint32 tmp;
4594
4595 for (ii = 0; ii < num_blocks; ii++) {
4596 int words;
4597
4598 /* Decide which status bit we're waiting for */
4599 if (read)
4600 int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
4601 else
4602 int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
4603
4604 /* If not on, wait for it (or for xfer error) */
4605 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4606 #ifdef BCMQT
4607 retries = RETRIES_LARGE;
4608 while (!(int_reg & int_bit) && --retries) {
4609 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
4610 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4611 }
4612 #endif // endif
4613 if (!(int_reg & int_bit)) {
4614 status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS,
4615 local_yield, &int_reg);
4616 switch (status) {
4617 case -1:
4618 sd_err(("%s: pio interrupted\n", __FUNCTION__));
4619 return ERROR;
4620 case -2:
4621 sd_err(("%s: pio timeout waiting for interrupt\n",
4622 __FUNCTION__));
4623 return ERROR;
4624 }
4625 }
4626 /* Confirm we got the bit w/o error */
4627 if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
4628 sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
4629 "errint: 0x%x PresentState 0x%x\n",
4630 __FUNCTION__, read ? "Read" : "Write", int_reg,
4631 sdstd_rreg16(sd, SD_ErrorIntrStatus),
4632 sdstd_rreg(sd, SD_PresentState)));
4633 sdstd_dumpregs(sd);
4634 sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
4635 return (ERROR);
4636 }
4637
4638 /* Clear Buf Ready bit */
4639 sdstd_wreg16(sd, SD_IntrStatus, int_bit);
4640
4641 /* At this point we have Buffer Ready, write the data 4 bytes at a time */
4642 for (words = blocksize/4; words; words--) {
4643 if (read)
4644 *data = sdstd_rreg(sd, SD_BufferDataPort0);
4645 else
4646 sdstd_wreg(sd, SD_BufferDataPort0, *data);
4647 data++;
4648 }
4649
4650 bytes = blocksize % 4;
4651
4652 /* If no leftover bytes, go to next block */
4653 if (!bytes)
4654 continue;
4655
4656 switch (bytes) {
4657 case 1:
4658 /* R/W 8 bits */
4659 if (read)
4660 *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0));
4661 else
4662 sdstd_wreg8(sd, SD_BufferDataPort0,
4663 (uint8)(*(data++) & 0xff));
4664 break;
4665 case 2:
4666 /* R/W 16 bits */
4667 if (read)
4668 *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
4669 else
4670 sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++)));
4671 break;
4672 case 3:
4673 /* R/W 24 bits:
4674 * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23]
4675 */
4676 if (read) {
4677 tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
4678 tmp |= ((uint32)(sdstd_rreg8(sd,
4679 SD_BufferDataPort1)) << 16);
4680 *(data++) = tmp;
4681 } else {
4682 tmp = *(data++);
4683 sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff);
4684 sdstd_wreg8(sd, SD_BufferDataPort1,
4685 (uint8)((tmp >> 16) & 0xff));
4686 }
4687 break;
4688 default:
4689 sd_err(("%s: Unexpected bytes leftover %d\n",
4690 __FUNCTION__, bytes));
4691 ASSERT(0);
4692 break;
4693 }
4694 }
4695 } /* End PIO processing */
4696
4697 /* Wait for Transfer Complete or Transfer Error */
4698 int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1);
4699
4700 /* If not on, wait for it (or for xfer error) */
4701 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4702 #ifdef BCMQT
4703 retries = RETRIES_LARGE;
4704 while (!(int_reg & int_bit) && --retries) {
4705 OSL_SLEEP(SDSTD_WAIT_TIME_MS);
4706 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4707 }
4708 #endif // endif
4709 if (!(int_reg & int_bit)) {
4710 status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, local_yield, &int_reg);
4711 switch (status) {
4712 case -1:
4713 sd_err(("%s: interrupted\n", __FUNCTION__));
4714 return ERROR;
4715 case -2:
4716 sd_err(("%s: timeout waiting for interrupt\n", __FUNCTION__));
4717 return ERROR;
4718 }
4719 }
4720
4721 /* Check for any errors from the data phase */
4722 if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
4723 return ERROR;
4724
4725 /* May have gotten a software timeout if not blocking? */
4726 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4727 if (!(int_reg & int_bit)) {
4728 sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, "
4729 "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n",
4730 __FUNCTION__, read ? "R" : "W", local_dma,
4731 sdstd_rreg(sd, SD_PresentState), int_reg,
4732 sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes,
4733 sd->r_cnt, sd->t_cnt));
4734 sdstd_dumpregs(sd);
4735 return ERROR;
4736 }
4737
4738 /* Clear the status bits */
4739 int_reg = int_bit;
4740 if (local_dma) {
4741 /* DMA Complete */
4742 /* Reads in particular don't have DMA_COMPLETE set */
4743 int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1);
4744 }
4745 sdstd_wreg16(sd, SD_IntrStatus, int_reg);
4746
4747 /* Fetch data */
4748 if (local_dma && read) {
4749 sd_sync_dma(sd, read, nbytes);
4750 bcopy(sd->dma_buf, data, nbytes);
4751 }
4752 #ifdef BCMSDIOH_TXGLOM
4753 if (localbuf)
4754 MFREE(sd->osh, localbuf, nbytes);
4755 #endif // endif
4756 return SUCCESS;
4757 }
4758
4759 static int
4760 set_client_block_size(sdioh_info_t *sd, int func, int block_size)
4761 {
4762 int base;
4763 int err = 0;
4764
4765 if (func == 1)
4766 block_size = MIN(sd_f1_blocksize, block_size);
4767
4768 sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
4769 sd->client_block_size[func] = block_size;
4770
4771 /* Set the block size in the SDIO Card register */
4772 base = func * SDIOD_FBR_SIZE;
4773 err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
4774 if (!err) {
4775 err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1,
4776 (block_size >> 8) & 0xff);
4777 }
4778
4779 /* Do not set the block size in the SDIO Host register, that
4780 * is func dependent and will get done on an individual
4781 * transaction basis
4782 */
4783
4784 return (err ? BCME_SDIO_ERROR : 0);
4785 }
4786
4787 /* Reset and re-initialize the device */
4788 int
4789 sdioh_sdio_reset(sdioh_info_t *si)
4790 {
4791 uint8 hreg;
4792
4793 /* Reset the attached device (use slower clock for safety) */
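/* e.g. assuming a 50 MHz base clock, a divider of 128 gives ~390 KHz */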
4794 if (!sdstd_start_clock(si, 128)) {
4795 sd_err(("set clock failed!\n"));
4796 return ERROR;
4797 }
4798 sdstd_reset(si, 0, 1);
4799
4800 /* Reset portions of the host state accordingly */
4801 hreg = sdstd_rreg8(si, SD_HostCntrl);
4802 hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0);
4803 hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0);
4804 si->sd_mode = SDIOH_MODE_SD1;
4805
4806 /* Reinitialize the card */
4807 si->card_init_done = FALSE;
4808 return sdstd_client_init(si);
4809 }
4810
4811 static void
4812 sd_map_dma(sdioh_info_t *sd)
4813 {
4814
4815 int alloced;
4816 void *va;
4817 uint dma_buf_size = SD_PAGE;
4818
4819 #ifdef BCMSDIOH_TXGLOM
4820 /* There is no alignment requirement for HC3 */
4821 if ((sd->version == HOST_CONTR_VER_3) && sd_txglom) {
4822 /* Max glom packet length is 64KB */
4823 dma_buf_size = SD_PAGE * 16;
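/* SD_PAGE is 4 KB (1 << SD_PAGE_BITS), so 16 pages cover the 64 KB maximum */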
4824 }
4825 #endif // endif
4826
4827 alloced = 0;
4828 if ((va = DMA_ALLOC_CONSISTENT(sd->osh, dma_buf_size, SD_PAGE_BITS, &alloced,
4829 &sd->dma_start_phys, 0x12)) == NULL) {
4830 sd->sd_dma_mode = DMA_MODE_NONE;
4831 sd->dma_start_buf = 0;
4832 sd->dma_buf = (void *)0;
4833 sd->dma_phys = 0;
4834 sd->alloced_dma_size = 0;
4835 sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__));
4836 } else {
4837 sd->dma_start_buf = va;
4838 sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
4839 sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE);
4840 sd->alloced_dma_size = alloced;
4841 sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%lx\n",
4842 __FUNCTION__, sd->alloced_dma_size, sd->dma_buf, sd->dma_phys));
4843 sd_fill_dma_data_buf(sd, 0xA5);
4844 }
4845
4846 if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, SD_PAGE_BITS, &alloced,
4847 &sd->adma2_dscr_start_phys, 0x12)) == NULL) {
4848 sd->sd_dma_mode = DMA_MODE_NONE;
4849 sd->adma2_dscr_start_buf = 0;
4850 sd->adma2_dscr_buf = (void *)0;
4851 sd->adma2_dscr_phys = 0;
4852 sd->alloced_adma2_dscr_size = 0;
4853 sd_err(("%s: DMA_ALLOC failed for descriptor buffer. "
4854 "Disabling DMA support.\n", __FUNCTION__));
4855 } else {
4856 sd->adma2_dscr_start_buf = va;
4857 sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
4858 sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE);
4859 sd->alloced_adma2_dscr_size = alloced;
4860 sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%lx\n",
4861 __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf,
4862 sd->adma2_dscr_phys));
4863 sd_clear_adma_dscr_buf(sd);
4864 }
4865 }
4866
4867 static void
4868 sd_unmap_dma(sdioh_info_t *sd)
4869 {
4870 if (sd->dma_start_buf) {
4871 DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size,
4872 sd->dma_start_phys, 0x12);
4873 }
4874
4875 if (sd->adma2_dscr_start_buf) {
4876 DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size,
4877 sd->adma2_dscr_start_phys, 0x12);
4878 }
4879 }
4880
4881 static void
4882 sd_clear_adma_dscr_buf(sdioh_info_t *sd)
4883 {
4884 bzero((char *)sd->adma2_dscr_buf, SD_PAGE);
4885 sd_dump_adma_dscr(sd);
4886 }
4887
4888 static void
4889 sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data)
4890 {
4891 memset((char *)sd->dma_buf, data, SD_PAGE);
4892 }
4893
4894 static void
4895 sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
4896 uint32 addr_phys, uint16 length, uint16 flags)
4897 {
4898 adma2_dscr_32b_t *adma2_dscr_table;
4899 adma1_dscr_t *adma1_dscr_table;
4900
4901 adma2_dscr_table = sd->adma2_dscr_buf;
4902 adma1_dscr_table = sd->adma2_dscr_buf;
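/* Layout reminder: each 32-bit-address ADMA2 descriptor is 8 bytes, a
 * physical address word plus a word carrying the length in its upper 16 bits
 * and the attribute flags in its lower 16 bits; ADMA1 instead packs the
 * address/length and attributes into a single word per descriptor entry.
 */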
4903
4904 switch (sd->sd_dma_mode) {
4905 case DMA_MODE_ADMA2:
4906 sd_dma(("%s: creating ADMA2 descriptor for index %d\n",
4907 __FUNCTION__, index));
4908
4909 adma2_dscr_table[index].phys_addr = addr_phys;
4910 adma2_dscr_table[index].len_attr = length << 16;
4911 adma2_dscr_table[index].len_attr |= flags;
4912 break;
4913 case DMA_MODE_ADMA1:
4914 /* ADMA1 requires two descriptors, one for len
4915 * and the other for data transfer
4916 */
4917 index <<= 1;
4918
4919 sd_dma(("%s: creating ADMA1 descriptor for index %d\n",
4920 __FUNCTION__, index));
4921
4922 adma1_dscr_table[index].phys_addr_attr = length << 12;
4923 adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET |
4924 ADMA2_ATTRIBUTE_VALID);
4925 adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000;
4926 adma1_dscr_table[index+1].phys_addr_attr |= (flags & 0x3f);
4927 break;
4928 default:
4929 sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n",
4930 __FUNCTION__, sd->sd_dma_mode));
4931 break;
4932 }
4933 }
4934
4935 static void
4936 sd_dump_adma_dscr(sdioh_info_t *sd)
4937 {
4938 adma2_dscr_32b_t *adma2_dscr_table;
4939 adma1_dscr_t *adma1_dscr_table;
4940 uint32 i = 0;
4941 uint16 flags;
4942 char flags_str[32];
4943
4944 ASSERT(sd->adma2_dscr_buf != NULL);
4945
4946 adma2_dscr_table = sd->adma2_dscr_buf;
4947 adma1_dscr_table = sd->adma2_dscr_buf;
4948
4949 switch (sd->sd_dma_mode) {
4950 case DMA_MODE_ADMA2:
4951 sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
4952 SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
4953 sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)"
4954 " |\n"));
4955 while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) {
4956 flags = adma2_dscr_table->len_attr & 0xFFFF;
4957 sprintf(flags_str, "%s%s%s%s",
4958 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
4959 ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
4960 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
4961 ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
4962 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
4963 ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ",
4964 (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
4965 (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
4966 (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
4967 sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n",
4968 i, adma2_dscr_table, adma2_dscr_table->phys_addr,
4969 adma2_dscr_table->len_attr >> 16, flags, flags_str));
4970 i++;
4971
4972 /* Follow LINK descriptors or skip to next. */
4973 if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
4974 ADMA2_ATTRIBUTE_ACT_LINK) {
4975 adma2_dscr_table = phys_to_virt(
4976 adma2_dscr_table->phys_addr);
4977 } else {
4978 adma2_dscr_table++;
4979 }
4980
4981 }
4982 break;
4983 case DMA_MODE_ADMA1:
4984 sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
4985 SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
4986 sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n"));
4987
4988 for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) {
4989 flags = adma1_dscr_table->phys_addr_attr & 0x3F;
4990 sprintf(flags_str, "%s%s%s%s",
4991 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
4992 ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
4993 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
4994 ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
4995 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
4996 ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ",
4997 (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
4998 (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
4999 (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
5000 sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n",
5001 i, adma1_dscr_table,
5002 adma1_dscr_table->phys_addr_attr & 0xFFFFF000,
5003 flags, flags_str));
5004
5005 /* Follow LINK descriptors or skip to next. */
5006 if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5007 ADMA2_ATTRIBUTE_ACT_LINK) {
5008 adma1_dscr_table = phys_to_virt(
5009 adma1_dscr_table->phys_addr_attr & 0xFFFFF000);
5010 } else {
5011 adma1_dscr_table++;
5012 }
5013 }
5014 break;
5015 default:
5016 sd_err(("Unknown DMA Descriptor Table Format.\n"));
5017 break;
5018 }
5019 }
5020
5021 static void
5022 sdstd_dumpregs(sdioh_info_t *sd)
5023 {
5024 sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n",
5025 sdstd_rreg16(sd, SD_IntrStatus),
5026 sdstd_rreg16(sd, SD_ErrorIntrStatus)));
5027 sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
5028 sdstd_rreg16(sd, SD_IntrStatusEnable),
5029 sdstd_rreg16(sd, SD_ErrorIntrStatusEnable)));
5030 sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
5031 sdstd_rreg16(sd, SD_IntrSignalEnable),
5032 sdstd_rreg16(sd, SD_ErrorIntrSignalEnable)));
5033 }
5034