1 /*
2 * 'Standard' SDIO HOST CONTROLLER driver
3 *
4 * Copyright (C) 2020, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 *
21 * <<Broadcom-WL-IPTag/Open:>>
22 *
23 * $Id$
24 */
25
26 #include <typedefs.h>
27
28 #include <bcmdevs.h>
29 #include <bcmendian.h>
30 #include <bcmutils.h>
31 #include <osl.h>
32 #include <siutils.h>
33 #include <sdio.h> /* SDIO Device and Protocol Specs */
34 #include <sdioh.h> /* Standard SDIO Host Controller Specification */
35 #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
36 #include <sdiovar.h> /* ioctl/iovars */
37 #include <pcicfg.h>
38 #include <bcmsdstd.h>
39 /* XXX Quick NDIS hack */
40 #ifdef NDIS
41 #define inline __inline
42 #define PCI_CFG_VID 0
43 #define PCI_CFG_BAR0 0x10
44 #endif
45
46 #define SD_PAGE_BITS 12
47 #define SD_PAGE (1 << SD_PAGE_BITS)
48 #define SDSTD_MAX_TUNING_PHASE 5
49
50 /*
51 * Upper GPIO 16 - 31 are available on J22
52 * J22.pin3 == gpio16, J22.pin5 == gpio17, etc.
53 * Lower GPIO 0 - 15 are available on J15 (WL_GPIO)
54 */
55 #define SDH_GPIO16 16
56 #define SDH_GPIO_ENABLE 0xffff
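/* Illustrative sketch only (not used by the driver): per the J22 mapping noted
 * above (pin3 == gpio16, pin5 == gpio17, ...), an odd J22 header pin would map
 * to an upper-bank GPIO roughly as below. J22_PIN_TO_GPIO is a hypothetical
 * helper name introduced here for illustration.
 */
#ifdef NOTUSED
#define J22_PIN_TO_GPIO(pin) (SDH_GPIO16 + (((pin) - 3) / 2)) /* e.g. pin 5 -> gpio 17 */
#endif /* NOTUSED */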
57
58 #include <bcmsdstd.h>
59 #include <sbsdio.h> /* SDIOH (host controller) core hardware definitions */
60
61 /* Globals */
62 uint sd_msglevel = SDH_ERROR_VAL;
63
64 uint sd_hiok = TRUE; /* Use hi-speed mode if available? */
65 uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
66 uint sd_f2_blocksize = 64; /* Default blocksize */
67 uint sd_f1_blocksize = BLOCK_SIZE_4318; /* Default blocksize */
68
69 #define sd3_trace(x)
70
71 /* sd3ClkMode: 0-SDR12 [25MHz]
72 * 1-SDR25 [50MHz]+SHS=1
73 * 2-SDR50 [100MHz]+SSDR50=1
74 * 3-SDR104 [208MHz]+SSDR104=1
75 * 4-DDR50 [50MHz]+SDDR50=1
76 */
77 #define SD3CLKMODE_0_SDR12 (0)
78 #define SD3CLKMODE_1_SDR25 (1)
79 #define SD3CLKMODE_2_SDR50 (2)
80 #define SD3CLKMODE_3_SDR104 (3)
81 #define SD3CLKMODE_4_DDR50 (4)
82 #define SD3CLKMODE_DISABLED (-1)
83 #define SD3CLKMODE_AUTO (99)
84
85 /* values for global_UHSI_Supp : Means host and card caps match. */
86 #define HOST_SDR_UNSUPP (0)
87 #define HOST_SDR_12_25 (1)
88 #define HOST_SDR_50_104_DDR (2)
89
90 /* depends-on/affects sd3_autoselect_uhsi_max.
91 * see sd3_autoselect_uhsi_max
92 */
93 int sd_uhsimode = SD3CLKMODE_DISABLED;
94 uint sd_tuning_period = CAP3_RETUNING_TC_OTHER;
95 uint sd_delay_value = 500000;
96 /* Enables host to dongle glomming. Also increases the
97 * dma buffer size. This will increase the rx throughput
98 * as there will be fewer CMD53 transactions
99 */
100 #ifdef BCMSDIOH_TXGLOM
101 uint sd_txglom;
102 #ifdef LINUX
103 module_param(sd_txglom, uint, 0);
104 #endif
105 #endif /* BCMSDIOH_TXGLOM */
106
107 char dhd_sdiod_uhsi_ds_override[2] = {' '};
108
109 #define MAX_DTS_INDEX (3)
110 #define DRVSTRN_MAX_CHAR ('D')
111 #define DRVSTRN_IGNORE_CHAR (' ')
112
113 char DTS_vals[MAX_DTS_INDEX + 1] = {
114 0x1, /* Driver Strength Type-A */
115 0x0, /* Driver Strength Type-B */
116 0x2, /* Driver Strength Type-C */
117 0x3, /* Driver Strength Type-D */
118 };
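/* Note (assumption, since the consumer lives in the UHS-I drive-strength code
 * elsewhere): dhd_sdiod_uhsi_ds_override presumably holds one of 'A'..'D'
 * (DRVSTRN_MAX_CHAR) or ' ' (DRVSTRN_IGNORE_CHAR), and a letter 'X' would be
 * looked up as DTS_vals['X' - 'A'], e.g. 'C' -> DTS_vals[2] == 0x2 (Type-C).
 */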
119
120 /* depends-on/affects sd_uhsimode.
121 select MAX speed automatically based on caps of host and card.
122 If this is 1, sd_uhsimode will be ignored. If the sd_uhsimode is set
123 by the user specifically, this var becomes 0. default value: 0. [XXX:TBD: for future]
124 */
125 uint32 sd3_autoselect_uhsi_max = 0;
126
127 #define MAX_TUNING_ITERS (40)
128 /* (150+10) ms total tuning time budget, divided across the loop iterations */
129 #define PER_TRY_TUNING_DELAY_MS (160/MAX_TUNING_ITERS)
130 #define CLKTUNING_MAX_BRR_RETRIES (1000) /* 1 ms: 1000 retries with 1 us delay per loop */
131
132 /* table analogous to the preset value register.
133 * This is because the current HC doesn't have preset value register support.
134 * All entries use DrvStr 'B' [val:0] and CLKGEN 0.
135 */
136 static unsigned short presetval_sw_table[] = {
137 0x0520, /* initialization: DrvStr:'B' [0]; CLKGen:0;
138 * SDCLKFreqSel: 520 [division: 320*2 = 640: ~400 KHz]
139 */
140 0x0008, /* default speed:DrvStr:'B' [0]; CLKGen:0;
141 * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
142 */
143 0x0004, /* High speed: DrvStr:'B' [0]; CLKGen:0;
144 * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
145 */
146 0x0008, /* SDR12: DrvStr:'B' [0]; CLKGen:0;
147 * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
148 */
149 0x0004, /* SDR25: DrvStr:'B' [0]; CLKGen:0;
150 * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
151 */
152 0x0001, /* SDR50: DrvStr:'B' [0]; CLKGen:0;
153 * SDCLKFreqSel: 2 [division: 1*2 = 2: ~100 MHz]
154 */
155 0x0001, /* SDR104: DrvStr:'B' [0]; CLKGen:0;
156 SDCLKFreqSel: 1 [no division: ~255/~208 MHz]
157 */
158 0x0002 /* DDR50: DrvStr:'B' [0]; CLKGen:0;
159 SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
160 */
161 };
162
163 /* These provide software overrides of the hardware capabilities. Info follows:
164 For override [1]: Preset registers: not supported
165 Voltage switch: not supported
166 Clock Tuning: not supported
167 */
168 bool sd3_sw_override1 = FALSE;
169 bool sd3_sw_read_magic_bytes = FALSE;
170
171 #define SD3_TUNING_REQD(sd, sd_uhsimode) ((sd_uhsimode != SD3CLKMODE_DISABLED) && \
172 (sd->version == HOST_CONTR_VER_3) && \
173 ((sd_uhsimode == SD3CLKMODE_3_SDR104) || \
174 ((sd_uhsimode == SD3CLKMODE_2_SDR50) && \
175 (GFIELD(sd->caps3, CAP3_TUNING_SDR50)))))
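/* In plain terms: tuning is required only on a 3.0 host with UHS-I enabled,
 * and then only for SDR104, or for SDR50 when the host's capability register
 * sets the "SDR50 requires tuning" bit (CAP3_TUNING_SDR50).
 */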
176
177 /* find next power of 2 */
178 #define NEXT_POW2(n) {n--; n |= n>>1; n |= n>>2; n |= n>>4; n++;}
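/* Worked example: NEXT_POW2(100) -> 99 -> 0x7f after the or-chain -> 128.
 * Observation on the macro as written: with only the >>1/>>2/>>4 shifts the
 * top set bit is smeared down at most 7 places, so the result is only
 * guaranteed for values up to 256 (e.g. NEXT_POW2 of 257 would yield 511).
 */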
179
180 #ifdef BCMSDYIELD
181 bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */
182 uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */
183 bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */
184 #endif
185
186 /* XXX: Issues with CMD14 enter/exit sleep
187 * XXX: Temp fix for special CMD14 handling
188 */
189 #define F1_SLEEPCSR_ADDR 0x1001F
190
191 uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz;
192 may be changed at runtime for 208 MHz (SDR104) operation
193 */
194
195 uint sd_power = 1; /* Default to SD Slot powered ON */
196 uint sd_3_power_save = 1; /* Default to SDIO 3.0 power save */
197 uint sd_clock = 1; /* Default to SD Clock turned ON */
198 uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
199 uint8 sd_dma_mode = DMA_MODE_AUTO; /* Default to AUTO & program based on capability */
200
201 /* XXX Base timeout counter value on 48MHz (2^20 @ 48MHz => 21845us)
202 * Could adjust by adding sd_divisor (to maintain bit count) but really
203 * need something more elaborate to do that right. Still allows xfer
204 * of about 1000 bytes at 400KHz, so constant is ok.
205 * Timeout control N produces 2^(13+N) counter.
206 */
207 uint sd_toctl = 7;
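/* For reference, the arithmetic behind the values above: sd_toctl = 7 gives a
 * 2^(13+7) = 2^20 = 1048576-clock data timeout; at 48 MHz that is about
 * 1048576/48 us ~= 21845 us (~21.8 ms). At 400 KHz the same window is ~8738
 * clocks, i.e. roughly 1000 bytes at ~8 clocks per byte, matching the comment.
 */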
208 static bool trap_errs = FALSE;
209
210 static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" };
211
212 /* Prototypes */
213 static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor);
214 static uint16 sdstd_start_power(sdioh_info_t *sd, int volts_req);
215 static bool sdstd_bus_width(sdioh_info_t *sd, int width);
216 static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
217 static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode);
218 static int sdstd_card_enablefuncs(sdioh_info_t *sd);
219 static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
220 static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg);
221 static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
222 int regsize, uint32 *data);
223 static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
224 int regsize, uint32 data);
225 static int sdstd_driver_init(sdioh_info_t *sd);
226 static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
227 static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
228 uint32 addr, int nbytes, uint32 *data);
229 static int sdstd_abort(sdioh_info_t *sd, uint func);
230 static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg);
231 static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
232 static void sd_map_dma(sdioh_info_t * sd);
233 static void sd_unmap_dma(sdioh_info_t * sd);
234 static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
235 static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
236 static void sd_create_adma_descriptor(sdioh_info_t *sd,
237 uint32 index, uint32 addr_phys,
238 uint16 length, uint16 flags);
239 static void sd_dump_adma_dscr(sdioh_info_t *sd);
240 static void sdstd_dumpregs(sdioh_info_t *sd);
241
242 static int sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode);
243 static int sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd);
244 static int sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd,
245 int sd3_requested_clkmode);
246 static bool sdstd_3_get_matching_drvstrn(sdioh_info_t *sd,
247 int sd3_requested_clkmode, uint32 *drvstrn, uint16 *presetval);
248 static int sdstd_3_clock_wrapper(sdioh_info_t *sd);
249 static int sdstd_clock_wrapper(sdioh_info_t *sd);
250
251 #ifdef BCMINTERNAL
252 #ifdef NOTUSED
253 static int parse_caps(uint32 caps_reg, char *buf, int len);
254 static int parse_state(uint32 state_reg, char *buf, int len);
255 static void cis_fetch(sdioh_info_t *sd, int func, char *data, int len);
256 #endif /* NOTUSED */
257 #endif /* BCMINTERNAL */
258
259 #ifdef BCMDBG
260 static void print_regs(sdioh_info_t *sd);
261 #endif
262
263 /*
264 * Private register access routines.
265 */
266
267 /* 16 bit PCI regs */
268
269 /* XXX This is a hack to satisfy the -Wmissing-prototypes warning */
270 extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
271 uint16
272 sdstd_rreg16(sdioh_info_t *sd, uint reg)
273 {
274
275 volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
276 sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
277 return data;
278 }
279
280 /* XXX This is a hack to satisfy the -Wmissing-prototypes warning */
281 extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
282 void
283 sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
284 {
285 *(volatile uint16 *)(sd->mem_space + reg) = (uint16) data;
286 sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
287 }
288
289 static void
290 sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
291 {
292 volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
293 sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
294 data |= val;
295 *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
296
297 }
298 static void
299 sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
300 {
301
302 volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
303 sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
304 data &= ~mask;
305 data |= (val & mask);
306 *(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
307 }
308
309 /* 32 bit PCI regs */
310 static uint32
311 sdstd_rreg(sdioh_info_t *sd, uint reg)
312 {
313 volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
314 sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
315 return data;
316 }
317 static inline void
318 sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
319 {
320 *(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
321 sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
322
323 }
324 #ifdef BCMINTERNAL
325 #ifdef NOTUSED
326 static void
327 sdstd_or_reg(sdioh_info_t *sd, uint reg, uint32 val)
328 {
329 volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
330 data |= val;
331 *(volatile uint32 *)(sd->mem_space + reg) = (volatile uint32)data;
332 }
333 static void
334 sdstd_mod_reg(sdioh_info_t *sd, uint reg, uint32 mask, uint32 val)
335 {
336 volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
337 data &= ~mask;
338 data |= (val & mask);
339 *(volatile uint32 *)(sd->mem_space + reg) = (volatile uint32)data;
340 }
341 #endif /* NOTUSED */
342 #endif /* BCMINTERNAL */
343
344 /* 8 bit PCI regs */
345 static inline void
346 sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data)
347 {
348 *(volatile uint8 *)(sd->mem_space + reg) = (uint8)data;
349 sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data));
350 }
351 static uint8
352 sdstd_rreg8(sdioh_info_t *sd, uint reg)
353 {
354 volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg);
355 sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data));
356 return data;
357 }
358
359 /*
360 * Private work routines
361 */
362
363 sdioh_info_t *glob_sd;
364
365 /*
366 * Public entry points & extern's
367 */
368 extern sdioh_info_t *
369 sdioh_attach(osl_t *osh, void *bar0, uint irq)
370 {
371 sdioh_info_t *sd;
372
373 sd_trace(("%s\n", __FUNCTION__));
374 if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
375 sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
376 return NULL;
377 }
378 bzero((char *)sd, sizeof(sdioh_info_t));
379 glob_sd = sd;
380 sd->osh = osh;
381 if (sdstd_osinit(sd) != 0) {
382 sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__));
383 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
384 return NULL;
385 }
386 sd->mem_space = (volatile char *)sdstd_reg_map(osh, (ulong)bar0, SDIOH_REG_WINSZ);
387 sd_init_dma(sd);
388 sd->irq = irq;
389 if (sd->mem_space == NULL) {
390 sd_err(("%s:ioremap() failed\n", __FUNCTION__));
391 sdstd_osfree(sd);
392 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
393 return NULL;
394 }
395 sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
396 sd->intr_handler = NULL;
397 sd->intr_handler_arg = NULL;
398 sd->intr_handler_valid = FALSE;
399
400 /* Set defaults */
401 sd->sd_blockmode = TRUE;
402 sd->use_client_ints = TRUE;
403 sd->sd_dma_mode = sd_dma_mode;
404
405 /* XXX Haven't figured out how to make bytemode work with dma */
406 if (!sd->sd_blockmode)
407 sd->sd_dma_mode = DMA_MODE_NONE;
408
409 if (sdstd_driver_init(sd) != SUCCESS) {
410 /* If the host CPU was reset without resetting the SD bus or
411 SD device, the device will still have its RCA but the
412 driver no longer knows what it is (since the driver has been restarted).
413 Go through once to clear the RCA and again reassign it.
414 */
415 sd_info(("driver_init failed - Reset RCA and try again\n"));
416 if (sdstd_driver_init(sd) != SUCCESS) {
417 sd_err(("%s:driver_init() failed()\n", __FUNCTION__));
418 if (sd->mem_space) {
419 sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
420 sd->mem_space = NULL;
421 }
422 sdstd_osfree(sd);
423 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
424 return (NULL);
425 }
426 }
427
428 /* XXX Needed for NDIS as its OSL checks for correct dma address width
429 * This value is normally set by wlc_attach() which has yet to run
430 */
431 OSL_DMADDRWIDTH(osh, 32);
432
433 /* Always map DMA buffers, so we can switch between DMA modes. */
434 sd_map_dma(sd);
435
436 if (sdstd_register_irq(sd, irq) != SUCCESS) {
437 sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
438 sdstd_free_irq(sd->irq, sd);
439 if (sd->mem_space) {
440 sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
441 sd->mem_space = NULL;
442 }
443
444 sdstd_osfree(sd);
445 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
446 return (NULL);
447 }
448
449 sd_trace(("%s: Done\n", __FUNCTION__));
450 return sd;
451 }
452
453 extern SDIOH_API_RC
454 sdioh_detach(osl_t *osh, sdioh_info_t *sd)
455 {
456 sd_trace(("%s\n", __FUNCTION__));
457 if (sd) {
458 sd_unmap_dma(sd);
459 sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
460 if (sd->sd3_tuning_reqd == TRUE) {
461 sdstd_3_osclean_tuning(sd);
462 sd->sd3_tuning_reqd = FALSE;
463 }
464 sd->sd3_tuning_disable = FALSE;
465 sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
466 sdstd_free_irq(sd->irq, sd);
467 if (sd->card_init_done)
468 sdstd_reset(sd, 1, 1);
469 if (sd->mem_space) {
470 sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
471 sd->mem_space = NULL;
472 }
473
474 sdstd_osfree(sd);
475 MFREE(sd->osh, sd, sizeof(sdioh_info_t));
476 }
477 return SDIOH_API_RC_SUCCESS;
478 }
479
480 /* Configure callback to client when we receive client interrupt */
481 extern SDIOH_API_RC
482 sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
483 {
484 sd_trace(("%s: Entering\n", __FUNCTION__));
485 sd->intr_handler = fn;
486 sd->intr_handler_arg = argh;
487 sd->intr_handler_valid = TRUE;
488 return SDIOH_API_RC_SUCCESS;
489 }
490
491 extern SDIOH_API_RC
492 sdioh_interrupt_deregister(sdioh_info_t *sd)
493 {
494 sd_trace(("%s: Entering\n", __FUNCTION__));
495 sd->intr_handler_valid = FALSE;
496 sd->intr_handler = NULL;
497 sd->intr_handler_arg = NULL;
498 return SDIOH_API_RC_SUCCESS;
499 }
500
501 extern SDIOH_API_RC
502 sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
503 {
504 sd_trace(("%s: Entering\n", __FUNCTION__));
505 *onoff = sd->client_intr_enabled;
506 return SDIOH_API_RC_SUCCESS;
507 }
508
509 #if defined(DHD_DEBUG) || defined(BCMDBG)
510 extern bool
511 sdioh_interrupt_pending(sdioh_info_t *sd)
512 {
513 uint16 intrstatus;
514 intrstatus = sdstd_rreg16(sd, SD_IntrStatus);
515 return !!(intrstatus & CLIENT_INTR);
516 }
517 #endif
518
519 uint
520 sdioh_query_iofnum(sdioh_info_t *sd)
521 {
522 return sd->num_funcs;
523 }
524
525 /* IOVar table */
526 enum {
527 IOV_MSGLEVEL = 1,
528 IOV_BLOCKMODE,
529 IOV_BLOCKSIZE,
530 IOV_DMA,
531 IOV_USEINTS,
532 IOV_NUMINTS,
533 IOV_NUMLOCALINTS,
534 IOV_HOSTREG,
535 IOV_DEVREG,
536 IOV_DIVISOR,
537 IOV_SDMODE,
538 IOV_HISPEED,
539 IOV_HCIREGS,
540 IOV_POWER,
541 IOV_POWER_SAVE,
542 IOV_YIELDCPU,
543 IOV_MINYIELD,
544 IOV_FORCERB,
545 IOV_CLOCK,
546 IOV_UHSIMOD,
547 IOV_TUNEMOD,
548 IOV_TUNEDIS
549 };
550
551 const bcm_iovar_t sdioh_iovars[] = {
552 {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
553 {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 },
554 {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
555 {"sd_dma", IOV_DMA, 0, 0, IOVT_UINT32, 0 },
556 #ifdef BCMSDYIELD
557 {"sd_yieldcpu", IOV_YIELDCPU, 0, 0, IOVT_BOOL, 0 },
558 {"sd_minyield", IOV_MINYIELD, 0, 0, IOVT_UINT32, 0 },
559 {"sd_forcerb", IOV_FORCERB, 0, 0, IOVT_BOOL, 0 },
560 #endif
561 {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
562 {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
563 {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
564 {"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
565 {"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
566 {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
567 {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
568 {"sd_power_save", IOV_POWER_SAVE, 0, 0, IOVT_UINT32, 0 },
569 {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
570 {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
571 {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0},
572 {"sd_uhsimode", IOV_UHSIMOD, 0, 0, IOVT_UINT32, 0},
573 #ifdef BCMDBG
574 {"sd_hciregs", IOV_HCIREGS, 0, 0, IOVT_BUFFER, 0 },
575 #endif
576 {"tuning_mode", IOV_TUNEMOD, 0, 0, IOVT_UINT32, 0},
577 {"sd3_tuning_disable", IOV_TUNEDIS, 0, 0, IOVT_BOOL, 0},
578
579 {NULL, 0, 0, 0, 0, 0 }
580 };
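/* The 0x4 manipulated by the two helpers below is, per the standard host
 * controller Clock Control register layout, the SD Clock Enable bit; the
 * internal clock enable/stable bits in the same register are left untouched.
 */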
581 uint8 sdstd_turn_on_clock(sdioh_info_t *sd)
582 {
583 sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
584 return 0;
585 }
586
587 uint8 sdstd_turn_off_clock(sdioh_info_t *sd)
588 {
589 sdstd_wreg16(sd, SD_ClockCntrl, sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
590 return 0;
591 }
592
593 int
594 sdioh_iovar_op(sdioh_info_t *si, const char *name,
595 void *params, int plen, void *arg, uint len, bool set)
596 {
597 const bcm_iovar_t *vi = NULL;
598 int bcmerror = 0;
599 uint val_size;
600 int32 int_val = 0;
601 bool bool_val;
602 uint32 actionid;
603
604 ASSERT(name);
605
606 /* Get must have return space; Set does not take qualifiers */
607 ASSERT(set || (arg && len));
608 ASSERT(!set || (!params && !plen));
609
610 sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
611
612 if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
613 bcmerror = BCME_UNSUPPORTED;
614 goto exit;
615 }
616
617 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
618 goto exit;
619
620 /* XXX Copied from dhd, copied from wl; certainly overkill here? */
621 /* Set up params so get and set can share the convenience variables */
622 if (params == NULL) {
623 params = arg;
624 plen = len;
625 }
626
627 if (vi->type == IOVT_VOID)
628 val_size = 0;
629 else if (vi->type == IOVT_BUFFER)
630 val_size = len;
631 else
632 val_size = sizeof(int);
633
634 if (plen >= (int)sizeof(int_val))
635 bcopy(params, &int_val, sizeof(int_val));
636
637 bool_val = (int_val != 0) ? TRUE : FALSE;
638 BCM_REFERENCE(bool_val);
639
640 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
641 switch (actionid) {
642 case IOV_GVAL(IOV_MSGLEVEL):
643 int_val = (int32)sd_msglevel;
644 bcopy(&int_val, arg, val_size);
645 break;
646
647 case IOV_SVAL(IOV_MSGLEVEL):
648 sd_msglevel = int_val;
649 break;
650
651 case IOV_GVAL(IOV_BLOCKMODE):
652 int_val = (int32)si->sd_blockmode;
653 bcopy(&int_val, arg, val_size);
654 break;
655
656 case IOV_SVAL(IOV_BLOCKMODE):
657 si->sd_blockmode = (bool)int_val;
658 /* Haven't figured out how to make non-block mode work with DMA */
659 if (!si->sd_blockmode)
660 si->sd_dma_mode = DMA_MODE_NONE;
661 break;
662
663 #ifdef BCMSDYIELD
664 case IOV_GVAL(IOV_YIELDCPU):
665 int_val = sd_yieldcpu;
666 bcopy(&int_val, arg, val_size);
667 break;
668
669 case IOV_SVAL(IOV_YIELDCPU):
670 sd_yieldcpu = (bool)int_val;
671 break;
672
673 case IOV_GVAL(IOV_MINYIELD):
674 int_val = sd_minyield;
675 bcopy(&int_val, arg, val_size);
676 break;
677
678 case IOV_SVAL(IOV_MINYIELD):
679 sd_minyield = (bool)int_val;
680 break;
681
682 case IOV_GVAL(IOV_FORCERB):
683 int_val = sd_forcerb;
684 bcopy(&int_val, arg, val_size);
685 break;
686
687 case IOV_SVAL(IOV_FORCERB):
688 sd_forcerb = (bool)int_val;
689 break;
690 #endif /* BCMSDYIELD */
691
692 case IOV_GVAL(IOV_BLOCKSIZE):
693 if ((uint32)int_val > si->num_funcs) {
694 bcmerror = BCME_BADARG;
695 break;
696 }
697 int_val = (int32)si->client_block_size[int_val];
698 bcopy(&int_val, arg, val_size);
699 break;
700
701 case IOV_SVAL(IOV_BLOCKSIZE):
702 {
703 uint func = ((uint32)int_val >> 16);
704 uint blksize = (uint16)int_val;
705 uint maxsize;
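/* The iovar value packs both fields, as noted in the iovar table:
 * ((func << 16) | size). For example, setting a 512-byte block size on
 * function 2 would be encoded as (2 << 16) | 512 == 0x00020200.
 */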
706
707 if (func > si->num_funcs) {
708 bcmerror = BCME_BADARG;
709 break;
710 }
711
712 /* XXX These hardcoded sizes are a hack, remove after proper CIS parsing. */
713 switch (func) {
714 case 0: maxsize = 32; break;
715 case 1: maxsize = BLOCK_SIZE_4318; break;
716 case 2: maxsize = BLOCK_SIZE_4328; break;
717 default: maxsize = 0;
718 }
719 if (blksize > maxsize) {
720 bcmerror = BCME_BADARG;
721 break;
722 }
723 if (!blksize) {
724 blksize = maxsize;
725 }
726
727 /* Now set it */
728 sdstd_lock(si);
729 bcmerror = set_client_block_size(si, func, blksize);
730 sdstd_unlock(si);
731 break;
732 }
733
734 case IOV_GVAL(IOV_DMA):
735 int_val = (int32)si->sd_dma_mode;
736 bcopy(&int_val, arg, val_size);
737 break;
738
739 case IOV_SVAL(IOV_DMA):
740 si->sd_dma_mode = (char)int_val;
741 sdstd_set_dma_mode(si, si->sd_dma_mode);
742 break;
743
744 case IOV_GVAL(IOV_USEINTS):
745 int_val = (int32)si->use_client_ints;
746 bcopy(&int_val, arg, val_size);
747 break;
748
749 case IOV_SVAL(IOV_USEINTS):
750 si->use_client_ints = (bool)int_val;
751 if (si->use_client_ints)
752 si->intmask |= CLIENT_INTR;
753 else
754 si->intmask &= ~CLIENT_INTR;
755 break;
756
757 case IOV_GVAL(IOV_DIVISOR):
758 int_val = (uint32)sd_divisor;
759 bcopy(&int_val, arg, val_size);
760 break;
761
762 case IOV_SVAL(IOV_DIVISOR):
763 sd_divisor = int_val;
764 if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
765 sd_err(("set clock failed!\n"));
766 bcmerror = BCME_ERROR;
767 }
768 break;
769
770 case IOV_GVAL(IOV_POWER):
771 int_val = (uint32)sd_power;
772 bcopy(&int_val, arg, val_size);
773 break;
774
775 case IOV_GVAL(IOV_POWER_SAVE):
776 int_val = (uint32)sd_3_power_save;
777 bcopy(&int_val, arg, val_size);
778 break;
779
780 case IOV_SVAL(IOV_POWER):
781 sd_power = int_val;
782 if (sd_power == 1) {
783 if (sdstd_driver_init(si) != SUCCESS) {
784 sd_err(("set SD Slot power failed!\n"));
785 bcmerror = BCME_ERROR;
786 } else {
787 sd_err(("SD Slot Powered ON.\n"));
788 }
789 } else {
790 uint8 pwr = 0;
791
792 pwr = SFIELD(pwr, PWR_BUS_EN, 0);
793 sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */
794 sd_err(("SD Slot Powered OFF.\n"));
795 }
796 break;
797
798 case IOV_SVAL(IOV_POWER_SAVE):
799 sd_3_power_save = int_val;
800 break;
801
802 case IOV_GVAL(IOV_CLOCK):
803 int_val = (uint32)sd_clock;
804 bcopy(&int_val, arg, val_size);
805 break;
806
807 case IOV_SVAL(IOV_CLOCK):
808 sd_clock = int_val;
809 if (sd_clock == 1) {
810 sd_info(("SD Clock turned ON.\n"));
811 if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
812 sd_err(("sdstd_start_clock failed\n"));
813 bcmerror = BCME_ERROR;
814 }
815 } else {
816 /* turn off HC clock */
817 sdstd_wreg16(si, SD_ClockCntrl,
818 sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4));
819
820 sd_info(("SD Clock turned OFF.\n"));
821 }
822 break;
823
824 case IOV_GVAL(IOV_SDMODE):
825 int_val = (uint32)sd_sdmode;
826 bcopy(&int_val, arg, val_size);
827 break;
828
829 case IOV_SVAL(IOV_SDMODE):
830 sd_sdmode = int_val;
831
832 if (!sdstd_bus_width(si, sd_sdmode)) {
833 sd_err(("sdstd_bus_width failed\n"));
834 bcmerror = BCME_ERROR;
835 }
836 break;
837
838 case IOV_GVAL(IOV_HISPEED):
839 int_val = (uint32)sd_hiok;
840 bcopy(&int_val, arg, val_size);
841 break;
842
843 case IOV_SVAL(IOV_HISPEED):
844 sd_hiok = int_val;
845 bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok);
846 break;
847
848 case IOV_GVAL(IOV_UHSIMOD):
849 sd3_trace(("%s: Get UHSI: \n", __FUNCTION__));
850 int_val = (int)sd_uhsimode;
851 bcopy(&int_val, arg, val_size);
852 break;
853
854 case IOV_SVAL(IOV_UHSIMOD):
855 {
856 int oldval = sd_uhsimode; /* save old, working value */
857 sd3_trace(("%s: Set UHSI: \n", __FUNCTION__));
858 /* check if UHSI is supported by card/host */
859 if (!(si->card_UHSI_voltage_Supported && si->host_UHSISupported)) {
860 sd_err(("%s:UHSI not supported!\n", __FUNCTION__));
861 bcmerror = BCME_UNSUPPORTED;
862 break;
863 }
864 /* check for valid values */
865 if (!((int_val == SD3CLKMODE_AUTO) ||
866 (int_val == SD3CLKMODE_DISABLED) ||
867 ((int_val >= SD3CLKMODE_0_SDR12) &&
868 (int_val <= SD3CLKMODE_4_DDR50)))) {
869 sd_err(("%s:CLK: bad arg!\n", __FUNCTION__));
870 bcmerror = BCME_BADARG;
871 break;
872 }
873
874 sd_uhsimode = int_val;
875 if (SUCCESS != sdstd_3_clock_wrapper(si)) {
876 sd_err(("%s:Error in setting uhsi clkmode:%d,"
877 "restoring back to %d\n", __FUNCTION__,
878 sd_uhsimode, oldval));
879 /* try to set back the old one */
880 sd_uhsimode = oldval;
881 if (SUCCESS != sdstd_3_clock_wrapper(si)) {
882 sd_err(("%s:Error in setting uhsi to old mode;"
883 "ignoring:\n", __FUNCTION__));
884 }
885 }
886 break;
887 }
888 #ifdef DHD_DEBUG
889 case IOV_SVAL(IOV_TUNEMOD):
890 {
891
892 if (int_val == SD_DHD_DISABLE_PERIODIC_TUNING) { /* do tuning a single time */
893 sd3_trace(("Start tuning from Iovar\n"));
894 si->sd3_tuning_reqd = TRUE;
895 sdstd_enable_disable_periodic_timer(si, int_val);
896 sdstd_lock(si);
897 sdstd_3_clk_tuning(si, sdstd_3_get_uhsi_clkmode(si));
898 sdstd_unlock(si);
899 si->sd3_tuning_reqd = FALSE;
900 }
901 if (int_val == SD_DHD_ENABLE_PERIODIC_TUNING) {
902 sd3_trace(("Enabling automatic tuning\n"));
903 si->sd3_tuning_reqd = TRUE;
904 sdstd_enable_disable_periodic_timer(si, int_val);
905 }
906 break;
907 }
908 #endif /* DHD_DEBUG */
909 case IOV_GVAL(IOV_NUMINTS):
910 int_val = (int32)si->intrcount;
911 bcopy(&int_val, arg, val_size);
912 break;
913
914 case IOV_GVAL(IOV_NUMLOCALINTS):
915 int_val = (int32)si->local_intrcount;
916 bcopy(&int_val, arg, val_size);
917 break;
918
919 case IOV_GVAL(IOV_HOSTREG):
920 {
921 /* XXX Should copy for alignment reasons */
922 sdreg_t *sd_ptr = (sdreg_t *)params;
923
924 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
925 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
926 bcmerror = BCME_BADARG;
927 break;
928 }
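/* The low offset bits select the access width used below: an odd offset is
 * read as 8 bits, an offset with bit 1 set as 16 bits, and a 4-byte-aligned
 * offset as 32 bits (e.g. 0x29 -> 8-bit, 0x2A -> 16-bit, 0x28 -> 32-bit).
 */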
929
930 sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
931 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
932 sd_ptr->offset));
933 if (sd_ptr->offset & 1)
934 int_val = sdstd_rreg8(si, sd_ptr->offset);
935 else if (sd_ptr->offset & 2)
936 int_val = sdstd_rreg16(si, sd_ptr->offset);
937 else
938 int_val = sdstd_rreg(si, sd_ptr->offset);
939
940 bcopy(&int_val, arg, sizeof(int_val));
941 break;
942 }
943
944 case IOV_SVAL(IOV_HOSTREG):
945 {
946 /* XXX Should copy for alignment reasons */
947 sdreg_t *sd_ptr = (sdreg_t *)params;
948
949 if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
950 sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
951 bcmerror = BCME_BADARG;
952 break;
953 }
954
955 sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
956 (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
957 sd_ptr->offset));
958 if (sd_ptr->offset & 1)
959 sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
960 else if (sd_ptr->offset & 2)
961 sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
962 else
963 sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);
964
965 break;
966 }
967
968 case IOV_GVAL(IOV_DEVREG):
969 {
970 /* XXX Should copy for alignment reasons */
971 sdreg_t *sd_ptr = (sdreg_t *)params;
972 uint8 data;
973
974 if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
975 bcmerror = BCME_SDIO_ERROR;
976 break;
977 }
978
979 int_val = (int)data;
980 bcopy(&int_val, arg, sizeof(int_val));
981 break;
982 }
983
984 case IOV_SVAL(IOV_DEVREG):
985 {
986 /* XXX Should copy for alignment reasons */
987 sdreg_t *sd_ptr = (sdreg_t *)params;
988 uint8 data = (uint8)sd_ptr->value;
989
990 if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
991 bcmerror = BCME_SDIO_ERROR;
992 break;
993 }
994 break;
995 }
996
997 #ifdef BCMDBG
998 case IOV_GVAL(IOV_HCIREGS):
999 {
1000 struct bcmstrbuf b;
1001 bcm_binit(&b, arg, len);
1002
1003 sdstd_lock(si);
1004 bcm_bprintf(&b, "IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n",
1005 sdstd_rreg16(si, SD_IntrStatus),
1006 sdstd_rreg16(si, SD_ErrorIntrStatus));
1007 bcm_bprintf(&b, "IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
1008 sdstd_rreg16(si, SD_IntrStatusEnable),
1009 sdstd_rreg16(si, SD_ErrorIntrStatusEnable));
1010 bcm_bprintf(&b, "IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
1011 sdstd_rreg16(si, SD_IntrSignalEnable),
1012 sdstd_rreg16(si, SD_ErrorIntrSignalEnable));
1013 print_regs(si);
1014
1015 sdstd_unlock(si);
1016
1017 if (!b.size)
1018 bcmerror = BCME_BUFTOOSHORT;
1019 break;
1020 }
1021 #endif /* BCMDBG */
1022
1023 case IOV_SVAL(IOV_TUNEDIS):
1024 si->sd3_tuning_disable = (bool)int_val;
1025 break;
1026
1027 default:
1028 bcmerror = BCME_UNSUPPORTED;
1029 break;
1030 }
1031 exit:
1032
1033 /* XXX Remove protective lock after clients all clean... */
1034 return bcmerror;
1035 }
1036
1037 extern SDIOH_API_RC
1038 sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
1039 {
1040 SDIOH_API_RC status;
1041 /* No lock needed since sdioh_request_byte does locking */
1042 status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
1043 return status;
1044 }
1045
1046 extern SDIOH_API_RC
1047 sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
1048 {
1049 /* No lock needed since sdioh_request_byte does locking */
1050 SDIOH_API_RC status;
1051 status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
1052 return status;
1053 }
1054
1055 extern SDIOH_API_RC
1056 sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
1057 {
1058 uint32 count;
1059 int offset;
1060 uint32 foo;
1061 uint8 *cis = cisd;
1062
1063 sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
1064
1065 if (!sd->func_cis_ptr[func]) {
1066 bzero(cis, length);
1067 return SDIOH_API_RC_FAIL;
1068 }
1069
1070 sdstd_lock(sd);
1071 *cis = 0;
1072 for (count = 0; count < length; count++) {
1073 offset = sd->func_cis_ptr[func] + count;
1074 if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
1075 sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
1076 sdstd_unlock(sd);
1077 return SDIOH_API_RC_FAIL;
1078 }
1079 *cis = (uint8)(foo & 0xff);
1080 cis++;
1081 }
1082 sdstd_unlock(sd);
1083 return SDIOH_API_RC_SUCCESS;
1084 }
1085
1086 extern SDIOH_API_RC
1087 sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
1088 {
1089 int status = SDIOH_API_RC_SUCCESS;
1090 uint32 cmd_arg;
1091 uint32 rsp5;
1092
1093 sdstd_lock(sd);
1094 if (rw == SDIOH_READ)
1095 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
1096
1097 /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
1098 sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
1099
1100 #ifdef BCMDBG
1101 if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
1102 sd_err(("%s: Entering: ErrorIntrStatus 0x%x, intstat = 0x%x\n",
1103 __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus),
1104 sdstd_rreg16(sd, SD_IntrStatus)));
1105 }
1106 #endif
1107 cmd_arg = 0;
1108 cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
1109 cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
1110 cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
1111 cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
1112 cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);
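/* For reference (per the SDIO IO_RW_DIRECT definition that the CMD52_* fields
 * encode): bit 31 R/W flag, bits 30:28 function number, bit 27 RAW,
 * bits 25:9 register address, bits 7:0 write data (0 for reads).
 */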
1113
1114 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
1115 /* Change to DATA_TRANSFER_IDLE */
1116 sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
1117 sdstd_unlock(sd);
1118 return status;
1119 }
1120
1121 sdstd_cmd_getrsp(sd, &rsp5, 1);
1122 if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
1123 sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
1124 __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
1125 status = SDIOH_API_RC_FAIL;
1126 }
1127 if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) {
1128 /* PR 101351: sdiod_aos sleep followed by immediate wakeup
1129 * before sdiod_aos takes over has a problem.
1130 * While exiting sleep with CMD14, device returning 0x00
1131 * Don't flag as error for now for 0x1001f.
1132 */
1133 if (GFIELD(cmd_arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR) {
1134 sd_err(("%s: rsp5 flags is 0x%x\t %d \n",
1135 __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
1136 }
1137 status = SDIOH_API_RC_FAIL;
1138 }
1139
1140 if (GFIELD(rsp5, RSP5_STUFF)) {
1141 sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
1142 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
1143 status = SDIOH_API_RC_FAIL;
1144 }
1145
1146 if (rw == SDIOH_READ)
1147 *byte = GFIELD(rsp5, RSP5_DATA);
1148
1149 /* Change to DATA_TRANSFER_IDLE */
1150 sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
1151
1152 /* check if we have to do tuning; if so, start */
1153 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
1154
1155 sdstd_unlock(sd);
1156 return status;
1157 }
1158
1159 extern SDIOH_API_RC
1160 sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
1161 uint32 *word, uint nbytes)
1162 {
1163 int status;
1164
1165 sdstd_lock(sd);
1166
1167 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
1168
1169 /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
1170 sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
1171
1172 if (rw == SDIOH_READ) {
1173 status = sdstd_card_regread(sd, func, addr, nbytes, word);
1174 } else {
1175 status = sdstd_card_regwrite(sd, func, addr, nbytes, *word);
1176 }
1177
1178 /* Change to DATA_TRANSFER_IDLE */
1179 sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
1180
1181 /* check if we have to do tuning; if so, start */
1182 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
1183
1184 sdstd_unlock(sd);
1185 return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
1186 }
1187
1188 #ifdef BCMSDIOH_TXGLOM
1189 void
1190 sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
1191 {
1192 BCM_REFERENCE(pkt);
1193 sd->glom_info.dma_buf_arr[sd->glom_info.count] = frame;
1194 sd->glom_info.nbytes[sd->glom_info.count] = len;
1195 /* Convert the frame addr to phy addr for DMA in case of host controller version3 */
1196 if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
1197 sd->glom_info.dma_phys_arr[sd->glom_info.count] = DMA_MAP(sd->osh,
1198 frame,
1199 len,
1200 DMA_TX, 0, 0);
1201 }
1202 sd->glom_info.count++;
1203 }
1204
1205 void
1206 sdioh_glom_clear(sdioh_info_t *sd)
1207 {
1208 int i;
1209 /* DMA_MAP is done per frame only if host controller version is 3 */
1210 if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
1211 for (i = 0; i < sd->glom_info.count; i++) {
1212 DMA_UNMAP(sd->osh,
1213 sd->glom_info.dma_phys_arr[i],
1214 sd->glom_info.nbytes[i],
1215 DMA_TX, 0, 0);
1216 }
1217 }
1218 sd->glom_info.count = 0;
1219 }
1220
1221 uint
1222 sdioh_set_mode(sdioh_info_t *sd, uint mode)
1223 {
1224 if (mode == SDPCM_TXGLOM_CPY)
1225 sd->txglom_mode = mode;
1226 else if ((mode == SDPCM_TXGLOM_MDESC) && (sd->version == HOST_CONTR_VER_3))
1227 sd->txglom_mode = mode;
1228
1229 return (sd->txglom_mode);
1230 }
1231
1232 bool
1233 sdioh_glom_enabled(void)
1234 {
1235 return sd_txglom;
1236 }
1237 #endif /* BCMSDIOH_TXGLOM */
1238
1239 extern SDIOH_API_RC
1240 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
1241 uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
1242 {
1243 uint8 is_ddr50 = FALSE;
1244 int len;
1245 int buflen = (int)buflen_u;
1246 bool fifo = (fix_inc == SDIOH_DATA_FIX);
1247 uint8 *localbuf = NULL, *tmpbuf = NULL;
1248 bool local_blockmode = sd->sd_blockmode;
1249 SDIOH_API_RC status = SDIOH_API_RC_SUCCESS;
1250
1251 sdstd_lock(sd);
1252
1253 is_ddr50 = (sd_uhsimode == SD3CLKMODE_4_DDR50) ? TRUE : FALSE;
1254
1255 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);
1256
1257 /* Change to DATA_TRANSFER_ONGOING , protection against tuning tasklet */
1258 sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);
1259
1260 ASSERT(reg_width == 4);
1261 ASSERT(buflen_u < (1 << 30));
1262 ASSERT(sd->client_block_size[func]);
1263
1264 #ifdef BCMSDIOH_TXGLOM
1265 if (sd_txglom) {
1266 while (pkt) {
1267 sdioh_glom_post(sd, PKTDATA(sd->osh, pkt), pkt, PKTLEN(sd->osh, pkt));
1268 pkt = PKTNEXT(sd->osh, pkt);
1269 }
1270 }
1271 #endif
1272 sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
1273 __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
1274 buflen_u, sd->r_cnt, sd->t_cnt, pkt));
1275
1276 /* Break buffer down into blocksize chunks:
1277 * Bytemode: 1 block at a time.
1278 * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
1279 * Both: leftovers are handled last (will be sent via bytemode).
1280 */
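/* Illustrative example, assuming the default 64-byte F2 block size and
 * SD_PAGE (4096): a 10000-byte blockmode request goes out as 4096 + 4096 +
 * 1792-byte block-aligned transfers, and the trailing 16 bytes are sent as
 * the final, smaller-than-a-block chunk.
 */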
1281 while (buflen > 0) {
1282 if (local_blockmode) {
1283 int max_tran_size = SD_PAGE;
1284 #ifdef BCMSDIOH_TXGLOM
1285 /* There is no alignment requirement for HC3 */
1286 if ((sd->version == HOST_CONTR_VER_3) && sd_txglom)
1287 max_tran_size = SD_PAGE * 4;
1288 #endif
1289 /* Max xfer is Page size */
1290 len = MIN(max_tran_size, buflen);
1291
1292 /* Round down to a block boundary */
1293 if (buflen > sd->client_block_size[func])
1294 len = (len/sd->client_block_size[func]) *
1295 sd->client_block_size[func];
1296 /* XXX Arasan trashes 3-byte transfers, WAR to add one byte extra. */
1297 /* XXX In case of SDIO 3.0 DDR50 mode, if the number of bytes to be
1298 * transferred is odd, append one more byte to make it even.
1299 * Check if odd byte counts can occur for SDIO_FUNC_2 as well.
1300 */
1301 if ((func == SDIO_FUNC_1) && (((len % 4) == 3) || (((len % 2) == 1) &&
1302 (is_ddr50))) && ((rw == SDIOH_WRITE) || (rw == SDIOH_READ))) {
1303 sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
1304 len++;
1305 tmpbuf = buffer;
1306 if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
1307 sd_err(("out of memory, malloced %d bytes\n",
1308 MALLOCED(sd->osh)));
1309 status = SDIOH_API_RC_FAIL;
1310 goto done;
1311 }
1312 bcopy(buffer, localbuf, len);
1313 buffer = localbuf;
1314 }
1315 } else {
1316 /* Byte mode: One block at a time */
1317 len = MIN(sd->client_block_size[func], buflen);
1318 }
1319
1320 if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
1321 status = SDIOH_API_RC_FAIL;
1322 }
1323
1324 /* XXX Restore len and buffer pointer WAR'ed for Arasan 3-byte transfer problem */
1325 /* XXX WAR for SDIO3.0 DDR50 mode. */
1326 if (local_blockmode && localbuf) {
1327 MFREE(sd->osh, localbuf, len);
1328 len--;
1329 buffer = tmpbuf;
1330 sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__));
1331 }
1332
1333 if (status == SDIOH_API_RC_FAIL) {
1334 goto done;
1335 }
1336
1337 buffer += len;
1338 buflen -= len;
1339 if (!fifo)
1340 addr += len;
1341 #ifdef BCMSDIOH_TXGLOM
1342 /* This loop should not repeat for glommed pkts, since they are sent
1343 * either in multiples of the block size or as a total size smaller than a block
1344 */
1345 if (sd->glom_info.count != 0)
1346 buflen = 0;
1347 #endif
1348 }
1349 done:
1350
1351 /* Change to DATA_TRANSFER_IDLE */
1352 sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
1353
1354 /* check if we have to do tuning; if so, start */
1355 sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
1356
1357 sdstd_unlock(sd);
1358
1359 #ifdef BCMSDIOH_TXGLOM
1360 if (sd_txglom)
1361 sdioh_glom_clear(sd);
1362 #endif
1363
1364 return status;
1365 }
1366
1367 extern SDIOH_API_RC
1368 sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
1369 {
1370 uint offset = 0;
1371 uint16 val;
1372
1373 /* check if upper bank */
1374 if (gpio >= SDH_GPIO16) {
1375 gpio -= SDH_GPIO16;
1376 offset = 2;
1377 }
1378
1379 val = sdstd_rreg16(sd, SD_GPIO_OE + offset);
1380 val |= (1 << gpio);
1381 sdstd_wreg16(sd, SD_GPIO_OE + offset, val);
1382
1383 return SDIOH_API_RC_SUCCESS;
1384 }
1385
1386 extern SDIOH_API_RC
1387 sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
1388 {
1389 uint offset = 0;
1390 uint16 val;
1391
1392 /* check if upper bank */
1393 if (gpio >= SDH_GPIO16) {
1394 gpio -= SDH_GPIO16;
1395 offset = 2;
1396 }
1397
1398 val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
1399 if (enab == TRUE)
1400 val |= (1 << gpio);
1401 else
1402 val &= ~(1 << gpio);
1403 sdstd_wreg16(sd, SD_GPIO_Reg + offset, val);
1404
1405 return SDIOH_API_RC_SUCCESS;
1406 }
1407
1408 extern bool
1409 sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
1410 {
1411 uint offset = 0;
1412 uint16 val;
1413
1414 /* check if upper bank */
1415 if (gpio >= SDH_GPIO16) {
1416 gpio -= SDH_GPIO16;
1417 offset = 2;
1418 }
1419
1420 val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
1421 val = (val >> gpio) & 1;
1422
1423 return (val == 1);
1424 }
1425
1426 extern SDIOH_API_RC
1427 sdioh_gpio_init(sdioh_info_t *sd)
1428 {
1429 uint rev;
1430
1431 rev = sdstd_rreg16(sd, SD_HostControllerVersion) >> 8;
1432
1433 /* Only P206 (fpga rev >= 16) supports gpio */
1434 if (rev < 16) {
1435 sd_err(("%s: gpio not supported in rev %d \n", __FUNCTION__, rev));
1436 return SDIOH_API_RC_FAIL;
1437 }
1438
1439 sdstd_wreg16(sd, SD_GPIO_Enable, SDH_GPIO_ENABLE);
1440 sdstd_wreg16(sd, SD_GPIO_Enable + 2, SDH_GPIO_ENABLE);
1441
1442 /* Default to input */
1443 sdstd_wreg16(sd, SD_GPIO_OE, 0);
1444 sdstd_wreg16(sd, SD_GPIO_OE + 2, 0);
1445
1446 return SDIOH_API_RC_SUCCESS;
1447 }
1448
1449 extern SDIOH_API_RC
1450 sdioh_sleep(sdioh_info_t *sd, bool enab)
1451 {
1452 SDIOH_API_RC status;
1453 uint32 cmd_arg = 0, rsp1 = 0;
1454 int retry = 100;
1455
1456 sdstd_lock(sd);
1457
1458 cmd_arg = SFIELD(cmd_arg, CMD14_RCA, sd->card_rca);
1459 cmd_arg = SFIELD(cmd_arg, CMD14_SLEEP, enab);
1460
1461 /*
1462 * For ExitSleep:
1463 * 1) Repeat CMD14 until R1 is received
1464 * 2) Send CMD7
1465 */
1466 status = SDIOH_API_RC_FAIL;
1467 while (retry-- > 0) {
1468 if ((sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_14, cmd_arg)) == SUCCESS) {
1469 status = SDIOH_API_RC_SUCCESS;
1470 break;
1471 }
1472 OSL_DELAY(1400);
1473 }
1474
1475 if (status == SDIOH_API_RC_FAIL) {
1476 sd_err(("%s: CMD14: failed! enable:%d\n", __FUNCTION__, enab));
1477 goto exit;
1478 }
1479
1480 sdstd_cmd_getrsp(sd, &rsp1, 1);
1481 sd_info(("%s: CMD14 OK: cmd_resp:0x%x\n", __FUNCTION__, rsp1));
1482
1483 /* ExitSleep: Send CMD7 After R1 */
1484 if (enab == FALSE) {
1485 /* Select the card */
1486 cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
1487 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg)) != SUCCESS) {
1488 sd_err(("%s: CMD14 send CMD7 failed!\n", __FUNCTION__));
1489 status = SDIOH_API_RC_FAIL;
1490 goto exit;
1491 }
1492
1493 sdstd_cmd_getrsp(sd, &rsp1, 1);
1494 if (rsp1 != SDIOH_CMD7_EXP_STATUS) {
1495 sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
1496 __FUNCTION__, rsp1));
1497 status = SDIOH_API_RC_FAIL;
1498 goto exit;
1499 }
1500 }
1501
1502 exit:
1503 sdstd_unlock(sd);
1504
1505 return status;
1506 }
1507
1508 /* XXX Copied guts of request_byte and cmd_issue. Might make sense to fold this into
1509 * those by passing another parameter indicating command type (abort). [But maybe
1510 * keeping it separate is better -- if called internally on command failure it's less
1511 * recursion to wrap your head around?]
1512 */
1513 static int
1514 sdstd_abort(sdioh_info_t *sd, uint func)
1515 {
1516 int err = 0;
1517 int retries;
1518
1519 uint16 cmd_reg;
1520 uint32 cmd_arg;
1521 uint32 rsp5;
1522 uint8 rflags;
1523
1524 uint16 int_reg = 0;
1525 uint16 plain_intstatus;
1526
1527 /* Argument is write to F0 (CCCR) IOAbort with function number */
1528 cmd_arg = 0;
1529 cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0);
1530 cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT);
1531 cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE);
1532 cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
1533 cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func);
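/* For example, aborting function 2 ends up as a CMD52 write of 0x02 to the
 * CCCR I/O Abort register (SDIOD_CCCR_IOABORT); the same register's RES bit
 * (0x08) is what sdstd_reset() writes for a soft card reset below.
 */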
1534
1535 /* Command is CMD52 write */
1536 cmd_reg = 0;
1537 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY);
1538 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
1539 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
1540 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
1541 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT);
1542 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52);
1543
1544 /* XXX Copied from cmd_issue(), but no SPI response handling! */
1545 if (sd->sd_mode == SDIOH_MODE_SPI) {
1546 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
1547 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
1548 }
1549
1550 /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */
1551 /* XXX For a single-threaded driver, what circumstances would result
1552 * in cmd_inhibit being on but going off in a short time? Experiment
1553 * shows a HW command timeout doesn't leave inhibit on, so maybe a SW
1554 * timeout? Then that command should be responsible for clearing...
1555 */
1556 retries = RETRIES_SMALL;
1557 while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) {
1558 if (retries == RETRIES_SMALL)
1559 sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n",
1560 __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
1561 if (!--retries) {
1562 sd_err(("%s: Command Inhibit timeout, state 0x%08x\n",
1563 __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
1564 if (trap_errs)
1565 ASSERT(0);
1566 err = BCME_SDIO_ERROR;
1567 goto done;
1568 }
1569 }
1570
1571 /* Clear errors from any previous commands */
1572 if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) {
1573 sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus));
1574 sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
1575 }
1576 plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus);
1577 if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) {
1578 sd_err(("abort: intstatus 0x%04x\n", plain_intstatus));
1579 if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) {
1580 sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n"));
1581 }
1582 if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) {
1583 sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n"));
1584 err = BCME_NODEVICE;
1585 goto done;
1586 }
1587 }
1588
1589 /* Issue the command */
1590 sdstd_wreg(sd, SD_Arg0, cmd_arg);
1591 sdstd_wreg16(sd, SD_Command, cmd_reg);
1592
1593 /* In interrupt mode return, expect later CMD_COMPLETE interrupt */
1594 if (!sd->polled_mode)
1595 return err;
1596
1597 /* Otherwise, wait for the command to complete */
1598 retries = RETRIES_LARGE;
1599 do {
1600 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
1601 } while (--retries &&
1602 (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
1603 (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
1604
1605 /* If command completion fails, do a cmd reset and note the error */
1606 if (!retries) {
1607 sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n",
1608 __FUNCTION__, int_reg,
1609 sdstd_rreg16(sd, SD_ErrorIntrStatus),
1610 sdstd_rreg(sd, SD_PresentState)));
1611
1612 sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
1613 retries = RETRIES_LARGE;
1614 do {
1615 sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
1616 } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
1617 SW_RESET_CMD)) && retries--);
1618
1619 if (!retries) {
1620 sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
1621 }
1622
1623 if (trap_errs)
1624 ASSERT(0);
1625
1626 err = BCME_SDIO_ERROR;
1627 }
1628
1629 /* Clear Command Complete interrupt */
1630 int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
1631 sdstd_wreg16(sd, SD_IntrStatus, int_reg);
1632
1633 /* Check for Errors */
1634 if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) {
1635 sd_err(("%s: ErrorintrStatus: 0x%x, "
1636 "(intrstatus = 0x%x, present state 0x%x) clearing\n",
1637 __FUNCTION__, plain_intstatus,
1638 sdstd_rreg16(sd, SD_IntrStatus),
1639 sdstd_rreg(sd, SD_PresentState)));
1640
1641 sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
1642
1643 sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
1644 retries = RETRIES_LARGE;
1645 do {
1646 sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
1647 } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
1648 SW_RESET_DAT)) && retries--);
1649
1650 if (!retries) {
1651 sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
1652 }
1653
1654 if (trap_errs)
1655 ASSERT(0);
1656
1657 /* ABORT is dataless, only cmd errs count */
1658 /* XXX But what about busy timeout? Response valid? */
1659 if (plain_intstatus & ERRINT_CMD_ERRS)
1660 err = BCME_SDIO_ERROR;
1661 }
1662
1663 /* If command failed don't bother looking at response */
1664 if (err)
1665 goto done;
1666
1667 /* Otherwise, check the response */
1668 sdstd_cmd_getrsp(sd, &rsp5, 1);
1669 rflags = GFIELD(rsp5, RSP5_FLAGS);
1670
1671 if (rflags & SD_RSP_R5_ERRBITS) {
1672 sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags));
1673
1674 /* The CRC error flag applies to the previous command */
1675 if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) {
1676 err = BCME_SDIO_ERROR;
1677 goto done;
1678 }
1679 }
1680
1681 if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) &&
1682 ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) {
1683 sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags));
1684 err = BCME_SDIO_ERROR;
1685 goto done;
1686 }
1687
1688 if (GFIELD(rsp5, RSP5_STUFF)) {
1689 sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
1690 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
1691 err = BCME_SDIO_ERROR;
1692 goto done;
1693 }
1694
1695 done:
1696 if (err == BCME_NODEVICE)
1697 return err;
1698
1699 /* XXX As per spec 3.7.1 (and to be safe) do the resets here */
1700 sdstd_wreg8(sd, SD_SoftwareReset,
1701 SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1));
1702
1703 retries = RETRIES_LARGE;
1704 do {
1705 rflags = sdstd_rreg8(sd, SD_SoftwareReset);
1706 if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
1707 break;
1708 } while (--retries);
1709
1710 if (!retries) {
1711 sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n",
1712 __FUNCTION__, rflags));
1713 err = BCME_SDIO_ERROR;
1714 }
1715
1716 return err;
1717 }
1718
1719 extern int
1720 sdioh_abort(sdioh_info_t *sd, uint fnum)
1721 {
1722 int ret;
1723
1724 sdstd_lock(sd);
1725 ret = sdstd_abort(sd, fnum);
1726 sdstd_unlock(sd);
1727
1728 return ret;
1729 }
1730
1731 int
1732 sdioh_start(sdioh_info_t *sd, int stage)
1733 {
1734 return SUCCESS;
1735 }
1736
1737 int
1738 sdioh_stop(sdioh_info_t *sd)
1739 {
1740 return SUCCESS;
1741 }
1742
1743 int
1744 sdioh_waitlockfree(sdioh_info_t *sd)
1745 {
1746 sdstd_waitlockfree(sd);
1747 return SUCCESS;
1748 }
1749
1750 static int
1751 sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg)
1752 {
1753 uint16 regval;
1754 uint retries;
1755 uint function = 0;
1756
1757 /* If no errors, we're done */
1758 if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
1759 return SUCCESS;
1760
1761 #ifdef BCMQT
1762 if (regval == 0xFFFF) {
1763 /* XXX - Getting bogus errors under QT
1764 * XXX - Not sure why; Just ignore for now
1765 */
1766 sd_err(("%s: Bogus SD_ErrorIntrStatus: 0x%x????\n", __FUNCTION__, regval));
1767 sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
1768 return SUCCESS;
1769 }
1770 #endif
1771
1772 sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
1773 __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
1774 sdstd_rreg(sdioh_info, SD_PresentState)));
1775 sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
1776
1777 if (cmd == SDIOH_CMD_14) {
1778 if (regval & ERRINT_CMD_TIMEOUT_BIT) {
1779 /* PR 101351: sdiod_aos sleep followed by immediate wakeup
1780 * before sdiod_aos takes over has a problem.
1781 * Getting command timeouts while exiting sleep
1782 * with CMD14. Ignore this error due to this PR.
1783 */
1784 regval &= ~ERRINT_CMD_TIMEOUT_BIT;
1785 }
1786 }
1787
1788 /* On command error, issue CMD reset */
1789 if (regval & ERRINT_CMD_ERRS) {
1790 sd_trace(("%s: issuing CMD reset\n", __FUNCTION__));
1791 sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
1792 for (retries = RETRIES_LARGE; retries; retries--)
1793 if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
1794 break;
1795 if (!retries) {
1796 sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
1797 }
1798 }
1799
1800 /* On data error, issue DAT reset */
1801 if (regval & ERRINT_DATA_ERRS) {
1802 if (regval & ERRINT_ADMA_BIT)
1803 sd_err(("%s:ADMAError: status:0x%x\n",
1804 __FUNCTION__, sdstd_rreg(sdioh_info, SD_ADMA_ErrStatus)));
1805 sd_trace(("%s: issuing DAT reset\n", __FUNCTION__));
1806 sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
1807 for (retries = RETRIES_LARGE; retries; retries--)
1808 if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
1809 break;
1810 if (!retries) {
1811 sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
1812 }
1813 }
1814
1815 /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */
1816 if (cmd == SDIOH_CMD_53)
1817 function = GFIELD(arg, CMD53_FUNCTION);
1818 else if (cmd == SDIOH_CMD_52) {
1819 /* PR 101351: sdiod_aos sleep followed by immediate wakeup
1820 * before sdiod_aos takes over has a problem.
1821 */
1822 if (GFIELD(arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR)
1823 function = GFIELD(arg, CMD52_FUNCTION);
1824 }
1825 if (function) {
1826 sd_trace(("%s: requesting abort for function %d after cmd %d\n",
1827 __FUNCTION__, function, cmd));
1828 sdstd_abort(sdioh_info, function);
1829 }
1830
1831 if (trap_errs)
1832 ASSERT(0);
1833
1834 return ERROR;
1835 }
1836
1837 #ifdef BCMINTERNAL
1838 extern SDIOH_API_RC
1839 sdioh_test_diag(sdioh_info_t *sd)
1840 {
1841 sd_err(("%s: Implement me\n", __FUNCTION__));
1842 return (0);
1843 }
1844 #endif /* BCMINTERNAL */
1845
1846 /*
1847 * Private/Static work routines
1848 */
1849 static bool
1850 sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
1851 {
1852 int retries = RETRIES_LARGE;
1853 uchar regval;
1854
1855 if (!sd)
1856 return TRUE;
1857
1858 sdstd_lock(sd);
1859 /* Reset client card */
1860 if (client_reset && (sd->adapter_slot != -1)) {
1861 if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
1862 sd_err(("%s: Cannot write to card reg 0x%x\n",
1863 __FUNCTION__, SDIOD_CCCR_IOABORT));
1864 else
1865 sd->card_rca = 0;
1866 }
1867
1868 /* Reset host controller */
1869 if (host_reset) {
1870 regval = SFIELD(0, SW_RESET_ALL, 1);
1871 sdstd_wreg8(sd, SD_SoftwareReset, regval);
1872 do {
1873 sd_trace(("%s: waiting for reset\n", __FUNCTION__));
1874 } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
1875
1876 if (!retries) {
1877 sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
1878 sdstd_unlock(sd);
1879 return (FALSE);
1880 }
1881
1882 /* A reset should reset bus back to 1 bit mode */
1883 sd->sd_mode = SDIOH_MODE_SD1;
1884 sdstd_set_dma_mode(sd, sd->sd_dma_mode);
1885 }
1886 sdstd_unlock(sd);
1887 return TRUE;
1888 }
1889
1890 /* Disable device interrupt */
1891 void
1892 sdstd_devintr_off(sdioh_info_t *sd)
1893 {
1894 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1895 if (sd->use_client_ints) {
1896 sd->intmask &= ~CLIENT_INTR;
1897 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
1898 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
1899 }
1900 }
1901
1902 /* Enable device interrupt */
1903 void
1904 sdstd_devintr_on(sdioh_info_t *sd)
1905 {
1906 ASSERT(sd->lockcount == 0);
1907 sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
1908 if (sd->use_client_ints) {
1909 if (sd->version < HOST_CONTR_VER_3) {
1910 uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
1911 sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
1912 sdstd_wreg16(sd, SD_IntrStatusEnable, status);
1913 }
1914
1915 sd->intmask |= CLIENT_INTR;
1916 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
1917 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
1918 }
1919 }
1920
1921 #ifdef BCMSDYIELD
1922 /* Enable/disable other interrupts */
1923 void
1924 sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
1925 {
1926 if (err) {
1927 norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
1928 sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
1929 }
1930
1931 sd->intmask |= norm;
1932 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
1933 if (sd_forcerb)
1934 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
1935 }
1936
1937 void
1938 sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
1939 {
1940 if (err) {
1941 norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
1942 sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
1943 }
1944
1945 sd->intmask &= ~norm;
1946 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
1947 if (sd_forcerb)
1948 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
1949 }
1950 #endif /* BCMSDYIELD */
1951
1952 static int
1953 sdstd_host_init(sdioh_info_t *sd)
1954 {
1955 int num_slots, full_slot;
1956 uint8 reg8;
1957 uint32 card_ins;
1958 int slot, first_bar = 0;
1959 bool detect_slots = FALSE;
1960 #ifdef _WIN32
1961 NDIS_PHYSICAL_ADDRESS bar;
1962 #else
1963 uint bar;
1964 #endif
1965
1966 /* Check for Arasan ID */
1967 if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) {
1968 sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__));
1969 sd->controller_type = SDIOH_TYPE_ARASAN_HDK;
1970 detect_slots = TRUE;
1971 /* Controller supports SDMA, so turn it on here. */
1972 sd->sd_dma_mode = DMA_MODE_SDMA;
1973 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
1974 sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
1975 sd->controller_type = SDIOH_TYPE_BCM27XX;
1976 detect_slots = FALSE;
1977 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) {
1978 sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__));
1979 sd->controller_type = SDIOH_TYPE_TI_PCIXX21;
1980 detect_slots = TRUE;
1981 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) {
1982 sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n",
1983 __FUNCTION__));
1984 sd->controller_type = SDIOH_TYPE_RICOH_R5C822;
1985 detect_slots = TRUE;
1986 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) {
1987 sd_info(("%s: JMicron Standard SDIO Host Controller\n",
1988 __FUNCTION__));
1989 sd->controller_type = SDIOH_TYPE_JMICRON;
1990 detect_slots = TRUE;
1991 #ifdef BCMINTERNAL
1992 } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JINVANI) {
1993 sd_info(("%s: Found Jinvani Standard SDIO Host Controller\n", __FUNCTION__));
1994 detect_slots = FALSE;
1995 sd->controller_type = SDIOH_TYPE_JINVANI_GOLD;
1996 #endif /* BCMINTERNAL */
1997 } else {
1998 return ERROR;
1999 }
2000
2001 /*
2002 * Determine num of slots
2003 * Search each slot
2004 */
2005
2006 first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7;
2007 num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4;
2008 num_slots &= 7;
2009 num_slots++; /* map bits to num slots according to spec */
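	/* SD_SlotInfo layout (as used above): bits [2:0] give the first BAR
	 * number and bits [6:4] give (number of slots - 1), hence the mask,
	 * shift and increment. Example: a value of 0x10 decodes to two slots
	 * starting at BAR0.
	 */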
2010
2011 /* XXX Since the sdio20h core does not present the proper SD_SlotInfo
2012 * register at PCI config space offset 0x40, fake it here. Also,
2013 * set the BAR0 window to point to the sdio20h core.
2014 */
2015 if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
2016 ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
2017 sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__));
2018 /* Set BAR0 Window to SDIOSTH core */
2019 OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
2020
2021 /* Set defaults particular to this controller. */
2022 detect_slots = TRUE;
2023 num_slots = 1;
2024 first_bar = 0;
2025
2026 /* Controller supports ADMA2, so turn it on here. */
2027 sd->sd_dma_mode = DMA_MODE_ADMA2;
2028 }
2029
2030 /* Map in each slot on the board and query it to see if a
2031 * card is inserted. Use the first populated slot found.
2032 */
2033 if (sd->mem_space) {
2034 sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
2035 sd->mem_space = NULL;
2036 }
2037
2038 full_slot = -1;
2039
2040 for (slot = 0; slot < num_slots; slot++) {
2041 /* XXX :Ugly define, is there a better way */
2042 #ifdef _WIN32
2043 bar.HighPart = 0;
2044 bar.LowPart = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0
2045 + (4*(slot + first_bar)), 4);
2046 sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
2047 (int32)&bar, SDIOH_REG_WINSZ);
2048 #else
2049 bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
2050 sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
2051 (uintptr)bar, SDIOH_REG_WINSZ);
2052 #endif
2053
2054 sd->adapter_slot = -1;
2055
2056 if (detect_slots) {
2057 card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
2058 } else {
2059 card_ins = TRUE;
2060 }
2061
2062 if (card_ins) {
2063 sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
2064 if (full_slot < 0)
2065 full_slot = slot;
2066 } else {
2067 sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
2068 }
2069
2070 if (sd->mem_space) {
2071 sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
2072 sd->mem_space = NULL;
2073 }
2074 }
2075
2076 if (full_slot < 0) {
2077 sd_err(("No slots on SDIO controller are populated\n"));
2078 return -1;
2079 }
2080
2081 /* XXX :Ugly define, is there a better way */
2082 #ifdef _WIN32
2083 bar.HighPart = 0;
2084 bar.LowPart = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
2085 sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (int32)&bar, SDIOH_REG_WINSZ);
2086 #else
2087 bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
2088 sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
2089 #endif
2090
2091 sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
2092 full_slot,
2093 (full_slot + first_bar),
2094 OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
2095 sd->mem_space));
2096
2097 sd->adapter_slot = full_slot;
2098
2099 sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
2100 switch (sd->version) {
2101 case 0:
2102 sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
2103 sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
2104 break;
2105 case 1:
2106 sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
2107 sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
2108 break;
2109 case 2:
2110 sd_err(("Host Controller version 3.0, Vendor Revision: 0x%02x\n",
2111 sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
2112 break;
2113 default:
2114 sd_err(("%s: Host Controller version 0x%02x not supported.\n",
2115 __FUNCTION__, sd->version));
2116 break;
2117 }
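	/* As decoded above, the low byte of SD_HostControllerVersion encodes the
	 * spec version (0 = 1.0, 1 = 2.0, 2 = 3.0) and the high byte holds the
	 * vendor-specific revision.
	 */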
2118
2119 sd->caps = sdstd_rreg(sd, SD_Capabilities); /* Cache this for later use */
2120 /* MSB 32 bits of caps supported in sdio 3.0 */
2121 sd->caps3 = sdstd_rreg(sd, SD_Capabilities3); /* Cache this for later use */
2122 sd3_trace(("sd3: %s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
2123 sd3_trace(("sd3: %s: caps3: 0x%x\n", __FUNCTION__, sd->caps3));
2124 sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);
2125
2126 sd_info(("%s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
2127
2128 sdstd_set_dma_mode(sd, sd->sd_dma_mode);
2129
2130 #if defined(BCMINTERNAL)
2131 if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
2132 ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
2133 sd_err(("* * * SDIO20H FPGA Build Date: 0x%04x\n", sdstd_rreg(sd, 0x110)));
2134 }
2135
2136 if (GFIELD(sd->caps, CAP_MAXBLOCK) == 0x3) {
2137 sd_info(("SD HOST CAPS: Max block size is INVALID\n"));
2138 } else {
2139 sd_info(("SD HOST CAPS: Max block size is %d bytes\n",
2140 512 << GFIELD(sd->caps, CAP_MAXBLOCK)));
2141 }
2142
2143 sd_info(("SD HOST CAPS: 64-bit DMA is %ssupported.\n",
2144 GFIELD(sd->caps, CAP_64BIT_HOST) ? "" : "not "));
2145 sd_info(("SD HOST CAPS: Suspend/Resume is %ssupported.\n",
2146 GFIELD(sd->caps, CAP_SUSPEND) ? "" : "not "));
2147
2148 sd_err(("SD HOST CAPS: SD Host supports "));
2149 if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
2150 sd_err(("3.3V"));
2151 if (GFIELD(sd->curr_caps, CAP_CURR_3_3)) {
2152 sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_3_3)));
2153 }
2154 }
2155 if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
2156 sd_err((", 3.0V"));
2157 if (GFIELD(sd->curr_caps, CAP_CURR_3_0)) {
2158 sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_3_0)));
2159 }
2160 }
2161 if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
2162 sd_err((", 1.8V"));
2163 if (GFIELD(sd->curr_caps, CAP_CURR_1_8)) {
2164 sd_err(("@%dmA\n", 4*GFIELD(sd->curr_caps, CAP_CURR_1_8)));
2165 }
2166 }
2167 sd_err(("\n"));
2168 #endif /* defined(BCMINTERNAL) */
2169
2170 sdstd_reset(sd, 1, 0);
2171
2172 /* Read SD4/SD1 mode */
2173 if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
2174 if (reg8 & SD4_MODE) {
2175 sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
2176 __FUNCTION__, reg8));
2177 }
2178 }
2179
2180 /* Default power on mode is SD1 */
2181 sd->sd_mode = SDIOH_MODE_SD1;
2182 sd->polled_mode = TRUE;
2183 sd->host_init_done = TRUE;
2184 sd->card_init_done = FALSE;
2185 sd->adapter_slot = full_slot;
2186
2187 /* XXX: If sd_uhsimode is disabled, use the HC in SDIO 2.0 mode. */
2188 if (sd_uhsimode == SD3CLKMODE_DISABLED) {
2189 sd->version = HOST_CONTR_VER_2;
2190 sd3_trace(("%s:forcing to SDIO HC 2.0\n", __FUNCTION__));
2191 }
2192
2193 if (sd->version == HOST_CONTR_VER_3) {
2194 /* read host ctrl 2 */
2195 uint16 reg16 = 0;
2196 sd3_trace(("sd3: %s: HC3: reading additional regs\n", __FUNCTION__));
2197
2198 reg16 = sdstd_rreg16(sd, SD3_HostCntrl2);
2199
2200 sd_info(("%s: HCtrl: 0x%x; HCtrl2:0x%x\n", __FUNCTION__, reg8, reg16));
2201 BCM_REFERENCE(reg16);
2202
2203 /* if HC supports 1.8V and one of the SDR/DDR modes, HC UHS-I support is present */
2204 if ((GFIELD(sd->caps, CAP_VOLT_1_8)) &&
2205 (GFIELD(sd->caps3, CAP3_SDR50_SUP) ||
2206 GFIELD(sd->caps3, CAP3_SDR104_SUP) ||
2207 GFIELD(sd->caps3, CAP3_DDR50_SUP)))
2208 sd->host_UHSISupported = 1;
2209 }
2210
2211 #ifdef BCMQT
2212 {
2213 uint32 intmask;
2214
2215 /* FIX: force interrupts with QT sdio20 host */
2216 /* pci cw [expr $def(configbase) +0x95] 1 2 */
2217 intmask = OSL_PCI_READ_CONFIG(sd->osh, PCI_INT_MASK, 4);
2218 intmask |= 0x0200;
2219 OSL_PCI_WRITE_CONFIG(sd->osh, PCI_INT_MASK, 4, intmask);
2220 }
2221 #endif
2222 return (SUCCESS);
2223 }
2224 #define CMD5_RETRIES 200
2225 static int
2226 get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
2227 {
2228 int retries, status;
2229
2230 /* Get the Card's Operation Condition. Occasionally the board
2231 * takes a while to become ready
2232 */
2233 retries = CMD5_RETRIES;
2234 do {
2235 *cmd_rsp = 0;
2236 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
2237 != SUCCESS) {
2238 sd_err(("%s: CMD5 failed\n", __FUNCTION__));
2239 return status;
2240 }
2241 sdstd_cmd_getrsp(sd, cmd_rsp, 1);
2242 if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
2243 sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
2244 } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
2245 if (!retries)
2246 return ERROR;
2247
2248 return (SUCCESS);
2249 }
2250
2251 static int
2252 sdstd_client_init(sdioh_info_t *sd)
2253 {
2254 uint32 cmd_arg, cmd_rsp;
2255 int status;
2256 uint8 fn_ints;
2257 uint32 regdata;
2258 uint16 powerstat = 0;
2259
2260 #ifdef BCMINTERNAL
2261 #ifdef NOTUSED
2262 /* Handy routine to dump capabilities. */
2263 static char caps_buf[500];
2264 parse_caps(sd->caps, caps_buf, 500);
2265 sd_err((caps_buf));
2266 #endif /* NOTUSED */
2267 #endif /* BCMINTERNAL */
2268
2269 sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
2270
2271 /* Clear any pending ints */
2272 sdstd_wreg16(sd, SD_IntrStatus, 0x1fff);
2273 sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);
2274
2275 /* Enable both Normal and Error Status. This does not enable
2276 * interrupts, it only enables the status bits to
2277 * become 'live'
2278 */
2279
2280 if (!sd->host_UHSISupported)
2281 sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
2282 else
2283 {
2284 /* Enable INT_x interrupt status, but DO NOT enable signalling [the retuning
2285 * enable will happen later]
2286 */
2287 sdstd_wreg16(sd, SD_IntrStatusEnable, 0x0fff);
2288 }
2289 sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);
2290
2291 sdstd_wreg16(sd, SD_IntrSignalEnable, 0); /* Disable ints for now. */
2292
2293 if (sd->host_UHSISupported) {
2294 /* when HC is started for SDIO 3.0 mode, start in lowest voltage mode first. */
2295 powerstat = sdstd_start_power(sd, 1);
2296 if (SDIO_OCR_READ_FAIL == powerstat) {
2297 /* This could be because the device is 3.3V and possibly does
2298 * not have sdio3.0 support. So, try the highest voltage
2299 */
2300 sd_err(("sdstd_start_power: legacy device: trying highest voltage\n"));
2301 sd_err(("%s failed\n", __FUNCTION__));
2302 return ERROR;
2303 } else if (TRUE != powerstat) {
2304 sd_err(("sdstd_start_power failed\n"));
2305 return ERROR;
2306 }
2307 } else
2308 /* XXX legacy driver: start in highest voltage mode first.
2309 * CAUTION: trying to start a legacy dhd with an sdio3.0 HC and sdio3.0 device could
2310 * burn the sdio3.0 device if the device has started in 1.8V.
2311 */
2312 if (TRUE != sdstd_start_power(sd, 0)) {
2313 sd_err(("sdstd_start_power failed\n"));
2314 return ERROR;
2315 }
2316
2317 if (sd->num_funcs == 0) {
2318 sd_err(("%s: No IO funcs!\n", __FUNCTION__));
2319 return ERROR;
2320 }
2321
2322 /* In SPI mode, issue CMD0 first */
2323 if (sd->sd_mode == SDIOH_MODE_SPI) {
2324 cmd_arg = 0;
2325 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
2326 != SUCCESS) {
2327 sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
2328 return status;
2329 }
2330 }
2331
2332 if (sd->sd_mode != SDIOH_MODE_SPI) {
2333 uint16 rsp6_status;
2334
2335 /* Card is operational. Ask it to send an RCA */
2336 cmd_arg = 0;
2337 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
2338 != SUCCESS) {
2339 sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
2340 return status;
2341 }
2342
2343 /* Verify the card status returned with the cmd response */
2344 sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
2345 rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
2346 if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
2347 GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
2348 GFIELD(rsp6_status, RSP6STAT_ERROR)) {
2349 sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
2350 __FUNCTION__, rsp6_status));
2351 return ERROR;
2352 }
2353
2354 /* Save the Card's RCA */
2355 sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
2356 sd_info(("RCA is 0x%x\n", sd->card_rca));
2357
2358 if (rsp6_status)
2359 sd_err(("raw status is 0x%x\n", rsp6_status));
2360
2361 /* Select the card */
2362 cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
2363 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
2364 != SUCCESS) {
2365 sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
2366 return status;
2367 }
2368 sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
2369 if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
2370 sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
2371 __FUNCTION__, cmd_rsp));
2372 return ERROR;
2373 }
2374 }
2375
2376 /* Disable default/power-up device Card Detect (CD) pull up resistor on DAT3
2377 * via CCCR bus interface control register. Set CD disable bit while leaving
2378 * others alone.
2379 */
2380 if (sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata) != SUCCESS) {
2381 sd_err(("Disabling card detect: read of device CCCR BICTRL register failed\n"));
2382 return ERROR;
2383 }
2384 regdata |= BUS_CARD_DETECT_DIS;
2385
2386 if (sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata) != SUCCESS) {
2387 sd_err(("Disabling card detect: write of device CCCR BICTRL register failed\n"));
2388 return ERROR;
2389 }
2390
2391 sdstd_card_enablefuncs(sd);
2392
2393 if (!sdstd_bus_width(sd, sd_sdmode)) {
2394 sd_err(("sdstd_bus_width failed\n"));
2395 return ERROR;
2396 }
2397
2398 set_client_block_size(sd, 1, sd_f1_blocksize);
2399 fn_ints = INTR_CTL_FUNC1_EN;
2400
2401 if (sd->num_funcs >= 2) {
2402 /* XXX Device side can't handle 512 yet */
2403 set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
2404 fn_ints |= INTR_CTL_FUNC2_EN;
2405 }
2406
2407 /* Enable/Disable Client interrupts */
2408 /* Turn on here but disable at host controller? */
2409 if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
2410 (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
2411 sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
2412 return ERROR;
2413 }
2414
2415 if (sd_uhsimode != SD3CLKMODE_DISABLED) {
2416 /* Switch to UHS-I clocking mode if both host and device support it */
2417 if (sdstd_3_clock_wrapper(sd) != SUCCESS) {
2418 sd_err(("sdstd_3_clock_wrapper failed\n"));
2419 return ERROR;
2420 }
2421 } else
2422 {
2423 if (sdstd_clock_wrapper(sd)) {
2424 sd_err(("sdstd_start_clock failed\n"));
2425 return ERROR;
2426 }
2427 }
2428 sd->card_init_done = TRUE;
2429
2430 return SUCCESS;
2431 }
2432
2433 static int
2434 sdstd_clock_wrapper(sdioh_info_t *sd)
2435 {
2436 sd_trace(("%s:Enter\n", __FUNCTION__));
2437 /* After configuring for High-Speed mode, set the desired clock rate. */
2438 sdstd_set_highspeed_mode(sd, (bool)sd_hiok);
2439
2440 if (FALSE == sdstd_start_clock(sd, (uint16)sd_divisor)) {
2441 sd_err(("sdstd_start_clock failed\n"));
2442 return ERROR;
2443 }
2444 return SUCCESS;
2445 }
2446
2447 static int
2448 sdstd_3_clock_wrapper(sdioh_info_t *sd)
2449 {
2450 int retclk = 0;
2451 sd_info(("%s: Enter\n", __FUNCTION__));
2452 if (sd->card_UHSI_voltage_Supported) {
2453 /* check if clk config requested is supported by both host and target. */
2454 retclk = sdstd_3_get_matching_uhsi_clkmode(sd, sd_uhsimode);
2455
2456 /* if no match for requested caps, try to get the max match possible */
2457 if (retclk == -1) {
2458 /* if auto enabled */
2459 if (sd3_autoselect_uhsi_max == 1) {
2460 retclk = sdstd_3_get_matching_uhsi_clkmode(sd, SD3CLKMODE_AUTO);
2461 /* still NO match */
2462 if (retclk == -1) {
2463 /* NO match with HC and card capabilities. Now try the
2464 * High speed/legacy mode if possible.
2465 */
2466
2467 sd_err(("%s: Not able to set requested clock\n",
2468 __FUNCTION__));
2469 return ERROR;
2470 }
2471 } else {
2472 /* means user doesn't want auto clock. So return ERROR */
2473 sd_err(("%s: Not able to set requested clock, Try"
2474 "auto mode\n", __FUNCTION__));
2475 return ERROR;
2476 }
2477 }
2478
2479 if (retclk != -1) {
2480 /* set the current clk to be selected clock */
2481 sd_uhsimode = retclk;
2482
2483 if (BCME_OK != sdstd_3_set_highspeed_uhsi_mode(sd, sd_uhsimode)) {
2484 sd_err(("%s: Not able to set requested clock\n", __FUNCTION__));
2485 return ERROR;
2486 }
2487 } else {
2488 /* try legacy mode */
2489 if (SUCCESS != sdstd_clock_wrapper(sd)) {
2490 sd_err(("sdstd_start_clock failed\n"));
2491 return ERROR;
2492 }
2493 }
2494 } else {
2495 sd_info(("%s: Legacy Mode Clock\n", __FUNCTION__));
2496 /* try legacy mode */
2497 if (SUCCESS != sdstd_clock_wrapper(sd)) {
2498 sd_err(("%s sdstd_clock_wrapper failed\n", __FUNCTION__));
2499 return ERROR;
2500 }
2501 }
2502 return SUCCESS;
2503 }
2504
2505 int
2506 sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode)
2507 {
2508 int status, lcount = 0, brr_count = 0;
2509 uint16 val1 = 0, bufready = 0;
2510 uint32 val2 = 0;
2511 uint8 phase_info_local = 0;
2512
2513 sd3_trace(("sd3: %s: Enter\n", __FUNCTION__));
2514 /* if (NOT SDR104) OR
2515 * (SDR_50 AND sdr50_tuning_reqd is NOT enabled)
2516 * return success, as tuning not reqd.
2517 */
2518 if (!sd->sd3_tuning_reqd) {
2519 sd_info(("%s: Tuning NOT reqd!\n", __FUNCTION__));
2520 return SUCCESS;
2521 }
2522
2523 /* execute tuning procedure */
2524
2525 /* enable Buffer ready status. [do not enable the interrupt right now] */
2526 /* Execute tuning */
2527 sd_trace(("sd3: %s: Execute tuning\n", __FUNCTION__));
2528 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2529 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 1);
2530 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2531
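	/* Tuning loop sketch: issue CMD19, wait for Buffer Read Ready, drain the
	 * 64-byte tuning block if the h/w needs it, then re-check EXEC_TUNING.
	 * The controller clears EXEC_TUNING once it has locked onto a sampling
	 * phase; we bail out after MAX_TUNING_ITERS attempts.
	 */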
2532 do {
2533 sd3_trace(("sd3: %s: cmd19 issue\n", __FUNCTION__));
2534 /* Issue cmd19 */
2535 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_19, 0))
2536 != SUCCESS) {
2537 sd_err(("%s: CMD19 failed\n", __FUNCTION__));
2538 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2539 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
2540 val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
2541 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2542 return status;
2543 }
2544
2545 /* wait for buffer read ready */
2546 brr_count = 0;
2547 do {
2548 bufready = sdstd_rreg16(sd, SD_IntrStatus);
2549
2550 if (GFIELD(bufready, INTSTAT_BUF_READ_READY))
2551 break;
2552
2553 /* delay after checking bufready because INTSTAT_BUF_READ_READY
2554 has most likely been set already in the first check
2555 */
2556 OSL_DELAY(1);
2557 } while (++brr_count < CLKTUNING_MAX_BRR_RETRIES);
2558
2559 /* buffer read ready timedout */
2560 if (brr_count == CLKTUNING_MAX_BRR_RETRIES) {
2561 sd_err(("%s: TUNINGFAILED: BRR response timedout!\n",
2562 __FUNCTION__));
2563 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2564 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
2565 val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
2566 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2567 return ERROR;
2568 }
2569
2570 /* In response to CMD19 the card will send 64 magic bytes.
2571 * Current Aizyc HC h/w doesn't auto-clear those bytes.
2572 * So read the 64 bytes sent by the card.
2573 * Aizyc needs to implement an auto clear in hw.
2574 */
2575 if (sd3_sw_read_magic_bytes == TRUE)
2576 {
2577 uint8 l_cnt_1 = 0;
2578 uint32 l_val_1 = 0;
2579 for (l_cnt_1 = 0; l_cnt_1 < 16; l_cnt_1++) {
2580 l_val_1 = sdstd_rreg(sd, SD_BufferDataPort0);
2581 sd_trace(("%s:l_val_1 = 0x%x", __FUNCTION__, l_val_1));
2582 }
2583 BCM_REFERENCE(l_val_1);
2584 }
2585
2586 /* clear BuffReadReady int */
2587 bufready = SFIELD(bufready, INTSTAT_BUF_READ_READY, 1);
2588 sdstd_wreg16(sd, SD_IntrStatus, bufready);
2589
2590 /* wait before continuing */
2591 /* OSL_DELAY(PER_TRY_TUNING_DELAY_MS * 1000); */ /* Not required */
2592
2593 /* check execute tuning bit */
2594 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2595 if (!GFIELD(val1, HOSTCtrl2_EXEC_TUNING)) {
2596 /* done tuning, break from loop */
2597 break;
2598 }
2599
2600 /* max tuning iterations exceeded */
2601 if (lcount++ > MAX_TUNING_ITERS) {
2602 sd_err(("%s: TUNINGFAILED: Max tuning iterations"
2603 "exceeded!\n", __FUNCTION__));
2604 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2605 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
2606 val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
2607 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2608 return ERROR;
2609 }
2610 } while (1);
2611
2612 val2 = sdstd_rreg(sd, SD3_Tuning_Info_Register);
2613 phase_info_local = ((val2>>15)& 0x7);
2614 sd_info(("Phase passed info: 0x%x\n", (val2>>8)& 0x3F));
2615 sd_info(("Phase selected post tune: 0x%x\n", phase_info_local));
2616
2617 if (phase_info_local > SDSTD_MAX_TUNING_PHASE) {
2618 sd_err(("!!Phase selected:%x\n", phase_info_local));
2619 }
2620
2621 /* check sampling clk select */
2622 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2623 if (!GFIELD(val1, HOSTCtrl2_SAMPCLK_SEL)) {
2624 /* error in selecting clk */
2625 sd_err(("%s: TUNINGFAILED: SamplClkSel failed!\n", __FUNCTION__));
2626 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2627 val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
2628 val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
2629 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2630 return ERROR;
2631 }
2632 /* done: */
2633 sd_info(("%s: TUNING Success!\n", __FUNCTION__));
2634 return SUCCESS;
2635 }
2636
2637 void
2638 sdstd_3_enable_retuning_int(sdioh_info_t *sd)
2639 {
2640 uint16 raw_int;
2641 unsigned long flags;
2642
2643 sdstd_os_lock_irqsave(sd, &flags);
2644 raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
2645 sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int | HC_INTR_RETUNING));
2646 /* Enable retuning status */
2647 raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
2648 sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int | HC_INTR_RETUNING));
2649 sdstd_os_unlock_irqrestore(sd, &flags);
2650 }
2651
2652 void
2653 sdstd_3_disable_retuning_int(sdioh_info_t *sd)
2654 {
2655 uint16 raw_int;
2656 unsigned long flags;
2657
2658 sdstd_os_lock_irqsave(sd, &flags);
2659 sd->intmask &= ~HC_INTR_RETUNING;
2660 raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
2661 sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int & (~HC_INTR_RETUNING)));
2662 /* Disable retuning status */
2663 raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
2664 sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int & (~HC_INTR_RETUNING)));
2665 sdstd_os_unlock_irqrestore(sd, &flags);
2666 }
2667
2668 bool
2669 sdstd_3_is_retuning_int_set(sdioh_info_t *sd)
2670 {
2671 uint16 raw_int;
2672
2673 raw_int = sdstd_rreg16(sd, SD_IntrStatus);
2674
2675 if (GFIELD(raw_int, INTSTAT_RETUNING_INT))
2676 return TRUE;
2677
2678 return FALSE;
2679 }
2680
2681 /*
2682 Assumption: sd3ClkMode has already been checked to be supported by both host
2683 and card capabilities before entering this function. VALID values for sd3ClkMode
2684 in this function: SD3CLKMODE_2, 3, 4 [0 and 1 are NOT supported as
2685 they are legacy]. That check is done by calling
2686 sdstd_3_get_matching_uhsi_clkmode().
2687 */
2688 static int
2689 sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode)
2690 {
2691 uint32 drvstrn;
2692 int status;
2693 uint8 hc_reg8;
2694 uint16 val1 = 0, presetval = 0;
2695 uint32 regdata;
2696
2697 sd3_trace(("sd3: %s:enter:clkmode:%d\n", __FUNCTION__, sd3ClkMode));
2698
2699 hc_reg8 = sdstd_rreg8(sd, SD_HostCntrl);
2700
2701 if (HOST_SDR_UNSUPP == sd->global_UHSI_Supp) {
2702 sd_err(("%s:Trying to set clk with unsupported global support\n", __FUNCTION__));
2703 return BCME_ERROR;
2704 }
2705
2706 /* get [double check, as this is already done in
2707 sdstd_3_get_matching_uhsi_clkmode] drvstrn
2708 */
2709 if (!sdstd_3_get_matching_drvstrn(sd, sd3ClkMode, &drvstrn, &presetval)) {
2710 sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset"
2711 "val:0x%x\n", __FUNCTION__, drvstrn, presetval));
2712 return BCME_SDIO_ERROR;
2713 }
2714
2715 /* also set driver type select in CCCR */
2716 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
2717 1, drvstrn)) != BCME_OK) {
2718 sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in card Failed!\n", __FUNCTION__));
2719 return BCME_SDIO_ERROR;
2720 }
2721
2722 /* ********** change Bus speed select in device */
2723 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
2724 1, ®data)) != SUCCESS) {
2725 sd_err(("%s:FAILED 1\n", __FUNCTION__));
2726 return BCME_SDIO_ERROR;
2727 }
2728 sd_info(("Attempting to change BSS.current val:0x%x\n", regdata));
2729
2730 if (regdata & SDIO_SPEED_SHS) {
2731 sd_info(("Device supports High-Speed mode.\n"));
2732 /* clear existing BSS */
2733 regdata &= ~0xE;
2734
2735 regdata |= (sd3ClkMode << 1);
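		/* The Bus Speed Select (BSS) field occupies bits 3:1 of
		 * SDIOD_CCCR_SPEED_CONTROL, hence the ~0xE clear and the
		 * (sd3ClkMode << 1) merge above. E.g. SDR50 (mode 2) writes
		 * 0x4 into that field.
		 */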
2736
2737 sd_info(("Writing %08x to Card at %08x\n",
2738 regdata, SDIOD_CCCR_SPEED_CONTROL));
2739 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
2740 1, regdata)) != BCME_OK) {
2741 sd_err(("%s:FAILED 2\n", __FUNCTION__));
2742 return BCME_SDIO_ERROR;
2743 }
2744
2745 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
2746 1, ®data)) != BCME_OK) {
2747 sd_err(("%s:FAILED 3\n", __FUNCTION__));
2748 return BCME_SDIO_ERROR;
2749 }
2750
2751 sd_info(("Read %08x from Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
2752 }
2753 else {
2754 sd_err(("Device does not support High-Speed Mode.\n"));
2755 }
2756
2757 /* SD Clock Enable = 0 */
2758 sdstd_wreg16(sd, SD_ClockCntrl,
2759 sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
2760
2761 /* set to HighSpeed mode */
2762 /* TBD: is this change to SD_HostCntrl reqd for UHSI? */
2763 hc_reg8 = SFIELD(hc_reg8, HOST_HI_SPEED_EN, 1);
2764 sdstd_wreg8(sd, SD_HostCntrl, hc_reg8);
2765
2766 /* set UHS Mode select in HC2 and also set preset */
2767 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
2768 val1 = SFIELD(val1, HOSTCtrl2_UHSMODE_SEL, sd3ClkMode);
2769 if (TRUE != sd3_sw_override1) {
2770 val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 1);
2771 } else {
2772 /* set HC registers manually using the retrieved values */
2773 /* *set drvstrn */
2774 val1 = SFIELD(val1, HOSTCtrl2_DRIVSTRENGTH_SEL,
2775 GFIELD(presetval, PRESET_DRIVR_SELECT));
2776 val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 0);
2777 }
2778
2779 /* finally write Hcontrol2 */
2780 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
2781
2782 sd_err(("%s:HostCtrl2 final value:0x%x\n", __FUNCTION__, val1));
2783
2784 /* start clock : clk will be enabled inside. */
2785 if (FALSE == sdstd_start_clock(sd, GFIELD(presetval, PRESET_CLK_DIV))) {
2786 sd_err(("sdstd_start_clock failed\n"));
2787 return ERROR;
2788 }
2789
2790 /* execute first tuning procedure */
2791 if (!sd3_sw_override1) {
2792 if (SD3_TUNING_REQD(sd, sd3ClkMode)) {
2793 sd_err(("%s: Tuning start..\n", __FUNCTION__));
2794 sd->sd3_tuning_reqd = TRUE;
2795 /* TBD: first time: enabling INT's could be problem? */
2796 sdstd_3_start_tuning(sd);
2797 }
2798 else
2799 sd->sd3_tuning_reqd = FALSE;
2800 }
2801
2802 return BCME_OK;
2803 }
2804
2805 /* Check & do tuning if required */
2806 void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param)
2807 {
2808 int retries = 0;
2809
2810 if (!sd->sd3_tuning_disable && sd->sd3_tuning_reqd) {
2811 sd3_trace(("sd3: %s: tuning reqd\n", __FUNCTION__));
2812 if (tuning_param == CHECK_TUNING_PRE_DATA) {
2813 if (sd->sd3_tun_state == TUNING_ONGOING) {
2814 retries = RETRIES_SMALL;
2815 /* check if tuning is already going on */
2816 while ((GFIELD(sdstd_rreg(sd, SD3_HostCntrl2),
2817 HOSTCtrl2_EXEC_TUNING)) && retries--) {
2818 if (retries == RETRIES_SMALL)
2819 sd_err(("%s: Waiting for Tuning to complete\n",
2820 __FUNCTION__));
2821 }
2822
2823 if (!retries) {
2824 sd_err(("%s: Tuning wait timeout\n", __FUNCTION__));
2825 if (trap_errs)
2826 ASSERT(0);
2827 }
2828 } else if (sd->sd3_tun_state == TUNING_START) {
2829 /* check and start tuning if required. */
2830 sd3_trace(("sd3 : %s : Doing Tuning before Data Transfer\n",
2831 __FUNCTION__));
2832 sdstd_3_start_tuning(sd);
2833 }
2834 } else if (tuning_param == CHECK_TUNING_POST_DATA) {
2835 if (sd->sd3_tun_state == TUNING_START_AFTER_DAT) {
2836 sd3_trace(("sd3: %s: tuning start\n", __FUNCTION__));
2837 /* check and start tuning if required. */
2838 sdstd_3_start_tuning(sd);
2839 }
2840 }
2841 }
2842 }
2843 /* Need to run this function in interrupt-disabled context */
2844 bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd)
2845 {
2846 sd3_trace(("sd3: %s:\n", __FUNCTION__));
2847
2848 /* if already initiated, just return without anything */
2849 if ((sd->sd3_tun_state == TUNING_START) ||
2850 (sd->sd3_tun_state == TUNING_ONGOING) ||
2851 (sd->sd3_tun_state == TUNING_START_AFTER_DAT)) {
2852 /* do nothing */
2853 return FALSE;
2854 }
2855
2856 if (sd->sd3_dat_state == DATA_TRANSFER_IDLE) {
2857 sd->sd3_tun_state = TUNING_START; /* tuning to be started by the tasklet */
2858 return TRUE;
2859 } else {
2860 /* tuning to be started after finishing the existing data transfer */
2861 sd->sd3_tun_state = TUNING_START_AFTER_DAT;
2862 }
2863 return FALSE;
2864 }
2865
2866 int sdstd_3_get_data_state(sdioh_info_t *sd)
2867 {
2868 return sd->sd3_dat_state;
2869 }
2870
2871 void sdstd_3_set_data_state(sdioh_info_t *sd, int state)
2872 {
2873 sd->sd3_dat_state = state;
2874 }
2875
2876 int sdstd_3_get_tune_state(sdioh_info_t *sd)
2877 {
2878 return sd->sd3_tun_state;
2879 }
2880
2881 void sdstd_3_set_tune_state(sdioh_info_t *sd, int state)
2882 {
2883 sd->sd3_tun_state = state;
2884 }
2885
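/* Returns the re-tuning timer exponent: either the host-capability value or the
 * sd_tuning_period override. Note (assumption based on the Host Controller 3.0
 * capabilities encoding): a count n corresponds to roughly 2^(n-1) seconds
 * between re-tuning cycles.
 */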
2886 uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd)
2887 {
2888 if (sd_tuning_period == CAP3_RETUNING_TC_OTHER) {
2889 return GFIELD(sd->caps3, CAP3_RETUNING_TC);
2890 } else {
2891 return (uint8)sd_tuning_period;
2892 }
2893 }
2894
2895 uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd)
2896 {
2897 return sd_uhsimode;
2898 }
2899
2900 /* check, to see if the card supports driver_type corr to the driver_type
2901 in preset value, which will be selected by requested UHSI mode
2902 input:
2903 clk mode: valid values: SD3CLKMODE_2_SDR50, SD3CLKMODE_3_SDR104,
2904 SD3CLKMODE_4_DDR50, SD3CLKMODE_AUTO
2905 outputs:
2906 return_val: TRUE; if a matching drvstrn for the given clkmode is
2907 found in both HC and card. otherwise, FALSE.
2908 [other outputs below valid ONLY if return_val is TRUE]
2909 drvstrn : driver strength read from CCCR.
2910 presetval: value of preset reg, corr to the clkmode.
2911 */
2912 static bool
2913 sdstd_3_get_matching_drvstrn(sdioh_info_t *sd, int sd3_requested_clkmode,
2914 uint32 *drvstrn, uint16 *presetval)
2915 {
2916 int status;
2917 uint8 presetreg;
2918 uint8 cccr_reqd_dtype_mask = 1;
2919
2920 sd3_trace(("sd3: %s:\n", __FUNCTION__));
2921
2922 if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
2923 /* CARD: get the card driver strength from cccr */
2924 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
2925 1, drvstrn)) != BCME_OK) {
2926 sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card"
2927 "Failed!\n", __FUNCTION__));
2928 return FALSE;
2929 }
2930 if (TRUE != sd3_sw_override1) {
2931 /* HOSTC: get the addr of preset register indexed by the clkmode */
2932 presetreg = SD3_PresetValStart +
2933 (2*sd3_requested_clkmode + 6);
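			/* Worked example, assuming the usual preset-register layout
			 * (Init, Default Speed, High Speed, SDR12, SDR25, SDR50,
			 * SDR104, DDR50 as consecutive 16-bit entries): SDR50
			 * (mode 2) -> offset 10, SDR104 (mode 3) -> offset 12,
			 * DDR50 (mode 4) -> offset 14 from SD3_PresetValStart.
			 */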
2934 *presetval = sdstd_rreg16(sd, presetreg);
2935 } else {
2936 /* Note: +3 for mapping between SD3CLKMODE_xxx and presetval_sw_table */
2937 *presetval = presetval_sw_table[sd3_requested_clkmode + 3];
2938 }
2939 sd_err(("%s:reqCLK: %d, presetval: 0x%x\n",
2940 __FUNCTION__, sd3_requested_clkmode, *presetval));
2941
2942 cccr_reqd_dtype_mask <<= GFIELD(*presetval, PRESET_DRIVR_SELECT);
2943
2944 /* compare/match */
2945 if (!(cccr_reqd_dtype_mask & GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP))) {
2946 sd_err(("%s:cccr_reqd_dtype_mask and SDIO_BUS_DRVR_TYPE_CAP"
2947 "not matching!:reqd:0x%x, cap:0x%x\n", __FUNCTION__,
2948 cccr_reqd_dtype_mask, GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP)));
2949 return FALSE;
2950 } else {
2951 /* check if drive strength override is required. If so, first set it */
2952 if (*dhd_sdiod_uhsi_ds_override != DRVSTRN_IGNORE_CHAR) {
2953 int ds_offset = 0;
2954 uint32 temp = 0;
2955
2956 /* drvstrn to reflect the preset val: this is default */
2957 *drvstrn = GFIELD(*presetval, PRESET_DRIVR_SELECT);
2958
2959 /* now check override */
2960 ds_offset = (((int)DRVSTRN_MAX_CHAR -
2961 (int)(*dhd_sdiod_uhsi_ds_override)));
2962 if ((ds_offset >= 0) && (ds_offset <= MAX_DTS_INDEX)) {
2963 ds_offset = MAX_DTS_INDEX - ds_offset;
2964 sd_err(("%s:Drive strength override: %c, offset: "
2965 "%d, val: %d\n", __FUNCTION__,
2966 *dhd_sdiod_uhsi_ds_override,
2967 ds_offset, DTS_vals[ds_offset]));
2968 temp = SFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_SEL,
2969 DTS_vals[ds_offset]);
2970 sd_err(("%s:DrvStrn orig: 0x%x, modif: 0x%x\n",
2971 __FUNCTION__, *drvstrn, temp));
2972 *drvstrn = temp;
2973 } else {
2974 /* else case is default: use preset val */
2975 sd_err(("%s:override invalid: DrvStrn is from "
2976 "preset: 0x%x\n",
2977 __FUNCTION__, *drvstrn));
2978 }
2979 } else {
2980 sd_err(("%s:DrvStrn is from preset: 0x%x\n",
2981 __FUNCTION__, *drvstrn));
2982 }
2983 }
2984 } else {
2985 /* TBD check for sd3_requested_clkmode : -1 also. */
2986 sd_err(("%s: Automode not supported!\n", __FUNCTION__));
2987 return FALSE;
2988 }
2989 return TRUE;
2990 }
2991
2992 /* Returns the matching UHSI clk mode if one is found. If not, returns -1.
2993 Also, if sd3_requested_clkmode is -1, finds the closest max matching clk and returns it.
2994 */
2995 static int
2996 sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd, int sd3_requested_clkmode)
2997 {
2998 uint32 card_val_uhsisupp;
2999 uint8 speedmask = 1;
3000 uint32 drvstrn;
3001 uint16 presetval;
3002 int status;
3003
3004 sd3_trace(("sd3: %s:\n", __FUNCTION__));
3005
3006 sd->global_UHSI_Supp = HOST_SDR_UNSUPP;
3007
3008 /* for legacy/25MHz/50MHz bus speeds, no checks done here */
3009 if ((sd3_requested_clkmode == SD3CLKMODE_0_SDR12) ||
3010 (sd3_requested_clkmode == SD3CLKMODE_1_SDR25)) {
3011 sd->global_UHSI_Supp = HOST_SDR_12_25;
3012 return sd3_requested_clkmode;
3013 }
3014 /* get cap of card */
3015 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_UHSI_SUPPORT,
3016 1, &card_val_uhsisupp)) != BCME_OK) {
3017 sd_err(("%s:SDIOD_CCCR_UHSI_SUPPORT query failed!\n", __FUNCTION__));
3018 return -1;
3019 }
3020 sd_info(("%s:Read %08x from Card at %08x\n", __FUNCTION__,
3021 card_val_uhsisupp, SDIOD_CCCR_UHSI_SUPPORT));
3022
3023 if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
3024 /* Note: it is assumed that the following executes only when (sd3ClkMode >= 2) */
3025 speedmask <<= (sd3_requested_clkmode - SD3CLKMODE_2_SDR50);
3026
3027 /* check first about 3.0 HS CLK modes */
3028 if (!(GFIELD(sd->caps3, CAP3_30CLKCAP) & speedmask)) {
3029 sd_err(("%s:HC does not support req 3.0 UHSI mode."
3030 "requested:%d; capable:0x%x\n", __FUNCTION__,
3031 sd3_requested_clkmode, GFIELD(sd->caps3, CAP3_30CLKCAP)));
3032 return -1;
3033 }
3034
3035 /* check first about 3.0 CARD CLK modes */
3036 if (!(GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP) & speedmask)) {
3037 sd_err(("%s:Card does not support req 3.0 UHSI mode. requested:%d;"
3038 "capable:0x%x\n", __FUNCTION__, sd3_requested_clkmode,
3039 GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP)));
3040 return -1;
3041 }
3042
3043 /* check, to see if the card supports driver_type corr to the
3044 driver_type in preset value, which will be selected by
3045 requested UHSI mode
3046 */
3047 if (!sdstd_3_get_matching_drvstrn(sd, sd3_requested_clkmode,
3048 &drvstrn, &presetval)) {
3049 sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset"
3050 "val:0x%x\n", __FUNCTION__, drvstrn, presetval));
3051 return -1;
3052 }
3053 /* success path. change the support variable accordingly */
3054 sd->global_UHSI_Supp = HOST_SDR_50_104_DDR;
3055 return sd3_requested_clkmode;
3056 } else {
3057 /* auto clk selection: get the highest clock supported by both card and HC */
3058 /* TBD: TO BE DONE */
3059 /* sd->global_UHSI_Supp = TRUE; on success */
3060 return -1;
3061 }
3062 }
3063
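/* 1.8V signalling switch sequence (as implemented below): issue CMD11, gate off
 * SDCLK, verify DAT[3:0] are driven low, set the 1.8V signalling enable bit in
 * HostControl2, wait 5ms, re-enable SDCLK, then verify DAT[3:0] have returned
 * to 0b1111.
 */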
3064 static int
3065 sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd)
3066 {
3067 int status;
3068 uint32 cmd_rsp = 0, presst;
3069 uint16 val1 = 0;
3070
3071 sd3_trace(("sd3: %s:\n", __FUNCTION__));
3072
3073 /* Issue cmd11 */
3074 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_11, 0))
3075 != SUCCESS) {
3076 sd_err(("%s: CMD11 failed\n", __FUNCTION__));
3077 return status;
3078 }
3079
3080 /* check response */
3081 sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
3082 if (
3083 GFIELD(cmd_rsp, RSP1_ERROR) || /* bit 19 */
3084 GFIELD(cmd_rsp, RSP1_ILLEGAL_CMD) || /* bit 22 */
3085 GFIELD(cmd_rsp, RSP1_COM_CRC_ERROR) || /* bit 23 */
3086 GFIELD(cmd_rsp, RSP1_CARD_LOCKED) /* bit 25 */ ) {
3087 sd_err(("%s: FAIL:CMD11: cmd_resp:0x%x\n", __FUNCTION__, cmd_rsp));
3088 return ERROR;
3089 }
3090
3091 /* SD Clock Enable = 0 */
3092 sdstd_wreg16(sd, SD_ClockCntrl,
3093 sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
3094
3095 /* check DAT[3..0] using Present State Reg. If not 0, error */
3096 presst = sdstd_rreg(sd, SD_PresentState);
3097 if (0 != GFIELD(presst, PRES_DAT_SIGNAL)) {
3098 sd_err(("%s: FAIL: PRESTT:0x%x\n", __FUNCTION__, presst));
3099 return ERROR;
3100 }
3101
3102 /* turn 1.8V sig enable in HC2 */
3103 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
3104 val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
3105 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
3106
3107 /* wait 5ms */
3108 OSL_DELAY(5000);
3109
3110 /* check 1.8V sig enable in HC2. if cleared, error */
3111 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
3112
3113 if (!GFIELD(val1, HOSTCtrl2_1_8SIG_EN)) {
3114 sd_err(("%s: FAIL: HC2:1.8V_En:0x%x\n", __FUNCTION__, val1));
3115 return ERROR;
3116 }
3117
3118 /* SD Clock Enable = 1 */
3119 val1 = sdstd_rreg16(sd, SD_ClockCntrl);
3120 sdstd_wreg16(sd, SD_ClockCntrl, val1 | 0x4);
3121
3122 /* wait 1ms */
3123 OSL_DELAY(1000);
3124
3125 /* check DAT[3..0] using Present State Reg. If not 0b1111, error */
3126 presst = sdstd_rreg(sd, SD_PresentState);
3127 if (0xf != GFIELD(presst, PRES_DAT_SIGNAL)) {
3128 sd_err(("%s: FAIL: PRESTT_FINAL:0x%x\n", __FUNCTION__, presst));
3129 return ERROR;
3130 }
3131
3132 return (SUCCESS);
3133 }
3134
3135 static int
3136 sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
3137 {
3138 uint32 regdata;
3139 int status;
3140 uint8 reg8;
3141
3142 uint32 drvstrn;
3143
3144 reg8 = sdstd_rreg8(sd, SD_HostCntrl);
3145
3146 #ifdef BCMINTERNAL
3147 /* The Jinvani SD Gold Host forces the highest clock rate in high-speed mode */
3148 /* Only enable high-speed mode if the SD clock divisor is 1. */
3149 if (sd->controller_type == SDIOH_TYPE_JINVANI_GOLD) {
3150 if (sd_divisor != 1) {
3151 HSMode = FALSE;
3152 }
3153 }
3154 #endif /* BCMINTERNAL */
3155
3156 if (HSMode == TRUE) {
3157 if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
3158 sd_err(("Host Controller does not support hi-speed mode.\n"));
3159 return BCME_ERROR;
3160 }
3161
3162 sd_info(("Attempting to enable High-Speed mode.\n"));
3163
3164 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3165 1, ®data)) != SUCCESS) {
3166 return BCME_SDIO_ERROR;
3167 }
3168 if (regdata & SDIO_SPEED_SHS) {
3169 sd_info(("Device supports High-Speed mode.\n"));
3170
3171 regdata |= SDIO_SPEED_EHS;
3172
3173 sd_info(("Writing %08x to Card at %08x\n",
3174 regdata, SDIOD_CCCR_SPEED_CONTROL));
3175 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3176 1, regdata)) != BCME_OK) {
3177 return BCME_SDIO_ERROR;
3178 }
3179
3180 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3181 1, ®data)) != BCME_OK) {
3182 return BCME_SDIO_ERROR;
3183 }
3184
3185 sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
3186
3187 reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);
3188
3189 sd_err(("High-speed clocking mode enabled.\n"));
3190 }
3191 else {
3192 sd_err(("Device does not support High-Speed Mode.\n"));
3193 reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
3194 }
3195 } else {
3196 /* Force off device bit */
3197 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3198 1, ®data)) != BCME_OK) {
3199 return status;
3200 }
3201 if (regdata & SDIO_SPEED_EHS) {
3202 regdata &= ~SDIO_SPEED_EHS;
3203 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
3204 1, regdata)) != BCME_OK) {
3205 return status;
3206 }
3207 }
3208
3209 sd_err(("High-speed clocking mode disabled.\n"));
3210 reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
3211 }
3212
3213 if ((sd->host_UHSISupported) && (sd->card_UHSI_voltage_Supported)) {
3214 /* also set the default driver strength in the card/HC [this is reqd because,
3215 if we selected any other drv_strn earlier, we need to reset it]
3216 */
3217 /* get the card driver strength from cccr */
3218 if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
3219 1, &drvstrn)) != BCME_OK) {
3220 sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card"
3221 "Failed!\n", __FUNCTION__));
3222 return BCME_SDIO_ERROR;
3223 }
3224
3225 /* reset card drv strn */
3226 drvstrn = SFIELD(drvstrn, SDIO_BUS_DRVR_TYPE_SEL, 0);
3227
3228 /* set card drv strn */
3229 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
3230 1, drvstrn)) != BCME_OK) {
3231 sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in"
3232 "card Failed!\n", __FUNCTION__));
3233 return BCME_SDIO_ERROR;
3234 }
3235 }
3236
3237 sdstd_wreg8(sd, SD_HostCntrl, reg8);
3238
3239 return BCME_OK;
3240 }
3241
3242 /* Select DMA Mode:
3243 * If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
3244 * Otherwise, pick the selected mode if supported.
3245 * If not supported, use PIO mode.
3246 */
3247 static int
3248 sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
3249 {
3250 uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
3251 int8 prev_dma_mode = sd->sd_dma_mode;
3252
3253 switch (prev_dma_mode) {
3254 case DMA_MODE_AUTO:
3255 sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
3256 __FUNCTION__));
3257 if (GFIELD(sd->caps, CAP_ADMA2)) {
3258 sd->sd_dma_mode = DMA_MODE_ADMA2;
3259 dma_sel_bits = SDIOH_ADMA2_MODE;
3260 } else if (GFIELD(sd->caps, CAP_ADMA1)) {
3261 sd->sd_dma_mode = DMA_MODE_ADMA1;
3262 dma_sel_bits = SDIOH_ADMA1_MODE;
3263 } else if (GFIELD(sd->caps, CAP_DMA)) {
3264 sd->sd_dma_mode = DMA_MODE_SDMA;
3265 } else {
3266 sd->sd_dma_mode = DMA_MODE_NONE;
3267 }
3268 break;
3269 case DMA_MODE_NONE:
3270 sd->sd_dma_mode = DMA_MODE_NONE;
3271 break;
3272 case DMA_MODE_SDMA:
3273 if (GFIELD(sd->caps, CAP_DMA)) {
3274 sd->sd_dma_mode = DMA_MODE_SDMA;
3275 } else {
3276 sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
3277 sd->sd_dma_mode = DMA_MODE_NONE;
3278 }
3279 break;
3280 case DMA_MODE_ADMA1:
3281 if (GFIELD(sd->caps, CAP_ADMA1)) {
3282 sd->sd_dma_mode = DMA_MODE_ADMA1;
3283 dma_sel_bits = SDIOH_ADMA1_MODE;
3284 } else {
3285 sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
3286 sd->sd_dma_mode = DMA_MODE_NONE;
3287 }
3288 break;
3289 case DMA_MODE_ADMA2:
3290 if (GFIELD(sd->caps, CAP_ADMA2)) {
3291 sd->sd_dma_mode = DMA_MODE_ADMA2;
3292 dma_sel_bits = SDIOH_ADMA2_MODE;
3293 } else {
3294 sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
3295 sd->sd_dma_mode = DMA_MODE_NONE;
3296 }
3297 break;
3298 case DMA_MODE_ADMA2_64:
3299 sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
3300 sd->sd_dma_mode = DMA_MODE_NONE;
3301 break;
3302 default:
3303 sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__,
3304 prev_dma_mode));
3305 sd->sd_dma_mode = DMA_MODE_NONE;
3306 break;
3307 }
3308
3309 /* clear SysAddr, only used for SDMA */
3310 sdstd_wreg(sd, SD_SysAddr, 0);
3311
3312 sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode]));
3313
3314 reg8 = sdstd_rreg8(sd, SD_HostCntrl);
3315 reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
3316 sdstd_wreg8(sd, SD_HostCntrl, reg8);
3317 sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));
3318
3319 return BCME_OK;
3320 }
3321
3322 #ifdef BCMDBG
3323 void
3324 print_regs(sdioh_info_t *sd)
3325 {
3326 uint8 reg8 = 0;
3327 uint16 reg16 = 0;
3328 uint32 reg32 = 0;
3329 uint8 presetreg;
3330 int i;
3331
3332 reg8 = sdstd_rreg8(sd, SD_BlockSize);
3333 printf("REGS: SD_BlockSize [004h]:0x%x\n", reg8);
3334
3335 reg8 = sdstd_rreg8(sd, SD_BlockCount);
3336 printf("REGS: SD_BlockCount [006h]:0x%x\n", reg8);
3337
3338 reg8 = sdstd_rreg8(sd, SD_BlockSize);
3339 printf("REGS: SD_BlockSize [004h]:0x%x\n", reg8);
3340
3341 reg8 = sdstd_rreg8(sd, SD_TransferMode);
3342 printf("REGS: SD_TransferMode [00Ch]:0x%x\n", reg8);
3343
3344 reg8 = sdstd_rreg8(sd, SD_HostCntrl);
3345 printf("REGS: SD_HostCntrl [028h]:0x%x\n", reg8);
3346
3347 reg32 = sdstd_rreg(sd, SD_PresentState);
3348 printf("REGS: SD_PresentState [024h]:0x%x\n", reg32);
3349
3350 reg8 = sdstd_rreg8(sd, SD_PwrCntrl);
3351 printf("REGS: SD_PwrCntrl [029h]:0x%x\n", reg8);
3352
3353 reg8 = sdstd_rreg8(sd, SD_BlockGapCntrl);
3354 printf("REGS: SD_BlockGapCntrl [02Ah]:0x%x\n", reg8);
3355
3356 reg8 = sdstd_rreg8(sd, SD_WakeupCntrl);
3357 printf("REGS: SD_WakeupCntrl [02Bh]:0x%x\n", reg8);
3358
3359 reg16 = sdstd_rreg16(sd, SD_ClockCntrl);
3360 printf("REGS: SD_ClockCntrl [02Ch]:0x%x\n", reg16);
3361
3362 reg8 = sdstd_rreg8(sd, SD_TimeoutCntrl);
3363 printf("REGS: SD_TimeoutCntrl [02Eh]:0x%x\n", reg8);
3364
3365 reg8 = sdstd_rreg8(sd, SD_SoftwareReset);
3366 printf("REGS: SD_SoftwareReset [02Fh]:0x%x\n", reg8);
3367
3368 reg16 = sdstd_rreg16(sd, SD_IntrStatus);
3369 printf("REGS: SD_IntrStatus [030h]:0x%x\n", reg16);
3370
3371 reg16 = sdstd_rreg16(sd, SD_ErrorIntrStatus);
3372 printf("REGS: SD_ErrorIntrStatus [032h]:0x%x\n", reg16);
3373
3374 reg16 = sdstd_rreg16(sd, SD_IntrStatusEnable);
3375 printf("REGS: SD_IntrStatusEnable [034h]:0x%x\n", reg16);
3376
3377 reg16 = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
3378 printf("REGS: SD_ErrorIntrStatusEnable [036h]:0x%x\n", reg16);
3379
3380 reg16 = sdstd_rreg16(sd, SD_IntrSignalEnable);
3381 printf("REGS: SD_IntrSignalEnable [038h]:0x%x\n", reg16);
3382
3383 reg16 = sdstd_rreg16(sd, SD_ErrorIntrSignalEnable);
3384 printf("REGS: SD_ErrorIntrSignalEnable [03Ah]:0x%x\n", reg16);
3385
3386 reg32 = sdstd_rreg(sd, SD_Capabilities);
3387 printf("REGS: SD_Capabilities [040h]:0x%x\n", reg32);
3388
3389 reg32 = sdstd_rreg(sd, SD_MaxCurCap);
3390 printf("REGS: SD_MaxCurCap [04Ah]:0x%x\n", reg32);
3391
3392 reg32 = sdstd_rreg(sd, SD_Capabilities3);
3393 printf("REGS: SD_Capabilities3 [044h]:0x%x\n", reg32);
3394
3395 reg16 = sdstd_rreg16(sd, SD3_HostCntrl2);
3396 printf("REGS: SD3_HostCntrl2 [03Eh]:0x%x\n", reg16);
3397
3398 for (i = 0; i < 8; i++) {
3399 presetreg = SD3_PresetValStart + i*2;
3400 printf("REGS: Presetvalreg:ix[%d]:0x%x, val=0x%x\n", i,
3401 presetreg, sdstd_rreg16(sd, presetreg));
3402 }
3403
3404 reg16 = sdstd_rreg16(sd, SD_SlotInterruptStatus);
3405 printf("REGS: SD_SlotInterruptStatus [0FCh]:0x%x\n", reg16);
3406
3407 reg16 = sdstd_rreg16(sd, SD_HostControllerVersion);
3408 printf("REGS: SD_HostControllerVersion [0FEh]:0x%x\n", reg16);
3409 }
3410 #endif /* BCMDBG */
3411
3412 #ifdef BCMINTERNAL
3413 #ifdef NOTUSED
3414 static int
3415 parse_state(uint32 state, char *buf, int len)
3416 {
3417 char *data = buf;
3418
3419 sd_err(("Parsing state 0x%x\n", state));
3420 if (!len) {
3421 return (0);
3422 }
3423
3424 data += sprintf(data, "cmd_inhibit %d\n", GFIELD(state, PRES_CMD_INHIBIT));
3425 data += sprintf(data, "dat_inhibit %d\n", GFIELD(state, PRES_DAT_INHIBIT));
3426 data += sprintf(data, "dat_busy %d\n", GFIELD(state, PRES_DAT_BUSY));
3427 data += sprintf(data, "write_active %d\n", GFIELD(state, PRES_WRITE_ACTIVE));
3428 data += sprintf(data, "read_active %d\n", GFIELD(state, PRES_READ_ACTIVE));
3429 data += sprintf(data, "write_data_rdy %d\n", GFIELD(state, PRES_WRITE_DATA_RDY));
3430 data += sprintf(data, "read_data_rdy %d\n", GFIELD(state, PRES_READ_DATA_RDY));
3431 data += sprintf(data, "card_present %d\n", GFIELD(state, PRES_CARD_PRESENT));
3432 data += sprintf(data, "card_stable %d\n", GFIELD(state, PRES_CARD_STABLE));
3433 data += sprintf(data, "card_present_raw %d\n", GFIELD(state, PRES_CARD_PRESENT_RAW));
3434 data += sprintf(data, "write_enabled %d\n", GFIELD(state, PRES_WRITE_ENABLED));
3435 data += sprintf(data, "cmd_signal %d\n", GFIELD(state, PRES_CMD_SIGNAL));
3436
3437 return (data - buf);
3438 }
3439
3440 static int
3441 parse_caps(uint32 cap, char *buf, int len)
3442 {
3443 int block = 0xbeef;
3444 char *data = buf;
3445
3446 data += sprintf(data, "TimeOut Clock Freq:\t%d\n", GFIELD(cap, CAP_TO_CLKFREQ));
3447 data += sprintf(data, "TimeOut Clock Unit:\t%d\n", GFIELD(cap, CAP_TO_CLKUNIT));
3448 data += sprintf(data, "Base Clock:\t\t%d\n", GFIELD(cap, CAP_BASECLK));
3449 switch (GFIELD(cap, CAP_MAXBLOCK)) {
3450 case 0: block = 512; break;
3451 case 1: block = 1024; break;
3452 case 2: block = 2048; break;
3453 case 3: block = 0; break;
3454 }
3455 data += sprintf(data, "Max Block Size:\t\t%d\n", block);
3456 data += sprintf(data, "Support High Speed:\t%d\n", GFIELD(cap, CAP_HIGHSPEED));
3457 data += sprintf(data, "Support DMA:\t\t%d\n", GFIELD(cap, CAP_DMA));
3458 data += sprintf(data, "Support Suspend:\t%d\n", GFIELD(cap, CAP_SUSPEND));
3459 data += sprintf(data, "Support 3.3 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_3_3));
3460 data += sprintf(data, "Support 3.0 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_3_0));
3461 data += sprintf(data, "Support 1.8 Volts:\t%d\n", GFIELD(cap, CAP_VOLT_1_8));
3462 return (data - buf);
3463 }
3464 #endif /* NOTUSED */
3465 #endif /* BCMINTERNAL */
3466
3467 /* XXX Per SDIO Host Controller Spec section 3.2.1
3468 Note: for 2.x HC, new_sd_divisor should be a power of 2, but for 3.0
3469 HC, new_sd_divisor should be a multiple of 2.
3470 */
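/*
 * Worked example of the rule above: with a 50 MHz base clock, a 2.x host can take
 * power-of-2 divisors such as 128 (~391 kHz, suitable for card init) or 2 (25 MHz),
 * while a 3.0 host accepts 1 or any even divisor, e.g. 126 (~397 kHz).
 */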
3471 bool
3472 sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
3473 {
3474 uint rc, count;
3475 uint16 divisor;
3476 uint16 regdata;
3477 uint16 val1;
3478
3479 sd3_trace(("%s: starting clk\n", __FUNCTION__));
3480 /* turn off HC clock */
3481 sdstd_wreg16(sd, SD_ClockCntrl,
3482 sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */
3483
3484 /* Set divisor */
3485 if (sd->host_UHSISupported) {
3486 #ifdef BCMDBG
3487 if ((new_sd_divisor != 1) && /* 1 is a valid value */
3488 ((new_sd_divisor & (0x1)) || /* check for multiple of 2 */
3489 (new_sd_divisor == 0) ||
3490 (new_sd_divisor > 0x3ff))) {
3491 sd_err(("3.0: Invalid clock divisor target: %d\n", new_sd_divisor));
3492 return FALSE;
3493 }
3494 #endif
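		/* SD 3.0 host: per the SD Host Controller spec, the 10-bit clock field holds N
		 * and the controller divides the base clock by 2*N (N == 0 selects the undivided
		 * base clock), so program half of the requested divisor.
		 */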
3495 divisor = (new_sd_divisor >> 1);
3496 } else
3497 {
3498 #ifdef BCMDBG
3499 if ((new_sd_divisor & (new_sd_divisor-1)) ||
3500 (new_sd_divisor == 0)) {
3501 sd_err(("Invalid clock divisor target: %d\n", new_sd_divisor));
3502 return FALSE;
3503 }
3504 #endif
3505 /* new logic: if divisor > 256, restrict to 256 */
3506 if (new_sd_divisor > 256)
3507 new_sd_divisor = 256;
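		/* 2.x host: the 8-bit clock field sits in bits 15-8 and the controller divides
		 * the base clock by 2*N, so program half of the requested divisor
		 * (e.g. new_sd_divisor 128 -> field 0x40 -> base/128).
		 */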
3508 divisor = (new_sd_divisor >> 1) << 8;
3509 }
3510 #ifdef BCMINTERNAL
3511 if (sd->controller_type == SDIOH_TYPE_JINVANI_GOLD) {
3512 divisor = (new_sd_divisor >> 2) << 8;
3513 }
3514 #endif /* BCMINTERNAL */
3515
3516 sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
3517 if (sd->host_UHSISupported) {
3518 /* Pack the 10-bit divisor into the clock control register:
3519 * divisor bits 0-7 go to clkctrl bits 15-8, and divisor bits 9-8 go to clkctrl bits 7-6.
3520 */
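		/* e.g. divisor 0x123 -> val1 0x2340: clkctrl[15:8] = 0x23, clkctrl[7:6] = 01b */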
3521 val1 = divisor << 2;
3522 val1 &= 0x0ffc;
3523 val1 |= divisor >> 8;
3524 val1 <<= 6;
3525 printf("divisor:%x;val1:%x\n", divisor, val1);
3526 sdstd_mod_reg16(sd, SD_ClockCntrl, 0xffC0, val1);
3527 } else
3528 {
3529 sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
3530 }
3531
3532 sd_err(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
3533 new_sd_divisor, divisor));
3534 if (new_sd_divisor > 0)
3535 sd_err(("%s:now, divided clk is: %d Hz\n",
3536 __FUNCTION__, GFIELD(sd->caps, CAP_BASECLK)*1000000/new_sd_divisor));
3537 else
3538 sd_err(("Using Primary Clock Freq of %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
3539 sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
3540 if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
3541 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3542 ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
3543 ((50 % new_sd_divisor) ? "KHz" : "MHz")));
3544 } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
3545 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3546 ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
3547 ((48 % new_sd_divisor) ? "KHz" : "MHz")));
3548 } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
3549 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3550 ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
3551 ((33 % new_sd_divisor) ? "KHz" : "MHz")));
3552 } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 31) {
3553 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3554 ((31 % new_sd_divisor) ? (31000 / new_sd_divisor) : (31 / new_sd_divisor)),
3555 ((31 % new_sd_divisor) ? "KHz" : "MHz")));
3556 } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 8) {
3557 sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
3558 ((8 % new_sd_divisor) ? (8000 / new_sd_divisor) : (8 / new_sd_divisor)),
3559 ((8 % new_sd_divisor) ? "KHz" : "MHz")));
3560 } else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
3561 /* XXX - BCM 27XX Standard Host Controller returns 0 for CLKFREQ */
3562 } else {
3563 sd_err(("Need to determine divisor for %d MHz clocks\n",
3564 GFIELD(sd->caps, CAP_BASECLK)));
3565 sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
3566 return (FALSE);
3567 }
3568
3569 sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */
3570
3571 /* Wait for clock to stabilize */
3572 rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
3573 count = 0;
3574 while (!rc) {
3575 OSL_DELAY(1);
3576 sd_info(("Waiting for clock to become stable 0x%x\n", rc));
3577 rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
3578 count++;
3579 if (count > 10000) {
3580 sd_err(("%s:Clocks failed to stabilize after %u attempts\n",
3581 __FUNCTION__, count));
3582 return (FALSE);
3583 }
3584 }
3585 /* Turn on clock */
3586 sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
3587
3588 OSL_DELAY(20);
3589
3590 /* Set timeout control (adjust default value based on divisor).
3591 * Disabling timeout interrupts during setting is advised by host spec.
3592 */
3593 #ifdef BCMQT
3594 if (GFIELD(sd->caps, CAP_BASECLK) < 50)
3595 #endif
3596 {
3597 uint toval;
3598
3599 toval = sd_toctl;
3600 divisor = new_sd_divisor;
3601
3602 while (toval && !(divisor & 1)) {
3603 toval -= 1;
3604 divisor >>= 1;
3605 }
3606
3607 regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
3608 sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
3609 sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
3610 sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
3611 }
3612 #ifdef BCMQT
3613 else {
3614 sd_info(("%s: Resetting err int control\n", __FUNCTION__));
3615 /* XXX: turn off timeout INT, it resets clk ctrl bit */
3616 regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
3617 sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
3618 }
3619 #endif
3620 OSL_DELAY(2);
3621
3622 sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
3623
3624 return TRUE;
3625 }
3626
3627 /* XXX Per SDIO Host Controller Spec 3.3
3628 * volts_req:
3629 * 0 means default: select highest voltage.
3630 * 1 means 1.8V
3631 * 2 means 3.0V
3632 * 3 means 3.3V
3633 * returns
3634 * TRUE: no error
3635 * FALSE: general error
3636 * SDIO_OCR_READ_FAIL: ocr reading failure. Now the HC has to try in other available voltages.
3637 */
3638 uint16
3639 sdstd_start_power(sdioh_info_t *sd, int volts_req)
3640 {
3641 char *s;
3642 uint32 cmd_arg;
3643 uint32 cmd_rsp;
3644 uint8 pwr = 0;
3645 int volts = 0;
3646 uint16 val1;
3647 uint16 init_divider = 0;
3648 uint8 baseclk = 0;
3649 bool selhighest = (volts_req == 0) ? TRUE : FALSE;
3650
3651 /* reset the card uhsi volt support to false */
3652 sd->card_UHSI_voltage_Supported = FALSE;
3653
3654 /* Ensure a power on reset by turning off bus power in case it happened to
3655 * be on already. (This might happen if driver doesn't unload/clean up correctly,
3656 * crash, etc.) Leave off for 100ms to make sure the power off isn't
3657 * ignored/filtered by the device. Note we can't skip this step if the power is
3658 * off already since we don't know how long it has been off before starting
3659 * the driver.
3660 */
3661 sdstd_wreg8(sd, SD_PwrCntrl, 0);
3662 sd_info(("Turning off VDD/bus power briefly (100ms) to ensure reset\n"));
3663 OSL_DELAY(100000);
3664
3665 /* For selecting highest available voltage, start from lowest and iterate */
3666 if (!volts_req)
3667 volts_req = 1;
3668
3669 s = NULL;
3670
3671 if (volts_req == 1) {
3672 if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
3673 volts = 5;
3674 s = "1.8";
3675 if (FALSE == selhighest)
3676 goto voltsel;
3677 else
3678 volts_req++;
3679 } else {
3680 sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts));
3681 volts_req++;
3682 }
3683 }
3684
3685 if (volts_req == 2) {
3686 if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
3687 volts = 6;
3688 s = "3.0";
3689 if (FALSE == selhighest)
3690 goto voltsel;
3691 else volts_req++;
3692 } else {
3693 sd_err(("HC doesn't support voltage! trying higher voltage: %d\n", volts));
3694 volts_req++;
3695 }
3696 }
3697
3698 if (volts_req == 3) {
3699 if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
3700 volts = 7;
3701 s = "3.3";
3702 } else {
3703 if ((FALSE == selhighest) || (volts == 0)) {
3704 sd_err(("HC doesn't support any voltage! error!\n"));
3705 return FALSE;
3706 }
3707 }
3708 }
3709
3710 /* XXX
3711 * if UHSI is NOT supported, check for other voltages also. This is a safety measure
3712 * for embedded devices also, so that HC starts in lower power first. If this
3713 * function fails, the caller may disable UHSISupported
3714 * and call start power again to check support in higher voltages.
3715 */
3716
3717 voltsel:
3718 pwr = SFIELD(pwr, PWR_VOLTS, volts);
3719 pwr = SFIELD(pwr, PWR_BUS_EN, 1);
3720 sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
3721 sd_info(("Setting Bus Power to %s Volts\n", s));
3722 BCM_REFERENCE(s);
3723
3724 /*
3725 * PR101766 : BRCM SDIO3.0 card is an embedded SD device. It is not a SD card.
3726 * VDDIO signalling will be tied to 1.8v level on all SDIO3.0 based boards.
3727 * So program the HC to drive VDDIO at 1.8v level.
3728 */
3729 if ((sd->version == HOST_CONTR_VER_3) && (volts == 5)) {
3730 val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
3731 val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
3732 sdstd_wreg16(sd, SD3_HostCntrl2, val1);
3733 }
3734
3735 /* Wait for 500ms for power to stabilize. Some designs have reset IC's
3736 * which can hold reset low for close to 300ms. In addition there can
3737 * be ramp time for VDD and/or VDDIO which might be provided from a LDO.
3738 * For these reasons we need a pretty conservative delay here to have
3739 * predictable reset behavior in the face of an unknown design.
3740 */
3741 OSL_DELAY(500000);
3742
3743 baseclk = GFIELD(sd->caps, CAP_BASECLK);
3744 sd_info(("%s:baseclk: %d MHz\n", __FUNCTION__, baseclk));
3745 /* for 3.0, find divisor */
3746 if (sd->host_UHSISupported) {
3747 /* ToDo : Dynamic modification of preset value table based on base clk */
3748 sd3_trace(("sd3: %s: checking divisor\n", __FUNCTION__));
3749 if (GFIELD(sd->caps3, CAP3_CLK_MULT) != 0) {
3750 sd_err(("%s: Possible error: clock multiplier (CAP3_CLK_MULT != 0) not supported!\n",
3751 __FUNCTION__));
3752 return FALSE;
3753 } else {
3754 /* calculate the divisor that yields ~400 kHz. */
3755 init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
3756 /* make it a multiple of 2. */
3757 init_divider += (init_divider & 0x1);
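			/* e.g. a 100 MHz base clock gives init_divider 250: 100 MHz / 250 = 400 kHz */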
3758 sd_err(("%s:divider used for init:%d\n",
3759 __FUNCTION__, init_divider));
3760 }
3761 } else {
3762 /* Note: sd_divisor assumes that SDIO Base CLK is 50MHz. */
3763 int final_freq_based_on_div = 50/sd_divisor;
3764 if (baseclk > 50)
3765 sd_divisor = baseclk/final_freq_based_on_div;
3766 /* TBD: merge both SDIO 2.0 and 3.0 to share same divider logic */
3767 init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
3768 /* find next power of 2 */
3769 NEXT_POW2(init_divider);
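		/* e.g. a 50 MHz base clock gives 125 -> NEXT_POW2 -> 128: 50 MHz / 128 = ~391 kHz */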
3770 sd_err(("%s:NONUHSI: divider used for init:%d\n",
3771 __FUNCTION__, init_divider));
3772 }
3773
3774 /* Start at ~400KHz clock rate for initialization */
3775 if (!sdstd_start_clock(sd, init_divider)) {
3776 sd_err(("%s: sdstd_start_clock failed\n", __FUNCTION__));
3777 return FALSE;
3778 }
3779
3780 /* Get the Card's Operation Condition. Occasionally the board
3781 * takes a while to become ready
3782 */
3783 cmd_arg = 0;
3784 cmd_rsp = 0;
3785 if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
3786 sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__));
3787 /* No need to reset as not sure in what state the card is. */
3788 return SDIO_OCR_READ_FAIL;
3789 }
3790
3791 sd_info(("cmd_rsp = 0x%x\n", cmd_rsp));
3792 sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
3793 sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
3794 sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
3795 sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
3796
3797 /* Verify that the card supports I/O mode */
3798 if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
3799 sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
3800 return ERROR;
3801 }
3802 sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
3803
3804 /* Examine voltage: Arasan only supports 3.3 volts,
3805 * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
3806 */
3807
3808 /* XXX Pg 10 SDIO spec v1.10 */
3809 if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
3810 sd_err(("This client does not support 3.3 volts!\n"));
3811 return ERROR;
3812 }
3813 sd_info(("Leaving bus power at 3.3 Volts\n"));
3814
3815 cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
3816 /* if HC uhsi supported and card voltage set is 3.3V then switch to 1.8V */
3817 if ((sd->host_UHSISupported) && (volts == 5)) {
3818 /* set S18R also */
3819 cmd_arg = SFIELD(cmd_arg, CMD5_S18R, 1);
3820 }
3821 cmd_rsp = 0;
3822 get_ocr(sd, &cmd_arg, &cmd_rsp);
3823 sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
3824
3825 if ((sd->host_UHSISupported)) {
3826 /* card responded with S18A => card supports SDIO 3.0, do tuning proc */
3827 if (GFIELD(cmd_rsp, RSP4_S18A) == 1) {
3828 if (sdstd_3_sigvoltswitch_proc(sd)) {
3829 /* continue with legacy way of working */
3830 sd_err(("%s: voltage switch not done. error, stopping\n",
3831 __FUNCTION__));
3832 /* How to gracefully proceed here? */
3833 return FALSE;
3834 } else {
3835 sd->card_UHSI_voltage_Supported = TRUE;
3836 sd_err(("%s: voltage switch SUCCESS!\n", __FUNCTION__));
3837 }
3838 } else {
3839 /* This could happen in two cases.
3840 * 1) The card is NOT sdio3.0. Note that
3841 * card_UHSI_voltage_Supported is already false.
3842 * 2) card is sdio3.0 but it is already in 1.8V.
3843 * But now, how to change host controller's voltage?
3844 * In this case we need to do the following.
3845 * sd->card_UHSI_voltage_Supported = TRUE;
3846 * turn 1.8V sig enable in HC2
3847 * val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
3848 * val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
3849 * sdstd_wreg16(sd, SD3_HostCntrl2, val1);
3850 */
3851 sd_info(("%s: Not sdio3.0: host_UHSISupported: %d; HC volts=%d\n",
3852 __FUNCTION__, sd->host_UHSISupported, volts));
3853 }
3854 } else {
3855 sd_info(("%s: Legacy [non sdio3.0] HC\n", __FUNCTION__));
3856 }
3857
3858 return TRUE;
3859 }
3860
3861 bool
3862 sdstd_bus_width(sdioh_info_t *sd, int new_mode)
3863 {
3864 uint32 regdata;
3865 int status;
3866 uint8 reg8;
3867
3868 sd_trace(("%s\n", __FUNCTION__));
3869 if (sd->sd_mode == new_mode) {
3870 sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
3871 /* Could exit, but continue just in case... */
3872 }
3873
3874 /* Set client side via reg 0x7 in CCCR */
3875 if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
3876 return (bool)status;
3877 regdata &= ~BUS_SD_DATA_WIDTH_MASK;
3878 if (new_mode == SDIOH_MODE_SD4) {
3879 sd_info(("Changing to SD4 Mode\n"));
3880 regdata |= SD4_MODE;
3881 } else if (new_mode == SDIOH_MODE_SD1) {
3882 sd_info(("Changing to SD1 Mode\n"));
3883 } else {
3884 sd_err(("SPI Mode not supported by Standard Host Controller\n"));
3885 }
3886
3887 if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
3888 return (bool)status;
3889
3890 if (sd->host_UHSISupported) {
3891 uint32 card_asyncint = 0;
3892 uint16 host_asyncint = 0;
3893
3894 if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_INTR_EXTN, 1,
3895 &card_asyncint)) != SUCCESS) {
3896 sd_err(("%s: INTR EXT read failed, ignoring\n", __FUNCTION__));
3897 } else {
3898 host_asyncint = sdstd_rreg16(sd, SD3_HostCntrl2);
3899
3900 /* check if supported by host and card */
3901 if ((regdata & SD4_MODE) &&
3902 (GFIELD(card_asyncint, SDIO_BUS_ASYNCINT_CAP)) &&
3903 (GFIELD(sd->caps, CAP_ASYNCINT_SUP))) {
3904 /* set enable async int in card */
3905 card_asyncint = SFIELD(card_asyncint, SDIO_BUS_ASYNCINT_SEL, 1);
3906
3907 if ((status = sdstd_card_regwrite (sd, 0,
3908 SDIOD_CCCR_INTR_EXTN, 1, card_asyncint)) != SUCCESS)
3909 sd_err(("%s: INTR EXT write failed, ignoring\n",
3910 __FUNCTION__));
3911 else {
3912 /* set enable async int in host */
3913 host_asyncint = SFIELD(host_asyncint,
3914 HOSTCtrl2_ASYINT_EN, 1);
3915 sdstd_wreg16(sd, SD3_HostCntrl2, host_asyncint);
3916 }
3917 } else {
3918 sd_err(("%s: INTR EXT NOT supported by either host or "
3919 "card, ignoring\n", __FUNCTION__));
3920 }
3921 }
3922 }
3923
3924 /* Set host side via Host reg */
3925 reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
3926 if (new_mode == SDIOH_MODE_SD4)
3927 reg8 |= SD4_MODE;
3928 sdstd_wreg8(sd, SD_HostCntrl, reg8);
3929
3930 sd->sd_mode = new_mode;
3931
3932 return TRUE;
3933 }
3934
3935 static int
3936 sdstd_driver_init(sdioh_info_t *sd)
3937 {
3938 sd_trace(("%s\n", __FUNCTION__));
3939 sd->sd3_tuning_reqd = FALSE;
3940 sd->sd3_tuning_disable = FALSE;
3941 if ((sdstd_host_init(sd)) != SUCCESS) {
3942 return ERROR;
3943 }
3944
3945 /* Give WL_reset before sending CMD5 to dongle for Revx SDIO3 HC's */
3946 if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && (sd->version == HOST_CONTR_VER_3))
3947 {
3948 sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x8);
3949 OSL_DELAY(sd_delay_value);
3950 sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x0);
3951 OSL_DELAY(500000);
3952 }
3953
3954 if (sdstd_client_init(sd) != SUCCESS) {
3955 return ERROR;
3956 }
3957
3958 /* if the global caps match and the mode is SDR104 (or SDR50 when SDR50 requires tuning), enable tuning. */
3959 if ((TRUE != sd3_sw_override1) && SD3_TUNING_REQD(sd, sd_uhsimode)) {
3960 sd->sd3_tuning_reqd = TRUE;
3961
3962 /* init OS structs for tuning */
3963 sdstd_3_osinit_tuning(sd);
3964
3965 /* enable HC tuning interrupt OR timer based on tuning method */
3966 if (GFIELD(sd->caps3, CAP3_RETUNING_MODES)) {
3967 /* enable both RTReq and timer */
3968 sd->intmask |= HC_INTR_RETUNING;
3969 sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
3970 #ifdef BCMSDYIELD
3971 if (sd_forcerb)
3972 sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
3973 #endif /* BCMSDYIELD */
3974 }
3975 }
3976
3977 return SUCCESS;
3978 }
3979
3980 static int
3981 sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
3982 {
3983 /* read 24 bits and return valid 17 bit addr */
3984 int i;
3985 uint32 scratch, regdata;
3986 uint8 *ptr = (uint8 *)&scratch;
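	/* The pointer is stored little-endian across three consecutive byte-wide
	 * registers, so gather it a byte at a time and convert once at the end.
	 */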
3987 for (i = 0; i < 3; i++) {
3988 if ((sdstd_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
3989 sd_err(("%s: Can't read!\n", __FUNCTION__));
3990
3991 *ptr++ = (uint8) regdata;
3992 regaddr++;
3993 }
3994 /* Only the lower 17-bits are valid */
3995 scratch = ltoh32(scratch);
3996 scratch &= 0x0001FFFF;
3997 return (scratch);
3998 }
3999
4000 static int
4001 sdstd_card_enablefuncs(sdioh_info_t *sd)
4002 {
4003 int status;
4004 uint32 regdata;
4005 uint32 fbraddr;
4006 uint8 func;
4007
4008 sd_trace(("%s\n", __FUNCTION__));
4009
4010 /* Get the Card's common CIS address */
4011 sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
4012 sd->func_cis_ptr[0] = sd->com_cis_ptr;
4013 sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
4014
4015 /* Get the Card's function CIS (for each function) */
4016 for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
4017 func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
4018 sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
4019 sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
4020 __FUNCTION__, func, sd->func_cis_ptr[func]));
4021 }
4022
4023 /* Enable function 1 on the card */
4024 regdata = SDIO_FUNC_ENABLE_1;
4025 if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
4026 return status;
4027
4028 return SUCCESS;
4029 }
4030
4031 /* Read client card reg */
4032 static int
4033 sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
4034 {
4035 int status;
4036 uint32 cmd_arg;
4037 uint32 rsp5;
4038
4039 #ifdef BCMDBG
4040 if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) {
4041 sd_err(("%s: Entering: ErrorintrStatus 0x%x, intstat = 0x%x\n",
4042 __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus),
4043 sdstd_rreg16(sd, SD_IntrStatus)));
4044 }
4045 #endif
4046
4047 cmd_arg = 0;
4048
4049 if ((func == 0) || (regsize == 1)) {
4050 cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
4051 cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
4052 cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
4053 cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
4054 cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
4055
4056 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
4057 != SUCCESS)
4058 return status;
4059
4060 sdstd_cmd_getrsp(sd, &rsp5, 1);
4061 if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
4062 sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
4063 __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
4064 }
4065
4066 if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
4067 sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
4068 __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
4069
4070 if (GFIELD(rsp5, RSP5_STUFF))
4071 sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
4072 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
4073 *data = GFIELD(rsp5, RSP5_DATA);
4074
4075 sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
4076 } else {
4077 cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
4078 cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); /* XXX SDIO spec v 1.10, Sec 5.3 */
4079 cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
4080 cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
4081 cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
4082 cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
4083
4084 sd->data_xfer_count = regsize;
4085
4086 /* sdstd_cmd_issue() returns with the command complete bit
4087 * in the ISR already cleared
4088 */
4089 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
4090 != SUCCESS)
4091 return status;
4092
4093 sdstd_cmd_getrsp(sd, &rsp5, 1);
4094
4095 if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
4096 sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
4097 __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
4098
4099 if (GFIELD(rsp5, RSP5_STUFF))
4100 sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
4101 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
4102
4103 if (sd->polled_mode) {
4104 volatile uint16 int_reg;
4105 int retries = RETRIES_LARGE;
4106
4107 /* Wait for Read Buffer to become ready */
4108 do {
4109 sdstd_os_yield(sd);
4110 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4111 } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
4112
4113 if (!retries) {
4114 sd_err(("%s: Timeout on Buf_Read_Ready: "
4115 "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
4116 __FUNCTION__, int_reg,
4117 sdstd_rreg16(sd, SD_ErrorIntrStatus),
4118 sdstd_rreg(sd, SD_PresentState)));
4119 sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
4120 return (ERROR);
4121 }
4122
4123 /* Have Buffer Ready, so clear it and read the data */
4124 sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
4125 if (regsize == 2)
4126 *data = sdstd_rreg16(sd, SD_BufferDataPort0);
4127 else
4128 *data = sdstd_rreg(sd, SD_BufferDataPort0);
4129
4130 sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
4131 /* Check Status.
4132 * After the data is read, the Transfer Complete bit should be on
4133 */
4134 retries = RETRIES_LARGE;
4135 do {
4136 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4137 } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
4138
4139 /* Check for any errors from the data phase */
4140 if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
4141 return ERROR;
4142
4143 if (!retries) {
4144 sd_err(("%s: Timeout on xfer complete: "
4145 "intr 0x%04x err 0x%04x state 0x%08x\n",
4146 __FUNCTION__, int_reg,
4147 sdstd_rreg16(sd, SD_ErrorIntrStatus),
4148 sdstd_rreg(sd, SD_PresentState)));
4149 return (ERROR);
4150 }
4151
4152 sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
4153 }
4154 }
4155 if (sd->polled_mode) {
4156 if (regsize == 2)
4157 *data &= 0xffff;
4158 }
4159 return SUCCESS;
4160 }
4161
4162 bool
4163 check_client_intr(sdioh_info_t *sd)
4164 {
4165 uint16 raw_int, cur_int, old_int;
4166
4167 raw_int = sdstd_rreg16(sd, SD_IntrStatus);
4168 cur_int = raw_int & sd->intmask;
4169
4170 if (!cur_int) {
4171 /* Not an error -- might share interrupts... */
4172 return FALSE;
4173 }
4174
4175 if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
4176 unsigned long flags;
4177
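		/* Mask the card interrupt status while the client handler runs so it
		 * cannot refire, then unmask it again below.
		 */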
4178 sdstd_os_lock_irqsave(sd, &flags);
4179 old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
4180 sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));
4181 sdstd_os_unlock_irqrestore(sd, &flags);
4182
4183 if (sd->client_intr_enabled && sd->use_client_ints) {
4184 sd->intrcount++;
4185 ASSERT(sd->intr_handler);
4186 ASSERT(sd->intr_handler_arg);
4187 (sd->intr_handler)(sd->intr_handler_arg);
4188 } else {
4189 sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
4190 __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
4191 }
4192 sdstd_os_lock_irqsave(sd, &flags);
4193 old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
4194 sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 1));
4195 sdstd_os_unlock_irqrestore(sd, &flags);
4196 } else {
4197 /* Local interrupt: disable, set flag, and save intrstatus */
4198 sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
4199 sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
4200 sd->local_intrcount++;
4201 sd->got_hcint = TRUE;
4202 sd->last_intrstatus = cur_int;
4203 }
4204
4205 return TRUE;
4206 }
4207
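/* Poll IntrStatus/ErrorIntrStatus until one of the requested normal ('norm') or
 * error ('err') bits asserts or the retry budget runs out, then record the masked
 * result (with INTSTAT_ERROR_INT set on an error) in sd->last_intrstatus.
 */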
4208 void
4209 sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
4210 {
4211 uint16 int_reg, err_reg;
4212 int retries = RETRIES_LARGE;
4213
4214 do {
4215 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4216 err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
4217 } while (--retries && !(int_reg & norm) && !(err_reg & err));
4218
4219 norm |= sd->intmask;
4220 if (err_reg & err)
4221 norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
4222 sd->last_intrstatus = int_reg & norm;
4223 }
4224
4225 /* write a client register */
4226 static int
4227 sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
4228 {
4229 int status;
4230 uint32 cmd_arg, rsp5, flags;
4231
4232 cmd_arg = 0;
4233
4234 if ((func == 0) || (regsize == 1)) {
4235 cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
4236 cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
4237 cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
4238 cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
4239 cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
4240 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
4241 != SUCCESS)
4242 return status;
4243
4244 sdstd_cmd_getrsp(sd, &rsp5, 1);
4245 flags = GFIELD(rsp5, RSP5_FLAGS);
4246 if (flags && (flags != 0x10))
4247 sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
4248 __FUNCTION__, flags));
4249 }
4250 else {
4251 cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
4252 /* XXX SDIO spec v 1.10, Sec 5.3 Not FIFO */
4253 cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
4254 cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
4255 cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
4256 cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
4257 cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
4258
4259 sd->data_xfer_count = regsize;
4260
4261 /* sdstd_cmd_issue() returns with the command complete bit
4262 * in the ISR already cleared
4263 */
4264 if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
4265 != SUCCESS)
4266 return status;
4267
4268 sdstd_cmd_getrsp(sd, &rsp5, 1);
4269
4270 if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
4271 sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
4272 __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS)));
4273 if (GFIELD(rsp5, RSP5_STUFF))
4274 sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
4275 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
4276
4277 if (sd->polled_mode) {
4278 uint16 int_reg;
4279 int retries = RETRIES_LARGE;
4280
4281 /* Wait for Write Buffer to become ready */
4282 do {
4283 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4284 } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
4285
4286 if (!retries) {
4287 sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
4288 "errint: 0x%x PresentState 0x%x\n",
4289 __FUNCTION__, int_reg,
4290 sdstd_rreg16(sd, SD_ErrorIntrStatus),
4291 sdstd_rreg(sd, SD_PresentState)));
4292 sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
4293 return (ERROR);
4294 }
4295 /* Clear Write Buf Ready bit */
4296 int_reg = 0;
4297 int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
4298 sdstd_wreg16(sd, SD_IntrStatus, int_reg);
4299
4300 /* At this point we have Buffer Ready, so write the data */
4301 if (regsize == 2)
4302 sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
4303 else
4304 sdstd_wreg(sd, SD_BufferDataPort0, data);
4305
4306 /* Wait for Transfer Complete */
4307 retries = RETRIES_LARGE;
4308 do {
4309 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4310 } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
4311
4312 /* Check for any errors from the data phase */
4313 if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
4314 return ERROR;
4315
4316 if (retries == 0) {
4317 sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
4318 "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
4319 __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
4320 int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
4321 sd->r_cnt, sd->t_cnt));
4322 }
4323 /* Clear the status bits */
4324 sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
4325 }
4326 }
4327 return SUCCESS;
4328 }
4329
4330 void
4331 sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
4332 {
4333 int rsp_count;
4334 int respaddr = SD_Response0;
4335
4336 if (count > 4)
4337 count = 4;
4338
4339 for (rsp_count = 0; rsp_count < count; rsp_count++) {
4340 *rsp_buffer++ = sdstd_rreg(sd, respaddr);
4341 respaddr += 4;
4342 }
4343 }
4344
4345 /*
4346 Note: options: 0 - default
4347 1 - tuning option: this command is issued as part of the tuning
4348 procedure, so there is no need to check the start-tuning function.
4349 */
4350 static int
4351 sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
4352 {
4353 uint16 cmd_reg;
4354 int retries;
4355 uint32 cmd_arg;
4356 uint16 xfer_reg = 0;
4357
4358 #ifdef BCMDBG
4359 if (sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus) != 0) {
4360 sd_err(("%s: Entering: ErrorIntrStatus 0x%x, Expecting 0\n",
4361 __FUNCTION__, sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)));
4362 }
4363 #endif
4364
4365 if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
4366 ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
4367 sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
4368 return ERROR;
4369 }
4370
4371 retries = RETRIES_SMALL;
4372 while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) {
4373 if (retries == RETRIES_SMALL)
4374 sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
4375 __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
4376 }
4377 if (!retries) {
4378 sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
4379 if (trap_errs)
4380 ASSERT(0);
4381 return ERROR;
4382 }
4383
4384 cmd_reg = 0;
4385 switch (cmd) {
4386 case SDIOH_CMD_0: /* Set Card to Idle State - No Response */
4387 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4388 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
4389 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4390 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4391 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4392 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4393 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4394 break;
4395
4396 case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */
4397 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4398 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4399 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4400 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4401 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4402 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4403 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4404 break;
4405
4406 case SDIOH_CMD_5: /* Send Operation condition - Response R4 */
4407 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4408 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4409 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4410 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4411 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4412 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4413 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4414 break;
4415
4416 case SDIOH_CMD_7: /* Select card - Response R1 */
4417 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4418 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4419 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4420 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4421 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4422 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4423 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4424 break;
4425
4426 case SDIOH_CMD_14: /* eSD Sleep - Response R1 */
4427 case SDIOH_CMD_11: /* Select card - Response R1 */
4428 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4429 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4430 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4431 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4432 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4433 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4434 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4435 break;
4436
4437 case SDIOH_CMD_15: /* Set card to inactive state - Response None */
4438 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4439 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
4440 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4441 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4442 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4443 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4444 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4445 break;
4446
4447 case SDIOH_CMD_19: /* clock tuning - Response R1 */
4448 sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
4449 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4450 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4451 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4452 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
4453 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4454 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4455 /* Host controller reads 64 byte magic pattern from card
4456 * Hence Direction = 1 ( READ )
4457 */
4458 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
4459 break;
4460
4461 case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */
4462
4463 sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
4464 __FUNCTION__,
4465 GFIELD(arg, CMD52_FUNCTION),
4466 GFIELD(arg, CMD52_REG_ADDR),
4467 GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
4468 GFIELD(arg, CMD52_DATA)));
4469
4470 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4471 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4472 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4473 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
4474 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4475 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4476 break;
4477
4478 case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */
4479
4480 sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
4481 __FUNCTION__,
4482 GFIELD(arg, CMD53_FUNCTION),
4483 GFIELD(arg, CMD53_REG_ADDR),
4484 GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
4485 GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
4486 GFIELD(arg, CMD53_BYTE_BLK_CNT),
4487 GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));
4488
4489 cmd_arg = arg;
4490 xfer_reg = 0;
4491
4492 cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
4493 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
4494 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
4495 cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
4496 cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
4497 cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
4498
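		/* Only block-mode CMD53 uses DMA here; with this assignment the
		 * byte-mode ("Non block mode") path below always runs with
		 * use_dma == FALSE, i.e. PIO.
		 */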
4499 use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);
4500
4501 if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
4502 uint16 blocksize;
4503 uint16 blockcount;
4504 int func;
4505
4506 ASSERT(sdioh_info->sd_blockmode);
4507
4508 func = GFIELD(cmd_arg, CMD53_FUNCTION);
4509 blocksize = MIN((int)sdioh_info->data_xfer_count,
4510 sdioh_info->client_block_size[func]);
4511 blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
4512
4513 /* data_xfer_cnt is already setup so that for multiblock mode,
4514 * it is the entire buffer length. For non-block or single block,
4515 * it is < 64 bytes
4516 */
4517 if (use_dma) {
4518 switch (sdioh_info->sd_dma_mode) {
4519 case DMA_MODE_SDMA:
4520 sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
4521 __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr),
4522 (uint32)sdioh_info->dma_phys));
4523 sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
4524 break;
4525 case DMA_MODE_ADMA1:
4526 case DMA_MODE_ADMA2:
4527 sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
4528 #ifdef BCMSDIOH_TXGLOM
4529 /* multi-descriptor is currently used only for hc3 */
4530 if ((sdioh_info->glom_info.count != 0) &&
4531 (sdioh_info->txglom_mode == SDPCM_TXGLOM_MDESC)) {
4532 uint32 i = 0;
4533 for (i = 0;
4534 i < sdioh_info->glom_info.count-1;
4535 i++) {
4536 glom_buf_t *glom_info;
4537 glom_info = &(sdioh_info->glom_info);
4538 sd_create_adma_descriptor(sdioh_info,
4539 i,
4540 glom_info->dma_phys_arr[i],
4541 glom_info->nbytes[i],
4542 ADMA2_ATTRIBUTE_VALID |
4543 ADMA2_ATTRIBUTE_ACT_TRAN);
4544 }
4545
4546 sd_create_adma_descriptor(sdioh_info,
4547 i,
4548 sdioh_info->glom_info.dma_phys_arr[i],
4549 sdioh_info->glom_info.nbytes[i],
4550 ADMA2_ATTRIBUTE_VALID |
4551 ADMA2_ATTRIBUTE_END |
4552 ADMA2_ATTRIBUTE_INT |
4553 ADMA2_ATTRIBUTE_ACT_TRAN);
4554 } else
4555 #endif /* BCMSDIOH_TXGLOM */
4556 {
4557 sd_create_adma_descriptor(sdioh_info, 0,
4558 sdioh_info->dma_phys, blockcount*blocksize,
4559 ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
4560 ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
4561 }
4562 /* Dump descriptor if DMA debugging is enabled. */
4563 if (sd_msglevel & SDH_DMA_VAL) {
4564 sd_dump_adma_dscr(sdioh_info);
4565 }
4566
4567 sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
4568 sdioh_info->adma2_dscr_phys);
4569 break;
4570 default:
4571 sd_err(("%s: unsupported DMA mode %d.\n",
4572 __FUNCTION__, sdioh_info->sd_dma_mode));
4573 break;
4574 }
4575 }
4576
4577 sd_trace(("%s: Setting block count %d, block size %d bytes\n",
4578 __FUNCTION__, blockcount, blocksize));
4579 sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
4580 sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);
4581
4582 xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
4583
4584 if (sdioh_info->client_block_size[func] != blocksize)
4585 set_client_block_size(sdioh_info, 1, blocksize);
4586
4587 if (blockcount > 1) {
4588 xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
4589 xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
4590 xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
4591 } else {
4592 xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
4593 xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
4594 xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
4595 }
4596
4597 if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
4598 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
4599 else
4600 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
4601
4602 retries = RETRIES_SMALL;
4603 while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
4604 PRES_DAT_INHIBIT) && --retries)
4605 sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
4606 __FUNCTION__, cmd));
4607 if (!retries) {
4608 sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
4609 if (trap_errs)
4610 ASSERT(0);
4611 return ERROR;
4612 }
4613
4614 /* Consider deferring this write to the comment below "Deferred Write" */
4615 sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
4616
4617 } else { /* Non block mode */
4618 uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
4619 /* The byte/block count field only has 9 bits,
4620 * so, to do a 512-byte bytemode transfer, this
4621 * field will contain 0, but we need to tell the
4622 * controller we're transferring 512 bytes.
4623 */
4624 if (bytes == 0) bytes = 512;
4625
4626 if (use_dma)
4627 sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
4628
4629 /* PCI: Transfer Mode register 0x0c */
4630 xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
4631 xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
4632 if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
4633 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
4634 else
4635 xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
4636 /* See table 2-8 Host Controller spec ver 1.00 */
4637 xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Dont care */
4638 xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
4639
4640 sdstd_wreg16(sdioh_info, SD_BlockSize, bytes);
4641
4642 /* XXX This should be a don't care but Arasan needs it
4643 * to be one. Its fixed in later versions (but they
4644 * don't have version numbers, sigh).
4645 */
4646 sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
4647
4648 retries = RETRIES_SMALL;
4649 while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
4650 PRES_DAT_INHIBIT) && --retries)
4651 sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
4652 __FUNCTION__, cmd));
4653 if (!retries) {
4654 sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
4655 if (trap_errs)
4656 ASSERT(0);
4657 return ERROR;
4658 }
4659
4660 /* Consider deferring this write to the comment below "Deferred Write" */
4661 sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
4662 }
4663 break;
4664
4665 default:
4666 sd_err(("%s: Unknown command\n", __FUNCTION__));
4667 return ERROR;
4668 }
4669
4670 if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
4671 cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
4672 cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
4673 }
4674
4675 /* Setup and issue the SDIO command */
4676 sdstd_wreg(sdioh_info, SD_Arg0, arg);
4677
4678 /* Deferred Write
4679 * Consider deferring the two writes above until this point in the code.
4680 * The following would do one 32 bit write.
4681 *
4682 * {
4683 * uint32 tmp32 = cmd_reg << 16;
4684 * tmp32 |= xfer_reg;
4685 * sdstd_wreg(sdioh_info, SD_TransferMode, tmp32);
4686 * }
4687 */
4688
4689 /* Alternate to Deferred Write START */
4690
4691 /* In response to CMD19 card sends 64 byte magic pattern.
4692 * So SD_BlockSize = 64 & SD_BlockCount = 1
4693 */
4694 if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19) {
4695 sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
4696 sdstd_wreg16(sdioh_info, SD_BlockSize, 64);
4697 sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
4698 }
4699 sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);
4700
4701 /* Alternate to Deferred Write END */
4702
4703 /* If we are in polled mode, wait for the command to complete.
4704 * In interrupt mode, return immediately. The calling function will
4705 * know that the command has completed when the CMDATDONE interrupt
4706 * is asserted
4707 */
4708 if (sdioh_info->polled_mode) {
4709 uint16 int_reg = 0;
4710 retries = RETRIES_LARGE;
4711
4712 /* For CMD19 no need to wait for cmd completion */
4713 if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19)
4714 return SUCCESS;
4715
4716 do {
4717 int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
4718 sdstd_os_yield(sdioh_info);
4719 } while (--retries &&
4720 (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
4721 (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
4722
4723 if (!retries) {
4724 sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
4725 "error stat 0x%x state 0x%x\n",
4726 __FUNCTION__, int_reg,
4727 sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
4728 sdstd_rreg(sdioh_info, SD_PresentState)));
4729
4730 /* Attempt to reset CMD line when we get a CMD timeout */
4731 sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
4732 retries = RETRIES_LARGE;
4733 do {
4734 sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
4735 } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
4736 SW_RESET_CMD)) && retries--);
4737
4738 if (!retries) {
4739 sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
4740 }
4741
4742 if (trap_errs)
4743 ASSERT(0);
4744 return (ERROR);
4745 }
4746
4747 /* Clear Command Complete interrupt */
4748 int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
4749 sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);
4750
4751 /* Check for Errors */
4752 if (sdstd_check_errs(sdioh_info, cmd, arg)) {
4753 if (trap_errs)
4754 ASSERT(0);
4755 return ERROR;
4756 }
4757 }
4758 return SUCCESS;
4759 }
4760
4761 /*
4762 * XXX On entry: If single block or non-block, buffersize <= blocksize.
4763 * If multiblock, buffersize is unlimited.
4764 * Question is how to handle the leftovers in either single or multiblock.
4765 * I think the caller should break the buffer up so this routine will always
4766 * use blocksize == buffersize to handle the end piece of the buffer
4767 */
4768
4769 static int
4770 sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
4771 {
4772 int retval = SUCCESS;
4773 int status;
4774 uint32 cmd_arg;
4775 uint32 rsp5;
4776 uint16 int_reg, int_bit;
4777 uint flags;
4778 int num_blocks, blocksize;
4779 bool local_blockmode, local_dma;
4780 bool read = rw == SDIOH_READ ? 1 : 0;
4781 bool local_yield = FALSE;
4782 #ifdef BCMSDIOH_TXGLOM
4783 uint32 i;
4784 uint8 *localbuf = NULL;
4785 #endif
4786
4787 ASSERT(nbytes);
4788
4789 cmd_arg = 0;
4790
4791 sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
4792 __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
4793
4794 if (read) sd->r_cnt++; else sd->t_cnt++;
4795
4796 local_blockmode = sd->sd_blockmode;
4797 local_dma = USE_DMA(sd);
4798
4799 #ifdef BCMSDIOH_TXGLOM
4800 /* If multiple buffers are there, then calculate the nbytes from that */
4801 if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
4802 uint32 ii;
4803 nbytes = 0;
4804 for (ii = 0; ii < sd->glom_info.count; ii++) {
4805 nbytes += sd->glom_info.nbytes[ii];
4806 }
4807 ASSERT(nbytes <= sd->alloced_dma_size);
4808 }
4809 #endif
4810
4811 /* Don't bother with block mode on small xfers */
4812 if (nbytes < sd->client_block_size[func]) {
4813 sd_data(("setting local blockmode to false: nbytes (%d) < block_size (%d)\n",
4814 nbytes, sd->client_block_size[func]));
4815 local_blockmode = FALSE;
4816 local_dma = FALSE;
4817 #ifdef BCMSDIOH_TXGLOM
4818 /* In glommed case, create a single pkt from multiple pkts */
4819 if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
4820 uint32 offset = 0;
4821 localbuf = (uint8 *)MALLOC(sd->osh, nbytes);
if (localbuf == NULL) {
	sd_err(("%s: MALLOC of %d bytes failed\n", __FUNCTION__, nbytes));
	retval = ERROR;
	goto done;
}
4822 data = (uint32 *)localbuf;
4823 for (i = 0; i < sd->glom_info.count; i++) {
4824 bcopy(sd->glom_info.dma_buf_arr[i],
4825 ((uint8 *)data + offset),
4826 sd->glom_info.nbytes[i]);
4827 offset += sd->glom_info.nbytes[i];
4828 }
4829 }
4830 #endif
4831 }
4832
4833 if (local_blockmode) {
4834 blocksize = MIN(sd->client_block_size[func], nbytes);
4835 num_blocks = nbytes/blocksize;
4836 cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
4837 cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
4838 } else {
4839 num_blocks = 1;
4840 blocksize = nbytes;
4841 cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
4842 cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
4843 }
4844
4845 if (local_dma && !read) {
4846 #ifdef BCMSDIOH_TXGLOM
4847 if ((func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
4848 /* In case of hc ver 2 DMA_MAP may not work properly due to 4K alignment
4849 * requirements. So copying pkt to 4K aligned pre-allocated pkt.
4850 * Total length should not cross the pre-alloced memory size
4851 */
4852 if (sd->txglom_mode == SDPCM_TXGLOM_CPY) {
4853 uint32 total_bytes = 0;
4854 for (i = 0; i < sd->glom_info.count; i++) {
4855 bcopy(sd->glom_info.dma_buf_arr[i],
4856 (uint8 *)sd->dma_buf + total_bytes,
4857 sd->glom_info.nbytes[i]);
4858 total_bytes += sd->glom_info.nbytes[i];
4859 }
4860 sd_sync_dma(sd, read, total_bytes);
4861 }
4862 } else
4863 #endif /* BCMSDIOH_TXGLOM */
4864 {
4865 bcopy(data, sd->dma_buf, nbytes);
4866 sd_sync_dma(sd, read, nbytes);
4867 }
4868 }
4869
4870 if (fifo)
4871 cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0); /* XXX SDIO spec v 1.10, Sec 5.3 */
4872 else
4873 cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1); /* XXX SDIO spec v 1.10, Sec 5.3 */
4874
4875 cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
4876 cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
4877 if (read)
4878 cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
4879 else
4880 cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
4881
4882 sd->data_xfer_count = nbytes;
4883
4884 /* sdstd_cmd_issue() returns with the command complete bit
4885 * in the ISR already cleared
4886 */
4887 if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
4888 sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
4889 retval = status;
4890 goto done;
4891 }
4892
4893 sdstd_cmd_getrsp(sd, &rsp5, 1);
4894
4895 if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
4896 sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
4897 "numblocks %d, blocksize %d\n",
4898 __FUNCTION__, nbytes, local_dma, local_blockmode, read, num_blocks, blocksize));
4899
4900 if (flags & 1)
4901 sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
4902 "bytes %d dma %d\n",
4903 __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
4904 GFIELD(cmd_arg, CMD53_BLK_MODE)));
4905 if (flags & 0x8)
4906 sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
4907
4908 sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
4909 __FUNCTION__, flags));
4910 if (trap_errs)
4911 ASSERT(0);
4912 retval = ERROR;
4913 goto done;
4914 }
4915
4916 if (GFIELD(rsp5, RSP5_STUFF))
4917 sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
4918 __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
4919
4920 #ifdef BCMSDYIELD
4921 local_yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
4922 #endif
4923
4924 if (!local_dma) {
4925 int bytes, ii;
4926 uint32 tmp;
4927
4928 for (ii = 0; ii < num_blocks; ii++) {
4929 int words;
4930
4931 /* Decide which status bit we're waiting for */
4932 if (read)
4933 int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
4934 else
4935 int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
4936
4937 /* If not on, wait for it (or for xfer error) */
4938 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
4939 if (!(int_reg & int_bit)) {
4940 status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS,
4941 local_yield, &int_reg);
4942 switch (status) {
4943 case -1:
4944 sd_err(("%s: pio interrupted\n", __FUNCTION__));
4945 retval = ERROR;
4946 goto done;
4947 case -2:
4948 sd_err(("%s: pio timeout waiting for interrupt\n",
4949 __FUNCTION__));
4950 retval = ERROR;
4951 goto done;
4952 }
4953 }
4954 #ifdef BCMSLTGT
4955 /* int_reg = sdstd_rreg16(sd, SD_IntrStatus); */
4956 #endif
4957 /* Confirm we got the bit w/o error */
4958 if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
4959 sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
4960 "errint: 0x%x PresentState 0x%x\n",
4961 __FUNCTION__, read ? "Read" : "Write", int_reg,
4962 sdstd_rreg16(sd, SD_ErrorIntrStatus),
4963 sdstd_rreg(sd, SD_PresentState)));
4964 sdstd_dumpregs(sd);
4965 sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
4966 retval = ERROR;
4967 goto done;
4968 }
4969
4970 /* Clear Buf Ready bit */
4971 sdstd_wreg16(sd, SD_IntrStatus, int_bit);
4972
4973 /* At this point we have Buffer Ready, write the data 4 bytes at a time */
4974 for (words = blocksize/4; words; words--) {
4975 if (read)
4976 *data = sdstd_rreg(sd, SD_BufferDataPort0);
4977 else
4978 sdstd_wreg(sd, SD_BufferDataPort0, *data);
4979 data++;
4980 }
4981
4982 /* XXX
4983 * Handle < 4 bytes. wlc_pio.c currently (as of 12/20/05) truncates buflen
4984 * to be evenly divisible by 4. However dongle passes arbitrary lengths,
4985 * so handle it here
4986 */
4987 bytes = blocksize % 4;
4988
4989 /* If no leftover bytes, go to next block */
4990 if (!bytes)
4991 continue;
4992
4993 switch (bytes) {
4994 case 1:
4995 /* R/W 8 bits */
4996 if (read)
4997 *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0));
4998 else
4999 sdstd_wreg8(sd, SD_BufferDataPort0,
5000 (uint8)(*(data++) & 0xff));
5001 break;
5002 case 2:
5003 /* R/W 16 bits */
5004 if (read)
5005 *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
5006 else
5007 sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++)));
5008 break;
5009 case 3:
5010 /* R/W 24 bits:
5011 * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23]
5012 */
5013 if (read) {
5014 tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
5015 tmp |= ((uint32)(sdstd_rreg8(sd,
5016 SD_BufferDataPort1)) << 16);
5017 *(data++) = tmp;
5018 } else {
5019 tmp = *(data++);
5020 sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff);
5021 sdstd_wreg8(sd, SD_BufferDataPort1,
5022 (uint8)((tmp >> 16) & 0xff));
5023 }
5024 break;
5025 default:
5026 sd_err(("%s: Unexpected bytes leftover %d\n",
5027 __FUNCTION__, bytes));
5028 ASSERT(0);
5029 break;
5030 }
5031 }
5032 } /* End PIO processing */
5033
5034 /* Wait for Transfer Complete or Transfer Error */
5035 int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1);
5036
5037 /* If not on, wait for it (or for xfer error) */
5038 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
5039 if (!(int_reg & int_bit)) {
5040 status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, local_yield, &int_reg);
5041 switch (status) {
5042 case -1:
5043 sd_err(("%s: interrupted\n", __FUNCTION__));
5044 retval = ERROR;
5045 goto done;
5046 case -2:
5047 sd_err(("%s: timeout waiting for interrupt\n", __FUNCTION__));
5048 retval = ERROR;
5049 goto done;
5050 }
5051 }
5052
5053 /* Check for any errors from the data phase */
5054 if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) {
5055 retval = ERROR;
5056 goto done;
5057 }
5058
5059 /* May have gotten a software timeout if not blocking? */
5060 int_reg = sdstd_rreg16(sd, SD_IntrStatus);
5061 if (!(int_reg & int_bit)) {
5062 sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, "
5063 "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n",
5064 __FUNCTION__, read ? "R" : "W", local_dma,
5065 sdstd_rreg(sd, SD_PresentState), int_reg,
5066 sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes,
5067 sd->r_cnt, sd->t_cnt));
5068 sdstd_dumpregs(sd);
5069 retval = ERROR;
5070 goto done;
5071 }
5072
5073 /* Clear the status bits */
5074 int_reg = int_bit;
5075 if (local_dma) {
5076 /* DMA Complete */
5077 /* XXX Step 14, Section 3.6.2.2 Stnd Cntrlr Spec */
5078 /* Reads in particular don't have DMA_COMPLETE set */
5079 int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1);
5080 }
5081 sdstd_wreg16(sd, SD_IntrStatus, int_reg);
5082
5083 /* Fetch data */
5084 if (local_dma && read) {
5085 sd_sync_dma(sd, read, nbytes);
5086 bcopy(sd->dma_buf, data, nbytes);
5087 }
5088
5089 done:
5090 #ifdef BCMSDIOH_TXGLOM
5091 if (localbuf)
5092 MFREE(sd->osh, localbuf, nbytes);
5093 #endif
5094 return retval;
5095 }
5096
5097 static int
5098 set_client_block_size(sdioh_info_t *sd, int func, int block_size)
5099 {
5100 int base;
5101 int err = 0;
5102
5103 sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
5104 sd->client_block_size[func] = block_size;
5105
5106 /* Set the block size in the SDIO Card register */
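	/* Function n's FBR occupies 0xn00-0xnFF, so its block-size registers land at
	 * n*SDIOD_FBR_SIZE plus the same 0x10/0x11 offsets used in the CCCR.
	 */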
5107 base = func * SDIOD_FBR_SIZE;
5108 err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
5109 if (!err) {
5110 err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1,
5111 (block_size >> 8) & 0xff);
5112 }
5113
5114 /* Do not set the block size in the SDIO Host register; that
5115 * setting is function-dependent and is programmed on a
5116 * per-transaction basis
5117 */
5118
5119 return (err ? BCME_SDIO_ERROR : 0);
5120 }
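
/* Example (illustrative): a 512-byte block size for F2 is programmed by
 * writing 0x00 to (2 * SDIOD_FBR_SIZE) + SDIOD_CCCR_BLKSIZE_0 and 0x02 to
 * (2 * SDIOD_FBR_SIZE) + SDIOD_CCCR_BLKSIZE_1 via function 0, since
 * 512 == 0x0200.
 */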
5121
5122 /* Reset and re-initialize the device */
5123 int
5124 sdioh_sdio_reset(sdioh_info_t *si)
5125 {
5126 uint8 hreg;
5127
5128 /* Reset the attached device (use slower clock for safety) */
5129 if (!sdstd_start_clock(si, 128)) {
5130 sd_err(("set clock failed!\n"));
5131 return ERROR;
5132 }
5133 sdstd_reset(si, 0, 1);
5134
5135 /* Reset portions of the host state accordingly */
5136 hreg = sdstd_rreg8(si, SD_HostCntrl);
5137 hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0);
5138 hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0);
5139 si->sd_mode = SDIOH_MODE_SD1;
5140
5141 /* Reinitialize the card */
5142 si->card_init_done = FALSE;
5143 return sdstd_client_init(si);
5144 }
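
/* Hypothetical usage from the bus layer after an unrecoverable data error
 * (illustrative, not part of this file's call graph):
 *
 *	if (sdioh_sdio_reset(sd) == ERROR)
 *		sd_err(("%s: SDIO reset/reinit failed\n", __FUNCTION__));
 *
 * sdstd_client_init() supplies the return value, so callers only need to
 * check this one result.
 */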
5145
5146 #ifdef BCMINTERNAL
5147 #ifdef NOTUSED
5148 static void
5149 cis_fetch(sdioh_info_t *sd, int func, char *data, int len)
5150 {
5151 int count;
5152 int offset;
5153 char *end = data + len;
5154 uint32 foo;
5155
5156 for (count = 0; count < 512 && data < end; count++) {
5157 offset = sd->func_cis_ptr[func] + count;
5158 if (sdstd_card_regread(sd, func, offset, 1, &foo) < 0) {
5159 sd_err(("%s: regread failed\n", __FUNCTION__));
5160 return;
5161 }
5162 data += sprintf(data, "%.2x ", foo & 0xff);
5163 if (((count+1) % 16) == 0)
5164 data += sprintf(data, "\n");
5165 }
5166 }
5167 #endif /* NOTUSED */
5168 #endif /* BCMINTERNAL */
5169
5170 static void
5171 sd_map_dma(sdioh_info_t *sd)
5172 {
5173
5174 int alloced;
5175 void *va;
5176 uint dma_buf_size = SD_PAGE;
5177
5178 #ifdef BCMSDIOH_TXGLOM
5179 /* There is no alignment requirement for HC3 */
5180 if ((sd->version == HOST_CONTR_VER_3) && sd_txglom) {
5181 /* Max glom packet length is 64KB */
5182 dma_buf_size = SD_PAGE * 16;
5183 }
5184 #endif
5185
5186 alloced = 0;
5187 if ((va = DMA_ALLOC_CONSISTENT(sd->osh, dma_buf_size, SD_PAGE_BITS, &alloced,
5188 &sd->dma_start_phys, 0x12)) == NULL) {
5189 sd->sd_dma_mode = DMA_MODE_NONE;
5190 sd->dma_start_buf = 0;
5191 sd->dma_buf = (void *)0;
5192 sd->dma_phys = 0;
5193 sd->alloced_dma_size = 0;
5194 sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__));
5195 } else {
5196 sd->dma_start_buf = va;
5197 sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
5198 sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE);
5199 sd->alloced_dma_size = alloced;
5200 sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%x-0x%x\n",
5201 __FUNCTION__, sd->alloced_dma_size, sd->dma_buf,
5202 (uint)PHYSADDRHI(sd->dma_phys), (uint)PHYSADDRLO(sd->dma_phys)));
5203 sd_fill_dma_data_buf(sd, 0xA5);
5204 }
5205
5206 if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, SD_PAGE_BITS, &alloced,
5207 &sd->adma2_dscr_start_phys, 0x12)) == NULL) {
5208 sd->sd_dma_mode = DMA_MODE_NONE;
5209 sd->adma2_dscr_start_buf = 0;
5210 sd->adma2_dscr_buf = (void *)0;
5211 sd->adma2_dscr_phys = 0;
5212 sd->alloced_adma2_dscr_size = 0;
5213 sd_err(("%s: DMA_ALLOC failed for descriptor buffer. "
5214 "Disabling DMA support.\n", __FUNCTION__));
5215 } else {
5216 sd->adma2_dscr_start_buf = va;
5217 sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
5218 sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE);
5219 sd->alloced_adma2_dscr_size = alloced;
5220 sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%x-0x%x\n",
5221 __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf,
5222 (uint)PHYSADDRHI(sd->adma2_dscr_phys),
5223 (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
5224 sd_clear_adma_dscr_buf(sd);
5225 }
5226 }
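
/* Alignment example (illustrative): if DMA_ALLOC_CONSISTENT() returns a
 * virtual address of 0x...c1800, ROUNDUP() advances dma_buf to the next
 * SD_PAGE (4 KB) boundary at 0x...c2000, and dma_phys is rounded the same
 * way. The unrounded va/pa are kept in dma_start_buf/dma_start_phys so that
 * sd_unmap_dma() can hand DMA_FREE_CONSISTENT() the original values.
 */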
5227
5228 static void
5229 sd_unmap_dma(sdioh_info_t *sd)
5230 {
5231 if (sd->dma_start_buf) {
5232 DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size,
5233 sd->dma_start_phys, 0x12);
5234 }
5235
5236 if (sd->adma2_dscr_start_buf) {
5237 DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size,
5238 sd->adma2_dscr_start_phys, 0x12);
5239 }
5240 }
5241
5242 static void
5243 sd_clear_adma_dscr_buf(sdioh_info_t *sd)
5244 {
5245 bzero((char *)sd->adma2_dscr_buf, SD_PAGE);
5246 sd_dump_adma_dscr(sd);
5247 }
5248
5249 static void
5250 sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data)
5251 {
5252 memset((char *)sd->dma_buf, data, SD_PAGE);
5253 }
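
/* The 0xA5 fill used at map time is a recognizable debug pattern: data that
 * still reads back as 0xA5 points at a transfer that never touched the
 * buffer. Note that only the first SD_PAGE bytes are pre-filled, even when
 * the glom buffer allocated in sd_map_dma() is larger.
 */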
5254
5255 static void
5256 sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
5257 uint32 addr_phys, uint16 length, uint16 flags)
5258 {
5259 adma2_dscr_32b_t *adma2_dscr_table;
5260 adma1_dscr_t *adma1_dscr_table;
5261
5262 adma2_dscr_table = sd->adma2_dscr_buf;
5263 adma1_dscr_table = sd->adma2_dscr_buf;
5264
5265 switch (sd->sd_dma_mode) {
5266 case DMA_MODE_ADMA2:
5267 sd_dma(("%s: creating ADMA2 descriptor for index %d\n",
5268 __FUNCTION__, index));
5269
5270 adma2_dscr_table[index].phys_addr = addr_phys;
5271 adma2_dscr_table[index].len_attr = length << 16;
5272 adma2_dscr_table[index].len_attr |= flags;
5273 break;
5274 case DMA_MODE_ADMA1:
5275 /* ADMA1 requires two descriptors, one for len
5276 * and the other for data transfer
5277 */
5278 index <<= 1;
5279
5280 sd_dma(("%s: creating ADMA1 descriptor for index %d\n",
5281 __FUNCTION__, index));
5282
5283 adma1_dscr_table[index].phys_addr_attr = length << 12;
5284 adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET |
5285 ADMA2_ATTRIBUTE_VALID);
5286 adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000;
5287 adma1_dscr_table[index+1].phys_addr_attr |= (flags & 0x3f);
5288 break;
5289 default:
5290 sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n",
5291 __FUNCTION__, sd->sd_dma_mode));
5292 break;
5293 }
5294 }
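
/* Encoding example (illustrative, assuming the standard ADMA2 attribute
 * layout: bit0 VALID, bit1 END, bit2 INT, bits 5:4 ACT): a single 512-byte
 * transfer from physical address 0x1000 with ACT=TRAN, END and VALID set
 * yields phys_addr = 0x00001000 and
 * len_attr = (0x200 << 16) | 0x23 == 0x02000023.
 */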
5295
5296 static void
5297 sd_dump_adma_dscr(sdioh_info_t *sd)
5298 {
5299 adma2_dscr_32b_t *adma2_dscr_table;
5300 adma1_dscr_t *adma1_dscr_table;
5301 uint32 i = 0;
5302 uint16 flags;
5303 char flags_str[32];
5304
5305 ASSERT(sd->adma2_dscr_buf != NULL);
5306
5307 adma2_dscr_table = sd->adma2_dscr_buf;
5308 adma1_dscr_table = sd->adma2_dscr_buf;
5309
5310 switch (sd->sd_dma_mode) {
5311 case DMA_MODE_ADMA2:
5312 sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%x-0x%x\n",
5313 SD_PAGE, sd->adma2_dscr_buf,
5314 (uint)PHYSADDRHI(sd->adma2_dscr_phys),
5315 (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
5316 sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)"
5317 " |\n"));
5318 while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) {
5319 flags = adma2_dscr_table->len_attr & 0xFFFF;
5320 sprintf(flags_str, "%s%s%s%s",
5321 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5322 ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
5323 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5324 ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
5325 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5326 ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ",
5327 (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
5328 (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
5329 (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
5330 sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n",
5331 i, adma2_dscr_table, adma2_dscr_table->phys_addr,
5332 adma2_dscr_table->len_attr >> 16, flags, flags_str));
5333 i++;
5334
5335 #ifdef linux
5336 /* Follow LINK descriptors or skip to next. */
5337 if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5338 ADMA2_ATTRIBUTE_ACT_LINK) {
5339 adma2_dscr_table = phys_to_virt(
5340 adma2_dscr_table->phys_addr);
5341 } else {
5342 adma2_dscr_table++;
5343 }
5344 #else
5345 adma2_dscr_table++;
5346 #endif /* linux */
5347
5348 }
5349 break;
5350 case DMA_MODE_ADMA1:
5351 sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%x-0x%x\n",
5352 SD_PAGE, sd->adma2_dscr_buf,
5353 (uint)PHYSADDRHI(sd->adma2_dscr_phys),
5354 (uint)PHYSADDRLO(sd->adma2_dscr_phys)));
5355 sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n"));
5356
5357 for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) {
5358 flags = adma1_dscr_table->phys_addr_attr & 0x3F;
5359 sprintf(flags_str, "%s%s%s%s",
5360 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5361 ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
5362 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5363 ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
5364 ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5365 ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ",
5366 (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
5367 (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
5368 (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
5369 sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n",
5370 i, adma1_dscr_table,
5371 adma1_dscr_table->phys_addr_attr & 0xFFFFF000,
5372 flags, flags_str));
5373
5374 #ifdef linux
5375 /* Follow LINK descriptors or skip to next. */
5376 if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
5377 ADMA2_ATTRIBUTE_ACT_LINK) {
5378 adma1_dscr_table = phys_to_virt(
5379 adma1_dscr_table->phys_addr_attr & 0xFFFFF000);
5380 } else {
5381 adma1_dscr_table++;
5382 }
5383 #else
5384 adma1_dscr_table++;
5385 #endif /* linux */
5386 }
5387 break;
5388 default:
5389 sd_err(("Unknown DMA Descriptor Table Format.\n"));
5390 break;
5391 }
5392 }
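
/* Flag decoding example (illustrative): a len_attr low word of 0x0023
 * (ACT=TRAN, END, VALID) is decoded by the ADMA2 branch above as TRAN, END
 * and VALID; a LINK descriptor (ACT bits 5:4 == 11) is followed via
 * phys_to_virt() on Linux instead of stepping to the next table entry.
 */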
5393
5394 static void
5395 sdstd_dumpregs(sdioh_info_t *sd)
5396 {
5397 sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n",
5398 sdstd_rreg16(sd, SD_IntrStatus),
5399 sdstd_rreg16(sd, SD_ErrorIntrStatus)));
5400 sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
5401 sdstd_rreg16(sd, SD_IntrStatusEnable),
5402 sdstd_rreg16(sd, SD_ErrorIntrStatusEnable)));
5403 sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
5404 sdstd_rreg16(sd, SD_IntrSignalEnable),
5405 sdstd_rreg16(sd, SD_ErrorIntrSignalEnable)));
5406 }
5407