xref: /rk3399_ARM-atf/drivers/ufs/ufs.c (revision 9a905a7d86867bab8a5d9befd40a67a6ab9aaea2)
/*
 * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <endian.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/ufs.h>
#include <lib/mmio.h>

#define CDB_ADDR_MASK			127
#define ALIGN_CDB(x)			(((x) + CDB_ADDR_MASK) & ~CDB_ADDR_MASK)
#define ALIGN_8(x)			(((x) + 7) & ~7)

#define UFS_DESC_SIZE			0x400
#define MAX_UFS_DESC_SIZE		0x8000		/* 32 descriptors */

#define MAX_PRDT_SIZE			0x40000		/* 256KB */
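
/*
 * Layout of one request slot (see get_utrd() and ufs_prepare_cmd()): a
 * UFS_DESC_SIZE region holds the UTP Transfer Request Descriptor, the
 * command UPIU (128-byte aligned), the response UPIU (8-byte aligned) and
 * the PRDT, where each PRDT entry maps at most MAX_PRDT_SIZE bytes of the
 * data buffer.
 */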

static ufs_params_t ufs_params;
static int nutrs;	/* Number of UTP Transfer Request Slots */

/*
 * ufs_uic_error_handler - UIC error interrupts handler
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - ignore error
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, retries are sufficient
 */
static int ufs_uic_error_handler(bool ignore_linereset)
{
	uint32_t data;
	int result = 0;

	data = mmio_read_32(ufs_params.reg_base + UECPA);
	if (data & UFS_UIC_PA_ERROR_MASK) {
		if (data & PA_LAYER_GEN_ERR) {
			if (!ignore_linereset) {
				return -EIO;
			}
		} else {
			result = -EAGAIN;
		}
	}

	data = mmio_read_32(ufs_params.reg_base + UECDL);
	if (data & UFS_UIC_DL_ERROR_MASK) {
		if (data & PA_INIT_ERR) {
			return -EIO;
		}
		result = -EAGAIN;
	}

	/* NL/TL/DME errors require retries */
	data = mmio_read_32(ufs_params.reg_base + UECN);
	if (data & UFS_UIC_NL_ERROR_MASK) {
		result = -EAGAIN;
	}

	data = mmio_read_32(ufs_params.reg_base + UECT);
	if (data & UFS_UIC_TL_ERROR_MASK) {
		result = -EAGAIN;
	}

	data = mmio_read_32(ufs_params.reg_base + UECDME);
	if (data & UFS_UIC_DME_ERROR_MASK) {
		result = -EAGAIN;
	}

	return result;
}

/*
 * ufs_error_handler - error interrupts handler
 * @status: interrupt status
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - ignore error
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, retries are sufficient
 */
static int ufs_error_handler(uint32_t status, bool ignore_linereset)
{
	int result;

	if (status & UFS_INT_UE) {
		result = ufs_uic_error_handler(ignore_linereset);
		if (result != 0) {
			return result;
		}
	}

	/* Return an I/O error on fatal errors; it is up to the caller to re-init UFS */
	if (status & UFS_INT_FATAL) {
		return -EIO;
	}

	/* retry for non-fatal errors */
	return -EAGAIN;
}

/*
 * ufs_wait_for_int_status - wait for expected interrupt status
 * @expected: expected interrupt status bit
 * @timeout_ms: timeout in milliseconds to poll for
 * @ignore_linereset: set to ignore PA_LAYER_GEN_ERR (UIC error)
 *
 * Returns
 * 0 - received expected interrupt and cleared it
 * -EIO - fatal error, needs re-init
 * -EAGAIN - non-fatal error, caller can retry
 * -ETIMEDOUT - timed out waiting for interrupt status
 */
static int ufs_wait_for_int_status(const uint32_t expected_status,
				   unsigned int timeout_ms,
				   bool ignore_linereset)
{
	uint32_t interrupt_status, interrupts_enabled;
	int result = 0;

	interrupts_enabled = mmio_read_32(ufs_params.reg_base + IE);
	do {
		interrupt_status = mmio_read_32(ufs_params.reg_base + IS) & interrupts_enabled;
		if (interrupt_status & UFS_INT_ERR) {
			mmio_write_32(ufs_params.reg_base + IS, interrupt_status & UFS_INT_ERR);
			result = ufs_error_handler(interrupt_status, ignore_linereset);
			if (result != 0) {
				return result;
			}
		}

		if (interrupt_status & expected_status) {
			break;
		}
		mdelay(1);
	} while (timeout_ms-- > 0);

	if (!(interrupt_status & expected_status)) {
		return -ETIMEDOUT;
	}

	mmio_write_32(ufs_params.reg_base + IS, expected_status);

	return result;
}

int ufshc_send_uic_cmd(uintptr_t base, uic_cmd_t *cmd)
{
	unsigned int data;
	int result;

	if (base == 0 || cmd == NULL)
		return -EINVAL;

	data = mmio_read_32(base + HCS);
	if ((data & HCS_UCRDY) == 0)
		return -EBUSY;
	mmio_write_32(base + IS, ~0);
	mmio_write_32(base + UCMDARG1, cmd->arg1);
	mmio_write_32(base + UCMDARG2, cmd->arg2);
	mmio_write_32(base + UCMDARG3, cmd->arg3);
	mmio_write_32(base + UICCMD, cmd->op);

	result = ufs_wait_for_int_status(UFS_INT_UCCS, UIC_CMD_TIMEOUT_MS,
					 cmd->op == DME_SET);
	if (result != 0) {
		return result;
	}

	return mmio_read_32(base + UCMDARG2) & CONFIG_RESULT_CODE_MASK;
}

int ufshc_dme_get(unsigned int attr, unsigned int idx, unsigned int *val)
{
	uintptr_t base;
	unsigned int data;
	int result, retries;
	uic_cmd_t cmd;

	assert(ufs_params.reg_base != 0);

	if (val == NULL)
		return -EINVAL;

	base = ufs_params.reg_base;
	for (retries = 0; retries < 100; retries++) {
		data = mmio_read_32(base + HCS);
		if ((data & HCS_UCRDY) != 0)
			break;
		mdelay(1);
	}
	if (retries >= 100)
		return -EBUSY;

	cmd.arg1 = (attr << 16) | GEN_SELECTOR_IDX(idx);
	cmd.arg2 = 0;
	cmd.arg3 = 0;
	cmd.op = DME_GET;
	for (retries = 0; retries < UFS_UIC_COMMAND_RETRIES; ++retries) {
		result = ufshc_send_uic_cmd(base, &cmd);
		if (result == 0)
			break;
		/* -EIO requires UFS re-init */
		if (result == -EIO) {
			return result;
		}
	}
	if (retries >= UFS_UIC_COMMAND_RETRIES)
		return -EIO;

	*val = mmio_read_32(base + UCMDARG3);
	return 0;
}

int ufshc_dme_set(unsigned int attr, unsigned int idx, unsigned int val)
{
	uintptr_t base;
	int result, retries;
	uic_cmd_t cmd;

	assert((ufs_params.reg_base != 0));

	base = ufs_params.reg_base;
	cmd.arg1 = (attr << 16) | GEN_SELECTOR_IDX(idx);
	cmd.arg2 = 0;
	cmd.arg3 = val;
	cmd.op = DME_SET;

	for (retries = 0; retries < UFS_UIC_COMMAND_RETRIES; ++retries) {
		result = ufshc_send_uic_cmd(base, &cmd);
		if (result == 0)
			break;
		/* -EIO requires UFS re-init */
		if (result == -EIO) {
			return result;
		}
	}
	if (retries >= UFS_UIC_COMMAND_RETRIES)
		return -EIO;

	return 0;
}
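
/*
 * Usage sketch (illustration only, not called from this file): a platform's
 * phy_set_pwr_mode() hook can use the DME helpers above to query and program
 * UniPro attributes, e.g. PA_TxGear (0x1568):
 *
 *	unsigned int gear;
 *
 *	if ((ufshc_dme_get(0x1568, 0, &gear) == 0) && (gear != 3)) {
 *		(void)ufshc_dme_set(0x1568, 0, 3);
 *	}
 */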

static int ufshc_hce_enable(uintptr_t base)
{
	unsigned int data;
	int retries;

	/* Enable Host Controller */
	mmio_write_32(base + HCE, HCE_ENABLE);

	/* Wait until the basic initialization sequence has completed */
	for (retries = 0; retries < HCE_ENABLE_INNER_RETRIES; ++retries) {
		data = mmio_read_32(base + HCE);
		if (data & HCE_ENABLE) {
			break;
		}
		udelay(HCE_ENABLE_TIMEOUT_US);
	}
	if (retries >= HCE_ENABLE_INNER_RETRIES) {
		return -ETIMEDOUT;
	}

	return 0;
}

static int ufshc_hce_disable(uintptr_t base)
{
	unsigned int data;
	int timeout;

	/* Disable Host Controller */
	mmio_write_32(base + HCE, HCE_DISABLE);
	timeout = HCE_DISABLE_TIMEOUT_US;
	do {
		data = mmio_read_32(base + HCE);
		if ((data & HCE_ENABLE) == HCE_DISABLE) {
			break;
		}
		udelay(1);
	} while (--timeout > 0);

	if (timeout <= 0) {
		return -ETIMEDOUT;
	}

	return 0;
}

static int ufshc_reset(uintptr_t base)
{
	unsigned int data;
	int retries, result;

	/* disable controller if enabled */
	if (mmio_read_32(base + HCE) & HCE_ENABLE) {
		result = ufshc_hce_disable(base);
		if (result != 0) {
			return -EIO;
		}
	}

	for (retries = 0; retries < HCE_ENABLE_OUTER_RETRIES; ++retries) {
		result = ufshc_hce_enable(base);
		if (result == 0) {
			break;
		}
	}
	if (retries >= HCE_ENABLE_OUTER_RETRIES) {
		return -EIO;
	}

	/*
	 * Enable UIC interrupts only. Other interrupts can be ignored until
	 * the link is up, as there may be spurious error interrupts during
	 * link-up.
	 */
	data = UFS_INT_UCCS | UFS_INT_UHES | UFS_INT_UHXS | UFS_INT_UPMS;
	mmio_write_32(base + IE, data);

	return 0;
}

static int ufshc_dme_link_startup(uintptr_t base)
{
	uic_cmd_t cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op = DME_LINKSTARTUP;
	return ufshc_send_uic_cmd(base, &cmd);
}

static int ufshc_link_startup(uintptr_t base)
{
	int data, result;
	int retries;

	for (retries = DME_LINKSTARTUP_RETRIES; retries > 0; retries--) {
		result = ufshc_dme_link_startup(base);
		if (result != 0) {
			/* Reset controller before trying again */
			result = ufshc_reset(base);
			if (result != 0) {
				return result;
			}
			continue;
		}
		assert(mmio_read_32(base + HCS) & HCS_DP);
		data = mmio_read_32(base + IS);
		if (data & UFS_INT_ULSS)
			mmio_write_32(base + IS, UFS_INT_ULSS);

		/* clear UE set due to line-reset */
		if (data & UFS_INT_UE) {
			mmio_write_32(base + IS, UFS_INT_UE);
		}
		/* clearing line-reset, UECPA is cleared on read */
		mmio_read_32(base + UECPA);
		return 0;
	}
	return -EIO;
}

/* Read Door Bell register to check if slot zero is available */
static int is_slot_available(void)
{
	if (mmio_read_32(ufs_params.reg_base + UTRLDBR) & 0x1) {
		return -EBUSY;
	}
	return 0;
}

static void get_utrd(utp_utrd_t *utrd)
{
	uintptr_t base;
	int result;
	utrd_header_t *hd;

	assert(utrd != NULL);
	result = is_slot_available();
	assert(result == 0);

	/* clear utrd */
	memset((void *)utrd, 0, sizeof(utp_utrd_t));
	base = ufs_params.desc_base;
	/* clear the descriptor */
	memset((void *)base, 0, UFS_DESC_SIZE);

	utrd->header = base;
	utrd->task_tag = 1; /* We always use the first slot */
	/* CDB address should be aligned to 128 bytes */
	utrd->upiu = ALIGN_CDB(utrd->header + sizeof(utrd_header_t));
	utrd->resp_upiu = ALIGN_8(utrd->upiu + sizeof(cmd_upiu_t));
	utrd->size_upiu = utrd->resp_upiu - utrd->upiu;
	utrd->size_resp_upiu = ALIGN_8(sizeof(resp_upiu_t));
	utrd->prdt = utrd->resp_upiu + utrd->size_resp_upiu;

	hd = (utrd_header_t *)utrd->header;
	hd->ucdba = utrd->upiu & UINT32_MAX;
	hd->ucdbau = (utrd->upiu >> 32) & UINT32_MAX;
	/* Both RUL and RUO are expressed in DWORD units */
	hd->rul = utrd->size_resp_upiu >> 2;
	hd->ruo = utrd->size_upiu >> 2;
	(void)result;
}

/*
 * Prepare UTRD, Command UPIU, Response UPIU.
 */
static int ufs_prepare_cmd(utp_utrd_t *utrd, uint8_t op, uint8_t lun,
			   int lba, uintptr_t buf, size_t length)
{
	utrd_header_t *hd;
	cmd_upiu_t *upiu;
	prdt_t *prdt;
	unsigned int ulba;
	unsigned int lba_cnt;
	uintptr_t desc_limit;
	uintptr_t prdt_end;

	hd = (utrd_header_t *)utrd->header;
	upiu = (cmd_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	upiu->trans_type = CMD_UPIU;
	upiu->task_tag = utrd->task_tag;
	upiu->cdb[0] = op;
	ulba = (unsigned int)lba;
	lba_cnt = (unsigned int)(length >> UFS_BLOCK_SHIFT);
	switch (op) {
	case CDBCMD_TEST_UNIT_READY:
		break;
	case CDBCMD_READ_CAPACITY_10:
		hd->dd = DD_OUT;
		upiu->flags = UPIU_FLAGS_R | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		break;
	case CDBCMD_READ_10:
		hd->dd = DD_OUT;
		upiu->flags = UPIU_FLAGS_R | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		upiu->cdb[1] = RW_WITHOUT_CACHE;
		/* set logical block address */
		upiu->cdb[2] = (ulba >> 24) & 0xff;
		upiu->cdb[3] = (ulba >> 16) & 0xff;
		upiu->cdb[4] = (ulba >> 8) & 0xff;
		upiu->cdb[5] = ulba & 0xff;
		/* set transfer length */
		upiu->cdb[7] = (lba_cnt >> 8) & 0xff;
		upiu->cdb[8] = lba_cnt & 0xff;
		break;
	case CDBCMD_WRITE_10:
		hd->dd = DD_IN;
		upiu->flags = UPIU_FLAGS_W | UPIU_FLAGS_ATTR_S;
		upiu->lun = lun;
		upiu->cdb[1] = RW_WITHOUT_CACHE;
		/* set logical block address */
		upiu->cdb[2] = (ulba >> 24) & 0xff;
		upiu->cdb[3] = (ulba >> 16) & 0xff;
		upiu->cdb[4] = (ulba >> 8) & 0xff;
		upiu->cdb[5] = ulba & 0xff;
		/* set transfer length */
		upiu->cdb[7] = (lba_cnt >> 8) & 0xff;
		upiu->cdb[8] = lba_cnt & 0xff;
		break;
	default:
		assert(0);
		break;
	}
	if (hd->dd == DD_IN) {
		flush_dcache_range(buf, length);
	} else if (hd->dd == DD_OUT) {
		inv_dcache_range(buf, length);
	}

	utrd->prdt_length = 0;
	if (length) {
		upiu->exp_data_trans_len = htobe32(length);
		assert(lba_cnt <= UINT16_MAX);
		prdt = (prdt_t *)utrd->prdt;

		desc_limit = ufs_params.desc_base + ufs_params.desc_size;
		while (length > 0) {
			if ((uintptr_t)prdt + sizeof(prdt_t) > desc_limit) {
				ERROR("UFS: Exceeded descriptor limit. Image is too large\n");
				panic();
			}
			prdt->dba = (unsigned int)(buf & UINT32_MAX);
			prdt->dbau = (unsigned int)((buf >> 32) & UINT32_MAX);
			/* prdt->dbc counts from 0 */
			if (length > MAX_PRDT_SIZE) {
				prdt->dbc = MAX_PRDT_SIZE - 1;
				length = length - MAX_PRDT_SIZE;
			} else {
				prdt->dbc = length - 1;
				length = 0;
			}
			buf += MAX_PRDT_SIZE;
			prdt++;
			utrd->prdt_length++;
		}
		hd->prdtl = utrd->prdt_length;
		hd->prdto = (utrd->size_upiu + utrd->size_resp_upiu) >> 2;
	}

	prdt_end = utrd->prdt + utrd->prdt_length * sizeof(prdt_t);
	flush_dcache_range(utrd->header, prdt_end - utrd->header);
	return 0;
}

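/*
 * Prepare a Query Request UPIU for flag, attribute or descriptor access in
 * the current request descriptor.
 */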
static int ufs_prepare_query(utp_utrd_t *utrd, uint8_t op, uint8_t idn,
			     uint8_t index, uint8_t sel,
			     uintptr_t buf, size_t length)
{
	utrd_header_t *hd;
	query_upiu_t *query_upiu;

	hd = (utrd_header_t *)utrd->header;
	query_upiu = (query_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	query_upiu->trans_type = QUERY_REQUEST_UPIU;
	query_upiu->task_tag = utrd->task_tag;
	query_upiu->ts.desc.opcode = op;
	query_upiu->ts.desc.idn = idn;
	query_upiu->ts.desc.index = index;
	query_upiu->ts.desc.selector = sel;
	switch (op) {
	case QUERY_READ_DESC:
		query_upiu->query_func = QUERY_FUNC_STD_READ;
		query_upiu->ts.desc.length = htobe16(length);
		break;
	case QUERY_WRITE_DESC:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		query_upiu->ts.desc.length = htobe16(length);
		memcpy((void *)(utrd->upiu + sizeof(query_upiu_t)),
		       (void *)buf, length);
		break;
	case QUERY_READ_ATTR:
	case QUERY_READ_FLAG:
		query_upiu->query_func = QUERY_FUNC_STD_READ;
		break;
	case QUERY_CLEAR_FLAG:
	case QUERY_SET_FLAG:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		break;
	case QUERY_WRITE_ATTR:
		query_upiu->query_func = QUERY_FUNC_STD_WRITE;
		query_upiu->ts.attr.value = htobe32(*((uint32_t *)buf));
		break;
	default:
		assert(0);
		break;
	}
	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
	return 0;
}

static void ufs_prepare_nop_out(utp_utrd_t *utrd)
{
	utrd_header_t *hd;
	nop_out_upiu_t *nop_out;

	hd = (utrd_header_t *)utrd->header;
	nop_out = (nop_out_upiu_t *)utrd->upiu;

	hd->i = 1;
	hd->ct = CT_UFS_STORAGE;
	hd->ocs = OCS_MASK;

	nop_out->trans_type = 0;
	nop_out->task_tag = utrd->task_tag;
	flush_dcache_range((uintptr_t)utrd->header, UFS_DESC_SIZE);
}

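/*
 * Ring the doorbell for the slot corresponding to @task_tag, after clearing
 * stale interrupt status, starting the transfer request list and programming
 * interrupt aggregation (UTRIACR) with its maximum counter threshold and
 * timeout.
 */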
static void ufs_send_request(int task_tag)
{
	unsigned int data;
	int slot;

	slot = task_tag - 1;
	/* clear all interrupts */
	mmio_write_32(ufs_params.reg_base + IS, ~0);

	mmio_write_32(ufs_params.reg_base + UTRLRSR, 1);
	assert(mmio_read_32(ufs_params.reg_base + UTRLRSR) == 1);

	data = UTRIACR_IAEN | UTRIACR_CTR | UTRIACR_IACTH(0x1F) |
	       UTRIACR_IATOVAL(0xFF);
	mmio_write_32(ufs_params.reg_base + UTRIACR, data);
	/* send request */
	mmio_setbits_32(ufs_params.reg_base + UTRLDBR, 1 << slot);
}

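/*
 * Wait for the transfer to complete and sanity-check the response UPIU in
 * the request descriptor. Returns 0 on success, -EAGAIN on a Unit Attention
 * condition, or the error codes described for ufs_wait_for_int_status().
 */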
static int ufs_check_resp(utp_utrd_t *utrd, int trans_type, unsigned int timeout_ms)
{
	utrd_header_t *hd;
	resp_upiu_t *resp;
	sense_data_t *sense;
	unsigned int data;
	int slot, result;

	hd = (utrd_header_t *)utrd->header;
	resp = (resp_upiu_t *)utrd->resp_upiu;

	result = ufs_wait_for_int_status(UFS_INT_UTRCS, timeout_ms, false);
	if (result != 0) {
		return result;
	}

	slot = utrd->task_tag - 1;

	data = mmio_read_32(ufs_params.reg_base + UTRLDBR);
	assert((data & (1 << slot)) == 0);
	/*
	 * Invalidate the header after the DMA read has completed, so the
	 * CPU does not refer to prefetched data brought in before DMA
	 * completion.
	 */
	inv_dcache_range((uintptr_t)hd, UFS_DESC_SIZE);
	assert(hd->ocs == OCS_SUCCESS);
	assert((resp->trans_type & TRANS_TYPE_CODE_MASK) == trans_type);

	sense = &resp->sd.sense;
	if (sense->resp_code == SENSE_DATA_VALID &&
	    sense->sense_key == SENSE_KEY_UNIT_ATTENTION && sense->asc == 0x29 &&
	    sense->ascq == 0) {
		WARN("Unit Attention Condition\n");
		return -EAGAIN;
	}

	(void)resp;
	(void)slot;
	(void)data;
	return 0;
}

static void ufs_send_cmd(utp_utrd_t *utrd, uint8_t cmd_op, uint8_t lun, int lba, uintptr_t buf,
			 size_t length)
{
	int result, i;

	for (i = 0; i < UFS_CMD_RETRIES; ++i) {
		get_utrd(utrd);
		result = ufs_prepare_cmd(utrd, cmd_op, lun, lba, buf, length);
		assert(result == 0);
		ufs_send_request(utrd->task_tag);
		result = ufs_check_resp(utrd, RESPONSE_UPIU, CMD_TIMEOUT_MS);
		if (result == 0 || result == -EIO) {
			break;
		}
	}
	assert(result == 0);
	(void)result;
}

#ifdef UFS_RESP_DEBUG
static void dump_upiu(utp_utrd_t *utrd)
{
	utrd_header_t *hd;
	int i;

	hd = (utrd_header_t *)utrd->header;
	INFO("utrd:0x%x, ruo:0x%x, rul:0x%x, ocs:0x%x, UTRLDBR:0x%x\n",
		(unsigned int)(uintptr_t)utrd, hd->ruo, hd->rul, hd->ocs,
		mmio_read_32(ufs_params.reg_base + UTRLDBR));
	for (i = 0; i < sizeof(utrd_header_t); i += 4) {
		INFO("[%lx]:0x%x\n",
			(uintptr_t)utrd->header + i,
			*(unsigned int *)((uintptr_t)utrd->header + i));
	}

	for (i = 0; i < sizeof(cmd_upiu_t); i += 4) {
		INFO("cmd[%lx]:0x%x\n",
			utrd->upiu + i,
			*(unsigned int *)(utrd->upiu + i));
	}
	for (i = 0; i < sizeof(resp_upiu_t); i += 4) {
		INFO("resp[%lx]:0x%x\n",
			utrd->resp_upiu + i,
			*(unsigned int *)(utrd->resp_upiu + i));
	}
	for (i = 0; i < sizeof(prdt_t); i += 4) {
		INFO("prdt[%lx]:0x%x\n",
			utrd->prdt + i,
			*(unsigned int *)(utrd->prdt + i));
	}
}
#endif

static void ufs_verify_init(void)
{
	utp_utrd_t utrd;
	int result;

	get_utrd(&utrd);
	ufs_prepare_nop_out(&utrd);
	ufs_send_request(utrd.task_tag);
	result = ufs_check_resp(&utrd, NOP_IN_UPIU, NOP_OUT_TIMEOUT_MS);
	assert(result == 0);
	(void)result;
}

static void ufs_verify_ready(void)
{
	utp_utrd_t utrd;

	ufs_send_cmd(&utrd, CDBCMD_TEST_UNIT_READY, 0, 0, 0, 0);
}

static void ufs_query(uint8_t op, uint8_t idn, uint8_t index, uint8_t sel,
		      uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	query_resp_upiu_t *resp;
	int result;

	switch (op) {
	case QUERY_READ_FLAG:
	case QUERY_READ_ATTR:
	case QUERY_READ_DESC:
	case QUERY_WRITE_DESC:
	case QUERY_WRITE_ATTR:
		assert(((buf & 3) == 0) && (size != 0));
		break;
	default:
		/* Do nothing in default case */
		break;
	}
	get_utrd(&utrd);
	ufs_prepare_query(&utrd, op, idn, index, sel, buf, size);
	ufs_send_request(utrd.task_tag);
	result = ufs_check_resp(&utrd, QUERY_RESPONSE_UPIU, QUERY_REQ_TIMEOUT_MS);
	assert(result == 0);
	resp = (query_resp_upiu_t *)utrd.resp_upiu;
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	assert(resp->query_resp == QUERY_RESP_SUCCESS);

	switch (op) {
	case QUERY_READ_FLAG:
		*(uint32_t *)buf = (uint32_t)resp->ts.flag.value;
		break;
	case QUERY_READ_DESC:
		memcpy((void *)buf,
		       (void *)(utrd.resp_upiu + sizeof(query_resp_upiu_t)),
		       size);
		break;
	case QUERY_READ_ATTR:
		*(uint32_t *)buf = htobe32(resp->ts.attr.value);
		break;
	default:
		/* Do nothing in default case */
		break;
	}
	(void)result;
}

unsigned int ufs_read_attr(int idn)
{
	unsigned int value;

	ufs_query(QUERY_READ_ATTR, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
	return value;
}

void ufs_write_attr(int idn, unsigned int value)
{
	ufs_query(QUERY_WRITE_ATTR, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
}

unsigned int ufs_read_flag(int idn)
{
	unsigned int value;

	ufs_query(QUERY_READ_FLAG, idn, 0, 0,
		  (uintptr_t)&value, sizeof(value));
	return value;
}

void ufs_set_flag(int idn)
{
	ufs_query(QUERY_SET_FLAG, idn, 0, 0, 0, 0);
}

void ufs_clear_flag(int idn)
{
	ufs_query(QUERY_CLEAR_FLAG, idn, 0, 0, 0, 0);
}

void ufs_read_desc(int idn, int index, uintptr_t buf, size_t size)
{
	ufs_query(QUERY_READ_DESC, idn, index, 0, buf, size);
}

void ufs_write_desc(int idn, int index, uintptr_t buf, size_t size)
{
	ufs_query(QUERY_WRITE_DESC, idn, index, 0, buf, size);
}

static int ufs_read_capacity(int lun, unsigned int *num, unsigned int *size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;
	sense_data_t *sense;
	unsigned char data[CACHE_WRITEBACK_GRANULE << 1];
	uintptr_t buf;
	int retries = UFS_READ_CAPACITY_RETRIES;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE) &&
	       (num != NULL) && (size != NULL));

	/* align buf address */
	buf = (uintptr_t)data;
	buf = (buf + CACHE_WRITEBACK_GRANULE - 1) &
	      ~(CACHE_WRITEBACK_GRANULE - 1);
	do {
		ufs_send_cmd(&utrd, CDBCMD_READ_CAPACITY_10, lun, 0,
			    buf, READ_CAPACITY_LENGTH);
#ifdef UFS_RESP_DEBUG
		dump_upiu(&utrd);
#endif
		resp = (resp_upiu_t *)utrd.resp_upiu;
		sense = &resp->sd.sense;
		if (!((sense->resp_code == SENSE_DATA_VALID) &&
		    (sense->sense_key == SENSE_KEY_UNIT_ATTENTION) &&
		    (sense->asc == 0x29) && (sense->ascq == 0))) {
			inv_dcache_range(buf, CACHE_WRITEBACK_GRANULE);
			/* last logical block address */
			*num = be32toh(*(unsigned int *)buf);
			if (*num)
				*num += 1;
			/* logical block length in bytes */
			*size = be32toh(*(unsigned int *)(buf + 4));

			return 0;
		}

	} while (retries-- > 0);

	return -ETIMEDOUT;
}

size_t ufs_read_blocks(int lun, int lba, uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE));

	ufs_send_cmd(&utrd, CDBCMD_READ_10, lun, lba, buf, size);
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	/*
	 * Invalidate any prefetched cache contents before the CPU
	 * accesses the buffer.
	 */
	inv_dcache_range(buf, size);
	resp = (resp_upiu_t *)utrd.resp_upiu;
	return size - resp->res_trans_cnt;
}
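
/*
 * Usage sketch (illustration only; "buffer", "lun", "lba" and "count" are
 * hypothetical): an io_block backend would typically wrap this API roughly
 * as follows, with sizes expressed in UFS_BLOCK_SHIFT-sized blocks:
 *
 *	size_t got = ufs_read_blocks(lun, lba, (uintptr_t)buffer,
 *				     count << UFS_BLOCK_SHIFT);
 *	if (got != (count << UFS_BLOCK_SHIFT)) {
 *		return -EIO;
 *	}
 */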

size_t ufs_write_blocks(int lun, int lba, const uintptr_t buf, size_t size)
{
	utp_utrd_t utrd;
	resp_upiu_t *resp;

	assert((ufs_params.reg_base != 0) &&
	       (ufs_params.desc_base != 0) &&
	       (ufs_params.desc_size >= UFS_DESC_SIZE));

	ufs_send_cmd(&utrd, CDBCMD_WRITE_10, lun, lba, buf, size);
#ifdef UFS_RESP_DEBUG
	dump_upiu(&utrd);
#endif
	resp = (resp_upiu_t *)utrd.resp_upiu;
	return size - resp->res_trans_cnt;
}

static int ufs_set_fdevice_init(void)
{
	unsigned int result;
	int timeout;

	ufs_set_flag(FLAG_DEVICE_INIT);

	timeout = FDEVICEINIT_TIMEOUT_MS;
	do {
		result = ufs_read_flag(FLAG_DEVICE_INIT);
		if (!result) {
			break;
		}
		mdelay(5);
		timeout -= 5;
	} while (timeout > 0);

	if (result != 0U) {
		return -ETIMEDOUT;
	}

	return 0;
}

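/*
 * Program the transfer request list base, verify that the device responds to
 * NOP OUT and TEST UNIT READY, complete device initialization (fDeviceInit)
 * and log the capacity of every LUN.
 */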
static void ufs_enum(void)
{
	unsigned int blk_num, blk_size;
	int i, result;

	mmio_write_32(ufs_params.reg_base + UTRLBA,
		      ufs_params.desc_base & UINT32_MAX);
	mmio_write_32(ufs_params.reg_base + UTRLBAU,
		      (ufs_params.desc_base >> 32) & UINT32_MAX);

	ufs_verify_init();
	ufs_verify_ready();

	result = ufs_set_fdevice_init();
	assert(result == 0);

	blk_num = 0;
	blk_size = 0;

	/* dump available LUNs */
	for (i = 0; i < UFS_MAX_LUNS; i++) {
		result = ufs_read_capacity(i, &blk_num, &blk_size);
		if (result != 0) {
			WARN("UFS LUN%d dump failed\n", i);
		}
		if (blk_num && blk_size) {
			INFO("UFS LUN%d contains %d blocks with %d-byte size\n",
			     i, blk_num, blk_size);
		}
	}

	(void)result;
}

static void ufs_get_device_info(struct ufs_dev_desc *card_data)
{
	uint8_t desc_buf[DESC_DEVICE_MAX_SIZE];

	ufs_query(QUERY_READ_DESC, DESC_TYPE_DEVICE, 0, 0,
				(uintptr_t)desc_buf, DESC_DEVICE_MAX_SIZE);

	/*
	 * Get the manufacturer ID (wManufacturerID); the device descriptor
	 * stores it in big-endian format.
	 */
	card_data->wmanufacturerid = (uint16_t)((desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8) |
				     (desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]));
}

int ufs_init(const ufs_ops_t *ops, ufs_params_t *params)
{
	int result;
	unsigned int data;
	uic_cmd_t cmd;
	struct ufs_dev_desc card = {0};

	assert((params != NULL) &&
	       (params->reg_base != 0) &&
	       (params->desc_base != 0) &&
	       (params->desc_size >= UFS_DESC_SIZE));

	memcpy(&ufs_params, params, sizeof(ufs_params_t));

	/* 0 means 1 slot */
	nutrs = (mmio_read_32(ufs_params.reg_base + CAP) & CAP_NUTRS_MASK) + 1;
	if (nutrs > (ufs_params.desc_size / UFS_DESC_SIZE)) {
		nutrs = ufs_params.desc_size / UFS_DESC_SIZE;
	}

	if (ufs_params.flags & UFS_FLAGS_SKIPINIT) {
		mmio_write_32(ufs_params.reg_base + UTRLBA,
			      ufs_params.desc_base & UINT32_MAX);
		mmio_write_32(ufs_params.reg_base + UTRLBAU,
			      (ufs_params.desc_base >> 32) & UINT32_MAX);

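		/*
		 * The link was already brought up by an earlier boot stage.
		 * 0x1571 is the UniPro PA_PWRMode attribute; the reads below
		 * appear to check whether that stage left the link in
		 * hibernate, in which case hibernate is exited here.
		 */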
		result = ufshc_dme_get(0x1571, 0, &data);
		assert(result == 0);
		result = ufshc_dme_get(0x41, 0, &data);
		assert(result == 0);
		if (data == 1) {
			/* prepare to exit hibernate mode */
			memset(&cmd, 0, sizeof(uic_cmd_t));
			cmd.op = DME_HIBERNATE_EXIT;
			result = ufshc_send_uic_cmd(ufs_params.reg_base,
						    &cmd);
			assert(result == 0);
			data = mmio_read_32(ufs_params.reg_base + UCMDARG2);
			assert(data == 0);
			do {
				data = mmio_read_32(ufs_params.reg_base + IS);
			} while ((data & UFS_INT_UHXS) == 0);
			mmio_write_32(ufs_params.reg_base + IS, UFS_INT_UHXS);
			data = mmio_read_32(ufs_params.reg_base + HCS);
			assert((data & HCS_UPMCRS_MASK) == HCS_PWR_LOCAL);
		}
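		/* 0x1568 is the UniPro PA_TxGear attribute; expect gear 1-3 */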
		result = ufshc_dme_get(0x1568, 0, &data);
		assert(result == 0);
		assert((data > 0) && (data <= 3));
	} else {
		assert((ops != NULL) && (ops->phy_init != NULL) &&
		       (ops->phy_set_pwr_mode != NULL));

		result = ufshc_reset(ufs_params.reg_base);
		assert(result == 0);
		ops->phy_init(&ufs_params);
		result = ufshc_link_startup(ufs_params.reg_base);
		assert(result == 0);

		/* enable all interrupts */
		data = UFS_INT_UCCS | UFS_INT_UHES | UFS_INT_UHXS | UFS_INT_UPMS;
		data |= UFS_INT_UTRCS | UFS_INT_ERR;
		mmio_write_32(ufs_params.reg_base + IE, data);

		ufs_enum();

		ufs_get_device_info(&card);
		if (card.wmanufacturerid == UFS_VENDOR_SKHYNIX) {
			ufs_params.flags |= UFS_FLAGS_VENDOR_SKHYNIX;
		}

		ops->phy_set_pwr_mode(&ufs_params);
	}

	(void)result;
	return 0;
}
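
/*
 * Usage sketch (illustration only; everything except ufs_init(), ufs_ops_t
 * and ufs_params_t is a hypothetical platform name): a platform typically
 * initializes this driver from its boot stage setup code, providing the PHY
 * hooks from its SoC-specific UFS PHY driver:
 *
 *	static const ufs_ops_t plat_ufs_ops = {
 *		.phy_init		= plat_ufs_phy_init,
 *		.phy_set_pwr_mode	= plat_ufs_phy_set_pwr_mode,
 *	};
 *
 *	static ufs_params_t plat_ufs_params = {
 *		.reg_base	= PLAT_UFS_BASE,
 *		.desc_base	= PLAT_UFS_DESC_BASE,
 *		.desc_size	= PLAT_UFS_DESC_SIZE,
 *	};
 *
 *	ufs_init(&plat_ufs_ops, &plat_ufs_params);
 */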
1058