1 // SPDX-License-Identifier: GPL-2.0+
2 /**
3  * ufs.c - Universal Flash Storage (UFS) driver
4  *
5  * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
6  * to U-Boot.
7  *
8  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
9  */
10 #include <charset.h>
11 #include <common.h>
12 #include <dm.h>
13 #include <log.h>
14 #include <dm/lists.h>
15 #include <dm/device-internal.h>
16 #include <malloc.h>
17 #include <hexdump.h>
18 #include <scsi.h>
19 #include <asm/io.h>
20 #include <asm/dma-mapping.h>
21 #include <linux/bitops.h>
22 #include <linux/delay.h>
23 
24 #if defined(CONFIG_SUPPORT_USBPLUG)
25 #include "ufs-rockchip-usbplug.h"
26 #endif
27 
28 #include "ufs.h"
29 
30 #if defined(CONFIG_ROCKCHIP_UFS_RPMB)
31 #include "ufs-rockchip-rpmb.h"
32 #endif
33 
34 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
35 				 UTP_TASK_REQ_COMPL |\
36 				 UFSHCD_ERROR_MASK)
37 /* maximum number of link-startup retries */
38 #define DME_LINKSTARTUP_RETRIES 3
39 
40 /* maximum number of retries for a general UIC command  */
41 #define UFS_UIC_COMMAND_RETRIES 3
42 
43 /* Query request retries */
44 #define QUERY_REQ_RETRIES 3
45 /* Query request timeout */
46 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
47 
48 /* maximum timeout in ms for a general UIC command */
49 #define UFS_UIC_CMD_TIMEOUT	1000
50 
51 #define UFS_UIC_LINKUP_TIMEOUT	150
52 /* Polling time to wait for fDeviceInit */
53 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
54 
55 /* NOP OUT retries waiting for NOP IN response */
56 #define NOP_OUT_RETRIES    10
57 /* Timeout after 1500 msecs if NOP OUT hangs without response */
58 #define NOP_OUT_TIMEOUT    1500 /* msecs */
59 
60 /* Only use one Task Tag for all requests */
61 #define TASK_TAG	0
62 
63 /* Expose the flag value from utp_upiu_query.value */
64 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
65 
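/* Maximum number of bytes one PRDT entry can describe (256 KiB) */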
66 #define MAX_PRDT_ENTRY	262144
67 
68 /* maximum bytes per request: 128 PRDT entries of 256 KiB each (32 MiB) */
69 #define UFS_MAX_BYTES	(128 * 256 * 1024)
70 
71 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
72 static inline void ufshcd_hba_stop(struct ufs_hba *hba);
73 static int ufshcd_hba_enable(struct ufs_hba *hba);
74 
75 /*
76  * ufshcd_wait_for_register - poll a register until (value & mask) == (val & mask) or timeout_ms elapses
77  */
78 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
79 				    u32 val, unsigned long timeout_ms)
80 {
81 	int err = 0;
82 	unsigned long start = get_timer(0);
83 
84 	/* ignore bits that we don't intend to wait on */
85 	val = val & mask;
86 
87 	while ((ufshcd_readl(hba, reg) & mask) != val) {
88 		if (get_timer(start) > timeout_ms) {
89 			if ((ufshcd_readl(hba, reg) & mask) != val)
90 				err = -ETIMEDOUT;
91 			break;
92 		}
93 	}
94 
95 	return err;
96 }
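
/*
 * A minimal usage sketch (hypothetical caller): wait up to 100 ms for the
 * TASK_TAG doorbell bit to clear once the controller retires the request:
 *
 *	if (ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				     1 << TASK_TAG, 0, 100))
 *		dev_err(hba->dev, "doorbell did not clear\n");
 */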
97 
98 /**
99  * ufshcd_init_pwr_info - setting the POR (power on reset)
100  * values in hba power info
101  */
102 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
103 {
104 	hba->pwr_info.gear_rx = UFS_PWM_G1;
105 	hba->pwr_info.gear_tx = UFS_PWM_G1;
106 	hba->pwr_info.lane_rx = 1;
107 	hba->pwr_info.lane_tx = 1;
108 	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
109 	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
110 	hba->pwr_info.hs_rate = 0;
111 }
112 
113 /**
114  * ufshcd_print_pwr_info - print power params as saved in hba
115  * power info
116  */
117 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
118 {
119 	static const char * const names[] = {
120 		"INVALID MODE",
121 		"FAST_MODE",
122 		"SLOW_MODE",
123 		"INVALID MODE",
124 		"FASTAUTO_MODE",
125 		"SLOWAUTO_MODE",
126 		"INVALID MODE",
127 	};
128 
129 	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
130 		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
131 		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
132 		names[hba->pwr_info.pwr_rx],
133 		names[hba->pwr_info.pwr_tx],
134 		hba->pwr_info.hs_rate);
135 }
136 
137 /**
138  * ufshcd_ready_for_uic_cmd - Check if controller is ready
139  *                            to accept UIC commands
140  */
141 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
142 {
143 	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
144 		return true;
145 	else
146 		return false;
147 }
148 
149 /**
150  * ufshcd_get_uic_cmd_result - Get the UIC command result
151  */
152 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
153 {
154 	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
155 	       MASK_UIC_COMMAND_RESULT;
156 }
157 
158 /**
159  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
160  */
161 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
162 {
163 	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
164 }
165 
166 /**
167  * ufshcd_is_device_present - Check if any device is connected to
168  *			      the host controller
169  */
170 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
171 {
172 	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
173 						DEVICE_PRESENT) ? true : false;
174 }
175 
176 /**
177  * ufshcd_send_uic_cmd - UFS Interconnect layer command API
178  *
179  */
180 static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
181 {
182 	unsigned long start = 0;
183 	u32 intr_status;
184 	u32 enabled_intr_status;
185 	int timeout = UFS_UIC_CMD_TIMEOUT;
186 
187 	if (!ufshcd_ready_for_uic_cmd(hba)) {
188 		dev_err(hba->dev,
189 			"Controller not ready to accept UIC commands\n");
190 		return -EIO;
191 	}
192 
193 	if (uic_cmd->command == UIC_CMD_DME_LINK_STARTUP)
194 		timeout = UFS_UIC_LINKUP_TIMEOUT;
195 
196 	debug("sending uic command:%d\n", uic_cmd->command);
197 
198 	/* Write Args */
199 	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
200 	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
201 	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
202 
203 	/* Write UIC Cmd */
204 	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
205 		      REG_UIC_COMMAND);
206 
207 	start = get_timer(0);
208 	do {
209 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
210 		enabled_intr_status = intr_status & hba->intr_mask;
211 		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
212 
213 		if (get_timer(start) > timeout) {
214 			dev_err(hba->dev,
215 				"Timedout waiting for UIC response\n");
216 
217 			return -ETIMEDOUT;
218 		}
219 
220 		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
221 			dev_err(hba->dev, "Error in status:%08x\n",
222 				enabled_intr_status);
223 
224 			return -1;
225 		}
226 	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));
227 
228 	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
229 	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);
230 
231 	debug("Sent successfully\n");
232 
233 	return 0;
234 }
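
/*
 * Sketch of driving the UIC layer directly with ufshcd_send_uic_cmd(); the
 * ufshcd_dme_{set,get}_attr() helpers below are the usual entry points.
 * Example (hypothetical): DME_GET of the active TX lane count:
 *
 *	struct uic_command cmd = {0};
 *
 *	cmd.command = UIC_CMD_DME_GET;
 *	cmd.argument1 = UIC_ARG_MIB(PA_ACTIVETXDATALANES);
 *	if (!ufshcd_send_uic_cmd(hba, &cmd))
 *		debug("active tx lanes: %u\n", cmd.argument3);
 */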
235 
236 /**
237  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
238  *
239  */
240 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
241 			u32 mib_val, u8 peer)
242 {
243 	struct uic_command uic_cmd = {0};
244 	static const char *const action[] = {
245 		"dme-set",
246 		"dme-peer-set"
247 	};
248 	const char *set = action[!!peer];
249 	int ret;
250 	int retries = UFS_UIC_COMMAND_RETRIES;
251 
252 	uic_cmd.command = peer ?
253 		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
254 	uic_cmd.argument1 = attr_sel;
255 	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
256 	uic_cmd.argument3 = mib_val;
257 
258 	do {
259 		/* for peer attributes we retry upon failure */
260 		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
261 		if (ret)
262 			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
263 				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
264 	} while (ret && peer && --retries);
265 
266 	if (ret)
267 		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
268 			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
269 			UFS_UIC_COMMAND_RETRIES - retries);
270 
271 	return ret;
272 }
273 
274 /**
275  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
276  *
277  */
278 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
279 			u32 *mib_val, u8 peer)
280 {
281 	struct uic_command uic_cmd = {0};
282 	static const char *const action[] = {
283 		"dme-get",
284 		"dme-peer-get"
285 	};
286 	const char *get = action[!!peer];
287 	int ret;
288 	int retries = UFS_UIC_COMMAND_RETRIES;
289 
290 	uic_cmd.command = peer ?
291 		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
292 	uic_cmd.argument1 = attr_sel;
293 
294 	do {
295 		/* for peer attributes we retry upon failure */
296 		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
297 		if (ret)
298 			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
299 				get, UIC_GET_ATTR_ID(attr_sel), ret);
300 	} while (ret && peer && --retries);
301 
302 	if (ret)
303 		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
304 			get, UIC_GET_ATTR_ID(attr_sel),
305 			UFS_UIC_COMMAND_RETRIES - retries);
306 
307 	if (mib_val && !ret)
308 		*mib_val = uic_cmd.argument3;
309 
310 	return ret;
311 }
312 
313 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
314 {
315 	u32 tx_lanes, i, err = 0;
316 
317 	if (!peer)
318 		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
319 			       &tx_lanes);
320 	else
321 		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
322 				    &tx_lanes);
323 	for (i = 0; i < tx_lanes; i++) {
324 		if (!peer)
325 			err = ufshcd_dme_set(hba,
326 					     UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
327 					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
328 					     0);
329 		else
330 			err = ufshcd_dme_peer_set(hba,
331 					UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
332 					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
333 					0);
334 		if (err) {
335 			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n",
336 				__func__, peer, i, err);
337 			break;
338 		}
339 	}
340 
341 	return err;
342 }
343 
344 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
345 {
346 	return ufshcd_disable_tx_lcc(hba, true);
347 }
348 
349 /**
350  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
351  *
352  */
353 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
354 {
355 	struct uic_command uic_cmd = {0};
356 	int ret;
357 
358 	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
359 
360 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
361 	if (ret)
362 		dev_dbg(hba->dev,
363 			"dme-link-startup: error code %d\n", ret);
364 	return ret;
365 }
366 
367 int ufshcd_dme_enable(struct ufs_hba *hba)
368 {
369 	struct uic_command uic_cmd = {0};
370 	int ret;
371 
372 	uic_cmd.command = UIC_CMD_DME_ENABLE;
373 
374 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
375 	if (ret)
376 		dev_err(hba->dev,
377 			"dme-enable: error code %d\n", ret);
378 	return ret;
379 }
380 
381 int ufshcd_dme_reset(struct ufs_hba *hba)
382 {
383 	struct uic_command uic_cmd = {0};
384 	int ret;
385 
386 	uic_cmd.command = UIC_CMD_DME_RESET;
387 
388 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
389 	if (ret)
390 		dev_err(hba->dev,
391 			"dme-reset: error code %d\n", ret);
392 	return ret;
393 }
394 
395 /**
396  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
397  *
398  */
399 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
400 {
401 	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
402 }
403 
404 /**
405  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
406  */
407 static inline int ufshcd_get_lists_status(u32 reg)
408 {
409 	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
410 }
411 
412 /**
413  * ufshcd_enable_run_stop_reg - Enable run-stop registers
414  *			Setting the run-stop registers to 1 indicates to the
415  *			host controller that it can process requests
416  */
417 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
418 {
419 	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
420 		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
421 	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
422 		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
423 }
424 
425 /**
426  * ufshcd_enable_intr - enable interrupts
427  */
428 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
429 {
430 	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
431 	u32 rw;
432 
433 	if (hba->version == UFSHCI_VERSION_10) {
434 		rw = set & INTERRUPT_MASK_RW_VER_10;
435 		set = rw | ((set ^ intrs) & intrs);
436 	} else {
437 		set |= intrs;
438 	}
439 
440 	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
441 
442 	hba->intr_mask = set;
443 }
444 
445 /**
446  * ufshcd_make_hba_operational - Make UFS controller operational
447  *
448  * To bring UFS host controller to operational state,
449  * 1. Enable required interrupts
450  * 2. Configure interrupt aggregation
451  * 3. Program UTRL and UTMRL base address
452  * 4. Configure run-stop-registers
453  *
454  */
455 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
456 {
457 	int err = 0;
458 	u32 reg;
459 
460 	/* Enable required interrupts */
461 	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
462 
463 	/* Disable interrupt aggregation */
464 	ufshcd_disable_intr_aggr(hba);
465 
466 	/* Configure UTRL and UTMRL base address registers */
467 	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
468 		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
469 	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
470 		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
471 	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
472 		      REG_UTP_TASK_REQ_LIST_BASE_L);
473 	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
474 		      REG_UTP_TASK_REQ_LIST_BASE_H);
475 
476 	/*
477 	 * UCRDY, UTMRLRDY and UTRLRDY bits must be 1
478 	 */
479 	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
480 	if (!(ufshcd_get_lists_status(reg))) {
481 		ufshcd_enable_run_stop_reg(hba);
482 	} else {
483 		dev_err(hba->dev,
484 			"Host controller not ready to process requests\n");
485 		err = -EIO;
486 		goto out;
487 	}
488 
489 out:
490 	return err;
491 }
492 
493 /**
494  * ufshcd_link_startup - Initialize unipro link startup
495  */
496 static int ufshcd_link_startup(struct ufs_hba *hba)
497 {
498 	int ret;
499 	int retries = DME_LINKSTARTUP_RETRIES;
500 	bool link_startup_again = true;
501 
502 	if (ufshcd_is_device_present(hba))
503 		goto device_present;
504 
505 link_startup:
506 	do {
507 		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);
508 
509 		ret = ufshcd_dme_link_startup(hba);
510 
511 		/* check if device is detected by inter-connect layer */
512 		if (!ret && !ufshcd_is_device_present(hba)) {
513 			dev_err(hba->dev, "%s: Device not present\n", __func__);
514 			ret = -ENXIO;
515 			goto out;
516 		}
517 
518 		/*
519 		 * DME link lost indication is only received when link is up,
520 		 * but we can't be sure if the link is up until link startup
521 		 * succeeds. So reset the local Uni-Pro and try again.
522 		 */
523 		if (ret && ufshcd_hba_enable(hba))
524 			goto out;
525 	} while (ret && retries--);
526 
527 	if (ret)
528 		/* failed to get the link up... give up */
529 		goto out;
530 
531 	if (link_startup_again) {
532 		link_startup_again = false;
533 		retries = DME_LINKSTARTUP_RETRIES;
534 		goto link_startup;
535 	}
536 
537 device_present:
538 	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
539 	ufshcd_init_pwr_info(hba);
540 
541 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
542 		ret = ufshcd_disable_device_tx_lcc(hba);
543 		if (ret)
544 			goto out;
545 	}
546 
547 	/* Include any host controller configuration via UIC commands */
548 	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
549 	if (ret)
550 		goto out;
551 
552 	ret = ufshcd_make_hba_operational(hba);
553 out:
554 	if (ret)
555 		dev_err(hba->dev, "link startup failed %d\n", ret);
556 
557 	return ret;
558 }
559 
560 /**
561  * ufshcd_hba_stop - Send controller to reset state
562  */
563 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
564 {
565 	int err;
566 
567 	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
568 	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
569 				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
570 				       10);
571 	if (err)
572 		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
573 }
574 
575 /**
576  * ufshcd_is_hba_active - Get controller state
577  */
578 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
579 {
580 	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
581 		? false : true;
582 }
583 
584 /**
585  * ufshcd_hba_start - Start controller initialization sequence
586  */
587 static inline void ufshcd_hba_start(struct ufs_hba *hba)
588 {
589 	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
590 }
591 
592 /**
593  * ufshcd_hba_enable - initialize the controller
594  */
595 static int ufshcd_hba_enable(struct ufs_hba *hba)
596 {
597 	int retry;
598 
599 	if (!ufshcd_is_hba_active(hba))
600 		/* change controller state to "reset state" */
601 		ufshcd_hba_stop(hba);
602 
603 	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);
604 
605 	/* start controller initialization sequence */
606 	ufshcd_hba_start(hba);
607 
608 	/*
609 	 * To initialize a UFS host controller, the HCE bit must be set to 1.
610 	 * During initialization the HCE bit value changes from 1->0->1.
611 	 * When the host controller completes the initialization sequence it
612 	 * sets HCE back to 1, and the same bit is read back to check whether
613 	 * initialization has completed.
614 	 * Without this delay, the HCE = 1 written by the previous instruction
615 	 * might be read back before the controller has actually restarted.
616 	 * This delay can be changed based on the controller.
617 	 */
618 	mdelay(1);
619 
620 	/* wait for the host controller to complete initialization */
621 	retry = 10;
622 	while (ufshcd_is_hba_active(hba)) {
623 		if (retry) {
624 			retry--;
625 		} else {
626 			dev_err(hba->dev, "Controller enable failed\n");
627 			return -EIO;
628 		}
629 		mdelay(5);
630 	}
631 
632 	/* enable UIC related interrupts */
633 	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
634 
635 	if (ufshcd_ops_hce_enable_notify(hba, POST_CHANGE))
636 		return -EIO;
637 
638 	return 0;
639 }
640 
641 /**
642  * ufshcd_host_memory_configure - configure local reference block with
643  *				memory offsets
644  */
645 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
646 {
647 	struct utp_transfer_req_desc *utrdlp;
648 	dma_addr_t cmd_desc_dma_addr;
649 	u16 response_offset;
650 	u16 prdt_offset;
651 
652 	utrdlp = hba->utrdl;
653 	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;
654 
655 	utrdlp->command_desc_base_addr_lo =
656 				cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
657 	utrdlp->command_desc_base_addr_hi =
658 				cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));
659 
660 	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
661 	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
662 
663 	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
664 	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
665 	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
666 
667 	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
668 	hba->ucd_rsp_ptr =
669 		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
670 	hba->ucd_prdt_ptr =
671 		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
672 }
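
/*
 * Resulting single-slot layout (all requests reuse TASK_TAG 0), assuming
 * the utp_transfer_cmd_desc layout from ufs.h:
 *
 *	hba->ucdl + 0                    : request UPIU  (hba->ucd_req_ptr)
 *	hba->ucdl + response_upiu offset : response UPIU (hba->ucd_rsp_ptr)
 *	hba->ucdl + prd_table offset     : PRDT          (hba->ucd_prdt_ptr)
 *
 * The offsets and the response length are programmed into the UTRD in
 * 32-bit (dword) units, hence the ">> 2" shifts above.
 */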
673 
674 /**
675  * ufshcd_memory_alloc - allocate memory for host memory space data structures
676  */
677 static int ufshcd_memory_alloc(struct ufs_hba *hba)
678 {
679 	/* Allocate one Transfer Request Descriptor.
680 	 * Should be aligned to a 1k boundary.
681 	 */
682 	hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
683 	if (!hba->utrdl) {
684 		dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
685 		return -ENOMEM;
686 	}
687 
688 	/* Allocate one Command Descriptor.
689 	 * Should be aligned to a 1k boundary.
690 	 */
691 	hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
692 	if (!hba->ucdl) {
693 		dev_err(hba->dev, "Command descriptor memory allocation failed\n");
694 		return -ENOMEM;
695 	}
696 
697 	hba->dev_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_device_descriptor));
698 	if (!hba->dev_desc) {
699 		dev_err(hba->dev, "memory allocation failed\n");
700 		return -ENOMEM;
701 	}
702 
703 #if defined(CONFIG_SUPPORT_USBPLUG)
704 	hba->rc_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_configuration_descriptor));
705 	hba->wc_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_configuration_descriptor));
706 	hba->geo_desc = memalign(ARCH_DMA_MINALIGN, sizeof(struct ufs_geometry_descriptor));
707 	if (!hba->rc_desc || !hba->wc_desc || !hba->geo_desc) {
708 		dev_err(hba->dev, "memory allocation failed\n");
709 		return -ENOMEM;
710 	}
711 #endif
712 	return 0;
713 }
714 
715 /**
716  * ufshcd_get_intr_mask - Get the interrupt bit mask
717  */
718 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
719 {
720 	u32 intr_mask = 0;
721 
722 	switch (hba->version) {
723 	case UFSHCI_VERSION_10:
724 		intr_mask = INTERRUPT_MASK_ALL_VER_10;
725 		break;
726 	case UFSHCI_VERSION_11:
727 	case UFSHCI_VERSION_20:
728 		intr_mask = INTERRUPT_MASK_ALL_VER_11;
729 		break;
730 	case UFSHCI_VERSION_21:
731 	default:
732 		intr_mask = INTERRUPT_MASK_ALL_VER_21;
733 		break;
734 	}
735 
736 	return intr_mask;
737 }
738 
739 /**
740  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
741  */
742 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
743 {
744 	return ufshcd_readl(hba, REG_UFS_VERSION);
745 }
746 
747 /**
748  * ufshcd_get_upmcrs - Get the power mode change request status
749  */
750 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
751 {
752 	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
753 }
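
/*
 * UPMCRS lives in bits 10:8 of REG_CONTROLLER_STATUS; PWR_LOCAL here means
 * the local power mode change request completed successfully (see
 * ufshcd_uic_pwr_ctrl() below).
 */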
754 
755 /**
756  * ufshcd_cache_flush_and_invalidate - Flush and invalidate cache
757  *
758  * Flush and invalidate cache in aligned address..address+size range.
759  * The invalidation is in place to avoid stale data in cache.
760  */
761 static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size)
762 {
763 	uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
764 	unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);
765 
766 	flush_dcache_range(aaddr, aaddr + asize);
767 	invalidate_dcache_range(aaddr, aaddr + asize);
768 }
769 
770 /**
771  * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
772  * according to the request
773  */
774 static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
775 					u32 *upiu_flags,
776 					enum dma_data_direction cmd_dir)
777 {
778 	u32 data_direction;
779 	u32 dword_0;
780 
781 	if (cmd_dir == DMA_FROM_DEVICE) {
782 		data_direction = UTP_DEVICE_TO_HOST;
783 		*upiu_flags = UPIU_CMD_FLAGS_READ;
784 	} else if (cmd_dir == DMA_TO_DEVICE) {
785 		data_direction = UTP_HOST_TO_DEVICE;
786 		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
787 	} else {
788 		data_direction = UTP_NO_DATA_TRANSFER;
789 		*upiu_flags = UPIU_CMD_FLAGS_NONE;
790 	}
791 
792 	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);
793 
794 	/* Enable Interrupt for command */
795 	dword_0 |= UTP_REQ_DESC_INT_CMD;
796 
797 	/* Transfer request descriptor header fields */
798 	req_desc->header.dword_0 = cpu_to_le32(dword_0);
799 	/* dword_1 is reserved, hence it is set to 0 */
800 	req_desc->header.dword_1 = 0;
801 	/*
802 	 * assigning invalid value for command status. Controller
803 	 * updates OCS on command completion, with the command
804 	 * status
805 	 */
806 	req_desc->header.dword_2 =
807 		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
808 	/* dword_3 is reserved, hence it is set to 0 */
809 	req_desc->header.dword_3 = 0;
810 
811 	req_desc->prd_table_length = 0;
812 
813 	ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
814 }
815 
816 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
817 					      u32 upiu_flags)
818 {
819 	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
820 	struct ufs_query *query = &hba->dev_cmd.query;
821 	u16 len = be16_to_cpu(query->request.upiu_req.length);
822 
823 	/* Query request header */
824 	ucd_req_ptr->header.dword_0 =
825 				UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
826 						  upiu_flags, 0, TASK_TAG);
827 	ucd_req_ptr->header.dword_1 =
828 				UPIU_HEADER_DWORD(0, query->request.query_func,
829 						  0, 0);
830 
831 	/* Data segment length is only needed for WRITE_DESC */
832 	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
833 		ucd_req_ptr->header.dword_2 =
834 				UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
835 	else
836 		ucd_req_ptr->header.dword_2 = 0;
837 
838 	/* Copy the Query Request buffer as is */
839 	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);
840 
841 	/* Copy the Descriptor */
842 	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
843 		memcpy(ucd_req_ptr + 1, query->descriptor, len);
844 		ufshcd_cache_flush_and_invalidate(ucd_req_ptr,
845 				ALIGN(sizeof(*ucd_req_ptr) + len, ARCH_DMA_MINALIGN));
846 	} else {
847 		ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
848 	}
849 
850 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
851 	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
852 }
853 
854 static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
855 {
856 	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
857 
858 	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
859 
860 	/* command descriptor fields */
861 	ucd_req_ptr->header.dword_0 =
862 			UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG);
863 	/* clear rest of the fields of basic header */
864 	ucd_req_ptr->header.dword_1 = 0;
865 	ucd_req_ptr->header.dword_2 = 0;
866 
867 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
868 
869 	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
870 	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
871 }
872 
873 /**
874  * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
875  *			     for Device Management Purposes
876  */
877 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
878 				   enum dev_cmd_type cmd_type)
879 {
880 	u32 upiu_flags;
881 	int ret = 0;
882 	struct utp_transfer_req_desc *req_desc = hba->utrdl;
883 
884 	hba->dev_cmd.type = cmd_type;
885 
886 	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
887 	switch (cmd_type) {
888 	case DEV_CMD_TYPE_QUERY:
889 		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
890 		break;
891 	case DEV_CMD_TYPE_NOP:
892 		ufshcd_prepare_utp_nop_upiu(hba);
893 		break;
894 	default:
895 		ret = -EINVAL;
896 	}
897 
898 	return ret;
899 }
900 
901 static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
902 {
903 	unsigned long start;
904 	u32 intr_status;
905 	u32 enabled_intr_status;
906 
907 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
908 
909 	start = get_timer(0);
910 	do {
911 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
912 		enabled_intr_status = intr_status & hba->intr_mask;
913 		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
914 
915 		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
916 			dev_err(hba->dev,
917 				"Timedout waiting for UTP response\n");
918 
919 			return -ETIMEDOUT;
920 		}
921 
922 		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
923 			dev_err(hba->dev, "Error in status:%08x\n",
924 				enabled_intr_status);
925 
926 			return -1;
927 		}
928 	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));
929 
930 	return 0;
931 }
932 
933 /**
934  * ufshcd_get_req_rsp - returns the TR response transaction type
935  */
936 static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
937 {
938 	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
939 }
940 
941 /**
942  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
943  *
944  */
945 static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
946 {
947 	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
948 }
949 
950 static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
951 {
952 	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
953 }
954 
955 static int ufshcd_check_query_response(struct ufs_hba *hba)
956 {
957 	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
958 
959 	/* Get the UPIU response */
960 	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
961 				UPIU_RSP_CODE_OFFSET;
962 	return query_res->response;
963 }
964 
965 /**
966  * ufshcd_copy_query_response() - Copy the Query Response and the data
967  * descriptor
968  */
969 static int ufshcd_copy_query_response(struct ufs_hba *hba)
970 {
971 	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
972 
973 	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
974 
975 	/* Get the descriptor */
976 	if (hba->dev_cmd.query.descriptor &&
977 	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
978 		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
979 				GENERAL_UPIU_REQUEST_SIZE;
980 		u16 resp_len;
981 		u16 buf_len;
982 
983 		/* data segment length */
984 		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
985 						MASK_QUERY_DATA_SEG_LEN;
986 		buf_len =
987 			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
988 		if (likely(buf_len >= resp_len)) {
989 			int size = ALIGN(GENERAL_UPIU_REQUEST_SIZE + resp_len, ARCH_DMA_MINALIGN);
990 
991 			invalidate_dcache_range((uintptr_t)hba->ucd_rsp_ptr, (uintptr_t)hba->ucd_rsp_ptr + size);
992 			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
993 		} else {
994 			dev_warn(hba->dev,
995 				 "%s: Response size is bigger than buffer",
996 				 __func__);
997 			return -EINVAL;
998 		}
999 	}
1000 
1001 	return 0;
1002 }
1003 
1004 /**
1005  * ufshcd_exec_dev_cmd - API for sending device management requests
1006  */
1007 int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout)
1008 {
1009 	int err;
1010 	int resp;
1011 
1012 	err = ufshcd_comp_devman_upiu(hba, cmd_type);
1013 	if (err)
1014 		return err;
1015 
1016 	err = ufshcd_send_command(hba, TASK_TAG);
1017 	if (err)
1018 		return err;
1019 
1020 	err = ufshcd_get_tr_ocs(hba);
1021 	if (err) {
1022 		dev_err(hba->dev, "Error in OCS:%d\n", err);
1023 		return -EINVAL;
1024 	}
1025 
1026 	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
1027 	switch (resp) {
1028 	case UPIU_TRANSACTION_NOP_IN:
1029 		break;
1030 	case UPIU_TRANSACTION_QUERY_RSP:
1031 		err = ufshcd_check_query_response(hba);
1032 		if (!err)
1033 			err = ufshcd_copy_query_response(hba);
1034 		break;
1035 	case UPIU_TRANSACTION_REJECT_UPIU:
1036 		/* TODO: handle Reject UPIU Response */
1037 		err = -EPERM;
1038 		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1039 			__func__);
1040 		break;
1041 	default:
1042 		err = -EINVAL;
1043 		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1044 			__func__, resp);
1045 	}
1046 
1047 	return err;
1048 }
1049 
1050 /**
1051  * ufshcd_init_query() - init the query response and request parameters
1052  */
1053 static inline void ufshcd_init_query(struct ufs_hba *hba,
1054 				     struct ufs_query_req **request,
1055 				     struct ufs_query_res **response,
1056 				     enum query_opcode opcode,
1057 				     u8 idn, u8 index, u8 selector)
1058 {
1059 	*request = &hba->dev_cmd.query.request;
1060 	*response = &hba->dev_cmd.query.response;
1061 	memset(*request, 0, sizeof(struct ufs_query_req));
1062 	memset(*response, 0, sizeof(struct ufs_query_res));
1063 	(*request)->upiu_req.opcode = opcode;
1064 	(*request)->upiu_req.idn = idn;
1065 	(*request)->upiu_req.index = index;
1066 	(*request)->upiu_req.selector = selector;
1067 }
1068 
1069 /**
1070  * ufshcd_query_flag() - API function for sending flag query requests
1071  */
1072 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1073 		      enum flag_idn idn, bool *flag_res)
1074 {
1075 	struct ufs_query_req *request = NULL;
1076 	struct ufs_query_res *response = NULL;
1077 	int err, index = 0, selector = 0;
1078 	int timeout = QUERY_REQ_TIMEOUT;
1079 
1080 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1081 			  selector);
1082 
1083 	switch (opcode) {
1084 	case UPIU_QUERY_OPCODE_SET_FLAG:
1085 	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
1086 	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1087 		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1088 		break;
1089 	case UPIU_QUERY_OPCODE_READ_FLAG:
1090 		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1091 		if (!flag_res) {
1092 			/* No dummy reads */
1093 			dev_err(hba->dev, "%s: Invalid argument for read request\n",
1094 				__func__);
1095 			err = -EINVAL;
1096 			goto out;
1097 		}
1098 		break;
1099 	default:
1100 		dev_err(hba->dev,
1101 			"%s: Expected query flag opcode but got = %d\n",
1102 			__func__, opcode);
1103 		err = -EINVAL;
1104 		goto out;
1105 	}
1106 
1107 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
1108 
1109 	if (err) {
1110 		dev_err(hba->dev,
1111 			"%s: Sending flag query for idn %d failed, err = %d\n",
1112 			__func__, idn, err);
1113 		goto out;
1114 	}
1115 
1116 	if (flag_res)
1117 		*flag_res = (be32_to_cpu(response->upiu_res.value) &
1118 				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1119 
1120 out:
1121 	return err;
1122 }
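
/*
 * A minimal sketch of a flag read (hypothetical caller), e.g. checking
 * fDeviceInit the way ufshcd_complete_dev_init() does below:
 *
 *	bool flag_res;
 *
 *	if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *			       QUERY_FLAG_IDN_FDEVICEINIT, &flag_res))
 *		debug("fDeviceInit = %d\n", flag_res);
 */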
1123 
1124 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1125 				   enum query_opcode opcode,
1126 				   enum flag_idn idn, bool *flag_res)
1127 {
1128 	int ret;
1129 	int retries;
1130 
1131 	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1132 		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1133 		if (ret)
1134 			dev_dbg(hba->dev,
1135 				"%s: failed with error %d, retries %d\n",
1136 				__func__, ret, retries);
1137 		else
1138 			break;
1139 	}
1140 
1141 	if (ret)
1142 		dev_err(hba->dev,
1143 			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
1144 			__func__, opcode, idn, ret, retries);
1145 	return ret;
1146 }
1147 
1148 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
1149 				     enum query_opcode opcode,
1150 				     enum desc_idn idn, u8 index, u8 selector,
1151 				     u8 *desc_buf, int *buf_len)
1152 {
1153 	struct ufs_query_req *request = NULL;
1154 	struct ufs_query_res *response = NULL;
1155 	int err;
1156 
1157 	if (!desc_buf) {
1158 		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1159 			__func__, opcode);
1160 		err = -EINVAL;
1161 		goto out;
1162 	}
1163 
1164 	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1165 		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1166 			__func__, *buf_len);
1167 		err = -EINVAL;
1168 		goto out;
1169 	}
1170 
1171 	ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector);
1172 	hba->dev_cmd.query.descriptor = desc_buf;
1173 	request->upiu_req.length = cpu_to_be16(*buf_len);
1174 
1175 	switch (opcode) {
1176 	case UPIU_QUERY_OPCODE_WRITE_DESC:
1177 		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1178 		break;
1179 	case UPIU_QUERY_OPCODE_READ_DESC:
1180 		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1181 		break;
1182 	default:
1183 		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1184 			__func__, opcode);
1185 		err = -EINVAL;
1186 		goto out;
1187 	}
1188 
1189 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1190 
1191 	if (err) {
1192 		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
1193 			__func__, opcode, idn, index, err);
1194 		goto out;
1195 	}
1196 
1197 	hba->dev_cmd.query.descriptor = NULL;
1198 	*buf_len = be16_to_cpu(response->upiu_res.length);
1199 
1200 out:
1201 	return err;
1202 }
1203 
1204 /**
1205  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
1206  */
1207 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
1208 				  enum desc_idn idn, u8 index, u8 selector,
1209 				  u8 *desc_buf, int *buf_len)
1210 {
1211 	int err;
1212 	int retries;
1213 
1214 	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1215 		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
1216 						selector, desc_buf, buf_len);
1217 		if (!err || err == -EINVAL)
1218 			break;
1219 	}
1220 
1221 	return err;
1222 }
1223 
1224 /**
1225  * ufshcd_read_desc_length - read the specified descriptor length from header
1226  */
1227 int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
1228 				   int desc_index, int *desc_length)
1229 {
1230 	int ret;
1231 	u8 header[QUERY_DESC_HDR_SIZE];
1232 	int header_len = QUERY_DESC_HDR_SIZE;
1233 
1234 	if (desc_id >= QUERY_DESC_IDN_MAX)
1235 		return -EINVAL;
1236 
1237 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
1238 					    desc_id, desc_index, 0, header,
1239 					    &header_len);
1240 
1241 	if (ret) {
1242 		dev_err(hba->dev, "%s: Failed to get descriptor header id %d\n",
1243 			__func__, desc_id);
1244 		return ret;
1245 	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
1246 		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch\n",
1247 			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
1248 			 desc_id);
1249 		ret = -EINVAL;
1250 	}
1251 
1252 	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
1253 
1254 	return ret;
1255 }
1256 
1257 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
1258 {
1259 	int err;
1260 
1261 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
1262 				      &hba->desc_size.dev_desc);
1263 	if (err)
1264 		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1265 
1266 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
1267 				      &hba->desc_size.pwr_desc);
1268 	if (err)
1269 		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1270 
1271 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
1272 				      &hba->desc_size.interc_desc);
1273 	if (err)
1274 		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1275 
1276 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
1277 				      &hba->desc_size.conf_desc);
1278 	if (err)
1279 		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1280 
1281 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
1282 				      &hba->desc_size.unit_desc);
1283 	if (err)
1284 		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1285 
1286 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
1287 				      &hba->desc_size.geom_desc);
1288 	if (err)
1289 		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1290 
1291 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
1292 				      &hba->desc_size.hlth_desc);
1293 	if (err)
1294 		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1295 }
1296 
1297 /**
1298  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
1299  *
1300  */
1301 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
1302 				 int *desc_len)
1303 {
1304 	switch (desc_id) {
1305 	case QUERY_DESC_IDN_DEVICE:
1306 		*desc_len = hba->desc_size.dev_desc;
1307 		break;
1308 	case QUERY_DESC_IDN_POWER:
1309 		*desc_len = hba->desc_size.pwr_desc;
1310 		break;
1311 	case QUERY_DESC_IDN_GEOMETRY:
1312 		*desc_len = hba->desc_size.geom_desc;
1313 		break;
1314 	case QUERY_DESC_IDN_CONFIGURATION:
1315 		*desc_len = hba->desc_size.conf_desc;
1316 		break;
1317 	case QUERY_DESC_IDN_UNIT:
1318 		*desc_len = hba->desc_size.unit_desc;
1319 		break;
1320 	case QUERY_DESC_IDN_INTERCONNECT:
1321 		*desc_len = hba->desc_size.interc_desc;
1322 		break;
1323 	case QUERY_DESC_IDN_STRING:
1324 		*desc_len = QUERY_DESC_MAX_SIZE;
1325 		break;
1326 	case QUERY_DESC_IDN_HEALTH:
1327 		*desc_len = hba->desc_size.hlth_desc;
1328 		break;
1329 	case QUERY_DESC_IDN_RFU_0:
1330 	case QUERY_DESC_IDN_RFU_1:
1331 		*desc_len = 0;
1332 		break;
1333 	default:
1334 		*desc_len = 0;
1335 		return -EINVAL;
1336 	}
1337 	return 0;
1338 }
1339 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
1340 
1341 /**
1342  * ufshcd_read_desc_param - read the specified descriptor parameter
1343  *
1344  */
1345 int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
1346 			   int desc_index, u8 param_offset, u8 *param_read_buf,
1347 			   u8 param_size)
1348 {
1349 	int ret;
1350 	u8 *desc_buf;
1351 	int buff_len;
1352 	bool is_kmalloc = true;
1353 
1354 	/* Safety check */
1355 	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
1356 		return -EINVAL;
1357 
1358 	/* Get the max length of descriptor from structure filled up at probe
1359 	 * time.
1360 	 */
1361 	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
1362 
1363 	/* Sanity checks */
1364 	if (ret || !buff_len) {
1365 		dev_err(hba->dev, "%s: Failed to get full descriptor length\n",
1366 			__func__);
1367 		return ret;
1368 	}
1369 
1370 	/* Check whether we need temp memory */
1371 	if (param_offset != 0 || param_size < buff_len) {
1372 		desc_buf = kmalloc(buff_len, GFP_KERNEL);
1373 		if (!desc_buf)
1374 			return -ENOMEM;
1375 	} else {
1376 		desc_buf = param_read_buf;
1377 		is_kmalloc = false;
1378 	}
1379 
1380 	/* Request for full descriptor */
1381 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
1382 					    desc_id, desc_index, 0, desc_buf,
1383 					    &buff_len);
1384 
1385 	if (ret) {
1386 		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
1387 			__func__, desc_id, desc_index, param_offset, ret);
1388 		goto out;
1389 	}
1390 
1391 	/* Sanity check */
1392 	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
1393 		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
1394 			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
1395 		ret = -EINVAL;
1396 		goto out;
1397 	}
1398 
1399 	/* Make sure we do not copy more data than is available */
1400 	if (is_kmalloc && param_size > buff_len)
1401 		param_size = buff_len;
1402 
1403 	if (is_kmalloc)
1404 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1405 out:
1406 	if (is_kmalloc)
1407 		kfree(desc_buf);
1408 	return ret;
1409 }
1410 
1411 /* replace non-printable or non-ASCII characters with spaces */
1412 static inline void ufshcd_remove_non_printable(uint8_t *val)
1413 {
1414 	if (!val)
1415 		return;
1416 
1417 	if (*val < 0x20 || *val > 0x7e)
1418 		*val = ' ';
1419 }
1420 
1421 /**
1422  * ufshcd_uic_pwr_ctrl - execute a UIC command that affects the link power
1423  * state and wait for it to take effect
1424  *
1425  */
1426 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
1427 {
1428 	unsigned long start = 0;
1429 	u8 status;
1430 	int ret;
1431 
1432 	ret = ufshcd_send_uic_cmd(hba, cmd);
1433 	if (ret) {
1434 		dev_err(hba->dev,
1435 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
1436 			cmd->command, cmd->argument3, ret);
1437 
1438 		return ret;
1439 	}
1440 
1441 	start = get_timer(0);
1442 	do {
1443 		status = ufshcd_get_upmcrs(hba);
1444 		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
1445 			dev_err(hba->dev,
1446 				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
1447 				cmd->command, status);
1448 			ret = (status != PWR_OK) ? status : -1;
1449 			break;
1450 		}
1451 	} while (status != PWR_LOCAL);
1452 
1453 	return ret;
1454 }
1455 
1456 /**
1457  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
1458  *				using DME_SET primitives.
1459  */
1460 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1461 {
1462 	struct uic_command uic_cmd = {0};
1463 	int ret;
1464 
1465 	uic_cmd.command = UIC_CMD_DME_SET;
1466 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
1467 	uic_cmd.argument3 = mode;
1468 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
1469 
1470 	return ret;
1471 }
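
/*
 * The mode byte passed above packs the RX power mode into the high nibble
 * and the TX power mode into the low nibble; e.g. FAST_MODE (1) in both
 * directions encodes as (1 << 4 | 1) = 0x11. See the caller
 * ufshcd_change_power_mode() below.
 */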
1472 
1473 static
1474 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
1475 				      struct scsi_cmd *pccb, u32 upiu_flags)
1476 {
1477 	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
1478 	unsigned int cdb_len;
1479 
1480 	/* command descriptor fields */
1481 	ucd_req_ptr->header.dword_0 =
1482 			UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
1483 					  pccb->lun, TASK_TAG);
1484 	ucd_req_ptr->header.dword_1 =
1485 			UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
1486 
1487 	/* Total EHS length and Data segment length will be zero */
1488 	ucd_req_ptr->header.dword_2 = 0;
1489 
1490 	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);
1491 
1492 	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
1493 	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
1494 	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
1495 
1496 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
1497 	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
1498 	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
1499 }
1500 
1501 static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
1502 				     unsigned char *buf, ulong len)
1503 {
1504 	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
1505 	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
1506 	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
1507 }
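
/*
 * The PRDT byte-count field holds the segment length minus one and must
 * describe a dword multiple, so callers pass len as (bytes - 1) and the
 * low two bits are forced to 1 via GENMASK(1, 0); a full 256 KiB segment
 * is thus encoded as 0x3ffff.
 */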
1508 
1509 static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
1510 {
1511 	struct utp_transfer_req_desc *req_desc = hba->utrdl;
1512 	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
1513 	uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1);
1514 	ulong datalen = pccb->datalen;
1515 	int table_length;
1516 	u8 *buf;
1517 	int i;
1518 
1519 	if (!datalen) {
1520 		req_desc->prd_table_length = 0;
1521 		ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
1522 		return;
1523 	}
1524 
1525 	if (pccb->dma_dir == DMA_TO_DEVICE) {	/* Write to device */
1526 		flush_dcache_range(aaddr, ALIGN(aaddr + datalen + ARCH_DMA_MINALIGN - 1, ARCH_DMA_MINALIGN));
1527 	}
1528 
1529 	/* In any case, invalidate cache to avoid stale data in it. */
1530 	invalidate_dcache_range(aaddr, ALIGN(aaddr + datalen + ARCH_DMA_MINALIGN - 1, ARCH_DMA_MINALIGN));
1531 
1532 	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
1533 	buf = pccb->pdata;
1534 	i = table_length;
1535 	while (--i) {
1536 		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
1537 				  MAX_PRDT_ENTRY - 1);
1538 		buf += MAX_PRDT_ENTRY;
1539 		datalen -= MAX_PRDT_ENTRY;
1540 	}
1541 
1542 	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);
1543 
1544 	req_desc->prd_table_length = table_length;
1545 	ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length);
1546 	ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
1547 }
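
/*
 * Worked example (hypothetical request): for pccb->datalen of 600 KiB,
 * table_length = DIV_ROUND_UP(600 KiB, 256 KiB) = 3; the loop emits two
 * full 256 KiB entries and the final prepare_prdt_desc() call covers the
 * remaining 88 KiB tail.
 */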
1548 
1549 int ufs_send_scsi_cmd(struct ufs_hba *hba, struct scsi_cmd *pccb)
1550 {
1551 	struct utp_transfer_req_desc *req_desc = hba->utrdl;
1552 	u32 upiu_flags;
1553 	int ocs, result = 0, retry_count = 3;
1554 	u8 scsi_status;
1555 
1556 	if (hba->quirks & UFSDEV_QUIRK_LUN_IN_SCSI_COMMANDS)
1557 		pccb->cmd[1] &= 0x1F;
1558 
1559 retry:
1560 	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
1561 	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
1562 	prepare_prdt_table(hba, pccb);
1563 
1564 	if (ufshcd_send_command(hba, TASK_TAG) == -ETIMEDOUT && retry_count) {
1565 		retry_count--;
1566 		goto retry;
1567 	}
1568 
1569 	ocs = ufshcd_get_tr_ocs(hba);
1570 	switch (ocs) {
1571 	case OCS_SUCCESS:
1572 		result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
1573 		switch (result) {
1574 		case UPIU_TRANSACTION_RESPONSE:
1575 			result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);
1576 
1577 			scsi_status = result & MASK_SCSI_STATUS;
1578 			if (pccb->cmd[0] == SCSI_TST_U_RDY && scsi_status) {
1579 				/* TEST UNIT READY can fail on Phison UFS; retry, then ignore the failure and continue */
1580 				if (retry_count) {
1581 					retry_count--;
1582 					goto retry;
1583 				}
1584 				break;
1585 			}
1586 			if (scsi_status)
1587 				return -EINVAL;
1588 
1589 			break;
1590 		case UPIU_TRANSACTION_REJECT_UPIU:
1591 			/* TODO: handle Reject UPIU Response */
1592 			dev_err(hba->dev,
1593 				"Reject UPIU not fully implemented\n");
1594 			return -EINVAL;
1595 		default:
1596 			dev_err(hba->dev,
1597 				"Unexpected request response code = %x\n",
1598 				result);
1599 			return -EINVAL;
1600 		}
1601 		break;
1602 	default:
1603 		dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
1604 		return -EINVAL;
1605 	}
1606 
1607 	return 0;
1608 }
1609 
1610 static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
1611 {
1612 	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
1613 
1614 	return ufs_send_scsi_cmd(hba, pccb);
1615 }
1616 
1617 static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
1618 				   int desc_index, u8 *buf, u32 size)
1619 {
1620 	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1621 }
1622 
1623 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
1624 {
1625 	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
1626 }
1627 
1628 /**
1629  * ufshcd_read_string_desc - read string descriptor
1630  *
1631  */
1632 int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
1633 			    u8 *buf, u32 size, bool ascii)
1634 {
1635 	int err = 0;
1636 
1637 	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
1638 			       size);
1639 
1640 	if (err) {
1641 		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
1642 			__func__, QUERY_REQ_RETRIES, err);
1643 		goto out;
1644 	}
1645 
1646 	if (ascii) {
1647 		int desc_len;
1648 		int ascii_len;
1649 		int i;
1650 		u8 *buff_ascii;
1651 
1652 		desc_len = buf[0];
1653 		/* remove header and divide by 2 to move from UTF16 to UTF8 */
1654 		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
1655 		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
1656 			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
1657 				__func__);
1658 			err = -ENOMEM;
1659 			goto out;
1660 		}
1661 
1662 		buff_ascii = kmalloc(ALIGN(ascii_len, ARCH_DMA_MINALIGN), GFP_KERNEL);
1663 		if (!buff_ascii) {
1664 			err = -ENOMEM;
1665 			goto out;
1666 		}
1667 
1668 		/*
1669 		 * the descriptor contains the string in UTF-16 format;
1670 		 * convert it to UTF-8 so it can be displayed
1671 		 */
1672 		utf16_to_utf8(buff_ascii,
1673 			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);
1674 
1675 		/* replace non-printable or non-ASCII characters with spaces */
1676 		for (i = 0; i < ascii_len; i++)
1677 			ufshcd_remove_non_printable(&buff_ascii[i]);
1678 
1679 		memset(buf + QUERY_DESC_HDR_SIZE, 0,
1680 		       size - QUERY_DESC_HDR_SIZE);
1681 		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
1682 		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
1683 		kfree(buff_ascii);
1684 	}
1685 out:
1686 	return err;
1687 }
1688 
1689 static int ufs_get_device_desc(struct ufs_hba *hba, struct ufs_device_descriptor *dev_desc)
1690 {
1691 	int err;
1692 	size_t buff_len;
1693 
1694 	buff_len = sizeof(*dev_desc);
1695 	if (buff_len > hba->desc_size.dev_desc)
1696 		buff_len = hba->desc_size.dev_desc;
1697 
1698 	err = ufshcd_read_device_desc(hba, (u8 *)dev_desc, buff_len);
1699 	if (err)
1700 		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
1701 			__func__, err);
1702 
1703 	return err;
1704 }
1705 
1706 /**
1707  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
1708  */
1709 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
1710 {
1711 	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
1712 
1713 	if (hba->max_pwr_info.is_valid)
1714 		return 0;
1715 
1716 	pwr_info->pwr_tx = FAST_MODE;
1717 	pwr_info->pwr_rx = FAST_MODE;
1718 	pwr_info->hs_rate = PA_HS_MODE_B;
1719 
1720 	/* Get the connected lane count */
1721 	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
1722 		       &pwr_info->lane_rx);
1723 	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
1724 		       &pwr_info->lane_tx);
1725 
1726 	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
1727 		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
1728 			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
1729 		return -EINVAL;
1730 	}
1731 
1732 	/*
1733 	 * First, get the maximum HS gear. A zero value means there is
1734 	 * no HS gear capability; in that case, fall back to the maximum
1735 	 * PWM gear.
1736 	 */
1737 	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
1738 	if (!pwr_info->gear_rx) {
1739 		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1740 			       &pwr_info->gear_rx);
1741 		if (!pwr_info->gear_rx) {
1742 			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
1743 				__func__, pwr_info->gear_rx);
1744 			return -EINVAL;
1745 		}
1746 		pwr_info->pwr_rx = SLOW_MODE;
1747 	}
1748 
1749 	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
1750 			    &pwr_info->gear_tx);
1751 	if (!pwr_info->gear_tx) {
1752 		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1753 				    &pwr_info->gear_tx);
1754 		if (!pwr_info->gear_tx) {
1755 			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
1756 				__func__, pwr_info->gear_tx);
1757 			return -EINVAL;
1758 		}
1759 		pwr_info->pwr_tx = SLOW_MODE;
1760 	}
1761 
1762 	hba->max_pwr_info.is_valid = true;
1763 	return 0;
1764 }
1765 
1766 static int ufshcd_change_power_mode(struct ufs_hba *hba,
1767 				    struct ufs_pa_layer_attr *pwr_mode)
1768 {
1769 	int ret;
1770 
1771 	/* if already configured to the requested pwr_mode */
1772 	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
1773 	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
1774 	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
1775 	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
1776 	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
1777 	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
1778 	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
1779 		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
1780 		return 0;
1781 	}
1782 
1783 	/*
1784 	 * Configure attributes for power mode change with below.
1785 	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
1786 	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
1787 	 * - PA_HSSERIES
1788 	 */
1789 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
1790 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
1791 		       pwr_mode->lane_rx);
1792 	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
1793 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
1794 	else
1795 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
1796 
1797 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
1798 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
1799 		       pwr_mode->lane_tx);
1800 	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
1801 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
1802 	else
1803 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
1804 
1805 	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
1806 	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
1807 	    pwr_mode->pwr_rx == FAST_MODE ||
1808 	    pwr_mode->pwr_tx == FAST_MODE)
1809 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
1810 			       pwr_mode->hs_rate);
1811 
1812 	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
1813 					 pwr_mode->pwr_tx);
1814 
1815 	if (ret) {
1816 		dev_err(hba->dev,
1817 			"%s: power mode change failed %d\n", __func__, ret);
1818 
1819 		return ret;
1820 	}
1821 
1822 	/* Copy new Power Mode to power info */
1823 	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));
1824 
1825 	return ret;
1826 }
1827 
1828 /**
1829  * ufshcd_verify_dev_init() - Verify device initialization
1830  *
1831  */
1832 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1833 {
1834 	int retries;
1835 	int err;
1836 
1837 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1838 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1839 					  NOP_OUT_TIMEOUT);
1840 		if (!err || err == -ETIMEDOUT)
1841 			break;
1842 
1843 		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1844 	}
1845 
1846 	if (err)
1847 		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1848 
1849 	return err;
1850 }
1851 
1852 /**
1853  * ufshcd_complete_dev_init() - checks device readiness
1854  */
1855 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
1856 {
1857 	unsigned long start = 0;
1858 	int i;
1859 	int err;
1860 	bool flag_res = 1;
1861 
1862 	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1863 				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
1864 	if (err) {
1865 		dev_err(hba->dev,
1866 			"%s setting fDeviceInit flag failed with error %d\n",
1867 			__func__, err);
1868 		goto out;
1869 	}
1870 
1871 	/* poll for max. 1500ms for fDeviceInit flag to clear */
1872 	start = get_timer(0);
1873 	for (i = 0; i < 3000 && !err && flag_res; i++) {
1874 		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1875 					      QUERY_FLAG_IDN_FDEVICEINIT,
1876 					      &flag_res);
1877 		if (get_timer(start) > FDEVICEINIT_COMPL_TIMEOUT)
1878 			break;
1879 		udelay(500);
1880 	}
1881 
1882 	if (err)
1883 		dev_err(hba->dev,
1884 			"%s reading fDeviceInit flag failed with error %d\n",
1885 			__func__, err);
1886 	else if (flag_res)
1887 		dev_err(hba->dev,
1888 			"%s fDeviceInit was not cleared by the device\n",
1889 			__func__);
1890 
1891 out:
1892 	return err;
1893 }
1894 
1895 static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
1896 {
1897 	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1898 	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1899 	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1900 	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1901 	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1902 	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1903 	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1904 }
1905 
1906 int _ufs_start(struct ufs_hba *hba)
1907 {
1908 	int ret, retry_count = 1;
1909 
1910 retry:
1911 	ret = ufshcd_link_startup(hba);
1912 	if (ret)
1913 		return ret;
1914 
1915 	ret = ufshcd_verify_dev_init(hba);
1916 	if (ret) {
1917 		ufshcd_hba_enable(hba);
1918 		if (retry_count--)
1919 			goto retry;
1920 		return ret;
1921 	}
1922 
1923 	ret = ufshcd_complete_dev_init(hba);
1924 	if (ret)
1925 		return ret;
1926 
1927 	/* Initialize the device descriptor sizes */
1928 	ufshcd_init_desc_sizes(hba);
1929 
1930 	ret = ufs_get_device_desc(hba, hba->dev_desc);
1931 	if (ret) {
1932 		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
1933 			__func__, ret);
1934 
1935 		return ret;
1936 	}
1937 
1938 	if (hba->dev_desc->w_spec_version == 0x1002)
1939 		hba->quirks |= UFSDEV_QUIRK_LUN_IN_SCSI_COMMANDS;
1940 
1941 	if (hba->dev_desc->w_spec_version == 0x2002)
1942 		if (hba->dev_desc->w_manufacturer_id == 0x250A ||
1943 		    hba->dev_desc->w_manufacturer_id == 0x9802 ||
1944 		    hba->dev_desc->w_manufacturer_id == 0xD60C)
1945 			hba->quirks |= UFSDEV_QUIRK_LUN_IN_SCSI_COMMANDS;
1946 
1947 	return ret;
1948 }
1949 
1950 int ufs_start(struct ufs_hba *hba)
1951 {
1952 	int ret;
1953 
1954 	ret = _ufs_start(hba);
1955 	if (ret)
1956 		return ret;
1957 
1958 #if defined(CONFIG_SUPPORT_USBPLUG)
1959 	ret = ufs_create_partition_inventory(hba);
1960 	if (ret) {
1961 		dev_err(hba->dev, "%s: Failed to create partition inventory. err = %d\n", __func__, ret);
1962 		return ret;
1963 	}
1964 #endif
1965 	if (ufshcd_get_max_pwr_mode(hba)) {
1966 		dev_err(hba->dev,
1967 			"%s: Failed getting max supported power mode\n",
1968 			__func__);
1969 	} else {
1970 		ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
1971 		if (ret) {
1972 			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
1973 				__func__, ret);
1974 
1975 			return ret;
1976 		}
1977 
1978 		printf("Device at %s up at:", hba->dev->name);
1979 		ufshcd_print_pwr_info(hba);
1980 	}
1981 
1982 #if defined(CONFIG_ROCKCHIP_UFS_RPMB)
1983 	ufs_rpmb_init(hba);
1984 #endif
1985 
1986 	return 0;
1987 }
1988 
1989 int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
1990 {
1991 	struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
1992 	struct scsi_platdata *scsi_plat;
1993 	struct udevice *scsi_dev;
1994 	int err;
1995 
1996 	device_find_first_child(ufs_dev, &scsi_dev);
1997 	if (!scsi_dev)
1998 		return -ENODEV;
1999 
2000 	scsi_plat = dev_get_uclass_platdata(scsi_dev);
2001 	scsi_plat->max_id = UFSHCD_MAX_ID;
2002 	scsi_plat->max_lun = UFS_MAX_LUNS;
2003 	//scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;
2004 
2005 	hba->dev = ufs_dev;
2006 	hba->ops = hba_ops;
2007 	hba->mmio_base = (void *)dev_read_addr(ufs_dev);
2008 
2009 	/* Set descriptor lengths to specification defaults */
2010 	ufshcd_def_desc_sizes(hba);
2011 
2012 	ufshcd_ops_init(hba);
2013 
2014 	/* Read capabilities registers */
2015 	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2016 
2017 	/* Get UFS version supported by the controller */
2018 	hba->version = ufshcd_get_ufs_version(hba);
2019 	if (hba->version != UFSHCI_VERSION_10 &&
2020 	    hba->version != UFSHCI_VERSION_11 &&
2021 	    hba->version != UFSHCI_VERSION_20 &&
2022 	    hba->version != UFSHCI_VERSION_21)
2023 		dev_err(hba->dev, "invalid UFS version 0x%x\n",
2024 			hba->version);
2025 
2026 	/* Get Interrupt bit mask per version */
2027 	hba->intr_mask = ufshcd_get_intr_mask(hba);
2028 
2029 	/* Allocate memory for host memory space */
2030 	err = ufshcd_memory_alloc(hba);
2031 	if (err) {
2032 		dev_err(hba->dev, "Memory allocation failed\n");
2033 		return err;
2034 	}
2035 
2036 	/* Configure Local data structures */
2037 	ufshcd_host_memory_configure(hba);
2038 
2039 	/*
2040 	 * In order to avoid any spurious interrupt immediately after
2041 	 * registering UFS controller interrupt handler, clear any pending UFS
2042 	 * interrupt status and disable all the UFS interrupts.
2043 	 */
2044 	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
2045 		      REG_INTERRUPT_STATUS);
2046 	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
2047 
2048 	err = ufshcd_hba_enable(hba);
2049 	if (err) {
2050 		dev_err(hba->dev, "Host controller enable failed\n");
2051 		return err;
2052 	}
2053 
2054 	err = ufs_start(hba);
2055 	if (err)
2056 		return err;
2057 
2058 	return 0;
2059 }
2060 
2061 int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
2062 {
2063 	int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
2064 				     scsi_devp);
2065 
2066 	return ret;
2067 }
2068 
2069 static struct scsi_ops ufs_ops = {
2070 	.exec		= ufs_scsi_exec,
2071 };
2072 
2073 int ufs_probe_dev(int index)
2074 {
2075 	struct udevice *dev;
2076 
2077 	return uclass_get_device(UCLASS_UFS, index, &dev);
2078 }
2079 
2080 int ufs_probe(void)
2081 {
2082 	struct udevice *dev;
2083 	int ret, i;
2084 
2085 	for (i = 0;; i++) {
2086 		ret = uclass_get_device(UCLASS_UFS, i, &dev);
2087 		if (ret == -ENODEV)
2088 			break;
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 U_BOOT_DRIVER(ufs_scsi) = {
2095 	.id = UCLASS_SCSI,
2096 	.name = "ufs_scsi",
2097 	.ops = &ufs_ops,
2098 };
2099