xref: /OK3568_Linux_fs/u-boot/drivers/ufs/ufs.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0+
2 /**
3  * ufs.c - Universal Flash Storage (UFS) driver
4  *
5  * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
6  * to u-boot.
7  *
8  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
9  */
10 #include <charset.h>
11 #include <common.h>
12 #include <dm.h>
13 #include <log.h>
14 #include <dm/lists.h>
15 #include <dm/device-internal.h>
16 #include <malloc.h>
17 #include <hexdump.h>
18 #include <scsi.h>
19 #include <asm/io.h>
20 #include <asm/dma-mapping.h>
21 #include <linux/bitops.h>
22 #include <linux/delay.h>
23 
24 #include "ufs.h"
25 
26 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
27 				 UTP_TASK_REQ_COMPL |\
28 				 UFSHCD_ERROR_MASK)
29 /* maximum number of link-startup retries */
30 #define DME_LINKSTARTUP_RETRIES 3
31 
32 /* maximum number of retries for a general UIC command  */
33 #define UFS_UIC_COMMAND_RETRIES 3
34 
35 /* Query request retries */
36 #define QUERY_REQ_RETRIES 3
37 /* Query request timeout */
38 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
39 
40 /* maximum timeout in ms for a general UIC command */
41 #define UFS_UIC_CMD_TIMEOUT	1000
42 /* NOP OUT retries waiting for NOP IN response */
43 #define NOP_OUT_RETRIES    10
44 /* Timeout after 30 msecs if NOP OUT hangs without response */
45 #define NOP_OUT_TIMEOUT    30 /* msecs */
46 
47 /* Only use one Task Tag for all requests */
48 #define TASK_TAG	0
49 
50 /* Expose the flag value from utp_upiu_query.value */
51 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
52 
53 #define MAX_PRDT_ENTRY	262144
54 
55 /* maximum bytes per request */
56 #define UFS_MAX_BYTES	(128 * 256 * 1024)
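/* i.e. 128 PRDT entries of MAX_PRDT_ENTRY (256 KiB) bytes each, 32 MiB in total */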
57 
58 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
59 static inline void ufshcd_hba_stop(struct ufs_hba *hba);
60 static int ufshcd_hba_enable(struct ufs_hba *hba);
61 
62 /*
63  * ufshcd_wait_for_register - wait for register value to change
64  */
65 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
66 				    u32 val, unsigned long timeout_ms)
67 {
68 	int err = 0;
69 	unsigned long start = get_timer(0);
70 
71 	/* ignore bits that we don't intend to wait on */
72 	val = val & mask;
73 
74 	while ((ufshcd_readl(hba, reg) & mask) != val) {
75 		if (get_timer(start) > timeout_ms) {
76 			if ((ufshcd_readl(hba, reg) & mask) != val)
77 				err = -ETIMEDOUT;
78 			break;
79 		}
80 	}
81 
82 	return err;
83 }
84 
85 /**
86  * ufshcd_init_pwr_info - setting the POR (power on reset)
87  * values in hba power info
88  */
89 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
90 {
91 	hba->pwr_info.gear_rx = UFS_PWM_G1;
92 	hba->pwr_info.gear_tx = UFS_PWM_G1;
93 	hba->pwr_info.lane_rx = 1;
94 	hba->pwr_info.lane_tx = 1;
95 	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
96 	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
97 	hba->pwr_info.hs_rate = 0;
98 }
99 
100 /**
101  * ufshcd_print_pwr_info - print power params as saved in hba
102  * power info
103  */
104 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
105 {
106 	static const char * const names[] = {
107 		"INVALID MODE",
108 		"FAST MODE",
109 		"SLOW_MODE",
110 		"INVALID MODE",
111 		"FASTAUTO_MODE",
112 		"SLOWAUTO_MODE",
113 		"INVALID MODE",
114 	};
115 
116 	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
117 		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
118 		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
119 		names[hba->pwr_info.pwr_rx],
120 		names[hba->pwr_info.pwr_tx],
121 		hba->pwr_info.hs_rate);
122 }
123 
124 /**
125  * ufshcd_ready_for_uic_cmd - Check if controller is ready
126  *                            to accept UIC commands
127  */
128 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
129 {
130 	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
131 		return true;
132 	else
133 		return false;
134 }
135 
136 /**
137  * ufshcd_get_uic_cmd_result - Get the UIC command result
138  */
139 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
140 {
141 	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
142 	       MASK_UIC_COMMAND_RESULT;
143 }
144 
145 /**
146  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
147  */
148 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
149 {
150 	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
151 }
152 
153 /**
154  * ufshcd_is_device_present - Check if any device is connected to
155  *			      the host controller
156  */
157 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
158 {
159 	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
160 						DEVICE_PRESENT) ? true : false;
161 }
162 
163 /**
164  * ufshcd_send_uic_cmd - UFS Interconnect layer command API
165  *
166  */
167 static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
168 {
169 	unsigned long start = 0;
170 	u32 intr_status;
171 	u32 enabled_intr_status;
172 
173 	if (!ufshcd_ready_for_uic_cmd(hba)) {
174 		dev_err(hba->dev,
175 			"Controller not ready to accept UIC commands\n");
176 		return -EIO;
177 	}
178 
179 	debug("sending uic command:%d\n", uic_cmd->command);
180 
181 	/* Write Args */
182 	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
183 	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
184 	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
185 
186 	/* Write UIC Cmd */
187 	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
188 		      REG_UIC_COMMAND);
189 
190 	start = get_timer(0);
191 	do {
192 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
193 		enabled_intr_status = intr_status & hba->intr_mask;
194 		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
195 
196 		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
197 			dev_err(hba->dev,
198 				"Timedout waiting for UIC response\n");
199 
200 			return -ETIMEDOUT;
201 		}
202 
203 		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
204 			dev_err(hba->dev, "Error in status:%08x\n",
205 				enabled_intr_status);
206 
207 			return -1;
208 		}
209 	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));
210 
211 	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
212 	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);
213 
214 	debug("Sent successfully\n");
215 
216 	return 0;
217 }
218 
219 /**
220  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
221  *
222  */
223 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
224 			u32 mib_val, u8 peer)
225 {
226 	struct uic_command uic_cmd = {0};
227 	static const char *const action[] = {
228 		"dme-set",
229 		"dme-peer-set"
230 	};
231 	const char *set = action[!!peer];
232 	int ret;
233 	int retries = UFS_UIC_COMMAND_RETRIES;
234 
235 	uic_cmd.command = peer ?
236 		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
237 	uic_cmd.argument1 = attr_sel;
238 	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
239 	uic_cmd.argument3 = mib_val;
240 
241 	do {
242 		/* for peer attributes we retry upon failure */
243 		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
244 		if (ret)
245 			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
246 				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
247 	} while (ret && peer && --retries);
248 
249 	if (ret)
250 		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
251 			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
252 			UFS_UIC_COMMAND_RETRIES - retries);
253 
254 	return ret;
255 }
256 
257 /**
258  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
259  *
260  */
261 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
262 			u32 *mib_val, u8 peer)
263 {
264 	struct uic_command uic_cmd = {0};
265 	static const char *const action[] = {
266 		"dme-get",
267 		"dme-peer-get"
268 	};
269 	const char *get = action[!!peer];
270 	int ret;
271 	int retries = UFS_UIC_COMMAND_RETRIES;
272 
273 	uic_cmd.command = peer ?
274 		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
275 	uic_cmd.argument1 = attr_sel;
276 
277 	do {
278 		/* for peer attributes we retry upon failure */
279 		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
280 		if (ret)
281 			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
282 				get, UIC_GET_ATTR_ID(attr_sel), ret);
283 	} while (ret && peer && --retries);
284 
285 	if (ret)
286 		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
287 			get, UIC_GET_ATTR_ID(attr_sel),
288 			UFS_UIC_COMMAND_RETRIES - retries);
289 
290 	if (mib_val && !ret)
291 		*mib_val = uic_cmd.argument3;
292 
293 	return ret;
294 }
295 
296 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
297 {
298 	u32 tx_lanes, i, err = 0;
299 
300 	if (!peer)
301 		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
302 			       &tx_lanes);
303 	else
304 		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
305 				    &tx_lanes);
306 	for (i = 0; i < tx_lanes; i++) {
307 		if (!peer)
308 			err = ufshcd_dme_set(hba,
309 					     UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
310 					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
311 					     0);
312 		else
313 			err = ufshcd_dme_peer_set(hba,
314 					UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
315 					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
316 					0);
317 		if (err) {
318 			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
319 				__func__, peer, i, err);
320 			break;
321 		}
322 	}
323 
324 	return err;
325 }
326 
327 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
328 {
329 	return ufshcd_disable_tx_lcc(hba, true);
330 }
331 
332 /**
333  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
334  *
335  */
336 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
337 {
338 	struct uic_command uic_cmd = {0};
339 	int ret;
340 
341 	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
342 
343 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
344 	if (ret)
345 		dev_dbg(hba->dev,
346 			"dme-link-startup: error code %d\n", ret);
347 	return ret;
348 }
349 
350 /**
351  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
352  *
353  */
354 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
355 {
356 	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
357 }
358 
359 /**
360  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
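 * Returns 0 when all three ready bits are set, non-zero otherwise.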
361  */
362 static inline int ufshcd_get_lists_status(u32 reg)
363 {
364 	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
365 }
366 
367 /**
368  * ufshcd_enable_run_stop_reg - Enable run-stop registers,
369  *			When the run-stop registers are set to 1, it indicates to the
370  *			host controller that it can process requests
371  */
372 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
373 {
374 	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
375 		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
376 	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
377 		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
378 }
379 
380 /**
381  * ufshcd_enable_intr - enable interrupts
382  */
383 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
384 {
385 	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
386 	u32 rw;
387 
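	/*
	 * UFSHCI 1.0 defines only some bits of the interrupt enable register
	 * as read-write (INTERRUPT_MASK_RW_VER_10), so preserve those and
	 * fold in just the newly requested interrupts instead of a plain OR.
	 */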
388 	if (hba->version == UFSHCI_VERSION_10) {
389 		rw = set & INTERRUPT_MASK_RW_VER_10;
390 		set = rw | ((set ^ intrs) & intrs);
391 	} else {
392 		set |= intrs;
393 	}
394 
395 	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
396 
397 	hba->intr_mask = set;
398 }
399 
400 /**
401  * ufshcd_make_hba_operational - Make UFS controller operational
402  *
403  * To bring UFS host controller to operational state,
404  * 1. Enable required interrupts
405  * 2. Configure interrupt aggregation
406  * 3. Program UTRL and UTMRL base address
407  * 4. Configure run-stop-registers
408  *
409  */
410 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
411 {
412 	int err = 0;
413 	u32 reg;
414 
415 	/* Enable required interrupts */
416 	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
417 
418 	/* Disable interrupt aggregation */
419 	ufshcd_disable_intr_aggr(hba);
420 
421 	/* Configure UTRL and UTMRL base address registers */
422 	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
423 		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
424 	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
425 		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
426 	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
427 		      REG_UTP_TASK_REQ_LIST_BASE_L);
428 	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
429 		      REG_UTP_TASK_REQ_LIST_BASE_H);
430 
431 	/*
432 	 * UCRDY, UTMRLRDY and UTRLRDY bits must be 1
433 	 */
434 	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
435 	if (!(ufshcd_get_lists_status(reg))) {
436 		ufshcd_enable_run_stop_reg(hba);
437 	} else {
438 		dev_err(hba->dev,
439 			"Host controller not ready to process requests");
440 		err = -EIO;
441 		goto out;
442 	}
443 
444 out:
445 	return err;
446 }
447 
448 /**
449  * ufshcd_link_startup - Initialize unipro link startup
450  */
451 static int ufshcd_link_startup(struct ufs_hba *hba)
452 {
453 	int ret;
454 	int retries = DME_LINKSTARTUP_RETRIES;
455 	bool link_startup_again = true;
456 
457 link_startup:
458 	do {
459 		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);
460 
461 		ret = ufshcd_dme_link_startup(hba);
462 
463 		/* check if device is detected by inter-connect layer */
464 		if (!ret && !ufshcd_is_device_present(hba)) {
465 			dev_err(hba->dev, "%s: Device not present\n", __func__);
466 			ret = -ENXIO;
467 			goto out;
468 		}
469 
470 		/*
471 		 * DME link lost indication is only received when link is up,
472 		 * but we can't be sure if the link is up until link startup
473 		 * succeeds. So reset the local Uni-Pro and try again.
474 		 */
475 		if (ret && ufshcd_hba_enable(hba))
476 			goto out;
477 	} while (ret && retries--);
478 
479 	if (ret)
480 		/* failed to get the link up... retire */
481 		goto out;
482 
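	/*
	 * Run the link startup sequence one more time even after success,
	 * presumably to let the link settle; this mirrors the Linux driver.
	 */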
483 	if (link_startup_again) {
484 		link_startup_again = false;
485 		retries = DME_LINKSTARTUP_RETRIES;
486 		goto link_startup;
487 	}
488 
489 	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
490 	ufshcd_init_pwr_info(hba);
491 
492 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
493 		ret = ufshcd_disable_device_tx_lcc(hba);
494 		if (ret)
495 			goto out;
496 	}
497 
498 	/* Include any host controller configuration via UIC commands */
499 	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
500 	if (ret)
501 		goto out;
502 
503 	ret = ufshcd_make_hba_operational(hba);
504 out:
505 	if (ret)
506 		dev_err(hba->dev, "link startup failed %d\n", ret);
507 
508 	return ret;
509 }
510 
511 /**
512  * ufshcd_hba_stop - Send controller to reset state
513  */
514 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
515 {
516 	int err;
517 
518 	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
519 	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
520 				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
521 				       10);
522 	if (err)
523 		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
524 }
525 
526 /**
527  * ufshcd_is_hba_active - Get controller state
528  */
529 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
530 {
531 	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
532 		? false : true;
533 }
534 
535 /**
536  * ufshcd_hba_start - Start controller initialization sequence
537  */
538 static inline void ufshcd_hba_start(struct ufs_hba *hba)
539 {
540 	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
541 }
542 
543 /**
544  * ufshcd_hba_enable - initialize the controller
545  */
546 static int ufshcd_hba_enable(struct ufs_hba *hba)
547 {
548 	int retry;
549 
550 	if (!ufshcd_is_hba_active(hba))
551 		/* change controller state to "reset state" */
552 		ufshcd_hba_stop(hba);
553 
554 	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);
555 
556 	/* start controller initialization sequence */
557 	ufshcd_hba_start(hba);
558 
559 	/*
560 	 * To initialize a UFS host controller HCE bit must be set to 1.
561 	 * During initialization the HCE bit value changes from 1->0->1.
562 	 * When the host controller completes initialization sequence
563 	 * it sets the value of HCE bit to 1. The same HCE bit is read back
564 	 * to check if the controller has completed initialization sequence.
565 	 * So without this delay the value HCE = 1, set in the previous
566 	 * instruction might be read back.
567 	 * This delay can be changed based on the controller.
568 	 */
569 	mdelay(1);
570 
571 	/* wait for the host controller to complete initialization */
572 	retry = 10;
573 	while (ufshcd_is_hba_active(hba)) {
574 		if (retry) {
575 			retry--;
576 		} else {
577 			dev_err(hba->dev, "Controller enable failed\n");
578 			return -EIO;
579 		}
580 		mdelay(5);
581 	}
582 
583 	/* enable UIC related interrupts */
584 	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
585 
586 	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);
587 
588 	return 0;
589 }
590 
591 /**
592  * ufshcd_host_memory_configure - configure local reference block with
593  *				memory offsets
594  */
595 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
596 {
597 	struct utp_transfer_req_desc *utrdlp;
598 	dma_addr_t cmd_desc_dma_addr;
599 	u16 response_offset;
600 	u16 prdt_offset;
601 
602 	utrdlp = hba->utrdl;
603 	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;
604 
605 	utrdlp->command_desc_base_addr_lo =
606 				cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
607 	utrdlp->command_desc_base_addr_hi =
608 				cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));
609 
610 	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
611 	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
612 
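	/* The UTRD offset and length fields are in 32-bit (dword) units, hence ">> 2" */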
613 	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
614 	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
615 	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
616 
617 	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
618 	hba->ucd_rsp_ptr =
619 		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
620 	hba->ucd_prdt_ptr =
621 		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
622 }
623 
624 /**
625  * ufshcd_memory_alloc - allocate memory for host memory space data structures
626  */
627 static int ufshcd_memory_alloc(struct ufs_hba *hba)
628 {
629 	/* Allocate one Transfer Request Descriptor
630 	 * Should be aligned to 1k boundary.
631 	 */
632 	hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
633 	if (!hba->utrdl) {
634 		dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
635 		return -ENOMEM;
636 	}
637 
638 	/* Allocate one Command Descriptor
639 	 * Should be aligned to 1k boundary.
640 	 */
641 	hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
642 	if (!hba->ucdl) {
643 		dev_err(hba->dev, "Command descriptor memory allocation failed\n");
644 		return -ENOMEM;
645 	}
646 
647 	return 0;
648 }
649 
650 /**
651  * ufshcd_get_intr_mask - Get the interrupt bit mask
652  */
653 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
654 {
655 	u32 intr_mask = 0;
656 
657 	switch (hba->version) {
658 	case UFSHCI_VERSION_10:
659 		intr_mask = INTERRUPT_MASK_ALL_VER_10;
660 		break;
661 	case UFSHCI_VERSION_11:
662 	case UFSHCI_VERSION_20:
663 		intr_mask = INTERRUPT_MASK_ALL_VER_11;
664 		break;
665 	case UFSHCI_VERSION_21:
666 	default:
667 		intr_mask = INTERRUPT_MASK_ALL_VER_21;
668 		break;
669 	}
670 
671 	return intr_mask;
672 }
673 
674 /**
675  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
676  */
677 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
678 {
679 	return ufshcd_readl(hba, REG_UFS_VERSION);
680 }
681 
682 /**
683  * ufshcd_get_upmcrs - Get the power mode change request status
684  */
685 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
686 {
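	/* UPMCRS lives in bits 10:8 of the Host Controller Status register */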
687 	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
688 }
689 
690 /**
691  * ufshcd_prepare_req_desc_hdr() - Fill the request's header
692  * descriptor according to the request
693  */
694 static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
695 					u32 *upiu_flags,
696 					enum dma_data_direction cmd_dir)
697 {
698 	u32 data_direction;
699 	u32 dword_0;
700 
701 	if (cmd_dir == DMA_FROM_DEVICE) {
702 		data_direction = UTP_DEVICE_TO_HOST;
703 		*upiu_flags = UPIU_CMD_FLAGS_READ;
704 	} else if (cmd_dir == DMA_TO_DEVICE) {
705 		data_direction = UTP_HOST_TO_DEVICE;
706 		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
707 	} else {
708 		data_direction = UTP_NO_DATA_TRANSFER;
709 		*upiu_flags = UPIU_CMD_FLAGS_NONE;
710 	}
711 
712 	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);
713 
714 	/* Enable Interrupt for command */
715 	dword_0 |= UTP_REQ_DESC_INT_CMD;
716 
717 	/* Transfer request descriptor header fields */
718 	req_desc->header.dword_0 = cpu_to_le32(dword_0);
719 	/* dword_1 is reserved, hence it is set to 0 */
720 	req_desc->header.dword_1 = 0;
721 	/*
722 	 * assigning invalid value for command status. Controller
723 	 * updates OCS on command completion, with the command
724 	 * status
725 	 */
726 	req_desc->header.dword_2 =
727 		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
728 	/* dword_3 is reserved, hence it is set to 0 */
729 	req_desc->header.dword_3 = 0;
730 
731 	req_desc->prd_table_length = 0;
732 }
733 
734 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
735 					      u32 upiu_flags)
736 {
737 	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
738 	struct ufs_query *query = &hba->dev_cmd.query;
739 	u16 len = be16_to_cpu(query->request.upiu_req.length);
740 
741 	/* Query request header */
742 	ucd_req_ptr->header.dword_0 =
743 				UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
744 						  upiu_flags, 0, TASK_TAG);
745 	ucd_req_ptr->header.dword_1 =
746 				UPIU_HEADER_DWORD(0, query->request.query_func,
747 						  0, 0);
748 
749 	/* Data segment length is only needed for WRITE_DESC */
750 	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
751 		ucd_req_ptr->header.dword_2 =
752 				UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
753 	else
754 		ucd_req_ptr->header.dword_2 = 0;
755 
756 	/* Copy the Query Request buffer as is */
757 	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);
758 
759 	/* Copy the Descriptor */
760 	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
761 		memcpy(ucd_req_ptr + 1, query->descriptor, len);
762 
763 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
764 }
765 
766 static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
767 {
768 	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
769 
770 	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
771 
772 	/* command descriptor fields */
773 	ucd_req_ptr->header.dword_0 =
774 			UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG);
775 	/* clear rest of the fields of basic header */
776 	ucd_req_ptr->header.dword_1 = 0;
777 	ucd_req_ptr->header.dword_2 = 0;
778 
779 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
780 }
781 
782 /**
783  * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
784  *			     for Device Management Purposes
785  */
786 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
787 				   enum dev_cmd_type cmd_type)
788 {
789 	u32 upiu_flags;
790 	int ret = 0;
791 	struct utp_transfer_req_desc *req_desc = hba->utrdl;
792 
793 	hba->dev_cmd.type = cmd_type;
794 
795 	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
796 	switch (cmd_type) {
797 	case DEV_CMD_TYPE_QUERY:
798 		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
799 		break;
800 	case DEV_CMD_TYPE_NOP:
801 		ufshcd_prepare_utp_nop_upiu(hba);
802 		break;
803 	default:
804 		ret = -EINVAL;
805 	}
806 
807 	return ret;
808 }
809 
810 static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
811 {
812 	unsigned long start;
813 	u32 intr_status;
814 	u32 enabled_intr_status;
815 
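	/*
	 * Ring the doorbell: setting bit <task_tag> hands the descriptor to
	 * the controller; then poll the interrupt status register until the
	 * transfer completes, reports an error, or times out.
	 */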
816 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
817 
818 	start = get_timer(0);
819 	do {
820 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
821 		enabled_intr_status = intr_status & hba->intr_mask;
822 		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
823 
824 		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
825 			dev_err(hba->dev,
826 				"Timedout waiting for UTP response\n");
827 
828 			return -ETIMEDOUT;
829 		}
830 
831 		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
832 			dev_err(hba->dev, "Error in status:%08x\n",
833 				enabled_intr_status);
834 
835 			return -1;
836 		}
837 	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));
838 
839 	return 0;
840 }
841 
842 /**
843  * ufshcd_get_req_rsp - returns the TR response transaction type
844  */
845 static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
846 {
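	/* The transaction type is byte 0 of the UPIU header, i.e. bits 31:24 of big-endian dword_0 */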
847 	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
848 }
849 
850 /**
851  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
852  *
853  */
854 static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
855 {
856 	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
857 }
858 
859 static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
860 {
861 	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
862 }
863 
864 static int ufshcd_check_query_response(struct ufs_hba *hba)
865 {
866 	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
867 
868 	/* Get the UPIU response */
869 	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
870 				UPIU_RSP_CODE_OFFSET;
871 	return query_res->response;
872 }
873 
874 /**
875  * ufshcd_copy_query_response() - Copy the Query Response and the data
876  * descriptor
877  */
878 static int ufshcd_copy_query_response(struct ufs_hba *hba)
879 {
880 	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
881 
882 	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
883 
884 	/* Get the descriptor */
885 	if (hba->dev_cmd.query.descriptor &&
886 	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
887 		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
888 				GENERAL_UPIU_REQUEST_SIZE;
889 		u16 resp_len;
890 		u16 buf_len;
891 
892 		/* data segment length */
893 		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
894 						MASK_QUERY_DATA_SEG_LEN;
895 		buf_len =
896 			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
897 		if (likely(buf_len >= resp_len)) {
898 			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
899 		} else {
900 			dev_warn(hba->dev,
901 				 "%s: Response size is bigger than buffer",
902 				 __func__);
903 			return -EINVAL;
904 		}
905 	}
906 
907 	return 0;
908 }
909 
910 /**
911  * ufshcd_exec_dev_cmd - API for sending device management requests
912  */
913 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
914 			       int timeout)
915 {
916 	int err;
917 	int resp;
918 
919 	err = ufshcd_comp_devman_upiu(hba, cmd_type);
920 	if (err)
921 		return err;
922 
923 	err = ufshcd_send_command(hba, TASK_TAG);
924 	if (err)
925 		return err;
926 
927 	err = ufshcd_get_tr_ocs(hba);
928 	if (err) {
929 		dev_err(hba->dev, "Error in OCS:%d\n", err);
930 		return -EINVAL;
931 	}
932 
933 	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
934 	switch (resp) {
935 	case UPIU_TRANSACTION_NOP_IN:
936 		break;
937 	case UPIU_TRANSACTION_QUERY_RSP:
938 		err = ufshcd_check_query_response(hba);
939 		if (!err)
940 			err = ufshcd_copy_query_response(hba);
941 		break;
942 	case UPIU_TRANSACTION_REJECT_UPIU:
943 		/* TODO: handle Reject UPIU Response */
944 		err = -EPERM;
945 		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
946 			__func__);
947 		break;
948 	default:
949 		err = -EINVAL;
950 		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
951 			__func__, resp);
952 	}
953 
954 	return err;
955 }
956 
957 /**
958  * ufshcd_init_query() - init the query response and request parameters
959  */
960 static inline void ufshcd_init_query(struct ufs_hba *hba,
961 				     struct ufs_query_req **request,
962 				     struct ufs_query_res **response,
963 				     enum query_opcode opcode,
964 				     u8 idn, u8 index, u8 selector)
965 {
966 	*request = &hba->dev_cmd.query.request;
967 	*response = &hba->dev_cmd.query.response;
968 	memset(*request, 0, sizeof(struct ufs_query_req));
969 	memset(*response, 0, sizeof(struct ufs_query_res));
970 	(*request)->upiu_req.opcode = opcode;
971 	(*request)->upiu_req.idn = idn;
972 	(*request)->upiu_req.index = index;
973 	(*request)->upiu_req.selector = selector;
974 }
975 
976 /**
977  * ufshcd_query_flag() - API function for sending flag query requests
978  */
979 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
980 		      enum flag_idn idn, bool *flag_res)
981 {
982 	struct ufs_query_req *request = NULL;
983 	struct ufs_query_res *response = NULL;
984 	int err, index = 0, selector = 0;
985 	int timeout = QUERY_REQ_TIMEOUT;
986 
987 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
988 			  selector);
989 
990 	switch (opcode) {
991 	case UPIU_QUERY_OPCODE_SET_FLAG:
992 	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
993 	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
994 		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
995 		break;
996 	case UPIU_QUERY_OPCODE_READ_FLAG:
997 		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
998 		if (!flag_res) {
999 			/* No dummy reads */
1000 			dev_err(hba->dev, "%s: Invalid argument for read request\n",
1001 				__func__);
1002 			err = -EINVAL;
1003 			goto out;
1004 		}
1005 		break;
1006 	default:
1007 		dev_err(hba->dev,
1008 			"%s: Expected query flag opcode but got = %d\n",
1009 			__func__, opcode);
1010 		err = -EINVAL;
1011 		goto out;
1012 	}
1013 
1014 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
1015 
1016 	if (err) {
1017 		dev_err(hba->dev,
1018 			"%s: Sending flag query for idn %d failed, err = %d\n",
1019 			__func__, idn, err);
1020 		goto out;
1021 	}
1022 
1023 	if (flag_res)
1024 		*flag_res = (be32_to_cpu(response->upiu_res.value) &
1025 				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1026 
1027 out:
1028 	return err;
1029 }
1030 
1031 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1032 				   enum query_opcode opcode,
1033 				   enum flag_idn idn, bool *flag_res)
1034 {
1035 	int ret;
1036 	int retries;
1037 
1038 	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1039 		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1040 		if (ret)
1041 			dev_dbg(hba->dev,
1042 				"%s: failed with error %d, retries %d\n",
1043 				__func__, ret, retries);
1044 		else
1045 			break;
1046 	}
1047 
1048 	if (ret)
1049 		dev_err(hba->dev,
1050 			"%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
1051 			__func__, opcode, idn, ret, retries);
1052 	return ret;
1053 }
1054 
1055 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
1056 				     enum query_opcode opcode,
1057 				     enum desc_idn idn, u8 index, u8 selector,
1058 				     u8 *desc_buf, int *buf_len)
1059 {
1060 	struct ufs_query_req *request = NULL;
1061 	struct ufs_query_res *response = NULL;
1062 	int err;
1063 
1064 	if (!desc_buf) {
1065 		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1066 			__func__, opcode);
1067 		err = -EINVAL;
1068 		goto out;
1069 	}
1070 
1071 	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1072 		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1073 			__func__, *buf_len);
1074 		err = -EINVAL;
1075 		goto out;
1076 	}
1077 
1078 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1079 			  selector);
1080 	hba->dev_cmd.query.descriptor = desc_buf;
1081 	request->upiu_req.length = cpu_to_be16(*buf_len);
1082 
1083 	switch (opcode) {
1084 	case UPIU_QUERY_OPCODE_WRITE_DESC:
1085 		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1086 		break;
1087 	case UPIU_QUERY_OPCODE_READ_DESC:
1088 		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1089 		break;
1090 	default:
1091 		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1092 			__func__, opcode);
1093 		err = -EINVAL;
1094 		goto out;
1095 	}
1096 
1097 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1098 
1099 	if (err) {
1100 		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
1101 			__func__, opcode, idn, index, err);
1102 		goto out;
1103 	}
1104 
1105 	hba->dev_cmd.query.descriptor = NULL;
1106 	*buf_len = be16_to_cpu(response->upiu_res.length);
1107 
1108 out:
1109 	return err;
1110 }
1111 
1112 /**
1113  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
1114  */
1115 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
1116 				  enum desc_idn idn, u8 index, u8 selector,
1117 				  u8 *desc_buf, int *buf_len)
1118 {
1119 	int err;
1120 	int retries;
1121 
1122 	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1123 		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
1124 						selector, desc_buf, buf_len);
1125 		if (!err || err == -EINVAL)
1126 			break;
1127 	}
1128 
1129 	return err;
1130 }
1131 
1132 /**
1133  * ufshcd_read_desc_length - read the specified descriptor length from header
1134  */
1135 static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
1136 				   int desc_index, int *desc_length)
1137 {
1138 	int ret;
1139 	u8 header[QUERY_DESC_HDR_SIZE];
1140 	int header_len = QUERY_DESC_HDR_SIZE;
1141 
1142 	if (desc_id >= QUERY_DESC_IDN_MAX)
1143 		return -EINVAL;
1144 
1145 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
1146 					    desc_id, desc_index, 0, header,
1147 					    &header_len);
1148 
1149 	if (ret) {
1150 		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
1151 			__func__, desc_id);
1152 		return ret;
1153 	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
1154 		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
1155 			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
1156 			 desc_id);
1157 		ret = -EINVAL;
1158 	}
1159 
1160 	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
1161 
1162 	return ret;
1163 }
1164 
1165 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
1166 {
1167 	int err;
1168 
1169 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
1170 				      &hba->desc_size.dev_desc);
1171 	if (err)
1172 		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1173 
1174 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
1175 				      &hba->desc_size.pwr_desc);
1176 	if (err)
1177 		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1178 
1179 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
1180 				      &hba->desc_size.interc_desc);
1181 	if (err)
1182 		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1183 
1184 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
1185 				      &hba->desc_size.conf_desc);
1186 	if (err)
1187 		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1188 
1189 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
1190 				      &hba->desc_size.unit_desc);
1191 	if (err)
1192 		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1193 
1194 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
1195 				      &hba->desc_size.geom_desc);
1196 	if (err)
1197 		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1198 
1199 	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
1200 				      &hba->desc_size.hlth_desc);
1201 	if (err)
1202 		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1203 }
1204 
1205 /**
1206  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
1207  *
1208  */
1209 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
1210 				 int *desc_len)
1211 {
1212 	switch (desc_id) {
1213 	case QUERY_DESC_IDN_DEVICE:
1214 		*desc_len = hba->desc_size.dev_desc;
1215 		break;
1216 	case QUERY_DESC_IDN_POWER:
1217 		*desc_len = hba->desc_size.pwr_desc;
1218 		break;
1219 	case QUERY_DESC_IDN_GEOMETRY:
1220 		*desc_len = hba->desc_size.geom_desc;
1221 		break;
1222 	case QUERY_DESC_IDN_CONFIGURATION:
1223 		*desc_len = hba->desc_size.conf_desc;
1224 		break;
1225 	case QUERY_DESC_IDN_UNIT:
1226 		*desc_len = hba->desc_size.unit_desc;
1227 		break;
1228 	case QUERY_DESC_IDN_INTERCONNECT:
1229 		*desc_len = hba->desc_size.interc_desc;
1230 		break;
1231 	case QUERY_DESC_IDN_STRING:
1232 		*desc_len = QUERY_DESC_MAX_SIZE;
1233 		break;
1234 	case QUERY_DESC_IDN_HEALTH:
1235 		*desc_len = hba->desc_size.hlth_desc;
1236 		break;
1237 	case QUERY_DESC_IDN_RFU_0:
1238 	case QUERY_DESC_IDN_RFU_1:
1239 		*desc_len = 0;
1240 		break;
1241 	default:
1242 		*desc_len = 0;
1243 		return -EINVAL;
1244 	}
1245 	return 0;
1246 }
1247 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
1248 
1249 /**
1250  * ufshcd_read_desc_param - read the specified descriptor parameter
1251  *
1252  */
1253 int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
1254 			   int desc_index, u8 param_offset, u8 *param_read_buf,
1255 			   u8 param_size)
1256 {
1257 	int ret;
1258 	u8 *desc_buf;
1259 	int buff_len;
1260 	bool is_kmalloc = true;
1261 
1262 	/* Safety check */
1263 	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
1264 		return -EINVAL;
1265 
1266 	/* Get the max length of descriptor from structure filled up at probe
1267 	 * time.
1268 	 */
1269 	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
1270 
1271 	/* Sanity checks */
1272 	if (ret || !buff_len) {
1273 		dev_err(hba->dev, "%s: Failed to get full descriptor length",
1274 			__func__);
1275 		return ret;
1276 	}
1277 
1278 	/* Check whether we need temp memory */
1279 	if (param_offset != 0 || param_size < buff_len) {
1280 		desc_buf = kmalloc(buff_len, GFP_KERNEL);
1281 		if (!desc_buf)
1282 			return -ENOMEM;
1283 	} else {
1284 		desc_buf = param_read_buf;
1285 		is_kmalloc = false;
1286 	}
1287 
1288 	/* Request for full descriptor */
1289 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
1290 					    desc_id, desc_index, 0, desc_buf,
1291 					    &buff_len);
1292 
1293 	if (ret) {
1294 		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
1295 			__func__, desc_id, desc_index, param_offset, ret);
1296 		goto out;
1297 	}
1298 
1299 	/* Sanity check */
1300 	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
1301 		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
1302 			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
1303 		ret = -EINVAL;
1304 		goto out;
1305 	}
1306 
1307 	/* Check that we will not copy more data than is available */
1308 	if (is_kmalloc && param_size > buff_len)
1309 		param_size = buff_len;
1310 
1311 	if (is_kmalloc)
1312 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1313 out:
1314 	if (is_kmalloc)
1315 		kfree(desc_buf);
1316 	return ret;
1317 }
1318 
1319 /* replace non-printable or non-ASCII characters with spaces */
1320 static inline void ufshcd_remove_non_printable(uint8_t *val)
1321 {
1322 	if (!val)
1323 		return;
1324 
1325 	if (*val < 0x20 || *val > 0x7e)
1326 		*val = ' ';
1327 }
1328 
1329 /**
1330  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
1331  * state) and waits for it to take effect.
1332  *
1333  */
1334 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
1335 {
1336 	unsigned long start = 0;
1337 	u8 status;
1338 	int ret;
1339 
1340 	ret = ufshcd_send_uic_cmd(hba, cmd);
1341 	if (ret) {
1342 		dev_err(hba->dev,
1343 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
1344 			cmd->command, cmd->argument3, ret);
1345 
1346 		return ret;
1347 	}
1348 
1349 	start = get_timer(0);
1350 	do {
1351 		status = ufshcd_get_upmcrs(hba);
1352 		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
1353 			dev_err(hba->dev,
1354 				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
1355 				cmd->command, status);
1356 			ret = (status != PWR_OK) ? status : -1;
1357 			break;
1358 		}
1359 	} while (status != PWR_LOCAL);
1360 
1361 	return ret;
1362 }
1363 
1364 /**
1365  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
1366  *				using DME_SET primitives.
1367  */
1368 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1369 {
1370 	struct uic_command uic_cmd = {0};
1371 	int ret;
1372 
1373 	uic_cmd.command = UIC_CMD_DME_SET;
1374 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
1375 	uic_cmd.argument3 = mode;
1376 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
1377 
1378 	return ret;
1379 }
1380 
1381 static
1382 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
1383 				      struct scsi_cmd *pccb, u32 upiu_flags)
1384 {
1385 	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
1386 	unsigned int cdb_len;
1387 
1388 	/* command descriptor fields */
1389 	ucd_req_ptr->header.dword_0 =
1390 			UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
1391 					  pccb->lun, TASK_TAG);
1392 	ucd_req_ptr->header.dword_1 =
1393 			UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
1394 
1395 	/* Total EHS length and Data segment length will be zero */
1396 	ucd_req_ptr->header.dword_2 = 0;
1397 
1398 	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);
1399 
1400 	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
1401 	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
1402 	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
1403 
1404 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
1405 }
1406 
1407 static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
1408 				     unsigned char *buf, ulong len)
1409 {
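	/*
	 * The PRDT data byte count is zero-based, so callers pass "length - 1";
	 * per the UFSHCI spec the two low bits must remain set (dword
	 * granularity), which GENMASK(1, 0) enforces.
	 */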
1410 	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
1411 	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
1412 	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
1413 }
1414 
1415 static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
1416 {
1417 	struct utp_transfer_req_desc *req_desc = hba->utrdl;
1418 	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
1419 	ulong datalen = pccb->datalen;
1420 	int table_length;
1421 	u8 *buf;
1422 	int i;
1423 
1424 	if (!datalen) {
1425 		req_desc->prd_table_length = 0;
1426 		return;
1427 	}
1428 
1429 	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
1430 	buf = pccb->pdata;
1431 	i = table_length;
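	/*
	 * Every entry except the last covers MAX_PRDT_ENTRY bytes; the final
	 * entry below covers the remainder. Sizes passed down are zero-based,
	 * hence the "- 1".
	 */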
1432 	while (--i) {
1433 		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
1434 				  MAX_PRDT_ENTRY - 1);
1435 		buf += MAX_PRDT_ENTRY;
1436 		datalen -= MAX_PRDT_ENTRY;
1437 	}
1438 
1439 	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);
1440 
1441 	req_desc->prd_table_length = table_length;
1442 }
1443 
1444 static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
1445 {
1446 	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
1447 	struct utp_transfer_req_desc *req_desc = hba->utrdl;
1448 	u32 upiu_flags;
1449 	int ocs, result = 0;
1450 	u8 scsi_status;
1451 
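	/*
	 * Build the transfer: request descriptor header, SCSI command UPIU
	 * and PRDT scatter list; then ring the doorbell and decode the
	 * overall command status (OCS) and UPIU response.
	 */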
1452 	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
1453 	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
1454 	prepare_prdt_table(hba, pccb);
1455 
1456 	ufshcd_send_command(hba, TASK_TAG);
1457 
1458 	ocs = ufshcd_get_tr_ocs(hba);
1459 	switch (ocs) {
1460 	case OCS_SUCCESS:
1461 		result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
1462 		switch (result) {
1463 		case UPIU_TRANSACTION_RESPONSE:
1464 			result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);
1465 
1466 			scsi_status = result & MASK_SCSI_STATUS;
1467 			if (scsi_status)
1468 				return -EINVAL;
1469 
1470 			break;
1471 		case UPIU_TRANSACTION_REJECT_UPIU:
1472 			/* TODO: handle Reject UPIU Response */
1473 			dev_err(hba->dev,
1474 				"Reject UPIU not fully implemented\n");
1475 			return -EINVAL;
1476 		default:
1477 			dev_err(hba->dev,
1478 				"Unexpected request response code = %x\n",
1479 				result);
1480 			return -EINVAL;
1481 		}
1482 		break;
1483 	default:
1484 		dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
1485 		return -EINVAL;
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
1492 				   int desc_index, u8 *buf, u32 size)
1493 {
1494 	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1495 }
1496 
1497 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
1498 {
1499 	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
1500 }
1501 
1502 /**
1503  * ufshcd_read_string_desc - read string descriptor
1504  *
1505  */
1506 int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
1507 			    u8 *buf, u32 size, bool ascii)
1508 {
1509 	int err = 0;
1510 
1511 	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
1512 			       size);
1513 
1514 	if (err) {
1515 		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
1516 			__func__, QUERY_REQ_RETRIES, err);
1517 		goto out;
1518 	}
1519 
1520 	if (ascii) {
1521 		int desc_len;
1522 		int ascii_len;
1523 		int i;
1524 		u8 *buff_ascii;
1525 
1526 		desc_len = buf[0];
1527 		/* strip the header, halve the UTF-16 byte count, plus one byte for termination */
1528 		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
1529 		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
1530 			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
1531 				__func__);
1532 			err = -ENOMEM;
1533 			goto out;
1534 		}
1535 
1536 		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
1537 		if (!buff_ascii) {
1538 			err = -ENOMEM;
1539 			goto out;
1540 		}
1541 
1542 		/*
1543 		 * the descriptor contains a string in UTF-16 format;
1544 		 * convert it to UTF-8 so it can be displayed
1545 		 */
1546 		utf16_to_utf8(buff_ascii,
1547 			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);
1548 
1549 		/* replace non-printable or non-ASCII characters with spaces */
1550 		for (i = 0; i < ascii_len; i++)
1551 			ufshcd_remove_non_printable(&buff_ascii[i]);
1552 
1553 		memset(buf + QUERY_DESC_HDR_SIZE, 0,
1554 		       size - QUERY_DESC_HDR_SIZE);
1555 		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
1556 		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
1557 		kfree(buff_ascii);
1558 	}
1559 out:
1560 	return err;
1561 }
1562 
1563 static int ufs_get_device_desc(struct ufs_hba *hba,
1564 			       struct ufs_dev_desc *dev_desc)
1565 {
1566 	int err;
1567 	size_t buff_len;
1568 	u8 model_index;
1569 	u8 *desc_buf;
1570 
1571 	buff_len = max_t(size_t, hba->desc_size.dev_desc,
1572 			 QUERY_DESC_MAX_SIZE + 1);
1573 	desc_buf = kmalloc(buff_len, GFP_KERNEL);
1574 	if (!desc_buf) {
1575 		err = -ENOMEM;
1576 		goto out;
1577 	}
1578 
1579 	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
1580 	if (err) {
1581 		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
1582 			__func__, err);
1583 		goto out;
1584 	}
1585 
1586 	/*
1587 	 * getting vendor (manufacturerID) and Bank Index in big endian
1588 	 * format
1589 	 */
1590 	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
1591 				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
1592 
1593 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
1594 
1595 	/* Zero-pad entire buffer for string termination. */
1596 	memset(desc_buf, 0, buff_len);
1597 
1598 	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
1599 				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
1600 	if (err) {
1601 		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
1602 			__func__, err);
1603 		goto out;
1604 	}
1605 
1606 	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
1607 	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
1608 		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
1609 		      MAX_MODEL_LEN));
1610 
1611 	/* Null terminate the model string */
1612 	dev_desc->model[MAX_MODEL_LEN] = '\0';
1613 
1614 out:
1615 	kfree(desc_buf);
1616 	return err;
1617 }
1618 
1619 /**
1620  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
1621  */
1622 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
1623 {
1624 	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
1625 
1626 	if (hba->max_pwr_info.is_valid)
1627 		return 0;
1628 
1629 	pwr_info->pwr_tx = FAST_MODE;
1630 	pwr_info->pwr_rx = FAST_MODE;
1631 	pwr_info->hs_rate = PA_HS_MODE_B;
1632 
1633 	/* Get the connected lane count */
1634 	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
1635 		       &pwr_info->lane_rx);
1636 	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
1637 		       &pwr_info->lane_tx);
1638 
1639 	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
1640 		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
1641 			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
1642 		return -EINVAL;
1643 	}
1644 
1645 	/*
1646 	 * First, get the maximum gears of HS speed.
1647 	 * If a zero value, it means there is no HSGEAR capability.
1648 	 * Then, get the maximum gears of PWM speed.
1649 	 */
1650 	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
1651 	if (!pwr_info->gear_rx) {
1652 		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1653 			       &pwr_info->gear_rx);
1654 		if (!pwr_info->gear_rx) {
1655 			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
1656 				__func__, pwr_info->gear_rx);
1657 			return -EINVAL;
1658 		}
1659 		pwr_info->pwr_rx = SLOW_MODE;
1660 	}
1661 
1662 	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
1663 			    &pwr_info->gear_tx);
1664 	if (!pwr_info->gear_tx) {
1665 		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1666 				    &pwr_info->gear_tx);
1667 		if (!pwr_info->gear_tx) {
1668 			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
1669 				__func__, pwr_info->gear_tx);
1670 			return -EINVAL;
1671 		}
1672 		pwr_info->pwr_tx = SLOW_MODE;
1673 	}
1674 
1675 	hba->max_pwr_info.is_valid = true;
1676 	return 0;
1677 }
1678 
1679 static int ufshcd_change_power_mode(struct ufs_hba *hba,
1680 				    struct ufs_pa_layer_attr *pwr_mode)
1681 {
1682 	int ret;
1683 
1684 	/* if already configured to the requested pwr_mode */
1685 	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
1686 	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
1687 	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
1688 	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
1689 	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
1690 	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
1691 	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
1692 		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
1693 		return 0;
1694 	}
1695 
1696 	/*
1697 	 * Configure attributes for power mode change with below.
1698 	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
1699 	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
1700 	 * - PA_HSSERIES
1701 	 */
1702 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
1703 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
1704 		       pwr_mode->lane_rx);
1705 	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
1706 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
1707 	else
1708 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
1709 
1710 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
1711 	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
1712 		       pwr_mode->lane_tx);
1713 	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
1714 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
1715 	else
1716 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
1717 
1718 	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
1719 	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
1720 	    pwr_mode->pwr_rx == FAST_MODE ||
1721 	    pwr_mode->pwr_tx == FAST_MODE)
1722 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
1723 			       pwr_mode->hs_rate);
1724 
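	/* PA_PWRMODE encodes the RX power mode in the upper nibble and TX in the lower */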
1725 	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
1726 					 pwr_mode->pwr_tx);
1727 
1728 	if (ret) {
1729 		dev_err(hba->dev,
1730 			"%s: power mode change failed %d\n", __func__, ret);
1731 
1732 		return ret;
1733 	}
1734 
1735 	/* Copy new Power Mode to power info */
1736 	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));
1737 
1738 	return ret;
1739 }
1740 
1741 /**
1742  * ufshcd_verify_dev_init() - Verify device initialization
1743  *
1744  */
1745 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1746 {
1747 	int retries;
1748 	int err;
1749 
1750 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1751 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1752 					  NOP_OUT_TIMEOUT);
1753 		if (!err || err == -ETIMEDOUT)
1754 			break;
1755 
1756 		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1757 	}
1758 
1759 	if (err)
1760 		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1761 
1762 	return err;
1763 }
1764 
1765 /**
1766  * ufshcd_complete_dev_init() - checks device readiness
1767  */
1768 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
1769 {
1770 	int i;
1771 	int err;
1772 	bool flag_res = 1;
1773 
1774 	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1775 				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
1776 	if (err) {
1777 		dev_err(hba->dev,
1778 			"%s setting fDeviceInit flag failed with error %d\n",
1779 			__func__, err);
1780 		goto out;
1781 	}
1782 
1783 	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
1784 	for (i = 0; i < 1000 && !err && flag_res; i++)
1785 		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1786 					      QUERY_FLAG_IDN_FDEVICEINIT,
1787 					      &flag_res);
1788 
1789 	if (err)
1790 		dev_err(hba->dev,
1791 			"%s reading fDeviceInit flag failed with error %d\n",
1792 			__func__, err);
1793 	else if (flag_res)
1794 		dev_err(hba->dev,
1795 			"%s fDeviceInit was not cleared by the device\n",
1796 			__func__);
1797 
1798 out:
1799 	return err;
1800 }
1801 
1802 static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
1803 {
1804 	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1805 	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1806 	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1807 	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1808 	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1809 	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1810 	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1811 }
1812 
1813 int ufs_start(struct ufs_hba *hba)
1814 {
1815 	struct ufs_dev_desc card = {0};
1816 	int ret;
1817 
1818 	ret = ufshcd_link_startup(hba);
1819 	if (ret)
1820 		return ret;
1821 
1822 	ret = ufshcd_verify_dev_init(hba);
1823 	if (ret)
1824 		return ret;
1825 
1826 	ret = ufshcd_complete_dev_init(hba);
1827 	if (ret)
1828 		return ret;
1829 
1830 	/* Init check for device descriptor sizes */
1831 	ufshcd_init_desc_sizes(hba);
1832 
1833 	ret = ufs_get_device_desc(hba, &card);
1834 	if (ret) {
1835 		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
1836 			__func__, ret);
1837 
1838 		return ret;
1839 	}
1840 
1841 	if (ufshcd_get_max_pwr_mode(hba)) {
1842 		dev_err(hba->dev,
1843 			"%s: Failed getting max supported power mode\n",
1844 			__func__);
1845 	} else {
1846 		ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
1847 		if (ret) {
1848 			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
1849 				__func__, ret);
1850 
1851 			return ret;
1852 		}
1853 
1854 		printf("Device at %s up at:", hba->dev->name);
1855 		ufshcd_print_pwr_info(hba);
1856 	}
1857 
1858 	return 0;
1859 }
1860 
1861 int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
1862 {
1863 	struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
1864 	struct scsi_platdata *scsi_plat;
1865 	struct udevice *scsi_dev;
1866 	int err;
1867 
1868 	device_find_first_child(ufs_dev, &scsi_dev);
1869 	if (!scsi_dev)
1870 		return -ENODEV;
1871 
1872 	scsi_plat = dev_get_uclass_platdata(scsi_dev);
1873 	scsi_plat->max_id = UFSHCD_MAX_ID;
1874 	scsi_plat->max_lun = UFS_MAX_LUNS;
1875 	//scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;
1876 
1877 	hba->dev = ufs_dev;
1878 	hba->ops = hba_ops;
1879 	hba->mmio_base = (void *)dev_read_addr(ufs_dev);
1880 
1881 	/* Set descriptor lengths to specification defaults */
1882 	ufshcd_def_desc_sizes(hba);
1883 
1884 	ufshcd_ops_init(hba);
1885 
1886 	/* Read capabilities registers */
1887 	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1888 
1889 	/* Get UFS version supported by the controller */
1890 	hba->version = ufshcd_get_ufs_version(hba);
1891 	if (hba->version != UFSHCI_VERSION_10 &&
1892 	    hba->version != UFSHCI_VERSION_11 &&
1893 	    hba->version != UFSHCI_VERSION_20 &&
1894 	    hba->version != UFSHCI_VERSION_21)
1895 		dev_err(hba->dev, "invalid UFS version 0x%x\n",
1896 			hba->version);
1897 
1898 	/* Get Interrupt bit mask per version */
1899 	hba->intr_mask = ufshcd_get_intr_mask(hba);
1900 
1901 	/* Allocate memory for host memory space */
1902 	err = ufshcd_memory_alloc(hba);
1903 	if (err) {
1904 		dev_err(hba->dev, "Memory allocation failed\n");
1905 		return err;
1906 	}
1907 
1908 	/* Configure Local data structures */
1909 	ufshcd_host_memory_configure(hba);
1910 
1911 	/*
1912 	 * In order to avoid any spurious interrupt immediately after
1913 	 * registering UFS controller interrupt handler, clear any pending UFS
1914 	 * interrupt status and disable all the UFS interrupts.
1915 	 */
1916 	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
1917 		      REG_INTERRUPT_STATUS);
1918 	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
1919 
1920 	err = ufshcd_hba_enable(hba);
1921 	if (err) {
1922 		dev_err(hba->dev, "Host controller enable failed\n");
1923 		return err;
1924 	}
1925 
1926 	err = ufs_start(hba);
1927 	if (err)
1928 		return err;
1929 
1930 	return 0;
1931 }
1932 
1933 int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
1934 {
1935 	int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
1936 				     scsi_devp);
1937 
1938 	return ret;
1939 }
1940 
1941 static struct scsi_ops ufs_ops = {
1942 	.exec		= ufs_scsi_exec,
1943 };
1944 
1945 int ufs_probe_dev(int index)
1946 {
1947 	struct udevice *dev;
1948 
1949 	return uclass_get_device(UCLASS_UFS, index, &dev);
1950 }
1951 
1952 int ufs_probe(void)
1953 {
1954 	struct udevice *dev;
1955 	int ret, i;
1956 
1957 	for (i = 0;; i++) {
1958 		ret = uclass_get_device(UCLASS_UFS, i, &dev);
1959 		if (ret == -ENODEV)
1960 			break;
1961 	}
1962 
1963 	return 0;
1964 }
1965 
1966 U_BOOT_DRIVER(ufs_scsi) = {
1967 	.id = UCLASS_SCSI,
1968 	.name = "ufs_scsi",
1969 	.ops = &ufs_ops,
1970 };
1971