xref: /rk3399_rockchip-uboot/drivers/nvme/nvme.c (revision a5f116a39542f8f1209e73d1ccd30f8a710f98d8)
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <dm/device-internal.h>
#include "nvme.h"

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_CQ_ALLOCATION	ALIGN(NVME_CQ_SIZE(NVME_Q_DEPTH), \
				      ARCH_DMA_MINALIGN)
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512
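
/*
 * Note on sizing: with NVME_Q_DEPTH of 2 the driver keeps at most one
 * command outstanding per queue; every command is submitted and then
 * polled to completion before the next one is issued.  MAX_PRP_POOL is
 * the initial size in bytes of the PRP list pool (512 bytes = 64 PRP
 * entries); nvme_setup_prps() grows the pool on demand.
 */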

enum nvme_queue_id {
	NVME_ADMIN_Q,
	NVME_IO_Q,
	NVME_Q_NUM,
};

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}

static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = page_size >> 3;
	u32 num_pages;

	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps, prps_per_page);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages.  It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool failed\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = prps_per_page * num_pages;
	}

	prp_pool = dev->prp_pool;
	i = 0;
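	/*
	 * Build the PRP list: PRP1 in the command covers the first
	 * (possibly partial) page of the transfer, and PRP2 points at this
	 * list, which holds one entry per remaining page.  When a pool page
	 * fills up, its last slot is used as a link to the next pool page,
	 * as the NVMe PRP list format requires.
	 */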
	while (nprps) {
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			/* advance to the next page of the PRP pool */
			prp_pool += page_size >> 3;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));

	return 0;
}

static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	/*
	 * Single CQ entries are always smaller than a cache line, so we
	 * can't invalidate them individually. However CQ entries are
	 * read only by the CPU, so it's safe to always invalidate all of them,
	 * as the cache line should never become dirty.
	 */
	ulong start = (ulong)&nvmeq->cqes[0];
	ulong stop = start + NVME_CQ_ALLOCATION;

	invalidate_dcache_range(start, stop);

	return readw(&(nvmeq->cqes[index].status));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

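	/*
	 * Bump the tail (wrapping at the queue depth) and write it to the
	 * submission queue tail doorbell so the controller fetches the new
	 * command.
	 */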
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

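	/*
	 * Poll the completion queue entry at the current head.  The
	 * controller toggles the phase bit (bit 0 of the status field) each
	 * time it wraps around the queue, so a new completion is one whose
	 * phase bit matches the phase we expect.  Once consumed, the new
	 * head is written to the CQ head doorbell, which sits one doorbell
	 * stride after the SQ tail doorbell (q_db).
	 */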
	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = readl(&(nvmeq->cqes[head].result));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

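	/*
	 * Queue memory handed to the controller (ASQ/ACQ and the I/O queue
	 * bases) must be physically contiguous and page aligned, hence the
	 * 4 KiB memalign here.
	 */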
	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

 free_queue:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_ALLOCATION);
	dev->online_queues++;
}

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

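	/*
	 * AQA holds the admin queue sizes (zero-based): the admin SQ size
	 * in the lower half and the admin CQ size in the upper half.  ASQ
	 * and ACQ take the base addresses of the two queues.
	 */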
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

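	/*
	 * Controller configuration: select the NVM command set, encode the
	 * host memory page size (MPS is the power of two above 4 KiB),
	 * round-robin arbitration, no shutdown notification, and the
	 * standard 64-byte SQ / 16-byte CQ entry sizes.
	 */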
	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

 free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			    struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			    struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	invalidate_dcache_range(dma_addr,
				dma_addr + sizeof(struct nvme_id_ctrl));

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = nvme_submit_admin_cmd(dev, &c, result);

	/*
	 * TODO: Add some cache invalidation when a DMA buffer is involved
	 * in the request, here and before the command gets submitted. The
	 * buffer size varies by feature, also some features use a different
	 * field in the command packet to hold the buffer address.
	 * Section 5.21.1 (Set Features command) in the NVMe specification
	 * details the buffer requirements for each feature.
	 *
	 * At the moment there is no user of this function.
	 */

	return ret;
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: Add a cache clean (aka flush) operation when a DMA buffer is
	 * involved in the request. The buffer size varies by feature, also
	 * some features use a different field in the command packet to hold
	 * the buffer address. Section 5.21.1 (Set Features command) in the
	 * NVMe specification details the buffer requirements for each
	 * feature.
	 * At the moment the only user of this function is not using
	 * any DMA buffer at all.
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

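	/*
	 * The completion queue must exist before the submission queue that
	 * posts to it, so create the CQ first and roll both back if the SQ
	 * creation fails.
	 */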
	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

 release_sq:
	nvme_delete_sq(dev, qid);
 release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

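	/*
	 * The Number of Queues feature takes zero-based counts: requested
	 * submission queues in the low 16 bits of dword11 and completion
	 * queues in the high 16 bits.  The completion result reports how
	 * many of each the controller actually allocated.
	 */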
	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
			q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}

static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

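	/* CNS 1: Identify Controller data structure */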
	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts) {
		dev->max_transfer_shift = (ctrl->mdts + shift);
	} else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
		 * data transfer size between the host and the controller. The
		 * host should not submit a command that exceeds this transfer
		 * size. The value is in units of the minimum memory page size
		 * and is reported as a power of two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logical blocks
		 * per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}

int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}

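/*
 * Probe every controller in the NVMe uclass; probing a controller creates
 * a block device per active namespace (see nvme_probe() below).  This is
 * typically reached via the 'nvme scan' command.
 */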
int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}

static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct pci_child_platdata *pplat;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name);
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	list_add(&ns->list, &ndev->namespaces);

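	/*
	 * Fill in the block descriptor: nsze is the namespace size in
	 * logical blocks and lbaf[flbas].ds is log2 of the LBA data size,
	 * so it can be used directly as log2blksz.
	 */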
	desc->lba = le64_to_cpu(id->nsze);
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	pplat = dev_get_parent_platdata(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
	part_init(desc);

	free(id);
	return 0;
}

static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

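	/*
	 * Flush the buffer before the transfer even for reads, so that no
	 * dirty cache lines can be evicted on top of the data the device
	 * DMAs into it; reads additionally invalidate afterwards.
	 */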
	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

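	/*
	 * Split the request into chunks of at most 'lbas' logical blocks
	 * (bounded by the controller's maximum transfer size) and set up a
	 * fresh PRP list for each chunk.  The NLB field in the command is
	 * zero-based, hence 'lbas - 1'.
	 */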
	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	if (read)
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + total_len);

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};

static int nvme_bind(struct udevice *udev)
{
	static int ndev_num;
	char name[20];

	sprintf(name, "nvme#%d", ndev_num++);

	return device_set_name(udev, name);
}

static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);
	struct nvme_id_ns *id;

	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
			PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: NVMe controller not responding\n",
		       udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

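	/*
	 * CAP reports the maximum queue entries supported (MQES, zero-based)
	 * and the doorbell stride; the doorbell registers start at offset
	 * 4096 of BAR0, one SQ tail/CQ head pair per queue.
	 */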
	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);

	/* Create a blk device for each namespace */

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id) {
		ret = -ENOMEM;
		goto free_queue;
	}

	for (int i = 1; i <= ndev->nn; i++) {
		struct udevice *ns_udev;
		char name[20];

		memset(id, 0, sizeof(*id));
		if (nvme_identify(ndev, i, 0, (dma_addr_t)(long)id)) {
			ret = -EIO;
			goto free_id;
		}

		/* skip inactive namespace */
		if (!id->nsze)
			continue;

		/*
		 * Encode the namespace id to the device name so that
		 * we can extract it when doing the probe.
		 */
		sprintf(name, "blk#%d", i);

		/* The real blksz and size will be set by nvme_blk_probe() */
		ret = blk_create_devicef(udev, "nvme-blk", name, IF_TYPE_NVME,
					 -1, 512, 0, &ns_udev);
		if (ret)
			goto free_id;
	}

	free(id);
	return 0;

free_id:
	free(id);
free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};

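/* Match any PCI device with the NVM Express class code (01h/08h/02h) */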
struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);
935