xref: /rk3399_rockchip-uboot/drivers/nvme/nvme.c (revision ff15e12394bd8a3d0c8c7b4d76243f4cde50ac3e)
1 /*
2  * Copyright (C) 2017 NXP Semiconductors
3  * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
4  *
5  * SPDX-License-Identifier:	GPL-2.0+
6  */
7 
8 #include <common.h>
9 #include <dm.h>
10 #include <errno.h>
11 #include <memalign.h>
12 #include <pci.h>
13 #include <dm/device-internal.h>
14 #include "nvme.h"
15 
16 #define NVME_Q_DEPTH		2
17 #define NVME_AQ_DEPTH		2
18 #define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
19 #define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
20 #define ADMIN_TIMEOUT		60
21 #define IO_TIMEOUT		30
22 #define MAX_PRP_POOL		512
23 
24 enum nvme_queue_id {
25 	NVME_ADMIN_Q,
26 	NVME_IO_Q,
27 	NVME_Q_NUM,
28 };
29 
30 /*
31  * An NVM Express queue. Each device has at least two (one for admin
32  * commands and one for I/O commands).
33  */
34 struct nvme_queue {
35 	struct nvme_dev *dev;
36 	struct nvme_command *sq_cmds;
37 	struct nvme_completion *cqes;
38 	wait_queue_head_t sq_full;
39 	u32 __iomem *q_db;
40 	u16 q_depth;
41 	s16 cq_vector;
42 	u16 sq_head;
43 	u16 sq_tail;
44 	u16 cq_head;
45 	u16 qid;
46 	u8 cq_phase;
47 	u8 cqe_seen;
48 	unsigned long cmdid_data[];
49 };
50 
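/**
 * nvme_wait_ready() - poll CSTS.RDY until it reaches the requested state
 *
 * @dev:	NVMe controller
 * @enabled:	true to wait for RDY to become set, false for it to clear
 * @return 0 on success, -ETIME if the controller does not reach the
 *	requested state within the CAP.TO timeout
 */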
51 static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
52 {
53 	u32 bit = enabled ? NVME_CSTS_RDY : 0;
54 	int timeout;
55 	ulong start;
56 
57 	/* Timeout field in the CAP register is in 500 millisecond units */
58 	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;
59 
60 	start = get_timer(0);
61 	while (get_timer(start) < timeout) {
62 		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
63 			return 0;
64 	}
65 
66 	return -ETIME;
67 }
68 
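/**
 * nvme_setup_prps() - work out the PRP2 entry for a data transfer
 *
 * PRP1 always points at the start of the data buffer and is set by the
 * caller. This helper returns what PRP2 should be: zero if the transfer
 * ends within the first memory page, the address of the second page if
 * one more page is enough, or the address of a PRP list built (and
 * chained page by page) in dev->prp_pool otherwise.
 *
 * @dev:	NVMe controller
 * @prp2:	returns the value for the command's PRP2 field
 * @total_len:	transfer length in bytes
 * @dma_addr:	bus address of the data buffer
 */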
69 static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
70 			   int total_len, u64 dma_addr)
71 {
72 	u32 page_size = dev->page_size;
73 	int offset = dma_addr & (page_size - 1);
74 	u64 *prp_pool;
75 	int length = total_len;
76 	int i, nprps;
77 	u32 prps_per_page = (page_size >> 3) - 1;
78 	u32 num_pages;
79 
80 	length -= (page_size - offset);
81 
82 	if (length <= 0) {
83 		*prp2 = 0;
84 		return 0;
85 	}
86 
87 	if (length)
88 		dma_addr += (page_size - offset);
89 
90 	if (length <= page_size) {
91 		*prp2 = dma_addr;
92 		return 0;
93 	}
94 
95 	nprps = DIV_ROUND_UP(length, page_size);
96 	num_pages = DIV_ROUND_UP(nprps, prps_per_page);
97 
98 	if (nprps > dev->prp_entry_num) {
99 		free(dev->prp_pool);
100 		/*
101 		 * Always increase in increments of pages.  It doesn't waste
102 		 * much memory and reduces the number of allocations.
103 		 */
104 		dev->prp_pool = memalign(page_size, num_pages * page_size);
105 		if (!dev->prp_pool) {
106 			printf("Error: failed to allocate PRP pool\n");
107 			return -ENOMEM;
108 		}
109 		dev->prp_entry_num = prps_per_page * num_pages;
110 	}
111 
112 	prp_pool = dev->prp_pool;
113 	i = 0;
114 	while (nprps) {
115 		if (i == ((page_size >> 3) - 1)) {
116 			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
117 					page_size);
118 			i = 0;
			/*
			 * prp_pool is a u64 pointer: advance by one page's
			 * worth of entries, not by page_size elements.
			 */
			prp_pool += page_size >> 3;
120 		}
121 		*(prp_pool + i++) = cpu_to_le64(dma_addr);
122 		dma_addr += page_size;
123 		nprps--;
124 	}
125 	*prp2 = (ulong)dev->prp_pool;
126 
	/*
	 * The controller fetches the PRP list via DMA, so make sure the
	 * entries written above reach memory before the command is issued.
	 */
	flush_dcache_range((ulong)dev->prp_pool,
			   (ulong)dev->prp_pool + num_pages * page_size);

127 	return 0;
128 }
129 
130 static __le16 nvme_get_cmd_id(void)
131 {
132 	static unsigned short cmdid;
133 
134 	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
135 }
136 
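/**
 * nvme_read_completion_status() - read the status field of a CQ entry
 *
 * The cache lines covering the entry are invalidated first so the CPU
 * sees the completion the controller wrote via DMA.
 *
 * @nvmeq:	queue the completion belongs to
 * @index:	index of the completion queue entry
 */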
137 static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
138 {
139 	u64 start = (ulong)&nvmeq->cqes[index];
140 	u64 stop = start + sizeof(struct nvme_completion);
141 
142 	invalidate_dcache_range(start, stop);
143 
144 	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
145 }
146 
147 /**
148  * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
149  *
150  * @nvmeq:	The queue to use
151  * @cmd:	The command to send
152  */
153 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
154 {
155 	u16 tail = nvmeq->sq_tail;
156 
157 	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
158 	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
159 			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
160 
161 	if (++tail == nvmeq->q_depth)
162 		tail = 0;
163 	writel(tail, nvmeq->q_db);
164 	nvmeq->sq_tail = tail;
165 }
166 
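/**
 * nvme_submit_sync_cmd() - submit a command and poll for its completion
 *
 * Tags the command, copies it into the submission queue and then spins on
 * the phase bit of the expected completion entry, updating the CQ head
 * doorbell once the completion has been consumed.
 *
 * @nvmeq:	queue to submit on
 * @cmd:	command to execute
 * @result:	if non-NULL, receives the command-specific result dword
 * @timeout:	poll limit, passed in as ADMIN_TIMEOUT or IO_TIMEOUT
 * @return 0 on success, -EIO if the controller reports an error status,
 *	-ETIMEDOUT if no completion arrives in time
 */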
167 static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
168 				struct nvme_command *cmd,
169 				u32 *result, unsigned timeout)
170 {
171 	u16 head = nvmeq->cq_head;
172 	u16 phase = nvmeq->cq_phase;
173 	u16 status;
174 	ulong start_time;
175 	ulong timeout_us = timeout * 100000;
176 
177 	cmd->common.command_id = nvme_get_cmd_id();
178 	nvme_submit_cmd(nvmeq, cmd);
179 
180 	start_time = timer_get_us();
181 
182 	for (;;) {
183 		status = nvme_read_completion_status(nvmeq, head);
184 		if ((status & 0x01) == phase)
185 			break;
186 		if (timeout_us > 0 && (timer_get_us() - start_time)
187 		    >= timeout_us)
188 			return -ETIMEDOUT;
189 	}
190 
191 	status >>= 1;
192 	if (status) {
193 		printf("ERROR: status = %x, phase = %d, head = %d\n",
194 		       status, phase, head);
195 		status = 0;
196 		if (++head == nvmeq->q_depth) {
197 			head = 0;
198 			phase = !phase;
199 		}
200 		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
201 		nvmeq->cq_head = head;
202 		nvmeq->cq_phase = phase;
203 
204 		return -EIO;
205 	}
206 
207 	if (result)
208 		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));
209 
210 	if (++head == nvmeq->q_depth) {
211 		head = 0;
212 		phase = !phase;
213 	}
214 	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
215 	nvmeq->cq_head = head;
216 	nvmeq->cq_phase = phase;
217 
218 	return status;
219 }
220 
221 static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
222 				 u32 *result)
223 {
224 	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
225 				    result, ADMIN_TIMEOUT);
226 }
227 
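/**
 * nvme_alloc_queue() - allocate an SQ/CQ pair in host memory
 *
 * The submission and completion rings are 4 KiB aligned so they can be
 * handed to the controller in the create-queue commands. The queue is
 * recorded in dev->queues[qid] but not yet created on the controller.
 */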
228 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
229 					   int qid, int depth)
230 {
231 	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
232 	if (!nvmeq)
233 		return NULL;
234 	memset(nvmeq, 0, sizeof(*nvmeq));
235 
236 	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
237 	if (!nvmeq->cqes)
238 		goto free_nvmeq;
239 	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));
240 
241 	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
242 	if (!nvmeq->sq_cmds)
243 		goto free_queue;
244 	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));
245 
246 	nvmeq->dev = dev;
247 
248 	nvmeq->cq_head = 0;
249 	nvmeq->cq_phase = 1;
250 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
251 	nvmeq->q_depth = depth;
252 	nvmeq->qid = qid;
253 	dev->queue_count++;
254 	dev->queues[qid] = nvmeq;
255 
256 	return nvmeq;
257 
258  free_queue:
259 	free((void *)nvmeq->cqes);
260  free_nvmeq:
261 	free(nvmeq);
262 
263 	return NULL;
264 }
265 
266 static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
267 {
268 	struct nvme_command c;
269 
270 	memset(&c, 0, sizeof(c));
271 	c.delete_queue.opcode = opcode;
272 	c.delete_queue.qid = cpu_to_le16(id);
273 
274 	return nvme_submit_admin_cmd(dev, &c, NULL);
275 }
276 
277 static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
278 {
279 	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
280 }
281 
282 static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
283 {
284 	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
285 }
286 
287 static int nvme_enable_ctrl(struct nvme_dev *dev)
288 {
289 	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
290 	dev->ctrl_config |= NVME_CC_ENABLE;
291 	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
292 
293 	return nvme_wait_ready(dev, true);
294 }
295 
296 static int nvme_disable_ctrl(struct nvme_dev *dev)
297 {
298 	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
299 	dev->ctrl_config &= ~NVME_CC_ENABLE;
300 	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
301 
302 	return nvme_wait_ready(dev, false);
303 }
304 
305 static void nvme_free_queue(struct nvme_queue *nvmeq)
306 {
307 	free((void *)nvmeq->cqes);
308 	free(nvmeq->sq_cmds);
309 	free(nvmeq);
310 }
311 
312 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
313 {
314 	int i;
315 
316 	for (i = dev->queue_count - 1; i >= lowest; i--) {
317 		struct nvme_queue *nvmeq = dev->queues[i];
318 		dev->queue_count--;
319 		dev->queues[i] = NULL;
320 		nvme_free_queue(nvmeq);
321 	}
322 }
323 
324 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
325 {
326 	struct nvme_dev *dev = nvmeq->dev;
327 
328 	nvmeq->sq_tail = 0;
329 	nvmeq->cq_head = 0;
330 	nvmeq->cq_phase = 1;
331 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
332 	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
333 	flush_dcache_range((ulong)nvmeq->cqes,
334 			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
335 	dev->online_queues++;
336 }
337 
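/**
 * nvme_configure_admin_queue() - reset the controller and set up the AQ
 *
 * Negotiates the memory page size against CAP.MPSMIN/MPSMAX, disables
 * the controller, programs AQA/ASQ/ACQ with the rings allocated by
 * nvme_alloc_queue() and finally re-enables the controller.
 */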
338 static int nvme_configure_admin_queue(struct nvme_dev *dev)
339 {
340 	int result;
341 	u32 aqa;
342 	u64 cap = dev->cap;
343 	struct nvme_queue *nvmeq;
344 	/* most architectures use 4KB as the page size */
345 	unsigned page_shift = 12;
346 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
347 	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
348 
349 	if (page_shift < dev_page_min) {
350 		debug("Device minimum page size (%u) too large for host (%u)\n",
351 		      1 << dev_page_min, 1 << page_shift);
352 		return -ENODEV;
353 	}
354 
355 	if (page_shift > dev_page_max) {
356 		debug("Device maximum page size (%u) smaller than host (%u)\n",
357 		      1 << dev_page_max, 1 << page_shift);
358 		page_shift = dev_page_max;
359 	}
360 
361 	result = nvme_disable_ctrl(dev);
362 	if (result < 0)
363 		return result;
364 
365 	nvmeq = dev->queues[NVME_ADMIN_Q];
366 	if (!nvmeq) {
367 		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
368 		if (!nvmeq)
369 			return -ENOMEM;
370 	}
371 
372 	aqa = nvmeq->q_depth - 1;
373 	aqa |= aqa << 16;
375 
376 	dev->page_size = 1 << page_shift;
377 
378 	dev->ctrl_config = NVME_CC_CSS_NVM;
379 	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
380 	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
381 	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
382 
383 	writel(aqa, &dev->bar->aqa);
384 	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
385 	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);
386 
387 	result = nvme_enable_ctrl(dev);
388 	if (result)
389 		goto free_nvmeq;
390 
391 	nvmeq->cq_vector = 0;
392 
393 	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);
394 
395 	return result;
396 
397  free_nvmeq:
398 	nvme_free_queues(dev, 0);
399 
400 	return result;
401 }
402 
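/**
 * nvme_alloc_cq() - create an I/O completion queue on the controller
 *
 * Issues a Create I/O Completion Queue admin command for a queue that
 * was previously set up in host memory by nvme_alloc_queue().
 */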
403 static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
404 			    struct nvme_queue *nvmeq)
405 {
406 	struct nvme_command c;
407 	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
408 
409 	memset(&c, 0, sizeof(c));
410 	c.create_cq.opcode = nvme_admin_create_cq;
411 	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
412 	c.create_cq.cqid = cpu_to_le16(qid);
413 	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
414 	c.create_cq.cq_flags = cpu_to_le16(flags);
415 	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
416 
417 	return nvme_submit_admin_cmd(dev, &c, NULL);
418 }
419 
420 static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
421 			    struct nvme_queue *nvmeq)
422 {
423 	struct nvme_command c;
424 	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
425 
426 	memset(&c, 0, sizeof(c));
427 	c.create_sq.opcode = nvme_admin_create_sq;
428 	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
429 	c.create_sq.sqid = cpu_to_le16(qid);
430 	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
431 	c.create_sq.sq_flags = cpu_to_le16(flags);
432 	c.create_sq.cqid = cpu_to_le16(qid);
433 
434 	return nvme_submit_admin_cmd(dev, &c, NULL);
435 }
436 
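/**
 * nvme_identify() - issue an Identify admin command
 *
 * @dev:	NVMe controller
 * @nsid:	namespace ID, used when @cns addresses a namespace
 * @cns:	Controller or Namespace Structure: 0 identifies a namespace,
 *		1 identifies the controller
 * @dma_addr:	buffer (one 4 KiB page) receiving the identify data
 */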
437 int nvme_identify(struct nvme_dev *dev, unsigned nsid,
438 		  unsigned cns, dma_addr_t dma_addr)
439 {
440 	struct nvme_command c;
441 	u32 page_size = dev->page_size;
442 	int offset = dma_addr & (page_size - 1);
443 	int length = sizeof(struct nvme_id_ctrl);
444 	int ret;
445 
446 	memset(&c, 0, sizeof(c));
447 	c.identify.opcode = nvme_admin_identify;
448 	c.identify.nsid = cpu_to_le32(nsid);
449 	c.identify.prp1 = cpu_to_le64(dma_addr);
450 
451 	length -= (page_size - offset);
452 	if (length <= 0) {
453 		c.identify.prp2 = 0;
454 	} else {
455 		dma_addr += (page_size - offset);
456 		c.identify.prp2 = cpu_to_le64(dma_addr);
457 	}
458 
459 	c.identify.cns = cpu_to_le32(cns);
460 
461 	ret = nvme_submit_admin_cmd(dev, &c, NULL);
462 	if (!ret)
463 		invalidate_dcache_range(dma_addr,
464 					dma_addr + sizeof(struct nvme_id_ctrl));
465 
466 	return ret;
467 }
468 
469 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
470 		      dma_addr_t dma_addr, u32 *result)
471 {
472 	struct nvme_command c;
473 
474 	memset(&c, 0, sizeof(c));
475 	c.features.opcode = nvme_admin_get_features;
476 	c.features.nsid = cpu_to_le32(nsid);
477 	c.features.prp1 = cpu_to_le64(dma_addr);
478 	c.features.fid = cpu_to_le32(fid);
479 
480 	/*
481 	 * TODO: add cache invalidate operation when the size of
482 	 * the DMA buffer is known
483 	 */
484 
485 	return nvme_submit_admin_cmd(dev, &c, result);
486 }
487 
488 int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
489 		      dma_addr_t dma_addr, u32 *result)
490 {
491 	struct nvme_command c;
492 
493 	memset(&c, 0, sizeof(c));
494 	c.features.opcode = nvme_admin_set_features;
495 	c.features.prp1 = cpu_to_le64(dma_addr);
496 	c.features.fid = cpu_to_le32(fid);
497 	c.features.dword11 = cpu_to_le32(dword11);
498 
499 	/*
500 	 * TODO: add cache flush operation when the size of
501 	 * the DMA buffer is known
502 	 */
503 
504 	return nvme_submit_admin_cmd(dev, &c, result);
505 }
506 
507 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
508 {
509 	struct nvme_dev *dev = nvmeq->dev;
510 	int result;
511 
512 	nvmeq->cq_vector = qid - 1;
513 	result = nvme_alloc_cq(dev, qid, nvmeq);
514 	if (result < 0)
515 		goto release_cq;
516 
517 	result = nvme_alloc_sq(dev, qid, nvmeq);
518 	if (result < 0)
519 		goto release_sq;
520 
521 	nvme_init_queue(nvmeq, qid);
522 
523 	return result;
524 
525  release_sq:
526 	nvme_delete_sq(dev, qid);
527  release_cq:
528 	nvme_delete_cq(dev, qid);
529 
530 	return result;
531 }
532 
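/**
 * nvme_set_queue_count() - negotiate the number of I/O queues
 *
 * Uses the Number of Queues feature. The controller answers with the
 * number of submission and completion queues it actually allocated;
 * the smaller of the two, plus one, is returned.
 */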
533 static int nvme_set_queue_count(struct nvme_dev *dev, int count)
534 {
535 	int status;
536 	u32 result;
537 	u32 q_count = (count - 1) | ((count - 1) << 16);
538 
539 	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
540 			q_count, 0, &result);
541 
542 	if (status < 0)
543 		return status;
544 	if (status > 1)
545 		return 0;
546 
547 	return min(result & 0xffff, result >> 16) + 1;
548 }
549 
550 static void nvme_create_io_queues(struct nvme_dev *dev)
551 {
552 	unsigned int i;
553 
554 	for (i = dev->queue_count; i <= dev->max_qid; i++)
555 		if (!nvme_alloc_queue(dev, i, dev->q_depth))
556 			break;
557 
558 	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
559 		if (nvme_create_queue(dev->queues[i], i))
560 			break;
561 }
562 
563 static int nvme_setup_io_queues(struct nvme_dev *dev)
564 {
565 	int nr_io_queues;
566 	int result;
567 
568 	nr_io_queues = 1;
569 	result = nvme_set_queue_count(dev, nr_io_queues);
570 	if (result <= 0)
571 		return result;
572 
573 	dev->max_qid = nr_io_queues;
574 
575 	/* Free previously allocated queues */
576 	nvme_free_queues(dev, nr_io_queues + 1);
577 	nvme_create_io_queues(dev);
578 
579 	return 0;
580 }
581 
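/**
 * nvme_get_info_from_identify() - cache controller data from Identify
 *
 * Stores the namespace count, serial/model/firmware strings and the
 * maximum transfer shift used by the block read/write path.
 */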
582 static int nvme_get_info_from_identify(struct nvme_dev *dev)
583 {
584 	ALLOC_CACHE_ALIGN_BUFFER(char, buf, sizeof(struct nvme_id_ctrl));
585 	struct nvme_id_ctrl *ctrl = (struct nvme_id_ctrl *)buf;
586 	int ret;
587 	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;
588 
589 	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
590 	if (ret)
591 		return -EIO;
592 
593 	dev->nn = le32_to_cpu(ctrl->nn);
594 	dev->vwc = ctrl->vwc;
595 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
596 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
597 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
598 	if (ctrl->mdts)
599 		dev->max_transfer_shift = (ctrl->mdts + shift);
600 	else {
601 		/*
602 		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
603 		 * data transfer size between the host and the controller. The
604 		 * host should not submit a command that exceeds this transfer
605 		 * size. The value is in units of the minimum memory page size
606 		 * and is reported as a power of two (2^n).
607 		 *
608 		 * The spec also says: a value of 0h indicates no restrictions
609 		 * on transfer size. But in nvme_blk_read/write() below we have
610 		 * the following algorithm for maximum number of logical blocks
611 		 * per transfer:
612 		 *
613 		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
614 		 *
615 		 * In order for lbas not to overflow, the maximum number is 15
616 		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
617 		 * Let's use 20 which provides 1MB size.
618 		 */
619 		dev->max_transfer_shift = 20;
620 	}
621 
622 	return 0;
623 }
624 
625 int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
626 {
627 	struct nvme_ns *ns = dev_get_priv(udev);
628 
629 	if (ns_id)
630 		*ns_id = ns->ns_id;
631 	if (eui64)
632 		memcpy(eui64, ns->eui64, sizeof(ns->eui64));
633 
634 	return 0;
635 }
636 
637 int nvme_scan_namespace(void)
638 {
639 	struct uclass *uc;
640 	struct udevice *dev;
641 	int ret;
642 
643 	ret = uclass_get(UCLASS_NVME, &uc);
644 	if (ret)
645 		return ret;
646 
647 	uclass_foreach_dev(dev, uc) {
648 		ret = device_probe(dev);
649 		if (ret)
650 			return ret;
651 	}
652 
653 	return 0;
654 }
655 
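/**
 * nvme_blk_probe() - probe one namespace and fill in its block descriptor
 *
 * The namespace ID is taken from the trailing number of the block device
 * name; Identify Namespace then supplies the LBA format used to set the
 * block size and capacity.
 */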
656 static int nvme_blk_probe(struct udevice *udev)
657 {
658 	struct nvme_dev *ndev = dev_get_priv(udev->parent);
659 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
660 	struct nvme_ns *ns = dev_get_priv(udev);
661 	u8 flbas;
662 	ALLOC_CACHE_ALIGN_BUFFER(char, buf, sizeof(struct nvme_id_ns));
663 	struct nvme_id_ns *id = (struct nvme_id_ns *)buf;
664 	struct pci_child_platdata *pplat;
665 
666 	memset(ns, 0, sizeof(*ns));
667 	ns->dev = ndev;
668 	/* extract the namespace id from the block device name */
669 	ns->ns_id = trailing_strtol(udev->name) + 1;
670 	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id))
671 		return -EIO;
672 
673 	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
674 	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
675 	ns->flbas = flbas;
676 	ns->lba_shift = id->lbaf[flbas].ds;
677 	ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
678 	ns->mode_select_block_len = 1 << ns->lba_shift;
679 	list_add(&ns->list, &ndev->namespaces);
680 
681 	desc->lba = ns->mode_select_num_blocks;
682 	desc->log2blksz = ns->lba_shift;
683 	desc->blksz = 1 << ns->lba_shift;
684 	desc->bdev = udev;
685 	pplat = dev_get_parent_platdata(udev->parent);
686 	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
687 	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
688 	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
689 	part_init(desc);
690 
691 	return 0;
692 }
693 
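/**
 * nvme_blk_rw() - common block read/write path
 *
 * Splits the request into chunks no larger than the controller's maximum
 * transfer size, builds the PRPs for each chunk and issues synchronous
 * commands on the I/O queue. Returns the number of blocks transferred.
 */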
694 static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
695 			 lbaint_t blkcnt, void *buffer, bool read)
696 {
697 	struct nvme_ns *ns = dev_get_priv(udev);
698 	struct nvme_dev *dev = ns->dev;
699 	struct nvme_command c;
700 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
701 	int status;
702 	u64 prp2;
703 	u64 total_len = blkcnt << desc->log2blksz;
704 	u64 temp_len = total_len;
	/*
	 * Scratch cursor: 'buffer' must keep pointing at the start of the
	 * caller's data for the final cache invalidation below.
	 */
	void *temp_buffer = buffer;
705 
706 	u64 slba = blknr;
707 	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
708 	u64 total_lbas = blkcnt;
709 
710 	if (!read)
711 		flush_dcache_range((unsigned long)buffer,
712 				   (unsigned long)buffer + total_len);
713 
714 	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
715 	c.rw.flags = 0;
716 	c.rw.nsid = cpu_to_le32(ns->ns_id);
717 	c.rw.control = 0;
718 	c.rw.dsmgmt = 0;
719 	c.rw.reftag = 0;
720 	c.rw.apptag = 0;
721 	c.rw.appmask = 0;
722 	c.rw.metadata = 0;
723 
724 	while (total_lbas) {
725 		if (total_lbas < lbas) {
726 			lbas = (u16)total_lbas;
727 			total_lbas = 0;
728 		} else {
729 			total_lbas -= lbas;
730 		}
731 
732 		if (nvme_setup_prps(dev, &prp2,
733 				    lbas << ns->lba_shift, (ulong)temp_buffer))
734 			return -EIO;
735 		c.rw.slba = cpu_to_le64(slba);
736 		slba += lbas;
737 		c.rw.length = cpu_to_le16(lbas - 1);
738 		c.rw.prp1 = cpu_to_le64((ulong)temp_buffer);
739 		c.rw.prp2 = cpu_to_le64(prp2);
740 		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
741 				&c, NULL, IO_TIMEOUT);
742 		if (status)
743 			break;
744 		temp_len -= (u32)lbas << ns->lba_shift;
745 		temp_buffer += lbas << ns->lba_shift;
746 	}
747 
748 	if (read)
749 		invalidate_dcache_range((unsigned long)buffer,
750 					(unsigned long)buffer + total_len);
751 
752 	return (total_len - temp_len) >> desc->log2blksz;
753 }
754 
755 static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
756 			   lbaint_t blkcnt, void *buffer)
757 {
758 	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
759 }
760 
761 static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
762 			    lbaint_t blkcnt, const void *buffer)
763 {
764 	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
765 }
766 
767 static const struct blk_ops nvme_blk_ops = {
768 	.read	= nvme_blk_read,
769 	.write	= nvme_blk_write,
770 };
771 
772 U_BOOT_DRIVER(nvme_blk) = {
773 	.name	= "nvme-blk",
774 	.id	= UCLASS_BLK,
775 	.probe	= nvme_blk_probe,
776 	.ops	= &nvme_blk_ops,
777 	.priv_auto_alloc_size = sizeof(struct nvme_ns),
778 };
779 
780 static int nvme_bind(struct udevice *udev)
781 {
782 	static int ndev_num;
783 	char name[20];
784 
785 	sprintf(name, "nvme#%d", ndev_num++);
786 
787 	return device_set_name(udev, name);
788 }
789 
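/**
 * nvme_probe() - bring up an NVMe controller
 *
 * Maps BAR0, allocates the queue bookkeeping and PRP pool, configures
 * the admin queue, creates one I/O queue and reads the controller
 * identify data. Per-namespace block devices use the nvme-blk driver.
 */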
790 static int nvme_probe(struct udevice *udev)
791 {
792 	int ret;
793 	struct nvme_dev *ndev = dev_get_priv(udev);
794 
795 	ndev->instance = trailing_strtol(udev->name);
796 
797 	INIT_LIST_HEAD(&ndev->namespaces);
798 	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
799 			PCI_REGION_MEM);
800 	if (readl(&ndev->bar->csts) == -1) {
801 		ret = -ENODEV;
802 		printf("Error: %s: Controller not accessible\n", udev->name);
803 		goto free_nvme;
804 	}
805 
806 	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
807 	if (!ndev->queues) {
808 		ret = -ENOMEM;
809 		printf("Error: %s: Out of memory!\n", udev->name);
810 		goto free_nvme;
811 	}
812 	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
813 
814 	ndev->cap = nvme_readq(&ndev->bar->cap);
815 	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
816 	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
817 	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;
818 
819 	ret = nvme_configure_admin_queue(ndev);
820 	if (ret)
821 		goto free_queue;
822 
823 	/* Allocate after the page size is known */
824 	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
825 	if (!ndev->prp_pool) {
826 		ret = -ENOMEM;
827 		printf("Error: %s: Out of memory!\n", udev->name);
828 		goto free_queue;
829 	}
830 	ndev->prp_entry_num = MAX_PRP_POOL >> 3;
831 
832 	ret = nvme_setup_io_queues(ndev);
833 	if (ret)
834 		goto free_queue;
835 
836 	nvme_get_info_from_identify(ndev);
837 
838 	return 0;
839 
840 free_queue:
841 	free((void *)ndev->queues);
842 free_nvme:
843 	return ret;
844 }
845 
846 U_BOOT_DRIVER(nvme) = {
847 	.name	= "nvme",
848 	.id	= UCLASS_NVME,
849 	.bind	= nvme_bind,
850 	.probe	= nvme_probe,
851 	.priv_auto_alloc_size = sizeof(struct nvme_dev),
852 };
853 
854 struct pci_device_id nvme_supported[] = {
855 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
856 	{}
857 };
858 
859 U_BOOT_PCI_DEVICE(nvme, nvme_supported);
860