xref: /rk3399_rockchip-uboot/drivers/nvme/nvme.c (revision 8ceda4e546425baf0b56fd29ffd3b58f17b4040e)
1 /*
2  * Copyright (C) 2017 NXP Semiconductors
3  * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
4  *
5  * SPDX-License-Identifier:	GPL-2.0+
6  */
7 
8 #include <common.h>
9 #include <dm.h>
10 #include <errno.h>
11 #include <memalign.h>
12 #include <pci.h>
13 #include <dm/device-internal.h>
14 #include "nvme.h"
15 
16 #define NVME_Q_DEPTH		2
17 #define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
20 #define ADMIN_TIMEOUT		60
21 #define IO_TIMEOUT		30
22 #define MAX_PRP_POOL		512
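/*
 * MAX_PRP_POOL is the size in bytes of the initial PRP pool allocated in
 * nvme_probe(), i.e. 512 / 8 = 64 PRP entries; nvme_setup_prps() grows the
 * pool in whole pages whenever a transfer needs more entries than that.
 */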
23 
24 enum nvme_queue_id {
25 	NVME_ADMIN_Q,
26 	NVME_IO_Q,
27 	NVME_Q_NUM,
28 };
29 
30 /*
31  * An NVM Express queue. Each device has at least two (one for admin
32  * commands and one for I/O commands).
33  */
34 struct nvme_queue {
35 	struct nvme_dev *dev;
36 	struct nvme_command *sq_cmds;
37 	struct nvme_completion *cqes;
38 	wait_queue_head_t sq_full;
39 	u32 __iomem *q_db;
40 	u16 q_depth;
41 	s16 cq_vector;
42 	u16 sq_head;
43 	u16 sq_tail;
44 	u16 cq_head;
45 	u16 qid;
46 	u8 cq_phase;
47 	u8 cqe_seen;
48 	unsigned long cmdid_data[];
49 };
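
/*
 * Queue handling in this driver is fully polled: commands are copied into
 * the submission ring at sq_tail and the SQ tail doorbell is written; the
 * controller posts completions into the completion ring and inverts the
 * phase bit on every pass, so a completion entry whose phase bit matches
 * cq_phase is known to be new. The host then advances cq_head and writes
 * the CQ head doorbell to release the entry.
 */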
50 
51 static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
52 {
53 	u32 bit = enabled ? NVME_CSTS_RDY : 0;
54 	int timeout;
55 	ulong start;
56 
57 	/* Timeout field in the CAP register is in 500 millisecond units */
58 	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;
59 
60 	start = get_timer(0);
61 	while (get_timer(start) < timeout) {
62 		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
63 			return 0;
64 	}
65 
66 	return -ETIME;
67 }
68 
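/*
 * Build the PRP entries for one transfer: PRP1 (set up by the caller) covers
 * the data from dma_addr up to the next page boundary; if at most one more
 * page is needed, *prp2 points straight at it, otherwise *prp2 points at a
 * PRP list assembled in dev->prp_pool, with the last entry of each pool page
 * chaining to the next one. For example, with 4 KiB pages a page-aligned
 * 16 KiB read uses PRP1 for the first page and a three-entry PRP list for
 * the remaining three pages.
 */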
69 static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
70 			   int total_len, u64 dma_addr)
71 {
72 	u32 page_size = dev->page_size;
73 	int offset = dma_addr & (page_size - 1);
74 	u64 *prp_pool;
75 	int length = total_len;
76 	int i, nprps;
77 	u32 prps_per_page = (page_size >> 3) - 1;
78 	u32 num_pages;
79 
80 	length -= (page_size - offset);
81 
82 	if (length <= 0) {
83 		*prp2 = 0;
84 		return 0;
85 	}
86 
87 	if (length)
88 		dma_addr += (page_size - offset);
89 
90 	if (length <= page_size) {
91 		*prp2 = dma_addr;
92 		return 0;
93 	}
94 
95 	nprps = DIV_ROUND_UP(length, page_size);
96 	num_pages = DIV_ROUND_UP(nprps, prps_per_page);
97 
98 	if (nprps > dev->prp_entry_num) {
99 		free(dev->prp_pool);
100 		/*
101 		 * Always increase in increments of pages.  It doesn't waste
102 		 * much memory and reduces the number of allocations.
103 		 */
104 		dev->prp_pool = memalign(page_size, num_pages * page_size);
105 		if (!dev->prp_pool) {
			printf("Error: failed to allocate PRP pool\n");
107 			return -ENOMEM;
108 		}
109 		dev->prp_entry_num = prps_per_page * num_pages;
110 	}
111 
112 	prp_pool = dev->prp_pool;
113 	i = 0;
	while (nprps) {
		if (i == prps_per_page) {
			/* chain this PRP list page to the next one */
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			/* prp_pool is a u64 *, so step one page in u64 units */
			prp_pool += page_size >> 3;
		}
121 		*(prp_pool + i++) = cpu_to_le64(dma_addr);
122 		dma_addr += page_size;
123 		nprps--;
124 	}
125 	*prp2 = (ulong)dev->prp_pool;
126 
127 	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
128 			   dev->prp_entry_num * sizeof(u64));
129 
130 	return 0;
131 }
132 
133 static __le16 nvme_get_cmd_id(void)
134 {
135 	static unsigned short cmdid;
136 
137 	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
138 }
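
/*
 * Note: the driver is strictly synchronous, with only one command in flight
 * per queue, so the command identifier above is never matched against the
 * completion entry; it merely has to be present in the submitted command.
 */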
139 
140 static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
141 {
142 	u64 start = (ulong)&nvmeq->cqes[index];
143 	u64 stop = start + sizeof(struct nvme_completion);
144 
145 	invalidate_dcache_range(start, stop);
146 
147 	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
148 }
149 
150 /**
151  * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
152  *
153  * @nvmeq:	The queue to use
154  * @cmd:	The command to send
155  */
156 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
157 {
158 	u16 tail = nvmeq->sq_tail;
159 
160 	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
161 	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
162 			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
163 
164 	if (++tail == nvmeq->q_depth)
165 		tail = 0;
166 	writel(tail, nvmeq->q_db);
167 	nvmeq->sq_tail = tail;
168 }
169 
170 static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
171 				struct nvme_command *cmd,
172 				u32 *result, unsigned timeout)
173 {
174 	u16 head = nvmeq->cq_head;
175 	u16 phase = nvmeq->cq_phase;
176 	u16 status;
177 	ulong start_time;
178 	ulong timeout_us = timeout * 100000;
179 
180 	cmd->common.command_id = nvme_get_cmd_id();
181 	nvme_submit_cmd(nvmeq, cmd);
182 
183 	start_time = timer_get_us();
184 
185 	for (;;) {
186 		status = nvme_read_completion_status(nvmeq, head);
187 		if ((status & 0x01) == phase)
188 			break;
189 		if (timeout_us > 0 && (timer_get_us() - start_time)
190 		    >= timeout_us)
191 			return -ETIMEDOUT;
192 	}
193 
194 	status >>= 1;
195 	if (status) {
196 		printf("ERROR: status = %x, phase = %d, head = %d\n",
197 		       status, phase, head);
198 		status = 0;
199 		if (++head == nvmeq->q_depth) {
200 			head = 0;
201 			phase = !phase;
202 		}
203 		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
204 		nvmeq->cq_head = head;
205 		nvmeq->cq_phase = phase;
206 
207 		return -EIO;
208 	}
209 
210 	if (result)
211 		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));
212 
213 	if (++head == nvmeq->q_depth) {
214 		head = 0;
215 		phase = !phase;
216 	}
217 	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
218 	nvmeq->cq_head = head;
219 	nvmeq->cq_phase = phase;
220 
221 	return status;
222 }
223 
224 static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
225 				 u32 *result)
226 {
227 	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
228 				    result, ADMIN_TIMEOUT);
229 }
230 
231 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
232 					   int qid, int depth)
233 {
234 	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
235 	if (!nvmeq)
236 		return NULL;
237 	memset(nvmeq, 0, sizeof(*nvmeq));
238 
239 	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
240 	if (!nvmeq->cqes)
241 		goto free_nvmeq;
242 	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));
243 
244 	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
245 	if (!nvmeq->sq_cmds)
246 		goto free_queue;
247 	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));
248 
249 	nvmeq->dev = dev;
250 
251 	nvmeq->cq_head = 0;
252 	nvmeq->cq_phase = 1;
253 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
254 	nvmeq->q_depth = depth;
255 	nvmeq->qid = qid;
256 	dev->queue_count++;
257 	dev->queues[qid] = nvmeq;
258 
259 	return nvmeq;
260 
261  free_queue:
262 	free((void *)nvmeq->cqes);
263  free_nvmeq:
264 	free(nvmeq);
265 
266 	return NULL;
267 }
268 
269 static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
270 {
271 	struct nvme_command c;
272 
273 	memset(&c, 0, sizeof(c));
274 	c.delete_queue.opcode = opcode;
275 	c.delete_queue.qid = cpu_to_le16(id);
276 
277 	return nvme_submit_admin_cmd(dev, &c, NULL);
278 }
279 
280 static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
281 {
282 	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
283 }
284 
285 static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
286 {
287 	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
288 }
289 
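/*
 * Enabling/disabling the controller toggles CC.EN and then waits for
 * CSTS.RDY to reflect the new state; nvme_wait_ready() bounds the wait
 * using the CAP.TO field (in 500 ms units).
 */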
290 static int nvme_enable_ctrl(struct nvme_dev *dev)
291 {
292 	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
293 	dev->ctrl_config |= NVME_CC_ENABLE;
294 	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
295 
296 	return nvme_wait_ready(dev, true);
297 }
298 
299 static int nvme_disable_ctrl(struct nvme_dev *dev)
300 {
301 	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
302 	dev->ctrl_config &= ~NVME_CC_ENABLE;
303 	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
304 
305 	return nvme_wait_ready(dev, false);
306 }
307 
308 static void nvme_free_queue(struct nvme_queue *nvmeq)
309 {
310 	free((void *)nvmeq->cqes);
311 	free(nvmeq->sq_cmds);
312 	free(nvmeq);
313 }
314 
315 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
316 {
317 	int i;
318 
319 	for (i = dev->queue_count - 1; i >= lowest; i--) {
320 		struct nvme_queue *nvmeq = dev->queues[i];
321 		dev->queue_count--;
322 		dev->queues[i] = NULL;
323 		nvme_free_queue(nvmeq);
324 	}
325 }
326 
327 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
328 {
329 	struct nvme_dev *dev = nvmeq->dev;
330 
331 	nvmeq->sq_tail = 0;
332 	nvmeq->cq_head = 0;
333 	nvmeq->cq_phase = 1;
334 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
335 	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
336 	flush_dcache_range((ulong)nvmeq->cqes,
337 			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
338 	dev->online_queues++;
339 }
340 
341 static int nvme_configure_admin_queue(struct nvme_dev *dev)
342 {
343 	int result;
344 	u32 aqa;
345 	u64 cap = dev->cap;
346 	struct nvme_queue *nvmeq;
347 	/* most architectures use 4KB as the page size */
348 	unsigned page_shift = 12;
349 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
350 	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
351 
352 	if (page_shift < dev_page_min) {
353 		debug("Device minimum page size (%u) too large for host (%u)\n",
354 		      1 << dev_page_min, 1 << page_shift);
355 		return -ENODEV;
356 	}
357 
358 	if (page_shift > dev_page_max) {
359 		debug("Device maximum page size (%u) smaller than host (%u)\n",
360 		      1 << dev_page_max, 1 << page_shift);
361 		page_shift = dev_page_max;
362 	}
363 
364 	result = nvme_disable_ctrl(dev);
365 	if (result < 0)
366 		return result;
367 
368 	nvmeq = dev->queues[NVME_ADMIN_Q];
369 	if (!nvmeq) {
370 		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
371 		if (!nvmeq)
372 			return -ENOMEM;
373 	}
374 
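	/*
	 * The AQA register takes the admin submission queue size in its low
	 * half-word and the admin completion queue size in its high half-word,
	 * both as 0's based values, hence the replication below.
	 */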
375 	aqa = nvmeq->q_depth - 1;
376 	aqa |= aqa << 16;
378 
379 	dev->page_size = 1 << page_shift;
380 
381 	dev->ctrl_config = NVME_CC_CSS_NVM;
382 	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
383 	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
384 	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
385 
386 	writel(aqa, &dev->bar->aqa);
387 	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
388 	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);
389 
390 	result = nvme_enable_ctrl(dev);
391 	if (result)
392 		goto free_nvmeq;
393 
394 	nvmeq->cq_vector = 0;
395 
396 	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);
397 
398 	return result;
399 
400  free_nvmeq:
401 	nvme_free_queues(dev, 0);
402 
403 	return result;
404 }
405 
406 static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
407 			    struct nvme_queue *nvmeq)
408 {
409 	struct nvme_command c;
410 	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
411 
412 	memset(&c, 0, sizeof(c));
413 	c.create_cq.opcode = nvme_admin_create_cq;
414 	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
415 	c.create_cq.cqid = cpu_to_le16(qid);
416 	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
417 	c.create_cq.cq_flags = cpu_to_le16(flags);
418 	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
419 
420 	return nvme_submit_admin_cmd(dev, &c, NULL);
421 }
422 
423 static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
424 			    struct nvme_queue *nvmeq)
425 {
426 	struct nvme_command c;
427 	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
428 
429 	memset(&c, 0, sizeof(c));
430 	c.create_sq.opcode = nvme_admin_create_sq;
431 	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
432 	c.create_sq.sqid = cpu_to_le16(qid);
433 	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
434 	c.create_sq.sq_flags = cpu_to_le16(flags);
435 	c.create_sq.cqid = cpu_to_le16(qid);
436 
437 	return nvme_submit_admin_cmd(dev, &c, NULL);
438 }
439 
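/*
 * Issue an Identify admin command. The CNS values used by this driver are
 * 0 (identify the namespace given by @nsid, see nvme_blk_probe()) and
 * 1 (identify the controller, see nvme_get_info_from_identify()); the 4 KiB
 * result is DMAed into the buffer at @dma_addr.
 */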
440 int nvme_identify(struct nvme_dev *dev, unsigned nsid,
441 		  unsigned cns, dma_addr_t dma_addr)
442 {
443 	struct nvme_command c;
444 	u32 page_size = dev->page_size;
445 	int offset = dma_addr & (page_size - 1);
446 	int length = sizeof(struct nvme_id_ctrl);
447 	int ret;
448 
449 	memset(&c, 0, sizeof(c));
450 	c.identify.opcode = nvme_admin_identify;
451 	c.identify.nsid = cpu_to_le32(nsid);
452 	c.identify.prp1 = cpu_to_le64(dma_addr);
453 
454 	length -= (page_size - offset);
455 	if (length <= 0) {
456 		c.identify.prp2 = 0;
457 	} else {
458 		dma_addr += (page_size - offset);
459 		c.identify.prp2 = cpu_to_le64(dma_addr);
460 	}
461 
462 	c.identify.cns = cpu_to_le32(cns);
463 
464 	invalidate_dcache_range(dma_addr,
465 				dma_addr + sizeof(struct nvme_id_ctrl));
466 
467 	ret = nvme_submit_admin_cmd(dev, &c, NULL);
468 	if (!ret)
469 		invalidate_dcache_range(dma_addr,
470 					dma_addr + sizeof(struct nvme_id_ctrl));
471 
472 	return ret;
473 }
474 
475 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
476 		      dma_addr_t dma_addr, u32 *result)
477 {
478 	struct nvme_command c;
479 
480 	memset(&c, 0, sizeof(c));
481 	c.features.opcode = nvme_admin_get_features;
482 	c.features.nsid = cpu_to_le32(nsid);
483 	c.features.prp1 = cpu_to_le64(dma_addr);
484 	c.features.fid = cpu_to_le32(fid);
485 
486 	/*
487 	 * TODO: add cache invalidate operation when the size of
488 	 * the DMA buffer is known
489 	 */
490 
491 	return nvme_submit_admin_cmd(dev, &c, result);
492 }
493 
494 int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
495 		      dma_addr_t dma_addr, u32 *result)
496 {
497 	struct nvme_command c;
498 
499 	memset(&c, 0, sizeof(c));
500 	c.features.opcode = nvme_admin_set_features;
501 	c.features.prp1 = cpu_to_le64(dma_addr);
502 	c.features.fid = cpu_to_le32(fid);
503 	c.features.dword11 = cpu_to_le32(dword11);
504 
505 	/*
506 	 * TODO: add cache flush operation when the size of
507 	 * the DMA buffer is known
508 	 */
509 
510 	return nvme_submit_admin_cmd(dev, &c, result);
511 }
512 
513 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
514 {
515 	struct nvme_dev *dev = nvmeq->dev;
516 	int result;
517 
518 	nvmeq->cq_vector = qid - 1;
519 	result = nvme_alloc_cq(dev, qid, nvmeq);
520 	if (result < 0)
521 		goto release_cq;
522 
523 	result = nvme_alloc_sq(dev, qid, nvmeq);
524 	if (result < 0)
525 		goto release_sq;
526 
527 	nvme_init_queue(nvmeq, qid);
528 
529 	return result;
530 
531  release_sq:
532 	nvme_delete_sq(dev, qid);
533  release_cq:
534 	nvme_delete_cq(dev, qid);
535 
536 	return result;
537 }
538 
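/*
 * Set Features / Number of Queues: dword11 carries the requested submission
 * and completion queue counts (0's based, NSQR | NCQR << 16) and the
 * completion's dword0 returns what the controller actually granted, so the
 * usable I/O queue count is min(NSQA, NCQA) + 1.
 */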
539 static int nvme_set_queue_count(struct nvme_dev *dev, int count)
540 {
541 	int status;
542 	u32 result;
543 	u32 q_count = (count - 1) | ((count - 1) << 16);
544 
545 	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
546 			q_count, 0, &result);
547 
548 	if (status < 0)
549 		return status;
550 	if (status > 1)
551 		return 0;
552 
553 	return min(result & 0xffff, result >> 16) + 1;
554 }
555 
556 static void nvme_create_io_queues(struct nvme_dev *dev)
557 {
558 	unsigned int i;
559 
560 	for (i = dev->queue_count; i <= dev->max_qid; i++)
561 		if (!nvme_alloc_queue(dev, i, dev->q_depth))
562 			break;
563 
564 	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
565 		if (nvme_create_queue(dev->queues[i], i))
566 			break;
567 }
568 
569 static int nvme_setup_io_queues(struct nvme_dev *dev)
570 {
571 	int nr_io_queues;
572 	int result;
573 
574 	nr_io_queues = 1;
575 	result = nvme_set_queue_count(dev, nr_io_queues);
576 	if (result <= 0)
577 		return result;
578 
579 	dev->max_qid = nr_io_queues;
580 
581 	/* Free previously allocated queues */
582 	nvme_free_queues(dev, nr_io_queues + 1);
583 	nvme_create_io_queues(dev);
584 
585 	return 0;
586 }
587 
588 static int nvme_get_info_from_identify(struct nvme_dev *dev)
589 {
590 	struct nvme_id_ctrl *ctrl;
591 	int ret;
592 	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;
593 
594 	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
595 	if (!ctrl)
596 		return -ENOMEM;
597 
598 	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
599 	if (ret) {
600 		free(ctrl);
601 		return -EIO;
602 	}
603 
604 	dev->nn = le32_to_cpu(ctrl->nn);
605 	dev->vwc = ctrl->vwc;
606 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
607 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
608 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
609 	if (ctrl->mdts)
610 		dev->max_transfer_shift = (ctrl->mdts + shift);
611 	else {
612 		/*
613 		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
614 		 * data transfer size between the host and the controller. The
615 		 * host should not submit a command that exceeds this transfer
616 		 * size. The value is in units of the minimum memory page size
617 		 * and is reported as a power of two (2^n).
618 		 *
619 		 * The spec also says: a value of 0h indicates no restrictions
620 		 * on transfer size. But in nvme_blk_read/write() below we have
621 		 * the following algorithm for maximum number of logic blocks
622 		 * per transfer:
623 		 *
624 		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
625 		 *
626 		 * In order for lbas not to overflow, the maximum number is 15
627 		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
628 		 * Let's use 20 which provides 1MB size.
629 		 */
630 		dev->max_transfer_shift = 20;
631 	}
632 
633 	free(ctrl);
634 	return 0;
635 }
636 
637 int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
638 {
639 	struct nvme_ns *ns = dev_get_priv(udev);
640 
641 	if (ns_id)
642 		*ns_id = ns->ns_id;
643 	if (eui64)
644 		memcpy(eui64, ns->eui64, sizeof(ns->eui64));
645 
646 	return 0;
647 }
648 
649 int nvme_scan_namespace(void)
650 {
651 	struct uclass *uc;
652 	struct udevice *dev;
653 	int ret;
654 
655 	ret = uclass_get(UCLASS_NVME, &uc);
656 	if (ret)
657 		return ret;
658 
659 	uclass_foreach_dev(dev, uc) {
660 		ret = device_probe(dev);
661 		if (ret)
662 			return ret;
663 	}
664 
665 	return 0;
666 }
667 
668 static int nvme_blk_probe(struct udevice *udev)
669 {
670 	struct nvme_dev *ndev = dev_get_priv(udev->parent);
671 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
672 	struct nvme_ns *ns = dev_get_priv(udev);
673 	u8 flbas;
674 	struct pci_child_platdata *pplat;
675 	struct nvme_id_ns *id;
676 
677 	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
678 	if (!id)
679 		return -ENOMEM;
680 
681 	memset(ns, 0, sizeof(*ns));
682 	ns->dev = ndev;
683 	/* extract the namespace id from the block device name */
684 	ns->ns_id = trailing_strtol(udev->name) + 1;
685 	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
686 		free(id);
687 		return -EIO;
688 	}
689 
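	/*
	 * FLBAS selects which of the namespace's LBA formats is in use and
	 * lbaf[n].ds gives log2 of that format's data block size, which is
	 * what the block descriptor below is derived from.
	 */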
690 	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
691 	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
692 	ns->flbas = flbas;
693 	ns->lba_shift = id->lbaf[flbas].ds;
694 	ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
695 	ns->mode_select_block_len = 1 << ns->lba_shift;
696 	list_add(&ns->list, &ndev->namespaces);
697 
698 	desc->lba = ns->mode_select_num_blocks;
699 	desc->log2blksz = ns->lba_shift;
700 	desc->blksz = 1 << ns->lba_shift;
701 	desc->bdev = udev;
702 	pplat = dev_get_parent_platdata(udev->parent);
703 	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
704 	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
705 	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
706 	part_init(desc);
707 
708 	free(id);
709 	return 0;
710 }
711 
712 static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
713 			 lbaint_t blkcnt, void *buffer, bool read)
714 {
715 	struct nvme_ns *ns = dev_get_priv(udev);
716 	struct nvme_dev *dev = ns->dev;
717 	struct nvme_command c;
718 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
719 	int status;
720 	u64 prp2;
721 	u64 total_len = blkcnt << desc->log2blksz;
722 	u64 temp_len = total_len;
723 
724 	u64 slba = blknr;
725 	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
726 	u64 total_lbas = blkcnt;
727 
728 	flush_dcache_range((unsigned long)buffer,
729 			   (unsigned long)buffer + total_len);
730 
731 	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
732 	c.rw.flags = 0;
733 	c.rw.nsid = cpu_to_le32(ns->ns_id);
734 	c.rw.control = 0;
735 	c.rw.dsmgmt = 0;
736 	c.rw.reftag = 0;
737 	c.rw.apptag = 0;
738 	c.rw.appmask = 0;
739 	c.rw.metadata = 0;
740 
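	/*
	 * Issue the request in chunks of at most "lbas" logical blocks (the
	 * cap derived from the controller's MDTS) and rebuild the PRP list
	 * for each chunk before handing it to the I/O queue.
	 */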
741 	while (total_lbas) {
742 		if (total_lbas < lbas) {
743 			lbas = (u16)total_lbas;
744 			total_lbas = 0;
745 		} else {
746 			total_lbas -= lbas;
747 		}
748 
749 		if (nvme_setup_prps(dev, &prp2,
750 				    lbas << ns->lba_shift, (ulong)buffer))
751 			return -EIO;
752 		c.rw.slba = cpu_to_le64(slba);
753 		slba += lbas;
754 		c.rw.length = cpu_to_le16(lbas - 1);
755 		c.rw.prp1 = cpu_to_le64((ulong)buffer);
756 		c.rw.prp2 = cpu_to_le64(prp2);
757 		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
758 				&c, NULL, IO_TIMEOUT);
759 		if (status)
760 			break;
761 		temp_len -= (u32)lbas << ns->lba_shift;
762 		buffer += lbas << ns->lba_shift;
763 	}
764 
765 	if (read)
766 		invalidate_dcache_range((unsigned long)buffer,
767 					(unsigned long)buffer + total_len);
768 
769 	return (total_len - temp_len) >> desc->log2blksz;
770 }
771 
772 static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
773 			   lbaint_t blkcnt, void *buffer)
774 {
775 	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
776 }
777 
778 static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
779 			    lbaint_t blkcnt, const void *buffer)
780 {
781 	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
782 }
783 
784 static const struct blk_ops nvme_blk_ops = {
785 	.read	= nvme_blk_read,
786 	.write	= nvme_blk_write,
787 };
788 
789 U_BOOT_DRIVER(nvme_blk) = {
790 	.name	= "nvme-blk",
791 	.id	= UCLASS_BLK,
792 	.probe	= nvme_blk_probe,
793 	.ops	= &nvme_blk_ops,
794 	.priv_auto_alloc_size = sizeof(struct nvme_ns),
795 };
796 
797 static int nvme_bind(struct udevice *udev)
798 {
799 	static int ndev_num;
800 	char name[20];
801 
802 	sprintf(name, "nvme#%d", ndev_num++);
803 
804 	return device_set_name(udev, name);
805 }
806 
807 static int nvme_probe(struct udevice *udev)
808 {
809 	int ret;
810 	struct nvme_dev *ndev = dev_get_priv(udev);
811 
812 	ndev->instance = trailing_strtol(udev->name);
813 
814 	INIT_LIST_HEAD(&ndev->namespaces);
815 	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
816 			PCI_REGION_MEM);
817 	if (readl(&ndev->bar->csts) == -1) {
818 		ret = -ENODEV;
		printf("Error: %s: NVMe controller not responding\n",
		       udev->name);
820 		goto free_nvme;
821 	}
822 
823 	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
824 	if (!ndev->queues) {
825 		ret = -ENOMEM;
826 		printf("Error: %s: Out of memory!\n", udev->name);
827 		goto free_nvme;
828 	}
829 	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
830 
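	/*
	 * CAP.MQES bounds the queue depth and CAP.DSTRD gives the doorbell
	 * stride; the doorbell registers start at BAR0 + 0x1000, with queue
	 * qid using dbs[2 * qid * db_stride] as its SQ tail doorbell and
	 * dbs[(2 * qid + 1) * db_stride] as its CQ head doorbell.
	 */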
831 	ndev->cap = nvme_readq(&ndev->bar->cap);
832 	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
833 	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
834 	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;
835 
836 	ret = nvme_configure_admin_queue(ndev);
837 	if (ret)
838 		goto free_queue;
839 
840 	/* Allocate after the page size is known */
841 	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
842 	if (!ndev->prp_pool) {
843 		ret = -ENOMEM;
844 		printf("Error: %s: Out of memory!\n", udev->name);
845 		goto free_nvme;
846 	}
847 	ndev->prp_entry_num = MAX_PRP_POOL >> 3;
848 
849 	ret = nvme_setup_io_queues(ndev);
850 	if (ret)
851 		goto free_queue;
852 
853 	nvme_get_info_from_identify(ndev);
854 
855 	return 0;
856 
857 free_queue:
858 	free((void *)ndev->queues);
859 free_nvme:
860 	return ret;
861 }
862 
863 U_BOOT_DRIVER(nvme) = {
864 	.name	= "nvme",
865 	.id	= UCLASS_NVME,
866 	.bind	= nvme_bind,
867 	.probe	= nvme_probe,
868 	.priv_auto_alloc_size = sizeof(struct nvme_dev),
869 };
870 
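/* Match any PCI device advertising the NVM Express class code (0x010802) */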
871 struct pci_device_id nvme_supported[] = {
872 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
873 	{}
874 };
875 
876 U_BOOT_PCI_DEVICE(nvme, nvme_supported);
877