xref: /rk3399_rockchip-uboot/drivers/nvme/nvme.c (revision 51341de713beb7205186c7e12b9cf7c38b5faf8a)
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <bouncebuf.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <dm/device-internal.h>
#include "nvme.h"

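/*
 * Both queues are kept at the NVMe-minimum depth of 2: a queue of depth N
 * can hold at most N - 1 outstanding commands, and this driver only ever
 * submits one command at a time and polls for its completion.
 */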
#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_CQ_ALLOCATION	ALIGN(NVME_CQ_SIZE(NVME_Q_DEPTH), \
				      ARCH_DMA_MINALIGN)
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512

enum nvme_queue_id {
	NVME_ADMIN_Q,
	NVME_IO_Q,
	NVME_Q_NUM,
};

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}

static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = page_size >> 3;
	u32 num_pages;

	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps + 1, prps_per_page);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages.  It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: failed to allocate prp_pool\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = prps_per_page * num_pages;
	}

	prp_pool = dev->prp_pool;
	i = 0;
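	/*
	 * Fill the PRP list. The pool pages are contiguous, so when a page
	 * fills up, the last written entry is moved to the first slot of
	 * the next page and replaced with a link to that page: the NVMe
	 * spec requires the final entry of a non-terminal PRP list page to
	 * point to the next list page.
	 */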
	while (nprps) {
		if (i == prps_per_page) {
			*(prp_pool + i) = *(prp_pool + i - 1);
			*(prp_pool + i - 1) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 1;
			prp_pool = (u64 *)((ulong)prp_pool + page_size);
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));

	return 0;
}

static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	/*
	 * Single CQ entries are always smaller than a cache line, so we
	 * can't invalidate them individually. However, CQ entries are
	 * read only by the CPU, so it's safe to always invalidate all of
	 * them, as the cache line should never become dirty.
	 */
	ulong start = (ulong)&nvmeq->cqes[0];
	ulong stop = start + NVME_CQ_ALLOCATION;

	invalidate_dcache_range(start, stop);

	return readw(&(nvmeq->cqes[index].status));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

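	/* Advance the tail with wrap-around and ring the SQ tail doorbell */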
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

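	/*
	 * Poll the completion queue. Each pass through the CQ toggles the
	 * expected phase tag, so a CQ entry whose phase bit (bit 0 of the
	 * status field) matches cq_phase is a newly posted completion.
	 */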
	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = readl(&(nvmeq->cqes[head].result));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

 free_queue:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	if (dev->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		mdelay(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(dev, false);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_ALLOCATION);
	dev->online_queues++;
}

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

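	/*
	 * AQA holds the 0-based admin submission (bits 11:0) and completion
	 * (bits 27:16) queue sizes; both use the same depth here.
	 */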
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

 free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			    struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			    struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	invalidate_dcache_range(dma_addr,
				dma_addr + sizeof(struct nvme_id_ctrl));

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = nvme_submit_admin_cmd(dev, &c, result);

	/*
	 * TODO: Add some cache invalidation when a DMA buffer is involved
	 * in the request, here and before the command gets submitted. The
	 * buffer size varies by feature, also some features use a different
	 * field in the command packet to hold the buffer address.
	 * Section 5.21.1 (Set Features command) in the NVMe specification
	 * details the buffer requirements for each feature.
	 *
	 * At the moment there is no user of this function.
	 */

	return ret;
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: Add a cache clean (aka flush) operation when a DMA buffer is
	 * involved in the request. The buffer size varies by feature, also
	 * some features use a different field in the command packet to hold
	 * the buffer address. Section 5.21.1 (Set Features command) in the
	 * NVMe specification details the buffer requirements for each
	 * feature.
	 * At the moment the only user of this function is not using
	 * any DMA buffer at all.
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

 release_sq:
	nvme_delete_sq(dev, qid);
 release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
			q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

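	/*
	 * Completion dword 0 returns the 0-based number of SQs allocated in
	 * the low 16 bits and CQs in the high 16 bits; report the smaller.
	 */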
	return min(result & 0xffff, result >> 16) + 1;
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}

static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);
	else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
		 * data transfer size between the host and the controller. The
		 * host should not submit a command that exceeds this transfer
		 * size. The value is in units of the minimum memory page size
		 * and is reported as a power of two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logic blocks
		 * per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}

int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}

int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret) {
			printf("Failed to probe '%s': err=%dE\n", dev->name,
				ret);
			/* Bail on any error other than -EBUSY, else keep trying */
			if (ret != -EBUSY)
				return ret;
		}
	}

	return 0;
}

static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct pci_child_platdata *pplat;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name);
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = le64_to_cpu(id->nsze);
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	pplat = dev_get_parent_platdata(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
	part_init(desc);

	free(id);
	return 0;
}

static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	uintptr_t temp_buffer;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	struct bounce_buffer bb;
	unsigned int bb_flags;
	int ret;

	if (read)
		bb_flags = GEN_BB_WRITE;
	else
		bb_flags = GEN_BB_READ;

	ret = bounce_buffer_start(&bb, buffer, total_len, bb_flags);
	if (ret)
		return -ENOMEM;
	temp_buffer = (unsigned long)bb.bounce_buffer;

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	/* Enable FUA for data integrity if vwc is enabled */
	if (dev->vwc)
		c.rw.control |= NVME_RW_FUA;

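	/*
	 * Issue the transfer in chunks of at most 'lbas' blocks, the
	 * controller's maximum data transfer size derived from MDTS above.
	 */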
	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, temp_buffer)) {
			bounce_buffer_stop(&bb);
			return -EIO;
		}
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64(temp_buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		temp_buffer += lbas << ns->lba_shift;
	}

	bounce_buffer_stop(&bb);

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}

static ulong nvme_blk_erase(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt)
{
	ALLOC_CACHE_ALIGN_BUFFER(struct nvme_dsm_range, range, sizeof(struct nvme_dsm_range));
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command cmnd;

	memset(&cmnd, 0, sizeof(cmnd));

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(blkcnt);
	range->slba = cpu_to_le64(blknr);

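	/* Dataset Management with the Deallocate attribute, NVMe's TRIM */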
	cmnd.dsm.opcode = nvme_cmd_dsm;
	cmnd.dsm.command_id = nvme_get_cmd_id();
	cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd.dsm.prp1 = cpu_to_le64((ulong)range);
	cmnd.dsm.nr = 0;
	cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
	cmnd.common.nsid = cpu_to_le32(ns->ns_id);

	flush_dcache_range((ulong)range,
			(ulong)range + sizeof(struct nvme_dsm_range));

	nvme_submit_cmd(dev->queues[NVME_IO_Q], &cmnd);
	return blkcnt;
}

static ulong nvme_blk_write_zeroes(struct udevice *udev, lbaint_t blknr,
				   lbaint_t blkcnt)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command cmnd;

	if (dev->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		nvme_blk_erase(udev, blknr, blkcnt);

	memset(&cmnd, 0, sizeof(cmnd));

	cmnd.write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd.write_zeroes.nsid = cpu_to_le32(ns->ns_id);
	cmnd.write_zeroes.slba = cpu_to_le64(blknr);
	cmnd.write_zeroes.length = cpu_to_le16(blkcnt - 1);
	cmnd.write_zeroes.control = 0;
	cmnd.write_zeroes.command_id = nvme_get_cmd_id();

	nvme_submit_cmd(dev->queues[NVME_IO_Q], &cmnd);
	return blkcnt;
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
	.write_zeroes = nvme_blk_write_zeroes,
	.erase	= nvme_blk_erase,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};

static int nvme_bind(struct udevice *udev)
{
	static int ndev_num;
	char name[20];

	sprintf(name, "nvme#%d", ndev_num++);

	return device_set_name(udev, name);
}

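/*
 * Per-device quirk list, apparently modelled on the Linux NVMe driver's
 * quirk table: matching vendor/device IDs OR the listed flags into
 * dev->quirks at probe time.
 */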
static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x0953),   /* Intel 750/P3500/P3600/P3700 */
	  .driver_data = NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a53),   /* Intel P3520 */
	  .driver_data = NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a54),   /* Intel P4500/P4600 */
	  .driver_data = NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a55),   /* Dell Express Flash P4600 */
	  .driver_data = NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
	  .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
	  .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0023),   /* WDC SN200 adapter */
	  .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
	  .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
	  .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
	  .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1987, 0x5013),   /* Phison E13 */
	  .driver_data = NVME_QUIRK_LIMIT_IOQD32, },
};

static void nvme_apply_quirks(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev);
	u16 vendor_id, device_id;
	unsigned int i;

	dm_pci_read_config16(udev, PCI_VENDOR_ID, &vendor_id);
	dm_pci_read_config16(udev, PCI_DEVICE_ID, &device_id);

	for (i = 0; i < ARRAY_SIZE(nvme_id_table); i++) {
		if (vendor_id == nvme_id_table[i].vendor &&
		    device_id == nvme_id_table[i].device) {
			ndev->quirks |= nvme_id_table[i].driver_data;
			debug("vid 0x%x, pid 0x%x apply quirks 0x%lx\n",
			      vendor_id, device_id, nvme_id_table[i].driver_data);
		}
	}
}

static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);
	struct nvme_id_ns *id;

	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
			PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -EBUSY;
		printf("Error: %s: Controller not ready!\n", udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	nvme_apply_quirks(udev);

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	if (ndev->quirks & NVME_QUIRK_LIMIT_IOQD32)
		ndev->q_depth = min_t(int, ndev->q_depth, 32);
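	/*
	 * Doorbell registers live at BAR0 offset 0x1000; the stride between
	 * consecutive doorbells is 4 << CAP.DSTRD bytes (db_stride is kept
	 * in 32-bit register units).
	 */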
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);

	/* Create a blk device for each namespace */

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id) {
		ret = -ENOMEM;
		goto free_queue;
	}

	for (int i = 1; i <= ndev->nn; i++) {
		struct udevice *ns_udev;
		char name[20];

		memset(id, 0, sizeof(*id));
		if (nvme_identify(ndev, i, 0, (dma_addr_t)(long)id)) {
			ret = -EIO;
			goto free_id;
		}

		/* skip inactive namespace */
		if (!id->nsze)
			continue;

		/*
		 * Encode the namespace id to the device name so that
		 * we can extract it when doing the probe.
		 */
		sprintf(name, "blk#%d", i);

		/* The real blksz and size will be set by nvme_blk_probe() */
		ret = blk_create_devicef(udev, "nvme-blk", name, IF_TYPE_NVME,
					 -1, 512, 0, &ns_udev);
		if (ret)
			goto free_id;
	}

	free(id);
	return 0;

free_id:
	free(id);
free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);