/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <bouncebuf.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <dm/device-internal.h>
#include "nvme.h"

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define NVME_CQ_ALLOCATION	ALIGN(NVME_CQ_SIZE(NVME_Q_DEPTH), \
				      ARCH_DMA_MINALIGN)
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512

enum nvme_queue_id {
	NVME_ADMIN_Q,
	NVME_IO_Q,
	NVME_Q_NUM,
};

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}

static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = page_size >> 3;
	u32 num_pages;

	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps + 1, prps_per_page);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages.  It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool failed\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = prps_per_page * num_pages;
	}

	prp_pool = dev->prp_pool;
	i = 0;
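	/*
	 * Fill the PRP list with one page-sized entry per data page. When a
	 * pool page runs out of slots, its last entry is turned into a
	 * pointer chaining to the next pool page (NVMe PRP list format) and
	 * filling continues there.
	 */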
	while (nprps) {
		if (i == prps_per_page) {
			*(prp_pool + i) = *(prp_pool + i - 1);
			*(prp_pool + i - 1) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 1;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));

	return 0;
}

static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	/*
	 * Single CQ entries are always smaller than a cache line, so we
	 * can't invalidate them individually. However CQ entries are
	 * read only by the CPU, so it's safe to always invalidate all of them,
	 * as the cache line should never become dirty.
	 */
	ulong start = (ulong)&nvmeq->cqes[0];
	ulong stop = start + NVME_CQ_ALLOCATION;

	invalidate_dcache_range(start, stop);

	return readw(&(nvmeq->cqes[index].status));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

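	/*
	 * Poll the completion entry at the current CQ head: bit 0 of the
	 * status field is the phase tag, which the controller toggles on
	 * each pass through the queue, so a new completion has arrived once
	 * that bit matches the phase we expect.
	 */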
	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = readl(&(nvmeq->cqes[head].result));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
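	/*
	 * Each queue pair owns two doorbells in the stride-spaced doorbell
	 * array: the submission queue tail doorbell at index 2 * qid and
	 * the completion queue head doorbell right after it.
	 */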
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

 free_queue:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_ALLOCATION);
	dev->online_queues++;
}

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

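	/*
	 * AQA holds the admin submission and completion queue sizes as
	 * 0-based values: ASQS in bits 11:0 and ACQS in bits 27:16.
	 */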
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

 free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			    struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			    struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

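	/*
	 * The identify data structure is 4096 bytes. If the buffer is not
	 * page aligned it spills into a second page, so point PRP2 at the
	 * remainder; otherwise PRP1 alone covers the whole buffer.
	 */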
	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	invalidate_dcache_range(dma_addr,
				dma_addr + sizeof(struct nvme_id_ctrl));

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = nvme_submit_admin_cmd(dev, &c, result);

	/*
	 * TODO: Add some cache invalidation when a DMA buffer is involved
	 * in the request, here and before the command gets submitted. The
	 * buffer size varies by feature, also some features use a different
	 * field in the command packet to hold the buffer address.
	 * Section 5.21.1 (Set Features command) in the NVMe specification
	 * details the buffer requirements for each feature.
	 *
	 * At the moment there is no user of this function.
	 */

	return ret;
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: Add a cache clean (aka flush) operation when a DMA buffer is
	 * involved in the request. The buffer size varies by feature, also
	 * some features use a different field in the command packet to hold
	 * the buffer address. Section 5.21.1 (Set Features command) in the
	 * NVMe specification details the buffer requirements for each
	 * feature.
	 * At the moment the only user of this function is not using
	 * any DMA buffer at all.
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

 release_sq:
	nvme_delete_sq(dev, qid);
 release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
			q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

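	/*
	 * Completion dword 0 returns the number of submission queues
	 * allocated in bits 15:0 and completion queues in bits 31:16, both
	 * 0-based, so the usable queue count is the smaller of the two + 1.
	 */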
	return min(result & 0xffff, result >> 16) + 1;
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}

static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);
	else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
		 * data transfer size between the host and the controller. The
		 * host should not submit a command that exceeds this transfer
		 * size. The value is in units of the minimum memory page size
		 * and is reported as a power of two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logic blocks
		 * per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}

int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}

int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}

static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct pci_child_platdata *pplat;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name);
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
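	/*
	 * FLBAS selects which entry of the namespace's LBA format table is
	 * in use; that format's "ds" field is log2 of the logical block size.
	 */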
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = le64_to_cpu(id->nsze);
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	pplat = dev_get_parent_platdata(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
	part_init(desc);

	free(id);
	return 0;
}

static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	uintptr_t temp_buffer;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	struct bounce_buffer bb;
	unsigned int bb_flags;
	int ret;

	if (read)
		bb_flags = GEN_BB_WRITE;
	else
		bb_flags = GEN_BB_READ;

	ret = bounce_buffer_start(&bb, buffer, total_len, bb_flags);
	if (ret)
		return -ENOMEM;
	temp_buffer = (unsigned long)bb.bounce_buffer;

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	/* Enable FUA for data integrity if vwc is enabled */
	if (dev->vwc)
		c.rw.control |= NVME_RW_FUA;

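	/*
	 * Split the request into chunks of at most 'lbas' logical blocks
	 * (the controller's maximum transfer size), building a fresh PRP
	 * list and issuing one read/write command per chunk.
	 */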
	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, temp_buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64(temp_buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		temp_buffer += lbas << ns->lba_shift;
	}

	bounce_buffer_stop(&bb);

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};

static int nvme_bind(struct udevice *udev)
{
	static int ndev_num;
	char name[20];

	sprintf(name, "nvme#%d", ndev_num++);

	return device_set_name(udev, name);
}

static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);
	struct nvme_id_ns *id;

	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
			PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: Controller not accessible!\n", udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
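	/* Doorbell registers start at offset 0x1000 from the controller BAR */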
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);

	/* Create a blk device for each namespace */

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id) {
		ret = -ENOMEM;
		goto free_queue;
	}

	for (int i = 1; i <= ndev->nn; i++) {
		struct udevice *ns_udev;
		char name[20];

		memset(id, 0, sizeof(*id));
		if (nvme_identify(ndev, i, 0, (dma_addr_t)(long)id)) {
			ret = -EIO;
			goto free_id;
		}

		/* skip inactive namespace */
		if (!id->nsze)
			continue;

		/*
		 * Encode the namespace id to the device name so that
		 * we can extract it when doing the probe.
		 */
		sprintf(name, "blk#%d", i);

		/* The real blksz and size will be set by nvme_blk_probe() */
		ret = blk_create_devicef(udev, "nvme-blk", name, IF_TYPE_NVME,
					 -1, 512, 0, &ns_udev);
		if (ret)
			goto free_id;
	}

	free(id);
	return 0;

free_id:
	free(id);
free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);