Lines Matching refs:nvmeq

144 static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index) in nvme_read_completion_status() argument
152 ulong start = (ulong)&nvmeq->cqes[0]; in nvme_read_completion_status()
157 return readw(&(nvmeq->cqes[index].status)); in nvme_read_completion_status()
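The lines above are the heart of the completion-status read: the controller fills completion-queue entries by DMA, so the dcache covering the CQE buffer must be invalidated before the 16-bit status word is read, or the CPU could see a stale phase bit. Below is a minimal sketch of that read, assuming U-Boot's invalidate_dcache_range() and readw() helpers; the struct nvme_queue shown is a pared-down stand-in for the driver's own definition.

/* Pared-down bookkeeping for one queue pair; the driver's struct also
 * tracks the submission head, a command-id bitmap, and more.
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;	/* submission ring, read by the device */
	struct nvme_completion *cqes;	/* completion ring, written by the device */
	u32 __iomem *q_db;		/* SQ tail doorbell for this queue */
	u16 q_depth;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	s16 cq_vector;
	u8 cq_phase;
};

static u16 read_cqe_status_sketch(struct nvme_queue *nvmeq, u16 index)
{
	/* A single CQE is smaller than a cache line, so the whole CQ
	 * allocation is invalidated; the CPU never dirties these lines.
	 */
	ulong start = (ulong)&nvmeq->cqes[0];
	ulong stop = start + NVME_CQ_ALLOCATION;

	invalidate_dcache_range(start, stop);

	return readw(&nvmeq->cqes[index].status);
}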
166 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) in nvme_submit_cmd() argument
168 u16 tail = nvmeq->sq_tail; in nvme_submit_cmd()
170 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); in nvme_submit_cmd()
171 flush_dcache_range((ulong)&nvmeq->sq_cmds[tail], in nvme_submit_cmd()
172 (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd)); in nvme_submit_cmd()
174 if (++tail == nvmeq->q_depth) in nvme_submit_cmd()
176 writel(tail, nvmeq->q_db); in nvme_submit_cmd()
177 nvmeq->sq_tail = tail; in nvme_submit_cmd()
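Taken together, the nvme_submit_cmd() references show the whole submission path: the command is copied into the SQ slot at sq_tail, the dcache over that slot is flushed so the controller's DMA engine sees the data, the tail is advanced with wraparound at q_depth, and the new tail is written to the SQ doorbell. A sketch of that sequence, filling in the wrap assignment that the match list omits:

static void submit_cmd_sketch(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	/* Place the command in the ring and push it out of the dcache. */
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	/* Advance the tail, wrapping at the queue depth, then ring the
	 * SQ tail doorbell with the new value.
	 */
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}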
180 static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, in nvme_submit_sync_cmd() argument
184 u16 head = nvmeq->cq_head; in nvme_submit_sync_cmd()
185 u16 phase = nvmeq->cq_phase; in nvme_submit_sync_cmd()
191 nvme_submit_cmd(nvmeq, cmd); in nvme_submit_sync_cmd()
196 status = nvme_read_completion_status(nvmeq, head); in nvme_submit_sync_cmd()
209 if (++head == nvmeq->q_depth) { in nvme_submit_sync_cmd()
213 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_submit_sync_cmd()
214 nvmeq->cq_head = head; in nvme_submit_sync_cmd()
215 nvmeq->cq_phase = phase; in nvme_submit_sync_cmd()
221 *result = readl(&(nvmeq->cqes[head].result)); in nvme_submit_sync_cmd()
223 if (++head == nvmeq->q_depth) { in nvme_submit_sync_cmd()
227 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_submit_sync_cmd()
228 nvmeq->cq_head = head; in nvme_submit_sync_cmd()
229 nvmeq->cq_phase = phase; in nvme_submit_sync_cmd()
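nvme_submit_sync_cmd() submits a command and then polls the completion queue for the result. New entries are detected through the phase bit (bit 0 of the CQE status word): the controller inverts it on every pass through the ring, so an entry is fresh when its phase bit equals nvmeq->cq_phase. Once consumed, the head is advanced (flipping cq_phase on wrap) and written to the CQ head doorbell, which sits db_stride u32s past the SQ tail doorbell. A sketch of the polling loop; the real function also stamps a command ID and enforces a timeout, both omitted here:

static int sync_cmd_sketch(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			   u32 *result)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;

	nvme_submit_cmd(nvmeq, cmd);

	/* Spin until the CQE at 'head' carries the current phase,
	 * i.e. the controller has posted it (no timeout in this sketch).
	 */
	do {
		status = nvme_read_completion_status(nvmeq, head);
	} while ((status & 0x1) != phase);

	status >>= 1;			/* strip the phase bit */
	if (!status && result)
		*result = readl(&nvmeq->cqes[head].result);

	/* Consume the entry: advance the head, flip the phase on wrap,
	 * and ring the CQ head doorbell (db_stride u32s after q_db).
	 */
	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status ? -EIO : 0;
}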
244 struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq)); in nvme_alloc_queue() local
245 if (!nvmeq) in nvme_alloc_queue()
247 memset(nvmeq, 0, sizeof(*nvmeq)); in nvme_alloc_queue()
249 nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION); in nvme_alloc_queue()
250 if (!nvmeq->cqes) in nvme_alloc_queue()
252 memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth)); in nvme_alloc_queue()
254 nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth)); in nvme_alloc_queue()
255 if (!nvmeq->sq_cmds) in nvme_alloc_queue()
257 memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth)); in nvme_alloc_queue()
259 nvmeq->dev = dev; in nvme_alloc_queue()
261 nvmeq->cq_head = 0; in nvme_alloc_queue()
262 nvmeq->cq_phase = 1; in nvme_alloc_queue()
263 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
264 nvmeq->q_depth = depth; in nvme_alloc_queue()
265 nvmeq->qid = qid; in nvme_alloc_queue()
267 dev->queues[qid] = nvmeq; in nvme_alloc_queue()
269 return nvmeq; in nvme_alloc_queue()
272 free((void *)nvmeq->cqes); in nvme_alloc_queue()
274 free(nvmeq); in nvme_alloc_queue()
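nvme_alloc_queue() builds one queue pair: a zeroed nvme_queue bookkeeping struct, 4 KiB-aligned DMA buffers for the completion and submission rings, cq_phase primed to 1 (the value the controller writes on its first pass through the CQ), and a doorbell pointer at dbs[qid * 2 * db_stride], since every queue ID owns an adjacent SQ-tail/CQ-head doorbell pair. A sketch of the same flow with its error unwinding, assuming U-Boot's memalign()/free():

static struct nvme_queue *alloc_queue_sketch(struct nvme_dev *dev,
					     int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));

	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	/* Page-aligned rings; the controller is later given their addresses. */
	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_cqes;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;	/* controller posts phase 1 on its first pass */
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queues[qid] = nvmeq;

	return nvmeq;

free_cqes:
	free((void *)nvmeq->cqes);
free_nvmeq:
	free(nvmeq);
	return NULL;
}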
318 static void nvme_free_queue(struct nvme_queue *nvmeq) in nvme_free_queue() argument
320 free((void *)nvmeq->cqes); in nvme_free_queue()
321 free(nvmeq->sq_cmds); in nvme_free_queue()
322 free(nvmeq); in nvme_free_queue()
330 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_free_queues() local
333 nvme_free_queue(nvmeq); in nvme_free_queues()
337 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) in nvme_init_queue() argument
339 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
341 nvmeq->sq_tail = 0; in nvme_init_queue()
342 nvmeq->cq_head = 0; in nvme_init_queue()
343 nvmeq->cq_phase = 1; in nvme_init_queue()
344 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
345 memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
346 flush_dcache_range((ulong)nvmeq->cqes, in nvme_init_queue()
347 (ulong)nvmeq->cqes + NVME_CQ_ALLOCATION); in nvme_init_queue()
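nvme_init_queue() resets the software view of an already-allocated queue: sq_tail and cq_head back to 0, cq_phase back to 1, the doorbell pointer recomputed, and the CQ buffer zeroed and flushed so no stale phase bits survive a reset. The doorbell indexing is the part worth spelling out: the doorbell array starts at offset 0x1000 of BAR0, and each queue ID owns two doorbells (SQ tail, then CQ head), each db_stride u32s apart. The helpers below are purely illustrative names, not functions in the driver:

/* Doorbell layout, with db_stride in u32 units as in the listed code:
 *   SQ tail doorbell of queue qid: dbs[(2 * qid + 0) * db_stride]
 *   CQ head doorbell of queue qid: dbs[(2 * qid + 1) * db_stride]
 */
static u32 __iomem *sq_doorbell(struct nvme_dev *dev, u16 qid)
{
	return &dev->dbs[qid * 2 * dev->db_stride];
}

static u32 __iomem *cq_doorbell(struct nvme_dev *dev, u16 qid)
{
	return &dev->dbs[qid * 2 * dev->db_stride] + dev->db_stride;
}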
356 struct nvme_queue *nvmeq; in nvme_configure_admin_queue() local
378 nvmeq = dev->queues[NVME_ADMIN_Q]; in nvme_configure_admin_queue()
379 if (!nvmeq) { in nvme_configure_admin_queue()
380 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); in nvme_configure_admin_queue()
381 if (!nvmeq) in nvme_configure_admin_queue()
385 aqa = nvmeq->q_depth - 1; in nvme_configure_admin_queue()
396 nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq); in nvme_configure_admin_queue()
397 nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq); in nvme_configure_admin_queue()
403 nvmeq->cq_vector = 0; in nvme_configure_admin_queue()
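The admin queue is not created with commands; its geometry and ring addresses are programmed straight into controller registers. AQA carries the zero-based queue depths (admin SQ size in bits 11:0, admin CQ size in bits 27:16), while ASQ and ACQ take the base addresses of the submission and completion rings. A sketch of that register programming using the driver's nvme_writeq() helper; the surrounding controller disable/enable and the wait for readiness are omitted:

static void program_admin_queue_regs_sketch(struct nvme_dev *dev,
					    struct nvme_queue *nvmeq)
{
	u32 aqa;

	/* Same zero-based depth in the ASQS (11:0) and ACQS (27:16) fields. */
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);
}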
416 struct nvme_queue *nvmeq) in nvme_alloc_cq() argument
423 c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes); in nvme_alloc_cq()
425 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in nvme_alloc_cq()
427 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); in nvme_alloc_cq()
433 struct nvme_queue *nvmeq) in nvme_alloc_sq() argument
440 c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds); in nvme_alloc_sq()
442 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in nvme_alloc_sq()
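I/O queues, by contrast, are created by sending Create I/O Completion Queue and Create I/O Submission Queue admin commands. Each command carries the ring's base address in PRP1, the queue ID, the zero-based queue size, and flags (physically contiguous; the CQ also names its interrupt vector, the SQ the CQ it completes into). A sketch of the CQ variant built around the listed fields; the opcode and flag constants and the admin-command wrapper are assumed to match the driver's definitions:

static int alloc_cq_sketch(struct nvme_dev *dev, u16 qid,
			   struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	/* Post on the admin queue and wait for completion. */
	return nvme_submit_admin_cmd(dev, &c, NULL);
}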
537 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) in nvme_create_queue() argument
539 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
542 nvmeq->cq_vector = qid - 1; in nvme_create_queue()
543 result = nvme_alloc_cq(dev, qid, nvmeq); in nvme_create_queue()
547 result = nvme_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
551 nvme_init_queue(nvmeq, qid); in nvme_create_queue()
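nvme_create_queue() ties the pieces together for an I/O queue pair: the completion queue must exist before the submission queue that points at it, so the CQ is created first, then the SQ, and finally nvme_init_queue() resets the ring state for the new qid. A sketch of that ordering with the error unwinding simplified; nvme_delete_cq() is assumed to be a cleanup helper issuing the delete-CQ admin command:

static int create_queue_sketch(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;

	/* The CQ has to exist before the SQ that will complete into it. */
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		return result;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	/* Reset tail/head/phase and the doorbell pointer for this qid. */
	nvme_init_queue(nvmeq, qid);
	return 0;

release_cq:
	nvme_delete_cq(dev, qid);	/* assumed helper: delete-CQ admin command */
	return result;
}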