xref: /OK3568_Linux_fs/kernel/drivers/nvme/target/io-cmd-file.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include <linux/fs.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

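/*
 * Refresh the cached namespace size from the backing file's attributes.
 * AT_STATX_FORCE_SYNC makes remote filesystems return an up-to-date size.
 */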
int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
	struct kstat stat;
	int ret;

	ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
			  AT_STATX_FORCE_SYNC);
	if (!ret)
		ns->size = stat.size;
	return ret;
}

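/*
 * Tear down the file-backed namespace: drain any pending buffered-io
 * work, release the bvec mempool and slab cache, and drop the file
 * reference.
 */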
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

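/*
 * Open the backing file (O_DIRECT unless buffered_io is set), validate
 * its size, derive the exported block size, and set up the bvec slab
 * cache and mempool used as a fallback allocator under memory pressure.
 */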
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		ret = PTR_ERR(ns->file);
		pr_err("failed to open file %s: (%d)\n",
			ns->device_path, ret);
		ns->file = NULL;
		return ret;
	}

	ret = nvmet_file_ns_revalidate(ns);
	if (ret)
		goto err;

	/*
	 * i_blkbits can be greater than the universally accepted upper bound,
	 * so make sure we export a sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}

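/*
 * Build a bvec iterator over req->f.bvec and call the backing file's
 * ->read_iter/->write_iter directly.  Writes with the NVMe FUA bit set
 * are issued with IOCB_DSYNC.
 */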
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}

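/*
 * Completion handler, also used as the kiocb ki_complete callback for
 * asynchronous I/O.  Frees the bvec array (unless the inline array was
 * used) and maps a short transfer to an NVMe status code.
 */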
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->transfer_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

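/*
 * Issue the read/write.  When the bvec array came from the mempool and
 * is smaller than the request, fall back to submitting synchronous
 * chunks of up to NVMET_MAX_MPOOL_BVEC segments.  Returns false if the
 * caller should retry without IOCB_NOWAIT, true otherwise.
 */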
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle the
		 * IOCB_NOWAIT case separately and retry without IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}

static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

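/*
 * Entry point for read/write commands.  Picks the bvec allocation
 * strategy (inline array, kmalloc, or mempool fallback) and then either
 * tries a non-blocking buffered submission, punts to the buffered-io
 * workqueue, or issues the I/O in the caller's context.
 */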
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
		    (req->ns->file->f_mode & FMODE_NOWAIT) &&
		    nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

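/*
 * Deallocate each DSM range by punching a hole in the backing file.
 * Ranges beyond the namespace size fail with the NVMe equivalent of
 * -ENOSPC; -EOPNOTSUPP from the filesystem is ignored.
 */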
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

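/*
 * Implement Write Zeroes with FALLOC_FL_ZERO_RANGE on the backing file,
 * after bounds-checking the LBA range against the namespace size.
 */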
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

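/*
 * Dispatch table for file-backed I/O commands: set req->execute for the
 * opcodes we support and reject everything else as an invalid opcode.
 */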
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
413