Lines Matching +full:0 +full:ns (hits from the NVMe target file-backed namespace backend, drivers/nvme/target/io-cmd-file.c in the Linux kernel)
17 int nvmet_file_ns_revalidate(struct nvmet_ns *ns) in nvmet_file_ns_revalidate() argument
22 ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE, in nvmet_file_ns_revalidate()
25 ns->size = stat.size; in nvmet_file_ns_revalidate()
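
The hits above are the namespace size revalidation path: the backing file's current size is queried with vfs_getattr() and cached in ns->size. A minimal standalone sketch of the same query, assuming an already-opened struct file; demo_backing_file_size() is an illustrative helper, not a kernel API:

#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/stat.h>

/* Illustrative helper (not a kernel API): fetch the backing file's size. */
static int demo_backing_file_size(struct file *file, loff_t *size)
{
	struct kstat stat;
	int ret;

	/* Ask the filesystem for an up-to-date size rather than cached attributes. */
	ret = vfs_getattr(&file->f_path, &stat, STATX_SIZE,
			  AT_STATX_FORCE_SYNC);
	if (ret)
		return ret;

	*size = stat.size;
	return 0;
}
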
29 void nvmet_file_ns_disable(struct nvmet_ns *ns) in nvmet_file_ns_disable() argument
31 if (ns->file) { in nvmet_file_ns_disable()
32 if (ns->buffered_io) in nvmet_file_ns_disable()
34 mempool_destroy(ns->bvec_pool); in nvmet_file_ns_disable()
35 ns->bvec_pool = NULL; in nvmet_file_ns_disable()
36 kmem_cache_destroy(ns->bvec_cache); in nvmet_file_ns_disable()
37 ns->bvec_cache = NULL; in nvmet_file_ns_disable()
38 fput(ns->file); in nvmet_file_ns_disable()
39 ns->file = NULL; in nvmet_file_ns_disable()
43 int nvmet_file_ns_enable(struct nvmet_ns *ns) in nvmet_file_ns_enable() argument
48 if (!ns->buffered_io) in nvmet_file_ns_enable()
51 ns->file = filp_open(ns->device_path, flags, 0); in nvmet_file_ns_enable()
52 if (IS_ERR(ns->file)) { in nvmet_file_ns_enable()
53 ret = PTR_ERR(ns->file); in nvmet_file_ns_enable()
55 ns->device_path, ret); in nvmet_file_ns_enable()
56 ns->file = NULL; in nvmet_file_ns_enable()
60 ret = nvmet_file_ns_revalidate(ns); in nvmet_file_ns_enable()
68 ns->blksize_shift = min_t(u8, in nvmet_file_ns_enable()
69 file_inode(ns->file)->i_blkbits, 12); in nvmet_file_ns_enable()
71 ns->bvec_cache = kmem_cache_create("nvmet-bvec", in nvmet_file_ns_enable()
73 0, SLAB_HWCACHE_ALIGN, NULL); in nvmet_file_ns_enable()
74 if (!ns->bvec_cache) { in nvmet_file_ns_enable()
79 ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab, in nvmet_file_ns_enable()
80 mempool_free_slab, ns->bvec_cache); in nvmet_file_ns_enable()
82 if (!ns->bvec_pool) { in nvmet_file_ns_enable()
89 ns->size = 0; in nvmet_file_ns_enable()
90 ns->blksize_shift = 0; in nvmet_file_ns_enable()
91 nvmet_file_ns_disable(ns); in nvmet_file_ns_enable()
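
nvmet_file_ns_enable() above opens the backing file, caches the size and block-size shift, and then builds a slab cache of fixed-size bio_vec arrays fronted by a mempool; nvmet_file_ns_disable() tears the pair down. A standalone sketch of that cache-plus-mempool pattern, with illustrative names and object counts rather than the NVMET_* constants:

#include <linux/bvec.h>
#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

#define DEMO_MIN_MPOOL_OBJ	16	/* illustrative minimum reserve */
#define DEMO_MPOOL_BVEC		16	/* illustrative bvecs per object */

static struct kmem_cache *demo_bvec_cache;
static mempool_t *demo_bvec_pool;

static int demo_bvec_pool_setup(void)
{
	demo_bvec_cache = kmem_cache_create("demo-bvec",
			DEMO_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_bvec_cache)
		return -ENOMEM;

	/* mempool_alloc_slab/mempool_free_slab plug the cache into a mempool. */
	demo_bvec_pool = mempool_create(DEMO_MIN_MPOOL_OBJ, mempool_alloc_slab,
					mempool_free_slab, demo_bvec_cache);
	if (!demo_bvec_pool) {
		kmem_cache_destroy(demo_bvec_cache);
		demo_bvec_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void demo_bvec_pool_teardown(void)
{
	/* Both destructors tolerate NULL, as in the disable path above. */
	mempool_destroy(demo_bvec_pool);
	demo_bvec_pool = NULL;
	kmem_cache_destroy(demo_bvec_cache);
	demo_bvec_cache = NULL;
}
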
113 call_iter = req->ns->file->f_op->write_iter; in nvmet_file_submit_bvec()
116 call_iter = req->ns->file->f_op->read_iter; in nvmet_file_submit_bvec()
123 iocb->ki_filp = req->ns->file; in nvmet_file_submit_bvec()
124 iocb->ki_flags = ki_flags | iocb_flags(req->ns->file); in nvmet_file_submit_bvec()
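
nvmet_file_submit_bvec() above picks ->read_iter or ->write_iter from the backing file and drives it through a kiocb plus a bvec-backed iov_iter. A simplified, synchronous, read-only sketch of that call pattern; demo_read_bvecs() is an illustrative name, and ITER_DEST is the direction constant on recent kernels (older ones passed READ):

#include <linux/bvec.h>
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t demo_read_bvecs(struct file *file, loff_t pos,
			       struct bio_vec *bvec, unsigned long nr_segs,
			       size_t count)
{
	struct kiocb iocb;
	struct iov_iter iter;

	if (!file->f_op->read_iter)
		return -EINVAL;

	/* ITER_DEST: data flows from the file into the bvec pages. */
	iov_iter_bvec(&iter, ITER_DEST, bvec, nr_segs, count);

	init_sync_kiocb(&iocb, file);
	iocb.ki_pos = pos;

	return file->f_op->read_iter(&iocb, &iter);
}
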
138 mempool_free(req->f.bvec, req->ns->bvec_pool); in nvmet_file_io_done()
149 unsigned long bv_cnt = 0; in nvmet_file_execute_io()
151 size_t len = 0, total_len = 0; in nvmet_file_execute_io()
152 ssize_t ret = 0; in nvmet_file_execute_io()
160 pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift; in nvmet_file_execute_io()
161 if (unlikely(pos + req->transfer_len > req->ns->size)) { in nvmet_file_execute_io()
166 memset(&req->f.iocb, 0, sizeof(struct kiocb)); in nvmet_file_execute_io()
173 WARN_ON_ONCE((nr_bvec - 1) < 0); in nvmet_file_execute_io()
176 (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) { in nvmet_file_execute_io()
177 ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0); in nvmet_file_execute_io()
178 if (ret < 0) in nvmet_file_execute_io()
182 bv_cnt = 0; in nvmet_file_execute_io()
183 len = 0; in nvmet_file_execute_io()
226 nvmet_file_io_done(&req->f.iocb, ret, 0); in nvmet_file_execute_io()
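
The execute path above converts the command's starting LBA into a byte offset using the namespace block-size shift and rejects ranges that run past the backing file. A tiny sketch of that check; demo_range_ok() is an illustrative helper:

#include <linux/types.h>

static bool demo_range_ok(u64 slba, u32 transfer_len, u8 blksize_shift,
			  loff_t ns_size)
{
	loff_t pos = (loff_t)(slba << blksize_shift);

	/* The I/O must end at or before the end of the backing file. */
	return pos + transfer_len <= ns_size;
}
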
234 nvmet_file_execute_io(req, 0); in nvmet_file_buffered_io_work()
251 nvmet_req_complete(req, 0); in nvmet_file_execute_rw()
263 req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL); in nvmet_file_execute_rw()
268 if (req->ns->buffered_io) { in nvmet_file_execute_rw()
270 (req->ns->file->f_mode & FMODE_NOWAIT) && in nvmet_file_execute_rw()
275 nvmet_file_execute_io(req, 0); in nvmet_file_execute_rw()
280 return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1)); in nvmet_file_flush()
292 if (!nvmet_check_transfer_len(req, 0)) in nvmet_file_execute_flush()
303 u16 status = 0; in nvmet_file_execute_discard()
307 for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) { in nvmet_file_execute_discard()
313 offset = le64_to_cpu(range.slba) << req->ns->blksize_shift; in nvmet_file_execute_discard()
315 len <<= req->ns->blksize_shift; in nvmet_file_execute_discard()
316 if (offset + len > req->ns->size) { in nvmet_file_execute_discard()
322 ret = vfs_fallocate(req->ns->file, mode, offset, len); in nvmet_file_execute_discard()
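
nvmet_file_execute_discard() above maps each DSM range to a byte range and deallocates it with vfs_fallocate(). A standalone sketch of that punch-hole call, with an illustrative helper name:

#include <linux/falloc.h>
#include <linux/fs.h>

static int demo_discard_range(struct file *file, loff_t offset, loff_t len)
{
	/* Deallocate the blocks but keep the apparent file size unchanged. */
	return vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			     offset, len);
}
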
345 nvmet_req_complete(req, 0); in nvmet_file_dsm_work()
367 offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift; in nvmet_file_write_zeroes_work()
369 req->ns->blksize_shift); in nvmet_file_write_zeroes_work()
371 if (unlikely(offset + len > req->ns->size)) { in nvmet_file_write_zeroes_work()
376 ret = vfs_fallocate(req->ns->file, mode, offset, len); in nvmet_file_write_zeroes_work()
377 nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0); in nvmet_file_write_zeroes_work()
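
The Write Zeroes path above is the same vfs_fallocate() pattern with a different mode: the range must read back as zeroes while the file size is preserved. Sketch, again with an illustrative name:

#include <linux/falloc.h>
#include <linux/fs.h>

static int demo_zero_range(struct file *file, loff_t offset, loff_t len)
{
	/* Convert the range to zeroes without changing the file size. */
	return vfs_fallocate(file, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			     offset, len);
}
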
382 if (!nvmet_check_transfer_len(req, 0)) in nvmet_file_execute_write_zeroes()
396 return 0; in nvmet_file_parse_io_cmd()
399 return 0; in nvmet_file_parse_io_cmd()
402 return 0; in nvmet_file_parse_io_cmd()
405 return 0; in nvmet_file_parse_io_cmd()
407 pr_err("unhandled cmd for file ns %d on qid %d\n", in nvmet_file_parse_io_cmd()
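
The final hits come from the command parser, which only selects an ->execute handler (returning 0) or logs and rejects unknown opcodes. A self-contained sketch of that dispatch shape; the demo_* handlers and the simplified signature are placeholders, while the opcode and status constants are the real ones from linux/nvme.h:

#include <linux/nvme.h>
#include <linux/printk.h>
#include <linux/types.h>

typedef void (*demo_exec_fn)(void);

/* Placeholders standing in for the nvmet_file_execute_* handlers. */
static void demo_execute_rw(void) { }
static void demo_execute_flush(void) { }
static void demo_execute_dsm(void) { }
static void demo_execute_write_zeroes(void) { }

static u16 demo_parse_io_cmd(u8 opcode, demo_exec_fn *execute)
{
	switch (opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		*execute = demo_execute_rw;
		return 0;
	case nvme_cmd_flush:
		*execute = demo_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		*execute = demo_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		*execute = demo_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled file ns opcode %#x\n", opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
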