// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

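/*
 * Derive the namespace's atomic-write and optimal-I/O Identify fields from
 * the backing device's queue limits.  All values are reported in logical
 * blocks and are 0's based: e.g. a 512B-logical/4096B-physical device has
 * eight logical blocks per physical block and reports 7 in NAWUN.
 */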
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
	const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
	/* Number of logical blocks per physical block. */
	const u32 lpp = ql->physical_block_size / ql->logical_block_size;
	/* Logical blocks per physical block, 0's based. */
	const __le16 lpp0b = to0based(lpp);

	/*
	 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
	 * NAWUPF, and NACWU are defined for this namespace and should be
	 * used by the host for this namespace instead of the AWUN, AWUPF,
	 * and ACWU fields in the Identify Controller data structure. If
	 * any of these fields is zero, the corresponding field from the
	 * Identify Controller data structure should be used instead.
	 */
	id->nsfeat |= 1 << 1;
	id->nawun = lpp0b;
	id->nawupf = lpp0b;
	id->nacwu = lpp0b;

	/*
	 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
	 * NOWS are defined for this namespace and should be used by
	 * the host for I/O optimization.
	 */
	id->nsfeat |= 1 << 4;
	/* NPWG = Namespace Preferred Write Granularity. 0's based */
	id->npwg = lpp0b;
	/* NPWA = Namespace Preferred Write Alignment. 0's based */
	id->npwa = id->npwg;
	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
	id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
	/* NPDA = Namespace Preferred Deallocate Alignment. 0's based */
	id->npda = id->npdg;
	/* NOWS = Namespace Optimal Write Size. 0's based */
	id->nows = to0based(ql->io_opt / ql->logical_block_size);
}

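/*
 * Pick up the block layer integrity profile of the backing device so the
 * namespace can expose end-to-end protection information.  Only the
 * T10-DIF Type 1 and Type 3 CRC profiles map to NVMe PI types; any other
 * profile leaves the namespace without metadata.
 */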
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
	struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

	if (bi) {
		ns->metadata_size = bi->tuple_size;
		if (bi->profile == &t10_pi_type1_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
		else if (bi->profile == &t10_pi_type3_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE3;
		else
			/* Unsupported metadata type */
			ns->metadata_size = 0;
	}
}

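/*
 * Open the backing block device for a namespace and cache its size and
 * logical block size.  -ENOTBLK is not logged because the core falls back
 * to the file-backed namespace implementation in that case.
 */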
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ns->pi_type = 0;
	ns->metadata_size = 0;
	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
		nvmet_bdev_ns_enable_integrity(ns);

	return 0;
}

void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}

void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
	ns->size = i_size_read(ns->bdev->bd_inode);
}

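/*
 * Translate a block layer status into an NVMe status code and record the
 * command field offset (error_loc) and failing LBA (error_slba) used when
 * the error is reported back to the host.
 */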
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M : 1 mapping between block layer errors
	 * and NVMe status codes (see nvme_error_status()). For consistency,
	 * when we reverse map we use the most appropriate NVMe status code
	 * from the group of NVMe status codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}

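/*
 * Common bio completion: translate the bio status, complete the request,
 * and free the bio unless it is the inline bio embedded in the request.
 */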
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

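/*
 * Attach a bio_integrity_payload to @bio and fill it with pages from the
 * request's metadata scatterlist, so host-supplied protection information
 * accompanies the data to and from the backing device.  Without
 * CONFIG_BLK_DEV_INTEGRITY this degenerates to a stub returning -EINVAL.
 */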
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct block_device *bdev = req->ns->bdev;
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(bdev);
	if (unlikely(!bi)) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO,
		min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
		     (bi->interval_exp - SECTOR_SHIFT));

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {
		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (unlikely(rc != len)) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}
#else
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	return -EINVAL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

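/*
 * Map the request's data scatterlist onto one or more chained bios and
 * submit them under a plug.  The inline bio embedded in the request is
 * used when the SGL fits in the inline bio_vec array; otherwise bios are
 * allocated and chained as they fill up.  When metadata_len is set, each
 * bio gets its protection information attached before it is submitted.
 */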
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	struct blk_plug plug;
	sector_t sector;
	int op, i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int iter_flags;
	unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;

	if (!nvmet_check_transfer_len(req, total_len))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op |= REQ_FUA;
		iter_flags = SG_MITER_TO_SG;
	} else {
		op = REQ_OP_READ;
		iter_flags = SG_MITER_FROM_SG;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		op |= REQ_NOMERGE;

	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	}
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = op;

	blk_start_plug(&plug);
	if (req->metadata_len)
		sg_miter_start(&prot_miter, req->metadata_sg,
			       req->metadata_sg_cnt, iter_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			if (req->metadata_len) {
				rc = nvmet_bdev_alloc_bip(req, bio,
							  &prot_miter);
				if (unlikely(rc)) {
					bio_io_error(bio);
					return;
				}
			}

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio->bi_opf = op;

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	if (req->metadata_len) {
		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
		if (unlikely(rc)) {
			bio_io_error(bio);
			return;
		}
	}

	submit_bio(bio);
	blk_finish_plug(&plug);
}

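/*
 * Handle the Flush command asynchronously: submit an empty preflush bio
 * and complete the request from nvmet_bio_done().
 */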
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

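/*
 * Synchronous flush helper: issues the flush inline and returns an NVMe
 * status code instead of completing the request.
 */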
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

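/*
 * Translate one DSM range into a discard on the backing device, chaining
 * it onto *bio.  -EOPNOTSUPP is treated as success since Deallocate is
 * advisory; other errors record the failing SLBA and are translated into
 * an NVMe status code.
 */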
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			nvmet_lba_to_sect(ns, range->slba),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);
	if (ret && ret != -EOPNOTSUPP) {
		req->error_slba = le64_to_cpu(range->slba);
		return errno_to_nvme_status(req, ret);
	}
	return NVME_SC_SUCCESS;
}

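/*
 * The DSM NR field is 0's based, so the loop processes nr + 1 ranges and
 * stops at the first failure.  If any discard bio was built, completion
 * is deferred to nvmet_bio_done(); otherwise the request is completed
 * here with the accumulated status.
 */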
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status)
			bio_io_error(bio);
		else
			submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

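/*
 * Only the Deallocate attribute does any work; the Integral Dataset Read
 * and Write hints are accepted and completed successfully without action.
 */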
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

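/*
 * Write Zeroes: NLB is 0's based, so a value of 0 zeroes a single logical
 * block.  If __blkdev_issue_zeroout() built a bio the request completes
 * asynchronously via nvmet_bio_done(); otherwise the errno is translated
 * and the request is completed here.
 */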
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	sector_t sector;
	sector_t nr_sector;
	int ret;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
	}
}

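/*
 * Select the execute handler for an I/O command on a block-device-backed
 * namespace.  metadata_len is only set for reads and writes when both the
 * controller and the namespace have protection information enabled.
 */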
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
			req->metadata_len = nvmet_rw_metadata_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}