1*4882a593Smuzhiyun /*******************************************************************************
2*4882a593Smuzhiyun * Vhost kernel TCM fabric driver for virtio SCSI initiators
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * (C) Copyright 2010-2013 Datera, Inc.
5*4882a593Smuzhiyun * (C) Copyright 2010-2012 IBM Corp.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10*4882a593Smuzhiyun * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify
13*4882a593Smuzhiyun * it under the terms of the GNU General Public License as published by
14*4882a593Smuzhiyun * the Free Software Foundation; either version 2 of the License, or
15*4882a593Smuzhiyun * (at your option) any later version.
16*4882a593Smuzhiyun *
17*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful,
18*4882a593Smuzhiyun * but WITHOUT ANY WARRANTY; without even the implied warranty of
19*4882a593Smuzhiyun * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20*4882a593Smuzhiyun * GNU General Public License for more details.
21*4882a593Smuzhiyun *
22*4882a593Smuzhiyun ****************************************************************************/
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <linux/module.h>
25*4882a593Smuzhiyun #include <linux/moduleparam.h>
26*4882a593Smuzhiyun #include <generated/utsrelease.h>
27*4882a593Smuzhiyun #include <linux/utsname.h>
28*4882a593Smuzhiyun #include <linux/init.h>
29*4882a593Smuzhiyun #include <linux/slab.h>
30*4882a593Smuzhiyun #include <linux/kthread.h>
31*4882a593Smuzhiyun #include <linux/types.h>
32*4882a593Smuzhiyun #include <linux/string.h>
33*4882a593Smuzhiyun #include <linux/configfs.h>
34*4882a593Smuzhiyun #include <linux/ctype.h>
35*4882a593Smuzhiyun #include <linux/compat.h>
36*4882a593Smuzhiyun #include <linux/eventfd.h>
37*4882a593Smuzhiyun #include <linux/fs.h>
38*4882a593Smuzhiyun #include <linux/vmalloc.h>
39*4882a593Smuzhiyun #include <linux/miscdevice.h>
40*4882a593Smuzhiyun #include <asm/unaligned.h>
41*4882a593Smuzhiyun #include <scsi/scsi_common.h>
42*4882a593Smuzhiyun #include <scsi/scsi_proto.h>
43*4882a593Smuzhiyun #include <target/target_core_base.h>
44*4882a593Smuzhiyun #include <target/target_core_fabric.h>
45*4882a593Smuzhiyun #include <linux/vhost.h>
46*4882a593Smuzhiyun #include <linux/virtio_scsi.h>
47*4882a593Smuzhiyun #include <linux/llist.h>
48*4882a593Smuzhiyun #include <linux/bitmap.h>
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun #include "vhost.h"
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun #define VHOST_SCSI_VERSION "v0.1"
53*4882a593Smuzhiyun #define VHOST_SCSI_NAMELEN 256
54*4882a593Smuzhiyun #define VHOST_SCSI_MAX_CDB_SIZE 32
55*4882a593Smuzhiyun #define VHOST_SCSI_PREALLOC_SGLS 2048
56*4882a593Smuzhiyun #define VHOST_SCSI_PREALLOC_UPAGES 2048
57*4882a593Smuzhiyun #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
58*4882a593Smuzhiyun
/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving the others with
 * requests.
 */
63*4882a593Smuzhiyun #define VHOST_SCSI_WEIGHT 256
64*4882a593Smuzhiyun
/*
 * Tracks requests in flight on one virtqueue generation so a flush can
 * wait for all of them to drain.
 */
struct vhost_scsi_inflight {
	/* Completed by the final kref put; the flush path waits on this */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};
71*4882a593Smuzhiyun
/*
 * Per-request state for one virtio-scsi command, embedding the TCM
 * se_cmd descriptor. Instances live in the per-virtqueue scsi_cmds[]
 * array and are indexed by a tag from the virtqueue's sbitmap.
 */
struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	/* Number of T10-PI protection scatterlist entries */
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	/* Protection-information SGL, used when T10-PI is negotiated */
	struct scatterlist *tvc_prot_sgl;
	/* Preallocated page pointer array used while pinning guest memory */
	struct page **tvc_upages;
	/* Pointer to response header iovec */
	struct iovec tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};
115*4882a593Smuzhiyun
/* One I_T nexus between the virtio-scsi initiator and a TPG endpoint. */
struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};
120*4882a593Smuzhiyun
/* Target portal group: one endpoint (WWPN + tag) exported to a guest. */
struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt to explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
	/* Free list of preallocated vhost_scsi_tmf descriptors,
	 * protected by tv_tpg_mutex */
	struct list_head tmf_queue;
};
144*4882a593Smuzhiyun
/* Target port (WWPN) that portal groups hang off. */
struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};
155*4882a593Smuzhiyun
/* Hotplug/hotunplug (and similar) event queued for delivery to the guest. */
struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};
162*4882a593Smuzhiyun
/* Fixed virtqueue layout per the virtio-scsi spec:
 * vq 0 = control, vq 1 = event, vq 2.. = request (I/O) queues. */
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};
168*4882a593Smuzhiyun
/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	/* Base vhost features plus hotplug events and T10 protection info */
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun #define VHOST_SCSI_MAX_TARGET 256
176*4882a593Smuzhiyun #define VHOST_SCSI_MAX_VQ 128
177*4882a593Smuzhiyun #define VHOST_SCSI_MAX_EVENT 128
178*4882a593Smuzhiyun
/* Per-virtqueue state wrapping the generic vhost_virtqueue. */
struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	/* Preallocated command array, indexed by sbitmap tag */
	struct vhost_scsi_cmd *scsi_cmds;
	/* Tag allocator for scsi_cmds[] */
	struct sbitmap scsi_tags;
	/* Size of scsi_cmds[] / scsi_tags */
	int max_cmds;
};
196*4882a593Smuzhiyun
/* Per-device state for one vhost-scsi instance (one open of the chardev). */
struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
214*4882a593Smuzhiyun
/*
 * Task-management function request (e.g. LUN reset) from the control
 * virtqueue. Descriptors are preallocated and recycled through the
 * tpg's tmf_queue free list.
 */
struct vhost_scsi_tmf {
	/* Queued to the vhost worker to send the TMF response */
	struct vhost_work vwork;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;
	/* Link on tpg->tmf_queue while the descriptor is free */
	struct list_head queue_entry;

	struct se_cmd se_cmd;
	/* TCM TMR response code saved for the guest reply */
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	/* Guest iovec/descriptor state needed to post the response */
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};
229*4882a593Smuzhiyun
/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	/* Descriptor head index from vhost_get_vq_desc() */
	int head;
	/* Counts of out (guest->host) and in (host->guest) iovecs */
	unsigned int out, in;
	/* Sizes of the request and response headers being parsed */
	size_t req_size, rsp_size;
	/* Total byte lengths of the out and in iovec ranges */
	size_t out_size, in_size;
	/* Pointers into the copied request: target id and LUN bytes */
	u8 *target, *lunp;
	/* Copied request header (virtio_scsi_cmd_req or ctrl req) */
	void *req;
	struct iov_iter out_iter;
};
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun static struct workqueue_struct *vhost_scsi_workqueue;
244*4882a593Smuzhiyun
/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
246*4882a593Smuzhiyun static DEFINE_MUTEX(vhost_scsi_mutex);
247*4882a593Smuzhiyun static LIST_HEAD(vhost_scsi_list);
248*4882a593Smuzhiyun
vhost_scsi_done_inflight(struct kref * kref)249*4882a593Smuzhiyun static void vhost_scsi_done_inflight(struct kref *kref)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun struct vhost_scsi_inflight *inflight;
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun inflight = container_of(kref, struct vhost_scsi_inflight, kref);
254*4882a593Smuzhiyun complete(&inflight->comp);
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun
vhost_scsi_init_inflight(struct vhost_scsi * vs,struct vhost_scsi_inflight * old_inflight[])257*4882a593Smuzhiyun static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
258*4882a593Smuzhiyun struct vhost_scsi_inflight *old_inflight[])
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun struct vhost_scsi_inflight *new_inflight;
261*4882a593Smuzhiyun struct vhost_virtqueue *vq;
262*4882a593Smuzhiyun int idx, i;
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
265*4882a593Smuzhiyun vq = &vs->vqs[i].vq;
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun mutex_lock(&vq->mutex);
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun /* store old infight */
270*4882a593Smuzhiyun idx = vs->vqs[i].inflight_idx;
271*4882a593Smuzhiyun if (old_inflight)
272*4882a593Smuzhiyun old_inflight[i] = &vs->vqs[i].inflights[idx];
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun /* setup new infight */
275*4882a593Smuzhiyun vs->vqs[i].inflight_idx = idx ^ 1;
276*4882a593Smuzhiyun new_inflight = &vs->vqs[i].inflights[idx ^ 1];
277*4882a593Smuzhiyun kref_init(&new_inflight->kref);
278*4882a593Smuzhiyun init_completion(&new_inflight->comp);
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue * vq)285*4882a593Smuzhiyun vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun struct vhost_scsi_inflight *inflight;
288*4882a593Smuzhiyun struct vhost_scsi_virtqueue *svq;
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
291*4882a593Smuzhiyun inflight = &svq->inflights[svq->inflight_idx];
292*4882a593Smuzhiyun kref_get(&inflight->kref);
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun return inflight;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun
/* Drop one inflight reference; the final put fires the completion via
 * vhost_scsi_done_inflight(), unblocking a concurrent flush. */
static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}
301*4882a593Smuzhiyun
/* TCM fabric callback stub: always report "true" (e.g. demo-mode checks). */
static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}
306*4882a593Smuzhiyun
/* TCM fabric callback stub: always report "false". */
static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}
311*4882a593Smuzhiyun
vhost_scsi_get_fabric_wwn(struct se_portal_group * se_tpg)312*4882a593Smuzhiyun static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
313*4882a593Smuzhiyun {
314*4882a593Smuzhiyun struct vhost_scsi_tpg *tpg = container_of(se_tpg,
315*4882a593Smuzhiyun struct vhost_scsi_tpg, se_tpg);
316*4882a593Smuzhiyun struct vhost_scsi_tport *tport = tpg->tport;
317*4882a593Smuzhiyun
318*4882a593Smuzhiyun return &tport->tport_name[0];
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun
vhost_scsi_get_tpgt(struct se_portal_group * se_tpg)321*4882a593Smuzhiyun static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
322*4882a593Smuzhiyun {
323*4882a593Smuzhiyun struct vhost_scsi_tpg *tpg = container_of(se_tpg,
324*4882a593Smuzhiyun struct vhost_scsi_tpg, se_tpg);
325*4882a593Smuzhiyun return tpg->tport_tpgt;
326*4882a593Smuzhiyun }
327*4882a593Smuzhiyun
vhost_scsi_check_prot_fabric_only(struct se_portal_group * se_tpg)328*4882a593Smuzhiyun static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
329*4882a593Smuzhiyun {
330*4882a593Smuzhiyun struct vhost_scsi_tpg *tpg = container_of(se_tpg,
331*4882a593Smuzhiyun struct vhost_scsi_tpg, se_tpg);
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun return tpg->tv_fabric_prot_type;
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun
/* TCM fabric callback: single fixed instance index for this fabric. */
static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
340*4882a593Smuzhiyun
vhost_scsi_release_cmd_res(struct se_cmd * se_cmd)341*4882a593Smuzhiyun static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
342*4882a593Smuzhiyun {
343*4882a593Smuzhiyun struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
344*4882a593Smuzhiyun struct vhost_scsi_cmd, tvc_se_cmd);
345*4882a593Smuzhiyun struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
346*4882a593Smuzhiyun struct vhost_scsi_virtqueue, vq);
347*4882a593Smuzhiyun struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
348*4882a593Smuzhiyun int i;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun if (tv_cmd->tvc_sgl_count) {
351*4882a593Smuzhiyun for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
352*4882a593Smuzhiyun put_page(sg_page(&tv_cmd->tvc_sgl[i]));
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun if (tv_cmd->tvc_prot_sgl_count) {
355*4882a593Smuzhiyun for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
356*4882a593Smuzhiyun put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
360*4882a593Smuzhiyun vhost_scsi_put_inflight(inflight);
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun
vhost_scsi_release_tmf_res(struct vhost_scsi_tmf * tmf)363*4882a593Smuzhiyun static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
364*4882a593Smuzhiyun {
365*4882a593Smuzhiyun struct vhost_scsi_tpg *tpg = tmf->tpg;
366*4882a593Smuzhiyun struct vhost_scsi_inflight *inflight = tmf->inflight;
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun mutex_lock(&tpg->tv_tpg_mutex);
369*4882a593Smuzhiyun list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
370*4882a593Smuzhiyun mutex_unlock(&tpg->tv_tpg_mutex);
371*4882a593Smuzhiyun vhost_scsi_put_inflight(inflight);
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun
vhost_scsi_release_cmd(struct se_cmd * se_cmd)374*4882a593Smuzhiyun static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
377*4882a593Smuzhiyun struct vhost_scsi_tmf *tmf = container_of(se_cmd,
378*4882a593Smuzhiyun struct vhost_scsi_tmf, se_cmd);
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
381*4882a593Smuzhiyun } else {
382*4882a593Smuzhiyun struct vhost_scsi_cmd *cmd = container_of(se_cmd,
383*4882a593Smuzhiyun struct vhost_scsi_cmd, tvc_se_cmd);
384*4882a593Smuzhiyun struct vhost_scsi *vs = cmd->tvc_vhost;
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
387*4882a593Smuzhiyun vhost_work_queue(&vs->dev, &vs->vs_completion_work);
388*4882a593Smuzhiyun }
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun
/* TCM fabric callback: session indexes are not used by this fabric. */
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
395*4882a593Smuzhiyun
/* TCM write-pending callback: the guest data is already mapped, so
 * execute the command immediately rather than deferring. */
static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}
402*4882a593Smuzhiyun
/* TCM fabric callback: no per-initiator node attributes to configure. */
static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
}
407*4882a593Smuzhiyun
/* TCM fabric callback: no fabric-specific command state to report. */
static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
412*4882a593Smuzhiyun
/* TCM data-in callback: free the command, which queues the guest
 * response via vhost_scsi_release_cmd(). */
static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}
418*4882a593Smuzhiyun
/* TCM status callback: free the command, which queues the guest
 * response via vhost_scsi_release_cmd(). */
static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}
424*4882a593Smuzhiyun
vhost_scsi_queue_tm_rsp(struct se_cmd * se_cmd)425*4882a593Smuzhiyun static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
426*4882a593Smuzhiyun {
427*4882a593Smuzhiyun struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
428*4882a593Smuzhiyun se_cmd);
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun tmf->scsi_resp = se_cmd->se_tmr_req->response;
431*4882a593Smuzhiyun transport_generic_free_cmd(&tmf->se_cmd, 0);
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun
/* TCM aborted-task callback: nothing extra to clean up for this fabric. */
static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
}
438*4882a593Smuzhiyun
vhost_scsi_free_evt(struct vhost_scsi * vs,struct vhost_scsi_evt * evt)439*4882a593Smuzhiyun static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun vs->vs_events_nr--;
442*4882a593Smuzhiyun kfree(evt);
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi * vs,u32 event,u32 reason)446*4882a593Smuzhiyun vhost_scsi_allocate_evt(struct vhost_scsi *vs,
447*4882a593Smuzhiyun u32 event, u32 reason)
448*4882a593Smuzhiyun {
449*4882a593Smuzhiyun struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
450*4882a593Smuzhiyun struct vhost_scsi_evt *evt;
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
453*4882a593Smuzhiyun vs->vs_events_missed = true;
454*4882a593Smuzhiyun return NULL;
455*4882a593Smuzhiyun }
456*4882a593Smuzhiyun
457*4882a593Smuzhiyun evt = kzalloc(sizeof(*evt), GFP_KERNEL);
458*4882a593Smuzhiyun if (!evt) {
459*4882a593Smuzhiyun vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
460*4882a593Smuzhiyun vs->vs_events_missed = true;
461*4882a593Smuzhiyun return NULL;
462*4882a593Smuzhiyun }
463*4882a593Smuzhiyun
464*4882a593Smuzhiyun evt->event.event = cpu_to_vhost32(vq, event);
465*4882a593Smuzhiyun evt->event.reason = cpu_to_vhost32(vq, reason);
466*4882a593Smuzhiyun vs->vs_events_nr++;
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun return evt;
469*4882a593Smuzhiyun }
470*4882a593Smuzhiyun
/* TCM check-stop-free callback: drop the session command reference;
 * returns nonzero when that was the last reference. */
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun static void
vhost_scsi_do_evt_work(struct vhost_scsi * vs,struct vhost_scsi_evt * evt)477*4882a593Smuzhiyun vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
478*4882a593Smuzhiyun {
479*4882a593Smuzhiyun struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
480*4882a593Smuzhiyun struct virtio_scsi_event *event = &evt->event;
481*4882a593Smuzhiyun struct virtio_scsi_event __user *eventp;
482*4882a593Smuzhiyun unsigned out, in;
483*4882a593Smuzhiyun int head, ret;
484*4882a593Smuzhiyun
485*4882a593Smuzhiyun if (!vhost_vq_get_backend(vq)) {
486*4882a593Smuzhiyun vs->vs_events_missed = true;
487*4882a593Smuzhiyun return;
488*4882a593Smuzhiyun }
489*4882a593Smuzhiyun
490*4882a593Smuzhiyun again:
491*4882a593Smuzhiyun vhost_disable_notify(&vs->dev, vq);
492*4882a593Smuzhiyun head = vhost_get_vq_desc(vq, vq->iov,
493*4882a593Smuzhiyun ARRAY_SIZE(vq->iov), &out, &in,
494*4882a593Smuzhiyun NULL, NULL);
495*4882a593Smuzhiyun if (head < 0) {
496*4882a593Smuzhiyun vs->vs_events_missed = true;
497*4882a593Smuzhiyun return;
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun if (head == vq->num) {
500*4882a593Smuzhiyun if (vhost_enable_notify(&vs->dev, vq))
501*4882a593Smuzhiyun goto again;
502*4882a593Smuzhiyun vs->vs_events_missed = true;
503*4882a593Smuzhiyun return;
504*4882a593Smuzhiyun }
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
507*4882a593Smuzhiyun vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
508*4882a593Smuzhiyun vq->iov[out].iov_len);
509*4882a593Smuzhiyun vs->vs_events_missed = true;
510*4882a593Smuzhiyun return;
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun if (vs->vs_events_missed) {
514*4882a593Smuzhiyun event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
515*4882a593Smuzhiyun vs->vs_events_missed = false;
516*4882a593Smuzhiyun }
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun eventp = vq->iov[out].iov_base;
519*4882a593Smuzhiyun ret = __copy_to_user(eventp, event, sizeof(*event));
520*4882a593Smuzhiyun if (!ret)
521*4882a593Smuzhiyun vhost_add_used_and_signal(&vs->dev, vq, head, 0);
522*4882a593Smuzhiyun else
523*4882a593Smuzhiyun vq_err(vq, "Faulted on vhost_scsi_send_event\n");
524*4882a593Smuzhiyun }
525*4882a593Smuzhiyun
vhost_scsi_evt_work(struct vhost_work * work)526*4882a593Smuzhiyun static void vhost_scsi_evt_work(struct vhost_work *work)
527*4882a593Smuzhiyun {
528*4882a593Smuzhiyun struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
529*4882a593Smuzhiyun vs_event_work);
530*4882a593Smuzhiyun struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
531*4882a593Smuzhiyun struct vhost_scsi_evt *evt, *t;
532*4882a593Smuzhiyun struct llist_node *llnode;
533*4882a593Smuzhiyun
534*4882a593Smuzhiyun mutex_lock(&vq->mutex);
535*4882a593Smuzhiyun llnode = llist_del_all(&vs->vs_event_list);
536*4882a593Smuzhiyun llist_for_each_entry_safe(evt, t, llnode, list) {
537*4882a593Smuzhiyun vhost_scsi_do_evt_work(vs, evt);
538*4882a593Smuzhiyun vhost_scsi_free_evt(vs, evt);
539*4882a593Smuzhiyun }
540*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun
/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	/* One bit per virtqueue that gained a used entry; signalled once
	 * at the end instead of per command. */
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	/* Atomically take all completed commands queued by release_cmd. */
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		/* Build the virtio response from the TCM completion state. */
		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		/* Copy the response into the guest's "in" iovecs. */
		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			/* Record which vq this was so it gets signalled below. */
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	/* Signal each virtqueue that received at least one used entry. */
	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
597*4882a593Smuzhiyun
/*
 * Claim and initialize a pre-allocated command descriptor for one
 * virtio-scsi request on this virtqueue.
 *
 * A free slot index is taken from the per-vq tag bitmap and used to pick
 * an entry in svq->scsi_cmds.  The descriptor is zeroed, but the pointers
 * to its pre-allocated scatterlist / protection-scatterlist / pinned-page
 * arrays are preserved so they can be reused across commands.
 *
 * Returns the command on success, ERR_PTR(-EIO) if the nexus is not set
 * up, or ERR_PTR(-ENOMEM) if no tag is free.  The tag is presumably
 * released again by the command release path (not visible in this chunk)
 * — TODO confirm.
 */
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags, 0, false);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Zero the whole descriptor, but first save (and afterwards restore)
	 * the pointers to the pre-allocated arrays that outlive any single
	 * command.
	 */
	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	/* Caller has already bounded the CDB to VHOST_SCSI_MAX_CDB_SIZE. */
	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}
644*4882a593Smuzhiyun
/*
 * Map a user memory range into a scatterlist
 *
 * Pins up to VHOST_SCSI_PREALLOC_UPAGES pages of @iter into the
 * command's pre-allocated page array and fills @sgl with one entry per
 * page.  The iterator is advanced past the bytes consumed, so the
 * caller can invoke this repeatedly until the iter is drained.
 *
 * NOTE(review): assumes @sgl has room for every pinned page — the
 * callers size the sgl via vhost_scsi_calc_sgls(); confirm the two
 * bounds stay consistent.
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool write)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
				   VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	iov_iter_advance(iter, bytes);

	while (bytes) {
		/* Only the first page may start at a non-zero offset. */
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}
	return npages;
}
678*4882a593Smuzhiyun
679*4882a593Smuzhiyun static int
vhost_scsi_calc_sgls(struct iov_iter * iter,size_t bytes,int max_sgls)680*4882a593Smuzhiyun vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
681*4882a593Smuzhiyun {
682*4882a593Smuzhiyun int sgl_count = 0;
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun if (!iter || !iter->iov) {
685*4882a593Smuzhiyun pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
686*4882a593Smuzhiyun " present\n", __func__, bytes);
687*4882a593Smuzhiyun return -EINVAL;
688*4882a593Smuzhiyun }
689*4882a593Smuzhiyun
690*4882a593Smuzhiyun sgl_count = iov_iter_npages(iter, 0xffff);
691*4882a593Smuzhiyun if (sgl_count > max_sgls) {
692*4882a593Smuzhiyun pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
693*4882a593Smuzhiyun " max_sgls: %d\n", __func__, sgl_count, max_sgls);
694*4882a593Smuzhiyun return -EINVAL;
695*4882a593Smuzhiyun }
696*4882a593Smuzhiyun return sgl_count;
697*4882a593Smuzhiyun }
698*4882a593Smuzhiyun
/*
 * Map an entire iov_iter to the scatterlist @sg by calling
 * vhost_scsi_map_to_sgl() until the iterator is drained.
 *
 * On failure, every page reference taken by the earlier, successful
 * iterations is dropped before returning, so the caller never sees a
 * partially-pinned scatterlist.
 *
 * Returns 0 on success or a negative errno.
 */
static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	struct scatterlist *p = sg;	/* start of sgl, kept for unwind */
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
		if (ret < 0) {
			/* Unpin pages mapped by previous iterations. */
			while (p < sg) {
				struct page *page = sg_page(p++);
				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
	}
	return 0;
}
721*4882a593Smuzhiyun
/*
 * Map the optional T10-PI payload and the data payload of a request
 * into the command's pre-allocated scatterlists.
 *
 * @prot_bytes/@prot_iter: protection payload; skipped when @prot_bytes
 *                         is zero.
 * @data_bytes/@data_iter: data payload.
 *
 * Returns 0 on success or a negative errno.  On mapping failure the
 * corresponding *_sgl_count is reset to 0 so later teardown does not
 * walk entries that were never (or only partially) populated.
 */
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	/* DMA_FROM_DEVICE means the device writes into guest memory. */
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}
767*4882a593Smuzhiyun
vhost_scsi_to_tcm_attr(int attr)768*4882a593Smuzhiyun static int vhost_scsi_to_tcm_attr(int attr)
769*4882a593Smuzhiyun {
770*4882a593Smuzhiyun switch (attr) {
771*4882a593Smuzhiyun case VIRTIO_SCSI_S_SIMPLE:
772*4882a593Smuzhiyun return TCM_SIMPLE_TAG;
773*4882a593Smuzhiyun case VIRTIO_SCSI_S_ORDERED:
774*4882a593Smuzhiyun return TCM_ORDERED_TAG;
775*4882a593Smuzhiyun case VIRTIO_SCSI_S_HEAD:
776*4882a593Smuzhiyun return TCM_HEAD_TAG;
777*4882a593Smuzhiyun case VIRTIO_SCSI_S_ACA:
778*4882a593Smuzhiyun return TCM_ACA_TAG;
779*4882a593Smuzhiyun default:
780*4882a593Smuzhiyun break;
781*4882a593Smuzhiyun }
782*4882a593Smuzhiyun return TCM_SIMPLE_TAG;
783*4882a593Smuzhiyun }
784*4882a593Smuzhiyun
/*
 * Workqueue handler: submit a fully prepared vhost_scsi_cmd to the LIO
 * target core.  Runs in process context (queued by the vq handler).
 * If submission fails, a CHECK_CONDITION response is generated for the
 * initiator and the command is freed.
 */
static void vhost_scsi_submission_work(struct work_struct *work)
{
	struct vhost_scsi_cmd *cmd =
		container_of(work, struct vhost_scsi_cmd, work);
	struct vhost_scsi_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
	int rc;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			/*
			 * NOTE(review): presumably tells the target core to
			 * skip protection passthrough when the guest sent no
			 * PI payload — confirm against target core docs.
			 */
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			cmd->tvc_prot_sgl_count);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}
821*4882a593Smuzhiyun
/*
 * Complete a request with VIRTIO_SCSI_S_BAD_TARGET.  Used when the
 * addressed target does not exist or the handler mapped an error to
 * -EIO; the zeroed response is written into the first response iovec.
 */
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	/*
	 * NOTE(review): __copy_to_user() skips the access_ok() check;
	 * presumably the iovec was validated when the descriptor was
	 * fetched — confirm before relying on it.
	 */
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
840*4882a593Smuzhiyun
/*
 * Fetch the next available descriptor from @vq into @vc and initialize
 * the iov_iter over the outgoing (request) part.
 *
 * Return:
 *   0       - descriptor fetched; vc->head/out/in, vc->out_size,
 *             vc->in_size and vc->out_iter are valid
 *   -ENXIO  - no descriptor available or fetch error; caller should
 *             stop until the next kick
 *   -EAGAIN - new descriptors appeared while re-enabling notification;
 *             caller should retry immediately
 */
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new?  Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for a
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}
890*4882a593Smuzhiyun
891*4882a593Smuzhiyun static int
vhost_scsi_chk_size(struct vhost_virtqueue * vq,struct vhost_scsi_ctx * vc)892*4882a593Smuzhiyun vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
893*4882a593Smuzhiyun {
894*4882a593Smuzhiyun if (unlikely(vc->in_size < vc->rsp_size)) {
895*4882a593Smuzhiyun vq_err(vq,
896*4882a593Smuzhiyun "Response buf too small, need min %zu bytes got %zu",
897*4882a593Smuzhiyun vc->rsp_size, vc->in_size);
898*4882a593Smuzhiyun return -EINVAL;
899*4882a593Smuzhiyun } else if (unlikely(vc->out_size < vc->req_size)) {
900*4882a593Smuzhiyun vq_err(vq,
901*4882a593Smuzhiyun "Request buf too small, need min %zu bytes got %zu",
902*4882a593Smuzhiyun vc->req_size, vc->out_size);
903*4882a593Smuzhiyun return -EIO;
904*4882a593Smuzhiyun }
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun return 0;
907*4882a593Smuzhiyun }
908*4882a593Smuzhiyun
/*
 * Copy the virtio-scsi request header into vc->req, validate the LUN
 * address bytes, and resolve the addressed target port group.
 *
 * On success returns 0 and, when @tpgp is non-NULL, stores the tpg.
 * Returns -EIO when the header cannot be copied, the LUN field is
 * malformed, or the target does not exist (caller replies BAD_TARGET).
 */
static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg;

		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */

		/* READ_ONCE pairs with the endpoint setup/clear paths. */
		tpg = READ_ONCE(vs_tpg[*vc->target]);
		if (unlikely(!tpg)) {
			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
		} else {
			if (tpgp)
				*tpgp = tpg;
			ret = 0;
		}
	}

	return ret;
}
938*4882a593Smuzhiyun
vhost_buf_to_lun(u8 * lun_buf)939*4882a593Smuzhiyun static u16 vhost_buf_to_lun(u8 *lun_buf)
940*4882a593Smuzhiyun {
941*4882a593Smuzhiyun return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
942*4882a593Smuzhiyun }
943*4882a593Smuzhiyun
/*
 * Main handler for a SCSI command virtqueue.
 *
 * For each pending descriptor: fetch it, parse the (plain or T10-PI)
 * request header, derive data direction and payload iterators, map the
 * payload to scatterlists, and queue the command for submission to the
 * target core in process context.  Error handling per iteration is
 * driven by the error-code convention documented at the err: label.
 *
 * Runs with vq->mutex held throughout; the loop is bounded by
 * vhost_exceeds_weight() so one queue cannot starve the others.
 */
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceeding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceeded our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		/* Remember where the response must be written on completion. */
		cmd->tvc_resp_iov = vq->iov[vc.out];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		/*
		 * Dispatch cmd descriptor for cmwq execution in process
		 * context provided by vhost_scsi_workqueue. This also ensures
		 * cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
		queue_work(vhost_scsi_workqueue, &cmd->work);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun static void
vhost_scsi_send_tmf_resp(struct vhost_scsi * vs,struct vhost_virtqueue * vq,int in_iovs,int vq_desc,struct iovec * resp_iov,int tmf_resp_code)1161*4882a593Smuzhiyun vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1162*4882a593Smuzhiyun int in_iovs, int vq_desc, struct iovec *resp_iov,
1163*4882a593Smuzhiyun int tmf_resp_code)
1164*4882a593Smuzhiyun {
1165*4882a593Smuzhiyun struct virtio_scsi_ctrl_tmf_resp rsp;
1166*4882a593Smuzhiyun struct iov_iter iov_iter;
1167*4882a593Smuzhiyun int ret;
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun pr_debug("%s\n", __func__);
1170*4882a593Smuzhiyun memset(&rsp, 0, sizeof(rsp));
1171*4882a593Smuzhiyun rsp.response = tmf_resp_code;
1172*4882a593Smuzhiyun
1173*4882a593Smuzhiyun iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1176*4882a593Smuzhiyun if (likely(ret == sizeof(rsp)))
1177*4882a593Smuzhiyun vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
1178*4882a593Smuzhiyun else
1179*4882a593Smuzhiyun pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun
vhost_scsi_tmf_resp_work(struct vhost_work * work)1182*4882a593Smuzhiyun static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
1183*4882a593Smuzhiyun {
1184*4882a593Smuzhiyun struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
1185*4882a593Smuzhiyun vwork);
1186*4882a593Smuzhiyun int resp_code;
1187*4882a593Smuzhiyun
1188*4882a593Smuzhiyun if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
1189*4882a593Smuzhiyun resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
1190*4882a593Smuzhiyun else
1191*4882a593Smuzhiyun resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
1194*4882a593Smuzhiyun tmf->vq_desc, &tmf->resp_iov, resp_code);
1195*4882a593Smuzhiyun vhost_scsi_release_tmf_res(tmf);
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun
/*
 * Handle a virtio-scsi task-management request.
 *
 * Only LUN RESET is supported; any other subtype is rejected.  A
 * reserved per-tpg TMF descriptor is claimed under tv_tpg_mutex and the
 * reset is submitted to the target core; the guest-visible response is
 * sent later from vhost_scsi_tmf_resp_work() once the core completes
 * the TMR.  Every failure path answers FUNCTION_REJECTED immediately.
 */
static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	/* Claim one of the pre-allocated TMF descriptors for this tpg. */
	mutex_lock(&tpg->tv_tpg_mutex);
	if (list_empty(&tpg->tmf_queue)) {
		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
		mutex_unlock(&tpg->tv_tpg_mutex);
		goto send_reject;
	}

	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del_init(&tmf->queue_entry);
	mutex_unlock(&tpg->tv_tpg_mutex);

	/* Stash everything needed to complete the request later. */
	tmf->tpg = tpg;
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}
1251*4882a593Smuzhiyun
/*
 * Complete a virtio-scsi asynchronous-notification control request by
 * answering VIRTIO_SCSI_S_OK with an otherwise zeroed response
 * (event_actual = 0).
 */
static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}
1273*4882a593Smuzhiyun
/*
 * Service the virtio-scsi control virtqueue: drain the available ring,
 * decode each buffer as either a task-management (TMF) request or an
 * asynchronous-notification (AN) query/subscribe, and dispatch it.
 *
 * Called from the control-queue kick handler; holds vq->mutex for the
 * whole drain loop.
 */
static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	/* One control request; the leading __virtio32 selects the variant. */
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;	/* c counts handled requests for the weight check */

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		/*
		 * NOTE(review): this compares the raw __virtio32 against a
		 * host-endian constant, unlike the vhost32_to_cpu() switch
		 * above — equivalent on little-endian; confirm for BE hosts.
		 */
		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
		else
			vhost_scsi_send_an_resp(vs, vq, &vc);
err:
		/*
		 * ENXIO: No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO: Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}
1382*4882a593Smuzhiyun
vhost_scsi_ctl_handle_kick(struct vhost_work * work)1383*4882a593Smuzhiyun static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1384*4882a593Smuzhiyun {
1385*4882a593Smuzhiyun struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1386*4882a593Smuzhiyun poll.work);
1387*4882a593Smuzhiyun struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1388*4882a593Smuzhiyun
1389*4882a593Smuzhiyun pr_debug("%s: The handling func for control queue.\n", __func__);
1390*4882a593Smuzhiyun vhost_scsi_ctl_handle_vq(vs, vq);
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun static void
vhost_scsi_send_evt(struct vhost_scsi * vs,struct vhost_scsi_tpg * tpg,struct se_lun * lun,u32 event,u32 reason)1394*4882a593Smuzhiyun vhost_scsi_send_evt(struct vhost_scsi *vs,
1395*4882a593Smuzhiyun struct vhost_scsi_tpg *tpg,
1396*4882a593Smuzhiyun struct se_lun *lun,
1397*4882a593Smuzhiyun u32 event,
1398*4882a593Smuzhiyun u32 reason)
1399*4882a593Smuzhiyun {
1400*4882a593Smuzhiyun struct vhost_scsi_evt *evt;
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun evt = vhost_scsi_allocate_evt(vs, event, reason);
1403*4882a593Smuzhiyun if (!evt)
1404*4882a593Smuzhiyun return;
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun if (tpg && lun) {
1407*4882a593Smuzhiyun /* TODO: share lun setup code with virtio-scsi.ko */
1408*4882a593Smuzhiyun /*
1409*4882a593Smuzhiyun * Note: evt->event is zeroed when we allocate it and
1410*4882a593Smuzhiyun * lun[4-7] need to be zero according to virtio-scsi spec.
1411*4882a593Smuzhiyun */
1412*4882a593Smuzhiyun evt->event.lun[0] = 0x01;
1413*4882a593Smuzhiyun evt->event.lun[1] = tpg->tport_tpgt;
1414*4882a593Smuzhiyun if (lun->unpacked_lun >= 256)
1415*4882a593Smuzhiyun evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1416*4882a593Smuzhiyun evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun llist_add(&evt->list, &vs->vs_event_list);
1420*4882a593Smuzhiyun vhost_work_queue(&vs->dev, &vs->vs_event_work);
1421*4882a593Smuzhiyun }
1422*4882a593Smuzhiyun
vhost_scsi_evt_handle_kick(struct vhost_work * work)1423*4882a593Smuzhiyun static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1424*4882a593Smuzhiyun {
1425*4882a593Smuzhiyun struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1426*4882a593Smuzhiyun poll.work);
1427*4882a593Smuzhiyun struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun mutex_lock(&vq->mutex);
1430*4882a593Smuzhiyun if (!vhost_vq_get_backend(vq))
1431*4882a593Smuzhiyun goto out;
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun if (vs->vs_events_missed)
1434*4882a593Smuzhiyun vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1435*4882a593Smuzhiyun out:
1436*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
1437*4882a593Smuzhiyun }
1438*4882a593Smuzhiyun
vhost_scsi_handle_kick(struct vhost_work * work)1439*4882a593Smuzhiyun static void vhost_scsi_handle_kick(struct vhost_work *work)
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1442*4882a593Smuzhiyun poll.work);
1443*4882a593Smuzhiyun struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun vhost_scsi_handle_vq(vs, vq);
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
vhost_scsi_flush_vq(struct vhost_scsi * vs,int index)1448*4882a593Smuzhiyun static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1449*4882a593Smuzhiyun {
1450*4882a593Smuzhiyun vhost_poll_flush(&vs->vqs[index].vq.poll);
1451*4882a593Smuzhiyun }
1452*4882a593Smuzhiyun
/*
 * Drain all in-flight requests: install fresh per-vq inflight counters,
 * flush every vq's poll handler plus the completion and event works, then
 * wait until every request issued against the old counters has finished.
 *
 * Callers must hold dev mutex
 */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}
1480*4882a593Smuzhiyun
vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue * vq)1481*4882a593Smuzhiyun static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1482*4882a593Smuzhiyun {
1483*4882a593Smuzhiyun struct vhost_scsi_virtqueue *svq = container_of(vq,
1484*4882a593Smuzhiyun struct vhost_scsi_virtqueue, vq);
1485*4882a593Smuzhiyun struct vhost_scsi_cmd *tv_cmd;
1486*4882a593Smuzhiyun unsigned int i;
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun if (!svq->scsi_cmds)
1489*4882a593Smuzhiyun return;
1490*4882a593Smuzhiyun
1491*4882a593Smuzhiyun for (i = 0; i < svq->max_cmds; i++) {
1492*4882a593Smuzhiyun tv_cmd = &svq->scsi_cmds[i];
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun kfree(tv_cmd->tvc_sgl);
1495*4882a593Smuzhiyun kfree(tv_cmd->tvc_prot_sgl);
1496*4882a593Smuzhiyun kfree(tv_cmd->tvc_upages);
1497*4882a593Smuzhiyun }
1498*4882a593Smuzhiyun
1499*4882a593Smuzhiyun sbitmap_free(&svq->scsi_tags);
1500*4882a593Smuzhiyun kfree(svq->scsi_cmds);
1501*4882a593Smuzhiyun svq->scsi_cmds = NULL;
1502*4882a593Smuzhiyun }
1503*4882a593Smuzhiyun
vhost_scsi_setup_vq_cmds(struct vhost_virtqueue * vq,int max_cmds)1504*4882a593Smuzhiyun static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun struct vhost_scsi_virtqueue *svq = container_of(vq,
1507*4882a593Smuzhiyun struct vhost_scsi_virtqueue, vq);
1508*4882a593Smuzhiyun struct vhost_scsi_cmd *tv_cmd;
1509*4882a593Smuzhiyun unsigned int i;
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun if (svq->scsi_cmds)
1512*4882a593Smuzhiyun return 0;
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1515*4882a593Smuzhiyun NUMA_NO_NODE))
1516*4882a593Smuzhiyun return -ENOMEM;
1517*4882a593Smuzhiyun svq->max_cmds = max_cmds;
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1520*4882a593Smuzhiyun if (!svq->scsi_cmds) {
1521*4882a593Smuzhiyun sbitmap_free(&svq->scsi_tags);
1522*4882a593Smuzhiyun return -ENOMEM;
1523*4882a593Smuzhiyun }
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun for (i = 0; i < max_cmds; i++) {
1526*4882a593Smuzhiyun tv_cmd = &svq->scsi_cmds[i];
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1529*4882a593Smuzhiyun sizeof(struct scatterlist),
1530*4882a593Smuzhiyun GFP_KERNEL);
1531*4882a593Smuzhiyun if (!tv_cmd->tvc_sgl) {
1532*4882a593Smuzhiyun pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1533*4882a593Smuzhiyun goto out;
1534*4882a593Smuzhiyun }
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1537*4882a593Smuzhiyun sizeof(struct page *),
1538*4882a593Smuzhiyun GFP_KERNEL);
1539*4882a593Smuzhiyun if (!tv_cmd->tvc_upages) {
1540*4882a593Smuzhiyun pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1541*4882a593Smuzhiyun goto out;
1542*4882a593Smuzhiyun }
1543*4882a593Smuzhiyun
1544*4882a593Smuzhiyun tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1545*4882a593Smuzhiyun sizeof(struct scatterlist),
1546*4882a593Smuzhiyun GFP_KERNEL);
1547*4882a593Smuzhiyun if (!tv_cmd->tvc_prot_sgl) {
1548*4882a593Smuzhiyun pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1549*4882a593Smuzhiyun goto out;
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun }
1552*4882a593Smuzhiyun return 0;
1553*4882a593Smuzhiyun out:
1554*4882a593Smuzhiyun vhost_scsi_destroy_vq_cmds(vq);
1555*4882a593Smuzhiyun return -ENOMEM;
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun /*
1559*4882a593Smuzhiyun * Called from vhost_scsi_ioctl() context to walk the list of available
1560*4882a593Smuzhiyun * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1561*4882a593Smuzhiyun *
1562*4882a593Smuzhiyun * The lock nesting rule is:
1563*4882a593Smuzhiyun * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1564*4882a593Smuzhiyun */
/*
 * Bind this vhost-scsi device to every configfs TPG whose tport WWPN
 * matches t->vhost_wwpn: build a new target lookup table (vs_tpg, indexed
 * by tpgt), take configfs dependencies, preallocate per-vq command pools
 * and install the table as the backend of every virtqueue.
 *
 * Returns 0 on success, -EEXIST when nothing matched or a tpgt slot is
 * already claimed, -EFAULT/-ENOMEM on verification or allocation failure.
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	/* New table seeded with the current target mappings, if any. */
	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		/* Skip TPGs without an active nexus. */
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		/* Skip TPGs already claimed by a vhost device. */
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto undepend;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("target_depend_item() failed: %d\n", ret);
				mutex_unlock(&tpg->tv_tpg_mutex);
				goto undepend;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));

		/* Preallocate command pools for the already set-up I/O vqs. */
		for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			if (!vhost_vq_is_setup(vq))
				continue;

			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
			if (ret)
				goto destroy_vq_cmds;
		}

		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, vs_tpg);
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;
	goto out;

destroy_vq_cmds:
	/* Tear down pools only on vqs that never got a backend installed. */
	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
	}
undepend:
	/*
	 * NOTE(review): this walks the whole vs_tpg copy, so it also drops
	 * tv_tpg_vhost_count and the configfs reference for tpgs inherited
	 * from the old vs->vs_tpg, and does so without holding each
	 * tpg->tv_tpg_mutex — confirm this is intended.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		tpg = vs_tpg[i];
		if (tpg) {
			tpg->tv_tpg_vhost_count--;
			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
		}
	}
	kfree(vs_tpg);
out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
1691*4882a593Smuzhiyun
/*
 * Undo vhost_scsi_set_endpoint(): detach every TPG matching t->vhost_wwpn,
 * clear the vq backends, quiesce and destroy the per-vq command pools, and
 * drop the configfs dependencies taken at set-endpoint time.
 */
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	/* No endpoint was ever set: nothing to clear. */
	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		/* Refuse to clear targets bound under a different WWPN. */
		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
		/*
		 * Release se_tpg->tpg_group.cg_item configfs dependency now
		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
		 */
		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, NULL);
			mutex_unlock(&vq->mutex);
			/*
			 * Make sure cmds are not running before tearing them
			 * down.
			 */
			vhost_scsi_flush(vs);
			vhost_scsi_destroy_vq_cmds(vq);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);	/* all pending events should be gone */
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
1785*4882a593Smuzhiyun
/*
 * VHOST_SET_FEATURES handler: reject unsupported bits, verify log access
 * when VHOST_F_LOG_ALL is requested, then ack the features on every vq.
 */
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i, ret = 0;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		ret = -EFAULT;
		goto out;
	}

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
out:
	mutex_unlock(&vs->dev.mutex);
	return ret;
}
1810*4882a593Smuzhiyun
vhost_scsi_open(struct inode * inode,struct file * f)1811*4882a593Smuzhiyun static int vhost_scsi_open(struct inode *inode, struct file *f)
1812*4882a593Smuzhiyun {
1813*4882a593Smuzhiyun struct vhost_scsi *vs;
1814*4882a593Smuzhiyun struct vhost_virtqueue **vqs;
1815*4882a593Smuzhiyun int r = -ENOMEM, i;
1816*4882a593Smuzhiyun
1817*4882a593Smuzhiyun vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1818*4882a593Smuzhiyun if (!vs) {
1819*4882a593Smuzhiyun vs = vzalloc(sizeof(*vs));
1820*4882a593Smuzhiyun if (!vs)
1821*4882a593Smuzhiyun goto err_vs;
1822*4882a593Smuzhiyun }
1823*4882a593Smuzhiyun
1824*4882a593Smuzhiyun vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1825*4882a593Smuzhiyun if (!vqs)
1826*4882a593Smuzhiyun goto err_vqs;
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1829*4882a593Smuzhiyun vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1830*4882a593Smuzhiyun
1831*4882a593Smuzhiyun vs->vs_events_nr = 0;
1832*4882a593Smuzhiyun vs->vs_events_missed = false;
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1835*4882a593Smuzhiyun vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1836*4882a593Smuzhiyun vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1837*4882a593Smuzhiyun vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1838*4882a593Smuzhiyun for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1839*4882a593Smuzhiyun vqs[i] = &vs->vqs[i].vq;
1840*4882a593Smuzhiyun vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1841*4882a593Smuzhiyun }
1842*4882a593Smuzhiyun vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
1843*4882a593Smuzhiyun VHOST_SCSI_WEIGHT, 0, true, NULL);
1844*4882a593Smuzhiyun
1845*4882a593Smuzhiyun vhost_scsi_init_inflight(vs, NULL);
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun f->private_data = vs;
1848*4882a593Smuzhiyun return 0;
1849*4882a593Smuzhiyun
1850*4882a593Smuzhiyun err_vqs:
1851*4882a593Smuzhiyun kvfree(vs);
1852*4882a593Smuzhiyun err_vs:
1853*4882a593Smuzhiyun return r;
1854*4882a593Smuzhiyun }
1855*4882a593Smuzhiyun
/*
 * Release handler for /dev/vhost-scsi: clear any bound endpoint, stop and
 * clean up the vhost device, flush remaining work and free all state.
 */
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	/* Snapshot the bound WWPN under dev.mutex for the clear call. */
	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	kvfree(vs);	/* matches the kzalloc-or-vzalloc in open */
	return 0;
}
1873*4882a593Smuzhiyun
1874*4882a593Smuzhiyun static long
vhost_scsi_ioctl(struct file * f,unsigned int ioctl,unsigned long arg)1875*4882a593Smuzhiyun vhost_scsi_ioctl(struct file *f,
1876*4882a593Smuzhiyun unsigned int ioctl,
1877*4882a593Smuzhiyun unsigned long arg)
1878*4882a593Smuzhiyun {
1879*4882a593Smuzhiyun struct vhost_scsi *vs = f->private_data;
1880*4882a593Smuzhiyun struct vhost_scsi_target backend;
1881*4882a593Smuzhiyun void __user *argp = (void __user *)arg;
1882*4882a593Smuzhiyun u64 __user *featurep = argp;
1883*4882a593Smuzhiyun u32 __user *eventsp = argp;
1884*4882a593Smuzhiyun u32 events_missed;
1885*4882a593Smuzhiyun u64 features;
1886*4882a593Smuzhiyun int r, abi_version = VHOST_SCSI_ABI_VERSION;
1887*4882a593Smuzhiyun struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1888*4882a593Smuzhiyun
1889*4882a593Smuzhiyun switch (ioctl) {
1890*4882a593Smuzhiyun case VHOST_SCSI_SET_ENDPOINT:
1891*4882a593Smuzhiyun if (copy_from_user(&backend, argp, sizeof backend))
1892*4882a593Smuzhiyun return -EFAULT;
1893*4882a593Smuzhiyun if (backend.reserved != 0)
1894*4882a593Smuzhiyun return -EOPNOTSUPP;
1895*4882a593Smuzhiyun
1896*4882a593Smuzhiyun return vhost_scsi_set_endpoint(vs, &backend);
1897*4882a593Smuzhiyun case VHOST_SCSI_CLEAR_ENDPOINT:
1898*4882a593Smuzhiyun if (copy_from_user(&backend, argp, sizeof backend))
1899*4882a593Smuzhiyun return -EFAULT;
1900*4882a593Smuzhiyun if (backend.reserved != 0)
1901*4882a593Smuzhiyun return -EOPNOTSUPP;
1902*4882a593Smuzhiyun
1903*4882a593Smuzhiyun return vhost_scsi_clear_endpoint(vs, &backend);
1904*4882a593Smuzhiyun case VHOST_SCSI_GET_ABI_VERSION:
1905*4882a593Smuzhiyun if (copy_to_user(argp, &abi_version, sizeof abi_version))
1906*4882a593Smuzhiyun return -EFAULT;
1907*4882a593Smuzhiyun return 0;
1908*4882a593Smuzhiyun case VHOST_SCSI_SET_EVENTS_MISSED:
1909*4882a593Smuzhiyun if (get_user(events_missed, eventsp))
1910*4882a593Smuzhiyun return -EFAULT;
1911*4882a593Smuzhiyun mutex_lock(&vq->mutex);
1912*4882a593Smuzhiyun vs->vs_events_missed = events_missed;
1913*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
1914*4882a593Smuzhiyun return 0;
1915*4882a593Smuzhiyun case VHOST_SCSI_GET_EVENTS_MISSED:
1916*4882a593Smuzhiyun mutex_lock(&vq->mutex);
1917*4882a593Smuzhiyun events_missed = vs->vs_events_missed;
1918*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
1919*4882a593Smuzhiyun if (put_user(events_missed, eventsp))
1920*4882a593Smuzhiyun return -EFAULT;
1921*4882a593Smuzhiyun return 0;
1922*4882a593Smuzhiyun case VHOST_GET_FEATURES:
1923*4882a593Smuzhiyun features = VHOST_SCSI_FEATURES;
1924*4882a593Smuzhiyun if (copy_to_user(featurep, &features, sizeof features))
1925*4882a593Smuzhiyun return -EFAULT;
1926*4882a593Smuzhiyun return 0;
1927*4882a593Smuzhiyun case VHOST_SET_FEATURES:
1928*4882a593Smuzhiyun if (copy_from_user(&features, featurep, sizeof features))
1929*4882a593Smuzhiyun return -EFAULT;
1930*4882a593Smuzhiyun return vhost_scsi_set_features(vs, features);
1931*4882a593Smuzhiyun default:
1932*4882a593Smuzhiyun mutex_lock(&vs->dev.mutex);
1933*4882a593Smuzhiyun r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1934*4882a593Smuzhiyun /* TODO: flush backend after dev ioctl. */
1935*4882a593Smuzhiyun if (r == -ENOIOCTLCMD)
1936*4882a593Smuzhiyun r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1937*4882a593Smuzhiyun mutex_unlock(&vs->dev.mutex);
1938*4882a593Smuzhiyun return r;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun }
1941*4882a593Smuzhiyun
/* File operations backing the /dev/vhost-scsi misc character device. */
static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};
1950*4882a593Smuzhiyun
1951*4882a593Smuzhiyun static struct miscdevice vhost_scsi_misc = {
1952*4882a593Smuzhiyun MISC_DYNAMIC_MINOR,
1953*4882a593Smuzhiyun "vhost-scsi",
1954*4882a593Smuzhiyun &vhost_scsi_fops,
1955*4882a593Smuzhiyun };
1956*4882a593Smuzhiyun
/* Register the /dev/vhost-scsi misc device at module init. */
static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}
1961*4882a593Smuzhiyun
/* Tear down the /dev/vhost-scsi misc device (module exit / init unwind). */
static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}
1966*4882a593Smuzhiyun
vhost_scsi_dump_proto_id(struct vhost_scsi_tport * tport)1967*4882a593Smuzhiyun static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1968*4882a593Smuzhiyun {
1969*4882a593Smuzhiyun switch (tport->tport_proto_id) {
1970*4882a593Smuzhiyun case SCSI_PROTOCOL_SAS:
1971*4882a593Smuzhiyun return "SAS";
1972*4882a593Smuzhiyun case SCSI_PROTOCOL_FCP:
1973*4882a593Smuzhiyun return "FCP";
1974*4882a593Smuzhiyun case SCSI_PROTOCOL_ISCSI:
1975*4882a593Smuzhiyun return "iSCSI";
1976*4882a593Smuzhiyun default:
1977*4882a593Smuzhiyun break;
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun return "Unknown";
1981*4882a593Smuzhiyun }
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyun static void
vhost_scsi_do_plug(struct vhost_scsi_tpg * tpg,struct se_lun * lun,bool plug)1984*4882a593Smuzhiyun vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1985*4882a593Smuzhiyun struct se_lun *lun, bool plug)
1986*4882a593Smuzhiyun {
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun struct vhost_scsi *vs = tpg->vhost_scsi;
1989*4882a593Smuzhiyun struct vhost_virtqueue *vq;
1990*4882a593Smuzhiyun u32 reason;
1991*4882a593Smuzhiyun
1992*4882a593Smuzhiyun if (!vs)
1993*4882a593Smuzhiyun return;
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun mutex_lock(&vs->dev.mutex);
1996*4882a593Smuzhiyun
1997*4882a593Smuzhiyun if (plug)
1998*4882a593Smuzhiyun reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1999*4882a593Smuzhiyun else
2000*4882a593Smuzhiyun reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
2001*4882a593Smuzhiyun
2002*4882a593Smuzhiyun vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2003*4882a593Smuzhiyun mutex_lock(&vq->mutex);
2004*4882a593Smuzhiyun if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
2005*4882a593Smuzhiyun vhost_scsi_send_evt(vs, tpg, lun,
2006*4882a593Smuzhiyun VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
2007*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
2008*4882a593Smuzhiyun mutex_unlock(&vs->dev.mutex);
2009*4882a593Smuzhiyun }
2010*4882a593Smuzhiyun
/* Announce a newly linked LUN to the guest (rescan event). */
static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}
2015*4882a593Smuzhiyun
/* Announce removal of a LUN to the guest (removed event). */
static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}
2020*4882a593Smuzhiyun
/*
 * configfs fabric callback, invoked after a LUN is linked into this TPG.
 *
 * Preallocates one TMF (task-management function) descriptor per linked
 * port and parks it on tpg->tmf_queue; the matching
 * vhost_scsi_port_unlink() removes and frees it.  Also raises a hotplug
 * event so the guest rescans for the new LUN.
 *
 * Returns 0 on success or -ENOMEM if the TMF allocation fails.
 */
static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
				struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tmf *tmf;

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		return -ENOMEM;
	INIT_LIST_HEAD(&tmf->queue_entry);
	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);

	/* Lock order: vhost_scsi_mutex, then tv_tpg_mutex. */
	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);

	/* Must run under vhost_scsi_mutex so the tpg->vhost_scsi binding
	 * cannot change while the event is sent.
	 */
	vhost_scsi_hotplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);

	return 0;
}
2047*4882a593Smuzhiyun
/*
 * configfs fabric callback, invoked before a LUN is unlinked from this TPG.
 *
 * Undoes vhost_scsi_port_link(): drops the port count, frees one TMF
 * descriptor from tpg->tmf_queue (each successful port_link added exactly
 * one, so the queue is non-empty here), and notifies the guest that the
 * LUN went away.
 */
static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				   struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tmf *tmf;

	/* Lock order: vhost_scsi_mutex, then tv_tpg_mutex. */
	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del(&tmf->queue_entry);
	kfree(tmf);
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotunplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);
}
2069*4882a593Smuzhiyun
vhost_scsi_tpg_attrib_fabric_prot_type_store(struct config_item * item,const char * page,size_t count)2070*4882a593Smuzhiyun static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2071*4882a593Smuzhiyun struct config_item *item, const char *page, size_t count)
2072*4882a593Smuzhiyun {
2073*4882a593Smuzhiyun struct se_portal_group *se_tpg = attrib_to_tpg(item);
2074*4882a593Smuzhiyun struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2075*4882a593Smuzhiyun struct vhost_scsi_tpg, se_tpg);
2076*4882a593Smuzhiyun unsigned long val;
2077*4882a593Smuzhiyun int ret = kstrtoul(page, 0, &val);
2078*4882a593Smuzhiyun
2079*4882a593Smuzhiyun if (ret) {
2080*4882a593Smuzhiyun pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2081*4882a593Smuzhiyun return ret;
2082*4882a593Smuzhiyun }
2083*4882a593Smuzhiyun if (val != 0 && val != 1 && val != 3) {
2084*4882a593Smuzhiyun pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2085*4882a593Smuzhiyun return -EINVAL;
2086*4882a593Smuzhiyun }
2087*4882a593Smuzhiyun tpg->tv_fabric_prot_type = val;
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun return count;
2090*4882a593Smuzhiyun }
2091*4882a593Smuzhiyun
/* configfs show handler: print the TPG's current fabric_prot_type value. */
static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
}
2101*4882a593Smuzhiyun
/* Generates vhost_scsi_tpg_attrib_attr_fabric_prot_type from the
 * _show/_store pair above.
 */
CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

/* Attributes exposed under the TPG's "attrib" configfs group. */
static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
2108*4882a593Smuzhiyun
/*
 * Create the single I_T nexus (initiator session) for a TPG.
 *
 * @name: SCSI Initiator port name taken from the configfs "nexus" write.
 *
 * Returns 0 on success, -EEXIST if a nexus already exists, or -ENOMEM on
 * allocation/session-setup failure.  tpg->tv_tpg_mutex serializes against
 * vhost_scsi_drop_nexus() and the nexus show/store handlers.
 */
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				 const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call with generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus, NULL);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		/* NOTE(review): the specific PTR_ERR() from
		 * target_setup_session() is collapsed to -ENOMEM here.
		 */
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}
2145*4882a593Smuzhiyun
/*
 * Tear down the TPG's I_T nexus, if any.
 *
 * Refuses with -EBUSY while LUNs are still linked (tv_tpg_port_count) or
 * a vhost endpoint is still attached (tv_tpg_vhost_count); returns
 * -ENODEV when there is no nexus or session to drop, 0 on success.
 */
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}
2194*4882a593Smuzhiyun
vhost_scsi_tpg_nexus_show(struct config_item * item,char * page)2195*4882a593Smuzhiyun static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2196*4882a593Smuzhiyun {
2197*4882a593Smuzhiyun struct se_portal_group *se_tpg = to_tpg(item);
2198*4882a593Smuzhiyun struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2199*4882a593Smuzhiyun struct vhost_scsi_tpg, se_tpg);
2200*4882a593Smuzhiyun struct vhost_scsi_nexus *tv_nexus;
2201*4882a593Smuzhiyun ssize_t ret;
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun mutex_lock(&tpg->tv_tpg_mutex);
2204*4882a593Smuzhiyun tv_nexus = tpg->tpg_nexus;
2205*4882a593Smuzhiyun if (!tv_nexus) {
2206*4882a593Smuzhiyun mutex_unlock(&tpg->tv_tpg_mutex);
2207*4882a593Smuzhiyun return -ENODEV;
2208*4882a593Smuzhiyun }
2209*4882a593Smuzhiyun ret = snprintf(page, PAGE_SIZE, "%s\n",
2210*4882a593Smuzhiyun tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2211*4882a593Smuzhiyun mutex_unlock(&tpg->tv_tpg_mutex);
2212*4882a593Smuzhiyun
2213*4882a593Smuzhiyun return ret;
2214*4882a593Smuzhiyun }
2215*4882a593Smuzhiyun
/*
 * configfs store handler for the TPG "nexus" attribute.
 *
 * Writing "NULL" drops the current I_T nexus; otherwise the written WWN
 * must carry a "naa.", "fc." or "iqn." prefix that matches the protocol
 * of the parent tport, and a new nexus is created for it.
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
				" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	/* Take a bounded, mutable copy so the trailing '\n' can be stripped. */
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
2293*4882a593Smuzhiyun
/* Generates vhost_scsi_tpg_attr_nexus from the _show/_store pair above. */
CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

/* Attributes exposed directly on the TPG configfs directory. */
static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};
2300*4882a593Smuzhiyun
2301*4882a593Smuzhiyun static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn * wwn,const char * name)2302*4882a593Smuzhiyun vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2303*4882a593Smuzhiyun {
2304*4882a593Smuzhiyun struct vhost_scsi_tport *tport = container_of(wwn,
2305*4882a593Smuzhiyun struct vhost_scsi_tport, tport_wwn);
2306*4882a593Smuzhiyun
2307*4882a593Smuzhiyun struct vhost_scsi_tpg *tpg;
2308*4882a593Smuzhiyun u16 tpgt;
2309*4882a593Smuzhiyun int ret;
2310*4882a593Smuzhiyun
2311*4882a593Smuzhiyun if (strstr(name, "tpgt_") != name)
2312*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
2313*4882a593Smuzhiyun if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2314*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
2315*4882a593Smuzhiyun
2316*4882a593Smuzhiyun tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2317*4882a593Smuzhiyun if (!tpg) {
2318*4882a593Smuzhiyun pr_err("Unable to allocate struct vhost_scsi_tpg");
2319*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
2320*4882a593Smuzhiyun }
2321*4882a593Smuzhiyun mutex_init(&tpg->tv_tpg_mutex);
2322*4882a593Smuzhiyun INIT_LIST_HEAD(&tpg->tv_tpg_list);
2323*4882a593Smuzhiyun INIT_LIST_HEAD(&tpg->tmf_queue);
2324*4882a593Smuzhiyun tpg->tport = tport;
2325*4882a593Smuzhiyun tpg->tport_tpgt = tpgt;
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2328*4882a593Smuzhiyun if (ret < 0) {
2329*4882a593Smuzhiyun kfree(tpg);
2330*4882a593Smuzhiyun return NULL;
2331*4882a593Smuzhiyun }
2332*4882a593Smuzhiyun mutex_lock(&vhost_scsi_mutex);
2333*4882a593Smuzhiyun list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2334*4882a593Smuzhiyun mutex_unlock(&vhost_scsi_mutex);
2335*4882a593Smuzhiyun
2336*4882a593Smuzhiyun return &tpg->se_tpg;
2337*4882a593Smuzhiyun }
2338*4882a593Smuzhiyun
/*
 * configfs fabric callback: destroy a TPG created by
 * vhost_scsi_make_tpg().  Removes it from the global list, drops its
 * I_T nexus (if any), deregisters it from target-core and frees it.
 */
static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
2357*4882a593Smuzhiyun
/*
 * configfs fabric callback: create a target port (WWN) object.
 *
 * The protocol is inferred from the configfs directory @name prefix:
 * "naa." -> SAS, "fc." -> FCP (prefix stripped from the stored name),
 * "iqn." -> iSCSI.  Returns the embedded se_wwn on success or an
 * ERR_PTR() on allocation failure, unknown prefix, or over-long name.
 */
static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		      struct config_group *group,
		      const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport");
		return ERR_PTR(-ENOMEM);
	}
	/* wwpn stays 0 until WWN parsing (above) is enabled. */
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
			VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}
2418*4882a593Smuzhiyun
/* configfs fabric callback: free a tport created by vhost_scsi_make_tport(). */
static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}
2430*4882a593Smuzhiyun
2431*4882a593Smuzhiyun static ssize_t
vhost_scsi_wwn_version_show(struct config_item * item,char * page)2432*4882a593Smuzhiyun vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2433*4882a593Smuzhiyun {
2434*4882a593Smuzhiyun return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2435*4882a593Smuzhiyun "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2436*4882a593Smuzhiyun utsname()->machine);
2437*4882a593Smuzhiyun }
2438*4882a593Smuzhiyun
/* Generates the read-only vhost_scsi_wwn_attr_version from the show above. */
CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

/* Attributes exposed on the fabric's top-level configfs directory. */
static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};
2445*4882a593Smuzhiyun
/*
 * target-core fabric template for the "vhost" fabric: wires the configfs
 * make/drop callbacks, per-command completion hooks and attribute groups
 * defined above into the generic target stack.
 */
static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "vhost",
	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
	.tpg_get_tag			= vhost_scsi_get_tpgt,
	.tpg_check_demo_mode		= vhost_scsi_check_true,
	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
	.release_cmd			= vhost_scsi_release_cmd,
	.check_stop_free		= vhost_scsi_check_stop_free,
	.sess_get_index			= vhost_scsi_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= vhost_scsi_write_pending,
	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
	.get_cmd_state			= vhost_scsi_get_cmd_state,
	.queue_data_in			= vhost_scsi_queue_data_in,
	.queue_status			= vhost_scsi_queue_status,
	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
	.aborted_task			= vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= vhost_scsi_make_tport,
	.fabric_drop_wwn		= vhost_scsi_drop_tport,
	.fabric_make_tpg		= vhost_scsi_make_tpg,
	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
	.fabric_post_link		= vhost_scsi_port_link,
	.fabric_pre_unlink		= vhost_scsi_port_unlink,

	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
};
2483*4882a593Smuzhiyun
vhost_scsi_init(void)2484*4882a593Smuzhiyun static int __init vhost_scsi_init(void)
2485*4882a593Smuzhiyun {
2486*4882a593Smuzhiyun int ret = -ENOMEM;
2487*4882a593Smuzhiyun
2488*4882a593Smuzhiyun pr_debug("TCM_VHOST fabric module %s on %s/%s"
2489*4882a593Smuzhiyun " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2490*4882a593Smuzhiyun utsname()->machine);
2491*4882a593Smuzhiyun
2492*4882a593Smuzhiyun /*
2493*4882a593Smuzhiyun * Use our own dedicated workqueue for submitting I/O into
2494*4882a593Smuzhiyun * target core to avoid contention within system_wq.
2495*4882a593Smuzhiyun */
2496*4882a593Smuzhiyun vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2497*4882a593Smuzhiyun if (!vhost_scsi_workqueue)
2498*4882a593Smuzhiyun goto out;
2499*4882a593Smuzhiyun
2500*4882a593Smuzhiyun ret = vhost_scsi_register();
2501*4882a593Smuzhiyun if (ret < 0)
2502*4882a593Smuzhiyun goto out_destroy_workqueue;
2503*4882a593Smuzhiyun
2504*4882a593Smuzhiyun ret = target_register_template(&vhost_scsi_ops);
2505*4882a593Smuzhiyun if (ret < 0)
2506*4882a593Smuzhiyun goto out_vhost_scsi_deregister;
2507*4882a593Smuzhiyun
2508*4882a593Smuzhiyun return 0;
2509*4882a593Smuzhiyun
2510*4882a593Smuzhiyun out_vhost_scsi_deregister:
2511*4882a593Smuzhiyun vhost_scsi_deregister();
2512*4882a593Smuzhiyun out_destroy_workqueue:
2513*4882a593Smuzhiyun destroy_workqueue(vhost_scsi_workqueue);
2514*4882a593Smuzhiyun out:
2515*4882a593Smuzhiyun return ret;
2516*4882a593Smuzhiyun };
2517*4882a593Smuzhiyun
vhost_scsi_exit(void)2518*4882a593Smuzhiyun static void vhost_scsi_exit(void)
2519*4882a593Smuzhiyun {
2520*4882a593Smuzhiyun target_unregister_template(&vhost_scsi_ops);
2521*4882a593Smuzhiyun vhost_scsi_deregister();
2522*4882a593Smuzhiyun destroy_workqueue(vhost_scsi_workqueue);
2523*4882a593Smuzhiyun };
2524*4882a593Smuzhiyun
/* Module metadata; "tcm_vhost" alias kept for the driver's original name. */
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);
2530