// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion in device removal and mounting path
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

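/*
 * Virtqueue layout: fs->vqs[VQ_HIPRIO] carries only FUSE_FORGET requests
 * (see virtio_fs_wake_forget_and_unlock()); fs->vqs[VQ_REQUEST] onwards
 * are the "requests.N" queues that carry all other FUSE requests.
 */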
enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

#define VQ_NAME_LEN	24

/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;     /* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;
	struct list_head end_reqs;	/* End these requests */
	struct delayed_work dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
	struct kref refcount;
	struct list_head list;    /* on virtio_fs_instances */
	char *tag;
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;               /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};

struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on virt queue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight);

enum {
	OPT_DAX,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	{}
};

static int virtio_fs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fc->fs_private;
	int opt;

	opt = fs_parse(fc, virtio_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_DAX:
		ctx->dax = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void virtio_fs_free_fc(struct fs_context *fc)
{
	struct fuse_fs_context *ctx = fc->fs_private;

	kfree(ctx);
}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}

static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
{
	return &vq_to_fsvq(vq)->fud->pq;
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}
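
/*
 * The in_flight counter pairs with the in_flight_zero completion above:
 * virtio_fs_drain_queue() reinits the completion and waits on it, and the
 * final dec_in_flight_req() wakes the waiter once the last request (or
 * queued forget) has been reaped. Both helpers therefore require
 * fsvq->lock, which also serializes them against virtqueue operations.
 */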

static void release_virtio_fs_obj(struct kref *ref)
{
	struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);

	kfree(vfs->vqs);
	kfree(vfs);
}

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{
	kref_put(&fs->refcount, release_virtio_fs_obj);
}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
	struct virtio_fs *vfs = fiq->priv;

	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(vfs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in flight requests to finish. */
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	flush_work(&fsvq->done_work);
	flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		virtio_fs_drain_queue(fsvq);
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between ->remove and ->kill_sb
	 * paths. We don't want both of these draining queues at the
	 * same time. Current completion logic reinits completion
	 * and that means there should not be any other thread
	 * doing reinit or waiting for completion already.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = true;
		spin_unlock(&fsvq->lock);
	}
}

/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_fs *fs)
{
	struct virtio_fs *fs2;
	bool duplicate = false;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0)
			duplicate = true;
	}

	if (!duplicate)
		list_add_tail(&fs->list, &virtio_fs_instances);

	mutex_unlock(&virtio_fs_mutex);

	if (duplicate)
		return -EEXIST;
	return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kref_get(&fs->refcount);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{
	unsigned int i;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		if (!fsvq->fud)
			continue;

		fuse_dev_free(fsvq->fud);
		fsvq->fud = NULL;
	}
}

/* Read filesystem name from virtio config into fs->tag (devm-allocated,
 * freed automatically when the device goes away).
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';
	return 0;
}
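
/*
 * Note: the "tag" field in virtio-fs config space is not guaranteed to be
 * NUL-terminated when all of its bytes are used, which is why the code
 * above probes for '\0' with memchr() instead of relying on a terminator.
 * The resulting fs->tag is what userspace later passes as the mount
 * source, e.g. "mount -t virtiofs <tag> /mnt".
 */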

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			kfree(req);
			dec_in_flight_req(fsvq);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);
}
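
/*
 * The disable_cb/get_buf/enable_cb loop above is the standard virtio
 * completion pattern: callbacks stay disabled while the used ring is
 * drained, and virtqueue_enable_cb() re-arms them while reporting whether
 * new buffers snuck in meanwhile, so the loop repeats until the queue is
 * empty with callbacks enabled (avoiding a missed-notification race).
 */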

static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		ret = virtio_fs_enqueue_req(fsvq, req, true);
		if (ret < 0) {
			if (ret == -ENOMEM || ret == -ENOSPC) {
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				schedule_delayed_work(&fsvq->dispatch_work,
						      msecs_to_jiffies(1));
				spin_unlock(&fsvq->lock);
				return;
			}
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}

static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
						  struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	unsigned int offset = 0;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int len;
	unsigned int i;

	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
	      fuse_len_args(num_out, args->out_args);

	req->argbuf = kmalloc(len, GFP_ATOMIC);
	if (!req->argbuf)
		return -ENOMEM;

	for (i = 0; i < num_in; i++) {
		memcpy(req->argbuf + offset,
		       args->in_args[i].value,
		       args->in_args[i].size);
		offset += args->in_args[i].size;
	}

	return 0;
}
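
/*
 * The argbuf above is a bounce buffer: FUSE arg values may live on the
 * caller's stack, which cannot be handed to the device in a scatterlist,
 * so in-args are packed into one kmalloc'd buffer here and out-args are
 * unpacked from the same buffer in copy_args_from_argbuf() below.
 */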

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
	unsigned int remaining;
	unsigned int offset;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int i;

	remaining = req->out.h.len - sizeof(req->out.h);
	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

	for (i = 0; i < num_out; i++) {
		unsigned int argsize = args->out_args[i].size;

		if (args->out_argvar &&
		    i == args->out_numargs - 1 &&
		    argsize > remaining) {
			argsize = remaining;
		}

		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
		offset += argsize;

		if (i != args->out_numargs - 1)
			remaining -= argsize;
	}

	/* Store the actual size of the variable-length arg */
	if (args->out_argvar)
		args->out_args[args->out_numargs - 1].size = remaining;

	kfree(req->argbuf);
	req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct fuse_args *args;
	struct fuse_args_pages *ap;
	unsigned int len, i, thislen;
	struct page *page;

	/*
	 * TODO verify that server properly follows FUSE protocol
	 * (oh.uniq, oh.len)
	 */
	args = req->args;
	copy_args_from_argbuf(args, req);

	if (args->out_pages && args->page_zeroing) {
		len = args->out_args[args->out_numargs - 1].size;
		ap = container_of(args, typeof(*ap), args);
		for (i = 0; i < ap->num_pages; i++) {
			thislen = ap->descs[i].length;
			if (len < thislen) {
				WARN_ON(ap->descs[i].offset);
				page = ap->pages[i];
				zero_user_segment(page, len, thislen);
				len = 0;
			} else {
				len -= thislen;
			}
		}
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
	spin_lock(&fsvq->lock);
	dec_in_flight_req(fsvq);
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
	struct virtio_fs_req_work *w =
		container_of(work, typeof(*w), done_work);

	virtio_fs_request_complete(w->req, w->fsvq);
	kfree(w);
}

static void virtio_fs_requests_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct virtqueue *vq = fsvq->vq;
	struct fuse_req *req;
	struct fuse_req *next;
	unsigned int len;
	LIST_HEAD(reqs);

	/* Collect completed requests off the virtqueue */
	spin_lock(&fsvq->lock);
	do {
		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_lock(&fpq->lock);
			list_move_tail(&req->list, &reqs);
			spin_unlock(&fpq->lock);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);

	/* End requests */
	list_for_each_entry_safe(req, next, &reqs, list) {
		list_del_init(&req->list);

		/* blocking async request completes in a worker context */
		if (req->args->may_block) {
			struct virtio_fs_req_work *w;

			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
			w->fsvq = fsvq;
			w->req = req;
			schedule_work(&w->done_work);
		} else {
			virtio_fs_request_complete(req, fsvq);
		}
	}
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

	schedule_work(&fsvq->done_work);
}

static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{
	strncpy(fsvq->name, name, VQ_NAME_LEN);
	spin_lock_init(&fsvq->lock);
	INIT_LIST_HEAD(&fsvq->queued_reqs);
	INIT_LIST_HEAD(&fsvq->end_reqs);
	init_completion(&fsvq->in_flight_zero);

	if (vq_type == VQ_REQUEST) {
		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_request_dispatch_work);
	} else {
		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_hiprio_dispatch_work);
	}
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{
	struct virtqueue **vqs;
	vq_callback_t **callbacks;
	const char **names;
	unsigned int i;
	int ret = 0;

	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
			&fs->num_request_queues);
	if (fs->num_request_queues == 0)
		return -EINVAL;

	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
	if (!fs->vqs)
		return -ENOMEM;

	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
				  GFP_KERNEL);
	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
	if (!vqs || !callbacks || !names) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the hiprio/forget request virtqueue */
	callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

	/* Initialize the requests virtqueues */
	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		char vq_name[VQ_NAME_LEN];

		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
		callbacks[i] = virtio_fs_vq_done;
		names[i] = fs->vqs[i].name;
	}

	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
	if (ret < 0)
		goto out;

	for (i = 0; i < fs->nvqs; i++)
		fs->vqs[i].vq = vqs[i];

	virtio_fs_start_all_queues(fs);
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (ret)
		kfree(fs->vqs);
	return ret;
}
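
/*
 * virtio_find_vqs() hands back virtqueues in the same order as the
 * callbacks/names arrays, so fs->vqs[i].vq ends up with virtqueue index i.
 * vq_to_fsvq() relies on this when it maps vq->index back into fs->vqs[].
 */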

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
				  struct virtio_fs *fs)
{
	vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
				     PFN_DEV | PFN_MAP);
	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
				       pgoff_t pgoff, void *addr,
				       size_t bytes, struct iov_iter *i)
{
	return copy_from_iter(addr, bytes, i);
}

static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
				     pgoff_t pgoff, void *addr,
				     size_t bytes, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
	if (rc < 0)
		return rc;
	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
	.copy_from_iter = virtio_fs_copy_from_iter,
	.copy_to_iter = virtio_fs_copy_to_iter,
	.zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
	struct dax_device *dax_dev = data;

	kill_dax(dax_dev);
	put_dax(dax_dev);
}

static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap. So
	 * initialize a struct range from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->range = (struct range) {
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};
	pgmap->nr_range = 1;

	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	fs->window_len = (phys_addr_t) cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
	if (IS_ERR(fs->dax_dev))
		return PTR_ERR(fs->dax_dev);

	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}
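
/*
 * The shared memory region advertised by the device becomes the DAX
 * window: devm_memremap_pages() gives the range struct pages so file
 * contents mapped into it can be accessed directly, bypassing the guest
 * page cache, and the dax_device wraps it for the FUSE DAX code.
 */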

static int virtio_fs_probe(struct virtio_device *vdev)
{
	struct virtio_fs *fs;
	int ret;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return -ENOMEM;
	kref_init(&fs->refcount);
	vdev->priv = fs;

	ret = virtio_fs_read_tag(vdev, fs);
	if (ret < 0)
		goto out;

	ret = virtio_fs_setup_vqs(vdev, fs);
	if (ret < 0)
		goto out;

	/* TODO vq affinity */

	ret = virtio_fs_setup_dax(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	/* Bring the device online in case the filesystem is mounted and
	 * requests need to be sent before we return.
	 */
	virtio_device_ready(vdev);

	ret = virtio_fs_add_instance(fs);
	if (ret < 0)
		goto out_vqs;

	return 0;

out_vqs:
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);
	kfree(fs->vqs);

out:
	vdev->priv = NULL;
	kfree(fs);
	return ret;
}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = false;
		spin_unlock(&fsvq->lock);
	}
}

static void virtio_fs_remove(struct virtio_device *vdev)
{
	struct virtio_fs *fs = vdev->priv;

	mutex_lock(&virtio_fs_mutex);
	/* This device is going away. No one should get new reference */
	list_del_init(&fs->list);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);

	vdev->priv = NULL;
	/* Put device reference on virtio_fs object */
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
	/* TODO need to save state here */
	pr_warn("virtio-fs: suspend/resume not yet supported\n");
	return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
	/* TODO need to restore state here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
	{},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= id_table,
	.feature_table	= feature_table,
	.feature_table_size = ARRAY_SIZE(feature_table),
	.probe		= virtio_fs_probe,
	.remove		= virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze		= virtio_fs_freeze,
	.restore	= virtio_fs_restore,
#endif
};

static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq, bool sync)
__releases(fiq->lock)
{
	struct fuse_forget_link *link;
	struct virtio_fs_forget *forget;
	struct virtio_fs_forget_req *req;
	struct virtio_fs *fs;
	struct virtio_fs_vq *fsvq;
	u64 unique;

	link = fuse_dequeue_forget(fiq, 1, NULL);
	unique = fuse_get_unique(fiq);

	fs = fiq->priv;
	fsvq = &fs->vqs[VQ_HIPRIO];
	spin_unlock(&fiq->lock);

	/* Allocate a buffer for the request */
	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
	req = &forget->req;

	req->ih = (struct fuse_in_header){
		.opcode = FUSE_FORGET,
		.nodeid = link->forget_one.nodeid,
		.unique = unique,
		.len = sizeof(*req),
	};
	req->arg = (struct fuse_forget_in){
		.nlookup = link->forget_one.nlookup,
	};

	send_forget_request(fsvq, forget, false);
	kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq,
						bool sync)
__releases(fiq->lock)
{
	/*
	 * TODO interrupts.
	 *
	 * Normal fs operations on a local filesystem aren't interruptible.
	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
	 * with shared lock between host and guest.
	 */
	spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
					unsigned int num_pages,
					unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		this_len = min(page_descs[i].length, total_len);
		total_len -= this_len;
	}

	return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
	unsigned int size, total_sgs = 1 /* fuse_in_header */;

	if (args->in_numargs - args->in_pages)
		total_sgs += 1;

	if (args->in_pages) {
		size = args->in_args[args->in_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	if (!test_bit(FR_ISREPLY, &req->flags))
		return total_sgs;

	total_sgs += 1 /* fuse_out_header */;

	if (args->out_numargs - args->out_pages)
		total_sgs += 1;

	if (args->out_pages) {
		size = args->out_args[args->out_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	return total_sgs;
}
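
/*
 * Worked example: a FUSE_READ whose reply carries data in two pages needs
 * 1 sg for fuse_in_header, 1 sg for the packed fuse_read_in arg, 1 sg for
 * fuse_out_header, and 2 sgs for the reply pages, so sg_count_fuse_req()
 * returns 5.
 */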

/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
				       struct page **pages,
				       struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		sg_init_table(&sg[i], 1);
		this_len = min(page_descs[i].length, total_len);
		sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
		total_len -= this_len;
	}

	return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
	unsigned int total_sgs = 0;
	unsigned int len;

	len = fuse_len_args(numargs - argpages, args);
	if (len)
		sg_init_one(&sg[total_sgs++], argbuf, len);

	if (argpages)
		total_sgs += sg_init_fuse_pages(&sg[total_sgs],
						ap->pages, ap->descs,
						ap->num_pages,
						args[numargs - 1].size);

	if (len_used)
		*len_used = len;

	return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight)
{
	/* requests need at least 4 elements */
	struct scatterlist *stack_sgs[6];
	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
	struct scatterlist **sgs = stack_sgs;
	struct scatterlist *sg = stack_sg;
	struct virtqueue *vq;
	struct fuse_args *args = req->args;
	unsigned int argbuf_used = 0;
	unsigned int out_sgs = 0;
	unsigned int in_sgs = 0;
	unsigned int total_sgs;
	unsigned int i;
	int ret;
	bool notify;
	struct fuse_pqueue *fpq;

	/* Does the sglist fit on the stack? */
	total_sgs = sg_count_fuse_req(req);
	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
		sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
		if (!sgs || !sg) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Use a bounce buffer since stack args cannot be mapped */
	ret = copy_args_to_argbuf(req);
	if (ret < 0)
		goto out;

	/* Request elements */
	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
				     (struct fuse_arg *)args->in_args,
				     args->in_numargs, args->in_pages,
				     req->argbuf, &argbuf_used);

	/* Reply elements */
	if (test_bit(FR_ISREPLY, &req->flags)) {
		sg_init_one(&sg[out_sgs + in_sgs++],
			    &req->out.h, sizeof(req->out.h));
		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
					    args->out_args, args->out_numargs,
					    args->out_pages,
					    req->argbuf + argbuf_used, NULL);
	}

	WARN_ON(out_sgs + in_sgs != total_sgs);

	for (i = 0; i < total_sgs; i++)
		sgs[i] = &sg[i];

	spin_lock(&fsvq->lock);

	if (!fsvq->connected) {
		spin_unlock(&fsvq->lock);
		ret = -ENOTCONN;
		goto out;
	}

	vq = fsvq->vq;
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&fsvq->lock);
		goto out;
	}

	/* Request successfully sent. */
	fpq = &fsvq->fud->pq;
	spin_lock(&fpq->lock);
	list_add_tail(&req->list, fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);

out:
	if (ret < 0 && req->argbuf) {
		kfree(req->argbuf);
		req->argbuf = NULL;
	}
	if (sgs != stack_sgs) {
		kfree(sgs);
		kfree(sg);
	}

	return ret;
}
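
/*
 * The six-element stack arrays above cover the common case (in header,
 * in args, out header, out args, plus a couple of page sgs); anything
 * larger, e.g. a big read or write, falls back to the kmalloc_array()
 * path guarded by the total_sgs check.
 */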

static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq,
					      bool sync)
__releases(fiq->lock)
{
	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
	struct virtio_fs *fs;
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	int ret;

	WARN_ON(list_empty(&fiq->pending));
	req = list_last_entry(&fiq->pending, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	WARN_ON(!list_empty(&fiq->pending));
	spin_unlock(&fiq->lock);

	fs = fiq->priv;

	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
		 __func__, req->in.h.opcode, req->in.h.unique,
		 req->in.h.nodeid, req->in.h.len,
		 fuse_len_args(req->args->out_numargs, req->args->out_args));

	fsvq = &fs->vqs[queue_id];
	ret = virtio_fs_enqueue_req(fsvq, req, false);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			/*
			 * Virtqueue full. Retry submission from worker
			 * context as we might be holding fc->bg_lock.
			 */
			spin_lock(&fsvq->lock);
			list_add_tail(&req->list, &fsvq->queued_reqs);
			inc_in_flight_req(fsvq);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			spin_unlock(&fsvq->lock);
			return;
		}
		req->out.h.error = ret;
		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

		/* Can't end request in submission context. Use a worker */
		spin_lock(&fsvq->lock);
		list_add_tail(&req->list, &fsvq->end_reqs);
		schedule_delayed_work(&fsvq->dispatch_work, 0);
		spin_unlock(&fsvq->lock);
		return;
	}
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
	.wake_forget_and_unlock		= virtio_fs_wake_forget_and_unlock,
	.wake_interrupt_and_unlock	= virtio_fs_wake_interrupt_and_unlock,
	.wake_pending_and_unlock	= virtio_fs_wake_pending_and_unlock,
	.release			= virtio_fs_fiq_release,
};
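
/*
 * These fuse_iqueue_ops replace the /dev/fuse read path used by regular
 * FUSE: instead of a userspace daemon pulling requests off fiq->pending,
 * each wake_*_and_unlock callback submits the request straight to a
 * virtqueue while dropping fiq->lock.
 */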

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
	ctx->rootmode = S_IFDIR;
	ctx->default_permissions = 1;
	ctx->allow_other = 1;
	ctx->max_read = UINT_MAX;
	ctx->blksize = 512;
	ctx->destroy = true;
	ctx->no_control = true;
	ctx->no_force_umount = true;
}

static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *fs = fc->iq.priv;
	struct fuse_fs_context *ctx = fsc->fs_private;
	unsigned int i;
	int err;

	virtio_fs_ctx_set_defaults(ctx);
	mutex_lock(&virtio_fs_mutex);

	/* After holding mutex, make sure virtiofs device is still there.
	 * Though we are holding a reference to it, driver ->remove might
	 * still have cleaned up virtual queues. In that case bail out.
	 */
	err = -EINVAL;
	if (list_empty(&fs->list)) {
		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
		goto err;
	}

	err = -ENOMEM;
	/* Allocate fuse_dev for hiprio and request queues */
	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fsvq->fud = fuse_dev_alloc();
		if (!fsvq->fud)
			goto err_free_fuse_devs;
	}

	/* virtiofs allocates and installs its own fuse devices */
	ctx->fudptr = NULL;
	if (ctx->dax) {
		if (!fs->dax_dev) {
			err = -EINVAL;
			pr_err("virtio-fs: dax can't be enabled as filesystem device does not support it.\n");
			goto err_free_fuse_devs;
		}
		ctx->dax_dev = fs->dax_dev;
	}
	err = fuse_fill_super_common(sb, ctx);
	if (err < 0)
		goto err_free_fuse_devs;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fuse_dev_install(fsvq->fud, fc);
	}

	/* Previous unmount will stop all queues. Start these again */
	virtio_fs_start_all_queues(fs);
	fuse_send_init(fm);
	mutex_unlock(&virtio_fs_mutex);
	return 0;

err_free_fuse_devs:
	virtio_fs_free_devs(fs);
err:
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which
	 * will free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop forget queue. Soon destroy will be sent */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues
	 * and drain one more time and free fuse devices. Freeing fuse
	 * devices will drop their reference on fuse_conn and that in
	 * turn will drop its reference on virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	/* If mount failed, we can still be called without any fc */
	if (fm) {
		last = fuse_mount_remove(fm);
		if (last)
			virtio_fs_conn_destroy(fm);
	}
	kill_anon_super(sb);
}

static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{
	struct fuse_mount *fsc_fm = fsc->s_fs_info;
	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}
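
/*
 * Two mounts match when their fuse_conns point at the same virtio_fs
 * instance (fc->iq.priv), i.e. mounting the same tag twice reuses the
 * existing superblock via sget_fc() rather than creating a new one.
 */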

static int virtio_fs_set_super(struct super_block *sb,
			       struct fs_context *fsc)
{
	int err;

	err = get_anon_bdev(&sb->s_dev);
	if (!err)
		fuse_mount_get(fsc->s_fs_info);

	return err;
}

static int virtio_fs_get_tree(struct fs_context *fsc)
{
	struct virtio_fs *fs;
	struct super_block *sb;
	struct fuse_conn *fc;
	struct fuse_mount *fm;
	int err;

	/* This gets a reference on virtio_fs object. This ptr gets installed
	 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
	 * to drop the reference to this object.
	 */
	fs = virtio_fs_find_instance(fsc->source);
	if (!fs) {
		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
		return -EINVAL;
	}

	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
	if (!fc) {
		mutex_lock(&virtio_fs_mutex);
		virtio_fs_put(fs);
		mutex_unlock(&virtio_fs_mutex);
		return -ENOMEM;
	}

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm) {
		mutex_lock(&virtio_fs_mutex);
		virtio_fs_put(fs);
		mutex_unlock(&virtio_fs_mutex);
		kfree(fc);
		return -ENOMEM;
	}

	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
	fc->release = fuse_free_conn;
	fc->delete_stale = true;
	fc->auto_submounts = true;

	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super);
	fuse_mount_put(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = virtio_fs_fill_super(sb, fsc);
		if (err) {
			fuse_mount_put(fm);
			sb->s_fs_info = NULL;
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	WARN_ON(fsc->root);
	fsc->root = dget(sb->s_root);
	return 0;
}

static const struct fs_context_operations virtio_fs_context_ops = {
	.free		= virtio_fs_free_fc,
	.parse_param	= virtio_fs_parse_param,
	.get_tree	= virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &virtio_fs_context_ops;
	return 0;
}

static struct file_system_type virtio_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb	= virtio_kill_sb,
};
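
/*
 * Typical usage from a guest, assuming the device exports the tag "myfs"
 * (tag name here is illustrative):
 *
 *   mount -t virtiofs myfs /mnt
 *   mount -t virtiofs -o dax myfs /mnt
 *
 * The tag is matched against virtio_fs_instances in virtio_fs_get_tree(),
 * and "dax" is the only mount option parsed by virtio_fs_parse_param().
 */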

static int __init virtio_fs_init(void)
{
	int ret;

	ret = register_virtio_driver(&virtio_fs_driver);
	if (ret < 0)
		return ret;

	ret = register_filesystem(&virtio_fs_type);
	if (ret < 0) {
		unregister_virtio_driver(&virtio_fs_driver);
		return ret;
	}

	return 0;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);