1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun #ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
3*4882a593Smuzhiyun #define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
4*4882a593Smuzhiyun /*
5*4882a593Smuzhiyun * Virtio PCI driver - APIs for common functionality for all device versions
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * This module allows virtio devices to be used over a virtual PCI device.
8*4882a593Smuzhiyun * This can be used with QEMU based VMMs like KVM or Xen.
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * Copyright IBM Corp. 2007
11*4882a593Smuzhiyun * Copyright Red Hat, Inc. 2014
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * Authors:
14*4882a593Smuzhiyun * Anthony Liguori <aliguori@us.ibm.com>
15*4882a593Smuzhiyun * Rusty Russell <rusty@rustcorp.com.au>
16*4882a593Smuzhiyun * Michael S. Tsirkin <mst@redhat.com>
17*4882a593Smuzhiyun */
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include <linux/module.h>
20*4882a593Smuzhiyun #include <linux/list.h>
21*4882a593Smuzhiyun #include <linux/pci.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <linux/interrupt.h>
24*4882a593Smuzhiyun #include <linux/virtio.h>
25*4882a593Smuzhiyun #include <linux/virtio_config.h>
26*4882a593Smuzhiyun #include <linux/virtio_ring.h>
27*4882a593Smuzhiyun #include <linux/virtio_pci.h>
28*4882a593Smuzhiyun #include <linux/highmem.h>
29*4882a593Smuzhiyun #include <linux/spinlock.h>
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun struct virtio_pci_vq_info {
32*4882a593Smuzhiyun /* the actual virtqueue */
33*4882a593Smuzhiyun struct virtqueue *vq;
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun /* the list node for the virtqueues list */
36*4882a593Smuzhiyun struct list_head node;
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /* MSI-X vector (or none) */
39*4882a593Smuzhiyun unsigned msix_vector;
40*4882a593Smuzhiyun };
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun /* Our device structure */
43*4882a593Smuzhiyun struct virtio_pci_device {
44*4882a593Smuzhiyun struct virtio_device vdev;
45*4882a593Smuzhiyun struct pci_dev *pci_dev;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /* In legacy mode, these two point to within ->legacy. */
48*4882a593Smuzhiyun /* Where to read and clear interrupt */
49*4882a593Smuzhiyun u8 __iomem *isr;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun /* Modern only fields */
52*4882a593Smuzhiyun /* The IO mapping for the PCI config space (non-legacy mode) */
53*4882a593Smuzhiyun struct virtio_pci_common_cfg __iomem *common;
54*4882a593Smuzhiyun /* Device-specific data (non-legacy mode) */
55*4882a593Smuzhiyun void __iomem *device;
56*4882a593Smuzhiyun /* Base of vq notifications (non-legacy mode). */
57*4882a593Smuzhiyun void __iomem *notify_base;
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun /* So we can sanity-check accesses. */
60*4882a593Smuzhiyun size_t notify_len;
61*4882a593Smuzhiyun size_t device_len;
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun /* Capability for when we need to map notifications per-vq. */
64*4882a593Smuzhiyun int notify_map_cap;
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun /* Multiply queue_notify_off by this value. (non-legacy mode). */
67*4882a593Smuzhiyun u32 notify_offset_multiplier;
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun int modern_bars;
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun /* Legacy only field */
72*4882a593Smuzhiyun /* the IO mapping for the PCI config space */
73*4882a593Smuzhiyun void __iomem *ioaddr;
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun /* a list of queues so we can dispatch IRQs */
76*4882a593Smuzhiyun spinlock_t lock;
77*4882a593Smuzhiyun struct list_head virtqueues;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /* array of all queues for house-keeping */
80*4882a593Smuzhiyun struct virtio_pci_vq_info **vqs;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun /* MSI-X support */
83*4882a593Smuzhiyun int msix_enabled;
84*4882a593Smuzhiyun int intx_enabled;
85*4882a593Smuzhiyun cpumask_var_t *msix_affinity_masks;
86*4882a593Smuzhiyun /* Name strings for interrupts. This size should be enough,
87*4882a593Smuzhiyun * and I'm too lazy to allocate each name separately. */
88*4882a593Smuzhiyun char (*msix_names)[256];
89*4882a593Smuzhiyun /* Number of available vectors */
90*4882a593Smuzhiyun unsigned msix_vectors;
91*4882a593Smuzhiyun /* Vectors allocated, excluding per-vq vectors if any */
92*4882a593Smuzhiyun unsigned msix_used_vectors;
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun /* Whether we have vector per vq */
95*4882a593Smuzhiyun bool per_vq_vectors;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
98*4882a593Smuzhiyun struct virtio_pci_vq_info *info,
99*4882a593Smuzhiyun unsigned idx,
100*4882a593Smuzhiyun void (*callback)(struct virtqueue *vq),
101*4882a593Smuzhiyun const char *name,
102*4882a593Smuzhiyun bool ctx,
103*4882a593Smuzhiyun u16 msix_vec);
104*4882a593Smuzhiyun void (*del_vq)(struct virtio_pci_vq_info *info);
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
107*4882a593Smuzhiyun };
108*4882a593Smuzhiyun
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
 * virtqueues. Thus, we need at least 2 vectors for MSI. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun /* Convert a generic virtio device to our structure */
to_vp_device(struct virtio_device * vdev)118*4882a593Smuzhiyun static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
119*4882a593Smuzhiyun {
120*4882a593Smuzhiyun return container_of(vdev, struct virtio_pci_device, vdev);
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun /* wait for pending irq handlers */
124*4882a593Smuzhiyun void vp_synchronize_vectors(struct virtio_device *vdev);
125*4882a593Smuzhiyun /* the notify function used when creating a virt queue */
126*4882a593Smuzhiyun bool vp_notify(struct virtqueue *vq);
127*4882a593Smuzhiyun /* the config->del_vqs() implementation */
128*4882a593Smuzhiyun void vp_del_vqs(struct virtio_device *vdev);
129*4882a593Smuzhiyun /* the config->find_vqs() implementation */
130*4882a593Smuzhiyun int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
131*4882a593Smuzhiyun struct virtqueue *vqs[], vq_callback_t *callbacks[],
132*4882a593Smuzhiyun const char * const names[], const bool *ctx,
133*4882a593Smuzhiyun struct irq_affinity *desc);
134*4882a593Smuzhiyun const char *vp_bus_name(struct virtio_device *vdev);
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun /* Setup the affinity for a virtqueue:
137*4882a593Smuzhiyun * - force the affinity for per vq vector
138*4882a593Smuzhiyun * - OR over all affinities for shared MSI
139*4882a593Smuzhiyun * - ignore the affinity request if we're using INTX
140*4882a593Smuzhiyun */
141*4882a593Smuzhiyun int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);
144*4882a593Smuzhiyun
/* Legacy (virtio 0.9) transport: real implementations when the legacy
 * driver is built, no-op stubs otherwise so callers need no #ifdefs. */
#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
int virtio_pci_legacy_probe(struct virtio_pci_device *);
void virtio_pci_legacy_remove(struct virtio_pci_device *);
#else
/* Stub: legacy support compiled out — report "no such device". */
static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	return -ENODEV;
}
/* Stub: nothing to tear down when legacy support is compiled out. */
static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
}
#endif
/* Modern (virtio 1.0+) transport: always built. */
int virtio_pci_modern_probe(struct virtio_pci_device *);
void virtio_pci_modern_remove(struct virtio_pci_device *);
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun #endif
161