/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

/**
 * virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements the operations that
 *       get/set a value as simple reads/writes! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again.
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	callbacks: array of callbacks, for each virtqueue
 *		include a NULL entry for vqs that do not need a callback
 *	names: array of virtqueue names (mainly for debugging)
 *		include a NULL entry for vqs unused by driver
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the dev->feature bits if it wants.
 *	Note: despite the name this can be called any number of times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *	This returns a pointer to the bus name a la pci_name from which
 *	the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
};
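
/*
 * A minimal sketch of how a transport backend might wire up these ops; the
 * my_transport_* functions below are hypothetical and not part of this
 * header:
 *
 *	static const struct virtio_config_ops my_transport_config_ops = {
 *		.get			= my_transport_get,
 *		.set			= my_transport_set,
 *		.get_status		= my_transport_get_status,
 *		.set_status		= my_transport_set_status,
 *		.reset			= my_transport_reset,
 *		.find_vqs		= my_transport_find_vqs,
 *		.del_vqs		= my_transport_del_vqs,
 *		.get_features		= my_transport_get_features,
 *		.finalize_features	= my_transport_finalize_features,
 *	};
 *
 * Drivers normally do not call these ops directly; they go through the
 * virtio_* wrappers further down in this header.
 */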

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}
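
/*
 * A minimal usage sketch, assuming a hypothetical driver (the feature bit
 * VIRTIO_NET_F_MRG_RXBUF and the vi->mergeable_rx_bufs field are only an
 * illustration here): a driver checks a device feature that it has also
 * advertised in its own feature table.
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
 *		vi->mergeable_rx_bufs = true;
 *
 * If the driver did not list the bit in its virtio_driver feature_table,
 * virtio_check_driver_offered_feature() above will complain.
 */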

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features); this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	vq_callback_t *callbacks[] = { c };
	const char *names[] = { n };
	struct virtqueue *vq;
	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
					 NULL);
	if (err < 0)
		return ERR_PTR(err);
	return vq;
}

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		    struct virtqueue *vqs[], vq_callback_t *callbacks[],
		    const char * const names[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
				      desc);
}
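
/*
 * A minimal sketch of finding a pair of virtqueues from a driver's probe
 * routine; the callback and queue names below are hypothetical:
 *
 *	struct virtqueue *vqs[2];
 *	vq_callback_t *callbacks[] = { my_rx_done, my_tx_done };
 *	static const char * const names[] = { "rx", "tx" };
 *	int err;
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *	if (err)
 *		return err;
 *
 * On teardown (or on a later error path) the driver is expected to call
 * vdev->config->del_vqs(vdev) to free what find_vqs() created.
 */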

/**
 * virtio_device_ready - enable vq use in probe function
 * @vdev: the device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
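
/*
 * A minimal probe-time sequence sketch, assuming a hypothetical driver that
 * wants to kick a virtqueue before its probe routine returns:
 *
 *	err = virtio_find_vqs(vdev, 1, &vq, callbacks, names, NULL);
 *	if (err)
 *		return err;
 *	virtio_device_ready(vdev);
 *	virtqueue_kick(vq);
 *
 * virtio_device_ready() sets DRIVER_OK, so the kick is legal even though
 * probe has not returned yet. Drivers that never touch their vqs during
 * probe can skip the explicit call; the core sets DRIVER_OK for them once
 * probe returns.
 */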

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be set
 * due to config support, irq type and sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
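
/*
 * A minimal sketch of how the accessors above are meant to be used: a
 * driver keeps device-endian values (__virtio16/32/64) in shared
 * structures and converts at the boundary. The descriptor and fields
 * below are purely illustrative:
 *
 *	__virtio32 wire_len = some_desc->len;
 *	u32 len = virtio32_to_cpu(vdev, wire_len);
 *	some_desc->len = cpu_to_virtio32(vdev, len + pad);
 *
 * On a legacy (pre-VIRTIO_F_VERSION_1) device these values are
 * guest-native endian; on a modern device they are always little-endian.
 */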

#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)

#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member),	\
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)
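
/*
 * A minimal usage sketch for virtio_cread(), assuming an illustrative
 * device config layout (struct my_virtio_cfg and its fields are
 * hypothetical, not part of this header):
 *
 *	struct my_virtio_cfg {
 *		__virtio64 capacity;
 *		__virtio32 blk_size;
 *	};
 *
 *	u64 capacity;
 *	u32 blk_size;
 *
 *	virtio_cread(vdev, struct my_virtio_cfg, capacity, &capacity);
 *	virtio_cread(vdev, struct my_virtio_cfg, blk_size, &blk_size);
 *
 * The macro converts from device to CPU endianness and, for fields wider
 * than 4 bytes, goes through __virtio_cread_many() so the read is
 * consistent against concurrent config changes.
 */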

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		__le16: (u16)le16_to_cpu(x), \
		__le32: (u32)le32_to_cpu(x), \
		__le64: (u64)le64_to_cpu(x) \
		)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		__u8: (x), \
		__le16: cpu_to_le16(x), \
		__le32: cpu_to_le32(x), \
		__le64: cpu_to_le64(x) \
		)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member),	\
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
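
/*
 * A minimal sketch of the _le variants, assuming an illustrative config
 * struct whose fields are declared with fixed little-endian types (the
 * struct and field names below are hypothetical):
 *
 *	struct my_modern_cfg {
 *		__le32 num_queues;
 *	};
 *
 *	u32 num_queues;
 *
 *	virtio_cread_le(vdev, struct my_modern_cfg, num_queues, &num_queues);
 *	virtio_cwrite_le(vdev, struct my_modern_cfg, num_queues, &num_queues);
 *
 * These are meant for VIRTIO_F_VERSION_1-only (modern) devices, whose
 * config space is always little-endian regardless of guest endianness.
 */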


/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})
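
/*
 * A minimal usage sketch for the conditional accessors, again with a
 * hypothetical feature bit, config struct and default value:
 *
 *	u32 opt_val;
 *
 *	if (virtio_cread_feature(vdev, MY_DRIVER_F_OPT,
 *				 struct my_virtio_cfg, opt_field, &opt_val))
 *		opt_val = MY_DRIVER_DEFAULT_OPT;
 *
 * The statement expression evaluates to 0 on success and -ENOENT when the
 * device did not offer the feature, so a fallback can be chosen inline.
 */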

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
int arch_has_restricted_virtio_memory_access(void);
#else
static inline int arch_has_restricted_virtio_memory_access(void)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */

#endif /* _LINUX_VIRTIO_CONFIG_H */