/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

typedef void vq_callback_t(struct virtqueue *);

/**
 * struct virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements all of the operations
 *       getting/setting a value as a simple read/write! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	callbacks: array of callbacks, for each virtqueue
 *		include a NULL entry for vqs that do not need a callback
 *	names: array of virtqueue names (mainly for debugging)
 *		include a NULL entry for vqs unused by driver
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *	The function guarantees that all memory operations on the
 *	queue before it are visible to the vring_interrupt() that is
 *	called after it.
 *	vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the dev->feature bits if it wants.
 *	Note that despite the name this can be called any number of
 *	times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *	This returns a pointer to the bus name a la pci_name from which
 *	the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	disable_vq_and_reset will guarantee that the callbacks are disabled and
 *	synchronized.
 *	Except for the callback, the caller should guarantee that the vring is
 *	not accessed by any functions of virtqueue.
 * @enable_vq_after_reset: enable a reset queue
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
 *	set.
 * @create_avq: create admin virtqueue resource.
 * @destroy_avq: destroy admin virtqueue resource.
 */
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	void (*synchronize_cbs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
	int (*disable_vq_and_reset)(struct virtqueue *vq);
	int (*enable_vq_after_reset)(struct virtqueue *vq);
	int (*create_avq)(struct virtio_device *vdev);
	void (*destroy_avq)(struct virtio_device *vdev);
};
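
/*
 * Illustrative sketch (not part of this header): a transport typically points
 * virtio_device->config at a table like the one below before registering the
 * device; drivers then reach these ops through the inline helpers further down
 * in this file rather than calling them directly.  All my_* names here are
 * hypothetical.
 *
 *	static const struct virtio_config_ops my_config_ops = {
 *		.get			= my_get,
 *		.set			= my_set,
 *		.generation		= my_generation,
 *		.get_status		= my_get_status,
 *		.set_status		= my_set_status,
 *		.reset			= my_reset,
 *		.find_vqs		= my_find_vqs,
 *		.del_vqs		= my_del_vqs,
 *		.get_features		= my_get_features,
 *		.finalize_features	= my_finalize_features,
 *		.bus_name		= my_bus_name,
 *	};
 */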

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}
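
/*
 * Illustrative sketch: a transport's finalize_features implementation can use
 * __virtio_clear_bit() to drop a negotiated bit it cannot support before the
 * features are written to the device.  The transport name and the choice of
 * VIRTIO_F_RING_PACKED below are hypothetical, not taken from this file.
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
 *		return my_write_features(vdev, vdev->features);
 *	}
 */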

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}
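
/*
 * Illustrative sketch: a driver typically checks negotiated features in its
 * probe routine before relying on optional behaviour.  VIRTIO_NET_F_MQ is a
 * real virtio-net feature bit, but the helper names are made up and the
 * snippet is only an example, not code from this header.
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ))
 *		setup_multiqueue(vdev);
 *	else
 *		setup_single_queue(vdev);
 */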

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	vq_callback_t *callbacks[] = { c };
	const char *names[] = { n };
	struct virtqueue *vq;
	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
					 NULL);
	if (err < 0)
		return ERR_PTR(err);
	return vq;
}
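
/*
 * Illustrative sketch: a single-queue driver's probe function might request
 * its one virtqueue like this.  The callback and queue name are invented for
 * the example.
 *
 *	vq = virtio_find_single_vq(vdev, my_recv_done, "requests");
 *	if (IS_ERR(vq))
 *		return PTR_ERR(vq);
 */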

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		    struct virtqueue *vqs[], vq_callback_t *callbacks[],
		    const char * const names[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
				      desc);
}

/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the virtio device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
	if (dev->config->synchronize_cbs) {
		dev->config->synchronize_cbs(dev);
	} else {
		/*
		 * A best effort fallback to synchronize with
		 * interrupts, preemption and softirq disabled
		 * regions. See comment above synchronize_rcu().
		 */
		synchronize_rcu();
	}
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the virtio device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
	 * will see the driver specific setup if it sees vq->broken
	 * as false (even if the notifications come before DRIVER_OK).
	 */
	virtio_synchronize_cbs(dev);
	__virtio_unbreak_device(dev);
#endif
	/*
	 * The transport should ensure the visibility of vq->broken
	 * before setting DRIVER_OK. See the comments for the transport
	 * specific set_status() method.
	 *
	 * A well behaved device will only notify a virtqueue after
	 * DRIVER_OK, which means the device should "see" the coherent
	 * memory write that set vq->broken to false (done by the driver
	 * when it sees DRIVER_OK).  The driver's subsequent
	 * vring_interrupt() will then see vq->broken as false, so we
	 * won't lose any notification.
	 */
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
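
/*
 * Illustrative sketch of the usual probe-time ordering (the driver and the
 * identifiers below are hypothetical): find the vqs first, call
 * virtio_device_ready() only once the driver is prepared to service
 * callbacks, and only then expose buffers to the device.
 *
 *	static int my_probe(struct virtio_device *vdev)
 *	{
 *		int err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 *		if (err)
 *			return err;
 *
 *		virtio_device_ready(vdev);
 *		my_fill_receive_buffers(vdev);
 *		return 0;
 *	}
 */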

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be
 * honoured due to lack of config support, the irq type, or irq sharing.
 *
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}
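
/*
 * Illustrative sketch: a multi-queue driver often spreads its queues across
 * CPUs after creating them.  This mirrors common driver practice but is not
 * code from this header.
 *
 *	for (i = 0; i < num_queues; i++)
 *		virtqueue_set_affinity(vqs[i],
 *				       cpumask_of(i % num_online_cpus()));
 */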

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}
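
/*
 * Illustrative sketch: a driver looks up a shared memory region by its id and
 * then maps the returned address range.  MY_SHM_REGION_ID is made up for the
 * example.
 *
 *	struct virtio_shm_region shm;
 *
 *	if (!virtio_get_shm_region(vdev, &shm, MY_SHM_REGION_ID))
 *		return -ENXIO;
 *	(shm.addr and shm.len now describe the region)
 */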

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	       virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}

#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)

#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member), \
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
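
/*
 * Illustrative sketch: reading and writing config fields through the macros
 * above.  struct virtio_blk_config and its capacity/wce members are real
 * virtio-blk definitions, but the snippet is only an example of how a driver
 * might use these accessors; it is not code from this header.
 *
 *	u64 capacity;
 *	u8 wb = 1;
 *
 *	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 *	virtio_cwrite(vdev, struct virtio_blk_config, wce, &wb);
 */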

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		__le16: (u16)le16_to_cpu(x), \
		__le32: (u32)le32_to_cpu(x), \
		__le64: (u64)le64_to_cpu(x) \
		)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		__u8: (x), \
		__le16: cpu_to_le16(x), \
		__le32: cpu_to_le32(x), \
		__le64: cpu_to_le64(x) \
		)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)		\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member), \
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)


/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})
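
/*
 * Illustrative sketch: the conditional accessor reads a config field only if
 * the corresponding feature was negotiated and returns -ENOENT otherwise.  The
 * virtio-net symbols are real, but the snippet itself is only an example.
 *
 *	u16 mtu;
 *
 *	if (virtio_cread_feature(vdev, VIRTIO_NET_F_MTU,
 *				 struct virtio_net_config, mtu, &mtu) < 0)
 *		mtu = ETH_DATA_LEN;
 */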

#endif /* _LINUX_VIRTIO_CONFIG_H */