/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
	u64 addr;
	u64 len;
};

/**
 * virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements the get/set operations
 *	as simple reads/writes of the value! Generally speaking, any of
 *	@get/@set, @get_status/@set_status, or @get_features/
 *	@finalize_features are NOT safe to be called from an atomic
 *	context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio_device
 *	After this, status and feature negotiation must be done again.
 *	The device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	callbacks: array of callbacks, for each virtqueue
 *		include a NULL entry for vqs that do not need a callback
 *	names: array of virtqueue names (mainly for debugging)
 *		include a NULL entry for vqs unused by driver
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	vdev->features if it wants.
 *	Note: despite the name this can be called any number of times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *	This returns a pointer to the bus name a la pci_name from which
 *	the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
};
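
/*
 * Illustrative sketch (not kernel documentation): a transport normally
 * fills in a static instance of these ops and points vdev->config at it
 * before calling register_virtio_device().  The "foo" transport and its
 * foo_* helpers below are hypothetical names used only as an example;
 * optional ops such as .bus_name or .get_shm_region may be left NULL.
 *
 *	static const struct virtio_config_ops foo_config_ops = {
 *		.get			= foo_get,
 *		.set			= foo_set,
 *		.generation		= foo_generation,
 *		.get_status		= foo_get_status,
 *		.set_status		= foo_set_status,
 *		.reset			= foo_reset,
 *		.find_vqs		= foo_find_vqs,
 *		.del_vqs		= foo_del_vqs,
 *		.get_features		= foo_get_features,
 *		.finalize_features	= foo_finalize_features,
 *	};
 */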

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *			Devices should normally use virtio_has_feature,
 *			which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	/* Did you forget to fix assumptions on max features? */
	if (__builtin_constant_p(fbit))
		BUILD_BUG_ON(fbit >= 64);
	else
		BUG_ON(fbit >= 64);

	vdev->features &= ~BIT_ULL(fbit);
}
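
/*
 * Illustrative sketch: a transport may use the helpers above while
 * fixing up the negotiated features, typically from its
 * finalize_features hook.  The foo_* names are hypothetical.
 *
 *	static int foo_finalize_features(struct virtio_device *vdev)
 *	{
 *		if (!foo_supports_event_idx(vdev))
 *			__virtio_clear_bit(vdev, VIRTIO_RING_F_EVENT_IDX);
 *		foo_write_driver_features(vdev, vdev->features);
 *		return 0;
 *	}
 */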

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}
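
/*
 * Illustrative sketch: drivers call virtio_has_feature() after feature
 * negotiation to decide whether an optional config field or mechanism
 * is available.  This is roughly how virtio-net picks up the MTU; the
 * snippet is simplified, not the actual driver code.
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU))
 *		mtu = virtio_cread16(vdev,
 *				     offsetof(struct virtio_net_config, mtu));
 */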

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
					vq_callback_t *c, const char *n)
{
	vq_callback_t *callbacks[] = { c };
	const char *names[] = { n };
	struct virtqueue *vq;
	int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
					 NULL);
	if (err < 0)
		return ERR_PTR(err);
	return vq;
}
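
/*
 * Illustrative sketch: a single-queue driver can use
 * virtio_find_single_vq() from its probe routine.  foo_probe() and
 * foo_intr() are hypothetical.
 *
 *	static int foo_probe(struct virtio_device *vdev)
 *	{
 *		struct virtqueue *vq;
 *
 *		vq = virtio_find_single_vq(vdev, foo_intr, "requests");
 *		if (IS_ERR(vq))
 *			return PTR_ERR(vq);
 *		...
 *	}
 */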

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		    struct virtqueue *vqs[], vq_callback_t *callbacks[],
		    const char * const names[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
			struct virtqueue *vqs[], vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
				      desc);
}
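
/*
 * Illustrative sketch: a multi-queue driver passes parallel arrays of
 * callbacks and names; a NULL callback entry means that queue needs no
 * callback.  The foo_* callbacks and queue names are hypothetical.
 *
 *	struct virtqueue *vqs[2];
 *	vq_callback_t *callbacks[] = { foo_rx_done, foo_tx_done };
 *	static const char * const names[] = { "rx", "tx" };
 *	int err;
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *	if (err)
 *		return err;
 */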

/**
 * virtio_device_ready - enable vq use in probe function
 * @vdev: the device
 *
 * The driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *vdev)
{
	unsigned status = vdev->config->get_status(vdev);

	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
	vdev->config->set_status(vdev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
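
/*
 * Illustrative sketch: a probe routine that must kick a virtqueue before
 * returning (for example to send an initial command) has to set
 * DRIVER_OK first via virtio_device_ready().  The foo_* names are
 * hypothetical.
 *
 *	static int foo_probe(struct virtio_device *vdev)
 *	{
 *		...find vqs and allocate driver state...
 *
 *		virtio_device_ready(vdev);
 *		foo_send_init_command(vdev);
 *		return 0;
 *	}
 */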

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
	if (!vdev->config->bus_name)
		return "virtio";
	return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be
 * set due to lack of config support, the irq type or irq sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}
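
/*
 * Illustrative sketch: spread per-queue interrupt affinity across the
 * online CPUs, one queue per CPU.  This is only a simplified example of
 * what a multi-queue driver might do; nvqs and vqs[] are assumed to come
 * from an earlier virtio_find_vqs() call.
 *
 *	int i = 0, cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		if (i == nvqs)
 *			break;
 *		virtqueue_set_affinity(vqs[i++], cpumask_of(cpu));
 *	}
 */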

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
			   struct virtio_shm_region *region, u8 id)
{
	if (!vdev->config->get_shm_region)
		return false;
	return vdev->config->get_shm_region(vdev, region, id);
}
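
/*
 * Illustrative sketch: look up a shared memory region advertised by the
 * device, in the way virtiofs locates its DAX cache window.  The region
 * id constant below is hypothetical; real ids are defined per device
 * type.
 *
 *	struct virtio_shm_region cache;
 *
 *	if (virtio_get_shm_region(vdev, &cache, FOO_SHM_ID_CACHE))
 *		pr_info("cache window at 0x%llx, %llu bytes\n",
 *			cache.addr, cache.len);
 */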

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
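
/*
 * Illustrative sketch: data shared with the device via __virtio16/32/64
 * fields must go through the accessors above, since a legacy device may
 * be big-endian while a modern (VIRTIO_F_VERSION_1) device is always
 * little-endian.  "hdr" and its __virtio32 "len" member are
 * hypothetical.
 *
 *	u32 len = virtio32_to_cpu(vdev, hdr->len);
 *	hdr->len = cpu_to_virtio32(vdev, len + pad);
 */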

#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)

#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)

#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
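
/*
 * Illustrative sketch: virtio_cread()/virtio_cwrite() take the struct
 * type and member name, so the offset, size and byte order are derived
 * automatically.  "struct foo_config" and its __virtio16 "queue_size"
 * member are hypothetical.
 *
 *	u16 qsize;
 *
 *	virtio_cread(vdev, struct foo_config, queue_size, &qsize);
 *	qsize = min_t(u16, qsize, 256);
 *	virtio_cwrite(vdev, struct foo_config, queue_size, &qsize);
 */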

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		__le16: (u16)le16_to_cpu(x), \
		__le32: (u32)le32_to_cpu(x), \
		__le64: (u64)le64_to_cpu(x) \
		)

#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		__u8: (x), \
		__le16: cpu_to_le16(x), \
		__le32: cpu_to_le32(x), \
		__le64: cpu_to_le64(x) \
		)

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member), \
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
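
/*
 * Illustrative sketch: the _le variants are for config fields that the
 * specification defines as always little-endian (__le16/__le32/__le64),
 * as found in modern, VERSION_1-only devices, so no per-device byte
 * order check is needed.  "struct foo_config" and its __le32
 * "max_sectors" member are hypothetical.
 *
 *	u32 max_sectors;
 *
 *	virtio_cread_le(vdev, struct foo_config, max_sectors, &max_sectors);
 */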

/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
				       unsigned int offset,
				       void *buf, size_t count, size_t bytes)
{
	u32 old, gen = vdev->config->generation ?
		vdev->config->generation(vdev) : 0;
	int i;

	might_sleep();
	/*
	 * Re-read until the generation counter is stable, so that a
	 * multi-field read is not torn by a concurrent config change.
	 */
	do {
		old = gen;

		for (i = 0; i < count; i++)
			vdev->config->get(vdev, offset + bytes * i,
					  buf + i * bytes, bytes);

		gen = vdev->config->generation ?
			vdev->config->generation(vdev) : 0;
	} while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})
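
/*
 * Illustrative sketch: the conditional accessors fold the feature check
 * and the config read into one expression that evaluates to -ENOENT if
 * the feature was not negotiated.  The feature bit, struct and member
 * below are hypothetical.
 *
 *	u16 qsize;
 *
 *	if (virtio_cread_feature(vdev, FOO_F_QUEUE_SIZE,
 *				 struct foo_config, queue_size, &qsize))
 *		qsize = FOO_DEFAULT_QUEUE_SIZE;
 */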

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
int arch_has_restricted_virtio_memory_access(void);
#else
static inline int arch_has_restricted_virtio_memory_access(void)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */

#endif /* _LINUX_VIRTIO_CONFIG_H */