/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#include <asm/hyperv-tlfs.h>

#define MAX_PAGE_BUFFER_COUNT		32
#define MAX_MULTIPAGE_BUFFER_COUNT	32 /* 128K */

#pragma pack(push, 1)

/*
 * Types for GPADL, which decide how the GPADL header is created.
 *
 * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
 * same as HV_HYP_PAGE_SIZE.
 *
 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
 * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
 * into the gpadl, therefore the number of HV_HYP_PAGEs and the indexes of
 * each HV_HYP_PAGE will differ between the types of GPADL, for example if
 * PAGE_SIZE is 64K:
 *
 * BUFFER:
 *
 * gva:    |--       64k      --|--       64k      --| ... |
 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
 * index:    0    1    2     15   16   17   18 .. 31   32 ...
 *           |    |    ...   |    |    |    ...   |    ...
 *           v    V          V    V    V          V
 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
 * index:    0    1    2 ... 15   16   17   18 .. 31   32 ...
 *
 * RING:
 *
 *         | header  |      data     |     | header  |      data     |     |
 * gva:    |-- 64k --|--    64k    --| ... |-- 64k --|--    64k    --| ... |
 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
 * index:    0    1    16   17   18    31   ...   n   n+1  n+16    ...  2n
 *           |        /    /          /           |        /            /
 *           |       /    /          /            |       /            /
 *           |      /    /   ...    /    ...      |      /    ...     /
 *           |     /    /          /              |     /            /
 *           |    /    /          /               |    /            /
 *           V   V    V          V                V   V            v
 * gpadl:  | 4k | 4k |    ...    |      ...       | 4k | 4k |  ...  |
 * index:    0    1    2   ...   16     ...      n-15 n-14 n-13 ... 2n-30
 */
enum hv_gpadl_type {
        HV_GPADL_BUFFER,
        HV_GPADL_RING
};

/* Single-page buffer */
struct hv_page_buffer {
        u32 len;
        u32 offset;
        u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
        /* Length and Offset determine the # of pfns in the array */
        u32 len;
        u32 offset;
        u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
        /* Length and Offset determine the # of pfns in the array */
        u32 len;
        u32 offset;
        u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 + \
                                        (sizeof(struct hv_page_buffer) * \
                                         MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 + \
                                         sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
        /* Offset in bytes from the start of ring data below */
        u32 write_index;

        /* Offset in bytes from the start of ring data below */
        u32 read_index;

        u32 interrupt_mask;

        /*
         * WS2012/Win8 and later versions of Hyper-V implement interrupt
         * driven flow management. The feature bit feat_pending_send_sz
         * is set by the host on the host->guest ring buffer, and by the
         * guest on the guest->host ring buffer.
         *
         * The meaning of the feature bit is a bit complex in that it has
         * semantics that apply to both ring buffers. If the guest sets
         * the feature bit in the guest->host ring buffer, the guest is
         * telling the host that:
         * 1) It will set the pending_send_sz field in the guest->host ring
         *    buffer when it is waiting for space to become available, and
         * 2) It will read the pending_send_sz field in the host->guest
         *    ring buffer and interrupt the host when it frees enough space
         *
         * Similarly, if the host sets the feature bit in the host->guest
         * ring buffer, the host is telling the guest that:
         * 1) It will set the pending_send_sz field in the host->guest ring
         *    buffer when it is waiting for space to become available, and
         * 2) It will read the pending_send_sz field in the guest->host
         *    ring buffer and interrupt the guest when it frees enough space
         *
         * If either the guest or host does not set the feature bit that it
         * owns, that guest or host must do polling if it encounters a full
         * ring buffer, and not signal the other end with an interrupt.
         */
        u32 pending_send_sz;
        u32 reserved1[12];
        union {
                struct {
                        u32 feat_pending_send_sz:1;
                };
                u32 value;
        } feature_bits;

        /* Pad it to PAGE_SIZE so that data starts on page boundary */
        u8 reserved2[PAGE_SIZE - 68];

        /*
         * Ring data starts here + RingDataStartOffset
         * !!! DO NOT place any fields below this !!!
         */
        u8 buffer[];
} __packed;

/* Calculate the proper size of a ringbuffer, it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
                                               (payload_sz))
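
/*
 * Usage sketch (illustrative, not part of the upstream API surface): since
 * struct hv_ring_buffer is padded so its header occupies exactly one page,
 * VMBUS_RING_SIZE() yields the page-aligned total of header plus payload.
 * A driver wanting at least 16K of usable payload per direction might size
 * its rings as below; the 16K figure is an arbitrary assumption.
 *
 *	ret = vmbus_alloc_ring(channel,
 *			       VMBUS_RING_SIZE(16 * 1024),	// send
 *			       VMBUS_RING_SIZE(16 * 1024));	// recv
 *	// With 4K pages this is PAGE_ALIGN(4096 + 16384) = 20K per ring.
 */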

struct hv_ring_buffer_info {
        struct hv_ring_buffer *ring_buffer;
        u32 ring_size;			/* Include the shared header */
        struct reciprocal_value ring_size_div10_reciprocal;
        spinlock_t ring_lock;

        u32 ring_datasize;		/* < ring_size */
        u32 priv_read_index;
        /*
         * The ring buffer mutex lock. This lock prevents the ring buffer from
         * being freed while the ring buffer is being accessed.
         */
        struct mutex ring_buffer_mutex;
};


static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
        u32 read_loc, write_loc, dsize, read;

        dsize = rbi->ring_datasize;
        read_loc = rbi->ring_buffer->read_index;
        write_loc = READ_ONCE(rbi->ring_buffer->write_index);

        read = write_loc >= read_loc ? (write_loc - read_loc) :
                (dsize - read_loc) + write_loc;

        return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
        u32 read_loc, write_loc, dsize, write;

        dsize = rbi->ring_datasize;
        read_loc = READ_ONCE(rbi->ring_buffer->read_index);
        write_loc = rbi->ring_buffer->write_index;

        write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
                read_loc - write_loc;
        return write;
}
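
/*
 * Worked example (illustrative): with ring_datasize = 4096, read_index =
 * 3000 and write_index = 1000, the write side has wrapped, so
 * hv_get_bytes_to_read() returns (4096 - 3000) + 1000 = 2096 bytes and
 * hv_get_bytes_to_write() returns 3000 - 1000 = 2000 bytes; by these
 * formulas the two always sum to ring_datasize.
 */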

static inline u32 hv_get_avail_to_write_percent(
                const struct hv_ring_buffer_info *rbi)
{
        u32 avail_write = hv_get_bytes_to_write(rbi);

        return reciprocal_divide(
                        (avail_write << 3) + (avail_write << 1),
                        rbi->ring_size_div10_reciprocal);
}
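
/*
 * Note: (avail_write << 3) + (avail_write << 1) is simply avail_write * 10,
 * so the function computes (avail_write * 10) / (ring_size / 10), i.e. the
 * available space as a percentage of the ring size, using a precomputed
 * reciprocal to avoid a runtime division. For example (illustrative),
 * avail_write = 2048 in an 8192-byte ring gives 20480 / 819 = 25 (percent).
 */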

/*
 * The VMBUS version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 */

#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V4_1	((4 << 16) | (1))
#define VERSION_WIN10_V5	((5 << 16) | (0))
#define VERSION_WIN10_V5_1	((5 << 16) | (1))
#define VERSION_WIN10_V5_2	((5 << 16) | (2))
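
/*
 * Illustrative sketch: the major number lives in the upper 16 bits and the
 * minor number in the lower 16 bits, so a negotiated version splits back
 * apart as
 *
 *	u16 major = version >> 16;
 *	u16 minor = version & 0xffff;
 *
 * e.g. VERSION_WIN10_V5_2 yields major 5, minor 2.
 */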

/* Maximum size of a pipe payload: 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
        guid_t if_type;
        guid_t if_instance;

        /*
         * These two fields are not currently used.
         */
        u64 reserved1;
        u64 reserved2;

        u16 chn_flags;
        u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

        union {
                /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
                struct {
                        unsigned char user_def[MAX_USER_DEFINED_BYTES];
                } std;

                /*
                 * Pipes:
                 * The following structure is an integrated pipe protocol, which
                 * is implemented on top of standard user-defined data. Pipe
                 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
                 * use.
                 */
                struct {
                        u32 pipe_mode;
                        unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
                } pipe;
        } u;
        /*
         * The sub_channel_index is defined in Win8: a value of zero means a
         * primary channel and a value of non-zero means a sub-channel.
         *
         * Before Win8, the field is reserved, meaning it's always zero.
         */
        u16 sub_channel_index;
        u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
        u16 type;
        u16 offset8;
        u16 len8;
        u16 flags;
        u64 trans_id;
} __packed;

struct vmpacket_header {
        u32 prev_pkt_start_offset;
        struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
        u32 byte_count;
        u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
        struct vmpacket_descriptor d;
        u16 xfer_pageset_id;
        u8 sender_owns_set;
        u8 reserved;
        u32 range_cnt;
        struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u16 xfer_pageset_id;
        u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
        u32 byte_count;
        u32 byte_offset;
        u64 pfn_array[];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to a MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 range_cnt;
        struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 reserved;	/* for alignment to a 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
        struct vmpacket_descriptor d;
        u32 reserved;
        u32 range_cnt;
        struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
        struct vmpacket_descriptor d;
        u64 total_bytes;
        u32 offset;
        u32 byte_cnt;
        unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
        struct vmpacket_descriptor simple_hdr;
        struct vmtransfer_page_packet_header xfer_page_hdr;
        struct vmgpadl_packet_header gpadl_hdr;
        struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
        struct vmestablish_gpadl establish_gpadl_hdr;
        struct vmteardown_gpadl teardown_gpadl_hdr;
        struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
        (void *)(((unsigned char *)__packet) +	\
         ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
        ((((struct vmpacket_descriptor *)__packet)->len8 -	\
          ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
        (((struct vmpacket_descriptor *)__packet)->type)

enum vmbus_packet_type {
        VM_PKT_INVALID				= 0x0,
        VM_PKT_SYNCH				= 0x1,
        VM_PKT_ADD_XFER_PAGESET			= 0x2,
        VM_PKT_RM_XFER_PAGESET			= 0x3,
        VM_PKT_ESTABLISH_GPADL			= 0x4,
        VM_PKT_TEARDOWN_GPADL			= 0x5,
        VM_PKT_DATA_INBAND			= 0x6,
        VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
        VM_PKT_DATA_USING_GPADL			= 0x8,
        VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
        VM_PKT_CANCEL_REQUEST			= 0xa,
        VM_PKT_COMP				= 0xb,
        VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
        VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
        CHANNELMSG_INVALID			=  0,
        CHANNELMSG_OFFERCHANNEL			=  1,
        CHANNELMSG_RESCIND_CHANNELOFFER		=  2,
        CHANNELMSG_REQUESTOFFERS		=  3,
        CHANNELMSG_ALLOFFERS_DELIVERED		=  4,
        CHANNELMSG_OPENCHANNEL			=  5,
        CHANNELMSG_OPENCHANNEL_RESULT		=  6,
        CHANNELMSG_CLOSECHANNEL			=  7,
        CHANNELMSG_GPADL_HEADER			=  8,
        CHANNELMSG_GPADL_BODY			=  9,
        CHANNELMSG_GPADL_CREATED		= 10,
        CHANNELMSG_GPADL_TEARDOWN		= 11,
        CHANNELMSG_GPADL_TORNDOWN		= 12,
        CHANNELMSG_RELID_RELEASED		= 13,
        CHANNELMSG_INITIATE_CONTACT		= 14,
        CHANNELMSG_VERSION_RESPONSE		= 15,
        CHANNELMSG_UNLOAD			= 16,
        CHANNELMSG_UNLOAD_RESPONSE		= 17,
        CHANNELMSG_18				= 18,
        CHANNELMSG_19				= 19,
        CHANNELMSG_20				= 20,
        CHANNELMSG_TL_CONNECT_REQUEST		= 21,
        CHANNELMSG_MODIFYCHANNEL		= 22,
        CHANNELMSG_TL_CONNECT_RESULT		= 23,
        CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
        enum vmbus_channel_message_type msgtype;
        u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
        struct vmbus_channel_message_header header;
        u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
        struct vmbus_channel_message_header header;
        u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
        struct vmbus_channel_message_header header;
        struct vmbus_channel_offer offer;
        u32 child_relid;
        u8 monitorid;
        /*
         * win7 and beyond splits this field into a bit field.
         */
        u8 monitor_allocated:1;
        u8 reserved:7;
        /*
         * These are new fields added in win7 and later.
         * Do not access these fields without checking the
         * negotiated protocol.
         *
         * If "is_dedicated_interrupt" is set, we must not set the
         * associated bit in the channel bitmap while sending the
         * interrupt to the host.
         *
         * connection_id is to be used in signaling the host.
         */
        u16 is_dedicated_interrupt:1;
        u16 reserved1:15;
        u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
        return rbi->ring_buffer->pending_send_sz;
}

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
        struct vmbus_channel_message_header header;

        /* Identifies the specific VMBus channel that is being opened. */
        u32 child_relid;

        /* ID making a particular open request at a channel offer unique. */
        u32 openid;

        /* GPADL for the channel's ring buffer. */
        u32 ringbuffer_gpadlhandle;

        /*
         * Starting with win8, this field will be used to specify
         * the target virtual processor on which to deliver the interrupt for
         * the host to guest communication.
         * Prior to win8, incoming channel interrupts would only
         * be delivered on cpu 0. Setting this value to 0 would
         * preserve the earlier behavior.
         */
        u32 target_vp;

        /*
         * The upstream ring buffer begins at offset zero in the memory
         * described by RingBufferGpadlHandle. The downstream ring buffer
         * follows it at this offset (in pages).
         */
        u32 downstream_ringbuffer_pageoffset;

        /* User-specific data to be passed along to the server endpoint. */
        unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 openid;
        u32 status;
} __packed;

/* Close channel parameters; */
struct vmbus_channel_close_channel {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
        u16 range_buflen;
        u16 rangecount;
        struct gpa_range range[];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
        struct vmbus_channel_message_header header;
        u32 msgnumber;
        u32 gpadl;
        u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
        u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
        struct vmbus_channel_message_header header;
        u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
        struct vmbus_channel_message_header header;
        u32 vmbus_version_requested;
        u32 target_vcpu;	/* The VCPU the host should respond to */
        union {
                u64 interrupt_page;
                struct {
                        u8 msg_sint;
                        u8 padding1[3];
                        u32 padding2;
                };
        };
        u64 monitor_page1;
        u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
        struct vmbus_channel_message_header header;
        guid_t guest_endpoint_id;
        guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 target_vp;
} __packed;

struct vmbus_channel_version_response {
        struct vmbus_channel_message_header header;
        u8 version_supported;

        u8 connection_state;
        u16 padding;

        /*
         * On new hosts that support VMBus protocol 5.0, we must use
         * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
         * and for subsequent messages, we must use the Message Connection ID
         * field in the host-returned Version Response Message.
         *
         * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
         */
        u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
        CHANNEL_OFFER_STATE,
        CHANNEL_OPENING_STATE,
        CHANNEL_OPEN_STATE,
        CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
        /* Bookkeeping stuff */
        struct list_head msglistentry;

        /* So far, this is only used to handle gpadl body message */
        struct list_head submsglist;

        /* Synchronize the request/response if needed */
        struct completion waitevent;
        struct vmbus_channel *waiting_channel;
        union {
                struct vmbus_channel_version_supported version_supported;
                struct vmbus_channel_open_result open_result;
                struct vmbus_channel_gpadl_torndown gpadl_torndown;
                struct vmbus_channel_gpadl_created gpadl_created;
                struct vmbus_channel_version_response version_response;
        } response;

        u32 msgsize;
        /*
         * The channel message that goes out on the "wire".
         * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
         */
        unsigned char msg[];
};

struct vmbus_close_msg {
        struct vmbus_channel_msginfo info;
        struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
        u32 asu32;
        struct {
                u32 id:24;
                u32 reserved:8;
        } u;
};

enum vmbus_device_type {
        HV_IDE = 0,
        HV_SCSI,
        HV_FC,
        HV_NIC,
        HV_ND,
        HV_PCIE,
        HV_FB,
        HV_KBD,
        HV_MOUSE,
        HV_KVP,
        HV_TS,
        HV_HB,
        HV_SHUTDOWN,
        HV_FCOPY,
        HV_BACKUP,
        HV_DM,
        HV_UNKNOWN,
};

struct vmbus_device {
        u16 dev_type;
        guid_t guid;
        bool perf_device;
};

struct vmbus_channel {
        struct list_head listentry;

        struct hv_device *device_obj;

        enum vmbus_channel_state state;

        struct vmbus_channel_offer_channel offermsg;
        /*
         * These are based on the OfferMsg.MonitorId.
         * Save it here for easy access.
         */
        u8 monitor_grp;
        u8 monitor_bit;

        bool rescind; /* got rescind msg */
        struct completion rescind_event;

        u32 ringbuffer_gpadlhandle;

        /* Allocated memory for ring buffer */
        struct page *ringbuffer_page;
        u32 ringbuffer_pagecount;
        u32 ringbuffer_send_offset;
        struct hv_ring_buffer_info outbound;	/* send to parent */
        struct hv_ring_buffer_info inbound;	/* receive from parent */

        struct vmbus_close_msg close_msg;

        /* Statistics */
        u64 interrupts;	/* Host to Guest interrupts */
        u64 sig_events;	/* Guest to Host events */

        /*
         * Guest to host interrupts caused by the outbound ring buffer changing
         * from empty to not empty.
         */
        u64 intr_out_empty;

        /*
         * Indicates that a full outbound ring buffer was encountered. The flag
         * is set to true when a full outbound ring buffer is encountered and
         * set to false when a write to the outbound ring buffer is completed.
         */
        bool out_full_flag;

        /* Channel callback's invoked in softirq context */
        struct tasklet_struct callback_event;
        void (*onchannel_callback)(void *context);
        void *channel_callback_context;

        void (*change_target_cpu_callback)(struct vmbus_channel *channel,
                        u32 old, u32 new);

        /*
         * Synchronize channel scheduling and channel removal; see the inline
         * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
         */
        spinlock_t sched_lock;

        /*
         * A channel can be marked for one of three modes of reading:
         *   BATCHED - callback called from tasklet and should read
         *             channel until empty. Interrupts from the host
         *             are masked while read is in process (default).
         *   DIRECT - callback called from tasklet (softirq).
         *   ISR - callback called in interrupt context and must
         *         invoke its own deferred processing.
         *         Host interrupts are disabled and must be re-enabled
         *         when ring is empty.
         */
        enum hv_callback_mode {
                HV_CALL_BATCHED,
                HV_CALL_DIRECT,
                HV_CALL_ISR
        } callback_mode;

        bool is_dedicated_interrupt;
        u64 sig_event;

        /*
         * Starting with win8, this field will be used to specify the
         * target CPU on which to deliver the interrupt for the host
         * to guest communication.
         *
         * Prior to win8, incoming channel interrupts would only be
         * delivered on CPU 0. Setting this value to 0 would preserve
         * the earlier behavior.
         */
        u32 target_cpu;
        /*
         * Support for sub-channels. For high performance devices,
         * it will be useful to have multiple sub-channels to support
         * a scalable communication infrastructure with the host.
         * The support for sub-channels is implemented as an extension
         * to the current infrastructure.
         * The initial offer is considered the primary channel and this
         * offer message will indicate if the host supports sub-channels.
         * The guest is free to ask for sub-channels to be offered and can
         * open these sub-channels as a normal "primary" channel. However,
         * all sub-channels will have the same type and instance guids as the
         * primary channel. Requests sent on a given channel will result in a
         * response on the same channel.
         */

        /*
         * Sub-channel creation callback. This callback will be called in
         * process context when a sub-channel offer is received from the host.
         * The guest can open the sub-channel in the context of this callback.
         */
        void (*sc_creation_callback)(struct vmbus_channel *new_sc);

        /*
         * Channel rescind callback. Some channels (the hvsock ones), need to
         * register a callback which is invoked in vmbus_onoffer_rescind().
         */
        void (*chn_rescind_callback)(struct vmbus_channel *channel);

        /*
         * All Sub-channels of a primary channel are linked here.
         */
        struct list_head sc_list;
        /*
         * The primary channel this sub-channel belongs to.
         * This will be NULL for the primary channel.
         */
        struct vmbus_channel *primary_channel;
        /*
         * Support per-channel state for use by vmbus drivers.
         */
        void *per_channel_state;

        /*
         * Defer freeing channel until after all cpu's have
         * gone through grace period.
         */
        struct rcu_head rcu;

        /*
         * For sysfs per-channel properties.
         */
        struct kobject kobj;

        /*
         * For performance critical channels (storage, networking
         * etc.), Hyper-V has a mechanism to enhance the throughput
         * at the expense of latency:
         * When the host is to be signaled, we just set a bit in a shared page
         * and this bit will be inspected by the hypervisor within a certain
         * window and if the bit is set, the host will be signaled. The window
         * of time is the monitor latency - currently around 100 usecs. This
         * mechanism improves throughput by:
         *
         * A) Making the host more efficient - each time it wakes up,
         *    it can potentially process more packets. The
         *    monitor latency allows a batch to build up.
         * B) By deferring the hypercall to signal, we will also minimize
         *    the interrupts.
         *
         * Clearly, these optimizations improve throughput at the expense of
         * latency. Furthermore, since the channel is shared for both
         * control and data messages, control messages currently suffer
         * unnecessary latency adversely impacting performance and boot
         * time. To fix this issue, permit tagging the channel as being
         * in "low latency" mode. In this mode, we will bypass the monitor
         * mechanism.
         */
        bool low_latency;

        bool probe_done;

        /*
         * Cache the device ID here for easy access; this is useful, in
         * particular, in situations where the channel's device_obj has
         * not been allocated/initialized yet.
         */
        u16 device_id;

        /*
         * We must offload the handling of the primary/sub channels
         * from the single-threaded vmbus_connection.work_queue to
         * two different workqueues, otherwise we can block
         * vmbus_connection.work_queue and hang: see vmbus_process_offer().
         */
        struct work_struct add_channel_work;

        /*
         * Guest to host interrupts caused by the inbound ring buffer changing
         * from full to not full while a packet is waiting.
         */
        u64 intr_in_full;

        /*
         * The total number of write operations that encountered a full
         * outbound ring buffer.
         */
        u64 out_full_total;

        /*
         * The number of write operations that were the first to encounter a
         * full outbound ring buffer.
         */
        u64 out_full_first;

        /* enabling/disabling fuzz testing on the channel (default is false)*/
        bool fuzz_testing_state;

        /*
         * The interrupt delay will delay the guest from emptying the ring
         * buffer for a specific amount of time. The delay is in microseconds
         * and will be between 1 and a maximum of 1000; the default is 0
         * (no delay).
         * The message delay will delay guest reading on a per message basis
         * in microseconds, between 1 and 1000, with the default being 0
         * (no delay).
         */
        u32 fuzz_testing_interrupt_delay;
        u32 fuzz_testing_message_delay;

};

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
        return !!(c->offermsg.offer.chn_flags &
                  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
        return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
                                         enum hv_callback_mode mode)
{
        c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
        c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
        return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
                                                 u32 size)
{
        unsigned long flags;

        if (size) {
                spin_lock_irqsave(&c->outbound.ring_lock, flags);
                ++c->out_full_total;

                if (!c->out_full_flag) {
                        ++c->out_full_first;
                        c->out_full_flag = true;
                }
                spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
        } else {
                c->out_full_flag = false;
        }

        c->outbound.ring_buffer->pending_send_sz = size;
}
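
/*
 * Usage sketch (illustrative, not upstream code): a sender that finds the
 * outbound ring too full can ask the host for a wake-up interrupt once
 * enough space has been freed, instead of polling. "pktlen" is an assumed
 * packet size for the example.
 *
 *	if (hv_get_bytes_to_write(&channel->outbound) < pktlen) {
 *		// Ask the host to interrupt us when pktlen bytes are free.
 *		set_channel_pending_send_size(channel, pktlen);
 *		return -EAGAIN;	// retry the send from the wake-up path
 *	}
 *
 * A driver would typically call set_channel_pending_send_size(channel, 0)
 * once the send goes through, so the host stops tracking the request.
 */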

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
        c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
        c->low_latency = false;
}

void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
                        void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
                        void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;
        struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;		/* Always 1 in this case */
        struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;		/* Always 1 in this case */
        struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
                     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
                       void (*onchannel_callback)(void *context),
                       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
                      u32 send_ringbuffersize,
                      u32 recv_ringbuffersize,
                      void *userdata,
                      u32 userdatalen,
                      void (*onchannel_callback)(void *context),
                      void *context);
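
/*
 * Usage sketch (illustrative): a typical probe() path opens the channel with
 * equally sized rings and a per-device context. Names such as
 * my_onchannel_callback, my_dev and MY_RING_BYTES are assumptions for the
 * example, not part of this header.
 *
 *	ret = vmbus_open(dev->channel, MY_RING_BYTES, MY_RING_BYTES,
 *			 NULL, 0,	// no user data in the open message
 *			 my_onchannel_callback, my_dev);
 *	if (ret)
 *		return ret;
 *	// ... and on teardown:
 *	vmbus_close(dev->channel);
 */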

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
                            void *buffer,
                            u32 bufferLen,
                            u64 requestid,
                            enum vmbus_packet_type type,
                            u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                       struct hv_page_buffer pagebuffers[],
                                       u32 pagecount,
                                       void *buffer,
                                       u32 bufferlen,
                                       u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
                                     struct vmbus_packet_mpb_array *mpb,
                                     u32 desc_size,
                                     void *buffer,
                                     u32 bufferlen,
                                     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
                                 void *kbuffer,
                                 u32 size,
                                 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
                                u32 gpadl_handle);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
                            void *buffer,
                            u32 bufferlen,
                            u32 *buffer_actual_len,
                            u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
                                void *buffer,
                                u32 bufferlen,
                                u32 *buffer_actual_len,
                                u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
        const char *name;

        /*
         * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
         * channel flag, actually doesn't mean a synthetic device because the
         * offer's if_type/if_instance can change for every new hvsock
         * connection.
         *
         * However, to facilitate the notification of new-offer/rescind-offer
         * from vmbus driver to hvsock driver, we can handle hvsock offer as
         * a special vmbus device, and hence we need the below flag to
         * indicate if the driver is the hvsock driver or not: we need to
         * specially treat the hvsock offer & driver in vmbus_match().
         */
        bool hvsock;
        /* the device type supported by this driver */
        guid_t dev_type;
        const struct hv_vmbus_device_id *id_table;

        struct device_driver driver;

        /* dynamic device GUID's */
        struct {
                spinlock_t lock;
                struct list_head list;
        } dynids;

        int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
        int (*remove)(struct hv_device *);
        void (*shutdown)(struct hv_device *);

        int (*suspend)(struct hv_device *);
        int (*resume)(struct hv_device *);

};

/* Base device object */
struct hv_device {
        /* the device type id of this device */
        guid_t dev_type;

        /* the device instance id of this device */
        guid_t dev_instance;
        u16 vendor_id;
        u16 device_id;

        struct device device;
        char *driver_override;	/* Driver name to force a match */

        struct vmbus_channel *channel;
        struct kset *channels_kset;

        /* place holder to keep track of the dir for hv device in debugfs */
        struct dentry *debug_dir;

};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
        return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
        return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
        dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
        return dev_get_drvdata(&dev->device);
}
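
/*
 * Usage sketch (illustrative): drivers stash their per-device state at
 * probe time and fetch it back from other callbacks. my_dev_state is an
 * assumed driver-private type, not something defined by this header.
 *
 *	static int my_probe(struct hv_device *dev,
 *			    const struct hv_vmbus_device_id *id)
 *	{
 *		struct my_dev_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return -ENOMEM;
 *		hv_set_drvdata(dev, state);
 *		return 0;
 *	}
 *
 *	static int my_remove(struct hv_device *dev)
 *	{
 *		kfree(hv_get_drvdata(dev));
 *		return 0;
 *	}
 */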

struct hv_ring_buffer_debug_info {
        u32 current_interrupt_mask;
        u32 current_read_index;
        u32 current_write_index;
        u32 bytes_avail_toread;
        u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                struct hv_ring_buffer_debug_info *debug_info);

bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
        __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
                                         struct module *owner,
                                         const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                        resource_size_t min, resource_size_t max,
                        resource_size_t size, resource_size_t align,
                        bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
        .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
                          0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
        .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
                          0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
        .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
                          0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
        .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
                          0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
        .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
                          0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
        .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
                          0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
        .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
                          0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
        .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
                          0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
        .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
                          0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
        .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
                          0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
        .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
                          0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
        .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
                          0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
        .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
                          0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
        .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
                          0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
        .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
                          0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
        .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
                          0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
        .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
                          0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
        .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
                          0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
        .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
                          0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
        u8 *recv_buffer;
        void *channel;
        void (*util_cb)(void *);
        int (*util_init)(struct hv_util_service *);
        void (*util_deinit)(void);
        int (*util_pre_suspend)(void);
        int (*util_pre_resume)(void);
};

struct vmbuspipe_hdr {
        u32 flags;
        u32 msgsize;
} __packed;

struct ic_version {
        u16 major;
        u16 minor;
} __packed;

struct icmsg_hdr {
        struct ic_version icverframe;
        u16 icmsgtype;
        struct ic_version icvermsg;
        u16 icmsgsize;
        u32 status;
        u8 ictransaction_id;
        u8 icflags;
        u8 reserved[2];
} __packed;

struct icmsg_negotiate {
        u16 icframe_vercnt;
        u16 icmsg_vercnt;
        u32 reserved;
        struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
        u32 reason_code;
        u32 timeout_seconds;
        u32 flags;
        u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
        u64 seq_num;
        u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
        u64 parenttime;
        u64 childtime;
        u64 roundtriptime;
        u8 flags;
} __packed;

struct ictimesync_ref_data {
        u64 parenttime;
        u64 vmreferencetime;
        u8 flags;
        char leapflags;
        char stratum;
        u8 reserved[3];
} __packed;

struct hyperv_service_callback {
        u8 msg_type;
        char *log_msg;
        guid_t data;
        struct vmbus_channel *channel;
        void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
                                const int *fw_version, int fw_vercnt,
                                const int *srv_version, int srv_vercnt,
                                int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
                                  const guid_t *shv_host_servie_id);
int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;

        /* make sure mask update is not reordered */
        virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

        rbi->ring_buffer->interrupt_mask = 0;

        /* make sure mask update is not reordered */
        virt_mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        return hv_get_bytes_to_read(rbi);
}
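
/*
 * Usage sketch (illustrative): an ISR-mode consumer masks host interrupts
 * while draining the ring and uses the return value of hv_end_read() to
 * detect the race with newly arrived data. process_packets() stands in for
 * a driver-specific drain routine.
 *
 *	for (;;) {
 *		hv_begin_read(rbi);
 *		process_packets(rbi);	// consume everything available
 *		if (!hv_end_read(rbi))
 *			break;		// drained, interrupts unmasked
 *		// non-zero: new data raced in, mask again and re-drain
 *	}
 */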

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
        return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
        return (desc->len8 << 3) - (desc->offset8 << 3);
}


struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
                   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

/*
 * Get next packet descriptor from iterator
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
                 const struct vmpacket_descriptor *pkt)
{
        struct vmpacket_descriptor *nxt;

        nxt = __hv_pkt_iter_next(channel, pkt);
        if (!nxt)
                hv_pkt_iter_close(channel);

        return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
        for (pkt = hv_pkt_iter_first(channel); pkt; \
             pkt = hv_pkt_iter_next(channel, pkt))
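
/*
 * Usage sketch (illustrative): in-place processing of incoming packets from
 * a channel callback. handle_completion() and handle_inband() stand in for
 * driver-specific handlers.
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	foreach_vmbus_pkt(pkt, channel) {
 *		void *data = hv_pkt_data(pkt);
 *		u32 len = hv_pkt_datalen(pkt);
 *
 *		switch (pkt->type) {
 *		case VM_PKT_COMP:
 *			handle_completion(pkt->trans_id, data, len);
 *			break;
 *		case VM_PKT_DATA_INBAND:
 *			handle_inband(data, len);
 *			break;
 *		}
 *	}
 */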

/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
                        unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
                         unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
                                void (*block_invalidate)(void *context,
                                                         u64 block_mask));

struct hyperv_pci_block_ops {
        int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
                          unsigned int block_id, unsigned int *bytes_returned);
        int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
                           unsigned int block_id);
        int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
                                  void (*block_invalidate)(void *context,
                                                           u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;

static inline unsigned long virt_to_hvpfn(void *addr)
{
        phys_addr_t paddr;

        if (is_vmalloc_addr(addr))
                paddr = page_to_phys(vmalloc_to_page(addr)) +
                        offset_in_page(addr);
        else
                paddr = __pa(addr);

        return paddr >> HV_HYP_PAGE_SHIFT;
}

#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
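
/*
 * Worked example (illustrative): the number of Hyper-V pages needed to
 * cover a buffer that is not hvpage-aligned is
 *
 *	nr_hvpfns = HVPFN_UP(offset_in_hvpage(buf) + len);
 *
 * e.g. with 4K Hyper-V pages, a 6000-byte buffer starting 512 bytes into a
 * page spans HVPFN_UP(512 + 6000) = HVPFN_UP(6512) = 2 pages.
 */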

#endif /* _HYPERV_H */