// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

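/*
 * Every packet written to the ring buffer is followed by an 8-byte
 * trailer holding the writer's ring indices at the time of the write
 * (see the prev_indices copy in hv_ringbuffer_write()); the read
 * iterator must skip over it when advancing to the next descriptor.
 */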
#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

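	/*
	 * Order the ring data/write_index updates made by the caller
	 * against the read of interrupt_mask below, so that a needed
	 * signal is not missed.
	 */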
	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case in which we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64: the write index is placed
 * in the upper 32 bits (the lower 32 bits are left as zero).
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(struct hv_ring_buffer_info *ring_info,
				u32 start_write_offset, const void *src,
				u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

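	/*
	 * The data pages are mapped twice back-to-back (see
	 * hv_ringbuffer_init()), so a copy that runs past the end of the
	 * ring simply lands in the aliased second mapping, i.e. at the
	 * start of the data area. A single memcpy therefore handles the
	 * wrap-around case.
	 */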
	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read and to write
 * for the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

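	/*
	 * Example: with dsize = 16384, read_loc = 100 and write_loc = 300,
	 * there are 200 bytes available to read and 16184 available to
	 * write (the unread span counts against the writable space).
	 */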
	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

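	/*
	 * The ring buffer header must occupy exactly one page: the first
	 * page of the allocation holds struct hv_ring_buffer and the data
	 * area begins at the second page.
	 */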
	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
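	/*
	 * Lay out the page array as: header page, then the (page_cnt - 1)
	 * data pages listed twice in a row. vmap() below thus creates a
	 * virtually contiguous area in which the data region is aliased
	 * back-to-back, so reads and writes that cross the end of the ring
	 * can be done with a single memcpy.
	 */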
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
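	/*
	 * Precompute the reciprocal of ring_size/10 so that ring occupancy
	 * percentages can later be derived with reciprocal_divide() instead
	 * of a full division.
	 */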
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);
}

/*
 * Check if the ring buffer spinlock is available to take or not; used in
 * atomic contexts, like the panic path (see the Hyper-V framebuffer driver).
 */
bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rinfo = &channel->outbound;

	return spin_is_locked(&rinfo->ring_lock);
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid)
{
	int i;
	u32 bytes_avail_towrite;
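	/* Start with sizeof(u64) to reserve room for the packet trailer. */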
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;
	struct vmpacket_descriptor *desc = kv_list[0].iov_base;
	u64 rqst_id = VMBUS_NO_RQSTOR;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/*
	 * Allocate the request ID after the data has been copied into the
	 * ring buffer. Once this request ID is allocated, the completion
	 * path could find the data and free it.
	 */
	if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
		rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
		if (rqst_id == VMBUS_RQST_ERROR) {
			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
			return -EAGAIN;
		}
	}
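	/*
	 * The descriptor has already been copied into the ring above; point
	 * desc at the in-ring copy and patch its trans_id with the allocated
	 * request ID (or with the caller's requestid when no completion is
	 * requested).
	 */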
	desc = hv_get_ring_buffer(outring_info) + old_write;
	desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind) {
		if (rqst_id != VMBUS_NO_RQSTOR) {
			/* Reclaim request ID to avoid leak of IDs */
			vmbus_request_addr(&channel->requestor, rqst_id);
		}
		return -ENODEV;
	}

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned even when there is no packet header
		 * to read; drivers are expected to check buffer_actual_len
		 * instead.
		 */
		return 0;
	}

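	/*
	 * offset8 and len8 are in units of 8 bytes. In raw mode the caller
	 * receives the vmpacket_descriptor header as well, so the copy
	 * starts at offset 0.
	 */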
	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc;

	/*
	 * The Hyper-V host writes the packet data, then uses
	 * store_release() to update the write_index. Use load_acquire()
	 * here to prevent loads of the packet data from being re-ordered
	 * before the read of the write_index and potentially getting
	 * stale data.
	 */
	write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	hv_debug_delay_test(channel, MESSAGE_DELAY);
	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

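	/*
	 * Prefetch the region just past this packet, where the trailer and
	 * the next descriptor will be read on the following iteration.
	 */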
	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If no more packets are available, returns NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	hv_debug_delay_test(channel, MESSAGE_DELAY);
	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer must always have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);