/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();
	/*
	 * The only case in which we need to signal is when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}
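
/*
 * Illustrative example (not part of the driver): suppose read_index is
 * 200. If our write began at old_write == 200, the ring was empty when
 * we started writing, so by the protocol above the host may have left
 * its read loop and must be signaled. If instead old_write were, say,
 * 500 while read_index is 200, the host is still inside its drain loop
 * and is guaranteed to pick up our data without an interrupt.
 */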

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer of
 * the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * Here is the reason for having this barrier:
	 * If the reading of pending_send_sz (in this function) were to be
	 * reordered and performed before we commit the new read index (in
	 * the calling function), we could have a problem. If the host were
	 * to set pending_send_sz after we have sampled it, and then go to
	 * sleep before we commit the read index, we would miss sending it
	 * the interrupt. Issue a full memory barrier to address this.
	 */
	mb();

	pending_sz = rbi->ring_buffer->pending_send_sz;
	write_loc = rbi->ring_buffer->write_index;
	/* If the other end is not blocked on write don't bother. */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
		read_loc - write_loc;

	if (cur_write_sz >= pending_sz)
		return true;

	return false;
}
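
/*
 * Worked example (illustrative values): with ring_datasize == 4096,
 * read_index == 100 and write_index == 4000, the free space is
 * 4096 - (4000 - 100) == 196 bytes. A blocked host that set
 * pending_send_sz == 150 would therefore be signaled (196 >= 150);
 * with pending_send_sz == 300 it would not be.
 */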

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead by 'offset' bytes (for example,
 * past a packet descriptor that has already been consumed).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64: the write index is
 * placed in the upper 32 bits. (The lower 32 bits, nominally the
 * read index, are left as zero.)
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
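
/*
 * Illustrative example: with write_index == 0x2a0 this returns
 * 0x000002a000000000ULL. hv_ringbuffer_write() appends this stamp
 * after every packet ("Set previous packet start" below), presumably
 * so the start of the previous write can be recovered when debugging
 * the ring.
 */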

/*
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around on the
 * source (ring) side only!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
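
/*
 * Worked example (illustrative values): with ring_buffer_size == 4096,
 * start_read_offset == 4000 and destlen == 200, the copy wraps:
 * frag_len == 96, so 96 bytes are copied from offset 4000 and the
 * remaining 104 bytes from offset 0. The returned offset is
 * (4000 + 200) % 4096 == 104.
 */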

/*
 * Helper routine to copy data from a source buffer into the ring
 * buffer. Assumes there is enough room; handles wrap-around on the
 * destination (ring) side only!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
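
/*
 * The arithmetic mirrors hv_copyfrom_ringbuffer() above: e.g., writing
 * srclen == 200 bytes at start_write_offset == 4000 into a 4096-byte
 * ring splits into a 96-byte and a 104-byte memcpy and returns 104.
 */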

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
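
/*
 * Illustrative sketch of a caller (hypothetical names; in-tree callers
 * allocate and own the page-backed ring memory themselves):
 *
 *	struct hv_ring_buffer_info rbi;
 *	u32 ring_size = 4 * PAGE_SIZE;
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL,
 *					      get_order(ring_size));
 *	int ret;
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	ret = hv_ringbuffer_init(&rbi, ring, ring_size);
 *	if (ret)	// header struct must be exactly PAGE_SIZE
 *		return ret;
 *
 * Note that one page of 'ring' is consumed by the header, leaving
 * ring_datasize == ring_size - PAGE_SIZE bytes for data.
 */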

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for exactly this packet, consider the
	 * ring full: if write_index were allowed to catch up with
	 * read_index, the ring would afterwards look empty
	 * (read_index == write_index) rather than full.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}

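/*
 * Illustrative sketch (hypothetical caller; the descriptor/payload
 * split shown here is made up): sending one packet as a gather list.
 * In-tree callers such as vmbus_sendpacket() build the kvec from a
 * VMBus packet descriptor plus the payload and, when asked to, signal
 * the host (e.g. via vmbus_setevent() in channel.c).
 *
 *	struct kvec kv[2];
 *	bool signal = false;
 *	int ret;
 *
 *	kv[0].iov_base = &desc;		// packet descriptor
 *	kv[0].iov_len  = sizeof(desc);
 *	kv[1].iov_base = payload;
 *	kv[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(&channel->outbound, kv, 2, &signal);
 *	if (ret == 0 && signal)
 *		vmbus_setevent(channel);	// interrupt the host
 */
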
/* Read without advancing the read index. */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/* Read and advance the read index. */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	/* buflen is unsigned, so only zero is invalid */
	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(inring_info);

	return 0;
}
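
/*
 * Illustrative sketch (hypothetical caller) of the peek-then-read
 * pattern the two routines above support: peek at the fixed-size
 * descriptor first to learn the full packet length, then read the
 * payload while skipping the descriptor bytes via 'offset'. This
 * mirrors what callers like vmbus_recvpacket() do.
 *
 *	struct vmpacket_descriptor desc;
 *	bool signal = false;
 *	int ret;
 *
 *	ret = hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *	if (ret)
 *		return ret;	// nothing to read yet
 *
 *	// Lengths are in 8-byte units; skip the descriptor itself.
 *	ret = hv_ringbuffer_read(&channel->inbound, buffer,
 *				 (desc.len8 << 3) - (desc.offset8 << 3),
 *				 desc.offset8 << 3, &signal);
 */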