// SPDX-License-Identifier: GPL-2.0-only
 * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
 * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
 * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
 * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
	/* The size of a ringbuffer must be page-aligned */
		 * Two things to notice here:
		 * 1) We're processing two ring buffers as a unit
		 *    the first guest-size page of each of the two ring buffers.
		 *    So we effectively subtract out two guest-size pages, and add
		 *    back two Hyper-V size pages.
		return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
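/*
 * Hedged sketch, not part of channel.c: the RING-size arithmetic above with
 * example constants, assuming a hypothetical 64 KiB guest PAGE_SIZE and the
 * 4 KiB Hyper-V page. Only the first HV_HYP_PAGE_SIZE bytes of each ring's
 * header page are described to Hyper-V, so one (PAGE_SIZE - HV_HYP_PAGE_SIZE)
 * gap per ring is dropped from the size.
 */
#define EX_PAGE_SIZE		(64 * 1024u)	/* hypothetical guest page size */
#define EX_HV_HYP_PAGE_SIZE	(4 * 1024u)	/* Hyper-V page size */

static unsigned int ex_ring_gpadl_size(unsigned int guest_size)
{
	/*
	 * Two rings => two header-page gaps are omitted from the GPADL.
	 * E.g. two 128 KiB rings: 256 KiB in the guest, but
	 * 256 KiB - 2 * (64 KiB - 4 KiB) = 136 KiB as seen by Hyper-V.
	 */
	return guest_size - 2 * (EX_PAGE_SIZE - EX_HV_HYP_PAGE_SIZE);
}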
 * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
	 * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
	return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
 * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
		delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
		delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
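/*
 * Hedged sketch, not part of channel.c: one plausible reading of the two
 * delta assignments above, mapping the @i-th Hyper-V page of a RING gpadl
 * back to a guest byte offset. It reuses the EX_* constants from the
 * previous sketch; send_hvpg_idx stands for what
 * hv_ring_gpadl_send_hvpgoffset() would return for the send ring's offset.
 */
static unsigned long ex_ring_gpadl_guest_offset(unsigned int send_hvpg_idx,
						unsigned int i)
{
	unsigned long delta;

	if (i == 0)			/* header page of the first ring */
		delta = 0;
	else if (i <= send_hvpg_idx)	/* first ring's data, second ring's header */
		delta = EX_PAGE_SIZE - EX_HV_HYP_PAGE_SIZE;
	else				/* second ring's data */
		delta = 2 * (EX_PAGE_SIZE - EX_HV_HYP_PAGE_SIZE);

	return delta + (unsigned long)i * EX_HV_HYP_PAGE_SIZE;
}

/*
 * Worked numbers: with a 128 KiB send ring, send_hvpg_idx is
 * (131072 - 61440) >> 12 = 17, i.e. 4 KiB of header plus 64 KiB of data.
 * Hyper-V page 17 then lands at guest offset 128 KiB (the second ring's
 * header page) and page 18 at 192 KiB, where that ring's data begins.
 */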
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
void vmbus_setevent(struct vmbus_channel *channel)
	trace_vmbus_setevent(channel);
	if (channel->offermsg.monitor_allocated && !channel->low_latency) {
		vmbus_send_interrupt(channel->offermsg.child_relid);
		sync_set_bit(channel->monitor_bit,
			     (unsigned long *)&monitorpage->trigger_group
					      [channel->monitor_grp].pending);
		vmbus_set_event(channel);
/* vmbus_free_ring - drop mapping of ring buffer */
void vmbus_free_ring(struct vmbus_channel *channel)
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);
	if (channel->ringbuffer_page) {
		__free_pages(channel->ringbuffer_page,
			     get_order(channel->ringbuffer_pagecount
		channel->ringbuffer_page = NULL;
/* vmbus_alloc_ring - allocate and map pages for ring buffer */
		return -EINVAL;
	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
		return -ENOMEM;
	newchannel->ringbuffer_page = page;
	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
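/*
 * Hedged usage sketch, not part of channel.c: how a VMBus driver might pair
 * vmbus_alloc_ring()/vmbus_free_ring() in its probe/remove paths.
 * my_dev_probe(), my_dev_remove() and MY_RING_SIZE are illustrative names,
 * and the vmbus_alloc_ring() prototype (channel, send size, recv size) is
 * assumed from the fragment above rather than quoted from a header.
 */
#include <linux/hyperv.h>

#define MY_RING_SIZE	(64 * PAGE_SIZE)	/* hypothetical ring size */

static int my_dev_probe(struct hv_device *dev)
{
	int ret;

	/* grab the pages that will back both rings of the channel */
	ret = vmbus_alloc_ring(dev->channel, MY_RING_SIZE, MY_RING_SIZE);
	if (ret)
		return ret;

	/* ... open or connect the channel, set up the device ... */
	return 0;
}

static int my_dev_remove(struct hv_device *dev)
{
	/* drop the mapping and pages handed out by vmbus_alloc_ring() */
	vmbus_free_ring(dev->channel);
	return 0;
}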
/* Used for Hyper-V Socket: a guest client's connect() to the host */
 * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
 * CHANNELMSG_MODIFYCHANNEL messages are asynchronous. Also, Hyper-V does not
 * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
 * create_gpadl_header - Creates a gpadl for the specified buffer
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
		pfnleft = pagecount - pfncount;
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
					   &msgheader->submsglist,
				list_del(&pos->msglistentry);
			msgbody->msgsize = msgsize;
				(struct vmbus_channel_gpadl_body *)msgbody->msg;
			 * be 64-bit
				gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnleft -= pfncurr;
		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
	return -ENOMEM;
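/*
 * Hedged sketch, not part of channel.c: the message split implied by the
 * pfnsize/pfncount arithmetic above. The struct sizes are passed in as
 * parameters because they come from the VMBus message definitions; only the
 * split of "pagecount" PFNs across one GPADL_HEADER message and zero or more
 * GPADL_BODY messages is shown here.
 */
#include <linux/kernel.h>

static unsigned int ex_gpadl_msg_count(unsigned int max_msg_size,
				       unsigned int hdr_size,
				       unsigned int range_size,
				       unsigned int body_size,
				       unsigned int pagecount)
{
	/* PFNs (u64 each) that fit after the header and its single gpa_range */
	unsigned int hdr_pfns  = (max_msg_size - hdr_size - range_size) / sizeof(u64);
	/* PFNs that fit in one CHANNELMSG_GPADL_BODY message */
	unsigned int body_pfns = (max_msg_size - body_size) / sizeof(u64);

	if (pagecount <= hdr_pfns)
		return 1;	/* everything fits in the GPADL_HEADER message */

	/* the header message plus ceil(remainder / body_pfns) body messages */
	return 1 + DIV_ROUND_UP(pagecount - hdr_pfns, body_pfns);
}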
 * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
 * @channel: a channel
 * @size: page-size multiple
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
	init_completion(&msginfo->waitevent);
	msginfo->waiting_channel = channel;
	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;
	list_add_tail(&msginfo->msglistentry,
	if (channel->rescind) {
		ret = -ENODEV;
	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
	list_for_each(curr, &msginfo->submsglist) {
			(struct vmbus_channel_gpadl_body *)submsginfo->msg;
		gpadl_body->header.msgtype =
		gpadl_body->gpadl = next_gpadl_handle;
				     submsginfo->msgsize - sizeof(*submsginfo),
	wait_for_completion(&msginfo->waitevent);
	if (msginfo->response.gpadl_created.creation_status != 0) {
			msginfo->response.gpadl_created.creation_status);
		ret = -EDQUOT;
	if (channel->rescind) {
		ret = -ENODEV;
	*gpadl_handle = gpadlmsg->gpadl;
	list_del(&msginfo->msglistentry);
	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 * @channel: a channel
 * @size: page-size multiple
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
	return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
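/*
 * Hedged usage sketch, not part of channel.c: establishing a GPADL over a
 * driver-owned, page-aligned buffer and tearing it down again. my_buf,
 * my_buf_size and my_gpadl are illustrative; the vmbus_establish_gpadl()
 * prototype (channel, kbuffer, size, &gpadl handle) is assumed from the
 * wrapper above, and <linux/hyperv.h> is assumed to declare both calls.
 */
static int my_share_buffer(struct vmbus_channel *chan,
			   void *my_buf, u32 my_buf_size, u32 *my_gpadl)
{
	int ret;

	/* describe the buffer to the host; *my_gpadl identifies the mapping */
	ret = vmbus_establish_gpadl(chan, my_buf, my_buf_size, my_gpadl);
	if (ret)
		return ret;

	/* ... hand *my_gpadl to the host in a channel-specific message ... */
	return 0;
}

static void my_unshare_buffer(struct vmbus_channel *chan, u32 my_gpadl)
{
	/* release the host's view of the buffer before freeing it */
	vmbus_teardown_gpadl(chan, my_gpadl);
}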
	struct page *page = newchannel->ringbuffer_page;
		return -EINVAL;
	send_pages = newchannel->ringbuffer_send_offset;
	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
	if (newchannel->state != CHANNEL_OPEN_STATE)
		return -EINVAL;
	newchannel->state = CHANNEL_OPENING_STATE;
	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;
	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
	err = hv_ringbuffer_init(&newchannel->inbound,
	newchannel->ringbuffer_gpadlhandle = 0;
				      page_address(newchannel->ringbuffer_page),
				      newchannel->ringbuffer_send_offset << PAGE_SHIFT,
				      &newchannel->ringbuffer_gpadlhandle);
	/* Create and init the channel open message */
		err = -ENOMEM;
	init_completion(&open_info->waitevent);
	open_info->waiting_channel = newchannel;
	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	 * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
	 * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
	open_msg->downstream_ringbuffer_pageoffset =
	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
		memcpy(open_msg->userdata, userdata, userdatalen);
	list_add_tail(&open_info->msglistentry,
	if (newchannel->rescind) {
		err = -ENODEV;
	wait_for_completion(&open_info->waitevent);
	list_del(&open_info->msglistentry);
	if (newchannel->rescind) {
		err = -ENODEV;
	if (open_info->response.open_result.status) {
		err = -EAGAIN;
	newchannel->state = CHANNEL_OPENED_STATE;
	list_del(&open_info->msglistentry);
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
	newchannel->ringbuffer_gpadlhandle = 0;
	hv_ringbuffer_cleanup(&newchannel->outbound);
	hv_ringbuffer_cleanup(&newchannel->inbound);
	newchannel->state = CHANNEL_OPEN_STATE;
 * vmbus_connect_ring - Open the channel but reuse ring buffer
 * vmbus_open - Open the specified channel.
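/*
 * Hedged usage sketch, not part of channel.c: the usual open/close pairing
 * in a VMBus driver. my_onchannel_cb(), my_open_channel() and my_ring_size
 * are illustrative, and the vmbus_open() prototype (channel, send ring size,
 * recv ring size, userdata, userdata length, callback, context) is assumed
 * rather than quoted from the fragment above.
 */
static void my_onchannel_cb(void *context)
{
	struct hv_device *dev = context;

	/* drain the inbound ring here, e.g. via vmbus_recvpacket() */
	(void)dev;
}

static int my_open_channel(struct hv_device *dev, u32 my_ring_size)
{
	/* sets up the ring buffers and GPADL, sends CHANNELMSG_OPENCHANNEL */
	return vmbus_open(dev->channel, my_ring_size, my_ring_size,
			  NULL, 0, my_onchannel_cb, dev);
}

static void my_close_channel(struct hv_device *dev)
{
	/* sends CHANNELMSG_CLOSECHANNEL, tears down the GPADL, frees the rings */
	vmbus_close(dev->channel);
}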
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
		return -ENOMEM;
	init_completion(&info->waitevent);
	info->waiting_channel = channel;
	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;
	list_add_tail(&info->msglistentry,
	if (channel->rescind)
	wait_for_completion(&info->waitevent);
	 * If the channel has been rescinded;
	if (channel->rescind)
	list_del(&info->msglistentry);
void vmbus_reset_channel_cb(struct vmbus_channel *channel)
	 * vmbus_on_event(), running in the per-channel tasklet, can race
	 * the former is accessing channel->inbound.ring_buffer, the latter
	 * and that the channel ring buffer is no longer being accessed, cf.
	tasklet_disable(&channel->callback_event);
	spin_lock_irqsave(&channel->sched_lock, flags);
	channel->onchannel_callback = NULL;
	spin_unlock_irqrestore(&channel->sched_lock, flags);
	channel->sc_creation_callback = NULL;
	/* Re-enable tasklet for use on re-open */
	tasklet_enable(&channel->callback_event);
static int vmbus_close_internal(struct vmbus_channel *channel)
	vmbus_reset_channel_cb(channel);
	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
	if (channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	channel->state = CHANNEL_OPEN_STATE;
	msg = &channel->close_msg.msg;
	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;
	/* Tear down the gpadl for the channel's ring buffer */
	else if (channel->ringbuffer_gpadlhandle) {
		ret = vmbus_teardown_gpadl(channel,
					   channel->ringbuffer_gpadlhandle);
	channel->ringbuffer_gpadlhandle = 0;
/* disconnect ring - close all channels */
int vmbus_disconnect_ring(struct vmbus_channel *channel)
	if (channel->primary_channel != NULL)
		return -EINVAL;
	list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
		if (cur_channel->rescind)
			wait_for_completion(&cur_channel->rescind_event);
		if (cur_channel->rescind)
	ret = vmbus_close_internal(channel);
 * vmbus_close - Close the specified channel
void vmbus_close(struct vmbus_channel *channel)
	if (vmbus_disconnect_ring(channel) == 0)
		vmbus_free_ring(channel);
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 * Mainly used by Hyper-V drivers.
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
	/* in 8-bytes granularity */
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
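/*
 * Hedged sketch, not part of channel.c: the 8-byte-granularity arithmetic
 * behind the three-element bufferlist above. desc_size and bufferlen are
 * illustrative values; ALIGN() is the usual kernel round-up helper.
 */
#include <linux/kernel.h>

static u32 ex_packet_padding(u32 desc_size, u32 bufferlen)
{
	u32 packetlen	      = desc_size + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/*
	 * Example: a 16-byte descriptor and a 22-byte payload give 38 bytes,
	 * rounded up to 40, so bufferlist[2] carries 2 bytes of zero padding
	 * and the descriptor's length-in-8-byte-units field becomes 40 >> 3 = 5.
	 */
	return packetlen_aligned - packetlen;
}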
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
		return -EINVAL;
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
		   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
	return hv_ringbuffer_write(channel, bufferlist, 3);
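/*
 * Hedged sketch, not part of channel.c: why descsize shrinks with pagecount.
 * The on-stack descriptor reserves room for MAX_PAGE_BUFFER_COUNT ranges but
 * only the first "pagecount" entries are sent, so the unused tail of the
 * range array is trimmed before the size is converted to 8-byte units. The
 * element type is assumed to be struct hv_page_buffer, matching the VMBus
 * definitions in <linux/hyperv.h>.
 */
static u32 ex_pagebuffer_descsize(u32 pagecount)
{
	return sizeof(struct vmbus_channel_packet_page_buffer) -
	       (MAX_PAGE_BUFFER_COUNT - pagecount) *
	       sizeof(struct hv_page_buffer);
}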
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
	desc->length8 = (u16)(packetlen_aligned >> 3);
	desc->transactionid = requestid;
	desc->reserved = 0;
	desc->rangecount = 1;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
	return hv_ringbuffer_write(channel, bufferlist, 3);
 * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure
 * Receives directly from the Hyper-V VMBus and puts the data it received
 * into @buffer. This will receive the data unparsed from Hyper-V.
 * Mainly used by Hyper-V drivers.
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
	return hv_ringbuffer_read(channel, buffer, bufferlen,
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
	return __vmbus_recvpacket(channel, buffer, bufferlen,
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
	return __vmbus_recvpacket(channel, buffer, bufferlen,
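/*
 * Hedged usage sketch, not part of channel.c: draining a channel from its
 * onchannel callback with vmbus_recvpacket(). my_drain_channel(), my_recv_buf
 * and its length are illustrative, and the prototype (channel, buffer,
 * buffer length, &actual length, &request id) is assumed rather than quoted
 * from the fragments above; <linux/hyperv.h> is again assumed to declare it.
 */
static void my_drain_channel(struct vmbus_channel *chan,
			     void *my_recv_buf, u32 my_recv_buf_len)
{
	u32 actual_len;
	u64 requestid;

	/* keep reading until the inbound ring has no complete packet left */
	while (vmbus_recvpacket(chan, my_recv_buf, my_recv_buf_len,
				&actual_len, &requestid) == 0 &&
	       actual_len > 0) {
		/* my_recv_buf now holds one unparsed packet of actual_len bytes */
	}
}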