// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	trace_nvsp_send(ndev, init_pkt);

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

/* Worker to set up sub-channels on initial setup.
 * The initial hotplug event occurs in softirq context
 * and so can't wait for the channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fall back to using only the primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

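/* Allocate and initialize a netvsc_device. Note that tx_disable starts
 * out true, so transmit stays off until it is explicitly enabled later.
 */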
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

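/* RCU callback that actually frees a netvsc_device and its per-channel
 * resources; scheduled via free_netvsc_device_rcu() below so it runs
 * only after a grace period, when no readers remain.
 */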
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	kfree(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg), so we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel has been rescinded,
		 * ignore it, since we cannot send on a rescinded channel.
		 * This allows us to clean up properly even when the channel
		 * is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg), so we need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel has been rescinded,
		 * ignore it, since we cannot send on a rescinded channel.
		 * This allows us to clean up properly even when the channel
		 * is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

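/* Tear down the GPADL (guest physical address descriptor list) that maps
 * the receive buffer into the host. The handle is cleared only on success,
 * so a failed teardown deliberately leaks the buffer (see the comment
 * below) instead of risking a bugcheck.
 */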
static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to tear down receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to tear down send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}

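/* Allocate the receive completion ring for one channel, preferring memory
 * on the NUMA node of the channel's target CPU and falling back to any
 * node if that allocation fails.
 */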
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

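/* Allocate the receive and send buffers, establish GPADLs for them on the
 * primary channel, and pass the GPADL handles to the host (NetVSP). On any
 * failure, revoke and tear down whatever has been set up so far.
 */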
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow a smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow */
	if (net_device->recv_section_size < NETVSC_MTU_MIN ||
	    (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Set up the receive completion ring.
	 * Add 1 to recv_section_cnt because at least one entry in a
	 * ring buffer has to be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now set up the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.send_send_buf_complete.status !=
	    NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete send buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size =
		init_packet->msg.v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count: %d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Set up state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

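/* Negotiate the highest NVSP protocol version the host supports, send the
 * matching NDIS version, then set up the send/receive buffers via
 * netvsc_init_buf().
 */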
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* Disable NAPI and disassociate its context from the device. */
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		/* only disable enabled NAPI channel */
		if (i < ndev->real_num_rx_queues)
			napi_disable(&net_device->chan_table[i].napi);

		netif_napi_del(&net_device->chan_table[i].napi);
	}

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

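/* Return a send-buffer section to the free map. The bit for an in-use
 * section is known to be set, so an atomic toggle clears it.
 */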
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

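/* Transmit completion handler: free the send-buffer slot, update per-queue
 * statistics, release the skb, and wake the transmit queue if enough ring
 * space has become available.
 */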
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

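/* Dispatch a completion packet from the host: channel-init style responses
 * are copied back into channel_init_pkt and complete channel_init_wait;
 * RNDIS packet completions are handled by netvsc_send_tx_complete(). Each
 * message is length-checked before its fields are read.
 */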
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
		return;
	}

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_message_init_complete)) {
			netdev_err(ndev, "nvsp_msg length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG5_TYPE_SUBCHANNEL:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_5_subchannel_complete)) {
			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
				   msglen);
			return;
		}
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

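/* Find and atomically claim a free send-buffer section. Returns the section
 * index, or NETVSC_INVALID_INDEX if every section is in use.
 */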
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

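/* Copy an RNDIS packet into the chosen send-buffer section at offset
 * pend_size, zero-padding the tail up to the packet alignment when the
 * stack indicates more data is coming (xmit_more).
 */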
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				    unsigned int section_index,
				    u32 pend_size,
				    struct hv_netvsc_packet *packet,
				    struct rndis_message *rndis_msg,
				    struct hv_page_buffer *pb,
				    bool xmit_more)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 padding = 0;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	u32 remain;

	/* Add padding */
	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (xmit_more && remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		dest += len;
	}

	if (padding)
		memset(dest, 0, padding);
}

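/* Post a single NVSP_MSG1_TYPE_SEND_RNDIS_PKT to the channel for this
 * queue, attaching page buffers when present, and stop/wake the transmit
 * queue based on the available ring space.
 */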
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	if (netif_tx_queue_stopped(txq) &&
	    atomic_read(&nvchan->queue_sends) < 1 &&
	    !net_device->tx_disable) {
		netif_tx_wake_queue(txq);
		ndev_ctx->eth_stats.wake_queue++;
		if (ret == -EAGAIN)
			ret = -ENOSPC;
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

/* RCU already held by caller */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb,
		bool xdp_tx)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send a control message or XDP packet directly without accessing
	 * the msd (Multi-Send Data) field, which may be changed during
	 * data packet processing.
	 */
	if (!skb || xdp_tx)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if the stack says more data is coming,
	 * we are not doing a mixed-mode send, and the queue is not
	 * flow blocked.
	 */
	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

/* Add a receive completion to the ring to send to the host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

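/* Handle a transfer-page packet carrying one or more RNDIS packets. Each
 * range is validated against the receive buffer before being passed to
 * rndis_filter_receive(), and one completion is queued for the whole
 * transaction. Returns the number of ranges processed.
 */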
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	const struct nvsp_message *nvsp = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "invalid nvsp header, length too small: %u\n",
			  msglen);
		return 0;
	}

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	/* Validate xfer page pkt header */
	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page pkt, offset too small: %u\n",
			  desc->offset8 << 3);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Check count for a valid value */
	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Range count is not valid: %d\n",
			  count);
		return 0;
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		if (unlikely(offset > net_device->recv_buf_size ||
			     buflen > net_device->recv_buf_size - offset)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		data = recv_buf + offset;

		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
			/* Drop incomplete packet */
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
		}
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}

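/* Handle NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: validate the message
 * length and offset, then copy the host-provided indirection table into
 * tx_table[].
 */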
static void netvsc_send_table(struct net_device *ndev,
			      struct netvsc_device *nvscdev,
			      const struct nvsp_message *nvmsg,
			      u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, offset, *tab;
	int i;

	/* Ensure packet is big enough to read send_table fields */
	if (msglen < sizeof(struct nvsp_message_header) +
	    sizeof(struct nvsp_5_send_indirect_table)) {
		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
		return;
	}

	count = nvmsg->msg.v5_msg.send_table.count;
	offset = nvmsg->msg.v5_msg.send_table.offset;

	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
	 * wrong due to a host bug. So fix the offset here.
	 */
	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
	    msglen >= sizeof(struct nvsp_message_header) +
	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
		offset = sizeof(struct nvsp_message_header) +
			 sizeof(union nvsp_6_message_uber);

	/* Boundary check for all versions */
	if (offset > msglen - count * sizeof(u32)) {
		netdev_err(ndev, "Received send-table offset too big:%u\n",
			   offset);
		return;
	}

	tab = (void *)nvmsg + offset;

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}

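/* Handle NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: record whether a VF slot was
 * added or removed along with its serial number, and signal any waiter.
 */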
static void netvsc_send_vf(struct net_device *ndev,
			   const struct nvsp_message *nvmsg,
			   u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	/* Ensure packet is big enough to read its fields */
	if (msglen < sizeof(struct nvsp_message_header) +
	    sizeof(struct nvsp_4_send_vf_association)) {
		netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
		return;
	}

	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;

	if (net_device_ctx->vf_alloc)
		complete(&net_device_ctx->vf_add);

	netdev_info(ndev, "VF slot %u %s\n",
		    net_device_ctx->vf_serial,
		    net_device_ctx->vf_alloc ? "added" : "removed");
}

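/* Dispatch inband (VM_PKT_DATA_INBAND) messages from the host after a
 * minimal header length check.
 */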
static void netvsc_receive_inband(struct net_device *ndev,
				  struct netvsc_device *nvscdev,
				  const struct vmpacket_descriptor *desc)
{
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
		return;
	}

	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(ndev, nvmsg, msglen);
		break;
	}
}

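/* Demultiplex one ring-buffer descriptor by packet type. Only transfer-page
 * (data) packets contribute to the returned NAPI work count.
 */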
static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct netvsc_channel *nvchan,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct vmbus_channel *channel = nvchan->channel;
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);

	trace_nvsp_recv(ndev, channel, nvmsg);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel, desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, nvchan, desc);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, net_device, desc);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

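/* Map a channel (primary or sub-channel) back to its hv_device. */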
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

/* Network processing softirq.
 * Process data in the incoming ring buffer from the host.
 * Stops when the ring is empty or the budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If we did not exhaust the NAPI budget this time
	 * and are not busy polling,
	 * then re-enable host interrupts
	 * and reschedule if the ring is not empty
	 * or sending a receive completion failed.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* The driver may overshoot since there may be multiple packets
	 * per descriptor.
	 */
	return min(work_done, budget);
}

/* Callback invoked when data is available in the host ring buffer.
 * Processing is deferred until the network softirq (NAPI).
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload first vmpacket descriptor */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
		hv_begin_read(rbi);

		__napi_schedule_irqoff(&nvchan->napi);
	}
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);

		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);

		if (ret) {
			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
			goto cleanup2;
		}

		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);

		if (ret) {
			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
			goto cleanup2;
		}
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	/* Open the channel */
	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing the nvdev pointer unlocks netvsc_send(); make sure
	 * chn_table is populated first.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}