1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2009, Microsoft Corporation.
4  *
5  * Authors:
6  *   Haiyang Zhang <haiyangz@microsoft.com>
7  *   Hank Janssen  <hjanssen@microsoft.com>
8  */
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/wait.h>
12 #include <linux/highmem.h>
13 #include <linux/slab.h>
14 #include <linux/io.h>
15 #include <linux/if_ether.h>
16 #include <linux/netdevice.h>
17 #include <linux/if_vlan.h>
18 #include <linux/nls.h>
19 #include <linux/vmalloc.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/ucs2_string.h>
22 
23 #include "hyperv_net.h"
24 #include "netvsc_trace.h"
25 
26 static void rndis_set_multicast(struct work_struct *w);
27 
28 #define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
29 struct rndis_request {
30 	struct list_head list_ent;
31 	struct completion  wait_event;
32 
33 	struct rndis_message response_msg;
34 	/*
35 	 * The buffer for extended info after the RNDIS response message. It's
36 	 * referenced based on the data offset in the RNDIS message. Its size
37 	 * is enough for current needs, and should be sufficient for the near
38 	 * future.
39 	 */
40 	u8 response_ext[RNDIS_EXT_LEN];
41 
42 	/* Simplify allocation by having a netvsc packet inline */
43 	struct hv_netvsc_packet	pkt;
44 
45 	struct rndis_message request_msg;
46 	/*
47 	 * The buffer for the extended info after the RNDIS request message.
48 	 * It is referenced and sized in a similar way as response_ext.
49 	 */
50 	u8 request_ext[RNDIS_EXT_LEN];
51 };
52 
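/* Default Toeplitz hash key used for RSS when no key has been configured */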
53 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
54 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
55 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
56 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
57 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
58 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
59 };
60 
61 static struct rndis_device *get_rndis_device(void)
62 {
63 	struct rndis_device *device;
64 
65 	device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
66 	if (!device)
67 		return NULL;
68 
69 	spin_lock_init(&device->request_lock);
70 
71 	INIT_LIST_HEAD(&device->req_list);
72 	INIT_WORK(&device->mcast_work, rndis_set_multicast);
73 
74 	device->state = RNDIS_DEV_UNINITIALIZED;
75 
76 	return device;
77 }
78 
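/*
 * Allocate an RNDIS control request, assign it a unique request id and
 * queue it on the device's request list so that the matching completion
 * from the host can be paired with it later.
 */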
79 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
80 					     u32 msg_type,
81 					     u32 msg_len)
82 {
83 	struct rndis_request *request;
84 	struct rndis_message *rndis_msg;
85 	struct rndis_set_request *set;
86 	unsigned long flags;
87 
88 	request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
89 	if (!request)
90 		return NULL;
91 
92 	init_completion(&request->wait_event);
93 
94 	rndis_msg = &request->request_msg;
95 	rndis_msg->ndis_msg_type = msg_type;
96 	rndis_msg->msg_len = msg_len;
97 
98 	request->pkt.q_idx = 0;
99 
100 	/*
101 	 * Set the request id. This field is always after the rndis header for
102 	 * request/response packet types, so we just use the SetRequest as a
103 	 * template
104 	 */
105 	set = &rndis_msg->msg.set_req;
106 	set->req_id = atomic_inc_return(&dev->new_req_id);
107 
108 	/* Add to the request list */
109 	spin_lock_irqsave(&dev->request_lock, flags);
110 	list_add_tail(&request->list_ent, &dev->req_list);
111 	spin_unlock_irqrestore(&dev->request_lock, flags);
112 
113 	return request;
114 }
115 
116 static void put_rndis_request(struct rndis_device *dev,
117 			    struct rndis_request *req)
118 {
119 	unsigned long flags;
120 
121 	spin_lock_irqsave(&dev->request_lock, flags);
122 	list_del(&req->list_ent);
123 	spin_unlock_irqrestore(&dev->request_lock, flags);
124 
125 	kfree(req);
126 }
127 
128 static void dump_rndis_message(struct net_device *netdev,
129 			       const struct rndis_message *rndis_msg)
130 {
131 	switch (rndis_msg->ndis_msg_type) {
132 	case RNDIS_MSG_PACKET:
133 		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
134 			   "data offset %u data len %u, # oob %u, "
135 			   "oob offset %u, oob len %u, pkt offset %u, "
136 			   "pkt len %u)\n",
137 			   rndis_msg->msg_len,
138 			   rndis_msg->msg.pkt.data_offset,
139 			   rndis_msg->msg.pkt.data_len,
140 			   rndis_msg->msg.pkt.num_oob_data_elements,
141 			   rndis_msg->msg.pkt.oob_data_offset,
142 			   rndis_msg->msg.pkt.oob_data_len,
143 			   rndis_msg->msg.pkt.per_pkt_info_offset,
144 			   rndis_msg->msg.pkt.per_pkt_info_len);
145 		break;
146 
147 	case RNDIS_MSG_INIT_C:
148 		netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
149 			"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
150 			"device flags %d, max xfer size 0x%x, max pkts %u, "
151 			"pkt aligned %u)\n",
152 			rndis_msg->msg_len,
153 			rndis_msg->msg.init_complete.req_id,
154 			rndis_msg->msg.init_complete.status,
155 			rndis_msg->msg.init_complete.major_ver,
156 			rndis_msg->msg.init_complete.minor_ver,
157 			rndis_msg->msg.init_complete.dev_flags,
158 			rndis_msg->msg.init_complete.max_xfer_size,
159 			rndis_msg->msg.init_complete.
160 			   max_pkt_per_msg,
161 			rndis_msg->msg.init_complete.
162 			   pkt_alignment_factor);
163 		break;
164 
165 	case RNDIS_MSG_QUERY_C:
166 		netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
167 			"(len %u, id 0x%x, status 0x%x, buf len %u, "
168 			"buf offset %u)\n",
169 			rndis_msg->msg_len,
170 			rndis_msg->msg.query_complete.req_id,
171 			rndis_msg->msg.query_complete.status,
172 			rndis_msg->msg.query_complete.
173 			   info_buflen,
174 			rndis_msg->msg.query_complete.
175 			   info_buf_offset);
176 		break;
177 
178 	case RNDIS_MSG_SET_C:
179 		netdev_dbg(netdev,
180 			"RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
181 			rndis_msg->msg_len,
182 			rndis_msg->msg.set_complete.req_id,
183 			rndis_msg->msg.set_complete.status);
184 		break;
185 
186 	case RNDIS_MSG_INDICATE:
187 		netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
188 			"(len %u, status 0x%x, buf len %u, buf offset %u)\n",
189 			rndis_msg->msg_len,
190 			rndis_msg->msg.indicate_status.status,
191 			rndis_msg->msg.indicate_status.status_buflen,
192 			rndis_msg->msg.indicate_status.status_buf_offset);
193 		break;
194 
195 	default:
196 		netdev_dbg(netdev, "0x%x (len %u)\n",
197 			rndis_msg->ndis_msg_type,
198 			rndis_msg->msg_len);
199 		break;
200 	}
201 }
202 
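/*
 * Send an RNDIS control request to the host. The message is described by
 * one page buffer, or by two when it straddles a Hyper-V page boundary.
 */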
203 static int rndis_filter_send_request(struct rndis_device *dev,
204 				  struct rndis_request *req)
205 {
206 	struct hv_netvsc_packet *packet;
207 	struct hv_page_buffer page_buf[2];
208 	struct hv_page_buffer *pb = page_buf;
209 	int ret;
210 
211 	/* Setup the packet to send it */
212 	packet = &req->pkt;
213 
214 	packet->total_data_buflen = req->request_msg.msg_len;
215 	packet->page_buf_cnt = 1;
216 
217 	pb[0].pfn = virt_to_phys(&req->request_msg) >>
218 					HV_HYP_PAGE_SHIFT;
219 	pb[0].len = req->request_msg.msg_len;
220 	pb[0].offset = offset_in_hvpage(&req->request_msg);
221 
222 	/* Add one page_buf when request_msg crosses a page boundary */
223 	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
224 		packet->page_buf_cnt++;
225 		pb[0].len = HV_HYP_PAGE_SIZE -
226 			pb[0].offset;
227 		pb[1].pfn = virt_to_phys((void *)&req->request_msg
228 			+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
229 		pb[1].offset = 0;
230 		pb[1].len = req->request_msg.msg_len -
231 			pb[0].len;
232 	}
233 
234 	trace_rndis_send(dev->ndev, 0, &req->request_msg);
235 
236 	rcu_read_lock_bh();
237 	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
238 	rcu_read_unlock_bh();
239 
240 	return ret;
241 }
242 
243 static void rndis_set_link_state(struct rndis_device *rdev,
244 				 struct rndis_request *request)
245 {
246 	u32 link_status;
247 	struct rndis_query_complete *query_complete;
248 
249 	query_complete = &request->response_msg.msg.query_complete;
250 
251 	if (query_complete->status == RNDIS_STATUS_SUCCESS &&
252 	    query_complete->info_buflen == sizeof(u32)) {
253 		memcpy(&link_status, (void *)((unsigned long)query_complete +
254 		       query_complete->info_buf_offset), sizeof(u32));
255 		rdev->link_state = link_status != 0;
256 	}
257 }
258 
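/*
 * Handle a completion message from the host: match it to the pending
 * request by request id, copy the response (bounded by RNDIS_EXT_LEN)
 * and wake up the waiter.
 */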
259 static void rndis_filter_receive_response(struct net_device *ndev,
260 					  struct netvsc_device *nvdev,
261 					  const struct rndis_message *resp)
262 {
263 	struct rndis_device *dev = nvdev->extension;
264 	struct rndis_request *request = NULL;
265 	bool found = false;
266 	unsigned long flags;
267 
268 	/* This should never happen; it means a control message
269 	 * response was received after the device was removed.
270 	 */
271 	if (dev->state == RNDIS_DEV_UNINITIALIZED) {
272 		netdev_err(ndev,
273 			   "got rndis message uninitialized\n");
274 		return;
275 	}
276 
277 	/* Ensure the packet is big enough to read req_id. Req_id is the 1st
278 	 * field in any request/response message, so the payload should have at
279 	 * least sizeof(u32) bytes
280 	 */
281 	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
282 		netdev_err(ndev, "rndis msg_len too small: %u\n",
283 			   resp->msg_len);
284 		return;
285 	}
286 
287 	spin_lock_irqsave(&dev->request_lock, flags);
288 	list_for_each_entry(request, &dev->req_list, list_ent) {
289 		/*
290 		 * All request/response messages contain RequestId as the 1st
291 		 * field
292 		 */
293 		if (request->request_msg.msg.init_req.req_id
294 		    == resp->msg.init_complete.req_id) {
295 			found = true;
296 			break;
297 		}
298 	}
299 	spin_unlock_irqrestore(&dev->request_lock, flags);
300 
301 	if (found) {
302 		if (resp->msg_len <=
303 		    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
304 			memcpy(&request->response_msg, resp,
305 			       resp->msg_len);
306 			if (request->request_msg.ndis_msg_type ==
307 			    RNDIS_MSG_QUERY && request->request_msg.msg.
308 			    query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
309 				rndis_set_link_state(dev, request);
310 		} else {
311 			netdev_err(ndev,
312 				"rndis response buffer overflow "
313 				"detected (size %u max %zu)\n",
314 				resp->msg_len,
315 				sizeof(struct rndis_message));
316 
317 			if (resp->ndis_msg_type ==
318 			    RNDIS_MSG_RESET_C) {
319 				/* does not have a request id field */
320 				request->response_msg.msg.reset_complete.
321 					status = RNDIS_STATUS_BUFFER_OVERFLOW;
322 			} else {
323 				request->response_msg.msg.
324 				init_complete.status =
325 					RNDIS_STATUS_BUFFER_OVERFLOW;
326 			}
327 		}
328 
329 		complete(&request->wait_event);
330 	} else {
331 		netdev_err(ndev,
332 			"no rndis request found for this response "
333 			"(id 0x%x res type 0x%x)\n",
334 			resp->msg.init_complete.req_id,
335 			resp->ndis_msg_type);
336 	}
337 }
338 
339 /*
340  * Get the Per-Packet-Info with the specified type
341  * return NULL if not found.
342  */
343 static inline void *rndis_get_ppi(struct net_device *ndev,
344 				  struct rndis_packet *rpkt,
345 				  u32 rpkt_len, u32 type, u8 internal)
346 {
347 	struct rndis_per_packet_info *ppi;
348 	int len;
349 
350 	if (rpkt->per_pkt_info_offset == 0)
351 		return NULL;
352 
353 	/* Validate info_offset and info_len */
354 	if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
355 	    rpkt->per_pkt_info_offset > rpkt_len) {
356 		netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
357 			   rpkt->per_pkt_info_offset);
358 		return NULL;
359 	}
360 
361 	if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
362 		netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
363 			   rpkt->per_pkt_info_len);
364 		return NULL;
365 	}
366 
367 	ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
368 		rpkt->per_pkt_info_offset);
369 	len = rpkt->per_pkt_info_len;
370 
371 	while (len > 0) {
372 		/* Validate ppi_offset and ppi_size */
373 		if (ppi->size > len) {
374 			netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
375 			break;
376 		}
377 
378 		if (ppi->ppi_offset >= ppi->size) {
379 			netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
380 			break;
381 		}
382 
383 		if (ppi->type == type && ppi->internal == internal)
384 			return (void *)((ulong)ppi + ppi->ppi_offset);
385 		len -= ppi->size;
386 		ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
387 	}
388 
389 	return NULL;
390 }
391 
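/*
 * Append one receive fragment to the channel's RSC (receive segment
 * coalescing) state; the first fragment also records the per-packet
 * vlan, checksum and hash metadata.
 */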
392 static inline
393 void rsc_add_data(struct netvsc_channel *nvchan,
394 		  const struct ndis_pkt_8021q_info *vlan,
395 		  const struct ndis_tcp_ip_checksum_info *csum_info,
396 		  const u32 *hash_info,
397 		  void *data, u32 len)
398 {
399 	u32 cnt = nvchan->rsc.cnt;
400 
401 	if (cnt) {
402 		nvchan->rsc.pktlen += len;
403 	} else {
404 		nvchan->rsc.vlan = vlan;
405 		nvchan->rsc.csum_info = csum_info;
406 		nvchan->rsc.pktlen = len;
407 		nvchan->rsc.hash_info = hash_info;
408 	}
409 
410 	nvchan->rsc.data[cnt] = data;
411 	nvchan->rsc.len[cnt] = len;
412 	nvchan->rsc.cnt++;
413 }
414 
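/*
 * Validate and unwrap one RNDIS data packet from the (untrusted) host,
 * collect RSC fragments, and hand completed packets to
 * netvsc_recv_callback().
 */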
415 static int rndis_filter_receive_data(struct net_device *ndev,
416 				     struct netvsc_device *nvdev,
417 				     struct netvsc_channel *nvchan,
418 				     struct rndis_message *msg,
419 				     u32 data_buflen)
420 {
421 	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
422 	const struct ndis_tcp_ip_checksum_info *csum_info;
423 	const struct ndis_pkt_8021q_info *vlan;
424 	const struct rndis_pktinfo_id *pktinfo_id;
425 	const u32 *hash_info;
426 	u32 data_offset, rpkt_len;
427 	void *data;
428 	bool rsc_more = false;
429 	int ret;
430 
431 	/* Ensure data_buflen is big enough to read header fields */
432 	if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
433 		netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
434 			   data_buflen);
435 		return NVSP_STAT_FAIL;
436 	}
437 
438 	/* Validate rndis_pkt offset */
439 	if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
440 		netdev_err(ndev, "invalid rndis packet offset: %u\n",
441 			   rndis_pkt->data_offset);
442 		return NVSP_STAT_FAIL;
443 	}
444 
445 	/* Remove the rndis header and pass it back up the stack */
446 	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
447 
448 	rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
449 	data_buflen -= data_offset;
450 
451 	/*
452 	 * Make sure we got a valid RNDIS message, now total_data_buflen
453 	 * should be the data packet size plus the trailer padding size
454 	 */
455 	if (unlikely(data_buflen < rndis_pkt->data_len)) {
456 		netdev_err(ndev, "rndis message buffer "
457 			   "overflow detected (got %u, min %u)"
458 			   "...dropping this message!\n",
459 			   data_buflen, rndis_pkt->data_len);
460 		return NVSP_STAT_FAIL;
461 	}
462 
463 	vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);
464 
465 	csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);
466 
467 	hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);
468 
469 	pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);
470 
471 	data = (void *)msg + data_offset;
472 
473 	/* Identify RSC frags, drop erroneous packets */
474 	if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
475 		if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
476 			nvchan->rsc.cnt = 0;
477 		else if (nvchan->rsc.cnt == 0)
478 			goto drop;
479 
480 		rsc_more = true;
481 
482 		if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
483 			rsc_more = false;
484 
485 		if (rsc_more && nvchan->rsc.is_last)
486 			goto drop;
487 	} else {
488 		nvchan->rsc.cnt = 0;
489 	}
490 
491 	if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
492 		goto drop;
493 
494 	/* Put data into the per-channel structure.
495 	 * Also, remove the rndis trailer padding from the rndis packet message;
496 	 * rndis_pkt->data_len tells us the real data length, so we only copy
497 	 * the data packet to the stack, without the rndis trailer padding
498 	 */
499 	rsc_add_data(nvchan, vlan, csum_info, hash_info,
500 		     data, rndis_pkt->data_len);
501 
502 	if (rsc_more)
503 		return NVSP_STAT_SUCCESS;
504 
505 	ret = netvsc_recv_callback(ndev, nvdev, nvchan);
506 	nvchan->rsc.cnt = 0;
507 
508 	return ret;
509 
510 drop:
511 	return NVSP_STAT_FAIL;
512 }
513 
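/* Top-level RNDIS receive dispatch: data packets, completions and status
 * indications.
 */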
514 int rndis_filter_receive(struct net_device *ndev,
515 			 struct netvsc_device *net_dev,
516 			 struct netvsc_channel *nvchan,
517 			 void *data, u32 buflen)
518 {
519 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
520 	struct rndis_message *rndis_msg = data;
521 
522 	if (netif_msg_rx_status(net_device_ctx))
523 		dump_rndis_message(ndev, rndis_msg);
524 
525 	/* Validate incoming rndis_message packet */
526 	if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
527 	    buflen < rndis_msg->msg_len) {
528 		netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
529 			   buflen, rndis_msg->msg_len);
530 		return NVSP_STAT_FAIL;
531 	}
532 
533 	switch (rndis_msg->ndis_msg_type) {
534 	case RNDIS_MSG_PACKET:
535 		return rndis_filter_receive_data(ndev, net_dev, nvchan,
536 						 rndis_msg, buflen);
537 	case RNDIS_MSG_INIT_C:
538 	case RNDIS_MSG_QUERY_C:
539 	case RNDIS_MSG_SET_C:
540 		/* completion msgs */
541 		rndis_filter_receive_response(ndev, net_dev, rndis_msg);
542 		break;
543 
544 	case RNDIS_MSG_INDICATE:
545 		/* notification msgs */
546 		netvsc_linkstatus_callback(ndev, rndis_msg);
547 		break;
548 	default:
549 		netdev_err(ndev,
550 			"unhandled rndis message (type %u len %u)\n",
551 			   rndis_msg->ndis_msg_type,
552 			   rndis_msg->msg_len);
553 		return NVSP_STAT_FAIL;
554 	}
555 
556 	return NVSP_STAT_SUCCESS;
557 }
558 
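/*
 * Issue an RNDIS query for the given OID and copy the response into
 * 'result'. The offload and RSS capability OIDs need an NDIS object
 * header in the request buffer, which is filled in here as well.
 */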
559 static int rndis_filter_query_device(struct rndis_device *dev,
560 				     struct netvsc_device *nvdev,
561 				     u32 oid, void *result, u32 *result_size)
562 {
563 	struct rndis_request *request;
564 	u32 inresult_size = *result_size;
565 	struct rndis_query_request *query;
566 	struct rndis_query_complete *query_complete;
567 	int ret = 0;
568 
569 	if (!result)
570 		return -EINVAL;
571 
572 	*result_size = 0;
573 	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
574 			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
575 	if (!request) {
576 		ret = -ENOMEM;
577 		goto cleanup;
578 	}
579 
580 	/* Setup the rndis query */
581 	query = &request->request_msg.msg.query_req;
582 	query->oid = oid;
583 	query->info_buf_offset = sizeof(struct rndis_query_request);
584 	query->info_buflen = 0;
585 	query->dev_vc_handle = 0;
586 
587 	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
588 		struct ndis_offload *hwcaps;
589 		u32 nvsp_version = nvdev->nvsp_version;
590 		u8 ndis_rev;
591 		size_t size;
592 
593 		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
594 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
595 			size = NDIS_OFFLOAD_SIZE;
596 		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
597 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
598 			size = NDIS_OFFLOAD_SIZE_6_1;
599 		} else {
600 			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
601 			size = NDIS_OFFLOAD_SIZE_6_0;
602 		}
603 
604 		request->request_msg.msg_len += size;
605 		query->info_buflen = size;
606 		hwcaps = (struct ndis_offload *)
607 			((unsigned long)query + query->info_buf_offset);
608 
609 		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
610 		hwcaps->header.revision = ndis_rev;
611 		hwcaps->header.size = size;
612 
613 	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
614 		struct ndis_recv_scale_cap *cap;
615 
616 		request->request_msg.msg_len +=
617 			sizeof(struct ndis_recv_scale_cap);
618 		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
619 		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
620 						     query->info_buf_offset);
621 		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
622 		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
623 		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
624 	}
625 
626 	ret = rndis_filter_send_request(dev, request);
627 	if (ret != 0)
628 		goto cleanup;
629 
630 	wait_for_completion(&request->wait_event);
631 
632 	/* Copy the response back */
633 	query_complete = &request->response_msg.msg.query_complete;
634 
635 	if (query_complete->info_buflen > inresult_size) {
636 		ret = -1;
637 		goto cleanup;
638 	}
639 
640 	memcpy(result,
641 	       (void *)((unsigned long)query_complete +
642 			 query_complete->info_buf_offset),
643 	       query_complete->info_buflen);
644 
645 	*result_size = query_complete->info_buflen;
646 
647 cleanup:
648 	if (request)
649 		put_rndis_request(dev, request);
650 
651 	return ret;
652 }
653 
654 /* Get the hardware offload capabilities */
655 static int
656 rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
657 		   struct ndis_offload *caps)
658 {
659 	u32 caps_len = sizeof(*caps);
660 	int ret;
661 
662 	memset(caps, 0, sizeof(*caps));
663 
664 	ret = rndis_filter_query_device(dev, net_device,
665 					OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
666 					caps, &caps_len);
667 	if (ret)
668 		return ret;
669 
670 	if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
671 		netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
672 			    caps->header.type);
673 		return -EINVAL;
674 	}
675 
676 	if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
677 		netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
678 			    caps->header.revision);
679 		return -EINVAL;
680 	}
681 
682 	if (caps->header.size > caps_len ||
683 	    caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
684 		netdev_warn(dev->ndev,
685 			    "invalid NDIS objsize %u, data size %u\n",
686 			    caps->header.size, caps_len);
687 		return -EINVAL;
688 	}
689 
690 	return 0;
691 }
692 
693 static int rndis_filter_query_device_mac(struct rndis_device *dev,
694 					 struct netvsc_device *net_device)
695 {
696 	u32 size = ETH_ALEN;
697 
698 	return rndis_filter_query_device(dev, net_device,
699 				      RNDIS_OID_802_3_PERMANENT_ADDRESS,
700 				      dev->hw_mac_adr, &size);
701 }
702 
703 #define NWADR_STR "NetworkAddress"
704 #define NWADR_STRLEN 14
705 
706 int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
707 				const char *mac)
708 {
709 	struct rndis_device *rdev = nvdev->extension;
710 	struct rndis_request *request;
711 	struct rndis_set_request *set;
712 	struct rndis_config_parameter_info *cpi;
713 	wchar_t *cfg_nwadr, *cfg_mac;
714 	struct rndis_set_complete *set_complete;
715 	char macstr[2*ETH_ALEN+1];
716 	u32 extlen = sizeof(struct rndis_config_parameter_info) +
717 		2*NWADR_STRLEN + 4*ETH_ALEN;
718 	int ret;
719 
720 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
721 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
722 	if (!request)
723 		return -ENOMEM;
724 
725 	set = &request->request_msg.msg.set_req;
726 	set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
727 	set->info_buflen = extlen;
728 	set->info_buf_offset = sizeof(struct rndis_set_request);
729 	set->dev_vc_handle = 0;
730 
731 	cpi = (struct rndis_config_parameter_info *)((ulong)set +
732 		set->info_buf_offset);
733 	cpi->parameter_name_offset =
734 		sizeof(struct rndis_config_parameter_info);
735 	/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
736 	cpi->parameter_name_length = 2*NWADR_STRLEN;
737 	cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
738 	cpi->parameter_value_offset =
739 		cpi->parameter_name_offset + cpi->parameter_name_length;
740 	/* Multiply by 4 because each MAC byte is displayed as 2 utf16 chars */
741 	cpi->parameter_value_length = 4*ETH_ALEN;
742 
743 	cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
744 	cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
745 	ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
746 			      cfg_nwadr, NWADR_STRLEN);
747 	if (ret < 0)
748 		goto cleanup;
749 	snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
750 	ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
751 			      cfg_mac, 2*ETH_ALEN);
752 	if (ret < 0)
753 		goto cleanup;
754 
755 	ret = rndis_filter_send_request(rdev, request);
756 	if (ret != 0)
757 		goto cleanup;
758 
759 	wait_for_completion(&request->wait_event);
760 
761 	set_complete = &request->response_msg.msg.set_complete;
762 	if (set_complete->status != RNDIS_STATUS_SUCCESS)
763 		ret = -EIO;
764 
765 cleanup:
766 	put_rndis_request(rdev, request);
767 	return ret;
768 }
769 
770 int
771 rndis_filter_set_offload_params(struct net_device *ndev,
772 				struct netvsc_device *nvdev,
773 				struct ndis_offload_params *req_offloads)
774 {
775 	struct rndis_device *rdev = nvdev->extension;
776 	struct rndis_request *request;
777 	struct rndis_set_request *set;
778 	struct ndis_offload_params *offload_params;
779 	struct rndis_set_complete *set_complete;
780 	u32 extlen = sizeof(struct ndis_offload_params);
781 	int ret;
782 	u32 vsp_version = nvdev->nvsp_version;
783 
784 	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
785 		extlen = VERSION_4_OFFLOAD_SIZE;
786 		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
787 		 * UDP checksum offload.
788 		 */
789 		req_offloads->udp_ip_v4_csum = 0;
790 		req_offloads->udp_ip_v6_csum = 0;
791 	}
792 
793 	request = get_rndis_request(rdev, RNDIS_MSG_SET,
794 		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
795 	if (!request)
796 		return -ENOMEM;
797 
798 	set = &request->request_msg.msg.set_req;
799 	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
800 	set->info_buflen = extlen;
801 	set->info_buf_offset = sizeof(struct rndis_set_request);
802 	set->dev_vc_handle = 0;
803 
804 	offload_params = (struct ndis_offload_params *)((ulong)set +
805 				set->info_buf_offset);
806 	*offload_params = *req_offloads;
807 	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
808 	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
809 	offload_params->header.size = extlen;
810 
811 	ret = rndis_filter_send_request(rdev, request);
812 	if (ret != 0)
813 		goto cleanup;
814 
815 	wait_for_completion(&request->wait_event);
816 	set_complete = &request->response_msg.msg.set_complete;
817 	if (set_complete->status != RNDIS_STATUS_SUCCESS) {
818 		netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
819 			   set_complete->status);
820 		ret = -EINVAL;
821 	}
822 
823 cleanup:
824 	put_rndis_request(rdev, request);
825 	return ret;
826 }
827 
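/*
 * Send OID_GEN_RECEIVE_SCALE_PARAMETERS with the Toeplitz hash settings,
 * the indirection table from the netdev context and the hash key.
 */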
828 static int rndis_set_rss_param_msg(struct rndis_device *rdev,
829 				   const u8 *rss_key, u16 flag)
830 {
831 	struct net_device *ndev = rdev->ndev;
832 	struct net_device_context *ndc = netdev_priv(ndev);
833 	struct rndis_request *request;
834 	struct rndis_set_request *set;
835 	struct rndis_set_complete *set_complete;
836 	u32 extlen = sizeof(struct ndis_recv_scale_param) +
837 		     4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
838 	struct ndis_recv_scale_param *rssp;
839 	u32 *itab;
840 	u8 *keyp;
841 	int i, ret;
842 
843 	request = get_rndis_request(
844 			rdev, RNDIS_MSG_SET,
845 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
846 	if (!request)
847 		return -ENOMEM;
848 
849 	set = &request->request_msg.msg.set_req;
850 	set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
851 	set->info_buflen = extlen;
852 	set->info_buf_offset = sizeof(struct rndis_set_request);
853 	set->dev_vc_handle = 0;
854 
855 	rssp = (struct ndis_recv_scale_param *)(set + 1);
856 	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
857 	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
858 	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
859 	rssp->flag = flag;
860 	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
861 			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
862 			 NDIS_HASH_TCP_IPV6;
863 	rssp->indirect_tabsize = 4*ITAB_NUM;
864 	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
865 	rssp->hashkey_size = NETVSC_HASH_KEYLEN;
866 	rssp->hashkey_offset = rssp->indirect_taboffset +
867 			       rssp->indirect_tabsize;
868 
869 	/* Set indirection table entries */
870 	itab = (u32 *)(rssp + 1);
871 	for (i = 0; i < ITAB_NUM; i++)
872 		itab[i] = ndc->rx_table[i];
873 
874 	/* Set hash key values */
875 	keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
876 	memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
877 
878 	ret = rndis_filter_send_request(rdev, request);
879 	if (ret != 0)
880 		goto cleanup;
881 
882 	wait_for_completion(&request->wait_event);
883 	set_complete = &request->response_msg.msg.set_complete;
884 	if (set_complete->status == RNDIS_STATUS_SUCCESS) {
885 		if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
886 		    !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
887 			memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
888 
889 	} else {
890 		netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
891 			   set_complete->status);
892 		ret = -EINVAL;
893 	}
894 
895 cleanup:
896 	put_rndis_request(rdev, request);
897 	return ret;
898 }
899 
900 int rndis_filter_set_rss_param(struct rndis_device *rdev,
901 			       const u8 *rss_key)
902 {
903 	/* Disable RSS before change */
904 	rndis_set_rss_param_msg(rdev, rss_key,
905 				NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
906 
907 	return rndis_set_rss_param_msg(rdev, rss_key, 0);
908 }
909 
910 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
911 						 struct netvsc_device *net_device)
912 {
913 	u32 size = sizeof(u32);
914 	u32 link_status;
915 
916 	return rndis_filter_query_device(dev, net_device,
917 					 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
918 					 &link_status, &size);
919 }
920 
921 static int rndis_filter_query_link_speed(struct rndis_device *dev,
922 					 struct netvsc_device *net_device)
923 {
924 	u32 size = sizeof(u32);
925 	u32 link_speed;
926 	struct net_device_context *ndc;
927 	int ret;
928 
929 	ret = rndis_filter_query_device(dev, net_device,
930 					RNDIS_OID_GEN_LINK_SPEED,
931 					&link_speed, &size);
932 
933 	if (!ret) {
934 		ndc = netdev_priv(dev->ndev);
935 
936 		/* The link speed reported by the host is in 100 bps units, so
937 		 * we convert it to Mbps here.
938 		 */
939 		ndc->speed = link_speed / 10000;
940 	}
941 
942 	return ret;
943 }
944 
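/* Set RNDIS_OID_GEN_CURRENT_PACKET_FILTER; skipped if the filter is
 * unchanged.
 */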
945 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
946 					  u32 new_filter)
947 {
948 	struct rndis_request *request;
949 	struct rndis_set_request *set;
950 	int ret;
951 
952 	if (dev->filter == new_filter)
953 		return 0;
954 
955 	request = get_rndis_request(dev, RNDIS_MSG_SET,
956 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
957 			sizeof(u32));
958 	if (!request)
959 		return -ENOMEM;
960 
961 	/* Setup the rndis set */
962 	set = &request->request_msg.msg.set_req;
963 	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
964 	set->info_buflen = sizeof(u32);
965 	set->info_buf_offset = sizeof(struct rndis_set_request);
966 
967 	memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
968 	       &new_filter, sizeof(u32));
969 
970 	ret = rndis_filter_send_request(dev, request);
971 	if (ret == 0) {
972 		wait_for_completion(&request->wait_event);
973 		dev->filter = new_filter;
974 	}
975 
976 	put_rndis_request(dev, request);
977 
978 	return ret;
979 }
980 
981 static void rndis_set_multicast(struct work_struct *w)
982 {
983 	struct rndis_device *rdev
984 		= container_of(w, struct rndis_device, mcast_work);
985 	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
986 	unsigned int flags = rdev->ndev->flags;
987 
988 	if (flags & IFF_PROMISC) {
989 		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
990 	} else {
991 		if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
992 			filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
993 		if (flags & IFF_BROADCAST)
994 			filter |= NDIS_PACKET_TYPE_BROADCAST;
995 	}
996 
997 	rndis_filter_set_packet_filter(rdev, filter);
998 }
999 
1000 void rndis_filter_update(struct netvsc_device *nvdev)
1001 {
1002 	struct rndis_device *rdev = nvdev->extension;
1003 
1004 	schedule_work(&rdev->mcast_work);
1005 }
1006 
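/*
 * Perform the RNDIS initialization handshake and record the negotiated
 * max packets per message and packet alignment in the netvsc device.
 */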
1007 static int rndis_filter_init_device(struct rndis_device *dev,
1008 				    struct netvsc_device *nvdev)
1009 {
1010 	struct rndis_request *request;
1011 	struct rndis_initialize_request *init;
1012 	struct rndis_initialize_complete *init_complete;
1013 	u32 status;
1014 	int ret;
1015 
1016 	request = get_rndis_request(dev, RNDIS_MSG_INIT,
1017 			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
1018 	if (!request) {
1019 		ret = -ENOMEM;
1020 		goto cleanup;
1021 	}
1022 
1023 	/* Setup the rndis set */
1024 	init = &request->request_msg.msg.init_req;
1025 	init->major_ver = RNDIS_MAJOR_VERSION;
1026 	init->minor_ver = RNDIS_MINOR_VERSION;
1027 	init->max_xfer_size = 0x4000;
1028 
1029 	dev->state = RNDIS_DEV_INITIALIZING;
1030 
1031 	ret = rndis_filter_send_request(dev, request);
1032 	if (ret != 0) {
1033 		dev->state = RNDIS_DEV_UNINITIALIZED;
1034 		goto cleanup;
1035 	}
1036 
1037 	wait_for_completion(&request->wait_event);
1038 
1039 	init_complete = &request->response_msg.msg.init_complete;
1040 	status = init_complete->status;
1041 	if (status == RNDIS_STATUS_SUCCESS) {
1042 		dev->state = RNDIS_DEV_INITIALIZED;
1043 		nvdev->max_pkt = init_complete->max_pkt_per_msg;
1044 		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
1045 		ret = 0;
1046 	} else {
1047 		dev->state = RNDIS_DEV_UNINITIALIZED;
1048 		ret = -EINVAL;
1049 	}
1050 
1051 cleanup:
1052 	if (request)
1053 		put_rndis_request(dev, request);
1054 
1055 	return ret;
1056 }
1057 
1058 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
1059 {
1060 	int i;
1061 
1062 	for (i = 0; i < nvdev->num_chn; i++) {
1063 		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1064 
1065 		if (nvchan->mrc.first != nvchan->mrc.next)
1066 			return false;
1067 
1068 		if (atomic_read(&nvchan->queue_sends) > 0)
1069 			return false;
1070 	}
1071 
1072 	return true;
1073 }
1074 
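/*
 * Send a best-effort RNDIS_MSG_HALT, mark the device uninitialized and
 * wait for all outstanding sends and receive completions to drain.
 */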
1075 static void rndis_filter_halt_device(struct netvsc_device *nvdev,
1076 				     struct rndis_device *dev)
1077 {
1078 	struct rndis_request *request;
1079 	struct rndis_halt_request *halt;
1080 
1081 	/* Attempt to do a rndis device halt */
1082 	request = get_rndis_request(dev, RNDIS_MSG_HALT,
1083 				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
1084 	if (!request)
1085 		goto cleanup;
1086 
1087 	/* Setup the rndis set */
1088 	halt = &request->request_msg.msg.halt_req;
1089 	halt->req_id = atomic_inc_return(&dev->new_req_id);
1090 
1091 	/* Ignore return since this msg is optional. */
1092 	rndis_filter_send_request(dev, request);
1093 
1094 	dev->state = RNDIS_DEV_UNINITIALIZED;
1095 
1096 cleanup:
1097 	nvdev->destroy = true;
1098 
1099 	/* Force flag to be ordered before waiting */
1100 	wmb();
1101 
1102 	/* Wait for all send completions */
1103 	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
1104 
1105 	if (request)
1106 		put_rndis_request(dev, request);
1107 }
1108 
1109 static int rndis_filter_open_device(struct rndis_device *dev)
1110 {
1111 	int ret;
1112 
1113 	if (dev->state != RNDIS_DEV_INITIALIZED)
1114 		return 0;
1115 
1116 	ret = rndis_filter_set_packet_filter(dev,
1117 					 NDIS_PACKET_TYPE_BROADCAST |
1118 					 NDIS_PACKET_TYPE_ALL_MULTICAST |
1119 					 NDIS_PACKET_TYPE_DIRECTED);
1120 	if (ret == 0)
1121 		dev->state = RNDIS_DEV_DATAINITIALIZED;
1122 
1123 	return ret;
1124 }
1125 
1126 static int rndis_filter_close_device(struct rndis_device *dev)
1127 {
1128 	int ret;
1129 
1130 	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1131 		return 0;
1132 
1133 	/* Make sure rndis_set_multicast doesn't re-enable filter! */
1134 	cancel_work_sync(&dev->mcast_work);
1135 
1136 	ret = rndis_filter_set_packet_filter(dev, 0);
1137 	if (ret == -ENODEV)
1138 		ret = 0;
1139 
1140 	if (ret == 0)
1141 		dev->state = RNDIS_DEV_INITIALIZED;
1142 
1143 	return ret;
1144 }
1145 
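/* Callback invoked when the host offers a subchannel: open it and enable
 * NAPI on the corresponding channel table entry.
 */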
1146 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1147 {
1148 	struct net_device *ndev =
1149 		hv_get_drvdata(new_sc->primary_channel->device_obj);
1150 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1151 	struct netvsc_device *nvscdev;
1152 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1153 	struct netvsc_channel *nvchan;
1154 	int ret;
1155 
1156 	/* This is safe because this callback only happens when
1157 	 * a new device is being set up and waiting on the channel_init_wait.
1158 	 */
1159 	nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
1160 	if (!nvscdev || chn_index >= nvscdev->num_chn)
1161 		return;
1162 
1163 	nvchan = nvscdev->chan_table + chn_index;
1164 
1165 	/* Because the device uses NAPI, all the interrupt batching and
1166 	 * control is done via Net softirq, not the channel handling
1167 	 */
1168 	set_channel_read_mode(new_sc, HV_CALL_ISR);
1169 
1170 	/* Set the channel before opening.*/
1171 	nvchan->channel = new_sc;
1172 
1173 	ret = vmbus_open(new_sc, netvsc_ring_bytes,
1174 			 netvsc_ring_bytes, NULL, 0,
1175 			 netvsc_channel_cb, nvchan);
1176 	if (ret == 0)
1177 		napi_enable(&nvchan->napi);
1178 	else
1179 		netdev_notice(ndev, "sub channel open failed: %d\n", ret);
1180 
1181 	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
1182 		wake_up(&nvscdev->subchan_open);
1183 }
1184 
1185 /* Open sub-channels after completing the handling of the device probe.
1186  * This breaks overlap of processing the host message for the
1187  * new primary channel with the initialization of sub-channels.
1188  */
1189 int rndis_set_subchannel(struct net_device *ndev,
1190 			 struct netvsc_device *nvdev,
1191 			 struct netvsc_device_info *dev_info)
1192 {
1193 	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1194 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1195 	struct hv_device *hv_dev = ndev_ctx->device_ctx;
1196 	struct rndis_device *rdev = nvdev->extension;
1197 	int i, ret;
1198 
1199 	ASSERT_RTNL();
1200 
1201 	memset(init_packet, 0, sizeof(struct nvsp_message));
1202 	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1203 	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1204 	init_packet->msg.v5_msg.subchn_req.num_subchannels =
1205 						nvdev->num_chn - 1;
1206 	trace_nvsp_send(ndev, init_packet);
1207 
1208 	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
1209 			       sizeof(struct nvsp_message),
1210 			       (unsigned long)init_packet,
1211 			       VM_PKT_DATA_INBAND,
1212 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1213 	if (ret) {
1214 		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
1215 		return ret;
1216 	}
1217 
1218 	wait_for_completion(&nvdev->channel_init_wait);
1219 	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1220 		netdev_err(ndev, "sub channel request failed\n");
1221 		return -EIO;
1222 	}
1223 
1224 	nvdev->num_chn = 1 +
1225 		init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1226 
1227 	/* wait for all sub channels to open */
1228 	wait_event(nvdev->subchan_open,
1229 		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1230 
1231 	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1232 		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1233 
1234 	/* ignore failures from setting rss parameters, still have channels */
1235 	if (dev_info)
1236 		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
1237 	else
1238 		rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1239 
1240 	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1241 	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
1242 
1243 	return 0;
1244 }
1245 
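/*
 * Query the host's offload capabilities and translate them into netdev
 * hw_features plus an NDIS offload-parameters set request.
 */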
1246 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
1247 				   struct netvsc_device *nvdev)
1248 {
1249 	struct net_device *net = rndis_device->ndev;
1250 	struct net_device_context *net_device_ctx = netdev_priv(net);
1251 	struct ndis_offload hwcaps;
1252 	struct ndis_offload_params offloads;
1253 	unsigned int gso_max_size = GSO_MAX_SIZE;
1254 	int ret;
1255 
1256 	/* Find HW offload capabilities */
1257 	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
1258 	if (ret != 0)
1259 		return ret;
1260 
1261 	/* A value of zero means "no change"; now turn on what we want. */
1262 	memset(&offloads, 0, sizeof(struct ndis_offload_params));
1263 
1264 	/* Linux does not care about IP checksum, always does in kernel */
1265 	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1266 
1267 	/* Reset previously set hw_features flags */
1268 	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
1269 	net_device_ctx->tx_checksum_mask = 0;
1270 
1271 	/* Compute tx offload settings based on hw capabilities */
1272 	net->hw_features |= NETIF_F_RXCSUM;
1273 	net->hw_features |= NETIF_F_SG;
1274 	net->hw_features |= NETIF_F_RXHASH;
1275 
1276 	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
1277 		/* Can checksum TCP */
1278 		net->hw_features |= NETIF_F_IP_CSUM;
1279 		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
1280 
1281 		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1282 
1283 		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
1284 			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1285 			net->hw_features |= NETIF_F_TSO;
1286 
1287 			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
1288 				gso_max_size = hwcaps.lsov2.ip4_maxsz;
1289 		}
1290 
1291 		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
1292 			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1293 			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
1294 		}
1295 	}
1296 
1297 	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
1298 		net->hw_features |= NETIF_F_IPV6_CSUM;
1299 
1300 		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1301 		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
1302 
1303 		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
1304 		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
1305 			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1306 			net->hw_features |= NETIF_F_TSO6;
1307 
1308 			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
1309 				gso_max_size = hwcaps.lsov2.ip6_maxsz;
1310 		}
1311 
1312 		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
1313 			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1314 			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
1315 		}
1316 	}
1317 
1318 	if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
1319 		net->hw_features |= NETIF_F_LRO;
1320 
1321 		if (net->features & NETIF_F_LRO) {
1322 			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1323 			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1324 		} else {
1325 			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1326 			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1327 		}
1328 	}
1329 
1330 	/* In case some hw_features disappeared we need to remove them from
1331 	 * net->features list as they're no longer supported.
1332 	 */
1333 	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
1334 
1335 	netif_set_gso_max_size(net, gso_max_size);
1336 
1337 	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
1338 
1339 	return ret;
1340 }
1341 
1342 static void rndis_get_friendly_name(struct net_device *net,
1343 				    struct rndis_device *rndis_device,
1344 				    struct netvsc_device *net_device)
1345 {
1346 	ucs2_char_t wname[256];
1347 	unsigned long len;
1348 	u8 ifalias[256];
1349 	u32 size;
1350 
1351 	size = sizeof(wname);
1352 	if (rndis_filter_query_device(rndis_device, net_device,
1353 				      RNDIS_OID_GEN_FRIENDLY_NAME,
1354 				      wname, &size) != 0)
1355 		return;	/* ignore if host does not support */
1356 
1357 	if (size == 0)
1358 		return;	/* name not set */
1359 
1360 	/* Convert Windows Unicode string to UTF-8 */
1361 	len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
1362 
1363 	/* ignore the default value from host */
1364 	if (strcmp(ifalias, "Network Adapter") != 0)
1365 		dev_set_alias(net, ifalias, len);
1366 }
1367 
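/*
 * Create the netvsc device, run the RNDIS init handshake, query MTU, MAC
 * address, link state and offload capabilities, and set up vRSS
 * subchannels when the host supports them.
 */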
1368 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1369 				      struct netvsc_device_info *device_info)
1370 {
1371 	struct net_device *net = hv_get_drvdata(dev);
1372 	struct net_device_context *ndc = netdev_priv(net);
1373 	struct netvsc_device *net_device;
1374 	struct rndis_device *rndis_device;
1375 	struct ndis_recv_scale_cap rsscap;
1376 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1377 	u32 mtu, size;
1378 	u32 num_possible_rss_qs;
1379 	int i, ret;
1380 
1381 	rndis_device = get_rndis_device();
1382 	if (!rndis_device)
1383 		return ERR_PTR(-ENODEV);
1384 
1385 	/* Let the inner driver handle this first to create the netvsc channel
1386 	 * NOTE! Once the channel is created, we may get a receive callback
1387 	 * (RndisFilterOnReceive()) before this call is completed
1388 	 */
1389 	net_device = netvsc_device_add(dev, device_info);
1390 	if (IS_ERR(net_device)) {
1391 		kfree(rndis_device);
1392 		return net_device;
1393 	}
1394 
1395 	/* Initialize the rndis device */
1396 	net_device->max_chn = 1;
1397 	net_device->num_chn = 1;
1398 
1399 	net_device->extension = rndis_device;
1400 	rndis_device->ndev = net;
1401 
1402 	/* Send the rndis initialization message */
1403 	ret = rndis_filter_init_device(rndis_device, net_device);
1404 	if (ret != 0)
1405 		goto err_dev_remv;
1406 
1407 	/* Get the MTU from the host */
1408 	size = sizeof(u32);
1409 	ret = rndis_filter_query_device(rndis_device, net_device,
1410 					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
1411 					&mtu, &size);
1412 	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
1413 		net->mtu = mtu;
1414 
1415 	/* Get the mac address */
1416 	ret = rndis_filter_query_device_mac(rndis_device, net_device);
1417 	if (ret != 0)
1418 		goto err_dev_remv;
1419 
1420 	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
1421 
1422 	/* Get friendly name as ifalias*/
1423 	if (!net->ifalias)
1424 		rndis_get_friendly_name(net, rndis_device, net_device);
1425 
1426 	/* Query and set hardware capabilities */
1427 	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
1428 	if (ret != 0)
1429 		goto err_dev_remv;
1430 
1431 	rndis_filter_query_device_link_status(rndis_device, net_device);
1432 
1433 	netdev_dbg(net, "Device MAC %pM link state %s\n",
1434 		   rndis_device->hw_mac_adr,
1435 		   rndis_device->link_state ? "down" : "up");
1436 
1437 	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1438 		goto out;
1439 
1440 	rndis_filter_query_link_speed(rndis_device, net_device);
1441 
1442 	/* vRSS setup */
1443 	memset(&rsscap, 0, rsscap_size);
1444 	ret = rndis_filter_query_device(rndis_device, net_device,
1445 					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1446 					&rsscap, &rsscap_size);
1447 	if (ret || rsscap.num_recv_que < 2)
1448 		goto out;
1449 
1450 	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
1451 	num_possible_rss_qs = min_t(u32, num_online_cpus(),
1452 				    rsscap.num_recv_que);
1453 
1454 	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
1455 
1456 	/* We will use the given number of channels if available. */
1457 	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1458 
1459 	if (!netif_is_rxfh_configured(net)) {
1460 		for (i = 0; i < ITAB_NUM; i++)
1461 			ndc->rx_table[i] = ethtool_rxfh_indir_default(
1462 						i, net_device->num_chn);
1463 	}
1464 
1465 	atomic_set(&net_device->open_chn, 1);
1466 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1467 
1468 	for (i = 1; i < net_device->num_chn; i++) {
1469 		ret = netvsc_alloc_recv_comp_ring(net_device, i);
1470 		if (ret) {
1471 			while (--i != 0)
1472 				vfree(net_device->chan_table[i].mrc.slots);
1473 			goto out;
1474 		}
1475 	}
1476 
1477 	for (i = 1; i < net_device->num_chn; i++)
1478 		netif_napi_add(net, &net_device->chan_table[i].napi,
1479 			       netvsc_poll, NAPI_POLL_WEIGHT);
1480 
1481 	return net_device;
1482 
1483 out:
1484 	/* setting up multiple channels failed */
1485 	net_device->max_chn = 1;
1486 	net_device->num_chn = 1;
1487 	return net_device;
1488 
1489 err_dev_remv:
1490 	rndis_filter_device_remove(dev, net_device);
1491 	return ERR_PTR(ret);
1492 }
1493 
1494 void rndis_filter_device_remove(struct hv_device *dev,
1495 				struct netvsc_device *net_dev)
1496 {
1497 	struct rndis_device *rndis_dev = net_dev->extension;
1498 
1499 	/* Halt and release the rndis device */
1500 	rndis_filter_halt_device(net_dev, rndis_dev);
1501 
1502 	netvsc_device_remove(dev);
1503 }
1504 
1505 int rndis_filter_open(struct netvsc_device *nvdev)
1506 {
1507 	if (!nvdev)
1508 		return -EINVAL;
1509 
1510 	return rndis_filter_open_device(nvdev->extension);
1511 }
1512 
1513 int rndis_filter_close(struct netvsc_device *nvdev)
1514 {
1515 	if (!nvdev)
1516 		return -EINVAL;
1517 
1518 	return rndis_filter_close_device(nvdev->extension);
1519 }
1520