1 /*
2  * Copyright (c) 2012, Microsoft Corporation.
3  *
4  * Author:
5  *   K. Y. Srinivasan <kys@microsoft.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License version 2 as published
9  * by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14  * NON INFRINGEMENT.  See the GNU General Public License for more
15  * details.
16  *
17  */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/kernel.h>
22 #include <linux/jiffies.h>
23 #include <linux/mman.h>
24 #include <linux/delay.h>
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/kthread.h>
29 #include <linux/completion.h>
30 #include <linux/memory_hotplug.h>
31 #include <linux/memory.h>
32 #include <linux/notifier.h>
33 #include <linux/percpu_counter.h>
34 
35 #include <linux/hyperv.h>
36 
37 /*
38  * We begin with definitions supporting the Dynamic Memory protocol
39  * with the host.
40  *
41  * Begin protocol definitions.
42  */
43 
44 
45 
46 /*
47  * Protocol versions. The low word is the minor version, the high word the major
48  * version.
49  *
50  * History:
51  * Initial version 1.0
52  * Changed to 0.1 on 2009/03/25
53  * Changed to 0.2 on 2009/05/14
54  * Changed to 0.3 on 2009/12/03
55  * Changed to 1.0 on 2011/04/05
56  */
57 
58 #define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
59 #define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
60 #define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
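/* Example: DYNMEM_MAKE_VERSION(2, 0) == 0x00020000, i.e. major 2, minor 0 (the WIN10 version below). */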
61 
62 enum {
63 	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
64 	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
65 	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
66 
67 	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
68 	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
69 	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
70 
71 	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
72 };
73 
74 
75 
76 /*
77  * Message Types
78  */
79 
80 enum dm_message_type {
81 	/*
82 	 * Version 0.3
83 	 */
84 	DM_ERROR			= 0,
85 	DM_VERSION_REQUEST		= 1,
86 	DM_VERSION_RESPONSE		= 2,
87 	DM_CAPABILITIES_REPORT		= 3,
88 	DM_CAPABILITIES_RESPONSE	= 4,
89 	DM_STATUS_REPORT		= 5,
90 	DM_BALLOON_REQUEST		= 6,
91 	DM_BALLOON_RESPONSE		= 7,
92 	DM_UNBALLOON_REQUEST		= 8,
93 	DM_UNBALLOON_RESPONSE		= 9,
94 	DM_MEM_HOT_ADD_REQUEST		= 10,
95 	DM_MEM_HOT_ADD_RESPONSE		= 11,
96 	DM_VERSION_03_MAX		= 11,
97 	/*
98 	 * Version 1.0.
99 	 */
100 	DM_INFO_MESSAGE			= 12,
101 	DM_VERSION_1_MAX		= 12
102 };
103 
104 
105 /*
106  * Structures defining the dynamic memory management
107  * protocol.
108  */
109 
110 union dm_version {
111 	struct {
112 		__u16 minor_version;
113 		__u16 major_version;
114 	};
115 	__u32 version;
116 } __packed;
117 
118 
119 union dm_caps {
120 	struct {
121 		__u64 balloon:1;
122 		__u64 hot_add:1;
123 		/*
124 		 * To support guests that may have alignment
125 		 * limitations on hot-add, the guest can specify
126 		 * its alignment requirements; a value of n
127 		 * represents an alignment of 2^n in megabytes.
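		 * For example, the value 7 (which this driver reports in its
		 * capabilities below) requests 2^7 = 128 MB alignment.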
128 		 */
129 		__u64 hot_add_alignment:4;
130 		__u64 reservedz:58;
131 	} cap_bits;
132 	__u64 caps;
133 } __packed;
134 
135 union dm_mem_page_range {
136 	struct  {
137 		/*
138 		 * The PFN number of the first page in the range.
139 		 * 40 bits is the architectural limit of a PFN
140 		 * number for AMD64.
141 		 */
142 		__u64 start_page:40;
143 		/*
144 		 * The number of pages in the range.
145 		 */
146 		__u64 page_cnt:24;
147 	} finfo;
148 	__u64  page_range;
149 } __packed;
150 
151 
152 
153 /*
154  * The header for all dynamic memory messages:
155  *
156  * type: Type of the message.
157  * size: Size of the message in bytes, including the header.
158  * trans_id: The guest is responsible for manufacturing this ID.
159  */
160 
161 struct dm_header {
162 	__u16 type;
163 	__u16 size;
164 	__u32 trans_id;
165 } __packed;
166 
167 /*
168  * A generic message format for dynamic memory.
169  * Specific message formats are defined later in the file.
170  */
171 
172 struct dm_message {
173 	struct dm_header hdr;
174 	__u8 data[]; /* enclosed message */
175 } __packed;
176 
177 
178 /*
179  * Specific message types supporting the dynamic memory protocol.
180  */
181 
182 /*
183  * Version negotiation message. Sent from the guest to the host.
184  * The guest is free to try different versions until the host
185  * accepts the version.
186  *
187  * dm_version: The protocol version requested.
188  * is_last_attempt: If TRUE, this is the last version the guest will request.
189  * reservedz: Reserved field, set to zero.
190  */
191 
192 struct dm_version_request {
193 	struct dm_header hdr;
194 	union dm_version version;
195 	__u32 is_last_attempt:1;
196 	__u32 reservedz:31;
197 } __packed;
198 
199 /*
200  * Version response message; sent from the host to the guest, indicating
201  * whether the host has accepted the version sent by the guest.
202  *
203  * is_accepted: If TRUE, host has accepted the version and the guest
204  * should proceed to the next stage of the protocol. FALSE indicates that
205  * the guest should retry with a different version.
206  *
207  * reservedz: Reserved field, set to zero.
208  */
209 
210 struct dm_version_response {
211 	struct dm_header hdr;
212 	__u64 is_accepted:1;
213 	__u64 reservedz:63;
214 } __packed;
215 
216 /*
217  * Message reporting capabilities. This is sent from the guest to the
218  * host.
219  */
220 
221 struct dm_capabilities {
222 	struct dm_header hdr;
223 	union dm_caps caps;
224 	__u64 min_page_cnt;
225 	__u64 max_page_number;
226 } __packed;
227 
228 /*
229  * Response to the capabilities message. This is sent from the host to the
230  * guest. This message indicates whether the host has accepted the guest's
231  * capabilities. If the host has not accepted them, the guest must shut down
232  * the service.
233  *
234  * is_accepted: Indicates if the host has accepted guest's capabilities.
235  * reservedz: Must be 0.
236  */
237 
238 struct dm_capabilities_resp_msg {
239 	struct dm_header hdr;
240 	__u64 is_accepted:1;
241 	__u64 reservedz:63;
242 } __packed;
243 
244 /*
245  * This message is used to report memory pressure from the guest.
246  * This message is not part of any transaction and there is no
247  * response to this message.
248  *
249  * num_avail: Available memory in pages.
250  * num_committed: Committed memory in pages.
251  * page_file_size: The accumulated size of all page files
252  *		   in the system in pages.
253  * zero_free: The number of zero and free pages.
254  * page_file_writes: The writes to the page file in pages.
255  * io_diff: An indicator of file cache efficiency or page file activity,
256  *	    calculated as File Cache Page Fault Count - Page Read Count.
257  *	    This value is in pages.
258  *
259  * Some of these metrics are Windows specific and fortunately
260  * the algorithm on the host side that computes the guest memory
261  * pressure only uses num_committed value.
262  */
263 
264 struct dm_status {
265 	struct dm_header hdr;
266 	__u64 num_avail;
267 	__u64 num_committed;
268 	__u64 page_file_size;
269 	__u64 zero_free;
270 	__u32 page_file_writes;
271 	__u32 io_diff;
272 } __packed;
273 
274 
275 /*
276  * Message to ask the guest to allocate memory - balloon up message.
277  * This message is sent from the host to the guest. The guest may not be
278  * able to allocate as much memory as requested.
279  *
280  * num_pages: number of pages to allocate.
281  */
282 
283 struct dm_balloon {
284 	struct dm_header hdr;
285 	__u32 num_pages;
286 	__u32 reservedz;
287 } __packed;
288 
289 
290 /*
291  * Balloon response message; this message is sent from the guest
292  * to the host in response to the balloon message.
293  *
294  * reservedz: Reserved; must be set to zero.
295  * more_pages: If FALSE, this is the last message of the transaction.
296  * if TRUE, there will be at least one more message from the guest.
297  *
298  * range_count: The number of ranges in the range array.
299  *
300  * range_array: An array of page ranges returned to the host.
301  *
302  */
303 
304 struct dm_balloon_response {
305 	struct dm_header hdr;
306 	__u32 reservedz;
307 	__u32 more_pages:1;
308 	__u32 range_count:31;
309 	union dm_mem_page_range range_array[];
310 } __packed;
311 
312 /*
313  * Un-balloon message; this message is sent from the host
314  * to the guest to give guest more memory.
315  *
316  * more_pages: If FALSE, this is the last message of the transaction.
317  * if TRUE, there will be at least one more message from the host.
318  *
319  * reservedz: Reserved; must be set to zero.
320  *
321  * range_count: The number of ranges in the range array.
322  *
323  * range_array: An array of page ranges returned to the host.
324  *
325  */
326 
327 struct dm_unballoon_request {
328 	struct dm_header hdr;
329 	__u32 more_pages:1;
330 	__u32 reservedz:31;
331 	__u32 range_count;
332 	union dm_mem_page_range range_array[];
333 } __packed;
334 
335 /*
336  * Un-balloon response message; this message is sent from the guest
337  * to the host in response to an unballoon request.
338  *
339  */
340 
341 struct dm_unballoon_response {
342 	struct dm_header hdr;
343 } __packed;
344 
345 
346 /*
347  * Hot add request message. Message sent from the host to the guest.
348  *
349  * mem_range: Memory range to hot add.
350  *
351  * On Linux we currently don't support this since we cannot hot add
352  * arbitrary granularity of memory.
353  */
354 
355 struct dm_hot_add {
356 	struct dm_header hdr;
357 	union dm_mem_page_range range;
358 } __packed;
359 
360 /*
361  * Hot add response message.
362  * This message is sent by the guest to report the status of a hot add request.
363  * If page_count is less than the requested page count, then the host should
364  * assume all further hot add requests will fail, since this indicates that
365  * the guest has hit an upper physical memory barrier.
366  *
367  * Hot adds may also fail due to low resources; in this case, the guest must
368  * not complete this message until the hot add can succeed, and the host must
369  * not send a new hot add request until the response is sent.
370  * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
371  * times, it fails the request.
372  *
373  *
374  * page_count: number of pages that were successfully hot added.
375  *
376  * result: Result of the operation; 1: success, 0: failure.
377  *
378  */
379 
380 struct dm_hot_add_response {
381 	struct dm_header hdr;
382 	__u32 page_count;
383 	__u32 result;
384 } __packed;
385 
386 /*
387  * Types of information sent from host to the guest.
388  */
389 
390 enum dm_info_type {
391 	INFO_TYPE_MAX_PAGE_CNT = 0,
392 	MAX_INFO_TYPE
393 };
394 
395 
396 /*
397  * Header for the information message.
398  */
399 
400 struct dm_info_header {
401 	enum dm_info_type type;
402 	__u32 data_size;
403 } __packed;
404 
405 /*
406  * This message is sent from the host to the guest to pass
407  * some relevant information (win8 addition).
408  *
409  * reserved: not used.
410  * info_size: size of the information blob.
411  * info: information blob.
412  */
413 
414 struct dm_info_msg {
415 	struct dm_header hdr;
416 	__u32 reserved;
417 	__u32 info_size;
418 	__u8  info[];
419 };
420 
421 /*
422  * End protocol definitions.
423  */
424 
425 /*
426  * State to manage hot adding memory into the guest.
427  * The range start_pfn : end_pfn specifies the range
428  * that the host has asked us to hot add. The range
429  * start_pfn : ha_end_pfn specifies the range that we have
430  * currently hot added. We hot add in multiples of 128M
431  * chunks; it is possible that we may not be able to bring
432  * online all the pages in the region. The range
433  * covered_start_pfn:covered_end_pfn defines the pages that can
434  * be brought online.
435  */
436 
437 struct hv_hotadd_state {
438 	struct list_head list;
439 	unsigned long start_pfn;
440 	unsigned long covered_start_pfn;
441 	unsigned long covered_end_pfn;
442 	unsigned long ha_end_pfn;
443 	unsigned long end_pfn;
444 	/*
445 	 * A list of gaps.
446 	 */
447 	struct list_head gap_list;
448 };
449 
450 struct hv_hotadd_gap {
451 	struct list_head list;
452 	unsigned long start_pfn;
453 	unsigned long end_pfn;
454 };
455 
456 struct balloon_state {
457 	__u32 num_pages;
458 	struct work_struct wrk;
459 };
460 
461 struct hot_add_wrk {
462 	union dm_mem_page_range ha_page_range;
463 	union dm_mem_page_range ha_region_range;
464 	struct work_struct wrk;
465 };
466 
467 static bool hot_add = true;
468 static bool do_hot_add;
469 /*
470  * Delay reporting memory pressure by
471  * the specified number of seconds.
472  */
473 static uint pressure_report_delay = 45;
474 
475 /*
476  * The last time we posted a pressure report to host.
477  */
478 static unsigned long last_post_time;
479 
480 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
481 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
482 
483 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
484 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
485 static atomic_t trans_id = ATOMIC_INIT(0);
486 
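/* Size of each VMBus ring buffer (send and receive) opened for the DM channel. */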
487 static int dm_ring_size = (5 * PAGE_SIZE);
488 
489 /*
490  * Driver specific state.
491  */
492 
493 enum hv_dm_state {
494 	DM_INITIALIZING = 0,
495 	DM_INITIALIZED,
496 	DM_BALLOON_UP,
497 	DM_BALLOON_DOWN,
498 	DM_HOT_ADD,
499 	DM_INIT_ERROR
500 };
501 
502 
503 static __u8 recv_buffer[PAGE_SIZE];
504 static __u8 *send_buffer;
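/*
 * PAGES_IN_2M: number of 4 KB pages in a 2 MB allocation unit.
 * HA_CHUNK: hot-add granularity in pages; 32K pages of 4 KB is 128 MB,
 * matching the 128 MB hot-add alignment reported to the host below.
 */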
505 #define PAGES_IN_2M	512
506 #define HA_CHUNK (32 * 1024)
507 
508 struct hv_dynmem_device {
509 	struct hv_device *dev;
510 	enum hv_dm_state state;
511 	struct completion host_event;
512 	struct completion config_event;
513 
514 	/*
515 	 * Number of pages we have currently ballooned out.
516 	 */
517 	unsigned int num_pages_ballooned;
518 	unsigned int num_pages_onlined;
519 	unsigned int num_pages_added;
520 
521 	/*
522 	 * State to manage the ballooning (up) operation.
523 	 */
524 	struct balloon_state balloon_wrk;
525 
526 	/*
527 	 * State to execute the "hot-add" operation.
528 	 */
529 	struct hot_add_wrk ha_wrk;
530 
531 	/*
532 	 * This state tracks if the host has specified a hot-add
533 	 * region.
534 	 */
535 	bool host_specified_ha_region;
536 
537 	/*
538 	 * State to synchronize hot-add.
539 	 */
540 	struct completion  ol_waitevent;
541 	bool ha_waiting;
542 	/*
543 	 * This thread handles hot-add
544 	 * requests from the host as well as notifying
545 	 * the host with regards to memory pressure in
546 	 * the guest.
547 	 */
548 	struct task_struct *thread;
549 
550 	struct mutex ha_region_mutex;
551 
552 	/*
553 	 * A list of hot-add regions.
554 	 */
555 	struct list_head ha_region_list;
556 
557 	/*
558 	 * We start with the highest version we can support
559 	 * and downgrade based on the host; we save here the
560 	 * next version to try.
561 	 */
562 	__u32 next_version;
563 };
564 
565 static struct hv_dynmem_device dm_device;
566 
567 static void post_status(struct hv_dynmem_device *dm);
568 
569 #ifdef CONFIG_MEMORY_HOTPLUG
570 static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
571 			      void *v)
572 {
573 	struct memory_notify *mem = (struct memory_notify *)v;
574 
575 	switch (val) {
576 	case MEM_GOING_ONLINE:
577 		mutex_lock(&dm_device.ha_region_mutex);
578 		break;
579 
580 	case MEM_ONLINE:
581 		dm_device.num_pages_onlined += mem->nr_pages;
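		/* Fall through: the unlock/complete handling below applies to MEM_ONLINE too. */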
582 	case MEM_CANCEL_ONLINE:
583 		if (val == MEM_ONLINE ||
584 		    mutex_is_locked(&dm_device.ha_region_mutex))
585 			mutex_unlock(&dm_device.ha_region_mutex);
586 		if (dm_device.ha_waiting) {
587 			dm_device.ha_waiting = false;
588 			complete(&dm_device.ol_waitevent);
589 		}
590 		break;
591 
592 	case MEM_OFFLINE:
593 		mutex_lock(&dm_device.ha_region_mutex);
594 		dm_device.num_pages_onlined -= mem->nr_pages;
595 		mutex_unlock(&dm_device.ha_region_mutex);
596 		break;
597 	case MEM_GOING_OFFLINE:
598 	case MEM_CANCEL_OFFLINE:
599 		break;
600 	}
601 	return NOTIFY_OK;
602 }
603 
604 static struct notifier_block hv_memory_nb = {
605 	.notifier_call = hv_memory_notifier,
606 	.priority = 0
607 };
608 
609 /* Check if the given page is backed (covered and not in a gap); if so, online it. */
610 static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
611 {
612 	unsigned long cur_start_pgp;
613 	unsigned long cur_end_pgp;
614 	struct hv_hotadd_gap *gap;
615 
616 	cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
617 	cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
618 
619 	/* The page is not backed. */
620 	if (((unsigned long)pg < cur_start_pgp) ||
621 	    ((unsigned long)pg >= cur_end_pgp))
622 		return;
623 
624 	/* Check for gaps. */
625 	list_for_each_entry(gap, &has->gap_list, list) {
626 		cur_start_pgp = (unsigned long)
627 			pfn_to_page(gap->start_pfn);
628 		cur_end_pgp = (unsigned long)
629 			pfn_to_page(gap->end_pfn);
630 		if (((unsigned long)pg >= cur_start_pgp) &&
631 		    ((unsigned long)pg < cur_end_pgp)) {
632 			return;
633 		}
634 	}
635 
636 	/* This frame is currently backed; online the page. */
637 	__online_page_set_limits(pg);
638 	__online_page_increment_counters(pg);
639 	__online_page_free(pg);
640 }
641 
642 static void hv_bring_pgs_online(struct hv_hotadd_state *has,
643 				unsigned long start_pfn, unsigned long size)
644 {
645 	int i;
646 
647 	for (i = 0; i < size; i++)
648 		hv_page_online_one(has, pfn_to_page(start_pfn + i));
649 }
650 
651 static void hv_mem_hot_add(unsigned long start, unsigned long size,
652 				unsigned long pfn_count,
653 				struct hv_hotadd_state *has)
654 {
655 	int ret = 0;
656 	int i, nid;
657 	unsigned long start_pfn;
658 	unsigned long processed_pfn;
659 	unsigned long total_pfn = pfn_count;
660 
661 	for (i = 0; i < (size/HA_CHUNK); i++) {
662 		start_pfn = start + (i * HA_CHUNK);
663 		has->ha_end_pfn +=  HA_CHUNK;
664 
665 		if (total_pfn > HA_CHUNK) {
666 			processed_pfn = HA_CHUNK;
667 			total_pfn -= HA_CHUNK;
668 		} else {
669 			processed_pfn = total_pfn;
670 			total_pfn = 0;
671 		}
672 
673 		has->covered_end_pfn +=  processed_pfn;
674 
675 		init_completion(&dm_device.ol_waitevent);
676 		dm_device.ha_waiting = true;
677 
678 		mutex_unlock(&dm_device.ha_region_mutex);
679 		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
680 		ret = add_memory(nid, PFN_PHYS((start_pfn)),
681 				(HA_CHUNK << PAGE_SHIFT));
682 
683 		if (ret) {
684 			pr_info("hot_add memory failed error is %d\n", ret);
685 			if (ret == -EEXIST) {
686 				/*
687 				 * This error indicates that the failure
688 				 * is not transient. This is the
689 				 * case where the guest's physical address map
690 				 * precludes hot adding memory. Stop all further
691 				 * memory hot-add.
692 				 */
693 				do_hot_add = false;
694 			}
695 			has->ha_end_pfn -= HA_CHUNK;
696 			has->covered_end_pfn -=  processed_pfn;
697 			mutex_lock(&dm_device.ha_region_mutex);
698 			break;
699 		}
700 
701 		/*
702 		 * Wait for the memory block to be onlined.
703 		 * Since the hot add has succeeded, it is ok to
704 		 * proceed even if the pages in the hot added region
705 		 * have not been "onlined" within the allowed time.
706 		 */
707 		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
708 		mutex_lock(&dm_device.ha_region_mutex);
709 		post_status(&dm_device);
710 	}
711 
712 	return;
713 }
714 
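/*
 * Online-page callback registered via set_online_page_callback(); find
 * the hot-add region (HAS) the page belongs to and online the page
 * through hv_page_online_one().
 */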
715 static void hv_online_page(struct page *pg)
716 {
717 	struct list_head *cur;
718 	struct hv_hotadd_state *has;
719 	unsigned long cur_start_pgp;
720 	unsigned long cur_end_pgp;
721 
722 	list_for_each(cur, &dm_device.ha_region_list) {
723 		has = list_entry(cur, struct hv_hotadd_state, list);
724 		cur_start_pgp = (unsigned long)
725 			pfn_to_page(has->start_pfn);
726 		cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
727 
728 		/* The page belongs to a different HAS. */
729 		if (((unsigned long)pg < cur_start_pgp) ||
730 		    ((unsigned long)pg >= cur_end_pgp))
731 			continue;
732 
733 		hv_page_online_one(has, pg);
734 		break;
735 	}
736 }
737 
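/*
 * Check whether the given pfn range falls within a known hot-add region.
 * Returns 1 if it does (recording a gap and/or extending the region as
 * needed), 0 if no region matches, or -ENOMEM if a gap cannot be allocated.
 */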
738 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
739 {
740 	struct list_head *cur;
741 	struct hv_hotadd_state *has;
742 	struct hv_hotadd_gap *gap;
743 	unsigned long residual, new_inc;
744 
745 	if (list_empty(&dm_device.ha_region_list))
746 		return false;
747 
748 	list_for_each(cur, &dm_device.ha_region_list) {
749 		has = list_entry(cur, struct hv_hotadd_state, list);
750 
751 		/*
752 		 * If the pfn range we are dealing with is not in the current
753 		 * "hot add block", move on.
754 		 */
755 		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
756 			continue;
757 
758 		/*
759 		 * If the current start pfn is not where the covered_end
760 		 * is, create a gap and update covered_end_pfn.
761 		 */
762 		if (has->covered_end_pfn != start_pfn) {
763 			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
764 			if (!gap)
765 				return -ENOMEM;
766 
767 			INIT_LIST_HEAD(&gap->list);
768 			gap->start_pfn = has->covered_end_pfn;
769 			gap->end_pfn = start_pfn;
770 			list_add_tail(&gap->list, &has->gap_list);
771 
772 			has->covered_end_pfn = start_pfn;
773 		}
774 
775 		/*
776 		 * If the current hot-add request extends beyond
777 		 * our current limit, extend it.
778 		 */
779 		if ((start_pfn + pfn_cnt) > has->end_pfn) {
780 			residual = (start_pfn + pfn_cnt - has->end_pfn);
781 			/*
782 			 * Extend the region by multiples of HA_CHUNK.
783 			 */
784 			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
785 			if (residual % HA_CHUNK)
786 				new_inc += HA_CHUNK;
787 
788 			has->end_pfn += new_inc;
789 		}
790 
791 		return 1;
792 	}
793 
794 	return 0;
795 }
796 
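/*
 * Online and/or hot-add the requested pfn range within its hot-add region.
 * Returns the number of pages newly covered, or 0 if the range does not
 * belong to any known region.
 */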
797 static unsigned long handle_pg_range(unsigned long pg_start,
798 					unsigned long pg_count)
799 {
800 	unsigned long start_pfn = pg_start;
801 	unsigned long pfn_cnt = pg_count;
802 	unsigned long size;
803 	struct list_head *cur;
804 	struct hv_hotadd_state *has;
805 	unsigned long pgs_ol = 0;
806 	unsigned long old_covered_state;
807 
808 	if (list_empty(&dm_device.ha_region_list))
809 		return 0;
810 
811 	list_for_each(cur, &dm_device.ha_region_list) {
812 		has = list_entry(cur, struct hv_hotadd_state, list);
813 
814 		/*
815 		 * If the pfn range we are dealing with is not in the current
816 		 * "hot add block", move on.
817 		 */
818 		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
819 			continue;
820 
821 		old_covered_state = has->covered_end_pfn;
822 
823 		if (start_pfn < has->ha_end_pfn) {
824 			/*
825 			 * This is the case where we are backing pages
826 			 * in an already hot added region. Bring
827 			 * these pages online first.
828 			 */
829 			pgs_ol = has->ha_end_pfn - start_pfn;
830 			if (pgs_ol > pfn_cnt)
831 				pgs_ol = pfn_cnt;
832 
833 			has->covered_end_pfn +=  pgs_ol;
834 			pfn_cnt -= pgs_ol;
835 			/*
836 			 * Check if the corresponding memory block is already
837 			 * online by checking its last previously backed page.
838 			 * If it is, we need to bring the rest (which was not
839 			 * backed previously) online too.
840 			 */
841 			if (start_pfn > has->start_pfn &&
842 			    !PageReserved(pfn_to_page(start_pfn - 1)))
843 				hv_bring_pgs_online(has, start_pfn, pgs_ol);
844 
845 		}
846 
847 		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
848 			/*
849 			 * We have some residual hot add range
850 			 * that needs to be hot added; hot add
851 			 * it now. Hot add a multiple of
852 			 * HA_CHUNK that fully covers the pages
853 			 * we have.
854 			 */
855 			size = (has->end_pfn - has->ha_end_pfn);
856 			if (pfn_cnt <= size) {
857 				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
858 				if (pfn_cnt % HA_CHUNK)
859 					size += HA_CHUNK;
860 			} else {
861 				pfn_cnt = size;
862 			}
863 			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
864 		}
865 		/*
866 		 * If we managed to online any pages that were given to us,
867 		 * we declare success.
868 		 */
869 		return has->covered_end_pfn - old_covered_state;
870 
871 	}
872 
873 	return 0;
874 }
875 
876 static unsigned long process_hot_add(unsigned long pg_start,
877 					unsigned long pfn_cnt,
878 					unsigned long rg_start,
879 					unsigned long rg_size)
880 {
881 	struct hv_hotadd_state *ha_region = NULL;
882 	int covered;
883 
884 	if (pfn_cnt == 0)
885 		return 0;
886 
887 	if (!dm_device.host_specified_ha_region) {
888 		covered = pfn_covered(pg_start, pfn_cnt);
889 		if (covered < 0)
890 			return 0;
891 
892 		if (covered)
893 			goto do_pg_range;
894 	}
895 
896 	/*
897 	 * If the host has specified a hot-add range, deal with it first.
898 	 */
899 
900 	if (rg_size != 0) {
901 		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
902 		if (!ha_region)
903 			return 0;
904 
905 		INIT_LIST_HEAD(&ha_region->list);
906 		INIT_LIST_HEAD(&ha_region->gap_list);
907 
908 		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
909 		ha_region->start_pfn = rg_start;
910 		ha_region->ha_end_pfn = rg_start;
911 		ha_region->covered_start_pfn = pg_start;
912 		ha_region->covered_end_pfn = pg_start;
913 		ha_region->end_pfn = rg_start + rg_size;
914 	}
915 
916 do_pg_range:
917 	/*
918 	 * Process the specified page range, bringing the pages
919 	 * online if possible.
920 	 */
921 	return handle_pg_range(pg_start, pfn_cnt);
922 }
923 
924 #endif
925 
926 static void hot_add_req(struct work_struct *dummy)
927 {
928 	struct dm_hot_add_response resp;
929 #ifdef CONFIG_MEMORY_HOTPLUG
930 	unsigned long pg_start, pfn_cnt;
931 	unsigned long rg_start, rg_sz;
932 #endif
933 	struct hv_dynmem_device *dm = &dm_device;
934 
935 	memset(&resp, 0, sizeof(struct dm_hot_add_response));
936 	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
937 	resp.hdr.size = sizeof(struct dm_hot_add_response);
938 
939 #ifdef CONFIG_MEMORY_HOTPLUG
940 	mutex_lock(&dm_device.ha_region_mutex);
941 	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
942 	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
943 
944 	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
945 	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
946 
947 	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
948 		unsigned long region_size;
949 		unsigned long region_start;
950 
951 		/*
952 		 * The host has not specified the hot-add region.
953 		 * Based on the hot-add page range being specified,
954 		 * compute a hot-add region that can cover the pages
955 		 * that need to be hot-added while ensuring the alignment
956 		 * and size requirements of Linux as it relates to hot-add.
957 		 */
958 		region_start = pg_start;
959 		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
960 		if (pfn_cnt % HA_CHUNK)
961 			region_size += HA_CHUNK;
962 
963 		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
964 
965 		rg_start = region_start;
966 		rg_sz = region_size;
967 	}
968 
969 	if (do_hot_add)
970 		resp.page_count = process_hot_add(pg_start, pfn_cnt,
971 						rg_start, rg_sz);
972 
973 	dm->num_pages_added += resp.page_count;
974 	mutex_unlock(&dm_device.ha_region_mutex);
975 #endif
976 	/*
977 	 * The result field of the response structure has the
978 	 * following semantics:
979 	 *
980 	 * 1. If all or some pages hot-added: Guest should return success.
981 	 *
982 	 * 2. If no pages could be hot-added:
983 	 *
984 	 * If the guest returns success, then the host
985 	 * will not attempt any further hot-add operations. This
986 	 * signifies a permanent failure.
987 	 *
988 	 * If the guest returns failure, then this failure will be
989 	 * treated as a transient failure and the host may retry the
990 	 * hot-add operation after some delay.
991 	 */
992 	if (resp.page_count > 0)
993 		resp.result = 1;
994 	else if (!do_hot_add)
995 		resp.result = 1;
996 	else
997 		resp.result = 0;
998 
999 	if (!do_hot_add || (resp.page_count == 0))
1000 		pr_info("Memory hot add failed\n");
1001 
1002 	dm->state = DM_INITIALIZED;
1003 	resp.hdr.trans_id = atomic_inc_return(&trans_id);
1004 	vmbus_sendpacket(dm->dev->channel, &resp,
1005 			sizeof(struct dm_hot_add_response),
1006 			(unsigned long)NULL,
1007 			VM_PKT_DATA_INBAND, 0);
1008 }
1009 
1010 static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
1011 {
1012 	struct dm_info_header *info_hdr;
1013 
1014 	info_hdr = (struct dm_info_header *)msg->info;
1015 
1016 	switch (info_hdr->type) {
1017 	case INFO_TYPE_MAX_PAGE_CNT:
1018 		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
1019 		pr_info("Data Size is %d\n", info_hdr->data_size);
1020 		break;
1021 	default:
1022 		pr_info("Received Unknown type: %d\n", info_hdr->type);
1023 	}
1024 }
1025 
1026 static unsigned long compute_balloon_floor(void)
1027 {
1028 	unsigned long min_pages;
1029 #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
1030 	/* Simple continuous piecewise linear function:
1031 	 *  max MiB -> min MiB  gradient
1032 	 *       0         0
1033 	 *      16        16
1034 	 *      32        24
1035 	 *     128        72    (1/2)
1036 	 *     512       168    (1/4)
1037 	 *    2048       360    (1/8)
1038 	 *    8192       744    (1/16)
1039 	 *   32768      1512	(1/32)
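	 * e.g. for a 2048 MiB guest: MB2PAGES(232) + totalram_pages/16
	 * pages, i.e. a 360 MiB floor.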
1040 	 */
1041 	if (totalram_pages < MB2PAGES(128))
1042 		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
1043 	else if (totalram_pages < MB2PAGES(512))
1044 		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
1045 	else if (totalram_pages < MB2PAGES(2048))
1046 		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
1047 	else if (totalram_pages < MB2PAGES(8192))
1048 		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
1049 	else
1050 		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
1051 #undef MB2PAGES
1052 	return min_pages;
1053 }
1054 
1055 /*
1056  * Post our memory pressure status to the host. The host expects
1057  * the guest to post this status
1058  * periodically at 1 second intervals.
1059  *
1060  * The metrics specified in this protocol are very Windows
1061  * specific and so we cook up numbers here to convey our memory
1062  * pressure.
1063  */
1064 
1065 static void post_status(struct hv_dynmem_device *dm)
1066 {
1067 	struct dm_status status;
1068 	struct sysinfo val;
1069 	unsigned long now = jiffies;
1070 	unsigned long last_post = last_post_time;
1071 
1072 	if (pressure_report_delay > 0) {
1073 		--pressure_report_delay;
1074 		return;
1075 	}
1076 
1077 	if (!time_after(now, (last_post_time + HZ)))
1078 		return;
1079 
1080 	si_meminfo(&val);
1081 	memset(&status, 0, sizeof(struct dm_status));
1082 	status.hdr.type = DM_STATUS_REPORT;
1083 	status.hdr.size = sizeof(struct dm_status);
1084 	status.hdr.trans_id = atomic_inc_return(&trans_id);
1085 
1086 	/*
1087 	 * The host expects the guest to report free and committed memory.
1088 	 * Furthermore, the host expects the pressure information to include
1089 	 * the ballooned out pages. For a given amount of memory that we are
1090 	 * managing we need to compute a floor below which we should not
1091 	 * balloon. Compute this and add it to the pressure report.
1092 	 * We also need to report all offline pages (num_pages_added -
1093 	 * num_pages_onlined) as committed to the host, otherwise it can try
1094 	 * asking us to balloon them out.
1095 	 */
1096 	status.num_avail = val.freeram;
1097 	status.num_committed = vm_memory_committed() +
1098 		dm->num_pages_ballooned +
1099 		(dm->num_pages_added > dm->num_pages_onlined ?
1100 		 dm->num_pages_added - dm->num_pages_onlined : 0) +
1101 		compute_balloon_floor();
1102 
1103 	/*
1104 	 * If our transaction ID is no longer current, just don't
1105 	 * send the status. This can happen if we were interrupted
1106 	 * after we picked our transaction ID.
1107 	 */
1108 	if (status.hdr.trans_id != atomic_read(&trans_id))
1109 		return;
1110 
1111 	/*
1112 	 * If the last post time that we sampled has changed,
1113 	 * we have raced, don't post the status.
1114 	 */
1115 	if (last_post != last_post_time)
1116 		return;
1117 
1118 	last_post_time = jiffies;
1119 	vmbus_sendpacket(dm->dev->channel, &status,
1120 				sizeof(struct dm_status),
1121 				(unsigned long)NULL,
1122 				VM_PKT_DATA_INBAND, 0);
1123 
1124 }
1125 
1126 static void free_balloon_pages(struct hv_dynmem_device *dm,
1127 			 union dm_mem_page_range *range_array)
1128 {
1129 	int num_pages = range_array->finfo.page_cnt;
1130 	__u64 start_frame = range_array->finfo.start_page;
1131 	struct page *pg;
1132 	int i;
1133 
1134 	for (i = 0; i < num_pages; i++) {
1135 		pg = pfn_to_page(i + start_frame);
1136 		__free_page(pg);
1137 		dm->num_pages_ballooned--;
1138 	}
1139 }
1140 
1141 
1142 
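/* Return the pages described by @range_array to the kernel and update the balloon count. */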
1143 static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
1144 					unsigned int num_pages,
1145 					struct dm_balloon_response *bl_resp,
1146 					int alloc_unit)
1147 {
1148 	unsigned int i = 0;
1149 	struct page *pg;
1150 
1151 	if (num_pages < alloc_unit)
1152 		return 0;
1153 
1154 	for (i = 0; (i * alloc_unit) < num_pages; i++) {
1155 		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
1156 			PAGE_SIZE)
1157 			return i * alloc_unit;
1158 
1159 		/*
1160 		 * We execute this code in a thread context. Furthermore,
1161 		 * we don't want the kernel to try too hard.
1162 		 */
1163 		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
1164 				__GFP_NOMEMALLOC | __GFP_NOWARN,
1165 				get_order(alloc_unit << PAGE_SHIFT));
1166 
1167 		if (!pg)
1168 			return i * alloc_unit;
1169 
1170 		dm->num_pages_ballooned += alloc_unit;
1171 
1172 		/*
1173 		 * If we allocated 2M pages, split them so we
1174 		 * can free them in any order.
1175 		 */
1176 
1177 		if (alloc_unit != 1)
1178 			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
1179 
1180 		bl_resp->range_count++;
1181 		bl_resp->range_array[i].finfo.start_page =
1182 			page_to_pfn(pg);
1183 		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
1184 		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
1185 
1186 	}
1187 
1188 	return num_pages;
1189 }
1190 
1191 
1192 
1193 static void balloon_up(struct work_struct *dummy)
1194 {
1195 	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
1196 	unsigned int num_ballooned = 0;
1197 	struct dm_balloon_response *bl_resp;
1198 	int alloc_unit;
1199 	int ret;
1200 	bool done = false;
1201 	int i;
1202 	struct sysinfo val;
1203 	unsigned long floor;
1204 
1205 	/* The host balloons pages in 2M granularity. */
1206 	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
1207 
1208 	/*
1209 	 * We will attempt 2M allocations. However, if we fail to
1210 	 * allocate 2M chunks, we will go back to 4k allocations.
1211 	 */
1212 	alloc_unit = 512;
1213 
1214 	si_meminfo(&val);
1215 	floor = compute_balloon_floor();
1216 
1217 	/* Refuse to balloon below the floor, keep the 2M granularity. */
1218 	if (val.freeram < num_pages || val.freeram - num_pages < floor) {
1219 		num_pages = val.freeram > floor ? (val.freeram - floor) : 0;
1220 		num_pages -= num_pages % PAGES_IN_2M;
1221 	}
1222 
1223 	while (!done) {
1224 		bl_resp = (struct dm_balloon_response *)send_buffer;
1225 		memset(send_buffer, 0, PAGE_SIZE);
1226 		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1227 		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1228 		bl_resp->more_pages = 1;
1229 
1230 
1231 		num_pages -= num_ballooned;
1232 		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1233 						    bl_resp, alloc_unit);
1234 
1235 		if (alloc_unit != 1 && num_ballooned == 0) {
1236 			alloc_unit = 1;
1237 			continue;
1238 		}
1239 
1240 		if (num_ballooned == 0 || num_ballooned == num_pages) {
1241 			bl_resp->more_pages = 0;
1242 			done = true;
1243 			dm_device.state = DM_INITIALIZED;
1244 		}
1245 
1246 		/*
1247 		 * We are pushing a lot of data through the channel;
1248 		 * deal with transient failures caused by a lack of
1249 		 * space in the ring buffer.
1250 		 */
1251 
1252 		do {
1253 			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
1254 			ret = vmbus_sendpacket(dm_device.dev->channel,
1255 						bl_resp,
1256 						bl_resp->hdr.size,
1257 						(unsigned long)NULL,
1258 						VM_PKT_DATA_INBAND, 0);
1259 
1260 			if (ret == -EAGAIN)
1261 				msleep(20);
1262 			post_status(&dm_device);
1263 		} while (ret == -EAGAIN);
1264 
1265 		if (ret) {
1266 			/*
1267 			 * Free up the memory we allocated.
1268 			 */
1269 			pr_info("Balloon response failed\n");
1270 
1271 			for (i = 0; i < bl_resp->range_count; i++)
1272 				free_balloon_pages(&dm_device,
1273 						 &bl_resp->range_array[i]);
1274 
1275 			done = true;
1276 		}
1277 	}
1278 
1279 }
1280 
1281 static void balloon_down(struct hv_dynmem_device *dm,
1282 			struct dm_unballoon_request *req)
1283 {
1284 	union dm_mem_page_range *range_array = req->range_array;
1285 	int range_count = req->range_count;
1286 	struct dm_unballoon_response resp;
1287 	int i;
1288 
1289 	for (i = 0; i < range_count; i++) {
1290 		free_balloon_pages(dm, &range_array[i]);
1291 		complete(&dm_device.config_event);
1292 	}
1293 
1294 	if (req->more_pages == 1)
1295 		return;
1296 
1297 	memset(&resp, 0, sizeof(struct dm_unballoon_response));
1298 	resp.hdr.type = DM_UNBALLOON_RESPONSE;
1299 	resp.hdr.trans_id = atomic_inc_return(&trans_id);
1300 	resp.hdr.size = sizeof(struct dm_unballoon_response);
1301 
1302 	vmbus_sendpacket(dm_device.dev->channel, &resp,
1303 				sizeof(struct dm_unballoon_response),
1304 				(unsigned long)NULL,
1305 				VM_PKT_DATA_INBAND, 0);
1306 
1307 	dm->state = DM_INITIALIZED;
1308 }
1309 
1310 static void balloon_onchannelcallback(void *context);
1311 
1312 static int dm_thread_func(void *dm_dev)
1313 {
1314 	struct hv_dynmem_device *dm = dm_dev;
1315 
1316 	while (!kthread_should_stop()) {
1317 		wait_for_completion_interruptible_timeout(
1318 						&dm_device.config_event, 1*HZ);
1319 		/*
1320 		 * The host expects us to post information on the memory
1321 		 * pressure every second.
1322 		 */
1323 		reinit_completion(&dm_device.config_event);
1324 		post_status(dm);
1325 	}
1326 
1327 	return 0;
1328 }
1329 
1330 
1331 static void version_resp(struct hv_dynmem_device *dm,
1332 			struct dm_version_response *vresp)
1333 {
1334 	struct dm_version_request version_req;
1335 	int ret;
1336 
1337 	if (vresp->is_accepted) {
1338 		/*
1339 		 * We are done; wake up the
1340 		 * context waiting for version
1341 		 * negotiation.
1342 		 */
1343 		complete(&dm->host_event);
1344 		return;
1345 	}
1346 	/*
1347 	 * If there are more versions to try, continue
1348 	 * with negotiations; if not,
1349 	 * shut down the service since we are not able
1350 	 * to negotiate a suitable version number
1351 	 * with the host.
1352 	 */
1353 	if (dm->next_version == 0)
1354 		goto version_error;
1355 
1356 	memset(&version_req, 0, sizeof(struct dm_version_request));
1357 	version_req.hdr.type = DM_VERSION_REQUEST;
1358 	version_req.hdr.size = sizeof(struct dm_version_request);
1359 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1360 	version_req.version.version = dm->next_version;
1361 
1362 	/*
1363 	 * Set the next version to try in case current version fails.
1364 	 * Win7 protocol ought to be the last one to try.
1365 	 */
1366 	switch (version_req.version.version) {
1367 	case DYNMEM_PROTOCOL_VERSION_WIN8:
1368 		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1369 		version_req.is_last_attempt = 0;
1370 		break;
1371 	default:
1372 		dm->next_version = 0;
1373 		version_req.is_last_attempt = 1;
1374 	}
1375 
1376 	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
1377 				sizeof(struct dm_version_request),
1378 				(unsigned long)NULL,
1379 				VM_PKT_DATA_INBAND, 0);
1380 
1381 	if (ret)
1382 		goto version_error;
1383 
1384 	return;
1385 
1386 version_error:
1387 	dm->state = DM_INIT_ERROR;
1388 	complete(&dm->host_event);
1389 }
1390 
1391 static void cap_resp(struct hv_dynmem_device *dm,
1392 			struct dm_capabilities_resp_msg *cap_resp)
1393 {
1394 	if (!cap_resp->is_accepted) {
1395 		pr_info("Capabilities not accepted by host\n");
1396 		dm->state = DM_INIT_ERROR;
1397 	}
1398 	complete(&dm->host_event);
1399 }
1400 
1401 static void balloon_onchannelcallback(void *context)
1402 {
1403 	struct hv_device *dev = context;
1404 	u32 recvlen;
1405 	u64 requestid;
1406 	struct dm_message *dm_msg;
1407 	struct dm_header *dm_hdr;
1408 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1409 	struct dm_balloon *bal_msg;
1410 	struct dm_hot_add *ha_msg;
1411 	union dm_mem_page_range *ha_pg_range;
1412 	union dm_mem_page_range *ha_region;
1413 
1414 	memset(recv_buffer, 0, sizeof(recv_buffer));
1415 	vmbus_recvpacket(dev->channel, recv_buffer,
1416 			 PAGE_SIZE, &recvlen, &requestid);
1417 
1418 	if (recvlen > 0) {
1419 		dm_msg = (struct dm_message *)recv_buffer;
1420 		dm_hdr = &dm_msg->hdr;
1421 
1422 		switch (dm_hdr->type) {
1423 		case DM_VERSION_RESPONSE:
1424 			version_resp(dm,
1425 				 (struct dm_version_response *)dm_msg);
1426 			break;
1427 
1428 		case DM_CAPABILITIES_RESPONSE:
1429 			cap_resp(dm,
1430 				 (struct dm_capabilities_resp_msg *)dm_msg);
1431 			break;
1432 
1433 		case DM_BALLOON_REQUEST:
1434 			if (dm->state == DM_BALLOON_UP)
1435 				pr_warn("Currently ballooning\n");
1436 			bal_msg = (struct dm_balloon *)recv_buffer;
1437 			dm->state = DM_BALLOON_UP;
1438 			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
1439 			schedule_work(&dm_device.balloon_wrk.wrk);
1440 			break;
1441 
1442 		case DM_UNBALLOON_REQUEST:
1443 			dm->state = DM_BALLOON_DOWN;
1444 			balloon_down(dm,
1445 				 (struct dm_unballoon_request *)recv_buffer);
1446 			break;
1447 
1448 		case DM_MEM_HOT_ADD_REQUEST:
1449 			if (dm->state == DM_HOT_ADD)
1450 				pr_warn("Currently hot-adding\n");
1451 			dm->state = DM_HOT_ADD;
1452 			ha_msg = (struct dm_hot_add *)recv_buffer;
1453 			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
1454 				/*
1455 				 * This is a normal hot-add request specifying
1456 				 * hot-add memory.
1457 				 */
1458 				ha_pg_range = &ha_msg->range;
1459 				dm->ha_wrk.ha_page_range = *ha_pg_range;
1460 				dm->ha_wrk.ha_region_range.page_range = 0;
1461 			} else {
1462 				/*
1463 				 * Host is specifying that we first hot-add
1464 				 * a region and then partially populate this
1465 				 * region.
1466 				 */
1467 				dm->host_specified_ha_region = true;
1468 				ha_pg_range = &ha_msg->range;
1469 				ha_region = &ha_pg_range[1];
1470 				dm->ha_wrk.ha_page_range = *ha_pg_range;
1471 				dm->ha_wrk.ha_region_range = *ha_region;
1472 			}
1473 			schedule_work(&dm_device.ha_wrk.wrk);
1474 			break;
1475 
1476 		case DM_INFO_MESSAGE:
1477 			process_info(dm, (struct dm_info_msg *)dm_msg);
1478 			break;
1479 
1480 		default:
1481 			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
1482 
1483 		}
1484 	}
1485 
1486 }
1487 
1488 static int balloon_probe(struct hv_device *dev,
1489 			const struct hv_vmbus_device_id *dev_id)
1490 {
1491 	int ret;
1492 	unsigned long t;
1493 	struct dm_version_request version_req;
1494 	struct dm_capabilities cap_msg;
1495 
1496 	do_hot_add = hot_add;
1497 
1498 	/*
1499 	 * First allocate a send buffer.
1500 	 */
1501 
1502 	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1503 	if (!send_buffer)
1504 		return -ENOMEM;
1505 
1506 	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1507 			balloon_onchannelcallback, dev);
1508 
1509 	if (ret)
1510 		goto probe_error0;
1511 
1512 	dm_device.dev = dev;
1513 	dm_device.state = DM_INITIALIZING;
1514 	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1515 	init_completion(&dm_device.host_event);
1516 	init_completion(&dm_device.config_event);
1517 	INIT_LIST_HEAD(&dm_device.ha_region_list);
1518 	mutex_init(&dm_device.ha_region_mutex);
1519 	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1520 	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1521 	dm_device.host_specified_ha_region = false;
1522 
1523 	dm_device.thread =
1524 		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1525 	if (IS_ERR(dm_device.thread)) {
1526 		ret = PTR_ERR(dm_device.thread);
1527 		goto probe_error1;
1528 	}
1529 
1530 #ifdef CONFIG_MEMORY_HOTPLUG
1531 	set_online_page_callback(&hv_online_page);
1532 	register_memory_notifier(&hv_memory_nb);
1533 #endif
1534 
1535 	hv_set_drvdata(dev, &dm_device);
1536 	/*
1537 	 * Initiate the handshake with the host and negotiate
1538 	 * a version that the host can support. We start with the
1539 	 * highest version number and go down if the host cannot
1540 	 * support it.
1541 	 */
1542 	memset(&version_req, 0, sizeof(struct dm_version_request));
1543 	version_req.hdr.type = DM_VERSION_REQUEST;
1544 	version_req.hdr.size = sizeof(struct dm_version_request);
1545 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1546 	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
1547 	version_req.is_last_attempt = 0;
1548 
1549 	ret = vmbus_sendpacket(dev->channel, &version_req,
1550 				sizeof(struct dm_version_request),
1551 				(unsigned long)NULL,
1552 				VM_PKT_DATA_INBAND, 0);
1553 	if (ret)
1554 		goto probe_error2;
1555 
1556 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1557 	if (t == 0) {
1558 		ret = -ETIMEDOUT;
1559 		goto probe_error2;
1560 	}
1561 
1562 	/*
1563 	 * If we could not negotiate a compatible version with the host,
1564 	 * fail the probe function.
1565 	 */
1566 	if (dm_device.state == DM_INIT_ERROR) {
1567 		ret = -ETIMEDOUT;
1568 		goto probe_error2;
1569 	}
1570 	/*
1571 	 * Now submit our capabilities to the host.
1572 	 */
1573 	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1574 	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1575 	cap_msg.hdr.size = sizeof(struct dm_capabilities);
1576 	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1577 
1578 	cap_msg.caps.cap_bits.balloon = 1;
1579 	cap_msg.caps.cap_bits.hot_add = 1;
1580 
1581 	/*
1582 	 * Specify our alignment requirements as they relate to
1583 	 * memory hot-add. Specify 128MB alignment.
1584 	 */
1585 	cap_msg.caps.cap_bits.hot_add_alignment = 7;
1586 
1587 	/*
1588 	 * Currently the host does not use these
1589 	 * values and we set them to what is done in the
1590 	 * Windows driver.
1591 	 */
1592 	cap_msg.min_page_cnt = 0;
1593 	cap_msg.max_page_number = -1;
1594 
1595 	ret = vmbus_sendpacket(dev->channel, &cap_msg,
1596 				sizeof(struct dm_capabilities),
1597 				(unsigned long)NULL,
1598 				VM_PKT_DATA_INBAND, 0);
1599 	if (ret)
1600 		goto probe_error2;
1601 
1602 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1603 	if (t == 0) {
1604 		ret = -ETIMEDOUT;
1605 		goto probe_error2;
1606 	}
1607 
1608 	/*
1609 	 * If the host does not like our capabilities,
1610 	 * fail the probe function.
1611 	 */
1612 	if (dm_device.state == DM_INIT_ERROR) {
1613 		ret = -ETIMEDOUT;
1614 		goto probe_error2;
1615 	}
1616 
1617 	dm_device.state = DM_INITIALIZED;
1618 
1619 	return 0;
1620 
1621 probe_error2:
1622 #ifdef CONFIG_MEMORY_HOTPLUG
1623 	restore_online_page_callback(&hv_online_page);
1624 #endif
1625 	kthread_stop(dm_device.thread);
1626 
1627 probe_error1:
1628 	vmbus_close(dev->channel);
1629 probe_error0:
1630 	kfree(send_buffer);
1631 	return ret;
1632 }
1633 
1634 static int balloon_remove(struct hv_device *dev)
1635 {
1636 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1637 	struct list_head *cur, *tmp;
1638 	struct hv_hotadd_state *has;
1639 	struct hv_hotadd_gap *gap, *tmp_gap;
1640 
1641 	if (dm->num_pages_ballooned != 0)
1642 		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
1643 
1644 	cancel_work_sync(&dm->balloon_wrk.wrk);
1645 	cancel_work_sync(&dm->ha_wrk.wrk);
1646 
1647 	vmbus_close(dev->channel);
1648 	kthread_stop(dm->thread);
1649 	kfree(send_buffer);
1650 #ifdef CONFIG_MEMORY_HOTPLUG
1651 	restore_online_page_callback(&hv_online_page);
1652 	unregister_memory_notifier(&hv_memory_nb);
1653 #endif
1654 	list_for_each_safe(cur, tmp, &dm->ha_region_list) {
1655 		has = list_entry(cur, struct hv_hotadd_state, list);
1656 		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
1657 			list_del(&gap->list);
1658 			kfree(gap);
1659 		}
1660 		list_del(&has->list);
1661 		kfree(has);
1662 	}
1663 
1664 	return 0;
1665 }
1666 
1667 static const struct hv_vmbus_device_id id_table[] = {
1668 	/* Dynamic Memory Class ID */
1669 	/* 525074DC-8985-46e2-8057-A307DC18A502 */
1670 	{ HV_DM_GUID, },
1671 	{ },
1672 };
1673 
1674 MODULE_DEVICE_TABLE(vmbus, id_table);
1675 
1676 static  struct hv_driver balloon_drv = {
1677 	.name = "hv_balloon",
1678 	.id_table = id_table,
1679 	.probe =  balloon_probe,
1680 	.remove =  balloon_remove,
1681 };
1682 
1683 static int __init init_balloon_drv(void)
1684 {
1685 
1686 	return vmbus_driver_register(&balloon_drv);
1687 }
1688 
1689 module_init(init_balloon_drv);
1690 
1691 MODULE_DESCRIPTION("Hyper-V Balloon");
1692 MODULE_LICENSE("GPL");
1693