// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
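
/*
 * Illustrative usage of the parameter above (a sketch, assuming the module
 * is loaded under its usual name, vmw_balloon):
 *
 *	modprobe vmw_balloon vmwballoon_shrinker_enable=1
 *
 * Since the permissions are 0444, the value can be inspected but not changed
 * at runtime:
 *
 *	cat /sys/module/vmw_balloon/parameters/vmwballoon_shrinker_enable
 */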

/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY		(5)

/* Maximum number of refused pages we accumulate during the inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC		0x0ba11007

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated with any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
};

#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)

/*
 * 64-bit targets are only supported in 64-bit builds.
 */
#ifdef CONFIG_64BIT
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
					| VMW_BALLOON_64_BIT_TARGET)
#else
#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
#endif

enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};

#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)

static const char * const vmballoon_page_size_names[] = {
	[VMW_BALLOON_4K_PAGE]			= "4k",
	[VMW_BALLOON_2M_PAGE]			= "2M"
};

enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};

enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};

#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)

/**
 * enum vmballoon_cmd_type - backdoor commands.
 *
 * Availability of the commands is as follows:
 *
 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 *
 * If the host reports that %VMW_BALLOON_BASIC_CMDS is supported then the
 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 *
 * If the host reports that %VMW_BALLOON_BATCHED_CMDS is supported then the
 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 * are available.
 *
 * If the host reports that %VMW_BALLOON_BATCHED_2M_CMDS is supported then the
 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 * commands are available.
 *
 * If the host reports that %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported
 * then the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is available.
 *
 * @VMW_BALLOON_CMD_START: Communicates the supported protocol version to
 *			   the hypervisor.
 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 *			    to be deflated from the balloon.
 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 *			      runs in the VM.
 * @VMW_BALLOON_CMD_BATCHED_LOCK: Informs the hypervisor about a batch of
 *				  ballooned pages (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Informs the hypervisor about a batch of
 *				  pages that are about to be deflated from the
 *				  balloon (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 *				     for 2MB pages.
 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 *				       @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 *				       pages.
 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set a doorbell
 *				       notification that is invoked when the
 *				       balloon size changes.
 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	/* No command 5 */
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};

#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)

enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))

static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};

#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)

enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_SHRINK,
	VMW_BALLOON_STAT_SHRINK_FREE,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};

#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)

static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);

struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	struct list_head prealloc_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
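
/*
 * Layout sketch (an illustration, not part of the protocol definition): with
 * 4KB base pages (PAGE_SHIFT == 12) the entry packs into a single 64-bit
 * word as 5 status bits, PAGE_SHIFT - 5 == 7 reserved bits and a 52-bit PFN,
 * so the hypervisor can reuse the low PAGE_SHIFT bits of the physical
 * address for the status:
 *
 *	bits  0..4	status (cf. enum vmballoon_error_codes)
 *	bits  5..11	reserved, must be zero
 *	bits 12..63	pfn
 *
 * A sanity check one could add:
 *
 *	BUILD_BUG_ON(sizeof(struct vmballoon_batch_entry) != sizeof(u64));
 */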

struct vmballoon {
	/**
	 * @max_page_size: maximum supported page size for ballooning.
	 *
	 * Protected by @conf_sem.
	 */
	enum vmballoon_page_size_type max_page_size;

	/**
	 * @size: balloon actual size in basic page size (frames).
	 *
	 * While we currently do not support a size that exceeds 32 bits, use
	 * 64 bits in preparation for future support.
	 */
	atomic64_t size;

	/**
	 * @target: balloon target size in basic page size (frames).
	 *
	 * We do not protect the target under the assumption that setting the
	 * value is always done through a single write. If this assumption ever
	 * breaks, we would have to use X_ONCE for accesses, and suffer the less
	 * optimized code. Although we may read a stale target value if multiple
	 * accesses happen at once, the performance impact should be minor.
	 */
	unsigned long target;

	/**
	 * @reset_required: reset flag
	 *
	 * Setting this flag may introduce races, but the code is expected to
	 * handle them gracefully. In the worst case, another operation will
	 * fail as reset did not take place. Clearing the flag is done while
	 * holding @conf_sem for write.
	 */
	bool reset_required;

	/**
	 * @capabilities: hypervisor balloon capabilities.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up to
	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	/**
	 * @batch_max_pages: maximum pages that can be locked/unlocked.
	 *
	 * Indicates the number of pages that the hypervisor can lock or unlock
	 * at once, according to whether batching is enabled. If batching is
	 * disabled, only a single page can be locked/unlocked on each
	 * operation.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned int batch_max_pages;

	/**
	 * @page: page to be locked/unlocked by the hypervisor
	 *
	 * @page is only used when batching is disabled and a single page is
	 * reclaimed on each iteration.
	 *
	 * Protected by @comm_lock.
	 */
	struct page *page;

	/**
	 * @shrink_timeout: timeout until the next inflation.
	 *
	 * After a shrink event, indicates the time in jiffies after which
	 * inflation is allowed again. Can be written concurrently with reads,
	 * so must use READ_ONCE/WRITE_ONCE when accessing.
	 */
	unsigned long shrink_timeout;

	/* statistics */
	struct vmballoon_stats *stats;

#ifdef CONFIG_DEBUG_FS
	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	/**
	 * @b_dev_info: balloon device information descriptor.
	 */
	struct balloon_dev_info b_dev_info;

	struct delayed_work dwork;

	/**
	 * @huge_pages: list of the inflated 2MB pages.
	 *
	 * Protected by @b_dev_info.pages_lock .
	 */
	struct list_head huge_pages;

	/**
	 * @vmci_doorbell: handle of the VMCI doorbell.
	 *
	 * Protected by @conf_sem.
	 */
	struct vmci_handle vmci_doorbell;

	/**
	 * @conf_sem: semaphore to protect the configuration and the statistics.
	 */
	struct rw_semaphore conf_sem;

	/**
	 * @comm_lock: lock to protect the communication with the host.
	 *
	 * Lock ordering: @conf_sem -> @comm_lock .
	 */
	spinlock_t comm_lock;

	/**
	 * @shrinker: shrinker interface that is used to avoid over-inflation.
	 */
	struct shrinker shrinker;

	/**
	 * @shrinker_registered: whether the shrinker was registered.
	 *
	 * The shrinker interface does not gracefully handle the removal of a
	 * shrinker that was not registered before. This indication allows us
	 * to simplify the unregistration process.
	 */
	bool shrinker_registered;
};

static struct vmballoon balloon;

struct vmballoon_stats {
	/* timer / doorbell operations */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* allocation statistics for huge and small pages */
	atomic64_t
	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations: total operations, and failures */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};

static inline bool is_vmballoon_stats_on(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) &&
		static_branch_unlikely(&balloon_stat_enabled);
}

static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					  enum vmballoon_op_stat_type type)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->ops[op][type]);
}

static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_gen_add(struct vmballoon *b,
					   enum vmballoon_stat_general stat,
					   unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_page_inc(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->page_stat[stat][size]);
}

static inline void vmballoon_stats_page_add(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size,
					    unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->page_stat[stat][size]);
}

static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}
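
/*
 * Illustrative call sequence (a sketch of a hypothetical caller, mirroring
 * vmballoon_send_get_target() below): since VMW_BALLOON_CMD_GET_TARGET is in
 * VMW_BALLOON_CMD_WITH_TARGET_MASK, a successful command also updates
 * b->target as a side effect:
 *
 *	unsigned long status;
 *
 *	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET,
 *			       totalram_pages(), 0);
 *	if (status == VMW_BALLOON_SUCCESS)
 *		pr_debug("new target: %lu\n", READ_ONCE(b->target));
 */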

/*
 * Send the "start" command to the host, communicating the supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}

/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate the guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command is
 * normally issued after sending the "start" command and is part of the
 * standard reset sequence.
 *
 * Return: zero on success or an appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}

/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}
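
/*
 * Worked example (x86-64, where PAGE_SHIFT == 12 and PMD_SHIFT == 21):
 * vmballoon_page_order(VMW_BALLOON_2M_PAGE) == 21 - 12 == 9, so
 * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE) == 1 << 9 == 512 basic 4KB
 * frames per 2MB page.
 */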

/**
 * vmballoon_mark_page_offline() - mark a page as offline
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_offline(struct page *page,
			    enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__SetPageOffline(page + i);
}

/**
 * vmballoon_mark_page_online() - mark a page as online
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_online(struct page *page,
			   enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__ClearPageOffline(page + i);
}

/**
 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success, -EINVAL if the limit does not fit in 32 bits, as
 * required by the host-guest protocol, and -EIO if an error occurred while
 * communicating with the host.
 */
static int vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;

	limit = totalram_pages();

	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
	    limit != (u32)limit)
		return -EINVAL;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_alloc_page_list - allocates a list of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 * @req_n_pages: the number of requested pages.
 *
 * Tries to allocate @req_n_pages pages, adds them to the list of balloon
 * pages in @ctl.pages and updates @ctl.n_pages to reflect the number of pages
 * that were actually allocated.
 *
 * Return: zero on success or an error code otherwise.
 */
static int vmballoon_alloc_page_list(struct vmballoon *b,
				     struct vmballoon_ctl *ctl,
				     unsigned int req_n_pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < req_n_pages; i++) {
		/*
		 * First check if we happen to have pages that were allocated
		 * before. This happens when a 2MB page is rejected by the
		 * hypervisor during inflation and is then split into 4KB
		 * pages.
		 */
		if (!list_empty(&ctl->prealloc_pages)) {
			page = list_first_entry(&ctl->prealloc_pages,
						struct page, lru);
			list_del(&page->lru);
		} else {
			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
			else
				page = balloon_page_alloc();

			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
						 ctl->page_size);
		}

		if (page) {
			/* Success. Add the page to the list and continue. */
			list_add(&page->lru, &ctl->pages);
			continue;
		}

		/* Allocation failed. Update statistics and stop. */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
					 ctl->page_size);
		break;
	}

	ctl->n_pages = i;

	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}

/**
 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
 *
 * @b: pointer for %struct vmballoon.
 * @page: pointer for the page whose result should be handled.
 * @page_size: size of the page.
 * @status: status of the operation as provided by the hypervisor.
 *
 * Return: zero on success, -EIO if the hypervisor reported an error.
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}

/**
 * vmballoon_status_page - returns the status of (un)lock operation
 *
 * @b: pointer to the balloon.
 * @idx: index for the page for which the operation is performed.
 * @p: pointer to where the page struct is returned.
 *
 * Following a lock or unlock operation, returns the status of the operation
 * for an individual page. Provides the page that the operation was performed
 * on in the @p argument.
 *
 * Returns: The status of a lock or unlock operation for an individual page.
 */
static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
					   struct page **p)
{
	if (static_branch_likely(&vmw_balloon_batching)) {
		/* batching mode */
		*p = pfn_to_page(b->batch_page[idx].pfn);
		return b->batch_page[idx].status;
	}

	/* non-batching mode */
	*p = b->page;

	/*
	 * If a failure occurs, the indication will be provided in the status
	 * of the entire operation, which is considered before the individual
	 * page status. So for non-batching mode, the indication is always of
	 * success.
	 */
	return VMW_BALLOON_SUCCESS;
}

/**
 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 * @b: pointer to the balloon.
 * @num_pages: number of inflated/deflated pages.
 * @page_size: size of the page.
 * @op: the type of operation (lock or unlock).
 *
 * Notify the host about page(s) that were ballooned (or removed from the
 * balloon) so that the host can use them without fear that the guest will
 * need them (or can stop using them, since the VM does). The host may reject
 * some pages; we need to check the return value and maybe submit a different
 * page. The pages that are inflated/deflated are pointed to by @b->page.
 *
 * Return: result as provided by the hypervisor.
 */
static unsigned long vmballoon_lock_op(struct vmballoon *b,
				       unsigned int num_pages,
				       enum vmballoon_page_size_type page_size,
				       enum vmballoon_op op)
{
	unsigned long cmd, pfn;

	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching)) {
		if (op == VMW_BALLOON_INFLATE)
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
				VMW_BALLOON_CMD_BATCHED_LOCK;
		else
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
				VMW_BALLOON_CMD_BATCHED_UNLOCK;

		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
	} else {
		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
						  VMW_BALLOON_CMD_UNLOCK;
		pfn = page_to_pfn(b->page);

		/* In non-batching mode, PFNs must fit in 32-bit */
		if (unlikely(pfn != (u32)pfn))
			return VMW_BALLOON_ERROR_PPN_INVALID;
	}

	return vmballoon_cmd(b, cmd, pfn, num_pages);
}

/**
 * vmballoon_add_page - adds a page towards lock/unlock operation.
 *
 * @b: pointer to the balloon.
 * @idx: index of the page to be ballooned in this batch.
 * @p: pointer to the page that is about to be ballooned.
 *
 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 */
static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
			       struct page *p)
{
	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching))
		b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
	else
		b->page = p;
}

/**
 * vmballoon_lock - lock or unlock a batch of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * Notifies the host about ballooned pages (after inflation or deflation,
 * according to @ctl). If the host rejects a page, it is put on the @ctl
 * refused list. These refused pages are then released when moving to the
 * next size of pages.
 *
 * Note that we neither free any @page here nor put them back on the ballooned
 * pages list. Instead we queue them for later processing. We do that for
 * several reasons. First, we do not want to free the page under the lock.
 * Second, it allows us to unify the handling of lock and unlock. In the
 * inflate case, the caller will check if there are too many refused pages and
 * release them. Although it is not identical to the past behavior, it should
 * not affect performance.
 */
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
	unsigned long batch_status;
	struct page *page;
	unsigned int i, num_pages;

	num_pages = ctl->n_pages;
	if (num_pages == 0)
		return 0;

	/* communication with the host is done under the communication lock */
	spin_lock(&b->comm_lock);

	i = 0;
	list_for_each_entry(page, &ctl->pages, lru)
		vmballoon_add_page(b, i++, page);

	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
					 ctl->op);

	/*
	 * Iterate over the pages in the provided list. Since we are changing
	 * @ctl->n_pages we save the original value in @num_pages and use this
	 * value to bound the loop.
	 */
	for (i = 0; i < num_pages; i++) {
		unsigned long status;

		status = vmballoon_status_page(b, i, &page);

		/*
		 * Failure of the whole batch overrides the result of a single
		 * operation.
		 */
		if (batch_status != VMW_BALLOON_SUCCESS)
			status = batch_status;

		/* Continue if no error happened */
		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
						 status))
			continue;

		/*
		 * An error happened. Move the page to the refused list and
		 * update the page counts.
		 */
		list_move(&page->lru, &ctl->refused_pages);
		ctl->n_pages--;
		ctl->n_refused_pages++;
	}

	spin_unlock(&b->comm_lock);

	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_release_page_list() - Releases a page list
 *
 * @page_list: list of pages to release.
 * @n_pages: pointer to the number of pages.
 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 *
 * Releases the list of pages and zeros the number of pages.
 */
static void vmballoon_release_page_list(struct list_head *page_list,
				       int *n_pages,
				       enum vmballoon_page_size_type page_size)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, page_list, lru) {
		list_del(&page->lru);
		__free_pages(page, vmballoon_page_order(page_size));
	}

	if (n_pages)
		*n_pages = 0;
}


/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    struct vmballoon_ctl *ctl)
{
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
				 ctl->page_size);

	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
				    ctl->page_size);
}

/**
 * vmballoon_change - retrieve the required balloon change
 *
 * @b: pointer for the balloon.
 *
 * Return: the required change for the balloon size. A positive number
 * indicates inflation, a negative number indicates a deflation.
 */
static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/*
	 * We must cast first because of int sizes; otherwise we might get huge
	 * positives instead of negatives.
	 */

	if (b->reset_required)
		return 0;

	/* consider a 2MB slack on deflate, unless the balloon is emptied */
	if (target < size && target != 0 &&
	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
		return 0;

	/* If an out-of-memory event recently occurred, inflation is disallowed. */
	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
		return 0;

	return target - size;
}
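
/*
 * Worked example of the deflate slack above (assuming 4KB basic frames and
 * 512 frames per 2MB page): with size == 10240 and target == 10000, the
 * difference of 240 frames is below the 512-frame slack, so 0 is returned
 * and no deflation takes place. With target == 0 the slack is ignored and
 * -10240 is returned, so the balloon is emptied completely.
 */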

/**
 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 *
 * @b: pointer to balloon.
 * @pages: list of pages to enqueue.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 *
 * Enqueues the provided list of pages on the ballooned page list, clears the
 * list and zeroes the number of pages that was provided.
 */
static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	unsigned long flags;
	struct page *page;

	if (page_size == VMW_BALLOON_4K_PAGE) {
		balloon_page_list_enqueue(&b->b_dev_info, pages);
	} else {
		/*
		 * Keep the huge pages in a local list which is not available
		 * for the balloon compaction mechanism.
		 */
		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

		list_for_each_entry(page, pages, lru) {
			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
		}

		list_splice_init(pages, &b->huge_pages);
		__count_vm_events(BALLOON_INFLATE, *n_pages *
				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	}

	*n_pages = 0;
}

/**
 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
 *
 * @b: pointer to balloon.
 * @pages: list to which the dequeued pages are added.
 * @n_pages: pointer to number of pages in list. The value is set to the
 *	     number of dequeued pages.
 * @page_size: whether the pages are 2MB or 4KB pages.
 * @n_req_pages: the number of requested pages.
 *
 * Dequeues the number of requested pages from the balloon for deflation. The
 * number of dequeued pages may be lower if not enough pages of the requested
 * size are available.
 */
static void vmballoon_dequeue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size,
					unsigned int n_req_pages)
{
	struct page *page, *tmp;
	unsigned int i = 0;
	unsigned long flags;

	/* In the case of 4k pages, use the compaction infrastructure */
	if (page_size == VMW_BALLOON_4K_PAGE) {
		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
						     n_req_pages);
		return;
	}

	/* 2MB pages */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);

		list_move(&page->lru, pages);
		if (++i == n_req_pages)
			break;
	}

	__count_vm_events(BALLOON_DEFLATE,
			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	*n_pages = i;
}

/**
 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
 *
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
 * due to one or a few 4KB pages. These 2MB pages may keep being allocated and
 * then refused. To prevent this case, this function splits the refused pages
 * into 4KB pages and adds them to the @prealloc_pages list.
 */
static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
{
	struct page *page, *tmp;
	unsigned int i, order;

	order = vmballoon_page_order(ctl->page_size);

	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
		list_del(&page->lru);
		split_page(page, order);
		for (i = 0; i < (1 << order); i++)
			list_add(&page[i].lru, &ctl->prealloc_pages);
	}
	ctl->n_refused_pages = 0;
}
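
/*
 * Example (x86-64): a refused 2MB page has order 9, so split_page() turns it
 * into 1 << 9 == 512 independent order-0 (4KB) pages, all of which are put
 * on @ctl->prealloc_pages to be consumed by the following 4KB inflation
 * cycle in vmballoon_alloc_page_list().
 */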

/**
 * vmballoon_inflate() - Inflate the balloon towards its target size.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int to_inflate_pages, page_in_frames;
		int alloc_error, lock_error = 0;

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages != 0);

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_inflate_frames,
							  page_in_frames));

		/* Start by allocating */
		alloc_error = vmballoon_alloc_page_list(b, &ctl,
							to_inflate_pages);

		/* Actually lock the pages by telling the hypervisor */
		lock_error = vmballoon_lock(b, &ctl);

		/*
		 * If an error indicates that something serious went wrong,
		 * stop the inflation.
		 */
		if (lock_error)
			break;

		/* Update the balloon size */
		atomic64_add(ctl.n_pages * page_in_frames, &b->size);

		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/*
		 * If allocation failed or the number of refused pages exceeds
		 * the maximum allowed, move to the next page size.
		 */
		if (alloc_error ||
		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
				break;

			/*
			 * Split the refused pages to 4k. This will also empty
			 * the refused pages list.
			 */
			vmballoon_split_refused_pages(&ctl);
			ctl.page_size--;
		}

		cond_resched();
	}

	/*
	 * Release pages that were allocated while attempting to inflate the
	 * balloon but were refused by the host for one reason or another,
	 * and update the statistics.
	 */
	if (ctl.n_refused_pages != 0)
		vmballoon_release_refused_pages(b, &ctl);

	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}

/**
 * vmballoon_deflate() - Decrease the size of the balloon.
 *
 * @b: pointer to the balloon
 * @n_frames: the number of frames to deflate. If zero, automatically
 * calculated according to the target size.
 * @coordinated: whether to coordinate with the host
 *
 * Decrease the size of the balloon allowing the guest to use more memory.
 *
 * Return: The number of deflated frames (i.e., basic page size units)
 */
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
				       bool coordinated)
{
	unsigned long deflated_frames = 0;
	unsigned long tried_frames = 0;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = VMW_BALLOON_4K_PAGE,
		.op = VMW_BALLOON_DEFLATE
	};

	/* free pages to reach target */
	while (true) {
		unsigned int to_deflate_pages, n_unlocked_frames;
		unsigned int page_in_frames;
		int64_t to_deflate_frames;
		bool deflated_all;

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages);
		VM_BUG_ON(!list_empty(&ctl.refused_pages));
		VM_BUG_ON(ctl.n_refused_pages);

		/*
		 * If a specific number of frames was requested, we try to
		 * deflate that many frames. Otherwise, deflation is performed
		 * according to the target and balloon size.
		 */
		to_deflate_frames = n_frames ? n_frames - tried_frames :
					       -vmballoon_change(b);

		/* break if no work to do */
		if (to_deflate_frames <= 0)
			break;

		/*
		 * Calculate the number of frames based on the current page
		 * size, but limit the deflated frames to a single chunk.
		 */
		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_deflate_frames,
							  page_in_frames));

		/* First take the pages from the balloon pages. */
		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size, to_deflate_pages);

		/*
		 * Before the pages are moved to the refused list, count their
		 * frames as frames that we tried to deflate.
		 */
		tried_frames += ctl.n_pages * page_in_frames;

		/*
		 * Unlock the pages by communicating with the hypervisor if the
		 * communication is coordinated (i.e., not pop). We ignore the
		 * return code; instead we check whether we managed to unlock
		 * all the pages. If we failed, we move to the next page size
		 * and will eventually try again later.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Check if we deflated enough. We will move to the next page
		 * size if we did not manage to do so. This calculation takes
		 * place now, as once the pages are released, the number of
		 * pages is zeroed.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* Update local and global counters */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* free the ballooned pages */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* Return the refused pages to the ballooned list. */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* If we failed to unlock all the pages, move to next size. */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}

/**
 * vmballoon_deinit_batching - disables batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Disables batching by deallocating the page used for communication with the
 * hypervisor and disabling the static key to indicate that batching is off.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}

/**
 * vmballoon_init_batching - enable batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Enables batching by allocating a page for communication with the hypervisor
 * and enabling the static_key to use batching.
 *
 * Return: zero on success or an appropriate error-code.
 */
static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);

	static_branch_enable(&vmw_balloon_batching);

	return 0;
}
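
/*
 * Sizing note: with 4KB pages and the 8-byte struct vmballoon_batch_entry,
 * batch_max_pages == PAGE_SIZE / sizeof(struct vmballoon_batch_entry) ==
 * 4096 / 8 == 512 entries, which matches the "up to 512" pages limit of the
 * batched lock/unlock commands.
 */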
1340 
1341 /*
1342  * Receive notification and resize balloon
1343  */
vmballoon_doorbell(void * client_data)1344 static void vmballoon_doorbell(void *client_data)
1345 {
1346 	struct vmballoon *b = client_data;
1347 
1348 	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
1349 
1350 	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
1351 }
1352 
1353 /*
1354  * Clean up vmci doorbell
1355  */
vmballoon_vmci_cleanup(struct vmballoon * b)1356 static void vmballoon_vmci_cleanup(struct vmballoon *b)
1357 {
1358 	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1359 		      VMCI_INVALID_ID, VMCI_INVALID_ID);
1360 
1361 	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
1362 		vmci_doorbell_destroy(b->vmci_doorbell);
1363 		b->vmci_doorbell = VMCI_INVALID_HANDLE;
1364 	}
1365 }
1366 
1367 /**
1368  * vmballoon_vmci_init - Initialize vmci doorbell.
1369  *
1370  * @b: pointer to the balloon.
1371  *
1372  * Return: zero on success or when wakeup command not supported. Error-code
1373  * otherwise.
1374  *
1375  * Initialize vmci doorbell, to get notified as soon as balloon changes.
1376  */
vmballoon_vmci_init(struct vmballoon * b)1377 static int vmballoon_vmci_init(struct vmballoon *b)
1378 {
1379 	unsigned long error;
1380 
1381 	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1382 		return 0;
1383 
1384 	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1385 				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
1386 				     vmballoon_doorbell, b);
1387 
1388 	if (error != VMCI_SUCCESS)
1389 		goto fail;
1390 
1391 	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1392 				b->vmci_doorbell.context,
1393 				b->vmci_doorbell.resource, NULL);
1394 
1395 	if (error != VMW_BALLOON_SUCCESS)
1396 		goto fail;
1397 
1398 	return 0;
1399 fail:
1400 	vmballoon_vmci_cleanup(b);
1401 	return -EIO;
1402 }
1403 
1404 /**
1405  * vmballoon_pop - Quickly release all pages allocate for the balloon.
1406  *
1407  * @b: pointer to the balloon.
1408  *
1409  * This function is called when host decides to "reset" balloon for one reason
1410  * or another. Unlike normal "deflate" we do not (shall not) notify host of the
1411  * pages being released.
1412  */
vmballoon_pop(struct vmballoon * b)1413 static void vmballoon_pop(struct vmballoon *b)
1414 {
1415 	unsigned long size;
1416 
1417 	while ((size = atomic64_read(&b->size)))
1418 		vmballoon_deflate(b, size, false);
1419 }
1420 
1421 /*
1422  * Perform standard reset sequence by popping the balloon (in case it
1423  * is not  empty) and then restarting protocol. This operation normally
1424  * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
1425  */
vmballoon_reset(struct vmballoon * b)1426 static void vmballoon_reset(struct vmballoon *b)
1427 {
1428 	int error;
1429 
1430 	down_write(&b->conf_sem);
1431 
1432 	vmballoon_vmci_cleanup(b);
1433 
1434 	/* free all pages, skipping monitor unlock */
1435 	vmballoon_pop(b);
1436 
1437 	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1438 		goto unlock;
1439 
1440 	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1441 		if (vmballoon_init_batching(b)) {
1442 			/*
1443 			 * We failed to initialize batching, inform the monitor
1444 			 * about it by sending a null capability.
1445 			 *
1446 			 * The guest will retry in one second.
1447 			 */
1448 			vmballoon_send_start(b, 0);
1449 			goto unlock;
1450 		}
1451 	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1452 		vmballoon_deinit_batching(b);
1453 	}
1454 
1455 	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
1456 	b->reset_required = false;
1457 
1458 	error = vmballoon_vmci_init(b);
1459 	if (error)
1460 		pr_err("failed to initialize vmci doorbell\n");
1461 
1462 	if (vmballoon_send_guest_id(b))
1463 		pr_err("failed to send guest ID to the host\n");
1464 
1465 unlock:
1466 	up_write(&b->conf_sem);
1467 }
1468 
1469 /**
1470  * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
1471  *
1472  * @work: pointer to the &work_struct which is provided by the workqueue.
1473  *
1474  * Resets the protocol if needed, gets the new size and adjusts balloon as
1475  * needed. Repeat in 1 sec.
1476  */
vmballoon_work(struct work_struct * work)1477 static void vmballoon_work(struct work_struct *work)
1478 {
1479 	struct delayed_work *dwork = to_delayed_work(work);
1480 	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
1481 	int64_t change = 0;
1482 
1483 	if (b->reset_required)
1484 		vmballoon_reset(b);
1485 
1486 	down_read(&b->conf_sem);
1487 
1488 	/*
1489 	 * Update the stats while holding the semaphore to ensure that
1490 	 * @stats_enabled is consistent with whether the stats are actually
1491 	 * enabled
1492 	 */
1493 	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
1494 
1495 	if (!vmballoon_send_get_target(b))
1496 		change = vmballoon_change(b);
1497 
1498 	if (change != 0) {
1499 		pr_debug("%s - size: %llu, target %lu\n", __func__,
1500 			 atomic64_read(&b->size), READ_ONCE(b->target));
1501 
1502 		if (change > 0)
1503 			vmballoon_inflate(b);
1504 		else  /* (change < 0) */
1505 			vmballoon_deflate(b, 0, true);
1506 	}
1507 
1508 	up_read(&b->conf_sem);
1509 
1510 	/*
1511 	 * We are using a freezable workqueue so that balloon operations are
1512 	 * stopped while the system transitions to/from sleep/hibernation.
1513 	 */
1514 	queue_delayed_work(system_freezable_wq,
1515 			   dwork, round_jiffies_relative(HZ));
1516 
1517 }
1518 
1519 /**
1520  * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
1521  * @shrinker: pointer to the balloon shrinker.
1522  * @sc: page reclaim information.
1523  *
1524  * Returns: number of pages that were freed during deflation.
1525  */
vmballoon_shrinker_scan(struct shrinker * shrinker,struct shrink_control * sc)1526 static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
1527 					     struct shrink_control *sc)
1528 {
1529 	struct vmballoon *b = &balloon;
1530 	unsigned long deflated_frames;
1531 
1532 	pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));
1533 
1534 	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
1535 
1536 	/*
1537 	 * If the lock is also contended for read, we cannot easily reclaim and
1538 	 * we bail out.
1539 	 */
1540 	if (!down_read_trylock(&b->conf_sem))
1541 		return 0;
1542 
1543 	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
1544 
1545 	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
1546 				deflated_frames);
1547 
1548 	/*
1549 	 * Delay future inflation for some time to mitigate the situations in
1550 	 * which balloon continuously grows and shrinks. Use WRITE_ONCE() since
1551 	 * the access is asynchronous.
1552 	 */
1553 	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
1554 
1555 	up_read(&b->conf_sem);
1556 
1557 	return deflated_frames;
1558 }
1559 
1560 /**
1561  * vmballoon_shrinker_count() - return the number of ballooned pages.
1562  * @shrinker: pointer to the balloon shrinker.
1563  * @sc: page reclaim information.
1564  *
1565  * Returns: number of 4k pages that are allocated for the balloon and can
1566  *	    therefore be reclaimed under pressure.
1567  */
vmballoon_shrinker_count(struct shrinker * shrinker,struct shrink_control * sc)1568 static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
1569 					      struct shrink_control *sc)
1570 {
1571 	struct vmballoon *b = &balloon;
1572 
1573 	return atomic64_read(&b->size);
1574 }
1575 
vmballoon_unregister_shrinker(struct vmballoon * b)1576 static void vmballoon_unregister_shrinker(struct vmballoon *b)
1577 {
1578 	if (b->shrinker_registered)
1579 		unregister_shrinker(&b->shrinker);
1580 	b->shrinker_registered = false;
1581 }
1582 
vmballoon_register_shrinker(struct vmballoon * b)1583 static int vmballoon_register_shrinker(struct vmballoon *b)
1584 {
1585 	int r;
1586 
1587 	/* Do nothing if the shrinker is not enabled */
1588 	if (!vmwballoon_shrinker_enable)
1589 		return 0;
1590 
1591 	b->shrinker.scan_objects = vmballoon_shrinker_scan;
1592 	b->shrinker.count_objects = vmballoon_shrinker_count;
1593 	b->shrinker.seeks = DEFAULT_SEEKS;
1594 
1595 	r = register_shrinker(&b->shrinker);
1596 
1597 	if (r == 0)
1598 		b->shrinker_registered = true;
1599 
1600 	return r;
1601 }
1602 
1603 /*
1604  * DEBUGFS Interface
1605  */
1606 #ifdef CONFIG_DEBUG_FS
1607 
1608 static const char * const vmballoon_stat_page_names[] = {
1609 	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
1610 	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
1611 	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
1612 	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
1613 	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
1614 };
1615 
1616 static const char * const vmballoon_stat_names[] = {
1617 	[VMW_BALLOON_STAT_TIMER]		= "timer",
1618 	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
1619 	[VMW_BALLOON_STAT_RESET]		= "reset",
1620 	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
1621 	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
1622 };
1623 
vmballoon_enable_stats(struct vmballoon * b)1624 static int vmballoon_enable_stats(struct vmballoon *b)
1625 {
1626 	int r = 0;
1627 
1628 	down_write(&b->conf_sem);
1629 
1630 	/* did we somehow race with another reader which enabled stats? */
1631 	if (b->stats)
1632 		goto out;
1633 
1634 	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1635 
1636 	if (!b->stats) {
1637 		/* allocation failed */
1638 		r = -ENOMEM;
1639 		goto out;
1640 	}
1641 	static_key_enable(&balloon_stat_enabled.key);
1642 out:
1643 	up_write(&b->conf_sem);
1644 	return r;
1645 }

/**
 * vmballoon_debug_show - shows statistics of balloon operations.
 * @f: pointer to the &struct seq_file.
 * @offset: ignored.
 *
 * Provides the statistics that are exposed through the vmmemctl file in
 * debugfs. To avoid the overhead (mainly memory) of collecting statistics,
 * collection only starts after the first time the counters are read.
 *
 * Return: zero on success or an error code.
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	int i, j;

	/* enables stats if they are disabled */
	if (!b->stats) {
		int r = vmballoon_enable_stats(b);

		if (r)
			return r;
	}

	/* format capabilities info */
	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
		   VMW_BALLOON_CAPABILITIES);
	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
	seq_printf(f, "%-22s: %16s\n", "is resetting",
		   b->reset_required ? "y" : "n");

	/* format size info */
	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));

	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
		if (vmballoon_cmd_names[i] == NULL)
			continue;

		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
			   vmballoon_cmd_names[i],
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
	}

	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
		seq_printf(f, "%-22s: %16llu\n",
			   vmballoon_stat_names[i],
			   atomic64_read(&b->stats->general_stat[i]));

	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
			seq_printf(f, "%-18s(%s): %16llu\n",
				   vmballoon_stat_page_names[i],
				   vmballoon_page_size_names[j],
				   atomic64_read(&b->stats->page_stat[i][j]));
	}

	return 0;
}
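
/*
 * Illustrative output sketch (field names from above; the values and exact
 * spacing here are made up):
 *
 *	balloon capabilities  :             0x1e
 *	used capabilities     :              0x6
 *	is resetting          :                n
 *	target                :            65536
 *	current               :            65536
 *	timer                 :              100
 *	alloc          (4k)   :            65536
 */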

DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
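
/*
 * DEFINE_SHOW_ATTRIBUTE() generates vmballoon_debug_open() and the
 * vmballoon_debug_fops used below, routing reads to vmballoon_debug_show()
 * via single_open().
 */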

static void __init vmballoon_debugfs_init(struct vmballoon *b)
{
	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	static_key_disable(&balloon_stat_enabled.key);
	debugfs_remove(b->dbg_entry);
	kfree(b->stats);
	b->stats = NULL;
}

#else

static inline void vmballoon_debugfs_init(struct vmballoon *b)
{
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */


#ifdef CONFIG_BALLOON_COMPACTION

static int vmballoon_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type vmballoon_fs = {
	.name			= "balloon-vmware",
	.init_fs_context	= vmballoon_init_fs_context,
	.kill_sb		= kill_anon_super,
};
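
/*
 * This pseudo filesystem exists only to supply an anonymous inode whose
 * address_space can carry the balloon migration callbacks set up below; it
 * is not meant to be mounted from user space.
 */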

static struct vfsmount *vmballoon_mnt;

/**
 * vmballoon_migratepage() - migrates a balloon page.
 * @b_dev_info: balloon device information descriptor.
 * @newpage: the page to which @page should be migrated.
 * @page: a ballooned page that should be migrated.
 * @mode: migration mode, ignored.
 *
 * This function is open-coded, but that matches the interface that
 * balloon_compaction provides.
 *
 * Return: zero on success, -EAGAIN when migration cannot be performed
 *	   momentarily, and -EBUSY if migration failed and should be retried
 *	   with that specific page.
 */
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	unsigned long status, flags;
	struct vmballoon *b;
	int ret;

	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

	/*
	 * If the semaphore is taken, there is an ongoing configuration change
	 * (i.e., balloon reset), so try again.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return -EAGAIN;

	spin_lock(&b->comm_lock);
	/*
	 * We must start by deflating and not inflating, as otherwise the
	 * hypervisor may tell us that it has enough memory and the new page is
	 * not needed. Since the old page is isolated, we cannot use the list
	 * interface to unlock it, as the LRU field is used for isolation.
	 * Instead, we use the native interface directly.
	 */
	vmballoon_add_page(b, 0, page);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_DEFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &page);

	/*
	 * If a failure happened, let the migration mechanism know that it
	 * should not retry.
	 */
	if (status != VMW_BALLOON_SUCCESS) {
		spin_unlock(&b->comm_lock);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * The page is isolated, so it is safe to delete it without holding
	 * @pages_lock. We keep holding @comm_lock since we will need it in a
	 * second.
	 */
	balloon_page_delete(page);

	put_page(page);

	/* Inflate */
	vmballoon_add_page(b, 0, newpage);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_INFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &newpage);

	spin_unlock(&b->comm_lock);

	if (status != VMW_BALLOON_SUCCESS) {
		/*
		 * A failure happened. While we can deflate the page we just
		 * inflated, this deflation can also encounter an error. Instead
		 * we will decrease the size of the balloon to reflect the
		 * change and report failure.
		 */
		atomic64_dec(&b->size);
		ret = -EBUSY;
	} else {
		/*
		 * Success. Take a reference for the page, and we will add it to
		 * the list after acquiring the lock.
		 */
		get_page(newpage);
		ret = MIGRATEPAGE_SUCCESS;
	}

	/* Update the balloon list under the @pages_lock */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

	/*
	 * On inflation success, we already took a reference for the @newpage.
	 * If we succeeded, just insert it into the list and update the
	 * statistics under the lock.
	 */
	if (ret == MIGRATEPAGE_SUCCESS) {
		balloon_page_insert(&b->b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
	}

	/*
	 * We deflated successfully, so regardless of the inflation success, we
	 * need to reduce the number of isolated_pages.
	 */
	b->b_dev_info.isolated_pages--;
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

out_unlock:
	up_read(&b->conf_sem);
	return ret;
}
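
/*
 * In short, migration is deflate-then-inflate under a single @comm_lock
 * critical section: the hypervisor never sees both frames locked at once,
 * and the balloon size only drops (by one frame) when the inflation of
 * @newpage fails.
 */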

/**
 * vmballoon_compaction_deinit() - removes compaction-related data.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_compaction_deinit(struct vmballoon *b)
{
	if (!IS_ERR(b->b_dev_info.inode))
		iput(b->b_dev_info.inode);

	b->b_dev_info.inode = NULL;
	kern_unmount(vmballoon_mnt);
	vmballoon_mnt = NULL;
}

/**
 * vmballoon_compaction_init() - initializes compaction for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * If a failure occurs during initialization, this function does not perform
 * cleanup. The caller must call vmballoon_compaction_deinit() in this case.
 *
 * Return: zero on success or error code on failure.
 */
static __init int vmballoon_compaction_init(struct vmballoon *b)
{
	vmballoon_mnt = kern_mount(&vmballoon_fs);
	if (IS_ERR(vmballoon_mnt))
		return PTR_ERR(vmballoon_mnt);

	b->b_dev_info.migratepage = vmballoon_migratepage;
	b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);

	if (IS_ERR(b->b_dev_info.inode))
		return PTR_ERR(b->b_dev_info.inode);

	b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
	return 0;
}
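
/*
 * vmballoon_compaction_deinit() copes with partial initialization: iput() is
 * skipped for an ERR_PTR inode (and is a no-op for NULL), and kern_unmount()
 * ignores an ERR_PTR or NULL mount, so the caller may invoke it
 * unconditionally on failure.
 */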

#else /* CONFIG_BALLOON_COMPACTION */

static void vmballoon_compaction_deinit(struct vmballoon *b)
{
}

static int vmballoon_compaction_init(struct vmballoon *b)
{
	return 0;
}

#endif /* CONFIG_BALLOON_COMPACTION */

static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_register_shrinker(&balloon);
	if (error)
		goto fail;

	/*
	 * Initialization of compaction must be done after the call to
	 * balloon_devinfo_init().
	 */
	balloon_devinfo_init(&balloon.b_dev_info);
	error = vmballoon_compaction_init(&balloon);
	if (error)
		goto fail;

	INIT_LIST_HEAD(&balloon.huge_pages);
	spin_lock_init(&balloon.comm_lock);
	init_rwsem(&balloon.conf_sem);
	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	vmballoon_debugfs_init(&balloon);

	return 0;
fail:
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_compaction_deinit(&balloon);
	return error;
}
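
/*
 * The shared fail label relies on both teardown helpers tolerating state
 * that was never set up: vmballoon_unregister_shrinker() checks
 * @shrinker_registered, and vmballoon_compaction_deinit() handles a missing
 * inode and mount, as noted above.
 */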

/*
 * Using late_initcall() instead of module_init() allows the balloon to use
 * the VMCI doorbell even when the balloon is built into the kernel.
 * Otherwise, VMCI would be probed only after the balloon is initialized. If
 * the balloon is used as a module, late_initcall() is equivalent to
 * module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from the guest touching deallocated
	 * pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);

	/* Only once the balloon has been popped can compaction be deinitialized. */
	vmballoon_compaction_deinit(&balloon);
}
module_exit(vmballoon_exit);