1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
4 * Tosatti's implementations.
5 *
6 * Copyright 2008 Rusty Russell IBM Corporation
7 */
8
9 #include <linux/virtio.h>
10 #include <linux/virtio_balloon.h>
11 #include <linux/swap.h>
12 #include <linux/workqueue.h>
13 #include <linux/delay.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/balloon_compaction.h>
17 #include <linux/oom.h>
18 #include <linux/wait.h>
19 #include <linux/mm.h>
20 #include <linux/page_reporting.h>
21 #include <linux/kstrtox.h>
22
23 /*
 * The balloon device works in 4K page units, so a single guest page may be
 * pointed to by multiple balloon pages. All memory counters in this driver
 * are in balloon page units.
27 */
28 #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
29 #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
30 /* Maximum number of (4k) pages to deflate on OOM notifications. */
31 #define VIRTIO_BALLOON_OOM_NR_PAGES 256
32 #define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80
33
34 #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
35 __GFP_NOMEMALLOC)
36 /* The order of free page blocks to report to host */
37 #define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_PAGE_ORDER
38 /* The size of a free page block in bytes */
39 #define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
40 (1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
41 #define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
42
43 enum virtio_balloon_vq {
44 VIRTIO_BALLOON_VQ_INFLATE,
45 VIRTIO_BALLOON_VQ_DEFLATE,
46 VIRTIO_BALLOON_VQ_STATS,
47 VIRTIO_BALLOON_VQ_FREE_PAGE,
48 VIRTIO_BALLOON_VQ_REPORTING,
49 VIRTIO_BALLOON_VQ_MAX
50 };
51
52 enum virtio_balloon_config_read {
53 VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
54 };
55
56 struct virtio_balloon {
57 struct virtio_device *vdev;
58 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
59
60 /* Balloon's own wq for cpu-intensive work items */
61 struct workqueue_struct *balloon_wq;
62 /* The free page reporting work item submitted to the balloon wq */
63 struct work_struct report_free_page_work;
64
65 /* The balloon servicing is delegated to a freezable workqueue. */
66 struct work_struct update_balloon_stats_work;
67 struct work_struct update_balloon_size_work;
68
69 /* Prevent updating balloon when it is being canceled. */
70 spinlock_t stop_update_lock;
71 bool stop_update;
/* Bitmap to indicate whether the related config fields need to be read */
73 unsigned long config_read_bitmap;
74
75 /* The list of allocated free pages, waiting to be given back to mm */
76 struct list_head free_page_list;
77 spinlock_t free_page_list_lock;
78 /* The number of free page blocks on the above list */
79 unsigned long num_free_page_blocks;
80 /*
81 * The cmd id received from host.
82 * Read it via virtio_balloon_cmd_id_received to get the latest value
83 * sent from host.
84 */
85 u32 cmd_id_received_cache;
86 /* The cmd id that is actively in use */
87 __virtio32 cmd_id_active;
88 /* Buffer to store the stop sign */
89 __virtio32 cmd_id_stop;
90
91 /* Waiting for host to ack the pages we released. */
92 wait_queue_head_t acked;
93
94 /* Number of balloon pages we've told the Host we're not using. */
95 unsigned int num_pages;
96 /*
97 * The pages we've told the Host we're not using are enqueued
98 * at vb_dev_info->pages list.
99 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
100 * to num_pages above.
101 */
102 struct balloon_dev_info vb_dev_info;
103
/* Synchronize access/updates to this struct virtio_balloon's elements */
105 struct mutex balloon_lock;
106
107 /* The array of pfns we tell the Host about. */
108 unsigned int num_pfns;
109 __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
110
111 /* Memory statistics */
112 struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
113
114 /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
115 struct shrinker *shrinker;
116
117 /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
118 struct notifier_block oom_nb;
119
120 /* Free page reporting device */
121 struct virtqueue *reporting_vq;
122 struct page_reporting_dev_info pr_dev_info;
123
124 /* State for keeping the wakeup_source active while adjusting the balloon */
125 spinlock_t wakeup_lock;
126 bool processing_wakeup_event;
127 u32 wakeup_signal_mask;
128
129 bool bail_on_out_of_puff;
130 };
131
132 #define VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST (1 << 0)
133 #define VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS (1 << 1)
134
135 static const struct virtio_device_id id_table[] = {
136 { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
137 { 0 },
138 };
139
static u32 page_to_balloon_pfn(struct page *page)
141 {
142 unsigned long pfn = page_to_pfn(page);
143
144 BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
145 /* Convert pfn from Linux page size to balloon page size. */
146 return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
147 }
148
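/*
 * Wakeup source bookkeeping: start_wakeup_event() notes a pending signal and
 * keeps the device's wakeup source active, process_wakeup_event() clears the
 * signal once the corresponding work runs, and finish_wakeup_event() relaxes
 * the wakeup source again when no signals remain pending.
 */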
static void start_wakeup_event(struct virtio_balloon *vb, u32 mask)
150 {
151 unsigned long flags;
152
153 spin_lock_irqsave(&vb->wakeup_lock, flags);
154 vb->wakeup_signal_mask |= mask;
155 if (!vb->processing_wakeup_event) {
156 vb->processing_wakeup_event = true;
157 pm_stay_awake(&vb->vdev->dev);
158 }
159 spin_unlock_irqrestore(&vb->wakeup_lock, flags);
160 }
161
static void process_wakeup_event(struct virtio_balloon *vb, u32 mask)
163 {
164 spin_lock_irq(&vb->wakeup_lock);
165 vb->wakeup_signal_mask &= ~mask;
166 spin_unlock_irq(&vb->wakeup_lock);
167 }
168
static void finish_wakeup_event(struct virtio_balloon *vb)
170 {
171 spin_lock_irq(&vb->wakeup_lock);
172 if (!vb->wakeup_signal_mask && vb->processing_wakeup_event) {
173 vb->processing_wakeup_event = false;
174 pm_relax(&vb->vdev->dev);
175 }
176 spin_unlock_irq(&vb->wakeup_lock);
177 }
178
static void balloon_ack(struct virtqueue *vq)
180 {
181 struct virtio_balloon *vb = vq->vdev->priv;
182
183 wake_up(&vb->acked);
184 }
185
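/*
 * Post the current pfns[] array on @vq and wait until the host has consumed
 * the buffer (signalled through the virtqueue callback, balloon_ack()).
 */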
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
187 {
188 struct scatterlist sg;
189 unsigned int len;
190
191 sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
192
193 post_page_relinquish_tlb_inv();
194
195 /* We should always be able to add one buffer to an empty queue. */
196 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
197 virtqueue_kick(vq);
198
199 /* When host has read buffer, this completes via balloon_ack */
200 wait_event(vb->acked, virtqueue_get_buf(vq, &len));
201
202 }
203
static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
					struct scatterlist *sgl, unsigned int nents)
206 {
207 struct virtio_balloon *vb =
208 container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
209 struct virtqueue *vq = vb->reporting_vq;
210 unsigned int unused, err;
211 struct scatterlist *sg;
212
213 for (sg = sgl; sg != NULL; sg = sg_next(sg))
214 page_relinquish(sg_page(sg), sg->length >> PAGE_SHIFT);
215
216 /* We should always be able to add these buffers to an empty queue. */
217 err = virtqueue_add_inbuf(vq, sgl, nents, vb, GFP_NOWAIT | __GFP_NOWARN);
218
219 /*
 * In the extremely unlikely case that adding the buffers triggers an
 * error, simply display a warning and exit without actually processing
 * the pages.
223 */
224 if (WARN_ON_ONCE(err))
225 return err;
226
227 post_page_relinquish_tlb_inv();
228
229 virtqueue_kick(vq);
230
231 /* When host has read buffer, this completes via balloon_ack */
232 wait_event(vb->acked, virtqueue_get_buf(vq, &unused));
233
234 return 0;
235 }
236
static void set_page_pfns(struct virtio_balloon *vb,
			  __virtio32 pfns[], struct page *page)
239 {
240 unsigned int i;
241
242 BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);
243
244 /*
245 * Set balloon pfns pointing at this page.
246 * Note that the first pfn points at start of the page.
247 */
248 for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
249 pfns[i] = cpu_to_virtio32(vb->vdev,
250 page_to_balloon_pfn(page) + i);
251 }
252
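/*
 * Inflate the balloon by up to @num 4K balloon pages (at most one pfns[]
 * array worth per call), telling the host about the pages that were added.
 * Returns the number of balloon pages actually added; *out_of_puff is set
 * when the allocator could not provide enough pages.
 */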
static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num,
				 bool *out_of_puff)
255 {
256 unsigned int num_allocated_pages;
257 unsigned int num_pfns;
258 struct page *page;
259 LIST_HEAD(pages);
260
261 /* We can only do one array worth at a time. */
262 num = min(num, ARRAY_SIZE(vb->pfns));
263
264 for (num_pfns = 0; num_pfns < num;
265 num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
266 struct page *page = balloon_page_alloc();
267
268 if (!page) {
269 dev_info_ratelimited(&vb->vdev->dev,
270 "Out of puff! Can't get %u pages\n",
271 VIRTIO_BALLOON_PAGES_PER_PAGE);
272 /* Sleep for at least 1/5 of a second before retry. */
273 msleep(200);
274 *out_of_puff = true;
275 break;
276 }
277
278 balloon_page_push(&pages, page);
279 page_relinquish(page, 1);
280 }
281
282 mutex_lock(&vb->balloon_lock);
283
284 vb->num_pfns = 0;
285
286 while ((page = balloon_page_pop(&pages))) {
287 balloon_page_enqueue(&vb->vb_dev_info, page);
288
289 set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
290 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
291 if (!virtio_has_feature(vb->vdev,
292 VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
293 adjust_managed_page_count(page, -1);
294 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
295 }
296
297 num_allocated_pages = vb->num_pfns;
298 /* Did we get any? */
299 if (vb->num_pfns != 0)
300 tell_host(vb, vb->inflate_vq);
301 mutex_unlock(&vb->balloon_lock);
302
303 return num_allocated_pages;
304 }
305
static void release_pages_balloon(struct virtio_balloon *vb,
				  struct list_head *pages)
308 {
309 struct page *page, *next;
310
311 list_for_each_entry_safe(page, next, pages, lru) {
312 if (!virtio_has_feature(vb->vdev,
313 VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
314 adjust_managed_page_count(page, 1);
315 list_del(&page->lru);
316 put_page(page); /* balloon reference */
317 }
318 }
319
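/*
 * Deflate the balloon by up to @num 4K balloon pages (at most one pfns[]
 * array worth per call), telling the host before the pages are released
 * back to the page allocator. Returns the number of balloon pages freed.
 */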
static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num)
321 {
322 unsigned int num_freed_pages;
323 struct page *page;
324 struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
325 LIST_HEAD(pages);
326
327 /* We can only do one array worth at a time. */
328 num = min(num, ARRAY_SIZE(vb->pfns));
329
330 mutex_lock(&vb->balloon_lock);
331 /* We can't release more pages than taken */
332 num = min(num, (size_t)vb->num_pages);
333 for (vb->num_pfns = 0; vb->num_pfns < num;
334 vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
335 page = balloon_page_dequeue(vb_dev_info);
336 if (!page)
337 break;
338 set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
339 list_add(&page->lru, &pages);
340 vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
341 }
342
343 num_freed_pages = vb->num_pfns;
344 /*
345 * Note that if
346 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
347 * is true, we *have* to do it in this order
348 */
349 if (vb->num_pfns != 0)
350 tell_host(vb, vb->deflate_vq);
351 release_pages_balloon(vb, &pages);
352 mutex_unlock(&vb->balloon_lock);
353 return num_freed_pages;
354 }
355
static inline void update_stat(struct virtio_balloon *vb, int idx,
			       u16 tag, u64 val)
358 {
359 BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
360 vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
361 vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
362 }
363
364 #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
365
366 #ifdef CONFIG_VM_EVENT_COUNTERS
367 /* Return the number of entries filled by vm events */
static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
369 {
370 unsigned long events[NR_VM_EVENT_ITEMS];
371 unsigned int idx = 0;
372 unsigned int zid;
373 unsigned long stall = 0;
374
375 all_vm_events(events);
376 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
377 pages_to_bytes(events[PSWPIN]));
378 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
379 pages_to_bytes(events[PSWPOUT]));
380 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
381 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
382 update_stat(vb, idx++, VIRTIO_BALLOON_S_OOM_KILL, events[OOM_KILL]);
383
384 /* sum all the stall events */
385 for (zid = 0; zid < MAX_NR_ZONES; zid++)
386 stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];
387
388 update_stat(vb, idx++, VIRTIO_BALLOON_S_ALLOC_STALL, stall);
389
390 update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_SCAN,
391 pages_to_bytes(events[PGSCAN_KSWAPD]));
392 update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_SCAN,
393 pages_to_bytes(events[PGSCAN_DIRECT]));
394 update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_RECLAIM,
395 pages_to_bytes(events[PGSTEAL_KSWAPD]));
396 update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_RECLAIM,
397 pages_to_bytes(events[PGSTEAL_DIRECT]));
398
399 #ifdef CONFIG_HUGETLB_PAGE
400 update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
401 events[HTLB_BUDDY_PGALLOC]);
402 update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
403 events[HTLB_BUDDY_PGALLOC_FAIL]);
404 #endif /* CONFIG_HUGETLB_PAGE */
405
406 return idx;
407 }
408 #else /* CONFIG_VM_EVENT_COUNTERS */
static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
410 {
411 return 0;
412 }
413 #endif /* CONFIG_VM_EVENT_COUNTERS */
414
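/* Refresh vb->stats[] and return the number of entries that were filled. */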
static unsigned int update_balloon_stats(struct virtio_balloon *vb)
416 {
417 struct sysinfo i;
418 unsigned int idx;
419 long available;
420 unsigned long caches;
421
422 idx = update_balloon_vm_stats(vb);
423
424 si_meminfo(&i);
425 available = si_mem_available();
426 caches = global_node_page_state(NR_FILE_PAGES);
427 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
428 pages_to_bytes(i.freeram));
429 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
430 pages_to_bytes(i.totalram));
431 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
432 pages_to_bytes(available));
433 update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
434 pages_to_bytes(caches));
435
436 return idx;
437 }
438
439 /*
440 * While most virtqueues communicate guest-initiated requests to the hypervisor,
441 * the stats queue operates in reverse. The driver initializes the virtqueue
442 * with a single buffer. From that point forward, all conversations consist of
443 * a hypervisor request (a call to this function) which directs us to refill
444 * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
445 * we delegate the job to a freezable workqueue that will do the actual work via
446 * stats_handle_request().
447 */
static void stats_request(struct virtqueue *vq)
449 {
450 struct virtio_balloon *vb = vq->vdev->priv;
451
452 spin_lock(&vb->stop_update_lock);
453 if (!vb->stop_update) {
454 start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
455 queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
456 }
457 spin_unlock(&vb->stop_update_lock);
458 }
459
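/*
 * Runs from the freezable workqueue: detach the buffer the host just used,
 * refill the stats virtqueue with fresh statistics and kick the host.
 */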
static void stats_handle_request(struct virtio_balloon *vb)
461 {
462 struct virtqueue *vq;
463 struct scatterlist sg;
464 unsigned int len, num_stats;
465
466 num_stats = update_balloon_stats(vb);
467
468 vq = vb->stats_vq;
469 if (!virtqueue_get_buf(vq, &len))
470 return;
471 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
472 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
473 virtqueue_kick(vq);
474 }
475
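/*
 * Return how far the balloon is from the host's requested target, in 4K
 * balloon pages: positive means the balloon should inflate, negative means
 * it should deflate.
 */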
static inline s64 towards_target(struct virtio_balloon *vb)
477 {
478 s64 target;
479 u32 num_pages;
480
481 /* Legacy balloon config space is LE, unlike all other devices. */
482 virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
483 &num_pages);
484
485 /*
 * Align up to the guest page size to avoid inflating and deflating the
 * balloon endlessly.
488 */
489 target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
490 return target - vb->num_pages;
491 }
492
493 /* Gives back @num_to_return blocks of free pages to mm. */
static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
					     unsigned long num_to_return)
496 {
497 struct page *page;
498 unsigned long num_returned;
499
500 spin_lock_irq(&vb->free_page_list_lock);
501 for (num_returned = 0; num_returned < num_to_return; num_returned++) {
502 page = balloon_page_pop(&vb->free_page_list);
503 if (!page)
504 break;
505 free_pages((unsigned long)page_address(page),
506 VIRTIO_BALLOON_HINT_BLOCK_ORDER);
507 }
508 vb->num_free_page_blocks -= num_returned;
509 spin_unlock_irq(&vb->free_page_list_lock);
510
511 return num_returned;
512 }
513
static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
515 {
516 if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
517 return;
518
519 /* No need to queue the work if the bit was already set. */
520 if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
521 &vb->config_read_bitmap))
522 return;
523
524 queue_work(vb->balloon_wq, &vb->report_free_page_work);
525 }
526
static void start_update_balloon_size(struct virtio_balloon *vb)
528 {
529 start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
530 queue_work(system_freezable_wq, &vb->update_balloon_size_work);
531 }
532
static void virtballoon_changed(struct virtio_device *vdev)
534 {
535 struct virtio_balloon *vb = vdev->priv;
536 unsigned long flags;
537
538 spin_lock_irqsave(&vb->stop_update_lock, flags);
539 if (!vb->stop_update) {
540 start_update_balloon_size(vb);
541 virtio_balloon_queue_free_page_work(vb);
542 }
543 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
544 }
545
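/* Report the current balloon size back to the host via the 'actual' config field. */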
static void update_balloon_size(struct virtio_balloon *vb)
547 {
548 u32 actual = vb->num_pages;
549
550 /* Legacy balloon config space is LE, unlike all other devices. */
551 virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
552 &actual);
553 }
554
static void update_balloon_stats_func(struct work_struct *work)
556 {
557 struct virtio_balloon *vb;
558
559 vb = container_of(work, struct virtio_balloon,
560 update_balloon_stats_work);
561
562 process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
563 stats_handle_request(vb);
564 finish_wakeup_event(vb);
565 }
566
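/*
 * Work item that drives the balloon towards the host's target: each pass
 * inflates or deflates at most one pfns[] array worth and requeues itself
 * until the target is met, unless bail_on_out_of_puff is set and the
 * allocator ran dry.
 */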
static void update_balloon_size_func(struct work_struct *work)
568 {
569 struct virtio_balloon *vb;
570 s64 diff;
571 bool out_of_puff = false;
572
573 vb = container_of(work, struct virtio_balloon,
574 update_balloon_size_work);
575
576 process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
577
578 diff = towards_target(vb);
579
580 if (diff) {
581 if (diff > 0)
582 diff -= fill_balloon(vb, diff, &out_of_puff);
583 else
584 diff += leak_balloon(vb, -diff);
585 update_balloon_size(vb);
586 }
587
588 if (diff && !(vb->bail_on_out_of_puff && out_of_puff))
589 queue_work(system_freezable_wq, work);
590 else
591 finish_wakeup_event(vb);
592 }
593
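/*
 * Discover the virtqueues offered by the device and, if the stats feature
 * was negotiated, prime the stats virtqueue with an initial buffer.
 */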
static int init_vqs(struct virtio_balloon *vb)
595 {
596 struct virtqueue_info vqs_info[VIRTIO_BALLOON_VQ_MAX] = {};
597 struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
598 int err;
599
600 /*
 * Inflateq and deflateq are used unconditionally. The vqs_info[].name
 * will be NULL if the related feature is not enabled, which will
 * cause no allocation for the corresponding virtqueue in find_vqs.
604 */
605 vqs_info[VIRTIO_BALLOON_VQ_INFLATE].callback = balloon_ack;
606 vqs_info[VIRTIO_BALLOON_VQ_INFLATE].name = "inflate";
607 vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].callback = balloon_ack;
608 vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].name = "deflate";
609
610 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
611 vqs_info[VIRTIO_BALLOON_VQ_STATS].name = "stats";
612 vqs_info[VIRTIO_BALLOON_VQ_STATS].callback = stats_request;
613 }
614
615 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
616 vqs_info[VIRTIO_BALLOON_VQ_FREE_PAGE].name = "free_page_vq";
617
618 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
619 vqs_info[VIRTIO_BALLOON_VQ_REPORTING].name = "reporting_vq";
620 vqs_info[VIRTIO_BALLOON_VQ_REPORTING].callback = balloon_ack;
621 }
622
623 err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
624 vqs_info, NULL);
625 if (err)
626 return err;
627
628 vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
629 vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
630 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
631 struct scatterlist sg;
632 unsigned int num_stats;
633 vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];
634
635 /*
636 * Prime this virtqueue with one buffer so the hypervisor can
637 * use it to signal us later (it can't be broken yet!).
638 */
639 num_stats = update_balloon_stats(vb);
640
641 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
642 err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
643 GFP_KERNEL);
644 if (err) {
645 dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
646 __func__);
647 return err;
648 }
649 virtqueue_kick(vb->stats_vq);
650 }
651
652 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
653 vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];
654 virtqueue_disable_dma_api_for_buffers(vb->free_page_vq);
655 }
656
657 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
658 vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];
659 virtqueue_disable_dma_api_for_buffers(vb->reporting_vq);
660 }
661
662 return 0;
663 }
664
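/*
 * Return the most recent free-page-hint command id from the host, re-reading
 * the config space only when the config-changed handler marked it as updated.
 */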
static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
666 {
667 if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
668 &vb->config_read_bitmap)) {
669 /* Legacy balloon config space is LE, unlike all other devices. */
670 virtio_cread_le(vb->vdev, struct virtio_balloon_config,
671 free_page_hint_cmd_id,
672 &vb->cmd_id_received_cache);
673 }
674
675 return vb->cmd_id_received_cache;
676 }
677
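/*
 * Report the command id we are about to serve back to the host on the free
 * page virtqueue, marking the start of a reporting run.
 */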
static int send_cmd_id_start(struct virtio_balloon *vb)
679 {
680 struct scatterlist sg;
681 struct virtqueue *vq = vb->free_page_vq;
682 int err, unused;
683
684 /* Detach all the used buffers from the vq */
685 while (virtqueue_get_buf(vq, &unused))
686 ;
687
688 vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
689 virtio_balloon_cmd_id_received(vb));
690 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
691 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
692 if (!err)
693 virtqueue_kick(vq);
694 return err;
695 }
696
static int send_cmd_id_stop(struct virtio_balloon *vb)
698 {
699 struct scatterlist sg;
700 struct virtqueue *vq = vb->free_page_vq;
701 int err, unused;
702
703 /* Detach all the used buffers from the vq */
704 while (virtqueue_get_buf(vq, &unused))
705 ;
706
707 sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
708 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
709 if (!err)
710 virtqueue_kick(vq);
711 return err;
712 }
713
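/*
 * Allocate one free page block of VIRTIO_BALLOON_HINT_BLOCK_ORDER and hint
 * it to the host on the free page virtqueue, normally keeping the block on
 * free_page_list until the host is done with it. Returns -EINTR once no
 * further blocks can be allocated.
 */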
static int get_free_page_and_send(struct virtio_balloon *vb)
715 {
716 struct virtqueue *vq = vb->free_page_vq;
717 struct page *page;
718 struct scatterlist sg;
719 int err, unused;
720 void *p;
721
722 /* Detach all the used buffers from the vq */
723 while (virtqueue_get_buf(vq, &unused))
724 ;
725
726 page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
727 VIRTIO_BALLOON_HINT_BLOCK_ORDER);
728 /*
729 * When the allocation returns NULL, it indicates that we have got all
730 * the possible free pages, so return -EINTR to stop.
731 */
732 if (!page)
733 return -EINTR;
734
735 p = page_address(page);
736 sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES);
737 /* There is always 1 entry reserved for the cmd id to use. */
738 if (vq->num_free > 1) {
739 err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
740 if (unlikely(err)) {
741 free_pages((unsigned long)p,
742 VIRTIO_BALLOON_HINT_BLOCK_ORDER);
743 return err;
744 }
745 virtqueue_kick(vq);
746 spin_lock_irq(&vb->free_page_list_lock);
747 balloon_page_push(&vb->free_page_list, page);
748 vb->num_free_page_blocks++;
749 spin_unlock_irq(&vb->free_page_list_lock);
750 } else {
751 /*
752 * The vq has no available entry to add this page block, so
753 * just free it.
754 */
755 free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
756 }
757
758 return 0;
759 }
760
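/*
 * Keep hinting free page blocks to the host until the allocator runs out of
 * them or the host posts a stop/new command id.
 */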
static int send_free_pages(struct virtio_balloon *vb)
762 {
763 int err;
764 u32 cmd_id_active;
765
766 while (1) {
767 /*
768 * If a stop id or a new cmd id was just received from host,
769 * stop the reporting.
770 */
771 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
772 if (unlikely(cmd_id_active !=
773 virtio_balloon_cmd_id_received(vb)))
774 break;
775
776 /*
777 * The free page blocks are allocated and sent to host one by
778 * one.
779 */
780 err = get_free_page_and_send(vb);
781 if (err == -EINTR)
782 break;
783 else if (unlikely(err))
784 return err;
785 }
786
787 return 0;
788 }
789
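/*
 * One full reporting run: send the start command id, stream free page
 * hints, then send the stop command id.
 */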
static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
791 {
792 int err;
793 struct device *dev = &vb->vdev->dev;
794
795 /* Start by sending the received cmd id to host with an outbuf. */
796 err = send_cmd_id_start(vb);
797 if (unlikely(err))
798 dev_err(dev, "Failed to send a start id, err = %d\n", err);
799
800 err = send_free_pages(vb);
801 if (unlikely(err))
802 dev_err(dev, "Failed to send a free page, err = %d\n", err);
803
804 /* End by sending a stop id to host with an outbuf. */
805 err = send_cmd_id_stop(vb);
806 if (unlikely(err))
807 dev_err(dev, "Failed to send a stop id, err = %d\n", err);
808 }
809
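/*
 * Work item run on the dedicated balloon workqueue: act on the latest
 * command id from the host. DONE returns all hinted blocks to the mm, a new
 * id starts another reporting run, and STOP (or a repeated id) is ignored.
 */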
static void report_free_page_func(struct work_struct *work)
811 {
812 struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
813 report_free_page_work);
814 u32 cmd_id_received;
815
816 cmd_id_received = virtio_balloon_cmd_id_received(vb);
817 if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
818 /* Pass ULONG_MAX to give back all the free pages */
819 return_free_pages_to_mm(vb, ULONG_MAX);
820 } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
821 cmd_id_received !=
822 virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
823 virtio_balloon_report_free_page(vb);
824 }
825 }
826
827 #ifdef CONFIG_BALLOON_COMPACTION
828 /*
829 * virtballoon_migratepage - perform the balloon page migration on behalf of
830 * a compaction thread. (called under page lock)
831 * @vb_dev_info: the balloon device
832 * @newpage: page that will replace the isolated page after migration finishes.
833 * @page : the isolated (old) page that is about to be migrated to newpage.
834 * @mode : compaction mode -- not used for balloon page migration.
835 *
836 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread.
838 * The page migration for virtio balloon is done in a simple swap fashion which
839 * follows these two macro steps:
840 * 1) insert newpage into vb->pages list and update the host about it;
841 * 2) update the host about the old page removed from vb->pages list;
842 *
 * This function performs the balloon page migration task.
 * Called through movable_operations->migrate_page.
845 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
		struct page *newpage, struct page *page, enum migrate_mode mode)
848 {
849 struct virtio_balloon *vb = container_of(vb_dev_info,
850 struct virtio_balloon, vb_dev_info);
851 unsigned long flags;
852
853 /*
854 * In order to avoid lock contention while migrating pages concurrently
855 * to leak_balloon() or fill_balloon() we just give up the balloon_lock
856 * this turn, as it is easier to retry the page migration later.
857 * This also prevents fill_balloon() getting stuck into a mutex
858 * recursion in the case it ends up triggering memory compaction
 * while it is attempting to inflate the balloon.
860 */
861 if (!mutex_trylock(&vb->balloon_lock))
862 return -EAGAIN;
863
864 get_page(newpage); /* balloon reference */
865
866 /*
867 * When we migrate a page to a different zone and adjusted the
868 * managed page count when inflating, we have to fixup the count of
869 * both involved zones.
870 */
871 if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
872 page_zone(page) != page_zone(newpage)) {
873 adjust_managed_page_count(page, 1);
874 adjust_managed_page_count(newpage, -1);
875 }
876
877 /* balloon's page migration 1st step -- inflate "newpage" */
878 spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
879 balloon_page_insert(vb_dev_info, newpage);
880 vb_dev_info->isolated_pages--;
881 __count_vm_event(BALLOON_MIGRATE);
882 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
883 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
884 set_page_pfns(vb, vb->pfns, newpage);
885 tell_host(vb, vb->inflate_vq);
886
887 /* balloon's page migration 2nd step -- deflate "page" */
888 spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
889 balloon_page_delete(page);
890 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
891 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
892 set_page_pfns(vb, vb->pfns, page);
893 tell_host(vb, vb->deflate_vq);
894
895 mutex_unlock(&vb->balloon_lock);
896
897 put_page(page); /* balloon reference */
898
899 return MIGRATEPAGE_SUCCESS;
900 }
901 #endif /* CONFIG_BALLOON_COMPACTION */
902
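/*
 * Shrinker backend used with VIRTIO_BALLOON_F_FREE_PAGE_HINT: under memory
 * pressure, hand hinted free page blocks back to the mm.
 */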
static unsigned long shrink_free_pages(struct virtio_balloon *vb,
				       unsigned long pages_to_free)
905 {
906 unsigned long blocks_to_free, blocks_freed;
907
908 pages_to_free = round_up(pages_to_free,
909 VIRTIO_BALLOON_HINT_BLOCK_PAGES);
910 blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES;
911 blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);
912
913 return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
914 }
915
static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
						  struct shrink_control *sc)
918 {
919 struct virtio_balloon *vb = shrinker->private_data;
920
921 return shrink_free_pages(vb, sc->nr_to_scan);
922 }
923
static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
926 {
927 struct virtio_balloon *vb = shrinker->private_data;
928
929 return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
930 }
931
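/*
 * OOM notifier: deflate up to VIRTIO_BALLOON_OOM_NR_PAGES balloon pages so
 * the guest can make progress, and report the new balloon size to the host.
 */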
static int virtio_balloon_oom_notify(struct notifier_block *nb,
				     unsigned long dummy, void *parm)
934 {
935 struct virtio_balloon *vb = container_of(nb,
936 struct virtio_balloon, oom_nb);
937 unsigned long *freed = parm;
938
939 *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
940 VIRTIO_BALLOON_PAGES_PER_PAGE;
941 update_balloon_size(vb);
942
943 return NOTIFY_OK;
944 }
945
static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
947 {
948 shrinker_free(vb->shrinker);
949 }
950
static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
952 {
953 vb->shrinker = shrinker_alloc(0, "virtio-balloon");
954 if (!vb->shrinker)
955 return -ENOMEM;
956
957 vb->shrinker->scan_objects = virtio_balloon_shrinker_scan;
958 vb->shrinker->count_objects = virtio_balloon_shrinker_count;
959 vb->shrinker->private_data = vb;
960
961 shrinker_register(vb->shrinker);
962
963 return 0;
964 }
965
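/*
 * sysfs knob: when bail_on_out_of_puff is set, an inflate pass that fails to
 * allocate pages gives up instead of being requeued until the target is met.
 */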
static ssize_t bail_on_out_of_puff_show(struct device *d, struct device_attribute *attr,
					char *buf)
968 {
969 struct virtio_device *vdev =
970 container_of(d, struct virtio_device, dev);
971 struct virtio_balloon *vb = vdev->priv;
972
973 return sprintf(buf, "%c\n", vb->bail_on_out_of_puff ? '1' : '0');
974 }
975
static ssize_t bail_on_out_of_puff_store(struct device *d, struct device_attribute *attr,
					 const char *buf, size_t count)
978 {
979 struct virtio_device *vdev =
980 container_of(d, struct virtio_device, dev);
981 struct virtio_balloon *vb = vdev->priv;
982
983 return kstrtobool(buf, &vb->bail_on_out_of_puff) ?: count;
984 }
985
986 static DEVICE_ATTR_RW(bail_on_out_of_puff);
987
988 static struct attribute *virtio_balloon_sysfs_entries[] = {
989 &dev_attr_bail_on_out_of_puff.attr,
990 NULL
991 };
992
993 static const struct attribute_group virtio_balloon_attribute_group = {
994 .name = NULL, /* put in device directory */
995 .attrs = virtio_balloon_sysfs_entries,
996 };
997
static int virtballoon_probe(struct virtio_device *vdev)
999 {
1000 struct virtio_balloon *vb;
1001 int err;
1002
1003 if (!vdev->config->get) {
1004 dev_err(&vdev->dev, "%s failure: config access disabled\n",
1005 __func__);
1006 return -EINVAL;
1007 }
1008
1009 vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
1010 if (!vb) {
1011 err = -ENOMEM;
1012 goto out;
1013 }
1014
1015 INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
1016 INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
1017 spin_lock_init(&vb->stop_update_lock);
1018 mutex_init(&vb->balloon_lock);
1019 init_waitqueue_head(&vb->acked);
1020 vb->vdev = vdev;
1021
1022 balloon_devinfo_init(&vb->vb_dev_info);
1023
1024 err = init_vqs(vb);
1025 if (err)
1026 goto out_free_vb;
1027
1028 err = sysfs_create_group(&vdev->dev.kobj,
1029 &virtio_balloon_attribute_group);
1030 if (err)
1031 goto out_del_vqs;
1032
1033 #ifdef CONFIG_BALLOON_COMPACTION
1034 vb->vb_dev_info.migratepage = virtballoon_migratepage;
1035 #endif
1036 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
1037 /*
1038 * There is always one entry reserved for cmd id, so the ring
1039 * size needs to be at least two to report free page hints.
1040 */
1041 if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
1042 err = -ENOSPC;
1043 goto out_remove_sysfs;
1044 }
1045 vb->balloon_wq = alloc_workqueue("balloon-wq",
1046 WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
1047 if (!vb->balloon_wq) {
1048 err = -ENOMEM;
1049 goto out_remove_sysfs;
1050 }
1051 INIT_WORK(&vb->report_free_page_work, report_free_page_func);
1052 vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
1053 vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
1054 VIRTIO_BALLOON_CMD_ID_STOP);
1055 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
1056 VIRTIO_BALLOON_CMD_ID_STOP);
1057 spin_lock_init(&vb->free_page_list_lock);
1058 INIT_LIST_HEAD(&vb->free_page_list);
1059 /*
1060 * We're allowed to reuse any free pages, even if they are
1061 * still to be processed by the host.
1062 */
1063 err = virtio_balloon_register_shrinker(vb);
1064 if (err)
1065 goto out_del_balloon_wq;
1066 }
1067
1068 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
1069 vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
1070 vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
1071 err = register_oom_notifier(&vb->oom_nb);
1072 if (err < 0)
1073 goto out_unregister_shrinker;
1074 }
1075
1076 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
1077 /* Start with poison val of 0 representing general init */
1078 __u32 poison_val = 0;
1079
1080 /*
1081 * Let the hypervisor know that we are expecting a
1082 * specific value to be written back in balloon pages.
1083 *
1084 * If the PAGE_POISON value was larger than a byte we would
1085 * need to byte swap poison_val here to guarantee it is
1086 * little-endian. However for now it is a single byte so we
1087 * can pass it as-is.
1088 */
1089 if (!want_init_on_free())
1090 memset(&poison_val, PAGE_POISON, sizeof(poison_val));
1091
1092 virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
1093 poison_val, &poison_val);
1094 }
1095
1096 vb->pr_dev_info.report = virtballoon_free_page_report;
1097 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
1098 unsigned int capacity;
1099
1100 capacity = virtqueue_get_vring_size(vb->reporting_vq);
1101 if (capacity < PAGE_REPORTING_CAPACITY) {
1102 err = -ENOSPC;
1103 goto out_unregister_oom;
1104 }
1105
1106 /*
1107 * The default page reporting order is @pageblock_order, which
1108 * corresponds to 512MB in size on ARM64 when 64KB base page
1109 * size is used. The page reporting won't be triggered if the
1110 * freeing page can't come up with a free area like that huge.
1111 * So we specify the page reporting order to 5, corresponding
1112 * to 2MB. It helps to avoid THP splitting if 4KB base page
1113 * size is used by host.
1114 *
1115 * Ideally, the page reporting order is selected based on the
1116 * host's base page size. However, it needs more work to report
1117 * that value. The hard-coded order would be fine currently.
1118 */
1119 #if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_64K_PAGES)
1120 vb->pr_dev_info.order = 5;
1121 #endif
1122
1123 err = page_reporting_register(&vb->pr_dev_info);
1124 if (err)
1125 goto out_unregister_oom;
1126 }
1127
1128 spin_lock_init(&vb->wakeup_lock);
1129
1130 /*
1131 * The virtio balloon itself can't wake up the device, but it is
1132 * responsible for processing wakeup events passed up from the transport
1133 * layer. Wakeup sources don't support nesting/chaining calls, so we use
1134 * our own wakeup source to ensure wakeup events are properly handled
1135 * without trampling on the transport layer's wakeup source.
1136 */
1137 device_set_wakeup_capable(&vb->vdev->dev, true);
1138
1139 virtio_device_ready(vdev);
1140
1141 if (towards_target(vb))
1142 virtballoon_changed(vdev);
1143 return 0;
1144
1145 out_unregister_oom:
1146 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
1147 unregister_oom_notifier(&vb->oom_nb);
1148 out_unregister_shrinker:
1149 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
1150 virtio_balloon_unregister_shrinker(vb);
1151 out_del_balloon_wq:
1152 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
1153 destroy_workqueue(vb->balloon_wq);
1154 out_remove_sysfs:
1155 sysfs_remove_group(&vdev->dev.kobj, &virtio_balloon_attribute_group);
1156 out_del_vqs:
1157 vdev->config->del_vqs(vdev);
1158 out_free_vb:
1159 kfree(vb);
1160 out:
1161 return err;
1162 }
1163
static void remove_common(struct virtio_balloon *vb)
1165 {
1166 /* There might be pages left in the balloon: free them. */
1167 while (vb->num_pages)
1168 leak_balloon(vb, vb->num_pages);
1169 update_balloon_size(vb);
1170
1171 /* There might be free pages that are being reported: release them. */
1172 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
1173 return_free_pages_to_mm(vb, ULONG_MAX);
1174
1175 /* Now we reset the device so we can clean up the queues. */
1176 virtio_reset_device(vb->vdev);
1177
1178 vb->vdev->config->del_vqs(vb->vdev);
1179 }
1180
static void virtballoon_remove(struct virtio_device *vdev)
1182 {
1183 struct virtio_balloon *vb = vdev->priv;
1184
1185 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
1186 page_reporting_unregister(&vb->pr_dev_info);
1187 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
1188 unregister_oom_notifier(&vb->oom_nb);
1189 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
1190 virtio_balloon_unregister_shrinker(vb);
1191 spin_lock_irq(&vb->stop_update_lock);
1192 vb->stop_update = true;
1193 spin_unlock_irq(&vb->stop_update_lock);
1194 cancel_work_sync(&vb->update_balloon_size_work);
1195 cancel_work_sync(&vb->update_balloon_stats_work);
1196
1197 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
1198 cancel_work_sync(&vb->report_free_page_work);
1199 destroy_workqueue(vb->balloon_wq);
1200 }
1201
1202 sysfs_remove_group(&vdev->dev.kobj, &virtio_balloon_attribute_group);
1203
1204 remove_common(vb);
1205 kfree(vb);
1206 }
1207
1208 #ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
1210 {
1211 struct virtio_balloon *vb = vdev->priv;
1212
1213 /*
1214 * The workqueue is already frozen by the PM core before this
1215 * function is called.
1216 */
1217 remove_common(vb);
1218 return 0;
1219 }
1220
static int virtballoon_restore(struct virtio_device *vdev)
1222 {
1223 struct virtio_balloon *vb = vdev->priv;
1224 int ret;
1225
1226 ret = init_vqs(vdev->priv);
1227 if (ret)
1228 return ret;
1229
1230 virtio_device_ready(vdev);
1231
1232 if (towards_target(vb))
1233 virtballoon_changed(vdev);
1234 update_balloon_size(vb);
1235 return 0;
1236 }
1237 #endif
1238
static int virtballoon_validate(struct virtio_device *vdev)
1240 {
1241 if (WARN_ON(page_relinquish_disallowed()))
1242 return -EINVAL;
1243
1244 /*
1245 * Inform the hypervisor that our pages are poisoned or
1246 * initialized. If we cannot do that then we should disable
1247 * page reporting as it could potentially change the contents
1248 * of our free pages.
1249 */
1250 if (!want_init_on_free() && !page_poisoning_enabled_static())
1251 __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
1252 else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
1253 __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);
1254
1255 return 0;
1256 }
1257
1258 static unsigned int features[] = {
1259 VIRTIO_BALLOON_F_MUST_TELL_HOST,
1260 VIRTIO_BALLOON_F_STATS_VQ,
1261 VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
1262 VIRTIO_BALLOON_F_FREE_PAGE_HINT,
1263 VIRTIO_BALLOON_F_PAGE_POISON,
1264 VIRTIO_BALLOON_F_REPORTING,
1265 };
1266
1267 static struct virtio_driver virtio_balloon_driver = {
1268 .feature_table = features,
1269 .feature_table_size = ARRAY_SIZE(features),
1270 .driver.name = KBUILD_MODNAME,
1271 .id_table = id_table,
1272 .validate = virtballoon_validate,
1273 .probe = virtballoon_probe,
1274 .remove = virtballoon_remove,
1275 .config_changed = virtballoon_changed,
1276 #ifdef CONFIG_PM_SLEEP
1277 .freeze = virtballoon_freeze,
1278 .restore = virtballoon_restore,
1279 #endif
1280 };
1281
1282 module_virtio_driver(virtio_balloon_driver);
1283 MODULE_DEVICE_TABLE(virtio, id_table);
1284 MODULE_DESCRIPTION("Virtio balloon driver");
1285 MODULE_LICENSE("GPL");
1286