// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */

#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>

#include <acpi/acpi_numa.h>

static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
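/*
 * With 0644 permissions, the parameter can also be toggled at runtime via
 * /sys/module/virtio_mem/parameters/unplug_online.
 */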

enum virtio_mem_mb_state {
        /* Unplugged, not added to Linux. Can be reused later. */
        VIRTIO_MEM_MB_STATE_UNUSED = 0,
        /* (Partially) plugged, not added to Linux. Error on add_memory(). */
        VIRTIO_MEM_MB_STATE_PLUGGED,
        /* Fully plugged, fully added to Linux, offline. */
        VIRTIO_MEM_MB_STATE_OFFLINE,
        /* Partially plugged, fully added to Linux, offline. */
        VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
        /* Fully plugged, fully added to Linux, online. */
        VIRTIO_MEM_MB_STATE_ONLINE,
        /* Partially plugged, fully added to Linux, online. */
        VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
        VIRTIO_MEM_MB_STATE_COUNT
};

struct virtio_mem {
        struct virtio_device *vdev;

        /* We might first have to unplug all memory when starting up. */
        bool unplug_all_required;

        /* Workqueue that processes the plug/unplug requests. */
        struct work_struct wq;
        atomic_t config_changed;

        /* Virtqueue for guest->host requests. */
        struct virtqueue *vq;

        /* Wait for a host response to a guest request. */
        wait_queue_head_t host_resp;

        /* Space for one guest request and the host response. */
        struct virtio_mem_req req;
        struct virtio_mem_resp resp;

        /* The current size of the device. */
        uint64_t plugged_size;
        /* The requested size of the device. */
        uint64_t requested_size;

        /* The device block size (for communicating with the device). */
        uint64_t device_block_size;
        /* The translated node id. NUMA_NO_NODE in case not specified. */
        int nid;
        /* Physical start address of the memory region. */
        uint64_t addr;
        /* Maximum region size in bytes. */
        uint64_t region_size;

        /* The subblock size. */
        uint64_t subblock_size;
        /* The number of subblocks per memory block. */
        uint32_t nb_sb_per_mb;

        /* Id of the first memory block of this device. */
        unsigned long first_mb_id;
        /* Id of the last memory block of this device. */
        unsigned long last_mb_id;
        /* Id of the last usable memory block of this device. */
        unsigned long last_usable_mb_id;
        /* Id of the next memory block to prepare when needed. */
        unsigned long next_mb_id;

        /* The parent resource for all memory added via this device. */
        struct resource *parent_resource;
        /*
         * Copy of "System RAM (virtio_mem)" to be used for
         * add_memory_driver_managed().
         */
        const char *resource_name;

        /* Summary of all memory block states. */
        unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
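        /*
         * Stop plugging new memory blocks while this many added blocks are
         * still offline, to avoid running out of memory; see
         * virtio_mem_too_many_mb_offline().
         */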
#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD 10

        /*
         * One byte state per memory block.
         *
         * Allocated via vmalloc(). When preparing new blocks, resized
         * (alloc+copy+free) when needed (crossing pages with the next mb).
         *
         * With 128MB memory blocks, we have states for 512GB of memory in one
         * page.
         */
        uint8_t *mb_state;

        /*
         * $nb_sb_per_mb bit per memory block. Handled similarly to mb_state.
         *
         * With 4MB subblocks, we manage 128GB of memory in one page.
         */
        unsigned long *sb_bitmap;

        /*
         * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap.
         *
         * When this lock is held the pointers can't change, ONLINE and
         * OFFLINE blocks can't change the state and no subblocks will get
         * plugged/unplugged.
         */
        struct mutex hotplug_mutex;
        bool hotplug_active;

        /* An error occurred that we cannot handle - stop processing requests. */
        bool broken;

        /* The driver is being removed. */
        spinlock_t removal_lock;
        bool removing;

        /* Timer for retrying to plug/unplug memory. */
        struct hrtimer retry_timer;
        unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS 50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS 300000
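        /*
         * Retrying backs off exponentially: the timer callback doubles
         * retry_timer_ms up to the maximum, and a successful workqueue run
         * resets it to the minimum.
         */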

        /* Memory notifier (online/offline events). */
        struct notifier_block memory_notifier;

        /* Next device in the list of virtio-mem devices. */
        struct list_head next;
};

/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);

static void virtio_mem_online_page_cb(struct page *page, unsigned int order);

/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */
static int register_virtio_mem_device(struct virtio_mem *vm)
{
        int rc = 0;

        /* First device registers the callback. */
        mutex_lock(&virtio_mem_mutex);
        if (list_empty(&virtio_mem_devices))
                rc = set_online_page_callback(&virtio_mem_online_page_cb);
        if (!rc)
                list_add_rcu(&vm->next, &virtio_mem_devices);
        mutex_unlock(&virtio_mem_mutex);

        return rc;
}

/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
        /* Last device unregisters the callback. */
        mutex_lock(&virtio_mem_mutex);
        list_del_rcu(&vm->next);
        if (list_empty(&virtio_mem_devices))
                restore_online_page_callback(&virtio_mem_online_page_cb);
        mutex_unlock(&virtio_mem_mutex);

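        /* Wait until no online_page callback can still see this device. */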
        synchronize_rcu();
}

/*
 * Calculate the memory block id of a given address.
 */
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
        return addr / memory_block_size_bytes();
}

/*
 * Calculate the physical start address of a given memory block id.
 */
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
        return mb_id * memory_block_size_bytes();
}

/*
 * Calculate the subblock id of a given address.
 */
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
                                              unsigned long addr)
{
        const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
        const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);

        return (addr - mb_addr) / vm->subblock_size;
}

/*
 * Set the state of a memory block, taking care of the state counter.
 */
static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
                                    enum virtio_mem_mb_state state)
{
        const unsigned long idx = mb_id - vm->first_mb_id;
        enum virtio_mem_mb_state old_state;

        old_state = vm->mb_state[idx];
        vm->mb_state[idx] = state;

        BUG_ON(vm->nb_mb_state[old_state] == 0);
        vm->nb_mb_state[old_state]--;
        vm->nb_mb_state[state]++;
}

/*
 * Get the state of a memory block.
 */
static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
                                                        unsigned long mb_id)
{
        const unsigned long idx = mb_id - vm->first_mb_id;

        return vm->mb_state[idx];
}

/*
 * Prepare the state array for the next memory block.
 */
static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
{
        unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
        unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
        int old_pages = PFN_UP(old_bytes);
        int new_pages = PFN_UP(new_bytes);
        uint8_t *new_mb_state;

        if (vm->mb_state && old_pages == new_pages)
                return 0;

        new_mb_state = vzalloc(new_pages * PAGE_SIZE);
        if (!new_mb_state)
                return -ENOMEM;

        mutex_lock(&vm->hotplug_mutex);
        if (vm->mb_state)
                memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
        vfree(vm->mb_state);
        vm->mb_state = new_mb_state;
        mutex_unlock(&vm->hotplug_mutex);

        return 0;
}

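/*
 * Iterate over all memory blocks in a given state (forwards, or backwards
 * for the _rev variant). The per-state counter allows the loops to bail out
 * early once no blocks in that state are left.
 */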
#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \
        for (_mb_id = _vm->first_mb_id; \
             _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \
             _mb_id++) \
                if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)

#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \
        for (_mb_id = _vm->next_mb_id - 1; \
             _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \
             _mb_id--) \
                if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)

/*
 * Mark all selected subblocks plugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
                                         unsigned long mb_id, int sb_id,
                                         int count)
{
        const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

        __bitmap_set(vm->sb_bitmap, bit, count);
}

/*
 * Mark all selected subblocks unplugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
                                           unsigned long mb_id, int sb_id,
                                           int count)
{
        const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

        __bitmap_clear(vm->sb_bitmap, bit, count);
}

/*
 * Test if all selected subblocks are plugged.
 */
static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
                                          unsigned long mb_id, int sb_id,
                                          int count)
{
        const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

        if (count == 1)
                return test_bit(bit, vm->sb_bitmap);

        /* TODO: Helper similar to bitmap_set() */
        return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
               bit + count;
}

/*
 * Test if all selected subblocks are unplugged.
 */
static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
                                            unsigned long mb_id, int sb_id,
                                            int count)
{
        const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

        /* TODO: Helper similar to bitmap_set() */
        return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
}

/*
 * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
 * none.
 */
static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
                                            unsigned long mb_id)
{
        const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;

        return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
               bit;
}

/*
 * Prepare the subblock bitmap for the next memory block.
 */
static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
{
        const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
        const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
        const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
        int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
        int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
        unsigned long *new_sb_bitmap, *old_sb_bitmap;

        if (vm->sb_bitmap && old_pages == new_pages)
                return 0;

        new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
        if (!new_sb_bitmap)
                return -ENOMEM;

        mutex_lock(&vm->hotplug_mutex);
        if (vm->sb_bitmap)
                memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);

        old_sb_bitmap = vm->sb_bitmap;
        vm->sb_bitmap = new_sb_bitmap;
        mutex_unlock(&vm->hotplug_mutex);

        vfree(old_sb_bitmap);
        return 0;
}

/*
 * Try to add a memory block to Linux. This will usually only fail
 * if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
{
        const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
        int nid = vm->nid;

        if (nid == NUMA_NO_NODE)
                nid = memory_add_physaddr_to_nid(addr);

        /*
         * When force-unloading the driver and we still have memory added to
         * Linux, the resource name has to stay.
         */
        if (!vm->resource_name) {
                vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
                                                  GFP_KERNEL);
                if (!vm->resource_name)
                        return -ENOMEM;
        }

        dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
        return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
                                         vm->resource_name,
                                         MEMHP_MERGE_RESOURCE);
}

/*
 * Try to remove a memory block from Linux. Will only fail if the memory block
 * is not offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
{
        const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
        int nid = vm->nid;

        if (nid == NUMA_NO_NODE)
                nid = memory_add_physaddr_to_nid(addr);

        dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
        return remove_memory(nid, addr, memory_block_size_bytes());
}

/*
 * Try to offline and remove a memory block from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
                                            unsigned long mb_id)
{
        const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
        int nid = vm->nid;

        if (nid == NUMA_NO_NODE)
                nid = memory_add_physaddr_to_nid(addr);

        dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
                mb_id);
        return offline_and_remove_memory(nid, addr, memory_block_size_bytes());
}

/*
 * Trigger the workqueue so the device can perform its magic.
 */
static void virtio_mem_retry(struct virtio_mem *vm)
{
        unsigned long flags;

        spin_lock_irqsave(&vm->removal_lock, flags);
        if (!vm->removing)
                queue_work(system_freezable_wq, &vm->wq);
        spin_unlock_irqrestore(&vm->removal_lock, flags);
}

static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
        int node = NUMA_NO_NODE;

#if defined(CONFIG_ACPI_NUMA)
        if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
                node = pxm_to_node(node_id);
#endif
        return node;
}

/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * from (notifier) callbacks without locking.
 */
static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
                                      unsigned long start, unsigned long size)
{
        unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
        unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
                                memory_block_size_bytes();

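        /* Check [start, start + size) against [dev_start, dev_end). */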
        return start < dev_end && dev_start < start + size;
}

/*
 * Test if a virtio-mem device owns a memory block. Can be called from
 * (notifier) callbacks without locking.
 */
static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
{
        return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
}

static int virtio_mem_notify_going_online(struct virtio_mem *vm,
                                          unsigned long mb_id)
{
        switch (virtio_mem_mb_get_state(vm, mb_id)) {
        case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
        case VIRTIO_MEM_MB_STATE_OFFLINE:
                return NOTIFY_OK;
        default:
                break;
        }
        dev_warn_ratelimited(&vm->vdev->dev,
                             "memory block onlining denied\n");
        return NOTIFY_BAD;
}

static void virtio_mem_notify_offline(struct virtio_mem *vm,
                                      unsigned long mb_id)
{
        switch (virtio_mem_mb_get_state(vm, mb_id)) {
        case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL:
                virtio_mem_mb_set_state(vm, mb_id,
                                        VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
                break;
        case VIRTIO_MEM_MB_STATE_ONLINE:
                virtio_mem_mb_set_state(vm, mb_id,
                                        VIRTIO_MEM_MB_STATE_OFFLINE);
                break;
        default:
                BUG();
                break;
        }

        /*
         * Trigger the workqueue, maybe we can now unplug memory. Also,
         * when we offline and remove a memory block, this will re-trigger
         * us immediately - which is often nice because the removal of
         * the memory block (e.g., memmap) might have freed up memory
         * on other memory blocks we manage.
         */
        virtio_mem_retry(vm);
}

static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id)
{
        unsigned long nb_offline;

        switch (virtio_mem_mb_get_state(vm, mb_id)) {
        case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
                virtio_mem_mb_set_state(vm, mb_id,
                                        VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
                break;
        case VIRTIO_MEM_MB_STATE_OFFLINE:
                virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_ONLINE);
                break;
        default:
                BUG();
                break;
        }
        nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
                     vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];

        /* see if we can add new blocks now that we onlined one block */
        if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1)
                virtio_mem_retry(vm);
}

static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
                                            unsigned long mb_id)
{
        const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
        struct page *page;
        unsigned long pfn;
        int sb_id, i;

        for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
                if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
                        continue;
                /*
                 * Drop our reference to the pages so the memory can get
                 * offlined and add the unplugged pages to the managed
                 * page counters (so offlining code can correctly subtract
                 * them again).
                 */
                pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
                               sb_id * vm->subblock_size);
                adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(pfn + i);
                        if (WARN_ON(!page_ref_dec_and_test(page)))
                                dump_page(page, "unplugged page referenced");
                }
        }
}

static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
                                             unsigned long mb_id)
{
        const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
        unsigned long pfn;
        int sb_id, i;

        for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
                if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
                        continue;
                /*
                 * Get the reference we dropped when going offline and
                 * subtract the unplugged pages from the managed page
                 * counters.
                 */
                pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
                               sb_id * vm->subblock_size);
                adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
                for (i = 0; i < nr_pages; i++)
                        page_ref_inc(pfn_to_page(pfn + i));
        }
}

/*
 * This callback will either be called synchronously from add_memory() or
 * asynchronously (e.g., triggered via user space). We have to be careful
 * with locking when calling add_memory().
 */
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
                                         unsigned long action, void *arg)
{
        struct virtio_mem *vm = container_of(nb, struct virtio_mem,
                                             memory_notifier);
        struct memory_notify *mhp = arg;
        const unsigned long start = PFN_PHYS(mhp->start_pfn);
        const unsigned long size = PFN_PHYS(mhp->nr_pages);
        const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
        int rc = NOTIFY_OK;

        if (!virtio_mem_overlaps_range(vm, start, size))
                return NOTIFY_DONE;

        /*
         * Memory is onlined/offlined in memory block granularity. We cannot
         * cross virtio-mem device boundaries and memory block boundaries. Bail
         * out if this ever changes.
         */
        if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
                         !IS_ALIGNED(start, memory_block_size_bytes())))
                return NOTIFY_BAD;

        /*
         * Avoid circular locking lockdep warnings. We lock the mutex
         * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
         * blocking_notifier_call_chain() has its own lock, which gets unlocked
         * between both notifier calls and will bail out. False positive.
         */
        lockdep_off();

        switch (action) {
        case MEM_GOING_OFFLINE:
                mutex_lock(&vm->hotplug_mutex);
                if (vm->removing) {
                        rc = notifier_from_errno(-EBUSY);
                        mutex_unlock(&vm->hotplug_mutex);
                        break;
                }
                vm->hotplug_active = true;
                virtio_mem_notify_going_offline(vm, mb_id);
                break;
        case MEM_GOING_ONLINE:
                mutex_lock(&vm->hotplug_mutex);
                if (vm->removing) {
                        rc = notifier_from_errno(-EBUSY);
                        mutex_unlock(&vm->hotplug_mutex);
                        break;
                }
                vm->hotplug_active = true;
                rc = virtio_mem_notify_going_online(vm, mb_id);
                break;
        case MEM_OFFLINE:
                virtio_mem_notify_offline(vm, mb_id);
                vm->hotplug_active = false;
                mutex_unlock(&vm->hotplug_mutex);
                break;
        case MEM_ONLINE:
                virtio_mem_notify_online(vm, mb_id);
                vm->hotplug_active = false;
                mutex_unlock(&vm->hotplug_mutex);
                break;
        case MEM_CANCEL_OFFLINE:
                if (!vm->hotplug_active)
                        break;
                virtio_mem_notify_cancel_offline(vm, mb_id);
                vm->hotplug_active = false;
                mutex_unlock(&vm->hotplug_mutex);
                break;
        case MEM_CANCEL_ONLINE:
                if (!vm->hotplug_active)
                        break;
                vm->hotplug_active = false;
                mutex_unlock(&vm->hotplug_mutex);
                break;
        default:
                break;
        }

        lockdep_on();

        return rc;
}

/*
 * Set a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PageDirty().
 */
static void virtio_mem_set_fake_offline(unsigned long pfn,
                                        unsigned int nr_pages, bool onlined)
{
        for (; nr_pages--; pfn++) {
                struct page *page = pfn_to_page(pfn);

                __SetPageOffline(page);
                if (!onlined) {
                        SetPageDirty(page);
                        /* FIXME: remove after cleanups */
                        ClearPageReserved(page);
                }
        }
}

/*
 * Clear PG_offline from a range of pages. If the pages were never onlined
 * (via generic_online_page()), clear PageDirty().
 */
static void virtio_mem_clear_fake_offline(unsigned long pfn,
                                          unsigned int nr_pages, bool onlined)
{
        for (; nr_pages--; pfn++) {
                struct page *page = pfn_to_page(pfn);

                __ClearPageOffline(page);
                if (!onlined)
                        ClearPageDirty(page);
        }
}

/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */
static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
{
        const int order = MAX_ORDER - 1;
        int i;

        /*
         * We are always called with subblock granularity, which is at least
         * aligned to MAX_ORDER - 1.
         */
        for (i = 0; i < nr_pages; i += 1 << order) {
                struct page *page = pfn_to_page(pfn + i);

                /*
                 * If the page is PageDirty(), it was kept fake-offline when
                 * onlining the memory block. Otherwise, it was allocated
                 * using alloc_contig_range(). All pages in a subblock are
                 * alike.
                 */
                if (PageDirty(page)) {
                        virtio_mem_clear_fake_offline(pfn + i, 1 << order,
                                                      false);
                        generic_online_page(page, order);
                } else {
                        virtio_mem_clear_fake_offline(pfn + i, 1 << order,
                                                      true);
                        free_contig_range(pfn + i, 1 << order);
                        adjust_managed_page_count(page, 1 << order);
                }
        }
}

static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
        const unsigned long addr = page_to_phys(page);
        const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
        struct virtio_mem *vm;
        int sb_id;

        /*
         * We exploit here that subblocks have at least MAX_ORDER - 1
         * size/alignment and that this callback is called with such a
         * size/alignment. So we cannot cross subblocks and therefore
         * also not memory blocks.
         */
        rcu_read_lock();
        list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
                if (!virtio_mem_owned_mb(vm, mb_id))
                        continue;

                sb_id = virtio_mem_phys_to_sb_id(vm, addr);
                /*
                 * If plugged, online the pages, otherwise, set them fake
                 * offline (PageOffline).
                 */
                if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
                        generic_online_page(page, order);
                else
                        virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
                                                    false);
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        /* not virtio-mem memory, but e.g., a DIMM. online it */
        generic_online_page(page, order);
}

static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
                                        const struct virtio_mem_req *req)
{
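        /*
         * Requests are only issued from the driver's workqueue, so they are
         * naturally serialized and the single req/resp buffer in *vm
         * suffices.
         */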
        struct scatterlist *sgs[2], sg_req, sg_resp;
        unsigned int len;
        int rc;

        /* don't use the request residing on the stack (vaddr) */
        vm->req = *req;

        /* out: buffer for request */
        sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
        sgs[0] = &sg_req;

        /* in: buffer for response */
        sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
        sgs[1] = &sg_resp;

        rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
        if (rc < 0)
                return rc;

        virtqueue_kick(vm->vq);

        /* wait for a response */
        wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));

        return virtio16_to_cpu(vm->vdev, vm->resp.type);
}

static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
                                        uint64_t size)
{
        const uint64_t nb_vm_blocks = size / vm->device_block_size;
        const struct virtio_mem_req req = {
                .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
                .u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
                .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
        };

        if (atomic_read(&vm->config_changed))
                return -EAGAIN;

        switch (virtio_mem_send_request(vm, &req)) {
        case VIRTIO_MEM_RESP_ACK:
                vm->plugged_size += size;
                return 0;
        case VIRTIO_MEM_RESP_NACK:
                return -EAGAIN;
        case VIRTIO_MEM_RESP_BUSY:
                return -ETXTBSY;
        case VIRTIO_MEM_RESP_ERROR:
                return -EINVAL;
        default:
                return -ENOMEM;
        }
}

static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
                                          uint64_t size)
{
        const uint64_t nb_vm_blocks = size / vm->device_block_size;
        const struct virtio_mem_req req = {
                .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
                .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
                .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
        };

        if (atomic_read(&vm->config_changed))
                return -EAGAIN;

        switch (virtio_mem_send_request(vm, &req)) {
        case VIRTIO_MEM_RESP_ACK:
                vm->plugged_size -= size;
                return 0;
        case VIRTIO_MEM_RESP_BUSY:
                return -ETXTBSY;
        case VIRTIO_MEM_RESP_ERROR:
                return -EINVAL;
        default:
                return -ENOMEM;
        }
}

static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
        const struct virtio_mem_req req = {
                .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
        };

        switch (virtio_mem_send_request(vm, &req)) {
        case VIRTIO_MEM_RESP_ACK:
                vm->unplug_all_required = false;
                vm->plugged_size = 0;
                /* usable region might have shrunk */
                atomic_set(&vm->config_changed, 1);
                return 0;
        case VIRTIO_MEM_RESP_BUSY:
                return -ETXTBSY;
        default:
                return -ENOMEM;
        }
}

/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
                                 int sb_id, int count)
{
        const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
                              sb_id * vm->subblock_size;
        const uint64_t size = count * vm->subblock_size;
        int rc;

        dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
                sb_id, sb_id + count - 1);

        rc = virtio_mem_send_plug_request(vm, addr, size);
        if (!rc)
                virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
        return rc;
}

/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
                                   int sb_id, int count)
{
        const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
                              sb_id * vm->subblock_size;
        const uint64_t size = count * vm->subblock_size;
        int rc;

        dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
                mb_id, sb_id, sb_id + count - 1);

        rc = virtio_mem_send_unplug_request(vm, addr, size);
        if (!rc)
                virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
        return rc;
}

/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
 * memory block. Will fail if any subblock cannot get unplugged (instead of
 * skipping it).
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
                                       unsigned long mb_id, uint64_t *nb_sb)
{
        int sb_id, count;
        int rc;

        sb_id = vm->nb_sb_per_mb - 1;
        while (*nb_sb) {
                /* Find the next candidate subblock */
                while (sb_id >= 0 &&
                       virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
                        sb_id--;
                if (sb_id < 0)
                        break;
                /* Try to unplug multiple subblocks at a time */
                count = 1;
                while (count < *nb_sb && sb_id > 0 &&
                       virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
                        count++;
                        sb_id--;
                }

                rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
                if (rc)
                        return rc;
                *nb_sb -= count;
                sb_id--;
        }

        return 0;
}

/*
 * Unplug all plugged subblocks of an offline or not-added memory block.
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
{
        uint64_t nb_sb = vm->nb_sb_per_mb;

        return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb);
}

/*
 * Prepare tracking data for the next memory block.
 */
static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
                                      unsigned long *mb_id)
{
        int rc;

        if (vm->next_mb_id > vm->last_usable_mb_id)
                return -ENOSPC;

        /* Resize the state array if required. */
        rc = virtio_mem_mb_state_prepare_next_mb(vm);
        if (rc)
                return rc;

        /* Resize the subblock bitmap if required. */
        rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
        if (rc)
                return rc;

        vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
        *mb_id = vm->next_mb_id++;
        return 0;
}

/*
 * Don't add too many blocks that are not onlined yet to avoid running OOM.
 */
static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
{
        unsigned long nb_offline;

        nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
                     vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
        return nb_offline >= VIRTIO_MEM_NB_OFFLINE_THRESHOLD;
}

/*
 * Try to plug the desired number of subblocks and add the memory block
 * to Linux.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
                                      unsigned long mb_id,
                                      uint64_t *nb_sb)
{
        const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
        int rc, rc2;

        if (WARN_ON_ONCE(!count))
                return -EINVAL;

        /*
         * Plug the requested number of subblocks before adding it to Linux,
         * so that onlining will directly online all plugged subblocks.
         */
        rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
        if (rc)
                return rc;

        /*
         * Mark the block properly offline before adding it to Linux,
         * so the memory notifiers will find the block in the right state.
         */
        if (count == vm->nb_sb_per_mb)
                virtio_mem_mb_set_state(vm, mb_id,
                                        VIRTIO_MEM_MB_STATE_OFFLINE);
        else
                virtio_mem_mb_set_state(vm, mb_id,
                                        VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);

        /* Add the memory block to Linux - if that fails, try to unplug. */
        rc = virtio_mem_mb_add(vm, mb_id);
        if (rc) {
                enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED;

                dev_err(&vm->vdev->dev,
                        "adding memory block %lu failed with %d\n", mb_id, rc);
                rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);

                /*
                 * TODO: Linux MM does not properly clean up yet in all cases
                 * where adding of memory failed - especially on -ENOMEM.
                 */
                if (rc2)
                        new_state = VIRTIO_MEM_MB_STATE_PLUGGED;
                virtio_mem_mb_set_state(vm, mb_id, new_state);
                return rc;
        }

        *nb_sb -= count;
        return 0;
}

/*
 * Try to plug the desired number of subblocks of a memory block that
 * is already added to Linux.
 *
 * Will modify the state of the memory block.
 *
 * Note: Can fail after some subblocks were successfully plugged.
 */
static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
                                     uint64_t *nb_sb, bool online)
{
        unsigned long pfn, nr_pages;
        int sb_id, count;
        int rc;

        if (WARN_ON_ONCE(!*nb_sb))
                return -EINVAL;

        while (*nb_sb) {
                sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
                if (sb_id >= vm->nb_sb_per_mb)
                        break;
                count = 1;
                while (count < *nb_sb &&
                       sb_id + count < vm->nb_sb_per_mb &&
                       !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
                                                      1))
                        count++;

                rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
                if (rc)
                        return rc;
                *nb_sb -= count;
                if (!online)
                        continue;

                /* fake-online the pages if the memory block is online */
                pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
                               sb_id * vm->subblock_size);
                nr_pages = PFN_DOWN(count * vm->subblock_size);
                virtio_mem_fake_online(pfn, nr_pages);
        }

        if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
                if (online)
                        virtio_mem_mb_set_state(vm, mb_id,
                                                VIRTIO_MEM_MB_STATE_ONLINE);
                else
                        virtio_mem_mb_set_state(vm, mb_id,
                                                VIRTIO_MEM_MB_STATE_OFFLINE);
        }

        return 0;
}

/*
 * Try to plug the requested amount of memory.
 */
static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
{
        uint64_t nb_sb = diff / vm->subblock_size;
        unsigned long mb_id;
        int rc;

        if (!nb_sb)
                return 0;

        /* Don't race with onlining/offlining */
        mutex_lock(&vm->hotplug_mutex);

        /* Try to plug subblocks of partially plugged online blocks. */
        virtio_mem_for_each_mb_state(vm, mb_id,
                                     VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
                rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
                if (rc || !nb_sb)
                        goto out_unlock;
                cond_resched();
        }

        /* Try to plug subblocks of partially plugged offline blocks. */
        virtio_mem_for_each_mb_state(vm, mb_id,
                                     VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
                rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
                if (rc || !nb_sb)
                        goto out_unlock;
                cond_resched();
        }

        /*
         * We won't be working on online/offline memory blocks from this point,
         * so we can't race with memory onlining/offlining. Drop the mutex.
         */
        mutex_unlock(&vm->hotplug_mutex);

        /* Try to plug and add unused blocks */
        virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
                if (virtio_mem_too_many_mb_offline(vm))
                        return -ENOSPC;

                rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
                if (rc || !nb_sb)
                        return rc;
                cond_resched();
        }

        /* Try to prepare, plug and add new blocks */
        while (nb_sb) {
                if (virtio_mem_too_many_mb_offline(vm))
                        return -ENOSPC;

                rc = virtio_mem_prepare_next_mb(vm, &mb_id);
                if (rc)
                        return rc;
                rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
                if (rc)
                        return rc;
                cond_resched();
        }

        return 0;
out_unlock:
        mutex_unlock(&vm->hotplug_mutex);
        return rc;
}

/*
 * Unplug the desired number of plugged subblocks of an offline memory block.
 * Will fail if any subblock cannot get unplugged (instead of skipping it).
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged.
 */
static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
                                               unsigned long mb_id,
                                               uint64_t *nb_sb)
{
        int rc;

        rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);

        /* some subblocks might have been unplugged even on failure */
        if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
                virtio_mem_mb_set_state(vm, mb_id,
                                        VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
        if (rc)
                return rc;

        if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
                /*
                 * Remove the block from Linux - this should never fail.
                 * Hinder the block from getting onlined by marking it
                 * unplugged. Temporarily drop the mutex, so
                 * any pending GOING_ONLINE requests can be serviced/rejected.
                 */
                virtio_mem_mb_set_state(vm, mb_id,
                                        VIRTIO_MEM_MB_STATE_UNUSED);

                mutex_unlock(&vm->hotplug_mutex);
                rc = virtio_mem_mb_remove(vm, mb_id);
                BUG_ON(rc);
                mutex_lock(&vm->hotplug_mutex);
        }
        return 0;
}

/*
 * Unplug the given plugged subblocks of an online memory block.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
                                          unsigned long mb_id, int sb_id,
                                          int count)
{
        const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
        unsigned long start_pfn;
        int rc;

        start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
                             sb_id * vm->subblock_size);
        rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
                                MIGRATE_MOVABLE, GFP_KERNEL);
        if (rc == -ENOMEM)
                /* whoops, out of memory */
                return rc;
        if (rc)
                return -EBUSY;

        /* Mark it as fake-offline before unplugging it */
        virtio_mem_set_fake_offline(start_pfn, nr_pages, true);
        adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);

        /* Try to unplug the allocated memory */
        rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
        if (rc) {
                /* Return the memory to the buddy. */
                virtio_mem_fake_online(start_pfn, nr_pages);
                return rc;
        }

        virtio_mem_mb_set_state(vm, mb_id,
                                VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
        return 0;
}

/*
 * Unplug the desired number of plugged subblocks of an online memory block.
 * Will skip subblocks that are busy.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged. Can
 *       return 0 even if subblocks were busy and could not get unplugged.
 */
static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
                                              unsigned long mb_id,
                                              uint64_t *nb_sb)
{
        int rc, sb_id;

        /* If possible, try to unplug the complete block in one shot. */
        if (*nb_sb >= vm->nb_sb_per_mb &&
            virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
                rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
                                                    vm->nb_sb_per_mb);
                if (!rc) {
                        *nb_sb -= vm->nb_sb_per_mb;
                        goto unplugged;
                } else if (rc != -EBUSY)
                        return rc;
        }

        /* Fallback to single subblocks. */
        for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
                /* Find the next candidate subblock */
                while (sb_id >= 0 &&
                       !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
                        sb_id--;
                if (sb_id < 0)
                        break;

                rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
                if (rc == -EBUSY)
                        continue;
                else if (rc)
                        return rc;
                *nb_sb -= 1;
        }

unplugged:
        /*
         * Once all subblocks of a memory block were unplugged, offline and
         * remove it. This will usually not fail, as no memory is in use
         * anymore - however some other notifiers might NACK the request.
         */
        if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
                mutex_unlock(&vm->hotplug_mutex);
                rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
                mutex_lock(&vm->hotplug_mutex);
                if (!rc)
                        virtio_mem_mb_set_state(vm, mb_id,
                                                VIRTIO_MEM_MB_STATE_UNUSED);
        }

        return 0;
}

/*
 * Try to unplug the requested amount of memory.
 */
static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
        uint64_t nb_sb = diff / vm->subblock_size;
        unsigned long mb_id;
        int rc;

        if (!nb_sb)
                return 0;

        /*
         * We'll drop the mutex a couple of times when it is safe to do so.
         * This might result in some blocks switching the state (online/offline)
         * and we could miss them in this run - we will retry again later.
         */
        mutex_lock(&vm->hotplug_mutex);

        /* Try to unplug subblocks of partially plugged offline blocks. */
        virtio_mem_for_each_mb_state_rev(vm, mb_id,
                                         VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
                rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
                                                         &nb_sb);
                if (rc || !nb_sb)
                        goto out_unlock;
                cond_resched();
        }

        /* Try to unplug subblocks of plugged offline blocks. */
        virtio_mem_for_each_mb_state_rev(vm, mb_id,
                                         VIRTIO_MEM_MB_STATE_OFFLINE) {
                rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
                                                         &nb_sb);
                if (rc || !nb_sb)
                        goto out_unlock;
                cond_resched();
        }

        if (!unplug_online) {
                mutex_unlock(&vm->hotplug_mutex);
                return 0;
        }

        /* Try to unplug subblocks of partially plugged online blocks. */
        virtio_mem_for_each_mb_state_rev(vm, mb_id,
                                         VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
                rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
                                                        &nb_sb);
                if (rc || !nb_sb)
                        goto out_unlock;
                mutex_unlock(&vm->hotplug_mutex);
                cond_resched();
                mutex_lock(&vm->hotplug_mutex);
        }

        /* Try to unplug subblocks of plugged online blocks. */
        virtio_mem_for_each_mb_state_rev(vm, mb_id,
                                         VIRTIO_MEM_MB_STATE_ONLINE) {
                rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
                                                        &nb_sb);
                if (rc || !nb_sb)
                        goto out_unlock;
                mutex_unlock(&vm->hotplug_mutex);
                cond_resched();
                mutex_lock(&vm->hotplug_mutex);
        }

        mutex_unlock(&vm->hotplug_mutex);
        return nb_sb ? -EBUSY : 0;
out_unlock:
        mutex_unlock(&vm->hotplug_mutex);
        return rc;
}

/*
 * Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy.
 */
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
        unsigned long mb_id;
        int rc;

        virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
                rc = virtio_mem_mb_unplug(vm, mb_id);
                if (rc)
                        return rc;
                virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
        }

        return 0;
}

/*
 * Update all parts of the config that could have changed.
 */
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
        const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
        uint64_t new_plugged_size, usable_region_size, end_addr;

        /* the plugged_size is just a reflection of what _we_ did previously */
        virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
                        &new_plugged_size);
        if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
                vm->plugged_size = new_plugged_size;

        /* calculate the last usable memory block id */
        virtio_cread_le(vm->vdev, struct virtio_mem_config,
                        usable_region_size, &usable_region_size);
        end_addr = vm->addr + usable_region_size;
        end_addr = min(end_addr, phys_limit);
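        /* The last memory block that still fits completely below end_addr. */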
        vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;

        /* see if there is a request to change the size */
        virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
                        &vm->requested_size);

        dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
        dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}

/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
        struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
        uint64_t diff;
        int rc;

        hrtimer_cancel(&vm->retry_timer);

        if (vm->broken)
                return;

retry:
        rc = 0;

        /* Make sure we start with a clean state if there are leftovers. */
        if (unlikely(vm->unplug_all_required))
                rc = virtio_mem_send_unplug_all_request(vm);

        if (atomic_read(&vm->config_changed)) {
                atomic_set(&vm->config_changed, 0);
                virtio_mem_refresh_config(vm);
        }

        /* Unplug any leftovers from previous runs */
        if (!rc)
                rc = virtio_mem_unplug_pending_mb(vm);

        if (!rc && vm->requested_size != vm->plugged_size) {
                if (vm->requested_size > vm->plugged_size) {
                        diff = vm->requested_size - vm->plugged_size;
                        rc = virtio_mem_plug_request(vm, diff);
                } else {
                        diff = vm->plugged_size - vm->requested_size;
                        rc = virtio_mem_unplug_request(vm, diff);
                }
        }

        switch (rc) {
        case 0:
                vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
                break;
        case -ENOSPC:
                /*
                 * We cannot add any more memory (alignment, physical limit)
                 * or we have too many offline memory blocks.
                 */
                break;
        case -ETXTBSY:
                /*
                 * The hypervisor cannot process our request right now
                 * (e.g., out of memory, migrating).
                 */
        case -EBUSY:
                /*
                 * We cannot free up any memory to unplug it (all plugged memory
                 * is busy).
                 */
        case -ENOMEM:
                /* Out of memory, try again later. */
                hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
                              HRTIMER_MODE_REL);
                break;
        case -EAGAIN:
                /* Retry immediately (e.g., the config changed). */
                goto retry;
        default:
                /* Unknown error, mark as broken */
                dev_err(&vm->vdev->dev,
                        "unknown error, marking device broken: %d\n", rc);
                vm->broken = true;
        }
}

static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
        struct virtio_mem *vm = container_of(timer, struct virtio_mem,
                                             retry_timer);

        virtio_mem_retry(vm);
        vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
                                   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
        return HRTIMER_NORESTART;
}

static void virtio_mem_handle_response(struct virtqueue *vq)
{
        struct virtio_mem *vm = vq->vdev->priv;

        wake_up(&vm->host_resp);
}

static int virtio_mem_init_vq(struct virtio_mem *vm)
{
        struct virtqueue *vq;

        vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
                                   "guest-request");
        if (IS_ERR(vq))
                return PTR_ERR(vq);
        vm->vq = vq;

        return 0;
}

static int virtio_mem_init(struct virtio_mem *vm)
{
        const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
        uint16_t node_id;

        if (!vm->vdev->config->get) {
                dev_err(&vm->vdev->dev, "config access disabled\n");
                return -EINVAL;
        }

        /*
         * We don't want to (un)plug or reuse any memory when in kdump. The
         * memory is still accessible (but not mapped).
         */
        if (is_kdump_kernel()) {
                dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
                return -EBUSY;
        }

        /* Fetch all properties that can't change. */
        virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
                        &vm->plugged_size);
        virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
                        &vm->device_block_size);
        virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
                        &node_id);
        vm->nid = virtio_mem_translate_node_id(vm, node_id);
        virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
        virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
                        &vm->region_size);

        /*
         * We always hotplug memory in memory block granularity. This way,
         * we have to wait for exactly one memory block to online.
         */
        if (vm->device_block_size > memory_block_size_bytes()) {
                dev_err(&vm->vdev->dev,
                        "The block size is not supported (too big).\n");
                return -EINVAL;
        }

        /* bad device setup - warn only */
        if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
                dev_warn(&vm->vdev->dev,
                         "The alignment of the physical start address can make some memory unusable.\n");
        if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
                dev_warn(&vm->vdev->dev,
                         "The alignment of the physical end address can make some memory unusable.\n");
        if (vm->addr + vm->region_size > phys_limit)
                dev_warn(&vm->vdev->dev,
                         "Some memory is not addressable. This can make some memory unusable.\n");

        /*
         * Calculate the subblock size:
         * - At least 2^(MAX_ORDER - 1) pages, and at least one pageblock.
         * - At least the device block size.
         * In the worst case, a single subblock per memory block.
         */
        vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
                                                     pageblock_order);
        vm->subblock_size = max_t(uint64_t, vm->device_block_size,
                                  vm->subblock_size);
        vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;
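        /*
         * Example (typical x86-64 defaults): with 4 KiB pages, MAX_ORDER = 11,
         * 128 MiB memory blocks, and a device block size of at most 4 MiB,
         * this yields 4 MiB subblocks, i.e., 32 subblocks per memory block.
         */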

        /* Round up to the next full memory block */
        vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
                                                   memory_block_size_bytes());
        vm->next_mb_id = vm->first_mb_id;
        vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
                                                  vm->region_size) - 1;

        dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
        dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
        dev_info(&vm->vdev->dev, "device block size: 0x%llx",
                 (unsigned long long)vm->device_block_size);
        dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
                 memory_block_size_bytes());
        dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
                 (unsigned long long)vm->subblock_size);
        if (vm->nid != NUMA_NO_NODE)
                dev_info(&vm->vdev->dev, "nid: %d", vm->nid);

        return 0;
}

static int virtio_mem_create_resource(struct virtio_mem *vm)
{
        /*
         * When force-unloading the driver and removing the device, we
         * could have a garbage pointer. Duplicate the string.
         */
        const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);

        if (!name)
                return -ENOMEM;

        vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
                                                   name, IORESOURCE_SYSTEM_RAM);
        if (!vm->parent_resource) {
                kfree(name);
                dev_warn(&vm->vdev->dev, "could not reserve device region\n");
                dev_info(&vm->vdev->dev,
                         "reloading the driver is not supported\n");
                return -EBUSY;
        }

        /* The memory is not actually busy - make add_memory() work. */
        vm->parent_resource->flags &= ~IORESOURCE_BUSY;
        return 0;
}

static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
        const char *name;

        if (!vm->parent_resource)
                return;

        name = vm->parent_resource->name;
        release_resource(vm->parent_resource);
        kfree(vm->parent_resource);
        kfree(name);
        vm->parent_resource = NULL;
}

static int virtio_mem_probe(struct virtio_device *vdev)
{
        struct virtio_mem *vm;
        int rc;

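        /* Sanity-check the request/response sizes of the device ABI. */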
        BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
        BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);

        vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        init_waitqueue_head(&vm->host_resp);
        vm->vdev = vdev;
        INIT_WORK(&vm->wq, virtio_mem_run_wq);
        mutex_init(&vm->hotplug_mutex);
        INIT_LIST_HEAD(&vm->next);
        spin_lock_init(&vm->removal_lock);
        hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        vm->retry_timer.function = virtio_mem_timer_expired;
        vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;

        /* register the virtqueue */
        rc = virtio_mem_init_vq(vm);
        if (rc)
                goto out_free_vm;

        /* initialize the device by querying the config */
        rc = virtio_mem_init(vm);
        if (rc)
                goto out_del_vq;

        /* create the parent resource for all memory */
        rc = virtio_mem_create_resource(vm);
        if (rc)
                goto out_del_vq;

        /*
         * If we still have memory plugged, we have to unplug all memory first.
         * Registering our parent resource makes sure that this memory isn't
         * actually in use (e.g., trying to reload the driver).
         */
        if (vm->plugged_size) {
                vm->unplug_all_required = true;
                dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
        }

        /* register callbacks */
        vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
        rc = register_memory_notifier(&vm->memory_notifier);
        if (rc)
                goto out_del_resource;
        rc = register_virtio_mem_device(vm);
        if (rc)
                goto out_unreg_mem;

        virtio_device_ready(vdev);

        /* trigger a config update to start processing the requested_size */
        atomic_set(&vm->config_changed, 1);
        queue_work(system_freezable_wq, &vm->wq);

        return 0;
out_unreg_mem:
        unregister_memory_notifier(&vm->memory_notifier);
out_del_resource:
        virtio_mem_delete_resource(vm);
out_del_vq:
        vdev->config->del_vqs(vdev);
out_free_vm:
        kfree(vm);
        vdev->priv = NULL;

        return rc;
}

static void virtio_mem_remove(struct virtio_device *vdev)
{
        struct virtio_mem *vm = vdev->priv;
        unsigned long mb_id;
        int rc;

        /*
         * Make sure the workqueue won't be triggered anymore and no memory
         * blocks can be onlined/offlined until we're finished here.
         */
        mutex_lock(&vm->hotplug_mutex);
        spin_lock_irq(&vm->removal_lock);
        vm->removing = true;
        spin_unlock_irq(&vm->removal_lock);
        mutex_unlock(&vm->hotplug_mutex);

        /* wait until the workqueue stopped */
        cancel_work_sync(&vm->wq);
        hrtimer_cancel(&vm->retry_timer);

        /*
         * After we unregistered our callbacks, user space can online partially
         * plugged offline blocks. Make sure to remove them.
         */
        virtio_mem_for_each_mb_state(vm, mb_id,
                                     VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
                rc = virtio_mem_mb_remove(vm, mb_id);
                BUG_ON(rc);
                virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
        }
        /*
         * After we unregistered our callbacks, user space can no longer
         * offline partially plugged online memory blocks. No need to worry
         * about them.
         */

        /* unregister callbacks */
        unregister_virtio_mem_device(vm);
        unregister_memory_notifier(&vm->memory_notifier);

        /*
         * There is no way we could reliably remove all memory we have added to
         * the system. And there is no way to stop the driver/device from going
         * away. Warn at least.
         */
        if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL]) {
                dev_warn(&vdev->dev, "device still has system memory added\n");
        } else {
                virtio_mem_delete_resource(vm);
                kfree_const(vm->resource_name);
        }

        /* remove all tracking data - no locking needed */
        vfree(vm->mb_state);
        vfree(vm->sb_bitmap);

        /* reset the device and cleanup the queues */
        vdev->config->reset(vdev);
        vdev->config->del_vqs(vdev);

        kfree(vm);
        vdev->priv = NULL;
}

static void virtio_mem_config_changed(struct virtio_device *vdev)
{
        struct virtio_mem *vm = vdev->priv;

        atomic_set(&vm->config_changed, 1);
        virtio_mem_retry(vm);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
        /*
         * When restarting the VM, all memory is usually unplugged. Don't
         * allow suspend/hibernate.
         */
        dev_err(&vdev->dev, "save/restore not supported.\n");
        return -EPERM;
}

static int virtio_mem_restore(struct virtio_device *vdev)
{
        return -EPERM;
}
#endif

static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
        VIRTIO_MEM_F_ACPI_PXM,
#endif
};

static const struct virtio_device_id virtio_mem_id_table[] = {
        { VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static struct virtio_driver virtio_mem_driver = {
        .feature_table = virtio_mem_features,
        .feature_table_size = ARRAY_SIZE(virtio_mem_features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = virtio_mem_id_table,
        .probe = virtio_mem_probe,
        .remove = virtio_mem_remove,
        .config_changed = virtio_mem_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtio_mem_freeze,
        .restore = virtio_mem_restore,
#endif
};

module_virtio_driver(virtio_mem_driver);
MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
MODULE_DESCRIPTION("Virtio-mem driver");
MODULE_LICENSE("GPL");