// SPDX-License-Identifier: GPL-2.0
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>

#include "kvm_mm.h"

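/*
 * Per-file state for a guest_memfd instance.  @bindings maps page offsets
 * to the memslots bound to them, and @entry links the instance into the
 * backing inode's i_private_list.
 */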
struct kvm_gmem {
	struct kvm *kvm;
	struct xarray bindings;
	struct list_head entry;
};

/**
 * folio_file_pfn - like folio_file_page, but return a pfn.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Return: The pfn for this index.
 */
static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
{
	return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
}

static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				    pgoff_t index, struct folio *folio)
{
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
	kvm_pfn_t pfn = folio_file_pfn(folio, index);
	gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
	int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
	if (rc) {
		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
				    index, gfn, pfn, rc);
		return rc;
	}
#endif

	return 0;
}

static inline void kvm_gmem_mark_prepared(struct folio *folio)
{
	folio_mark_uptodate(folio);
}

/*
 * Process @folio, which contains @gfn, so that the guest can use it.  The
 * folio must be locked and the gfn must be contained in @slot.  On
 * successful return the guest sees a zero page (so no host data is leaked)
 * and the folio's up-to-date flag is set.
 */
static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				  gfn_t gfn, struct folio *folio)
{
	unsigned long nr_pages, i;
	pgoff_t index;
	int r;

	nr_pages = folio_nr_pages(folio);
	for (i = 0; i < nr_pages; i++)
		clear_highpage(folio_page(folio, i));

	/*
	 * Preparing huge folios should always be safe, since it should
	 * be possible to split them later if needed.
	 *
	 * Right now the folio order is always going to be zero, but the
	 * code is ready for huge folios.  The only assumption is that
	 * the base pgoff of memslots is naturally aligned with the
	 * requested page order, ensuring that huge folios can also use
	 * huge page table entries for GPA->HPA mapping.
	 *
	 * The order will be passed when creating the guest_memfd, and
	 * checked when creating memslots.
	 */
	WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
	index = gfn - slot->base_gfn + slot->gmem.pgoff;
	index = ALIGN_DOWN(index, 1 << folio_order(folio));
	r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
	if (!r)
		kvm_gmem_mark_prepared(folio);

	return r;
}

/*
 * Returns a locked folio on success.  The caller is responsible for
 * setting the up-to-date flag before the memory is mapped into the guest.
 * There is no backing storage for the memory, so the folio will remain
 * up-to-date until it's removed.
 *
 * Ignore accessed, referenced, and dirty flags.  The memory is
 * unevictable and there is no storage to write back to.
 */
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{
	/* TODO: Support huge pages. */
	return filemap_grab_folio(inode->i_mapping, index);
}

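/*
 * Zap the SPTEs of every memslot bound to the range [start, end) and open
 * an MMU invalidation window.  Callers must hold the filemap invalidate
 * lock so that the bindings cannot change mid-invalidation.
 */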
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
				      pgoff_t end)
{
	bool flush = false, found_memslot = false;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
		pgoff_t pgoff = slot->gmem.pgoff;

		struct kvm_gfn_range gfn_range = {
			.start = slot->base_gfn + max(pgoff, start) - pgoff,
			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
			.slot = slot,
			.may_block = true,
			/* guest_memfd is relevant only to private mappings. */
			.attr_filter = KVM_FILTER_PRIVATE,
		};

		if (!found_memslot) {
			found_memslot = true;

			KVM_MMU_LOCK(kvm);
			kvm_mmu_invalidate_begin(kvm);
		}

		flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
	}

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	if (found_memslot)
		KVM_MMU_UNLOCK(kvm);
}

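/*
 * Close the MMU invalidation window opened by kvm_gmem_invalidate_begin(),
 * if any binding intersects the range.
 */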
static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
				    pgoff_t end)
{
	struct kvm *kvm = gmem->kvm;

	if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		KVM_MMU_LOCK(kvm);
		kvm_mmu_invalidate_end(kvm);
		KVM_MMU_UNLOCK(kvm);
	}
}

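/*
 * fallocate(FALLOC_FL_PUNCH_HOLE): drop the folios backing the range and
 * zap any SPTEs pointing at them.  If the guest faults the range back in,
 * it sees freshly zeroed (and re-prepared) memory.
 */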
static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	struct kvm_gmem *gmem;

	/*
	 * Bindings must be stable across invalidation to ensure the start+end
	 * are balanced.
	 */
	filemap_invalidate_lock(inode->i_mapping);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock(inode->i_mapping);

	return 0;
}

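/*
 * fallocate() without FALLOC_FL_PUNCH_HOLE: preallocate folios for the
 * range.  The folios are deliberately not marked up-to-date; zeroing and
 * arch preparation are deferred until the guest first uses the pages.
 */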
static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start, index, end;
	int r;

	/* The size of dedicated guest memory is immutable; the file cannot grow. */
	if (offset + len > i_size_read(inode))
		return -EINVAL;

	filemap_invalidate_lock_shared(mapping);

	start = offset >> PAGE_SHIFT;
	end = (offset + len) >> PAGE_SHIFT;

	r = 0;
	for (index = start; index < end; ) {
		struct folio *folio;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		folio = kvm_gmem_get_folio(inode, index);
		if (IS_ERR(folio)) {
			r = PTR_ERR(folio);
			break;
		}

		index = folio_next_index(folio);

		folio_unlock(folio);
		folio_put(folio);

		/* 64-bit only, wrapping the index should be impossible. */
		if (WARN_ON_ONCE(!index))
			break;

		cond_resched();
	}

	filemap_invalidate_unlock_shared(mapping);

	return r;
}

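/*
 * Illustrative userspace usage (not part of this file): the file size is
 * fixed at creation, so only FALLOC_FL_KEEP_SIZE modes are supported.
 *
 *	fallocate(gmem_fd, FALLOC_FL_KEEP_SIZE, off, len);
 *	fallocate(gmem_fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
 *		  off, len);
 */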
static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
			       loff_t len)
{
	int ret;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
		return -EINVAL;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
	else
		ret = kvm_gmem_allocate(file_inode(file), offset, len);

	if (!ret)
		file_modified(file);
	return ret;
}

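/*
 * Called when the last reference to the guest_memfd file is dropped: sever
 * all memslot bindings and zap the SPTEs, but leave the backing memory to
 * the inode, which may outlive the file.
 */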
static int kvm_gmem_release(struct inode *inode, struct file *file)
{
	struct kvm_gmem *gmem = file->private_data;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	/*
	 * Prevent concurrent attempts to *unbind* a memslot.  This is the last
	 * reference to the file and thus no new bindings can be created, but
	 * dereferencing the slot for existing bindings needs to be protected
	 * against memslot updates, specifically so that unbind doesn't race
	 * and free the memslot (kvm_gmem_get_file() will return NULL).
	 */
	mutex_lock(&kvm->slots_lock);

	filemap_invalidate_lock(inode->i_mapping);

	xa_for_each(&gmem->bindings, index, slot)
		rcu_assign_pointer(slot->gmem.file, NULL);

	synchronize_rcu();

	/*
	 * All in-flight operations are gone and new bindings can be created.
	 * Zap all SPTEs pointed at by this file.  Do not free the backing
	 * memory, as its lifetime is associated with the inode, not the file.
	 */
	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
	kvm_gmem_invalidate_end(gmem, 0, -1ul);

	list_del(&gmem->entry);

	filemap_invalidate_unlock(inode->i_mapping);

	mutex_unlock(&kvm->slots_lock);

	xa_destroy(&gmem->bindings);
	kfree(gmem);

	kvm_put_kvm(kvm);

	return 0;
}

static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
{
	/*
	 * Do not return slot->gmem.file if it has already been closed;
	 * there might be some time between the last fput() and when
	 * kvm_gmem_release() clears slot->gmem.file, and we do not want
	 * to spin in the meantime.
	 */
	return get_file_active(&slot->gmem.file);
}

static struct file_operations kvm_gmem_fops = {
	.open		= generic_file_open,
	.release	= kvm_gmem_release,
	.fallocate	= kvm_gmem_fallocate,
};

void kvm_gmem_init(struct module *module)
{
	kvm_gmem_fops.owner = module;
}

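/* guest_memfd folios are unmovable; migration should never be attempted. */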
static int kvm_gmem_migrate_folio(struct address_space *mapping,
				  struct folio *dst, struct folio *src,
				  enum migrate_mode mode)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
{
	struct list_head *gmem_list = &mapping->i_private_list;
	struct kvm_gmem *gmem;
	pgoff_t start, end;

	filemap_invalidate_lock_shared(mapping);

	start = folio->index;
	end = start + folio_nr_pages(folio);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	/*
	 * Do not truncate the range; what action is taken in response to the
	 * error is userspace's decision (assuming the architecture supports
	 * gracefully handling memory errors).  If/when the guest attempts to
	 * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
	 * at which point KVM can either terminate the VM or propagate the
	 * error to userspace.
	 */

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock_shared(mapping);

	return MF_DELAYED;
}

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
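/*
 * Give the architecture a chance to clean up its metadata for the pfn range
 * (e.g., assumption: RMP state on SEV-SNP) before the folio is freed.
 */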
static void kvm_gmem_free_folio(struct folio *folio)
{
	struct page *page = folio_page(folio, 0);
	kvm_pfn_t pfn = page_to_pfn(page);
	int order = folio_order(folio);

	kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
}
#endif

static const struct address_space_operations kvm_gmem_aops = {
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= kvm_gmem_migrate_folio,
	.error_remove_folio	= kvm_gmem_error_folio,
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
	.free_folio		= kvm_gmem_free_folio,
#endif
};

static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path,
			    struct kstat *stat, u32 request_mask,
			    unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}

static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			    struct iattr *attr)
{
	return -EINVAL;
}

static const struct inode_operations kvm_gmem_iops = {
	.getattr	= kvm_gmem_getattr,
	.setattr	= kvm_gmem_setattr,
};

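/*
 * Allocate the kvm_gmem instance and back it with an anonymous inode.  The
 * mapping is configured so its folios are unevictable, unmovable, and
 * inaccessible to userspace, and the VM is pinned for the file's lifetime.
 */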
static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
{
	const char *anon_name = "[kvm-gmem]";
	struct kvm_gmem *gmem;
	struct inode *inode;
	struct file *file;
	int fd, err;

	fd = get_unused_fd_flags(0);
	if (fd < 0)
		return fd;

	gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
	if (!gmem) {
		err = -ENOMEM;
		goto err_fd;
	}

	file = anon_inode_create_getfile(anon_name, &kvm_gmem_fops, gmem,
					 O_RDWR, NULL);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_gmem;
	}

	file->f_flags |= O_LARGEFILE;

	inode = file->f_inode;
	WARN_ON(file->f_mapping != inode->i_mapping);

	inode->i_private = (void *)(unsigned long)flags;
	inode->i_op = &kvm_gmem_iops;
	inode->i_mapping->a_ops = &kvm_gmem_aops;
	inode->i_mode |= S_IFREG;
	inode->i_size = size;
	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_inaccessible(inode->i_mapping);
	/* Unmovable mappings are supposed to be marked unevictable as well. */
	WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));

	kvm_get_kvm(kvm);
	gmem->kvm = kvm;
	xa_init(&gmem->bindings);
	list_add(&gmem->entry, &inode->i_mapping->i_private_list);

	fd_install(fd, file);
	return fd;

err_gmem:
	kfree(gmem);
err_fd:
	put_unused_fd(fd);
	return err;
}
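/*
 * KVM_CREATE_GUEST_MEMFD ioctl handler.  Illustrative userspace usage
 * (struct kvm_create_guest_memfd is from the KVM uapi; no flags are
 * currently supported, and size must be non-zero and page-aligned):
 *
 *	struct kvm_create_guest_memfd args = {
 *		.size = guest_mem_size,
 *		.flags = 0,
 *	};
 *	int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &args);
 */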
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
{
	loff_t size = args->size;
	u64 flags = args->flags;
	u64 valid_flags = 0;

	if (flags & ~valid_flags)
		return -EINVAL;

	if (size <= 0 || !PAGE_ALIGNED(size))
		return -EINVAL;

	return __kvm_gmem_create(kvm, size, flags);
}
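/*
 * Bind @slot to the guest_memfd range starting at @offset.  Fails if @fd is
 * not a guest_memfd, belongs to a different VM, extends beyond the file
 * size, or overlaps an existing binding.
 */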
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset)
{
	loff_t size = slot->npages << PAGE_SHIFT;
	unsigned long start, end;
	struct kvm_gmem *gmem;
	struct inode *inode;
	struct file *file;
	int r = -EINVAL;

	BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));

	file = fget(fd);
	if (!file)
		return -EBADF;

	if (file->f_op != &kvm_gmem_fops)
		goto err;

	gmem = file->private_data;
	if (gmem->kvm != kvm)
		goto err;

	inode = file_inode(file);

	if (offset < 0 || !PAGE_ALIGNED(offset) ||
	    offset + size > i_size_read(inode))
		goto err;

	filemap_invalidate_lock(inode->i_mapping);

	start = offset >> PAGE_SHIFT;
	end = start + slot->npages;

	if (!xa_empty(&gmem->bindings) &&
	    xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		filemap_invalidate_unlock(inode->i_mapping);
		goto err;
	}

	/*
	 * No synchronize_rcu() needed: any in-flight readers are guaranteed
	 * to see either a NULL file or this new file; there is no need to
	 * wait for them to go away.
	 */
	rcu_assign_pointer(slot->gmem.file, file);
	slot->gmem.pgoff = start;

	xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
	filemap_invalidate_unlock(inode->i_mapping);

	/*
	 * Drop the reference to the file, even on success.  The file pins KVM,
	 * not the other way 'round.  Active bindings are invalidated if the
	 * file is closed before memslots are destroyed.
	 */
	r = 0;
err:
	fput(file);
	return r;
}
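/*
 * Sever @slot's binding, leaving the file's other bindings and the backing
 * memory itself intact.
 */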
void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	unsigned long start = slot->gmem.pgoff;
	unsigned long end = start + slot->npages;
	struct kvm_gmem *gmem;
	struct file *file;

	/*
	 * Nothing to do if the underlying file was already closed (or is being
	 * closed right now); kvm_gmem_release() invalidates all bindings.
	 */
	file = kvm_gmem_get_file(slot);
	if (!file)
		return;

	gmem = file->private_data;

	filemap_invalidate_lock(file->f_mapping);
	xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
	rcu_assign_pointer(slot->gmem.file, NULL);
	synchronize_rcu();
	filemap_invalidate_unlock(file->f_mapping);

	fput(file);
}

/* Returns a locked folio on success.  */
static struct folio *
__kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
		   gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared,
		   int *max_order)
{
	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
	struct kvm_gmem *gmem = file->private_data;
	struct folio *folio;

	if (file != slot->gmem.file) {
		WARN_ON_ONCE(slot->gmem.file);
		return ERR_PTR(-EFAULT);
	}

	if (xa_load(&gmem->bindings, index) != slot) {
		WARN_ON_ONCE(xa_load(&gmem->bindings, index));
		return ERR_PTR(-EIO);
	}

	folio = kvm_gmem_get_folio(file_inode(file), index);
	if (IS_ERR(folio))
		return folio;

	if (folio_test_hwpoison(folio)) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(-EHWPOISON);
	}

	*pfn = folio_file_pfn(folio, index);
	if (max_order)
		*max_order = 0;

	*is_prepared = folio_test_uptodate(folio);
	return folio;
}
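/*
 * Resolve @gfn to a pfn backed by this guest_memfd, zeroing and preparing
 * the folio on first use.  On success the folio reference is kept for the
 * caller; on failure it is dropped.
 */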
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
{
	struct file *file = kvm_gmem_get_file(slot);
	struct folio *folio;
	bool is_prepared = false;
	int r = 0;

	if (!file)
		return -EFAULT;

	folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order);
	if (IS_ERR(folio)) {
		r = PTR_ERR(folio);
		goto out;
	}

	if (!is_prepared)
		r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);

	folio_unlock(folio);
	if (r < 0)
		folio_put(folio);

out:
	fput(file);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);

#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
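/*
 * Populate up to @npages pages starting at @start_gfn, invoking
 * @post_populate on each naturally aligned chunk (e.g., assumption: so the
 * architecture can encrypt and/or measure the initial contents, optionally
 * copied from @src).  Already-prepared pages fail with -EEXIST.  Must be
 * called with kvm->slots_lock held; returns the number of pages populated,
 * or a negative errno if nothing was populated.
 */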
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
		       kvm_gmem_populate_cb post_populate, void *opaque)
{
	struct file *file;
	struct kvm_memory_slot *slot;
	void __user *p;

	int ret = 0, max_order;
	long i;

	lockdep_assert_held(&kvm->slots_lock);
	if (npages < 0)
		return -EINVAL;

	slot = gfn_to_memslot(kvm, start_gfn);
	if (!kvm_slot_can_be_private(slot))
		return -EINVAL;

	file = kvm_gmem_get_file(slot);
	if (!file)
		return -EFAULT;

	filemap_invalidate_lock(file->f_mapping);

	npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
	for (i = 0; i < npages; i += (1 << max_order)) {
		struct folio *folio;
		gfn_t gfn = start_gfn + i;
		bool is_prepared = false;
		kvm_pfn_t pfn;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &is_prepared, &max_order);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		if (is_prepared) {
			folio_unlock(folio);
			folio_put(folio);
			ret = -EEXIST;
			break;
		}

		folio_unlock(folio);
		WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
			(npages - i) < (1 << max_order));

		ret = -EINVAL;
		while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
							KVM_MEMORY_ATTRIBUTE_PRIVATE,
							KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
			if (!max_order)
				goto put_folio_and_exit;
			max_order--;
		}

		p = src ? src + i * PAGE_SIZE : NULL;
		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
		if (!ret)
			kvm_gmem_mark_prepared(folio);

put_folio_and_exit:
		folio_put(folio);
		if (ret)
			break;
	}

	filemap_invalidate_unlock(file->f_mapping);

	fput(file);
	return ret && !i ? ret : i;
}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);
#endif