// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/copypage.c
 *
 * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>

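/*
 * Copy the contents of a page and, when the source page carries MTE tags,
 * its tags as well. Cache maintenance for user mappings is left to the
 * caller (see copy_user_highpage() below).
 */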
void copy_highpage(struct page *to, struct page *from)
{
	void *kto = page_address(to);
	void *kfrom = page_address(from);

	copy_page(kto, kfrom);

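	/*
	 * With hardware tag-based KASAN, the KASAN tag in the destination
	 * page->flags may no longer match the page's MTE tags after the
	 * copy, so reset it to the match-all tag.
	 */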
	if (kasan_hw_tags_enabled())
		page_kasan_tag_reset(to);

	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
		set_bit(PG_mte_tagged, &to->flags);
		/*
		 * We need smp_wmb() between setting the flag and copying
		 * the tags because if another thread reads page->flags and
		 * builds a tagged address out of it, there is an actual
		 * dependency to the memory access; but on the current
		 * thread nothing guarantees that the new page->flags are
		 * visible before the tags have been updated.
		 */
		smp_wmb();
		mte_copy_page_tags(kto, kfrom);
	}
}
EXPORT_SYMBOL(copy_highpage);

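/*
 * Like copy_highpage(), but for pages that may be mapped into user space:
 * flush_dcache_page() is called on the destination so that the new
 * contents are coherent with user-space mappings of the page.
 */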
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_highpage(to, from);
	flush_dcache_page(to);
}
EXPORT_SYMBOL_GPL(copy_user_highpage);