/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <arch/mm/page_table.h>
#include <common/types.h>
#include <common/macro.h>
#include <mm/vmspace.h>
#include <mm/mm.h>
#include <arch/sync.h>

/*
 * TLB invalidation template:
 *  DSB: ensure prior page table updates are visible
 *  TLBI: invalidate the stale TLB entries
 *  DSB: ensure the TLB invalidation has completed
 *  ISB: ensure instruction fetches use the new mappings
 */
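/* The ASID occupies bits [63:48] of a TLBI operand. */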
#define TLBI_ASID_SHIFT 48

/* Flush the TLBs of all cores for the given ASID. */
static void flush_tlb_by_asid(u64 asid)
{
    /* Inner-shareable barrier: prior page table updates are visible */
    dsb(ish);
    asm volatile("tlbi aside1is, %0\n" : : "r"(asid << TLBI_ASID_SHIFT) :);
    dsb(ish);
    isb();
}

/* Flush the TLB entries of designated VAs. The ASID is encoded in @addr_arg. */
static void flush_tlb_addr_asid(u64 addr_arg, u64 page_cnt)
{
    u64 i;

    /* Inner-shareable barrier */
    dsb(ish);
    for (i = 0; i < page_cnt; ++i) {
        asm volatile("tlbi vae1is, %0\n" : : "r"(addr_arg) :);
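        /* The VA field of the operand holds a page number, so
         * incrementing by one advances to the next page. */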
        addr_arg++;
    }
    dsb(ish);
    isb();
}

/*
 * The operand for 'tlbi vae1is' is laid out as | ASID | TTL | VA (virtual frame number) |.
 * If ARMv8.4-TTL is not supported, TTL should be 0.
 */
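/*
 * A worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * addr = 0x40001000, asid = 5  =>  arg = 0x0005000000040001.
 */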
static u64 get_tlbi_va_arg(vaddr_t addr, u64 asid)
{
    vaddr_t arg;

    arg = addr >> PAGE_SHIFT;
    arg |= asid << TLBI_ASID_SHIFT;

    return arg;
}

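/*
 * Above this page count, invalidating every entry of the ASID at once is
 * assumed to be cheaper than flushing the pages one at a time.
 */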
#define TLB_SHOOTDOWN_THRESHOLD 2

static void do_flush_tlb_opt(vaddr_t start_va, u64 page_cnt, u64 asid)
{
    /* Flush TLBs on all the CPUs */
    if (page_cnt > TLB_SHOOTDOWN_THRESHOLD) {
        /* Flush all the TLB entries of the ASID on all the CPUs */
        flush_tlb_by_asid(asid);
    } else {
        /* Flush each TLB entry one by one on all the CPUs */
        flush_tlb_addr_asid(get_tlbi_va_arg(start_va, asid), page_cnt);
    }
}

/* Exposed functions */
void flush_tlb_opt(struct vmspace* vmspace, vaddr_t start_va, size_t len)
{
    u64 page_cnt;
    u64 asid;

    if (unlikely(len < PAGE_SIZE))
        kwarn("func: %s. len (%p) < PAGE_SIZE\n", __func__, len);

    if (len == 0)
        return;

    start_va = ROUND_DOWN(start_va, PAGE_SIZE);
    len = ROUND_UP(len, PAGE_SIZE);
    page_cnt = len / PAGE_SIZE;

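    /* The vmspace's pcid field serves as the ARM ASID here. */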
    asid = vmspace->pcid;

    do_flush_tlb_opt(start_va, page_cnt, asid);
}

void flush_tlb_by_range(struct vmspace* vmspace, vaddr_t start_va, size_t len)
{
    flush_tlb_opt(vmspace, start_va, len);
}

void flush_tlb_all(void)
{
    /* Full-system barrier */
    dsb(sy);
    asm volatile("tlbi vmalle1is\n\t" : : :);
    dsb(sy);
    isb();
}

void flush_tlb_by_vmspace(struct vmspace* vmspace)
{
    flush_tlb_by_asid(vmspace->pcid);
}
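/*
 * Illustrative usage (a sketch, not code in this file): after a caller
 * removes or changes mappings in a vmspace, it publishes the change by
 * flushing the affected range, e.g.:
 *
 *     // unmap [va, va + len) in the vmspace's page table (hypothetical caller)
 *     flush_tlb_by_range(vmspace, va, len);
 *
 * flush_tlb_all() invalidates stage-1 EL1 entries for every ASID and is the
 * heavyweight fallback.
 */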