/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <arch/machine/registers.h>
#include <arch/machine/smp.h>
#include <arch/mm/cache.h>
#include <common/types.h>

/*
 * DCZID_EL0: Data Cache Zero ID register
 * DZP, bit [4]: when set, use of the DC ZVA instruction is prohibited.
 * BS, bits [3:0]: log2 of the block size in words.
 * The maximum block size supported is 2KB (value == 9).
 */
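
/*
 * Worked example (added for illustration): on a core reporting BS == 4,
 * the DC ZVA block is 2^4 = 16 words, i.e. 16 * 4 = 64 bytes, which is
 * the value cache_setup() below stores in dczva_line_size.
 */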

/* Block size (in bytes) zeroed by one DC ZVA instruction; 0 if DC ZVA is prohibited. */
int dczva_line_size = 0;

#define DZP_SHIFT 4
#define DZBS_MASK 0xf

/*
 * Read Data Cache Zero ID register
 */
long read_dczid(void)
{
        long val;

        asm volatile("mrs %0, dczid_el0\n\t" : "=r"(val));
        return val;
}

/*
 * Check whether DC ZVA is supported and record the block size in
 * dczva_line_size.
 */
void cache_setup(void)
{
        long dczid_val;
        int dczid_bs;

        dczid_val = read_dczid();
        if (dczid_val & (1 << DZP_SHIFT)) {
                /* DZP (bit [4]) set: use of DC ZVA is prohibited */
                dczva_line_size = 0;
        } else {
                /* BS (bits [3:0]) holds log2 of the block size in words */
                dczid_bs = dczid_val & (DZBS_MASK);
                dczva_line_size = sizeof(int) << dczid_bs;
        }
}
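
/*
 * Illustrative sketch: one possible way dczva_line_size could be used to
 * zero a region with DC ZVA. The helper name dczva_zero_area is
 * hypothetical; it assumes cache_setup() has run, dczva_line_size != 0,
 * and that [start, end) is aligned to dczva_line_size.
 */
static inline void dczva_zero_area(u64 start, u64 end)
{
        while (start < end) {
                /* DC ZVA zeroes dczva_line_size bytes at this address */
                asm volatile("dc zva, %0" : : "r"(start) : "memory");
                start += dczva_line_size;
        }
}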

#define CACHE_LINE_LENGTH 64

#define ICACHE_POLICY_VPIPT 0
#define ICACHE_POLICY_AIVIVT 1
#define ICACHE_POLICY_VIPT 2
#define ICACHE_POLICY_PIPT 3

/* Clean data cache lines in [start, end) to the Point of Coherency */
static inline void __dcache_clean_area(u64 start, u64 end)
{
        while (start < end) {
                asm volatile("dc cvac, %0" : : "r"(start) : "memory");
                start += CACHE_LINE_LENGTH;
        }
}

/* Clean data cache lines in [start, end) to the Point of Unification */
static inline void __dcache_clean_area_pou(u64 start, u64 end)
{
        while (start < end) {
                asm volatile("dc cvau, %0" : : "r"(start) : "memory");
                start += CACHE_LINE_LENGTH;
        }
}

/* Invalidate data cache lines in [start, end) to the Point of Coherency */
static inline void __dcache_inv_area(u64 start, u64 end)
{
        while (start < end) {
                asm volatile("dc ivac, %0" : : "r"(start) : "memory");
                start += CACHE_LINE_LENGTH;
        }
}

/* Clean and invalidate data cache lines in [start, end) to the Point of Coherency */
static inline void __dcache_clean_and_inv_area(u64 start, u64 end)
{
        while (start < end) {
                asm volatile("dc civac, %0" : : "r"(start) : "memory");
                start += CACHE_LINE_LENGTH;
        }
}

static inline void flush_icache_all(void)
{
        /*
         * "ic iallu" is not enough: instruction fetches must be made
         * coherent across the Inner Shareable domain. So we use
         * "ic ialluis": invalidate all instruction caches to PoU,
         * Inner Shareable.
         */
        asm volatile("ic ialluis");
}

static inline void flush_icache_range(u64 start, u64 end)
{
        while (start < end) {
                asm volatile("ic ivau, %0" : : "r"(start) : "memory");
                start += CACHE_LINE_LENGTH;
        }
}

static inline bool is_icache_aliasing(u64 ctr_el0)
{
        /*
         * A VIVT icache always has aliasing issues.
         * A VIPT icache is either aliasing or non-aliasing depending on
         * the highest index bit and PAGE_SHIFT; we conservatively treat
         * every VIPT icache as aliasing.
         */
        switch ((ctr_el0 >> CTR_EL0_L1Ip_SHIFT) & CTR_EL0_L1Ip_MASK) {
        case ICACHE_POLICY_PIPT:
        case ICACHE_POLICY_VPIPT:
                return false;
        case ICACHE_POLICY_AIVIVT:
        case ICACHE_POLICY_VIPT:
                /* Assume aliasing */
                return true;
        default:
                BUG("Unknown instruction cache policy!");
                return false;
        }
}

static inline void __sync_idcache(u64 start, u64 end)
{
        /*
         * If the CTR_EL0.IDC bit is set, cleaning the data cache to the
         * Point of Unification is not required for instruction-to-data
         * coherence.
         */
        if (ctr_el0 & CTR_EL0_IDC) {
                asm volatile("dsb ishst");
        } else {
                __dcache_clean_area_pou(start, end);
                asm volatile("dsb ish");
        }

        /*
         * If the CTR_EL0.DIC bit is set, invalidating the instruction
         * cache to the Point of Unification is not required for
         * data-to-instruction coherence.
         */
        if (!(ctr_el0 & CTR_EL0_DIC)) {
                if (is_icache_aliasing(ctr_el0)) {
                        flush_icache_all();
                } else {
                        flush_icache_range(start, end);
                }
        }
}

void arch_flush_cache(vaddr_t start, size_t len, int op_type)
{
        u64 real_start;
        u64 real_end;

        BUG_ON((long)len < 0);
        real_start = ROUND_DOWN(start, CACHE_LINE_LENGTH);
        real_end = ROUND_UP(start + len, CACHE_LINE_LENGTH);
        BUG_ON(real_start > real_end);

        switch (op_type) {
        case CACHE_CLEAN:
                __dcache_clean_area(real_start, real_end);
                break;
        case CACHE_INVALIDATE:
                __dcache_inv_area(real_start, real_end);
                break;
        case CACHE_CLEAN_AND_INV:
                __dcache_clean_and_inv_area(real_start, real_end);
                break;
        case SYNC_IDCACHE:
                __sync_idcache(real_start, real_end);
                break;
        default:
                BUG("Unsupported cache operation type: %d\n", op_type);
                break;
        }
        asm volatile("dsb ish");
        asm volatile("isb");
}
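
/*
 * Usage sketch (illustrative): a caller that has just written
 * instructions into memory is expected to synchronize the I/D caches
 * before executing them, e.g.:
 *
 *     memcpy((void *)code_va, new_insns, code_len);
 *     arch_flush_cache(code_va, code_len, SYNC_IDCACHE);
 *
 * code_va, new_insns and code_len are hypothetical names used only for
 * this example.
 */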

/*
void flush_icache_range(u64 start, u64 size)
{
        __builtin___clear_cache((char*)start, (char*)(start + size));
}
*/