// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include "../utils-vixl.h"

#include "cpu-aarch64.h"

namespace vixl {
namespace aarch64 {

// Initialise to the smallest possible cache line size.
unsigned CPU::dcache_line_size_ = 1;
unsigned CPU::icache_line_size_ = 1;


// Currently computes the I and D cache line sizes.
void CPU::SetUp() {
  uint32_t cache_type_register = GetCacheType();

  // The cache type register holds information about the caches, including the
  // I and D cache line sizes.
  static const int kDCacheLineSizeShift = 16;
  static const int kICacheLineSizeShift = 0;
  static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
  static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
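  // Per the Arm ARM, these select CTR_EL0.DminLine (bits [19:16]) and
  // CTR_EL0.IminLine (bits [3:0]).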
  // The cache type register encodes each line size as the log2 of the number
  // of four-byte words in a line, so the size in bytes is 4 << field.
  uint32_t dcache_line_size_power_of_two =
      (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
  uint32_t icache_line_size_power_of_two =
      (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;

  dcache_line_size_ = 4 << dcache_line_size_power_of_two;
  icache_line_size_ = 4 << icache_line_size_power_of_two;
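
  // Worked example, assuming an illustrative CTR_EL0 value of 0x84448004
  // (commonly reported by Cortex-A53 cores; real contents vary by CPU):
  //   DminLine = (0x84448004 >> 16) & 0xf = 4, so dcache_line_size_ =
  //   4 << 4 = 64 bytes; IminLine = 0x84448004 & 0xf = 4, so
  //   icache_line_size_ is also 64 bytes.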
}


uint32_t CPU::GetCacheType() {
#ifdef __aarch64__
  uint64_t cache_type_register;
  // Copy the content of the cache type register to a core register.
  __asm__ __volatile__("mrs %[ctr], ctr_el0"  // NOLINT(runtime/references)
                       : [ctr] "=r"(cache_type_register));
  VIXL_ASSERT(IsUint32(cache_type_register));
  return static_cast<uint32_t>(cache_type_register);
#else
  // Returning 0 makes SetUp() compute four-byte-long cache lines (4 << 0),
  // which is fine since neither EnsureIAndDCacheCoherency nor the simulator
  // needs this information.
  return 0;
#endif
}


void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) {
#ifdef __aarch64__
  // Implement the cache synchronisation for all targets where AArch64 is the
  // host, even if we're building the simulator for an AArch64 host. This
  // allows for cases where the user wants to simulate code as well as run it
  // natively.

  if (length == 0) {
    return;
  }

  // The code below assumes that user-space cache maintenance operations are
  // allowed, that is, that SCTLR_EL1.UCI is set.

  // Work out the line sizes for each cache, and use them to determine the
  // start addresses.
  uintptr_t start = reinterpret_cast<uintptr_t>(address);
  uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
  uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
  uintptr_t dline = start & ~(dsize - 1);
  uintptr_t iline = start & ~(isize - 1);

  // Cache line sizes are always a power of 2.
  VIXL_ASSERT(IsPowerOf2(dsize));
  VIXL_ASSERT(IsPowerOf2(isize));
  uintptr_t end = start + length;
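
  // Worked example (illustrative numbers): with dsize = 64 and
  // start = 0x12345, dline = 0x12345 & ~0x3f = 0x12340, so the first clean
  // below covers the line containing 'start'; each loop then advances one
  // line at a time until it passes 'end'.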

  do {
    __asm__ __volatile__(
        // Clean each line of the D cache containing the target data.
        //
        // dc : Data Cache maintenance
        // c : Clean
        // va : by (Virtual) Address
        // u : to the point of Unification
        // The point of unification for a processor is the point by which the
        // instruction and data caches are guaranteed to see the same copy of
        // a memory location. See ARM DDI 0406B page B2-12 for more
        // information.
        " dc cvau, %[dline]\n"
        :
        : [dline] "r"(dline)
        // This code does not write to memory, but the "memory" dependency
        // prevents GCC from reordering the code.
        : "memory");
    dline += dsize;
  } while (dline < end);

  __asm__ __volatile__(
      // Make sure that the data cache operations (above) complete before the
      // instruction cache operations (below).
      //
      // dsb : Data Synchronisation Barrier
      // ish : Inner SHareable domain
      //
      // The point of unification for an Inner Shareable shareability domain
      // is the point by which the instruction and data caches of all the
      // processors in that Inner Shareable shareability domain are guaranteed
      // to see the same copy of a memory location. See ARM DDI 0406B page
      // B2-12 for more information.
      " dsb ish\n"
      :
      :
      : "memory");

  do {
    __asm__ __volatile__(
        // Invalidate each line of the I cache containing the target data.
        //
        // ic : Instruction Cache maintenance
        // i : Invalidate
        // va : by Address
        // u : to the point of Unification
        " ic ivau, %[iline]\n"
        :
        : [iline] "r"(iline)
        : "memory");
    iline += isize;
  } while (iline < end);

  __asm__ __volatile__(
      // Make sure that the instruction cache operations (above) take effect
      // before the isb (below).
      " dsb ish\n"

      // Ensure that any instructions already in the pipeline are discarded
      // and reloaded from the new data.
      // isb : Instruction Synchronisation Barrier
      " isb\n"
      :
      :
      : "memory");
#else
  // If the host isn't AArch64, we must be using the simulator, so this
  // function doesn't have to do anything.
  USE(address, length);
#endif
}
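
// A sketch of typical use (the 'code' and 'size' names here are hypothetical;
// in practice they would come from wherever the new instructions were
// written, such as a code buffer):
//
//   void* code = ...;   // Start of the freshly generated instructions.
//   size_t size = ...;  // Number of bytes generated.
//   CPU::EnsureIAndDCacheCoherency(code, size);
//   // Only now is it safe to branch to 'code' on this core.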

}  // namespace aarch64
}  // namespace vixl
