// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_DECODER_AARCH64_H_
#define VIXL_AARCH64_DECODER_AARCH64_H_

#include <list>

#include "../globals-vixl.h"

#include "instructions-aarch64.h"


// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST_THAT_RETURN(V) \
  V(PCRelAddressing) \
  V(AddSubImmediate) \
  V(LogicalImmediate) \
  V(MoveWideImmediate) \
  V(Bitfield) \
  V(Extract) \
  V(UnconditionalBranch) \
  V(UnconditionalBranchToRegister) \
  V(CompareBranch) \
  V(TestBranch) \
  V(ConditionalBranch) \
  V(System) \
  V(Exception) \
  V(LoadStorePairPostIndex) \
  V(LoadStorePairOffset) \
  V(LoadStorePairPreIndex) \
  V(LoadStorePairNonTemporal) \
  V(LoadLiteral) \
  V(LoadStoreUnscaledOffset) \
  V(LoadStorePostIndex) \
  V(LoadStorePreIndex) \
  V(LoadStoreRegisterOffset) \
  V(LoadStoreUnsignedOffset) \
  V(LoadStoreExclusive) \
  V(LogicalShifted) \
  V(AddSubShifted) \
  V(AddSubExtended) \
  V(AddSubWithCarry) \
  V(ConditionalCompareRegister) \
  V(ConditionalCompareImmediate) \
  V(ConditionalSelect) \
  V(DataProcessing1Source) \
  V(DataProcessing2Source) \
  V(DataProcessing3Source) \
  V(FPCompare) \
  V(FPConditionalCompare) \
  V(FPConditionalSelect) \
  V(FPImmediate) \
  V(FPDataProcessing1Source) \
  V(FPDataProcessing2Source) \
  V(FPDataProcessing3Source) \
  V(FPIntegerConvert) \
  V(FPFixedPointConvert) \
  V(Crypto2RegSHA) \
  V(Crypto3RegSHA) \
  V(CryptoAES) \
  V(NEON2RegMisc) \
  V(NEON3Different) \
  V(NEON3Same) \
  V(NEONAcrossLanes) \
  V(NEONByIndexedElement) \
  V(NEONCopy) \
  V(NEONExtract) \
  V(NEONLoadStoreMultiStruct) \
  V(NEONLoadStoreMultiStructPostIndex) \
  V(NEONLoadStoreSingleStruct) \
  V(NEONLoadStoreSingleStructPostIndex) \
  V(NEONModifiedImmediate) \
  V(NEONScalar2RegMisc) \
  V(NEONScalar3Diff) \
  V(NEONScalar3Same) \
  V(NEONScalarByIndexedElement) \
  V(NEONScalarCopy) \
  V(NEONScalarPairwise) \
  V(NEONScalarShiftImmediate) \
  V(NEONShiftImmediate) \
  V(NEONTable) \
  V(NEONPerm)

#define VISITOR_LIST_THAT_DONT_RETURN(V) \
  V(Unallocated) \
  V(Unimplemented)

#define VISITOR_LIST(V) \
  VISITOR_LIST_THAT_RETURN(V) \
  VISITOR_LIST_THAT_DONT_RETURN(V)

namespace vixl {
namespace aarch64 {

// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
 public:
  enum VisitorConstness { kConstVisitor, kNonConstVisitor };
  explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
      : constness_(constness) {}

  virtual ~DecoderVisitor() {}

#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
  VISITOR_LIST(DECLARE)
#undef DECLARE

  bool IsConstVisitor() const { return constness_ == kConstVisitor; }
  Instruction* MutableInstruction(const Instruction* instr) {
    VIXL_ASSERT(!IsConstVisitor());
    return const_cast<Instruction*>(instr);
  }

 private:
  const VisitorConstness constness_;
};
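
// A minimal sketch of a DecoderVisitor implementation. The class name
// "InstructionCounter" and its members are illustrative only and are not part
// of VIXL; the sketch assumes it is written inside (or using) the
// vixl::aarch64 namespace, with <stdint.h> available. Because VISITOR_LIST
// expands to one entry per Visit method, the whole pure-virtual interface can
// be implemented with a single helper macro, mirroring the DECLARE pattern
// used above:
//
//   class InstructionCounter : public DecoderVisitor {
//    public:
//     InstructionCounter() : count_(0) {}
//
//   #define DEFINE_VISITOR(A) \
//     virtual void Visit##A(const Instruction* instr) { \
//       (void)instr; \
//       count_++; \
//     }
//     VISITOR_LIST(DEFINE_VISITOR)
//   #undef DEFINE_VISITOR
//
//     uint64_t GetCount() const { return count_; }
//
//    private:
//     uint64_t count_;
//   };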

class Decoder {
 public:
  Decoder() {}

  // Top-level wrappers around the actual decoding function.
  void Decode(const Instruction* instr) {
    std::list<DecoderVisitor*>::iterator it;
    for (it = visitors_.begin(); it != visitors_.end(); it++) {
      VIXL_ASSERT((*it)->IsConstVisitor());
    }
    DecodeInstruction(instr);
  }
  void Decode(Instruction* instr) {
    DecodeInstruction(const_cast<const Instruction*>(instr));
  }

  // Register a new visitor class with the decoder.
  // Decode() will call the corresponding visitor method from all registered
  // visitor classes when decoding reaches the leaf node of the instruction
  // decode tree.
  // Visitors are called in order.
  // A visitor can be registered multiple times.
  //
  //   d.AppendVisitor(V1);
  //   d.AppendVisitor(V2);
  //   d.PrependVisitor(V2);
  //   d.AppendVisitor(V3);
  //
  //   d.Decode(i);
  //
  // will call the visitor methods of V2, V1, V2 and V3, in that order.
  void AppendVisitor(DecoderVisitor* visitor);
  void PrependVisitor(DecoderVisitor* visitor);
  // These helpers register `new_visitor` before or after the first instance
  // of `registered_visitor` in the list.
  // So if
  //   V1, V2, V1, V2
  // are registered in this order in the decoder, calls to
  //   d.InsertVisitorAfter(V3, V1);
  //   d.InsertVisitorBefore(V4, V2);
  // will yield the order
  //   V1, V3, V4, V2, V1, V2
  //
  // For more complex modifications of the order of registered visitors, one
  // can directly access and modify the list of visitors via the `visitors()`
  // accessor.
  void InsertVisitorBefore(DecoderVisitor* new_visitor,
                           DecoderVisitor* registered_visitor);
  void InsertVisitorAfter(DecoderVisitor* new_visitor,
                          DecoderVisitor* registered_visitor);

  // Remove all instances of a previously registered visitor class from the
  // list of visitors stored by the decoder.
  void RemoveVisitor(DecoderVisitor* visitor);

#define DECLARE(A) void Visit##A(const Instruction* instr);
  VISITOR_LIST(DECLARE)
#undef DECLARE

  std::list<DecoderVisitor*>* visitors() { return &visitors_; }
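
  // A usage sketch, reusing the hypothetical InstructionCounter visitor shown
  // before this class (VIXL tools such as the Disassembler and Simulator are
  // DecoderVisitors and plug in the same way):
  //
  //   Decoder decoder;
  //   InstructionCounter counter;
  //   decoder.AppendVisitor(&counter);
  //   // `instr` must point to a valid AArch64 instruction. Every registered
  //   // visitor is called for it, in registration order.
  //   decoder.Decode(instr);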

 private:
  // Decodes an instruction and calls the visitor functions registered with
  // the Decoder class.
  void DecodeInstruction(const Instruction* instr);

  // Decode the PC relative addressing instruction, and call the corresponding
  // visitors.
  // On entry, instruction bits 27:24 = 0x0.
  void DecodePCRelAddressing(const Instruction* instr);

  // Decode the add/subtract immediate instruction, and call the corresponding
  // visitors.
  // On entry, instruction bits 27:24 = 0x1.
  void DecodeAddSubImmediate(const Instruction* instr);

  // Decode the branch, system command, and exception generation parts of
  // the instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
  void DecodeBranchSystemException(const Instruction* instr);

  // Decode the load and store parts of the instruction tree, and call
  // the corresponding visitors.
  // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
  void DecodeLoadStore(const Instruction* instr);

  // Decode the logical immediate and move wide immediate parts of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 27:24 = 0x2.
  void DecodeLogical(const Instruction* instr);

  // Decode the bitfield and extraction parts of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 27:24 = 0x3.
  void DecodeBitfieldExtract(const Instruction* instr);

  // Decode the data processing parts of the instruction tree, and call the
  // corresponding visitors.
  // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
  void DecodeDataProcessing(const Instruction* instr);

  // Decode the floating point parts of the instruction tree, and call the
  // corresponding visitors.
  // On entry, instruction bits 27:24 = {0xE, 0xF}.
  void DecodeFP(const Instruction* instr);

  // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 29:25 = 0x6.
  void DecodeNEONLoadStore(const Instruction* instr);

  // Decode the Advanced SIMD (NEON) vector data processing part of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 28:25 = 0x7.
  void DecodeNEONVectorDataProcessing(const Instruction* instr);

  // Decode the Advanced SIMD (NEON) scalar data processing part of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 28:25 = 0xF.
  void DecodeNEONScalarDataProcessing(const Instruction* instr);

 private:
  // Visitors are registered in a list.
  std::list<DecoderVisitor*> visitors_;
};

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_DECODER_AARCH64_H_
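
// For reference, the sketch below shows how the top-level dispatch implied by
// the "On entry" comments in the Decoder class could look. It is derived only
// from the bit patterns documented above, not from the actual
// DecodeInstruction implementation, and it assumes a hypothetical accessor
// for reading instruction bits 27:24 (shown as ExtractBits(27, 24)). Note
// that 0x1 is listed for both DecodeAddSubImmediate and DecodeDataProcessing,
// and that the NEON decode functions document bit patterns falling within the
// load/store and FP cases, so they are presumably reached further down the
// tree and do not appear here.
//
//   switch (instr->ExtractBits(27, 24)) {
//     case 0x0: DecodePCRelAddressing(instr); break;
//     case 0x1: DecodeAddSubImmediate(instr); break;
//     case 0x2: DecodeLogical(instr); break;
//     case 0x3: DecodeBitfieldExtract(instr); break;
//     case 0x4:
//     case 0x5:
//     case 0x6:
//     case 0x7: DecodeBranchSystemException(instr); break;
//     case 0x8:
//     case 0x9:
//     case 0xC:
//     case 0xD: DecodeLoadStore(instr); break;
//     case 0xA:
//     case 0xB: DecodeDataProcessing(instr); break;
//     case 0xE:
//     case 0xF: DecodeFP(instr); break;
//   }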