/*
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_CODEGEN_TARGET_AARCH32_TARGET_H
#define COMPILER_OPTIMIZER_CODEGEN_TARGET_AARCH32_TARGET_H

#include "operands.h"
#include "encode.h"
#include "callconv.h"
#include "target_info.h"

#ifdef USE_VIXL_ARM32
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
#else
#error "Wrong build type, please add VIXL in build"
#endif  // USE_VIXL_ARM32

namespace ark::compiler::aarch32 {
inline constexpr uint32_t AVAILABLE_DOUBLE_WORD_REGISTERS = 4;
inline constexpr size_t AARCH32_COUNT_REG = 3;
inline constexpr size_t AARCH32_COUNT_VREG = 2;

// Temporary registers used (r12 already used by vixl)
// r11 is used as the FP register for frames
const std::array<unsigned, AARCH32_COUNT_REG> AARCH32_TMP_REG = {
    vixl::aarch32::r8.GetCode(), vixl::aarch32::r9.GetCode(), vixl::aarch32::r12.GetCode()};

// Temporary vector registers used
const std::array<unsigned, AARCH32_COUNT_VREG> AARCH32_TMP_VREG = {vixl::aarch32::s14.GetCode(),
                                                                   vixl::aarch32::s15.GetCode()};

static inline constexpr const uint8_t UNDEF_REG = std::numeric_limits<uint8_t>::max();

static inline vixl::aarch32::Register VixlReg(Reg reg)
{
    ASSERT(reg.IsValid());
    if (reg.IsScalar()) {
        auto vixlReg = vixl::aarch32::Register(reg.GetId());
        ASSERT(vixlReg.IsValid());
        return vixlReg;
    }
    // Unsupported register type
    UNREACHABLE();
    return vixl::aarch32::Register();
}

// Upper half of a register pair
static inline vixl::aarch32::Register VixlRegU(Reg reg)
{
    ASSERT(reg.IsValid());
    if (reg.IsScalar()) {
        auto vixlReg = vixl::aarch32::Register(reg.GetId() + 1);
        ASSERT(reg.GetId() <= AVAILABLE_DOUBLE_WORD_REGISTERS * 2U);
        ASSERT(vixlReg.IsValid());
        return vixlReg;
    }
    // Unsupported register type
    UNREACHABLE();
    return vixl::aarch32::Register();
}
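
// Illustrative note (not part of the interface): a 64-bit scalar value held in
// the register pair {r4, r5} is accessed as VixlReg(reg) -> r4 (low word) and
// VixlRegU(reg) -> r5 (high word), assuming reg.GetId() == 4 and
// reg.GetSize() == DOUBLE_WORD_SIZE.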

static inline vixl::aarch32::VRegister VixlVRegCaseWordSize(Reg reg)
{
    // On AArch32, each double-precision VRegister overlays two single-word (S) registers
    auto vixlVreg = vixl::aarch32::SRegister(reg.GetId());
    ASSERT(vixlVreg.IsValid());
    return vixlVreg;
}

static inline vixl::aarch32::VRegister VixlVReg(Reg reg)
{
    ASSERT(reg.IsValid());
    ASSERT(reg.IsFloat());
    if (reg.GetSize() == WORD_SIZE) {
        return VixlVRegCaseWordSize(reg);
    }
    ASSERT(reg.GetSize() == DOUBLE_WORD_SIZE);
    ASSERT(reg.GetId() % 2U == 0);
    auto vixlVreg = vixl::aarch32::DRegister(reg.GetId() / 2U);
    ASSERT(vixlVreg.IsValid());
    return vixlVreg;
}
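
// Illustrative example: a 32-bit float register with id 6 maps to s6 via
// VixlVRegCaseWordSize, while a 64-bit double register with id 6 maps to d3
// (id / 2), since each D register overlays two consecutive S registers.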

static inline vixl::aarch32::Operand VixlImm(Imm imm)
{
    // 64-bit values are not supported - force a cast to 32 bits
    return vixl::aarch32::Operand(static_cast<int32_t>(imm.GetRawValue()));
}

// Upper half of an immediate
static inline vixl::aarch32::Operand VixlImmU(Imm imm)
{
    // 64-bit values are not supported - force a cast to 32 bits
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    auto data = static_cast<int32_t>(imm.GetRawValue() >> WORD_SIZE);
    return vixl::aarch32::Operand(data);
}
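
// Illustrative example: for the 64-bit immediate 0x0000000100000002, VixlImm
// yields the low word 0x00000002 and VixlImmU yields the high word 0x00000001,
// matching the low/high register pair produced by VixlReg/VixlRegU.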

static inline vixl::aarch32::Operand VixlImm(const int32_t imm)
{
    return vixl::aarch32::Operand(imm);
}

static inline vixl::aarch32::NeonImmediate VixlNeonImm(const float imm)
{
    return vixl::aarch32::NeonImmediate(imm);
}

static inline vixl::aarch32::NeonImmediate VixlNeonImm(const double imm)
{
    return vixl::aarch32::NeonImmediate(imm);
}

class Aarch32RegisterDescription final : public RegistersDescription {
    // r4-r10 - "0000011111110000"
    // NOLINTNEXTLINE(readability-identifier-naming)
    const RegMask CALLEE_SAVED = RegMask(GetCalleeRegsMask(Arch::AARCH32, false));
    // s16-s31 - "11111111111111110000000000000000"
    // NOLINTNEXTLINE(readability-identifier-naming)
    const VRegMask CALLEE_SAVEDV = VRegMask(GetCalleeRegsMask(Arch::AARCH32, true));
    // r0-r3 - "0000000000001111"
    // NOLINTNEXTLINE(readability-identifier-naming)
    const RegMask CALLER_SAVED = RegMask(GetCallerRegsMask(Arch::AARCH32, false));
    // s0-s15 - "00000000000000001111111111111111"
    // NOLINTNEXTLINE(readability-identifier-naming)
    const VRegMask CALLER_SAVEDV = VRegMask(GetCallerRegsMask(Arch::AARCH32, true));

public:
    explicit Aarch32RegisterDescription(ArenaAllocator *allocator);
    NO_MOVE_SEMANTIC(Aarch32RegisterDescription);
    NO_COPY_SEMANTIC(Aarch32RegisterDescription);
    ~Aarch32RegisterDescription() override = default;

    ArenaVector<Reg> GetCalleeSaved() override;
    void SetCalleeSaved(const ArenaVector<Reg> &regs) override;

    // Set used regs - changes the result of GetCalleeSaved
    void SetUsedRegs(const ArenaVector<Reg> &regs) override;

    RegMask GetCallerSavedRegMask() const override;

    VRegMask GetCallerSavedVRegMask() const override;
    bool IsCalleeRegister(Reg reg) override;
    Reg GetZeroReg() const override;
    bool IsZeroReg([[maybe_unused]] Reg reg) const override;

    Reg::RegIDType GetTempReg() override;

    Reg::RegIDType GetTempVReg() override;

    RegMask GetDefaultRegMask() const override;

    VRegMask GetVRegMask() override;

    bool SupportMapping(uint32_t type) override;

    bool IsValid() const override;

    bool IsRegUsed(ArenaVector<Reg> vecReg, Reg reg) override;

    // NOTE(igorban): implement as virtual
    static bool IsTmp(Reg reg);

public:
    // Special implementation-specific getters
    RegMask GetCalleeSavedR();
    VRegMask GetCalleeSavedV();
    RegMask GetCallerSavedR();
    VRegMask GetCallerSavedV();
    uint8_t GetAligmentReg(bool isCallee);

private:
    // Full list of AArch32 general-purpose registers (with vector registers)
    ArenaVector<Reg> aarch32RegList_;
    ArenaVector<Reg> usedRegs_;
    Reg tmpReg1_;
    Reg tmpReg2_;

    RegMask calleeSaved_ {CALLEE_SAVED};
    RegMask callerSaved_ {CALLER_SAVED};

    VRegMask calleeSavedv_ {CALLEE_SAVEDV};
    VRegMask callerSavedv_ {CALLER_SAVEDV};

    uint8_t allignmentRegCallee_ {UNDEF_REG};
    uint8_t allignmentRegCaller_ {UNDEF_REG};
};  // Aarch32RegisterDescription

class Aarch32Encoder;

class Aarch32LabelHolder final : public LabelHolder {
public:
    using LabelType = vixl::aarch32::Label;
    explicit Aarch32LabelHolder(Encoder *enc) : LabelHolder(enc), labels_(enc->GetAllocator()->Adapter()) {}

    LabelId CreateLabel() override;
    void CreateLabels(LabelId size) override;
    void BindLabel(LabelId id) override;
    LabelType *GetLabel(LabelId id);
    LabelId Size() override;
    NO_MOVE_SEMANTIC(Aarch32LabelHolder);
    NO_COPY_SEMANTIC(Aarch32LabelHolder);
    ~Aarch32LabelHolder() override = default;

private:
    ArenaVector<LabelType *> labels_;
    LabelId id_ {0};
    friend Aarch32Encoder;
};  // Aarch32LabelHolder
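
// Typical usage (illustrative sketch, assuming an Aarch32Encoder instance `enc`):
//   auto *labels = enc->GetLabels();
//   auto label = labels->CreateLabel();  // allocate a new label id
//   enc->EncodeJump(label);              // emit a forward branch to it
//   labels->BindLabel(label);            // bind the label at the current cursor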

class Aarch32ParameterInfo final : public ParameterInfo {
public:
    std::variant<Reg, uint8_t> GetNativeParam(const TypeInfo &type) override;
    Location GetNextLocation(DataType::Type type) override;
};

class Aarch32CallingConvention : public CallingConvention {
public:
    Aarch32CallingConvention(ArenaAllocator *allocator, Encoder *enc, RegistersDescription *descr, CallConvMode mode);

    static constexpr auto GetTarget();

    bool IsValid() const override;

    void GeneratePrologue(const FrameInfo &frameInfo) override;
    void GenerateEpilogue(const FrameInfo &frameInfo, std::function<void()> postJob) override;
    void GenerateNativePrologue(const FrameInfo &frameInfo) override;
    void GenerateNativeEpilogue(const FrameInfo &frameInfo, std::function<void()> postJob) override;

    void *GetCodeEntry() override;
    uint32_t GetCodeSize() override;

    vixl::aarch32::MacroAssembler *GetMasm();

    // Calculates information about parameters and reserves regsOffset registers for special needs
    ParameterInfo *GetParameterInfo(uint8_t regsOffset) override;

    NO_MOVE_SEMANTIC(Aarch32CallingConvention);
    NO_COPY_SEMANTIC(Aarch32CallingConvention);
    ~Aarch32CallingConvention() override = default;

private:
    uint8_t PushPopVRegs(VRegMask vregs, bool isPush);
    uint8_t PushRegs(RegMask regs, VRegMask vregs, bool isCallee);
    uint8_t PopRegs(RegMask regs, VRegMask vregs, bool isCallee);
};  // Aarch32CallingConvention

class Aarch32Encoder final : public Encoder {
public:
    explicit Aarch32Encoder(ArenaAllocator *allocator);

    LabelHolder *GetLabels() const override;
    ~Aarch32Encoder() override;

    NO_COPY_SEMANTIC(Aarch32Encoder);
    NO_MOVE_SEMANTIC(Aarch32Encoder);

    bool IsValid() const override;
    static constexpr auto GetTarget();
    void SetMaxAllocatedBytes(size_t size) override;

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define UNARY_OPERATION(opc) void Encode##opc(Reg dst, Reg src0) override;
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define BINARY_OPERATION(opc)                                \
    void Encode##opc(Reg dst, Reg src0, Reg src1) override;  \
    void Encode##opc(Reg dst, Reg src0, Imm src1) override;
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define INST_DEF(OPCODE, MACRO) MACRO(OPCODE)

    ENCODE_MATH_LIST(INST_DEF)

#undef UNARY_OPERATION
#undef BINARY_OPERATION
#undef INST_DEF

    void EncodeNop() override;

    // Additional special instructions
    void EncodeCastToBool(Reg dst, Reg src) override;
    void EncodeCast(Reg dst, bool dstSigned, Reg src, bool srcSigned) override;
    void EncodeMin(Reg dst, bool dstSigned, Reg src0, Reg src1) override;
    void EncodeDiv(Reg dst, bool dstSigned, Reg src0, Reg src1) override;
    void EncodeMod(Reg dst, bool dstSigned, Reg src0, Reg src1) override;
    void EncodeMax(Reg dst, bool dstSigned, Reg src0, Reg src1) override;

    void EncodeLdr(Reg dst, bool dstSigned, MemRef mem) override;
    void EncodeLdr(Reg dst, bool dstSigned, const vixl::aarch32::MemOperand &vixlMem);
    void EncodeLdrAcquire(Reg dst, bool dstSigned, MemRef mem) override;

    void EncodeMemoryBarrier(memory_order::Order order) override;

    void EncodeMov(Reg dst, Imm src) override;
    void EncodeStr(Reg src, const vixl::aarch32::MemOperand &vixlMem);
    void EncodeStr(Reg src, MemRef mem) override;
    void EncodeStrRelease(Reg src, MemRef mem) override;
    void EncodeStp(Reg src0, Reg src1, MemRef mem) override;

    /* builtins-related encoders */
    void EncodeIsInf(Reg dst, Reg src) override;
    void EncodeIsInteger(Reg dst, Reg src) override;
    void EncodeIsSafeInteger(Reg dst, Reg src) override;
    void EncodeBitCount(Reg dst, Reg src) override;
    void EncodeCountLeadingZeroBits(Reg dst, Reg src) override;
    void EncodeCeil([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeFloor([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeRint([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeTrunc([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeRoundAway([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeRoundToPInf([[maybe_unused]] Reg dst, [[maybe_unused]] Reg src) override;
    void EncodeReverseBytes(Reg dst, Reg src) override;
    void EncodeReverseBits(Reg dst, Reg src) override;
    void EncodeFpToBits(Reg dst, Reg src) override;
    void EncodeMoveBitsRaw(Reg dst, Reg src) override;

    void EncodeLdrExclusive(Reg dst, Reg addr, bool acquire) override;
    void EncodeStrExclusive(Reg dst, Reg src, Reg addr, bool release) override;

    // Zeroed high part: [reg.size, 64)
    void EncodeStrz(Reg src, MemRef mem) override;
    void EncodeSti(int64_t src, uint8_t srcSizeBytes, MemRef mem) override;
    void EncodeSti(double src, MemRef mem) override;
    void EncodeSti(float src, MemRef mem) override;
    // Size must be 8, 16, 32 or 64
    void EncodeMemCopy(MemRef memFrom, MemRef memTo, size_t size) override;
    // Size must be 8, 16, 32 or 64
    // Zeroed high part: [reg.size, 64)
    void EncodeMemCopyz(MemRef memFrom, MemRef memTo, size_t size) override;

    void EncodeCmp(Reg dst, Reg src0, Reg src1, Condition cc) override;

    void EncodeCompare(Reg dst, Reg src0, Reg src1, Condition cc) override;
    void EncodeCompareTest(Reg dst, Reg src0, Reg src1, Condition cc) override;
    void EncodeAtomicByteOr(Reg addr, Reg value, bool fastEncoding) override;

    void EncodeSelect(ArgsSelect &&args) override;
    void EncodeSelect(ArgsSelectImm &&args) override;
    void EncodeSelectTest(ArgsSelect &&args) override;
    void EncodeSelectTest(ArgsSelectImm &&args) override;

    bool CanEncodeImmAddSubCmp(int64_t imm, uint32_t size, bool signedCompare) override;
    bool CanEncodeImmLogical(uint64_t imm, uint32_t size) override;

    size_t GetCursorOffset() const override;
    void SetCursorOffset(size_t offset) override;

    Reg AcquireScratchRegister(TypeInfo type) override;
    void AcquireScratchRegister(Reg reg) override;
    void ReleaseScratchRegister(Reg reg) override;
    bool IsScratchRegisterReleased(Reg reg) const override;
    RegMask GetScratchRegistersMask() const override;
    RegMask GetScratchFpRegistersMask() const override;
    RegMask GetAvailableScratchRegisters() const override;
    VRegMask GetAvailableScratchFpRegisters() const override;
    void SetRegister(RegMask *mask, VRegMask *vmask, Reg reg, bool val) const override;

    TypeInfo GetRefType() override;

    size_t DisasmInstr(std::ostream &stream, size_t pc, ssize_t codeOffset) const override;

    void *BufferData() const override;
    size_t BufferSize() const override;

    bool InitMasm() override;
    void Finalize() override;

    void MakeCall(compiler::RelocationInfo *relocation) override;
    void MakeCall(const void *entryPoint) override;
    void MakeCall(MemRef entryPoint) override;
    void MakeCall(Reg reg) override;

    void MakeCallAot(intptr_t offset) override;
    void MakeCallByOffset(intptr_t offset) override;
    void MakeLoadAotTable(intptr_t offset, Reg reg) override;
    void MakeLoadAotTableAddr(intptr_t offset, Reg addr, Reg val) override;

    // Encode unconditional branch
    void EncodeJump(LabelHolder::LabelId id) override;
    // Encode jump with compare to zero
    void EncodeJump(LabelHolder::LabelId id, Reg src, Condition cc) override;
    // Compare reg and immediate and branch
    void EncodeJump(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) override;
    // Compare two regs and branch
    void EncodeJump(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) override;
    // Test reg against immediate and branch
    void EncodeJumpTest(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) override;
    // Test two regs and branch
    void EncodeJumpTest(LabelHolder::LabelId id, Reg src0, Reg src1, Condition cc) override;
    // Encode jump by register value
    void EncodeJump(Reg dst) override;
    void EncodeJump(RelocationInfo *relocation) override;
    void EncodeBitTestAndBranch(LabelHolder::LabelId id, compiler::Reg reg, uint32_t bitPos, bool bitValue) override;

    void EncodeAbort() override;
    void EncodeReturn() override;
    void EncodeStackOverflowCheck(ssize_t offset) override;

    void SaveRegisters(RegMask registers, ssize_t slot, size_t startReg, bool isFp) override;
    void LoadRegisters(RegMask registers, ssize_t slot, size_t startReg, bool isFp) override;
    void SaveRegisters(RegMask registers, bool isFp, ssize_t slot, Reg base, RegMask mask) override;
    void LoadRegisters(RegMask registers, bool isFp, ssize_t slot, Reg base, RegMask mask) override;
    void PushRegisters(RegMask registers, bool isFp) override;
    void PopRegisters(RegMask registers, bool isFp) override;

    static vixl::aarch32::MemOperand ConvertMem(MemRef mem);

    static bool IsNeedToPrepareMemLdS(MemRef mem, const TypeInfo &memType, bool isSigned);
    vixl::aarch32::MemOperand PrepareMemLdS(MemRef mem, const TypeInfo &memType, vixl::aarch32::Register tmp,
                                            bool isSigned, bool copySp = false);

    void MakeLibCall(Reg dst, Reg src0, Reg src1, void *entryPoint, bool secondValue = false);

    void MakeLibCall(Reg dst, Reg src, void *entryPoint);

    vixl::aarch32::MacroAssembler *GetMasm() const;
    size_t GetLabelAddress(LabelHolder::LabelId label) override;
    bool LabelHasLinks(LabelHolder::LabelId label) override;

private:
    template <bool IS_STORE>
    void LoadStoreRegisters(RegMask registers, ssize_t slot, size_t startReg, bool isFp);
    template <bool IS_STORE>
    void LoadStoreRegisters(RegMask registers, bool isFp, int32_t slot, Reg base, RegMask mask);
    template <bool IS_STORE>
    void LoadStoreRegistersMainLoop(RegMask registers, bool isFp, int32_t slot, Reg base, RegMask mask);

private:
    vixl::aarch32::MemOperand PrepareMemLdSForFloat(MemRef mem, vixl::aarch32::Register tmp);
    void EncodeCastFloatToFloat(Reg dst, Reg src);
    void EncodeCastFloatToInt64(Reg dst, Reg src);
    void EncodeCastDoubleToInt64(Reg dst, Reg src);
    void EncodeCastScalarToFloat(Reg dst, Reg src, bool srcSigned);
    void EncodeCastFloatToScalar(Reg dst, bool dstSigned, Reg src);
    void EncodeCastFloatToScalarWithSmallDst(Reg dst, bool dstSigned, Reg src);

    void EncoderCastExtendFromInt32(Reg dst, bool dstSigned);
    void EncodeCastScalar(Reg dst, bool dstSigned, Reg src, bool srcSigned);
    void EncodeCastScalarFromSignedScalar(Reg dst, Reg src);
    void EncodeCastScalarFromUnsignedScalar(Reg dst, Reg src);
    template <bool IS_MAX>
    void EncodeMinMaxFp(Reg dst, Reg src0, Reg src1);
    void EncodeVorr(Reg dst, Reg src0, Reg src1);
    void EncodeVand(Reg dst, Reg src0, Reg src1);
    void MakeLibCallWithFloatResult(Reg dst, Reg src0, Reg src1, void *entryPoint, bool secondValue);
    void MakeLibCallWithDoubleResult(Reg dst, Reg src0, Reg src1, void *entryPoint, bool secondValue);
    void MakeLibCallWithInt64Result(Reg dst, Reg src0, Reg src1, void *entryPoint, bool secondValue);
    void MakeLibCallFromFloatToScalar(Reg dst, Reg src, void *entryPoint);
    void MakeLibCallFromScalarToFloat(Reg dst, Reg src, void *entryPoint);
    void CompareHelper(Reg src0, Reg src1, Condition *cc);
    void TestHelper(Reg src0, Reg src1, Condition cc);
    bool CompareImmHelper(Reg src, int64_t imm, Condition *cc);
    void TestImmHelper(Reg src, Imm imm, Condition cc);
    bool CompareNegImmHelper(Reg src, int64_t value, const Condition *cc);
    bool ComparePosImmHelper(Reg src, int64_t value, Condition *cc);
    void CompareZeroHelper(Reg src, Condition *cc);
    void EncodeCmpFracWithDelta(Reg src);
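    // Maximum addressing-mode offsets accepted when preparing memory operands;
    // these presumably mirror the AArch32 encodings: 12-bit immediate offsets
    // for plain LDR/STR, 8-bit offsets for the halfword/doubleword forms, and
    // 8-bit word-scaled offsets for VLDR/VSTR.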
    static inline constexpr int32_t MEM_BIG_OFFSET = 4095;
    static inline constexpr int32_t MEM_SMALL_OFFSET = 255;
    static inline constexpr int32_t VMEM_OFFSET = 1020;
    Aarch32LabelHolder *labels_ {nullptr};
    vixl::aarch32::MacroAssembler *masm_ {nullptr};
    bool lrAcquired_ {false};
};  // Aarch32Encoder

}  // namespace ark::compiler::aarch32

#endif  // COMPILER_OPTIMIZER_CODEGEN_TARGET_AARCH32_TARGET_H