/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "mapping_table.h"
#include "mir_to_lir-inl.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"

namespace art {

bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
  bool res = false;
  if (rl_src.is_const) {
    if (rl_src.wide) {
      if (rl_src.fp) {
        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
      } else {
        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
      }
    } else {
      if (rl_src.fp) {
        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
      } else {
        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
      }
    }
  }
  return res;
}

void Mir2Lir::MarkSafepointPC(LIR* inst) {
  inst->def_mask = ENCODE_ALL;
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
}
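// Note on MarkSafepointPC above: def_mask = ENCODE_ALL marks the instruction
// as defining every resource, which should keep optimization passes from
// moving code across the safepoint (a sketch of the intent; the precise
// semantics come from the target's resource-mask encoding).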

bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put) {
  return cu_->compiler_driver->ComputeInstanceFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
}

/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir) {
  lir->flags.is_nop = true;
}

void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
  uint64_t* mask_ptr;
  uint64_t mask = ENCODE_MEM;
  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
  if (is_load) {
    mask_ptr = &lir->use_mask;
  } else {
    mask_ptr = &lir->def_mask;
  }
  /* Clear out the memref flags */
  *mask_ptr &= ~mask;
  /* ... and then add back the one we need */
  switch (mem_type) {
    case kLiteral:
      DCHECK(is_load);
      *mask_ptr |= ENCODE_LITERAL;
      break;
    case kDalvikReg:
      *mask_ptr |= ENCODE_DALVIK_REG;
      break;
    case kHeapRef:
      *mask_ptr |= ENCODE_HEAP_REF;
      break;
    case kMustNotAlias:
      /* Currently only loads can be marked as kMustNotAlias */
      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
      break;
    default:
      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
  }
}

/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                      bool is64bit) {
  SetMemRefType(lir, is_load, kDalvikReg);

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}

/*
 * Debugging macros
 */
#define DUMP_RESOURCE_MASK(X)

/* Pretty-print a LIR instruction */
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
  int offset = lir->offset;
  int dest = lir->operands[0];
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
        lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec << lir->operands[0];
      break;
    default:
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                            lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                lir, base_addr));
        LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
                                  reinterpret_cast<unsigned int>(base_addr + offset),
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->use_mask, "use"));
  }
  if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->def_mask, "def"));
  }
}

void Mir2Lir::DumpPromotionMap() {
  int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1;
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
    }

    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ?
                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}

/* Dump a mapping table */
void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descriptor,
                               const std::string& name, const std::string& signature,
                               const std::vector<uint32_t>& v) {
  if (v.size() > 0) {
    std::string line(StringPrintf("\n  %s %s%s_%s_table[%zu] = {", table_name,
                     descriptor.c_str(), name.c_str(), signature.c_str(), v.size()));
    std::replace(line.begin(), line.end(), ';', '_');
    LOG(INFO) << line;
    for (uint32_t i = 0; i < v.size(); i += 2) {
      line = StringPrintf("    {0x%05x, 0x%04x},", v[i], v[i+1]);
      LOG(INFO) << line;
    }
    LOG(INFO) << "  };\n\n";
  }
}

/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins                  : " << cu_->num_ins;
  LOG(INFO) << "Outs                 : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills           : " << num_core_spills_;
  LOG(INFO) << "FPSpills             : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps        : " << cu_->num_compiler_temps;
  LOG(INFO) << "Frame size           : " << frame_size_;
  LOG(INFO) << "code size is " << total_size_ <<
      " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  std::string signature(cu_->dex_file->GetMethodSignature(method_id));
  std::string name(cu_->dex_file->GetMethodName(method_id));
  std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
}

/*
 * Search the existing constants in the literal pool for an exact or close match
 * within a specified delta (greater than or equal to 0).
 */
LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
  while (data_target) {
    if (static_cast<unsigned>(value - data_target->operands[0]) <= delta)
      return data_target;
    data_target = data_target->next;
  }
  return NULL;
}
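// Note: the unsigned comparison above doubles as a range check. If value is
// smaller than the pool constant, (value - operands[0]) wraps to a large
// unsigned number and fails the <= delta test, so only pool entries in
// [value - delta, value] can match.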

/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
  bool lo_match = false;
  LIR* lo_target = NULL;
  while (data_target) {
    if (lo_match && (data_target->operands[0] == val_hi)) {
      // Record high word in case we need to expand this later.
      lo_target->operands[1] = val_hi;
      return lo_target;
    }
    lo_match = false;
    if (data_target->operands[0] == val_lo) {
      lo_match = true;
      lo_target = data_target;
    }
    data_target = data_target->next;
  }
  return NULL;
}
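// Note: a wide match requires the low word to be immediately followed by the
// matching high word in the literal list. lo_match only survives one
// iteration, so non-adjacent lo/hi words are never reported as a match.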

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant to the constant pool */
LIR* Mir2Lir::AddWordData(LIR** constant_list_p, int value) {
  /* Add the constant to the literal pool */
  if (constant_list_p) {
    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocData));
    new_value->operands[0] = value;
    new_value->next = *constant_list_p;
    *constant_list_p = new_value;
    return new_value;
  }
  return NULL;
}

/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR** constant_list_p, int val_lo, int val_hi) {
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}
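// Note: AddWordData prepends to the list, so pushing val_hi first leaves
// val_lo at the head. The low word therefore receives the lower offset when
// AssignLiteralOffsetCommon later walks the list in order, an ordering that
// InstallLiteralPools preserves when emitting literal_list_.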

static void PushWord(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}
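// Worked example: PushWord(buf, 0x12345678) appends the bytes 0x78, 0x56,
// 0x34, 0x12; the word is emitted in little-endian order.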

static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
  while (buf.size() < offset) {
    buf.push_back(0);
  }
}

/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools() {
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    PushWord(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->class_def_idx,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target,
                                       static_cast<InvokeType>(data_lir->operands[1]),
                                       code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique based on target to ensure code deduplication works
    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
    PushWord(code_buffer_, unique_patch_value);
    data_lir = NEXT_LIR(data_lir);
  }
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target,
                                         static_cast<InvokeType>(data_lir->operands[1]),
                                         code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique based on target to ensure code deduplication works
    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
    PushWord(code_buffer_, unique_patch_value);
    data_lir = NEXT_LIR(data_lir);
  }
}

/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    /*
     * For Arm, our reference point is the address of the bx
     * instruction that does the launch, so we have to subtract
     * the auto pc-advance. For other targets the reference point
     * is a label, so we can use the offset as-is.
     */
    int bx_offset = INVALID_OFFSET;
    switch (cu_->instruction_set) {
      case kThumb2:
        bx_offset = tab_rec->anchor->offset + 4;
        break;
      case kX86:
        bx_offset = 0;
        break;
      case kMips:
        bx_offset = tab_rec->anchor->offset;
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    if (cu_->verbose) {
      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
    }
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << "  Case[" << elems << "] key: 0x"
                    << std::hex << keys[elems] << ", disp: 0x"
                    << std::hex << disp;
        }
        PushWord(code_buffer_, keys[elems]);
        PushWord(code_buffer_,
                 tab_rec->targets[elems]->offset - bx_offset);
      }
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << "  Case[" << elems << "] disp: 0x"
                    << std::hex << disp;
        }
        PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
      }
    }
  }
}

/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
    }
  }
}

static int AssignLiteralOffsetCommon(LIR* lir, int offset) {
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += 4;
  }
  return offset;
}

// Make sure we have a code address for every declared catch entry
bool Mir2Lir::VerifyCatchEntries() {
  bool success = true;
  for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin();
       it != mir_graph_->catches_.end(); ++it) {
    uint32_t dex_pc = *it;
    bool found = false;
    for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
      if (dex_pc == dex2pc_mapping_table_[i+1]) {
        found = true;
        break;
      }
    }
    if (!found) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    }
  }
  // Now, try in the other direction
  for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
    uint32_t dex_pc = dex2pc_mapping_table_[i+1];
    if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
      success = false;
    }
  }
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << dex2pc_mapping_table_.size()/2;
  }
  return success;
}


void Mir2Lir::CreateMappingTables() {
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_mapping_table_.push_back(tgt_lir->offset);
      pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset);
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_mapping_table_.push_back(tgt_lir->offset);
      dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset);
    }
  }
  if (kIsDebugBuild) {
    CHECK(VerifyCatchEntries());
  }
  CHECK_EQ(pc2dex_mapping_table_.size() & 1, 0U);
  CHECK_EQ(dex2pc_mapping_table_.size() & 1, 0U);
  uint32_t total_entries = (pc2dex_mapping_table_.size() + dex2pc_mapping_table_.size()) / 2;
  uint32_t pc2dex_entries = pc2dex_mapping_table_.size() / 2;
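  // Encoded layout sketch: total_entries, pc2dex_entries, then the pc2dex
  // (native pc, dex pc) pairs followed by the dex2pc pairs; a reader can
  // recover the dex2pc entry count as total_entries - pc2dex_entries.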
  encoded_mapping_table_.PushBack(total_entries);
  encoded_mapping_table_.PushBack(pc2dex_entries);
  encoded_mapping_table_.InsertBack(pc2dex_mapping_table_.begin(), pc2dex_mapping_table_.end());
  encoded_mapping_table_.InsertBack(dex2pc_mapping_table_.begin(), dex2pc_mapping_table_.end());
  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(&encoded_mapping_table_.GetData()[0]);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    CHECK_EQ(table.DexToPcSize(), dex2pc_mapping_table_.size() / 2);
    MappingTable::PcToDexIterator it = table.PcToDexBegin();
    for (uint32_t i = 0; i < pc2dex_mapping_table_.size(); ++i, ++it) {
      CHECK_EQ(pc2dex_mapping_table_.at(i), it.NativePcOffset());
      ++i;
      CHECK_EQ(pc2dex_mapping_table_.at(i), it.DexPc());
    }
    MappingTable::DexToPcIterator it2 = table.DexToPcBegin();
    for (uint32_t i = 0; i < dex2pc_mapping_table_.size(); ++i, ++it2) {
      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.NativePcOffset());
      ++i;
      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.DexPc());
    }
  }
}

class NativePcToReferenceMapBuilder {
 public:
  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
                                size_t entries, uint32_t max_native_offset,
                                size_t references_width) : entries_(entries),
                                references_width_(references_width), in_use_(entries),
                                table_(table) {
    // Compute width in bytes needed to hold max_native_offset.
    native_offset_width_ = 0;
    while (max_native_offset != 0) {
      native_offset_width_++;
      max_native_offset >>= 8;
    }
    // Resize table and set up header.
    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
    CHECK_LT(native_offset_width_, 1U << 3);
    (*table)[0] = native_offset_width_ & 7;
    CHECK_LT(references_width_, 1U << 13);
    (*table)[0] |= (references_width_ << 3) & 0xFF;
    (*table)[1] = (references_width_ >> 5) & 0xFF;
    CHECK_LT(entries, 1U << 16);
    (*table)[2] = entries & 0xFF;
    (*table)[3] = (entries >> 8) & 0xFF;
  }
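  // Header sketch, derived from the writes above (4 bytes): bits [2:0] of
  // byte 0 hold the native offset width, the 13-bit references width spans
  // bits [7:3] of byte 0 plus byte 1, and bytes 2-3 hold the entry count,
  // little-endian. Example: offset width 2, references width 4 and 10 entries
  // give the header bytes {0x22, 0x00, 0x0a, 0x00}.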

  void AddEntry(uint32_t native_offset, const uint8_t* references) {
    size_t table_index = TableIndex(native_offset);
    while (in_use_[table_index]) {
      table_index = (table_index + 1) % entries_;
    }
    in_use_[table_index] = true;
    SetNativeOffset(table_index, native_offset);
    DCHECK_EQ(native_offset, GetNativeOffset(table_index));
    SetReferences(table_index, references);
  }
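  // Note: entries are placed by open addressing with linear probing. On a
  // hash collision the builder walks forward (mod entries_) to the next free
  // slot, so a reader presumably probes the same way to resolve an offset.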

 private:
  size_t TableIndex(uint32_t native_offset) {
    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
  }

  uint32_t GetNativeOffset(size_t table_index) {
    uint32_t native_offset = 0;
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      native_offset |= (*table_)[table_offset + i] << (i * 8);
    }
    return native_offset;
  }

  void SetNativeOffset(size_t table_index, uint32_t native_offset) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
    }
  }

  void SetReferences(size_t table_index, const uint8_t* references) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
  }

  size_t EntryWidth() const {
    return native_offset_width_ + references_width_;
  }

  // Number of entries in the table.
  const size_t entries_;
  // Number of bytes used to encode the reference bitmap.
  const size_t references_width_;
  // Number of bytes used to encode a native offset.
  size_t native_offset_width_;
  // Entries that are in use.
  std::vector<bool> in_use_;
  // The table we're building.
  std::vector<uint8_t>* const table_;
};

void Mir2Lir::CreateNativeGcMap() {
  const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_;
  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < mapping_table.size(); i += 2) {
    uint32_t native_offset = mapping_table[i + 0];
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }
  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
  const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
  verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
  // Compute native offset to references size.
  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
                                                      mapping_table.size() / 2, max_native_offset,
                                                      dex_gc_map.RegWidth());

  for (size_t i = 0; i < mapping_table.size(); i += 2) {
    uint32_t native_offset = mapping_table[i + 0];
    uint32_t dex_pc = mapping_table[i + 1];
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    native_gc_map_builder.AddEntry(native_offset, references);
  }
}

/* Determine the offset of each literal field */
int Mir2Lir::AssignLiteralOffset(int offset) {
  offset = AssignLiteralOffsetCommon(literal_list_, offset);
  offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
  offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
  return offset;
}

int Mir2Lir::AssignSwitchTablesOffset(int offset) {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      offset += tab_rec->table[1] * (sizeof(int) * 2);
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      offset += tab_rec->table[1] * sizeof(int);
    }
  }
  return offset;
}

int Mir2Lir::AssignFillArrayDataOffset(int offset) {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    offset += tab_rec->size;
    // word align
    offset = (offset + 3) & ~3;
  }
  return offset;
}
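// Worked example of the word-align step above: with offset == 13,
// (13 + 3) & ~3 == 16, so the next table begins on a 4-byte boundary.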

// LIR offset assignment.
int Mir2Lir::AssignInsnOffsets() {
  LIR* lir;
  int offset = 0;

  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
    lir->offset = offset;
    if (lir->opcode >= 0) {
      if (!lir->flags.is_nop) {
        offset += lir->flags.size;
      }
    } else if (lir->opcode == kPseudoPseudoAlign4) {
      if (offset & 0x2) {
        offset += 2;
        lir->operands[0] = 1;
      } else {
        lir->operands[0] = 0;
      }
    }
    /* Pseudo opcodes don't consume space */
  }

  return offset;
}

/*
 * Walk the compilation unit and assign offsets to instructions
 * and literals and compute the total size of the compiled unit.
 */
void Mir2Lir::AssignOffsets() {
  int offset = AssignInsnOffsets();

  /* Const values have to be word aligned */
  offset = (offset + 3) & ~3;

  /* Set up offsets for literals */
  data_offset_ = offset;

  offset = AssignLiteralOffset(offset);

  offset = AssignSwitchTablesOffset(offset);

  offset = AssignFillArrayDataOffset(offset);

  total_size_ = offset;
}

/*
 * Go over each instruction in the list and calculate the offset from the top
 * before sending them off to the assembler. If an out-of-range branch distance
 * is seen, rearrange the instructions a bit to correct it.
 */
void Mir2Lir::AssembleLIR() {
  AssignOffsets();
  int assembler_retries = 0;
  /*
   * Assemble here. Note that we generate code with optimistic assumptions
   * and if found not to work, we'll have to redo the sequence and retry.
   */

  while (true) {
    AssemblerStatus res = AssembleInstructions(0);
    if (res == kSuccess) {
      break;
    } else {
      assembler_retries++;
      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
        CodegenDump();
        LOG(FATAL) << "Assembler error - too many retries";
      }
      // Redo offsets and try again
      AssignOffsets();
      code_buffer_.clear();
    }
  }

  // Install literals
  InstallLiteralPools();

  // Install switch tables
  InstallSwitchTables();

  // Install fill array data
  InstallFillArrayData();

  // Create the mapping table and native offset to reference map.
  CreateMappingTables();

  CreateNativeGcMap();
}

/*
 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
 * offset vaddr. This label will be used to fix up the case
 * branch table during the assembly phase. Be sure to set
 * all resource flags on this to prevent code motion across
 * target boundaries. KeyVal is just there for debugging.
 */
LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
  SafeMap<unsigned int, LIR*>::iterator it;
  it = boundary_map_.find(vaddr);
  if (it == boundary_map_.end()) {
    LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
  }
  LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
  new_label->dalvik_offset = vaddr;
  new_label->opcode = kPseudoCaseLabel;
  new_label->operands[0] = keyVal;
  InsertLIRAfter(it->second, new_label);
  return new_label;
}

void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  int base_vaddr = tab_rec->vaddr;
  const int* targets = reinterpret_cast<const int*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
  }
}

void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  int base_vaddr = tab_rec->vaddr;
  int entries = table[1];
  const int* keys = reinterpret_cast<const int*>(&table[2]);
  const int* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
  }
}

void Mir2Lir::ProcessSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
      MarkPackedCaseLabels(tab_rec);
    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      MarkSparseCaseLabels(tab_rec);
    } else {
      LOG(FATAL) << "Invalid switch table";
    }
  }
}

void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  uint16_t ident = table[0];
  int entries = table[1];
  const int* keys = reinterpret_cast<const int*>(&table[2]);
  const int* targets = &keys[entries];
  LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
  }
}

void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t ident = table[0];
  const int* targets = reinterpret_cast<const int*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << (i + low_key) << "] -> 0x" << std::hex
              << targets[i];
  }
}

/*
 * Set up special LIR to mark a Dalvik byte-code instruction start and
 * record it in the boundary_map. NOTE: in cases such as kMirOpCheck in
 * which we split a single Dalvik instruction, only the first MIR op
 * associated with a Dalvik PC should be entered into the map.
 */
LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
  LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
  if (boundary_map_.find(offset) == boundary_map_.end()) {
    boundary_map_.Put(offset, res);
  }
  return res;
}

bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
  bool is_taken;
  switch (opcode) {
    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
    case Instruction::IF_NE: is_taken = (src1 != src2); break;
    case Instruction::IF_LT: is_taken = (src1 < src2); break;
    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
    case Instruction::IF_GT: is_taken = (src1 > src2); break;
    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
      is_taken = false;
  }
  return is_taken;
}

// Convert relation of src1/src2 to src2/src1
ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
  ConditionCode res;
  switch (before) {
    case kCondEq: res = kCondEq; break;
    case kCondNe: res = kCondNe; break;
    case kCondLt: res = kCondGt; break;
    case kCondGt: res = kCondLt; break;
    case kCondLe: res = kCondGe; break;
    case kCondGe: res = kCondLe; break;
    default:
      res = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected ccode " << before;
  }
  return res;
}

// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      code_literal_list_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
      intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      current_dalvik_offset_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL) {
  promotion_map_ = static_cast<PromotionMap*>
      (arena_->Alloc((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
                     sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
}

void Mir2Lir::Materialize() {
  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming

  /* Allocate Registers using simple local allocation scheme */
  SimpleRegAlloc();

  if (mir_graph_->IsSpecialCase()) {
    /*
     * Custom codegen for special cases. If for any reason the
     * special codegen doesn't succeed, first_lir_insn_ will
     * be set to NULL.
     */
    SpecialMIR2LIR(mir_graph_->GetSpecialCase());
  }

  /* Convert MIR to LIR, etc. */
  if (first_lir_insn_ == NULL) {
    MethodMIR2LIR();
  }

  /* Method is not empty */
  if (first_lir_insn_) {
    // mark the targets of switch statement case labels
    ProcessSwitchTables();

    /* Convert LIR into machine code. */
    AssembleLIR();

    if (cu_->verbose) {
      CodegenDump();
    }
  }
}

CompiledMethod* Mir2Lir::GetCompiledMethod() {
  // Combine vmap tables - core regs, then fp regs - into vmap_table
  std::vector<uint16_t> raw_vmap_table;
  // Core regs may have been inserted out of order - sort first
  std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
  for (size_t i = 0; i < core_vmap_table_.size(); ++i) {
    // Copy, stripping out the phys register sort key
    raw_vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
  }
  // If we have a frame, push a marker to take place of lr
  if (frame_size_ > 0) {
    raw_vmap_table.push_back(INVALID_VREG);
  } else {
    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
  }
  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
  for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
    raw_vmap_table.push_back(fp_vmap_table_[i]);
  }
  UnsignedLeb128EncodingVector vmap_encoder;
  // Prefix the encoded data with its size.
  vmap_encoder.PushBack(raw_vmap_table.size());
  for (uint16_t cur : raw_vmap_table) {
    vmap_encoder.PushBack(cur);
  }
  CompiledMethod* result =
      new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_.GetData(),
                         vmap_encoder.GetData(), native_gc_map_);
  return result;
}

int Mir2Lir::ComputeFrameSize() {
  /* Figure out the frame size */
  static const uint32_t kAlignMask = kStackAlignment - 1;
  uint32_t size = (num_core_spills_ + num_fp_spills_ +
                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
                   cu_->num_compiler_temps + 1 /* cur_method* */)
                   * sizeof(uint32_t);
  /* Align and set */
  return (size + kAlignMask) & ~(kAlignMask);
}
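// Worked example (assuming kStackAlignment == 16): with 2 core spills, no fp
// spills, 5 regs, 2 outs and no compiler temps, the raw size is
// (2 + 0 + 1 + 5 + 2 + 0 + 1) * 4 = 44 bytes, which rounds up to 48.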

/*
 * Append an LIR instruction to the LIR list maintained by a compilation
 * unit
 */
void Mir2Lir::AppendLIR(LIR* lir) {
  if (first_lir_insn_ == NULL) {
    DCHECK(last_lir_insn_ == NULL);
    last_lir_insn_ = first_lir_insn_ = lir;
    lir->prev = lir->next = NULL;
  } else {
    last_lir_insn_->next = lir;
    lir->prev = last_lir_insn_;
    lir->next = NULL;
    last_lir_insn_ = lir;
  }
}

/*
 * Insert an LIR instruction before the current instruction, which cannot be the
 * first instruction.
 *
 * prev_lir <-> new_lir <-> current_lir
 */
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
  DCHECK(current_lir->prev != NULL);
  LIR* prev_lir = current_lir->prev;

  prev_lir->next = new_lir;
  new_lir->prev = prev_lir;
  new_lir->next = current_lir;
  current_lir->prev = new_lir;
}

/*
 * Insert an LIR instruction after the current instruction, which cannot be the
 * last instruction (new_lir->next->prev is written, so current_lir must have a
 * successor).
 *
 * current_lir -> new_lir -> old_next
 */
void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
  new_lir->prev = current_lir;
  new_lir->next = current_lir->next;
  current_lir->next = new_lir;
  new_lir->next->prev = new_lir;
}


}  // namespace art