/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asm_support_arm.S"
#include "interpreter/cfi_asm_support.h"

#include "arch/quick_alloc_entrypoints.S"
#include "arch/quick_field_entrypoints.S"

    /* Deliver the given exception */
    .extern artDeliverExceptionFromCode
    /* Deliver an exception pending on a thread */
    .extern artDeliverPendingException

    /*
     * Macro that sets up the callee-save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs), storing the
     * runtime's kSaveRefsAndArgs ArtMethod* at the bottom of the frame.
     * Clobbers \rTemp.
     */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME rTemp
    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
    LOAD_RUNTIME_INSTANCE \rTemp        @ Load Runtime::Current into rTemp.
    @ Load kSaveRefsAndArgs Method* into rTemp.
    ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
    str \rTemp, [sp, #0]                @ Place Method* at bottom of stack.
    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
.endm

    /*
     * As above, but the (already-known) ArtMethod* is taken from r0 instead
     * of being loaded from the runtime.
     */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
    str r0, [sp, #0]                    @ Store ArtMethod* to bottom of stack.
    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything)
     * when core registers are already saved.
     */
.macro SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED rTemp, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
    @ 14 words of callee saves and args already saved.
    vpush {d0-d15}                      @ 32 words, 2 for each of the 16 saved doubles.
    .cfi_adjust_cfa_offset 128
    sub sp, #8                          @ 2 words of space, alignment padding and Method*
    .cfi_adjust_cfa_offset 8
    LOAD_RUNTIME_INSTANCE \rTemp        @ Load Runtime::Current into rTemp.
    @ Load kSaveEverything Method* into rTemp.
    ldr \rTemp, [\rTemp, #\runtime_method_offset]
    str \rTemp, [sp, #0]                @ Place Method* at bottom of stack.
    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_EVERYTHING != 56 + 128 + 8)
#error "FRAME_SIZE_SAVE_EVERYTHING(ARM) size not as expected."
#endif
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything)
     */
.macro SETUP_SAVE_EVERYTHING_FRAME rTemp, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
    push {r0-r12, lr}                   @ 14 words of callee saves and args.
    .cfi_adjust_cfa_offset 56
    .cfi_rel_offset r0, 0
    .cfi_rel_offset r1, 4
    .cfi_rel_offset r2, 8
    .cfi_rel_offset r3, 12
    .cfi_rel_offset r4, 16
    .cfi_rel_offset r5, 20
    .cfi_rel_offset r6, 24
    .cfi_rel_offset r7, 28
    .cfi_rel_offset r8, 32
    .cfi_rel_offset r9, 36
    .cfi_rel_offset r10, 40
    .cfi_rel_offset r11, 44
    .cfi_rel_offset ip, 48
    .cfi_rel_offset lr, 52
    SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED \rTemp, \runtime_method_offset
.endm

    /*
     * Tears down the kSaveEverything frame built above, restoring all core
     * and FP registers (including r0/r1, so any return value is restored too).
     */
.macro RESTORE_SAVE_EVERYTHING_FRAME
    add sp, #8                          @ rewind sp
    .cfi_adjust_cfa_offset -8
    vpop {d0-d15}
    .cfi_adjust_cfa_offset -128
    pop {r0-r12, lr}                    @ 14 words of callee saves
    .cfi_restore r0
    .cfi_restore r1
    .cfi_restore r2
    .cfi_restore r3
    .cfi_restore r4
    .cfi_restore r5
    .cfi_restore r6
    .cfi_restore r7
    .cfi_restore r8
    .cfi_restore r9
    .cfi_restore r10
    .cfi_restore r11
    .cfi_restore r12
    .cfi_restore lr
    .cfi_adjust_cfa_offset -56
.endm

    /*
     * Tears down the kSaveEverything frame but leaves r0 untouched so the
     * current return value in r0 survives (the r0 slot in the frame is skipped).
     */
.macro RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
    add sp, #8                          @ rewind sp
    .cfi_adjust_cfa_offset -8
    vpop {d0-d15}
    .cfi_adjust_cfa_offset -128
    add sp, #4                          @ skip r0
    .cfi_adjust_cfa_offset -4
    .cfi_restore r0                     @ debugger can no longer restore caller's r0
    pop {r1-r12, lr}                    @ 13 words of callee saves
    .cfi_restore r1
    .cfi_restore r2
    .cfi_restore r3
    .cfi_restore r4
    .cfi_restore r5
    .cfi_restore r6
    .cfi_restore r7
    .cfi_restore r8
    .cfi_restore r9
    .cfi_restore r10
    .cfi_restore r11
    .cfi_restore r12
    .cfi_restore lr
    .cfi_adjust_cfa_offset -52
.endm

    /*
     * Returns (possibly deoptimizing first) if there is no pending exception,
     * otherwise delivers it. Expects no frame on top of the return value.
     */
.macro RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION is_ref = 0
    // Use R2 to allow returning 64-bit values in R0-R1.
    ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  // Get exception field.
    CFI_REMEMBER_STATE
    cbnz r2, 1f
    DEOPT_OR_RETURN r2, \is_ref         // Check if deopt is required
1:
    CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
    DELIVER_PENDING_EXCEPTION
.endm

.macro RETURN_REF_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
    RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION /* is_ref= */ 1
.endm

    /*
     * Returns via lr unless the thread has a deopt check pending, in which
     * case a kSaveEverything frame is built and artDeoptimizeIfNeeded decides
     * whether to long-jump into the deoptimized frame. Clobbers \temp.
     */
.macro DEOPT_OR_RETURN temp, is_ref = 0
    ldr \temp, [rSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
    cbnz \temp, 2f
    bx lr
2:
    SETUP_SAVE_EVERYTHING_FRAME \temp
    mov r2, \is_ref                     // pass if result is a reference
    mov r1, r0                          // pass the result
    mov r0, rSELF                       // Thread::Current
    bl artDeoptimizeIfNeeded

    CFI_REMEMBER_STATE
    cbnz r0, 3f

    RESTORE_SAVE_EVERYTHING_FRAME
    REFRESH_MARKING_REGISTER
    bx lr

3:
    // Deoptimize
    CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
    bl art_quick_do_long_jump           // (Context*)
    bkpt                                // Unreached
.endm

    /*
     * Same deopt check as above, but for use when a kSaveEverything frame is
     * already on the stack; restores it (keeping r0) on the fast path.
     */
.macro DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_R0 temp, is_ref
    ldr \temp, [rSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
    cbnz \temp, 2f
    CFI_REMEMBER_STATE
    RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
    REFRESH_MARKING_REGISTER
    bx lr
    CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
2:
    str r0, [sp, SAVE_EVERYTHING_FRAME_R0_OFFSET]  // update result in the frame
    mov r2, \is_ref                     // pass if result is a reference
    mov r1, r0                          // pass the result
    mov r0, rSELF                       // Thread::Current
    bl artDeoptimizeIfNeeded

    CFI_REMEMBER_STATE
    cbnz r0, 3f

    RESTORE_SAVE_EVERYTHING_FRAME
    REFRESH_MARKING_REGISTER
    bx lr

3:
    // Deoptimize
    CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
    bl art_quick_do_long_jump           // (Context*)
    bkpt                                // Unreached
.endm

    /*
     * Entrypoint stub: calls \cxx_name(Thread*) which throws, then long-jumps.
     */
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0  @ save all registers as basis for long jump context
    mov r0, rSELF                       @ pass Thread::Current
    bl \cxx_name                        @ \cxx_name(Thread*)
    bl art_quick_do_long_jump           @ (Context*)
    bkpt                                // Unreached
END \c_name
.endm

    /*
     * As above but builds a kSaveEverything frame so all registers can be
     * reconstructed when unwinding/deopting through the throw.
     */
.macro NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_EVERYTHING_FRAME r0      @ save all registers as basis for long jump context
    mov r0, rSELF                       @ pass Thread::Current
    bl \cxx_name                        @ \cxx_name(Thread*)
    bl art_quick_do_long_jump           @ (Context*)
    bkpt                                // Unreached
END \c_name
.endm

    /*
     * Entrypoint stub: calls \cxx_name(arg0, Thread*) which throws, then long-jumps.
     */
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r1  @ save all registers as basis for long jump context
    mov r1, rSELF                       @ pass Thread::Current
    bl \cxx_name                        @ \cxx_name(Thread*)
    bl art_quick_do_long_jump           @ (Context*)
    bkpt                                // Unreached
END \c_name
.endm

    /*
     * Entrypoint stub: calls \cxx_name(arg0, arg1, Thread*) which throws
     * (kSaveEverything frame), then long-jumps.
     */
.macro TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_EVERYTHING_FRAME r2      @ save all registers as basis for long jump context
    mov r2, rSELF                       @ pass Thread::Current
    bl \cxx_name                        @ \cxx_name(Thread*)
    bl art_quick_do_long_jump           @ (Context*)
    bkpt                                // Unreached
END \c_name
.endm

    /*
     * For entrypoints returning an int status: 0 means success (return or
     * deopt), non-zero means a pending exception must be delivered.
     */
.macro RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER
    CFI_REMEMBER_STATE
    cbnz r0, 1f                         @ result non-zero branch over
    DEOPT_OR_RETURN r1
1:
    CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
    DELIVER_PENDING_EXCEPTION
.endm

    /*
     * For entrypoints returning a reference: non-null means success, null
     * means a pending exception must be delivered.
     */
.macro RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER
    CFI_REMEMBER_STATE
    cbz r0, 1f                          @ result zero branch over
    DEOPT_OR_RETURN r1, /* is_ref= */ 1
1:
    CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
    DELIVER_PENDING_EXCEPTION
.endm

// Macros taking opportunity of code similarities for downcalls.
// Used for field and allocation entrypoints.
    /*
     * Downcall into the runtime: saves the kSaveRefsOnly frame, appends
     * Thread::Current as argument number \n, calls \entrypoint and finishes
     * with the given \return macro.
     */
.macro N_ARG_DOWNCALL n, name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME r\n      @ save callee saves in case of GC
    mov r\n, rSELF                      @ pass Thread::Current
    bl \entrypoint                      @ (<args>, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

.macro ONE_ARG_DOWNCALL name, entrypoint, return
    N_ARG_DOWNCALL 1, \name, \entrypoint, \return
.endm

.macro TWO_ARG_DOWNCALL name, entrypoint, return
    N_ARG_DOWNCALL 2, \name, \entrypoint, \return
.endm

.macro THREE_ARG_DOWNCALL name, entrypoint, return
    N_ARG_DOWNCALL 3, \name, \entrypoint, \return
.endm

// Macro to facilitate adding new allocation entrypoints.
// The fifth argument (Thread*) is passed on the stack in a 16-byte slot.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME r12      @ save callee saves in case of GC
    str rSELF, [sp, #-16]!              @ expand the frame and pass Thread::Current
    .cfi_adjust_cfa_offset 16
    bl \entrypoint                      @ (<args>, Thread*)
    DECREASE_FRAME 16                   @ strip the extra frame
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

    /*
     * Called by managed code, saves callee saves and then calls artThrowException
     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode

    /*
     * Called by managed code to create and deliver a NullPointerException.
     */
NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode

    /*
     * Call installed by a signal handler to create and deliver a NullPointerException.
     */
    .extern art_quick_throw_null_pointer_exception_from_signal
ENTRY art_quick_throw_null_pointer_exception_from_signal
    // The fault handler pushes the gc map address, i.e. "return address", to stack
    // and passes the fault address in LR. So we need to set up the CFI info accordingly.
    .cfi_def_cfa_offset __SIZEOF_POINTER__
    .cfi_rel_offset lr, 0
    push {r0-r12}                       @ 13 words of callee saves and args; LR already saved.
    .cfi_adjust_cfa_offset 52
    .cfi_rel_offset r0, 0
    .cfi_rel_offset r1, 4
    .cfi_rel_offset r2, 8
    .cfi_rel_offset r3, 12
    .cfi_rel_offset r4, 16
    .cfi_rel_offset r5, 20
    .cfi_rel_offset r6, 24
    .cfi_rel_offset r7, 28
    .cfi_rel_offset r8, 32
    .cfi_rel_offset r9, 36
    .cfi_rel_offset r10, 40
    .cfi_rel_offset r11, 44
    .cfi_rel_offset ip, 48

    @ save all registers as basis for long jump context
    SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED r1
    mov r0, lr                          @ pass the fault address stored in LR by the fault handler.
    mov r1, rSELF                       @ pass Thread::Current
    bl artThrowNullPointerExceptionFromSignal  @ (fault_address, Thread*)
    bl art_quick_do_long_jump           @ (Context*)
    bkpt                                // Unreached
END art_quick_throw_null_pointer_exception_from_signal

    /*
     * Called by managed code to create and deliver an ArithmeticException.
     */
NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode

    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
     * index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode

    /*
     * Called by managed code to create and deliver a StringIndexOutOfBoundsException
     * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode

    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode

    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
     * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
     * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1.
     *
     * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
     * of the target Method* in r0 and method->code_ in r1.
     *
     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
     * pointing back to the original caller.
     *
     * Clobbers IP (R12).
     */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
    .extern \cxx_name
    SETUP_SAVE_REFS_AND_ARGS_FRAME r2   @ save callee saves in case allocation triggers GC
    mov r2, rSELF                       @ pass Thread::Current
    mov r3, sp
    bl \cxx_name                        @ (method_idx, this, Thread*, SP)
    mov r12, r1                         @ save Method*->code_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    REFRESH_MARKING_REGISTER
    cbz r0, 1f                          @ did we find the target? if not go to exception delivery
    bx r12                              @ tail call to target
1:
    DELIVER_PENDING_EXCEPTION
.endm
.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
    INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm

INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck

INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck

    /*
     * Quick invocation stub internal.
     * On entry:
     *   r0 = method pointer
     *   r1 = argument array or null for no argument methods
     *   r2 = size of argument array in bytes
     *   r3 = (managed) thread pointer
     *   [sp] = JValue* result
     *   [sp + 4] = result_in_float
     *   [sp + 8] = core register argument array
     *   [sp + 12] = fp register argument array
     *  +-------------------------+
     *  | uint32_t* fp_reg_args   |
     *  | uint32_t* core_reg_args |
     *  |   result_in_float       | <- Caller frame
     *  |   Jvalue* result        |
     *  +-------------------------+
     *  |          lr             |
     *  |          r11            |
     *  |          r9             |
     *  |          r4             | <- r11
     *  +-------------------------+
     *  | uint32_t out[n-1]       |
     *  |    :      :             |        Outs
     *  | uint32_t out[0]         |
     *  | StackRef<ArtMethod>     | <- SP  value=null
     *  +-------------------------+
     */
ENTRY art_quick_invoke_stub_internal
    SPILL_ALL_CALLEE_SAVE_GPRS          @ spill regs (9)
    mov r11, sp                         @ save the stack pointer
    .cfi_def_cfa_register r11

    mov r9, r3                          @ move managed thread pointer into r9

    add r4, r2, #4                      @ create space for method pointer in frame
    sub r4, sp, r4                      @ reserve & align *stack* to 16 bytes: native calling
    and r4, #0xFFFFFFF0                 @ convention only aligns to 8B, so we have to ensure ART
    mov sp, r4                          @ 16B alignment ourselves.

    mov r4, r0                          @ save method*
    add r0, sp, #4                      @ pass stack pointer + method ptr as dest for memcpy
    bl memcpy                           @ memcpy (dest, src, bytes)
    mov ip, #0                          @ set ip to 0
    str ip, [sp]                        @ store null for method* at bottom of frame

    ldr ip, [r11, #48]                  @ load fp register argument array pointer
    vldm ip, {s0-s15}                   @ copy s0 - s15

    ldr ip, [r11, #44]                  @ load core register argument array pointer
    mov r0, r4                          @ restore method*
    add ip, ip, #4                      @ skip r0
    ldm ip, {r1-r3}                     @ copy r1 - r3

    REFRESH_MARKING_REGISTER

    ldr ip, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32]  @ get pointer to the code
    blx ip                              @ call the method

    mov sp, r11                         @ restore the stack pointer
    .cfi_def_cfa_register sp

    ldr r4, [sp, #40]                   @ load result_is_float
    ldr r9, [sp, #36]                   @ load the result pointer
    cmp r4, #0
    ite eq
    strdeq r0, [r9]                     @ store r0/r1 into result pointer
    vstrne d0, [r9]                     @ store s0-s1/d0 into result pointer

    pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}  @ restore spill regs
END art_quick_invoke_stub_internal

    /*
     * On stack replacement stub.
     * On entry:
     *   r0 = stack to copy
     *   r1 = size of stack
     *   r2 = pc to call
     *   r3 = JValue* result
     *   [sp] = shorty
     *   [sp + 4] = thread
     */
ENTRY art_quick_osr_stub
    SPILL_ALL_CALLEE_SAVE_GPRS          @ Spill regs (9)
    vpush {s16-s31}                     @ Spill fp-regs (16)
    .cfi_adjust_cfa_offset 64
    SAVE_SIZE=(9*4+16*4)
    mov r11, sp                         @ Save the stack pointer
    .cfi_def_cfa r11, SAVE_SIZE         @ CFA = r11 + SAVE_SIZE
    CFI_REMEMBER_STATE
    mov r10, r1                         @ Save size of stack
    ldr r9, [r11, #(SAVE_SIZE+4)]       @ Move managed thread pointer into r9
    REFRESH_MARKING_REGISTER
    mov r6, r2                          @ Save the pc to call
    sub r7, sp, #12                     @ Reserve space for stack pointer,
                                        @ JValue* result, and ArtMethod* slot.
    and r7, #0xFFFFFFF0                 @ Align stack pointer
    mov sp, r7                          @ Update stack pointer
    str r11, [sp, #4]                   @ Save old stack pointer
    str r3, [sp, #8]                    @ Save JValue* result
    mov ip, #0
    str ip, [sp]                        @ Store null for ArtMethod* at bottom of frame
    // r11 isn't properly spilled in the osr method, so we need to use a DWARF expression.
    // NB: the CFI must be before the call since this is the address gdb will lookup.
    // NB: gdb expects that cfa_expression returns the CFA value (not address to it).
    .cfi_escape                         /* CFA = [sp + 4] + SAVE_SIZE */ \
      0x0f, 6,                          /* DW_CFA_def_cfa_expression(len) */ \
      0x92, 13, 4,                      /* DW_OP_bregx(reg,offset) */ \
      0x06,                             /* DW_OP_deref */ \
      0x23, SAVE_SIZE                   /* DW_OP_plus_uconst(val) */
    bl .Losr_entry                      @ Call the method
    ldr r10, [sp, #8]                   @ Restore JValue* result
    ldr sp, [sp, #4]                    @ Restore saved stack pointer
    .cfi_def_cfa sp, SAVE_SIZE          @ CFA = sp + SAVE_SIZE
    strd r0, [r10]                      @ Store r0/r1 into result pointer
    vpop {s16-s31}
    .cfi_adjust_cfa_offset -64
    pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.Losr_entry:
    CFI_RESTORE_STATE_AND_DEF_CFA r11, SAVE_SIZE  @ CFA = r11 + SAVE_SIZE
    sub sp, sp, r10                     @ Reserve space for callee stack
    sub r10, r10, #4
    str lr, [sp, r10]                   @ Store link register per the compiler ABI
    mov r2, r10
    mov r1, r0
    mov r0, sp
    bl memcpy                           @ memcpy (dest r0, src r1, bytes r2)
    bx r6
END art_quick_osr_stub

    /*
     * On entry r0 is the long jump context. This is expected to be returned from a previous
     * entrypoint call which threw an exception or deoptimized.
     * The r12 (IP) shall be clobbered rather than retrieved from gprs_.
     */
ARM_ENTRY art_quick_do_long_jump
    // Reserve space for the gprs + fprs;
    INCREASE_FRAME ARM_LONG_JUMP_CONTEXT_SIZE

    mov r1, sp
    add r2, sp, #ARM_LONG_JUMP_GPRS_SIZE

    bl artContextCopyForLongJump        // Context* context, uintptr_t* gprs, uintptr_t* fprs

    add r0, sp, #ARM_LONG_JUMP_GPRS_SIZE

    vldm r0, {s0-s31}                   @ Load all fprs from argument fprs_.
                                        @ Do not access fprs_ from now, they may be below SP.
    ldm sp, {r0-r11}                    @ load r0-r11 from gprs_.
    ldr r12, [sp, #60]                  @ Load the value of PC (r15) from gprs_ (60 = 4 * 15) into IP (r12).
    ldr lr, [sp, #56]                   @ Load LR from gprs_, 56 = 4 * 14.
    ldr sp, [sp, #52]                   @ Load SP from gprs_ 52 = 4 * 13.
                                        @ Do not access gprs_ from now, they are below SP.
    .cfi_def_cfa_offset 0
    REFRESH_MARKING_REGISTER
    bx r12                              @ Do long jump.
END art_quick_do_long_jump

    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     */
TWO_ARG_DOWNCALL art_quick_handle_fill_data, \
                 artHandleFillArrayDataFromCode, \
                 RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER

    /*
     * Entry from managed code that tries to lock the object in a fast path and
     * calls `artLockObjectFromCode()` for the difficult cases, may block for GC.
     * r0 holds the possibly null object to lock.
     */
ENTRY art_quick_lock_object
    // Note: the slow path is actually the art_quick_lock_object_no_inline (tail call).
    LOCK_OBJECT_FAST_PATH r0, r1, r2, r3, .Llock_object_slow, /*can_be_null*/ 1
END art_quick_lock_object

    /*
     * Entry from managed code that calls `artLockObjectFromCode()`, may block for GC.
     * r0 holds the possibly null object to lock.
     */
    .extern artLockObjectFromCode
ENTRY art_quick_lock_object_no_inline
    // This is also the slow path for art_quick_lock_object.
    // Note that we need a local label as the assembler emits bad instructions
    // for CBZ/CBNZ if we try to jump to `art_quick_lock_object_no_inline`.
.Llock_object_slow:
    SETUP_SAVE_REFS_ONLY_FRAME r1       @ save callee saves in case we block
    mov r1, rSELF                       @ pass Thread::Current
    bl artLockObjectFromCode            @ (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline

    /*
     * Entry from managed code that tries to unlock the object in a fast path and calls
     * `artUnlockObjectFromCode()` for the difficult cases and delivers exception on failure.
     * r0 holds the possibly null object to unlock.
     */
ENTRY art_quick_unlock_object
    // Note: the slow path is actually the art_quick_unlock_object_no_inline (tail call).
    UNLOCK_OBJECT_FAST_PATH r0, r1, r2, r3, .Lunlock_object_slow, /*can_be_null*/ 1
END art_quick_unlock_object

    /*
     * Entry from managed code that calls `artUnlockObjectFromCode()`
     * and delivers exception on failure.
     * r0 holds the possibly null object to unlock.
     */
    .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object_no_inline
    // This is also the slow path for art_quick_unlock_object.
    // Note that we need a local label as the assembler emits bad instructions
    // for CBZ/CBNZ if we try to jump to `art_quick_unlock_object_no_inline`.
.Lunlock_object_slow:
    @ save callee saves in case exception allocation triggers GC
    SETUP_SAVE_REFS_ONLY_FRAME r1
    mov r1, rSELF                       @ pass Thread::Current
    bl artUnlockObjectFromCode          @ (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline

    /*
     * Entry from managed code that calls artInstanceOfFromCode and on failure calls
     * artThrowClassCastExceptionForObject.
     */
    .extern artInstanceOfFromCode
    .extern artThrowClassCastExceptionForObject
ENTRY art_quick_check_instance_of
    // Type check using the bit string passes null as the target class. In that case just throw.
    cbz r1, .Lthrow_class_cast_exception_for_bitstring_check

    push {r0-r2, lr}                    @ save arguments, padding (r2) and link register
    .cfi_adjust_cfa_offset 16
    .cfi_rel_offset r0, 0
    .cfi_rel_offset r1, 4
    .cfi_rel_offset r2, 8
    .cfi_rel_offset lr, 12
    bl artInstanceOfFromCode
    cbz r0, .Lthrow_class_cast_exception
    pop {r0-r2, pc}

.Lthrow_class_cast_exception:
    pop {r0-r2, lr}
    .cfi_adjust_cfa_offset -16
    .cfi_restore r0
    .cfi_restore r1
    .cfi_restore r2
    .cfi_restore lr

.Lthrow_class_cast_exception_for_bitstring_check:
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2  @ save all registers as basis for long jump context
    mov r2, rSELF                       @ pass Thread::Current
    bl artThrowClassCastExceptionForObject  @ (Object*, Class*, Thread*)
    bl art_quick_do_long_jump           @ (Context*)
    bkpt                                // Unreached
END art_quick_check_instance_of

// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude.
.macro POP_REG_NE rReg, offset, rExclude
    .ifnc \rReg, \rExclude
        ldr \rReg, [sp, #\offset]       @ restore rReg
        .cfi_restore \rReg
    .endif
.endm

// Save rReg's value to [sp, #offset].
.macro PUSH_REG rReg, offset
    str \rReg, [sp, #\offset]           @ save rReg
    .cfi_rel_offset \rReg, \offset
.endm

    // Helper macros for `art_quick_aput_obj`.
#ifdef USE_READ_BARRIER
#ifdef USE_BAKER_READ_BARRIER
.macro BAKER_RB_CHECK_GRAY_BIT_AND_LOAD rDest, rObj, offset, gray_slow_path_label
    ldr ip, [\rObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tst ip, #LOCK_WORD_READ_BARRIER_STATE_MASK_SHIFTED
    bne \gray_slow_path_label
    // False dependency to avoid needing load/load fence.
    add \rObj, \rObj, ip, lsr #32
    ldr \rDest, [\rObj, #\offset]
    UNPOISON_HEAP_REF \rDest
.endm

.macro BAKER_RB_LOAD_AND_MARK rDest, rObj, offset, mark_function
    ldr \rDest, [\rObj, #\offset]
    UNPOISON_HEAP_REF \rDest
    str lr, [sp, #-8]!                  @ Save LR with correct stack alignment.
    .cfi_rel_offset lr, 0
    .cfi_adjust_cfa_offset 8
    bl \mark_function
    ldr lr, [sp], #8                    @ Restore LR.
    .cfi_restore lr
    .cfi_adjust_cfa_offset -8
.endm
#else  // USE_BAKER_READ_BARRIER
    .extern artReadBarrierSlow
.macro READ_BARRIER_SLOW rDest, rObj, offset
    push {r0-r3, ip, lr}                @ 6 words for saved registers (used in art_quick_aput_obj)
    .cfi_adjust_cfa_offset 24
    .cfi_rel_offset r0, 0
    .cfi_rel_offset r1, 4
    .cfi_rel_offset r2, 8
    .cfi_rel_offset r3, 12
    .cfi_rel_offset ip, 16
    .cfi_rel_offset lr, 20
    sub sp, #8                          @ push padding
    .cfi_adjust_cfa_offset 8
    @ mov r0, \rRef                     @ pass ref in r0 (no-op for now since parameter ref is unused)
    .ifnc \rObj, r1
        mov r1, \rObj                   @ pass rObj
    .endif
    mov r2, #\offset                    @ pass offset
    bl artReadBarrierSlow               @ artReadBarrierSlow(ref, rObj, offset)
    @ No need to unpoison return value in r0, artReadBarrierSlow() would do the unpoisoning.
    .ifnc \rDest, r0
        mov \rDest, r0                  @ save return value in rDest
    .endif
    add sp, #8                          @ pop padding
    .cfi_adjust_cfa_offset -8
    POP_REG_NE r0, 0, \rDest            @ conditionally restore saved registers
    POP_REG_NE r1, 4, \rDest
    POP_REG_NE r2, 8, \rDest
    POP_REG_NE r3, 12, \rDest
    POP_REG_NE ip, 16, \rDest
    add sp, #20
    .cfi_adjust_cfa_offset -20
    pop {lr}                            @ restore lr
    .cfi_adjust_cfa_offset -4
    .cfi_restore lr
.endm
#endif  // USE_BAKER_READ_BARRIER
#endif  // USE_READ_BARRIER

    .hidden art_quick_aput_obj
ENTRY art_quick_aput_obj
#if defined(USE_READ_BARRIER) && !defined(USE_BAKER_READ_BARRIER)
    @ The offset to .Laput_obj_null is too large to use cbz due to expansion from `READ_BARRIER_SLOW`.
    tst r2, r2
    beq .Laput_obj_null
    READ_BARRIER_SLOW r3, r0, MIRROR_OBJECT_CLASS_OFFSET
    READ_BARRIER_SLOW r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
    READ_BARRIER_SLOW r4, r2, MIRROR_OBJECT_CLASS_OFFSET
#else  // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
    cbz r2, .Laput_obj_null
#ifdef USE_READ_BARRIER
    cmp rMR, #0
    bne .Laput_obj_gc_marking
#endif  // USE_READ_BARRIER
    ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET]
    UNPOISON_HEAP_REF r3
    // R4 is a scratch register in managed ARM ABI.
    ldr r4, [r2, #MIRROR_OBJECT_CLASS_OFFSET]
    UNPOISON_HEAP_REF r4
    ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
    UNPOISON_HEAP_REF r3
#endif  // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
    cmp r3, r4                          @ value's type == array's component type - trivial assignability
    bne .Laput_obj_check_assignability
.Laput_obj_store:
    add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    POISON_HEAP_REF r2
    str r2, [r3, r1, lsl #2]
    ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr r0, r0, #CARD_TABLE_CARD_SHIFT
    strb r3, [r3, r0]
    blx lr

.Laput_obj_null:
    add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    str r2, [r3, r1, lsl #2]
    blx lr

.Laput_obj_check_assignability:
    push {r0-r2, lr}                    @ save arguments
    .cfi_adjust_cfa_offset 16
    .cfi_rel_offset lr, 12
    mov r1, r4
    mov r0, r3
    bl artIsAssignableFromCode
    CFI_REMEMBER_STATE
    cbz r0, .Lthrow_array_store_exception
    pop {r0-r2, lr}
    .cfi_restore lr
    .cfi_adjust_cfa_offset -16
    add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    POISON_HEAP_REF r2
    str r2, [r3, r1, lsl #2]
    ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr r0, r0, #CARD_TABLE_CARD_SHIFT
    strb r3, [r3, r0]
    blx lr

.Lthrow_array_store_exception:
    CFI_RESTORE_STATE_AND_DEF_CFA sp, 16
    pop {r0-r2, lr}
    .cfi_restore lr
    .cfi_adjust_cfa_offset -16
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
    CFI_REMEMBER_STATE
#endif  // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3
    mov r1, r2
    mov r2, rSELF                       @ Pass Thread::Current.
    bl artThrowArrayStoreException      @ (Class*, Class*, Thread*)
    bl art_quick_do_long_jump           @ (Context*)
    bkpt                                // Unreached

#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
    CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
.Laput_obj_gc_marking:
    BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
        r3, r0, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_array_class
.Laput_obj_mark_array_class_continue:
    BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
        r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, .Laput_obj_mark_array_element
.Laput_obj_mark_array_element_continue:
    BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
        r4, r2, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_object_class
.Laput_obj_mark_object_class_continue:

    cmp r3, r4                          @ value's type == array's component type - trivial assignability
    // All registers are correctly set up for `.Laput_obj_check_assignability`.
    bne .Laput_obj_check_assignability
    b .Laput_obj_store

.Laput_obj_mark_array_class:
    BAKER_RB_LOAD_AND_MARK r3, r0, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg03
    b .Laput_obj_mark_array_class_continue

.Laput_obj_mark_array_element:
    BAKER_RB_LOAD_AND_MARK \
        r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, art_quick_read_barrier_mark_reg03
    b .Laput_obj_mark_array_element_continue

.Laput_obj_mark_object_class:
    BAKER_RB_LOAD_AND_MARK r4, r2, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg04
    b .Laput_obj_mark_object_class_continue
#endif  // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
END art_quick_aput_obj

    /*
     * Macro for resolution and initialization of indexed DEX file
     * constants such as classes and strings.
     */
.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_EVERYTHING_FRAME r1, \runtime_method_offset  @ save everything in case of GC
    mov r1, rSELF                       @ pass Thread::Current
    bl \entrypoint                      @ (uint32_t index, Thread*)
    cbz r0, 1f                          @ If result is null, deliver the OOME.
    str r0, [sp, #136]                  @ store result in the frame
    DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_R0 r1, /* is_ref= */ 1
1:
    DELIVER_PENDING_EXCEPTION_FRAME_READY
END \name
.endm

.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT name, entrypoint
    ONE_ARG_SAVE_EVERYTHING_DOWNCALL \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
.endm

ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode

// Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
// defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.

GENERATE_STATIC_FIELD_GETTERS

GENERATE_INSTANCE_FIELD_GETTERS

GENERATE_STATIC_FIELD_SETTERS /* emit64= */ 0

GENERATE_INSTANCE_FIELD_SETTERS /* emit64= */ 0

    /*
     * Called by managed code to resolve an instance field and store a wide value.
     */
    .extern artSet64InstanceFromCompiledCode
ENTRY art_quick_set64_instance
    SETUP_SAVE_REFS_ONLY_FRAME r12      @ save callee saves in case of GC
    @ r2:r3 contain the wide argument
    str rSELF, [sp, #-16]!              @ expand the frame and pass Thread::Current
    .cfi_adjust_cfa_offset 16
    bl artSet64InstanceFromCompiledCode @ (field_idx, Object*, new_val, Thread*)
    add sp, #16                         @ release out args
    .cfi_adjust_cfa_offset -16
    RESTORE_SAVE_REFS_ONLY_FRAME        @ TODO: we can clearly save an add here
    REFRESH_MARKING_REGISTER
    RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER
END art_quick_set64_instance

    /*
     * Called by managed code to resolve a static field and store a wide value.
     */
    .extern artSet64StaticFromCompiledCode
ENTRY art_quick_set64_static
    SETUP_SAVE_REFS_ONLY_FRAME r12      @ save callee saves in case of GC
    @ r2:r3 contain the wide argument
    str rSELF, [sp, #-16]!              @ expand the frame and pass Thread::Current
    .cfi_adjust_cfa_offset 16
    bl artSet64StaticFromCompiledCode   @ (field_idx, new_val, Thread*)
    add sp, #16                         @ release out args
    .cfi_adjust_cfa_offset -16
    RESTORE_SAVE_REFS_ONLY_FRAME        @ TODO: we can clearly save an add here
    REFRESH_MARKING_REGISTER
    RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER
END art_quick_set64_static

// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have arm specific asm.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)

// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)

// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_rosalloc, RosAlloc).
966// 967// If isInitialized=1 then the compiler assumes the object's class has already been initialized. 968// If isInitialized=0 the compiler can only assume it's been at least resolved. 969.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized 970ENTRY \c_name 971 // Fast path rosalloc allocation. 972 // r0: type/return value, rSELF (r9): Thread::Current 973 // r1, r2, r3, r12: free. 974 ldr r3, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local 975 // allocation stack has room. 976 // TODO: consider using ldrd. 977 ldr r12, [rSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET] 978 cmp r3, r12 979 bhs .Lslow_path\c_name 980 981 ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3) 982 cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread 983 // local allocation. 984 // If the class is not yet visibly initialized, or it is finalizable, 985 // the object size will be very large to force the branch below to be taken. 986 // 987 // See Class::SetStatus() in class.cc for more details. 988 bhs .Lslow_path\c_name 989 // Compute the rosalloc bracket index 990 // from the size. Since the size is 991 // already aligned we can combine the 992 // two shifts together. 993 add r12, rSELF, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT) 994 // Subtract pointer size since there 995 // are no runs for 0 byte allocations 996 // and the size is already aligned. 997 // Load the rosalloc run (r12) 998 ldr r12, [r12, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)] 999 // Load the free list head (r3). This 1000 // will be the return val. 1001 ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)] 1002 cbz r3, .Lslow_path\c_name 1003 // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1. 
1004 ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head 1005 // and update the list head with the 1006 // next pointer. 1007 str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)] 1008 // Store the class pointer in the 1009 // header. This also overwrites the 1010 // next pointer. The offsets are 1011 // asserted to match. 1012#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET 1013#error "Class pointer needs to overwrite next pointer." 1014#endif 1015 POISON_HEAP_REF r0 1016 str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET] 1017 // Push the new object onto the thread 1018 // local allocation stack and 1019 // increment the thread local 1020 // allocation stack top. 1021 ldr r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] 1022 str r3, [r1], #COMPRESSED_REFERENCE_SIZE // (Increment r1 as a side effect.) 1023 str r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] 1024 // Decrement the size of the free list 1025 1026 // After this "STR" the object is published to the thread local allocation stack, 1027 // and it will be observable from a runtime internal (eg. Heap::VisitObjects) point of view. 1028 // It is not yet visible to the running (user) compiled code until after the return. 1029 // 1030 // To avoid the memory barrier prior to the "STR", a trick is employed, by differentiating 1031 // the state of the allocation stack slot. It can be a pointer to one of: 1032 // 0) Null entry, because the stack was bumped but the new pointer wasn't written yet. 1033 // (The stack initial state is "null" pointers). 1034 // 1) A partially valid object, with an invalid class pointer to the next free rosalloc slot. 1035 // 2) A fully valid object, with a valid class pointer pointing to a real class. 1036 // Other states are not allowed. 1037 // 1038 // An object that is invalid only temporarily, and will eventually become valid. 
1039 // The internal runtime code simply checks if the object is not null or is partial and then 1040 // ignores it. 1041 // 1042 // (Note: The actual check is done by seeing if a non-null object has a class pointer pointing 1043 // to ClassClass, and that the ClassClass's class pointer is self-cyclic. A rosalloc free slot 1044 // "next" pointer is not-cyclic.) 1045 // 1046 // See also b/28790624 for a listing of CLs dealing with this race. 1047 ldr r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)] 1048 sub r1, #1 1049 // TODO: consider combining this store 1050 // and the list head store above using 1051 // strd. 1052 str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)] 1053 1054 mov r0, r3 // Set the return value and return. 1055 // No barrier. The class is already observably initialized (otherwise the fast 1056 // path size check above would fail) and new-instance allocations are protected 1057 // from publishing by the compiler which inserts its own StoreStore barrier. 1058 bx lr 1059 1060.Lslow_path\c_name: 1061 SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC 1062 mov r1, rSELF @ pass Thread::Current 1063 bl \cxx_name @ (mirror::Class* cls, Thread*) 1064 RESTORE_SAVE_REFS_ONLY_FRAME 1065 REFRESH_MARKING_REGISTER 1066 RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER 1067END \c_name 1068.endm 1069 1070ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0 1071ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1 1072 1073// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab 1074// and art_quick_alloc_object_resolved/initialized_region_tlab. 1075// 1076// r0: type, rSELF (r9): Thread::Current, r1, r2, r3, r12: free. 1077// Need to preserve r0 to the slow path. 
1078// 1079// If isInitialized=1 then the compiler assumes the object's class has already been initialized. 1080// If isInitialized=0 the compiler can only assume it's been at least resolved. 1081.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel isInitialized 1082 // Load thread_local_pos (r12) and 1083 // thread_local_end (r3) with ldrd. 1084 // Check constraints for ldrd. 1085#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0)) 1086#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance" 1087#endif 1088 ldrd r12, r3, [rSELF, #THREAD_LOCAL_POS_OFFSET] 1089 sub r12, r3, r12 // Compute the remaining buf size. 1090 ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3). 1091 cmp r3, r12 // Check if it fits. 1092 // If the class is not yet visibly initialized, or it is finalizable, 1093 // the object size will be very large to force the branch below to be taken. 1094 // 1095 // See Class::SetStatus() in class.cc for more details. 1096 bhi \slowPathLabel 1097 // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1. 1098 // Reload old thread_local_pos (r0) 1099 // for the return value. 1100 ldr r2, [rSELF, #THREAD_LOCAL_POS_OFFSET] 1101 add r1, r2, r3 1102 str r1, [rSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. 1103 // After this "STR" the object is published to the thread local allocation stack, 1104 // and it will be observable from a runtime internal (eg. Heap::VisitObjects) point of view. 1105 // It is not yet visible to the running (user) compiled code until after the return. 1106 // 1107 // To avoid the memory barrier prior to the "STR", a trick is employed, by differentiating 1108 // the state of the object. It can be either: 1109 // 1) A partially valid object, with a null class pointer 1110 // (because the initial state of TLAB buffers is all 0s/nulls). 
1111 // 2) A fully valid object, with a valid class pointer pointing to a real class. 1112 // Other states are not allowed. 1113 // 1114 // An object that is invalid only temporarily, and will eventually become valid. 1115 // The internal runtime code simply checks if the object is not null or is partial and then 1116 // ignores it. 1117 // 1118 // (Note: The actual check is done by checking that the object's class pointer is non-null. 1119 // Also, unlike rosalloc, the object can never be observed as null). 1120 POISON_HEAP_REF r0 1121 str r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. 1122 mov r0, r2 1123 // No barrier. The class is already observably initialized (otherwise the fast 1124 // path size check above would fail) and new-instance allocations are protected 1125 // from publishing by the compiler which inserts its own StoreStore barrier. 1126 bx lr 1127.endm 1128 1129// The common code for art_quick_alloc_object_*region_tlab 1130// Currently the implementation ignores isInitialized. TODO(b/172087402): clean this up. 1131// Caller must execute a constructor fence after this. 1132.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint, isInitialized 1133ENTRY \name 1134 // Fast path tlab allocation. 1135 // r0: type, rSELF (r9): Thread::Current 1136 // r1, r2, r3, r12: free. 1137 ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name, \isInitialized 1138.Lslow_path\name: 1139 SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC. 1140 mov r1, rSELF // Pass Thread::Current. 
1141 bl \entrypoint // (mirror::Class* klass, Thread*) 1142 RESTORE_SAVE_REFS_ONLY_FRAME 1143 REFRESH_MARKING_REGISTER 1144 RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER 1145END \name 1146.endm 1147 1148GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0 1149GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1 1150GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0 1151GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1 1152 1153 1154// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab 1155// and art_quick_alloc_array_resolved/initialized_region_tlab. 1156// 1157// r0: type, r1: component_count, r2: total_size, rSELF (r9): Thread::Current, r3, r12: free. 1158// Need to preserve r0 and r1 to the slow path. 1159.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel 1160 and r2, r2, #OBJECT_ALIGNMENT_MASK_TOGGLED // Apply alignment mask 1161 // (addr + 7) & ~7. 1162 1163 // Load thread_local_pos (r3) and 1164 // thread_local_end (r12) with ldrd. 1165 // Check constraints for ldrd. 1166#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0)) 1167#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance" 1168#endif 1169 ldrd r3, r12, [rSELF, #THREAD_LOCAL_POS_OFFSET] 1170 sub r12, r12, r3 // Compute the remaining buf size. 1171 cmp r2, r12 // Check if the total_size fits. 1172 // The array class is always initialized here. Unlike new-instance, 1173 // this does not act as a double test. 1174 bhi \slowPathLabel 1175 // "Point of no slow path". Won't go to the slow path from here on. 
OK to clobber r0 and r1. 1176 add r2, r2, r3 1177 str r2, [rSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. 1178 POISON_HEAP_REF r0 1179 str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. 1180 str r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length. 1181 mov r0, r3 1182// new-array is special. The class is loaded and immediately goes to the Initialized state 1183// before it is published. Therefore the only fence needed is for the publication of the object. 1184// See ClassLinker::CreateArrayClass() for more details. 1185 1186// For publication of the new array, we don't need a 'dmb ishst' here. 1187// The compiler generates 'dmb ishst' for all new-array insts. 1188 bx lr 1189.endm 1190 1191// Caller must execute a constructor fence after this. 1192.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup 1193ENTRY \name 1194 // Fast path array allocation for region tlab allocation. 1195 // r0: mirror::Class* type 1196 // r1: int32_t component_count 1197 // rSELF (r9): thread 1198 // r2, r3, r12: free. 1199 \size_setup .Lslow_path\name 1200 ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name 1201.Lslow_path\name: 1202 // r0: mirror::Class* klass 1203 // r1: int32_t component_count 1204 // r2: Thread* self 1205 SETUP_SAVE_REFS_ONLY_FRAME r2 // save callee saves in case of GC 1206 mov r2, rSELF // pass Thread::Current 1207 bl \entrypoint 1208 RESTORE_SAVE_REFS_ONLY_FRAME 1209 REFRESH_MARKING_REGISTER 1210 RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER 1211END \name 1212.endm 1213 1214.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path 1215 movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8) 1216 cmp r1, r2 1217 bhi \slow_path 1218 // Array classes are never finalizable 1219 // or uninitialized, no need to check. 
1220 ldr r3, [r0, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type 1221 UNPOISON_HEAP_REF r3 1222 ldr r3, [r3, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET] 1223 lsr r3, r3, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16 1224 // bits. 1225 lsl r2, r1, r3 // Calculate data size 1226 // Add array data offset and alignment. 1227 add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) 1228#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4 1229#error Long array data offset must be 4 greater than int array data offset. 1230#endif 1231 1232 add r3, r3, #1 // Add 4 to the length only if the 1233 // component size shift is 3 1234 // (for 64 bit alignment). 1235 and r3, r3, #4 1236 add r2, r2, r3 1237.endm 1238 1239.macro COMPUTE_ARRAY_SIZE_8 slow_path 1240 // Possibly a large object, go slow. 1241 // Also does negative array size check. 1242 movw r2, #(MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) 1243 cmp r1, r2 1244 bhi \slow_path 1245 // Add array data offset and alignment. 1246 add r2, r1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) 1247.endm 1248 1249.macro COMPUTE_ARRAY_SIZE_16 slow_path 1250 // Possibly a large object, go slow. 1251 // Also does negative array size check. 1252 movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2) 1253 cmp r1, r2 1254 bhi \slow_path 1255 lsl r2, r1, #1 1256 // Add array data offset and alignment. 1257 add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) 1258.endm 1259 1260.macro COMPUTE_ARRAY_SIZE_32 slow_path 1261 // Possibly a large object, go slow. 1262 // Also does negative array size check. 1263 movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4) 1264 cmp r1, r2 1265 bhi \slow_path 1266 lsl r2, r1, #2 1267 // Add array data offset and alignment. 
1268 add r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) 1269.endm 1270 1271.macro COMPUTE_ARRAY_SIZE_64 slow_path 1272 // Possibly a large object, go slow. 1273 // Also does negative array size check. 1274 movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_LONG_ARRAY_DATA_OFFSET) / 8) 1275 cmp r1, r2 1276 bhi \slow_path 1277 lsl r2, r1, #3 1278 // Add array data offset and alignment. 1279 add r2, r2, #(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) 1280.endm 1281 1282GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN 1283GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8 1284GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16 1285GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32 1286GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64 1287GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN 1288GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8 1289GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16 1290GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32 1291GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64 1292 1293 /* 1294 * Called by managed code when the value in rSUSPEND has been decremented to 0. 
1295 */ 1296 .extern artTestSuspendFromCode 1297ENTRY art_quick_test_suspend 1298 SETUP_SAVE_EVERYTHING_FRAME r0, RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET @ save everything for GC stack crawl 1299 mov r0, rSELF 1300 bl artTestSuspendFromCode @ (Thread*) 1301 1302 CFI_REMEMBER_STATE 1303 cbnz r0, .Ltest_suspend_deoptimize 1304 1305 RESTORE_SAVE_EVERYTHING_FRAME 1306 REFRESH_MARKING_REGISTER 1307 bx lr 1308 1309.Ltest_suspend_deoptimize: 1310 // Deoptimize 1311 CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING 1312 bl art_quick_do_long_jump @ (Context*) 1313 bkpt // Unreached 1314END art_quick_test_suspend 1315 1316 .extern artImplicitSuspendFromCode 1317ENTRY art_quick_implicit_suspend 1318 mov r0, rSELF 1319 SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves for stack crawl 1320 bl artImplicitSuspendFromCode @ (Thread*) 1321 1322 CFI_REMEMBER_STATE 1323 cbnz r0, .Limplicit_suspend_deopt 1324 1325 RESTORE_SAVE_REFS_ONLY_FRAME 1326 REFRESH_MARKING_REGISTER 1327 bx lr 1328 1329.Limplicit_suspend_deopt: 1330 // Deoptimize 1331 CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_REFS_ONLY 1332 bl art_quick_do_long_jump @ (Context*) 1333 bkpt // Unreached 1334END art_quick_implicit_suspend 1335 1336 /* 1337 * Called by managed code that is attempting to call a method on a proxy class. On entry 1338 * r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The 1339 * frame size of the invoked proxy method agrees with a ref and args callee save frame. 1340 */ 1341 .extern artQuickProxyInvokeHandler 1342ENTRY art_quick_proxy_invoke_handler 1343 SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0 1344 mov r2, rSELF @ pass Thread::Current 1345 mov r3, sp @ pass SP 1346 blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) 1347 ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ 1348 // Tear down the callee-save frame. Skip arg registers. 
1349 add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY) 1350 .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY) 1351 RESTORE_SAVE_REFS_ONLY_FRAME 1352 REFRESH_MARKING_REGISTER 1353 cbnz r2, 1f @ success if no exception is pending 1354 vmov d0, r0, r1 @ store into fpr, for when it's a fpr return... 1355 bx lr @ return on success 13561: 1357 DELIVER_PENDING_EXCEPTION 1358END art_quick_proxy_invoke_handler 1359 1360 /* 1361 * Called to resolve an imt conflict. 1362 * r0 is the conflict ArtMethod. 1363 * r12 is a hidden argument that holds the target interface method. 1364 * 1365 * Note that this stub writes to r0, r4, and r12. 1366 */ 1367ENTRY art_quick_imt_conflict_trampoline 1368 ldr r0, [r0, #ART_METHOD_JNI_OFFSET_32] // Load ImtConflictTable 1369 ldr r4, [r0] // Load first entry in ImtConflictTable. 1370.Limt_table_iterate: 1371 cmp r4, r12 1372 // Branch if found. Benchmarks have shown doing a branch here is better. 1373 beq .Limt_table_found 1374 // If the entry is null, the interface method is not in the ImtConflictTable. 1375 cbz r4, .Lconflict_trampoline 1376 // Iterate over the entries of the ImtConflictTable. 1377 ldr r4, [r0, #(2 * __SIZEOF_POINTER__)]! 1378 b .Limt_table_iterate 1379.Limt_table_found: 1380 // We successfully hit an entry in the table. Load the target method 1381 // and jump to it. 1382 ldr r0, [r0, #__SIZEOF_POINTER__] 1383 ldr pc, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32] 1384.Lconflict_trampoline: 1385 // Pass interface method to the trampoline. 
1386 mov r0, r12 1387 INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline 1388END art_quick_imt_conflict_trampoline 1389 1390 .extern artQuickResolutionTrampoline 1391ENTRY art_quick_resolution_trampoline 1392 SETUP_SAVE_REFS_AND_ARGS_FRAME r2 1393 mov r2, rSELF @ pass Thread::Current 1394 mov r3, sp @ pass SP 1395 blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP) 1396 CFI_REMEMBER_STATE 1397 cbz r0, 1f @ is code pointer null? goto exception 1398 mov r12, r0 1399 ldr r0, [sp, #0] @ load resolved method in r0 1400 RESTORE_SAVE_REFS_AND_ARGS_FRAME 1401 REFRESH_MARKING_REGISTER 1402 bx r12 @ tail-call into actual code 14031: 1404 CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_REFS_AND_ARGS 1405 RESTORE_SAVE_REFS_AND_ARGS_FRAME 1406 DELIVER_PENDING_EXCEPTION 1407END art_quick_resolution_trampoline 1408 1409 /* 1410 * Called to do a generic JNI down-call 1411 */ 1412ENTRY art_quick_generic_jni_trampoline 1413 SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0 1414 1415 // Save rSELF 1416 mov r11, rSELF 1417 // Save SP , so we can have static CFI info. r10 is saved in ref_and_args. 1418 mov r10, sp 1419 .cfi_def_cfa_register r10 1420 1421 sub sp, sp, #GENERIC_JNI_TRAMPOLINE_RESERVED_AREA 1422 1423 // prepare for artQuickGenericJniTrampoline call 1424 // (Thread*, managed_sp, reserved_area) 1425 // r0 r1 r2 <= C calling convention 1426 // rSELF r10 sp <= where they are 1427 1428 mov r0, rSELF // Thread* 1429 mov r1, r10 // SP for the managed frame. 1430 mov r2, sp // reserved area for arguments and other saved data (up to managed frame) 1431 blx artQuickGenericJniTrampoline // (Thread*, managed_sp, reserved_area) 1432 1433 // The C call will have registered the complete save-frame on success. 1434 // The result of the call is: 1435 // r0: pointer to native code, 0 on error. 1436 // The bottom of the reserved area contains values for arg registers, 1437 // hidden arg register and SP for out args for the call. 
1438 1439 // Check for error (class init check or locking for synchronized native method can throw). 1440 cbz r0, .Lexception_in_native 1441 1442 // Save the code pointer 1443 mov lr, r0 1444 1445 // Load parameters from frame into registers r0-r3 (soft-float), 1446 // hidden arg (r4) for @CriticalNative and SP for out args. 1447 pop {r0-r3, r4, ip} 1448 1449 // Apply the new SP for out args, releasing unneeded reserved area. 1450 mov sp, ip 1451 1452 // Softfloat. 1453 // TODO: Change to hardfloat when supported. 1454 1455 blx lr // native call. 1456 1457 // result sign extension is handled in C code 1458 // prepare for artQuickGenericJniEndTrampoline call 1459 // (Thread*, result, result_f) 1460 // r0 r2,r3 stack <= C calling convention 1461 // r11 r0,r1 r0,r1 <= where they are 1462 sub sp, sp, #8 // Stack alignment. 1463 1464 push {r0-r1} 1465 mov r3, r1 1466 mov r2, r0 1467 mov r0, r11 1468 1469 blx artQuickGenericJniEndTrampoline 1470 1471 // Restore self pointer. 1472 mov rSELF, r11 1473 1474 // Pending exceptions possible. 1475 ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ 1476 cbnz r2, .Lexception_in_native 1477 1478 // Tear down the alloca. 1479 mov sp, r10 1480 1481 // store into fpr, for when it's a fpr return... 1482 vmov d0, r0, r1 1483 1484 LOAD_RUNTIME_INSTANCE r2 1485 ldr r2, [r2, #RUNTIME_INSTRUMENTATION_OFFSET] 1486 ldrb r2, [r2, #INSTRUMENTATION_RUN_EXIT_HOOKS_OFFSET] 1487 CFI_REMEMBER_STATE 1488 cbnz r2, .Lcall_method_exit_hook 1489.Lcall_method_exit_hook_done: 1490 1491 // Tear down the callee-save frame. Skip arg registers. 1492 .cfi_def_cfa_register sp 1493 add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - 7 * 4) 1494 .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - 7 * 4) 1495 pop {r5-r8, r10-r11, lr} @ This must match the non-args registers restored by 1496 .cfi_restore r5 @ `RESTORE_SAVE_REFS_AND_ARGS_FRAME`. 
1497 .cfi_restore r6 1498 .cfi_restore r7 1499 .cfi_restore r8 1500 .cfi_restore r10 1501 .cfi_restore r11 1502 .cfi_restore lr 1503 .cfi_adjust_cfa_offset -(7 * 4) 1504 REFRESH_MARKING_REGISTER 1505 bx lr // ret 1506 1507.Lcall_method_exit_hook: 1508 CFI_RESTORE_STATE_AND_DEF_CFA r10, FRAME_SIZE_SAVE_REFS_AND_ARGS 1509 mov r2, #FRAME_SIZE_SAVE_REFS_AND_ARGS 1510 bl art_quick_method_exit_hook 1511 b .Lcall_method_exit_hook_done 1512 1513.Lexception_in_native: 1514 ldr ip, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] 1515 add ip, ip, #-1 // Remove the GenericJNI tag. ADD/SUB writing directly to SP is UNPREDICTABLE. 1516 mov sp, ip 1517 bl art_deliver_pending_exception 1518END art_quick_generic_jni_trampoline 1519 1520ENTRY art_deliver_pending_exception 1521 # This will create a new save-all frame, required by the runtime. 1522 DELIVER_PENDING_EXCEPTION 1523END art_deliver_pending_exception 1524 1525 .extern artQuickToInterpreterBridge 1526ENTRY art_quick_to_interpreter_bridge 1527 SETUP_SAVE_REFS_AND_ARGS_FRAME r1 1528 mov r1, rSELF @ pass Thread::Current 1529 mov r2, sp @ pass SP 1530 blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP) 1531 ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ 1532 // Tear down the callee-save frame. Skip arg registers. 1533 add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY) 1534 .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY) 1535 RESTORE_SAVE_REFS_ONLY_FRAME 1536 REFRESH_MARKING_REGISTER 1537 cbnz r2, 1f @ success if no exception is pending 1538 vmov d0, r0, r1 @ store into fpr, for when it's a fpr return... 1539 bx lr @ return on success 15401: 1541 DELIVER_PENDING_EXCEPTION 1542END art_quick_to_interpreter_bridge 1543 1544/* 1545 * Called to attempt to execute an obsolete method. 
1546 */ 1547ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod 1548 1549 /* 1550 * Compiled code has requested that we deoptimize into the interpreter. The deoptimization 1551 * will long jump to the interpreter bridge. 1552 */ 1553 .extern artDeoptimizeFromCompiledCode 1554ENTRY art_quick_deoptimize_from_compiled_code 1555 SETUP_SAVE_EVERYTHING_FRAME r1 1556 mov r1, rSELF @ pass Thread::Current 1557 blx artDeoptimizeFromCompiledCode @ (DeoptimizationKind, Thread*) 1558 bl art_quick_do_long_jump @ (Context*) 1559 bkpt // Unreached 1560END art_quick_deoptimize_from_compiled_code 1561 1562 /* 1563 * Signed 64-bit integer multiply. 1564 * 1565 * Consider WXxYZ (r1r0 x r3r2) with a long multiply: 1566 * WX 1567 * x YZ 1568 * -------- 1569 * ZW ZX 1570 * YW YX 1571 * 1572 * The low word of the result holds ZX, the high word holds 1573 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because 1574 * it doesn't fit in the low 64 bits. 1575 * 1576 * Unlike most ARM math operations, multiply instructions have 1577 * restrictions on using the same register more than once (Rd and Rm 1578 * cannot be the same). 1579 */ 1580 /* mul-long vAA, vBB, vCC */ 1581ENTRY art_quick_mul_long 1582 push {r9-r10} 1583 .cfi_adjust_cfa_offset 8 1584 .cfi_rel_offset r9, 0 1585 .cfi_rel_offset r10, 4 1586 mul ip, r2, r1 @ ip<- ZxW 1587 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 1588 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 1589 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 1590 mov r0,r9 1591 mov r1,r10 1592 pop {r9-r10} 1593 .cfi_adjust_cfa_offset -8 1594 .cfi_restore r9 1595 .cfi_restore r10 1596 bx lr 1597END art_quick_mul_long 1598 1599 /* 1600 * Long integer shift. This is different from the generic 32/64-bit 1601 * binary operations because vAA/vBB are 64-bit but vCC (the shift 1602 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low 1603 * 6 bits. 
1604 * On entry: 1605 * r0: low word 1606 * r1: high word 1607 * r2: shift count 1608 */ 1609 /* shl-long vAA, vBB, vCC */ 1610ARM_ENTRY art_quick_shl_long @ ARM code as thumb code requires spills 1611 and r2, r2, #63 @ r2<- r2 & 0x3f 1612 mov r1, r1, asl r2 @ r1<- r1 << r2 1613 rsb r3, r2, #32 @ r3<- 32 - r2 1614 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 1615 subs ip, r2, #32 @ ip<- r2 - 32 1616 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 1617 mov r0, r0, asl r2 @ r0<- r0 << r2 1618 bx lr 1619END art_quick_shl_long 1620 1621 /* 1622 * Long integer shift. This is different from the generic 32/64-bit 1623 * binary operations because vAA/vBB are 64-bit but vCC (the shift 1624 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low 1625 * 6 bits. 1626 * On entry: 1627 * r0: low word 1628 * r1: high word 1629 * r2: shift count 1630 */ 1631 /* shr-long vAA, vBB, vCC */ 1632ARM_ENTRY art_quick_shr_long @ ARM code as thumb code requires spills 1633 and r2, r2, #63 @ r0<- r0 & 0x3f 1634 mov r0, r0, lsr r2 @ r0<- r2 >> r2 1635 rsb r3, r2, #32 @ r3<- 32 - r2 1636 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 1637 subs ip, r2, #32 @ ip<- r2 - 32 1638 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 1639 mov r1, r1, asr r2 @ r1<- r1 >> r2 1640 bx lr 1641END art_quick_shr_long 1642 1643 /* 1644 * Long integer shift. This is different from the generic 32/64-bit 1645 * binary operations because vAA/vBB are 64-bit but vCC (the shift 1646 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low 1647 * 6 bits. 
 * On entry:
     *   r0: low word
     *   r1: high word
     *   r2: shift count
     */
    /* ushr-long vAA, vBB, vCC */
ARM_ENTRY art_quick_ushr_long           @ ARM code as thumb code requires spills
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2 (logical; zero-fill the high word)
    bx      lr
END art_quick_ushr_long

    /*
     * String's indexOf.
     *
     * On entry:
     *    r0: string object (known non-null)
     *    r1: char to match (known <= 0xFFFF)
     *    r2: Starting offset in string data
     *
     * Returns the index of the first match in r0, or -1 if not found.
     */
ENTRY art_quick_indexof
    push    {r4, r10-r11, lr}           @ 4 words of callee saves
    .cfi_adjust_cfa_offset 16
    .cfi_rel_offset r4, 0
    .cfi_rel_offset r10, 4
    .cfi_rel_offset r11, 8
    .cfi_rel_offset lr, 12
#if (STRING_COMPRESSION_FEATURE)
    ldr     r4, [r0, #MIRROR_STRING_COUNT_OFFSET]
#else
    ldr     r3, [r0, #MIRROR_STRING_COUNT_OFFSET]
#endif
    add     r0, #MIRROR_STRING_VALUE_OFFSET
#if (STRING_COMPRESSION_FEATURE)
    /* r4 count (with flag) and r3 holds actual length */
    lsr     r3, r4, #1
#endif
    /* Clamp start to [0..count] */
    cmp     r2, #0
    it      lt
    movlt   r2, #0
    cmp     r2, r3
    it      gt
    movgt   r2, r3

    /* Save a copy in r12 to later compute result */
    mov     r12, r0

    /* Build pointer to start of data to compare and pre-bias */
#if (STRING_COMPRESSION_FEATURE)
    @ Bit 0 of the count is the compression flag; carry-clear means compressed (8-bit chars).
    lsrs    r4, r4, #1
    bcc     .Lstring_indexof_compressed
#endif
    add     r0, r0, r2, lsl #1
    sub     r0, #2

    /* Compute iteration count */
    sub     r2, r3, r2

    /*
     * At this point we have:
     *   r0: start of data to test
     *   r1: char to compare
     *   r2: iteration count
     *   r4: compression style (used temporarily)
     *   r12: original start of string data
     *
     *   r3, r4, r10, r11 available for loading string data
     */

    subs    r2, #4
    blt     .Lindexof_remainder

    @ Main loop, unrolled 4x; pre-indexed loads advance r0 by 2 bytes per char.
.Lindexof_loop4:
    ldrh    r3, [r0, #2]!
    ldrh    r4, [r0, #2]!
    ldrh    r10, [r0, #2]!
    ldrh    r11, [r0, #2]!
    cmp     r3, r1
    beq     .Lmatch_0
    cmp     r4, r1
    beq     .Lmatch_1
    cmp     r10, r1
    beq     .Lmatch_2
    cmp     r11, r1
    beq     .Lmatch_3
    subs    r2, #4
    bge     .Lindexof_loop4

.Lindexof_remainder:
    adds    r2, #4
    beq     .Lindexof_nomatch

    @ Tail loop, one char at a time; r0 already points at the matched char,
    @ so .Lmatch_3 (no pointer rewind) computes the right index.
.Lindexof_loop1:
    ldrh    r3, [r0, #2]!
    cmp     r3, r1
    beq     .Lmatch_3
    subs    r2, #1
    bne     .Lindexof_loop1

.Lindexof_nomatch:
    mov     r0, #-1
    pop     {r4, r10-r11, pc}

    @ Match cases rewind r0 to the matched char, then index = (r0 - start) / 2.
.Lmatch_0:
    sub     r0, #6
    sub     r0, r12
    asr     r0, r0, #1
    pop     {r4, r10-r11, pc}
.Lmatch_1:
    sub     r0, #4
    sub     r0, r12
    asr     r0, r0, #1
    pop     {r4, r10-r11, pc}
.Lmatch_2:
    sub     r0, #2
    sub     r0, r12
    asr     r0, r0, #1
    pop     {r4, r10-r11, pc}
.Lmatch_3:
    sub     r0, r12
    asr     r0, r0, #1
    pop     {r4, r10-r11, pc}
#if (STRING_COMPRESSION_FEATURE)
    @ Compressed strings store one byte per char; scan with ldrb instead of ldrh.
.Lstring_indexof_compressed:
    add     r0, r0, r2
    sub     r0, #1
    sub     r2, r3, r2
.Lstring_indexof_compressed_loop:
    subs    r2, #1
    blt     .Lindexof_nomatch
    ldrb    r3, [r0, #1]!
    cmp     r3, r1
    beq     .Lstring_indexof_compressed_matched
    b       .Lstring_indexof_compressed_loop
.Lstring_indexof_compressed_matched:
    @ Byte-indexed data: index is simply the pointer difference, no halving.
    sub     r0, r12
    pop     {r4, r10-r11, pc}
#endif
END art_quick_indexof

    /* Assembly routines used to handle ABI differences.
 */

    /* double fmod(double a, double b) */
    .extern fmod
ENTRY art_quick_fmod
    push    {lr}
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset lr, 0
    sub     sp, #4                      @ Keep sp 8-byte aligned across the call.
    .cfi_adjust_cfa_offset 4
    @ Marshal the d0/d1 arguments into core registers for the soft-float libm call.
    vmov    r0, r1, d0
    vmov    r2, r3, d1
    bl      fmod
    vmov    d0, r0, r1                  @ Move the r0/r1 result back to the FP return register.
    add     sp, #4
    .cfi_adjust_cfa_offset -4
    pop     {pc}
END art_quick_fmod

    /* float fmodf(float a, float b) */
    .extern fmodf
ENTRY art_quick_fmodf
    push    {lr}
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset lr, 0
    sub     sp, #4                      @ Keep sp 8-byte aligned across the call.
    .cfi_adjust_cfa_offset 4
    @ d0 holds both float args (s0, s1); move them to r0/r1 for the call.
    vmov    r0, r1, d0
    bl      fmodf
    vmov    s0, r0                      @ Move the r0 result back to the FP return register.
    add     sp, #4
    .cfi_adjust_cfa_offset -4
    pop     {pc}
END art_quick_fmodf

    /* int64_t art_d2l(double d) */
    .extern art_d2l
ENTRY art_quick_d2l
    @ Tail call: art_d2l takes the double in r0/r1 and returns directly to our caller.
    vmov    r0, r1, d0
    b       art_d2l
END art_quick_d2l

    /* int64_t art_f2l(float f) */
    .extern art_f2l
ENTRY art_quick_f2l
    @ Tail call: art_f2l takes the float in r0 and returns directly to our caller.
    vmov    r0, s0
    b       art_f2l
END art_quick_f2l

    /* float art_l2f(int64_t l) */
    .extern art_l2f
ENTRY art_quick_l2f
    push    {lr}
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset lr, 0
    sub     sp, #4                      @ Keep sp 8-byte aligned across the call.
    .cfi_adjust_cfa_offset 4
    bl      art_l2f                     @ int64_t argument is already in r0/r1.
    vmov    s0, r0                      @ Move the r0 result back to the FP return register.
    add     sp, #4
    .cfi_adjust_cfa_offset -4
    pop     {pc}
END art_quick_l2f

    .extern artStringBuilderAppend
ENTRY art_quick_string_builder_append
    SETUP_SAVE_REFS_ONLY_FRAME r2       @ save callee saves in case of GC
    add     r1, sp, #(FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__)  @ pass args
    mov     r2, rSELF                   @ pass Thread::Current
    bl      artStringBuilderAppend      @ (uint32_t, const uint32_t*, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER
END art_quick_string_builder_append

    /*
     * Create a function `name` calling the ReadBarrier::Mark routine,
     * getting its argument and returning its result through register
     * `reg`, saving
and restoring all caller-save registers.
     *
     * IP is clobbered; `reg` must not be IP.
     *
     * If `reg` is different from `r0`, the generated function follows a
     * non-standard runtime calling convention:
     * - register `reg` (which may be different from R0) is used to pass the (sole) argument,
     * - register `reg` (which may be different from R0) is used to return the result,
     * - all other registers are callee-save (the values they hold are preserved).
     */
.macro READ_BARRIER_MARK_REG name, reg
ENTRY \name
    // Null check so that we can load the lock word.
    SMART_CBZ \reg, .Lret_rb_\name
    // Check lock word for mark bit, if marked return. Use IP for scratch since it is blocked.
    ldr     ip, [\reg, MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tst     ip, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
    beq     .Lnot_marked_rb_\name
    // Already marked, return right away.
.Lret_rb_\name:
    bx      lr

.Lnot_marked_rb_\name:
    // Test that both the forwarding state bits are 1.
#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
    // To use "CMP ip, #modified-immediate; BHS", we need the lock word state in
    // the highest bits and the "forwarding address" state to have all bits set.
#error "Unexpected lock word state shift or forwarding address state value."
#endif
    cmp     ip, #(LOCK_WORD_STATE_FORWARDING_ADDRESS << LOCK_WORD_STATE_SHIFT)
    bhs     .Lret_forwarding_address\name

.Lslow_rb_\name:
    // Save IP: The kSaveEverything entrypoint art_quick_resolve_string used to
    // make a tail call here. Currently, it serves only for stack alignment but
    // we may reintroduce kSaveEverything calls here in the future.
    push    {r0-r4, r9, ip, lr}         @ save return address, core caller-save registers and ip
    .cfi_adjust_cfa_offset 32
    .cfi_rel_offset r0, 0
    .cfi_rel_offset r1, 4
    .cfi_rel_offset r2, 8
    .cfi_rel_offset r3, 12
    .cfi_rel_offset r4, 16
    .cfi_rel_offset r9, 20
    .cfi_rel_offset ip, 24
    .cfi_rel_offset lr, 28

    .ifnc \reg, r0
      mov   r0, \reg                    @ pass arg1 - obj from `reg`
    .endif

    vpush   {s0-s15}                    @ save floating-point caller-save registers
    .cfi_adjust_cfa_offset 64
    bl      artReadBarrierMark          @ r0 <- artReadBarrierMark(obj)
    vpop    {s0-s15}                    @ restore floating-point registers
    .cfi_adjust_cfa_offset -64

    // Write the result into the stack slot that the final pop will reload into `reg`
    // (so callee-save semantics hold for all other registers); if `reg` is not one of
    // the pushed registers, just move the result there directly.
    .ifc \reg, r0                       @ Save result to the stack slot or destination register.
      str   r0, [sp, #0]
    .else
      .ifc \reg, r1
        str   r0, [sp, #4]
      .else
        .ifc \reg, r2
          str   r0, [sp, #8]
        .else
          .ifc \reg, r3
            str   r0, [sp, #12]
          .else
            .ifc \reg, r4
              str   r0, [sp, #16]
            .else
              .ifc \reg, r9
                str   r0, [sp, #20]
              .else
                mov   \reg, r0
              .endif
            .endif
          .endif
        .endif
      .endif
    .endif

    pop     {r0-r4, r9, ip, lr}         @ restore caller-save registers
    .cfi_adjust_cfa_offset -32
    .cfi_restore r0
    .cfi_restore r1
    .cfi_restore r2
    .cfi_restore r3
    .cfi_restore r4
    .cfi_restore r9
    .cfi_restore ip
    .cfi_restore lr
    bx      lr
.Lret_forwarding_address\name:
    // Shift left by the forwarding address shift. This clears out the state bits since they are
    // in the top 2 bits of the lock word.
    lsl     \reg, ip, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
    bx      lr
END \name
.endm

READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg00, r0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, r1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, r2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, r3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, r4
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, r5
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, r6
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, r7
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, r8
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, r9
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, r10
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, r11

// Helper macros for Baker CC read barrier mark introspection (BRBMI).
// Expands `macro_for_register` once per allocatable core register, in register-number
// order, and `macro_for_reserved_register` for each reserved one, so the 16 expansions
// line up with the 4-bit register number encoded in an instruction.
.macro BRBMI_FOR_REGISTERS macro_for_register, macro_for_reserved_register
    \macro_for_register r0
    \macro_for_register r1
    \macro_for_register r2
    \macro_for_register r3
    \macro_for_register r4
    \macro_for_register r5
    \macro_for_register r6
    \macro_for_register r7
    \macro_for_reserved_register  // r8 (rMR) is the marking register.
    \macro_for_register r9
    \macro_for_register r10
    \macro_for_register r11
    \macro_for_reserved_register  // IP is reserved.
    \macro_for_reserved_register  // SP is reserved.
    \macro_for_reserved_register  // LR is reserved.
    \macro_for_reserved_register  // PC is reserved.
.endm

// One 8-byte TBB switch case: restore the marking register to 1, move the marked
// reference from IP to the destination register and return to the caller.
.macro BRBMI_RETURN_SWITCH_CASE reg
    .balign 8
.Lmark_introspection_return_switch_case_\reg:
    mov     rMR, #1
    mov     \reg, ip
    bx      lr
.endm

// TBB table entry: byte offset (in halfwords) from the table to the case for `reg`.
.macro BRBMI_RETURN_SWITCH_CASE_OFFSET reg
    .byte   (.Lmark_introspection_return_switch_case_\reg - .Lmark_introspection_return_table) / 2
.endm

// TBB table entry for a reserved register: jump to the bkpt case.
.macro BRBMI_BAD_RETURN_SWITCH_CASE_OFFSET
    .byte   (.Lmark_introspection_return_switch_case_bad - .Lmark_introspection_return_table) / 2
.endm

#if BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET != BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET
#error "Array and field introspection code sharing requires same LDR offset."
#endif
// One 8-byte array switch case: load the element through `index_reg` and branch
// to the main entrypoint. Padded to 8 bytes so the cases are addressable by index.
.macro BRBMI_ARRAY_LOAD index_reg
    ldr     ip, [ip, \index_reg, lsl #2]        // 4 bytes.
    b       art_quick_read_barrier_mark_introspection  // Should be 2 bytes, encoding T2.
    .balign 8                                   // Add padding to 8 bytes.
.endm

.macro BRBMI_BKPT_FILL_4B
    bkpt    0
    bkpt    0
.endm

.macro BRBMI_BKPT_FILL_8B
    BRBMI_BKPT_FILL_4B
    BRBMI_BKPT_FILL_4B
.endm

.macro BRBMI_RUNTIME_CALL
    // Note: This macro generates exactly 22 bytes of code. The core register
    // PUSH and the MOVs are 16-bit instructions, the rest is 32-bit instructions.

    push    {r0-r3, r7, lr}             // Save return address and caller-save registers.
    .cfi_adjust_cfa_offset 24
    .cfi_rel_offset r0, 0
    .cfi_rel_offset r1, 4
    .cfi_rel_offset r2, 8
    .cfi_rel_offset r3, 12
    .cfi_rel_offset r7, 16
    .cfi_rel_offset lr, 20

    mov     r0, ip                      // Pass the reference.
    vpush   {s0-s15}                    // save floating-point caller-save registers
    .cfi_adjust_cfa_offset 64
    bl      artReadBarrierMark          // r0 <- artReadBarrierMark(obj)
    vpop    {s0-s15}                    // restore floating-point registers
    .cfi_adjust_cfa_offset -64
    mov     ip, r0                      // Move reference to ip in preparation for return switch.

    pop     {r0-r3, r7, lr}             // Restore registers.
    .cfi_adjust_cfa_offset -24
    .cfi_restore r0
    .cfi_restore r1
    .cfi_restore r2
    .cfi_restore r3
    .cfi_restore r7
    .cfi_restore lr
.endm

.macro BRBMI_CHECK_NULL_AND_MARKED label_suffix
    // If reference is null, just return it in the right register.
    cmp     ip, #0
    beq     .Lmark_introspection_return\label_suffix
    // Use rMR as temp and check the mark bit of the reference.
    ldr     rMR, [ip, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tst     rMR, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
    beq     .Lmark_introspection_unmarked\label_suffix
.Lmark_introspection_return\label_suffix:
.endm

.macro BRBMI_UNMARKED_FORWARDING_ADDRESS_CHECK label_suffix
.Lmark_introspection_unmarked\label_suffix:
    // Check if the top two bits are one, if this is the case it is a forwarding address.
#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
    // To use "CMP ip, #modified-immediate; BHS", we need the lock word state in
    // the highest bits and the "forwarding address" state to have all bits set.
#error "Unexpected lock word state shift or forwarding address state value."
#endif
    cmp     rMR, #(LOCK_WORD_STATE_FORWARDING_ADDRESS << LOCK_WORD_STATE_SHIFT)
    bhs     .Lmark_introspection_forwarding_address\label_suffix
.endm

.macro BRBMI_EXTRACT_FORWARDING_ADDRESS label_suffix
.Lmark_introspection_forwarding_address\label_suffix:
    // Note: This macro generates exactly 22 bytes of code, the branch is near.

    // Shift left by the forwarding address shift. This clears out the state bits since they are
    // in the top 2 bits of the lock word.
    lsl     ip, rMR, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
    b       .Lmark_introspection_return\label_suffix
.endm

.macro BRBMI_LOAD_RETURN_REG_FROM_CODE_wide ldr_offset
    // Load the half of the instruction that contains Rt. Adjust for the thumb state in LR.
    ldrh    rMR, [lr, #(-1 + \ldr_offset + 2)]
.endm

.macro BRBMI_LOAD_RETURN_REG_FROM_CODE_narrow ldr_offset
    // Load the 16-bit instruction. Adjust for the thumb state in LR.
    ldrh    rMR, [lr, #(-1 + \ldr_offset)]
.endm

.macro BRBMI_EXTRACT_RETURN_REG_wide
    lsr     rMR, rMR, #12               // Extract `ref_reg`.
.endm

.macro BRBMI_EXTRACT_RETURN_REG_narrow
    and     rMR, rMR, #7                // Extract `ref_reg`.
.endm

// Read the LDR instruction at `ldr_offset` before the return address and leave
// the destination register number in rMR for the return switch.
.macro BRBMI_LOAD_AND_EXTRACT_RETURN_REG ldr_offset, label_suffix
    BRBMI_LOAD_RETURN_REG_FROM_CODE\label_suffix \ldr_offset
    BRBMI_EXTRACT_RETURN_REG\label_suffix
.endm

.macro BRBMI_GC_ROOT gc_root_ldr_offset, label_suffix
    .balign 32
    .thumb_func
    .type art_quick_read_barrier_mark_introspection_gc_roots\label_suffix, #function
    .hidden art_quick_read_barrier_mark_introspection_gc_roots\label_suffix
    .global art_quick_read_barrier_mark_introspection_gc_roots\label_suffix
art_quick_read_barrier_mark_introspection_gc_roots\label_suffix:
    BRBMI_LOAD_AND_EXTRACT_RETURN_REG \gc_root_ldr_offset, \label_suffix
.endm

.macro BRBMI_FIELD_SLOW_PATH ldr_offset, label_suffix
    .balign 16
    // Note: Generates exactly 16 bytes of code.
    BRBMI_UNMARKED_FORWARDING_ADDRESS_CHECK \label_suffix
    BRBMI_LOAD_AND_EXTRACT_RETURN_REG \ldr_offset, \label_suffix
    b       .Lmark_introspection_runtime_call
.endm

    /*
     * Use introspection to load a reference from the same address as the LDR
     * instruction in generated code would load (unless loaded by the thunk,
     * see below), call ReadBarrier::Mark() with that reference if needed
     * and return it in the same register as the LDR instruction would load.
     *
     * The entrypoint is called through a thunk that differs across load kinds.
     * For field and array loads the LDR instruction in generated code follows
     * the branch to the thunk, i.e.
the LDR is (ignoring the heap poisoning) 2155 * at [LR, #(-4 - 1)] (encoding T3) or [LR, #(-2 - 1)] (encoding T1) where 2156 * the -1 is an adjustment for the Thumb mode bit in LR, and the thunk 2157 * knows the holder and performs the gray bit check, returning to the LDR 2158 * instruction if the object is not gray, so this entrypoint no longer 2159 * needs to know anything about the holder. For GC root loads, the LDR 2160 * instruction in generated code precedes the branch to the thunk, i.e. the 2161 * LDR is at [LR, #(-8 - 1)] (encoding T3) or [LR, #(-6 - 1)] (encoding T1) 2162 * where the -1 is again the Thumb mode bit adjustment, and the thunk does 2163 * not do the gray bit check. 2164 * 2165 * For field accesses and array loads with a constant index the thunk loads 2166 * the reference into IP using introspection and calls the main entrypoint 2167 * ("wide", for 32-bit LDR) art_quick_read_barrier_mark_introspection or 2168 * the "narrow" entrypoint (for 16-bit LDR). The latter is at a known 2169 * offset (BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_ENTRYPOINT_OFFSET) 2170 * from the main entrypoint and the thunk adjusts the entrypoint pointer. 2171 * With heap poisoning enabled, the passed reference is poisoned. 2172 * 2173 * For array accesses with non-constant index, the thunk inserts the bits 2174 * 0-5 of the LDR instruction to the entrypoint address, effectively 2175 * calculating a switch case label based on the index register (bits 0-3) 2176 * and adding an extra offset (bits 4-5 hold the shift which is always 2 2177 * for reference loads) to differentiate from the main entrypoint, then 2178 * moves the base register to IP and jumps to the switch case. Therefore 2179 * we need to align the main entrypoint to 512 bytes, accounting for 2180 * a 256-byte offset followed by 16 array entrypoints starting at 2181 * art_quick_read_barrier_mark_introspection_arrays, each containing an LDR 2182 * (register) and a branch to the main entrypoint. 
2183 * 2184 * For GC root accesses we cannot use the main entrypoint because of the 2185 * different offset where the LDR instruction in generated code is located. 2186 * (And even with heap poisoning enabled, GC roots are not poisoned.) 2187 * To re-use the same entrypoint pointer in generated code, we make sure 2188 * that the gc root entrypoint (a copy of the entrypoint with a different 2189 * offset for introspection loads) is located at a known offset (0xc0/0xe0 2190 * bytes, or BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET/ 2191 * BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET) from the 2192 * main entrypoint and the GC root thunk adjusts the entrypoint pointer, 2193 * moves the root register to IP and jumps to the customized entrypoint, 2194 * art_quick_read_barrier_mark_introspection_gc_roots_{wide,narrow}. 2195 * The thunk also performs all the fast-path checks, so we need just the 2196 * slow path. 2197 * 2198 * Intrinsic CAS operations (VarHandle*CompareAnd{Set,Exchange}* and 2199 * UnsafeCASObject) use similar code to the GC roots wide load but using 2200 * MOV (register, T3) instead of the LDR (immediate, T3), with destination 2201 * register in bits 8-11 rather than 12-15. Therefore they have their own 2202 * entrypoint, art_quick_read_barrier_mark_introspection_intrinsic_cas 2203 * at the offset BAKER_MARK_INTROSPECTION_INTRINSIC_CAS_ENTRYPOINT_OFFSET. 2204 * This is used only for high registers, low registers reuse the GC roots 2205 * narrow load entrypoint as the low 3 bits of the destination register 2206 * for MOV (register) encoding T1 match the LDR (immediate) encoding T1. 2207 * 2208 * The code structure is 2209 * art_quick_read_barrier_mark_introspection: // @0x00 2210 * Up to 32 bytes code for main entrypoint fast-path code for fields 2211 * (and array elements with constant offset) with LDR encoding T3; 2212 * jumps to the switch in the "narrow" entrypoint. 
2213 * art_quick_read_barrier_mark_introspection_narrow: // @0x20 2214 * Up to 48 bytes code for fast path code for fields (and array 2215 * elements with constant offset) with LDR encoding T1, ending in the 2216 * return switch instruction TBB and the table with switch offsets. 2217 * .Lmark_introspection_return_switch_case_r0: // @0x50 2218 * Exactly 88 bytes of code for the return switch cases (8 bytes per 2219 * case, 11 cases; no code for reserved registers). 2220 * .Lmark_introspection_forwarding_address_narrow: // @0xa8 2221 * Exactly 6 bytes to extract the forwarding address and jump to the 2222 * "narrow" entrypoint fast path. 2223 * .Lmark_introspection_return_switch_case_bad: // @0xae 2224 * Exactly 2 bytes, bkpt for unexpected return register. 2225 * .Lmark_introspection_unmarked_narrow: // @0xb0 2226 * Exactly 16 bytes for "narrow" entrypoint slow path. 2227 * art_quick_read_barrier_mark_introspection_gc_roots_wide: // @0xc0 2228 * GC root entrypoint code for LDR encoding T3 (10 bytes); loads and 2229 * extracts the return register and jumps to the runtime call. 2230 * .Lmark_introspection_forwarding_address_wide: // @0xca 2231 * Exactly 6 bytes to extract the forwarding address and jump to the 2232 * "wide" entrypoint fast path. 2233 * .Lmark_introspection_unmarked_wide: // @0xd0 2234 * Exactly 16 bytes for "wide" entrypoint slow path. 2235 * art_quick_read_barrier_mark_introspection_gc_roots_narrow: // @0xe0 2236 * GC root entrypoint code for LDR encoding T1 (8 bytes); loads and 2237 * extracts the return register and falls through to the runtime call. 2238 * .Lmark_introspection_runtime_call: // @0xe8 2239 * Exactly 24 bytes for the runtime call to MarkReg() and jump to the 2240 * return switch. 2241 * art_quick_read_barrier_mark_introspection_arrays: // @0x100 2242 * Exactly 128 bytes for array load switch cases (16x2 instructions). 
2243 * art_quick_read_barrier_mark_introspection_intrinsic_cas: // @0x180 2244 * Intrinsic CAS entrypoint for MOV (register) encoding T3 (6 bytes). 2245 * Loads the return register and jumps to the runtime call. 2246 */ 2247#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER) 2248ENTRY_ALIGNED art_quick_read_barrier_mark_introspection, 512 2249 // At this point, IP contains the reference, rMR is clobbered by the thunk 2250 // and can be freely used as it will be set back to 1 before returning. 2251 // For heap poisoning, the reference is poisoned, so unpoison it first. 2252 UNPOISON_HEAP_REF ip 2253 // Check for null or marked, lock word is loaded into rMR. 2254 BRBMI_CHECK_NULL_AND_MARKED _wide 2255 // Load and extract the return register from the instruction. 2256 BRBMI_LOAD_AND_EXTRACT_RETURN_REG BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET, _wide 2257 b .Lmark_introspection_return_switch 2258 2259 .balign 32 2260 .thumb_func 2261 .type art_quick_read_barrier_mark_introspection_narrow, #function 2262 .hidden art_quick_read_barrier_mark_introspection_narrow 2263 .global art_quick_read_barrier_mark_introspection_narrow 2264art_quick_read_barrier_mark_introspection_narrow: 2265 // At this point, IP contains the reference, rMR is clobbered by the thunk 2266 // and can be freely used as it will be set back to 1 before returning. 2267 // For heap poisoning, the reference is poisoned, so unpoison it first. 2268 UNPOISON_HEAP_REF ip 2269 // Check for null or marked, lock word is loaded into rMR. 2270 BRBMI_CHECK_NULL_AND_MARKED _narrow 2271 // Load and extract the return register from the instruction. 2272 BRBMI_LOAD_AND_EXTRACT_RETURN_REG BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET, _narrow 2273.Lmark_introspection_return_switch: 2274 tbb [pc, rMR] // Jump to the switch case. 
2275.Lmark_introspection_return_table: 2276 BRBMI_FOR_REGISTERS BRBMI_RETURN_SWITCH_CASE_OFFSET, BRBMI_BAD_RETURN_SWITCH_CASE_OFFSET 2277 BRBMI_FOR_REGISTERS BRBMI_RETURN_SWITCH_CASE, /* no code */ 2278 2279 .balign 8 2280 BRBMI_EXTRACT_FORWARDING_ADDRESS _narrow // 6 bytes 2281.Lmark_introspection_return_switch_case_bad: 2282 bkpt // 2 bytes 2283 2284 BRBMI_FIELD_SLOW_PATH BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET, _narrow 2285 2286 // 8 bytes for the loading and extracting of the return register. 2287 BRBMI_GC_ROOT BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET, _wide 2288 // 2 bytes for near branch to the runtime call. 2289 b .Lmark_introspection_runtime_call 2290 2291 BRBMI_EXTRACT_FORWARDING_ADDRESS _wide // Not even 4-byte aligned. 2292 2293 BRBMI_FIELD_SLOW_PATH BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET, _wide 2294 2295 // 8 bytes for the loading and extracting of the return register. 2296 BRBMI_GC_ROOT BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET, _narrow 2297 // And the runtime call and branch to the switch taking exactly 24 bytes 2298 // (22 bytes for BRBMI_RUNTIME_CALL and 2 bytes for the near branch) 2299 // shall take the rest of the 32-byte section (within a cache line). 
2300.Lmark_introspection_runtime_call: 2301 BRBMI_RUNTIME_CALL 2302 b .Lmark_introspection_return_switch 2303 2304 .balign 256 2305 .thumb_func 2306 .type art_quick_read_barrier_mark_introspection_arrays, #function 2307 .hidden art_quick_read_barrier_mark_introspection_arrays 2308 .global art_quick_read_barrier_mark_introspection_arrays 2309art_quick_read_barrier_mark_introspection_arrays: 2310 BRBMI_FOR_REGISTERS BRBMI_ARRAY_LOAD, BRBMI_BKPT_FILL_8B 2311 2312 .balign 8 2313 .thumb_func 2314 .type art_quick_read_barrier_mark_introspection_intrinsic_cas, #function 2315 .hidden art_quick_read_barrier_mark_introspection_intrinsic_cas 2316 .global art_quick_read_barrier_mark_introspection_intrinsic_cas 2317art_quick_read_barrier_mark_introspection_intrinsic_cas: 2318 // Load the byte of the MOV instruction that contains Rd. Adjust for the thumb state in LR. 2319 // The MOV (register, T3) is |11101010010|S|1111|(0)000|Rd|0000|Rm|, so the byte we read 2320 // here, i.e. |(0)000|Rd|, contains only the register number, the top 4 bits are 0. 2321 ldrb rMR, [lr, #(-1 + BAKER_MARK_INTROSPECTION_INTRINSIC_CAS_MOV_OFFSET + 3)] 2322 b .Lmark_introspection_runtime_call 2323END art_quick_read_barrier_mark_introspection 2324#else // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER) 2325ENTRY art_quick_read_barrier_mark_introspection 2326 bkpt // Unreachable. 2327END art_quick_read_barrier_mark_introspection 2328#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER) 2329 2330.extern artInvokePolymorphic 2331ENTRY art_quick_invoke_polymorphic 2332 SETUP_SAVE_REFS_AND_ARGS_FRAME r2 2333 mov r0, r1 @ r0 := receiver 2334 mov r1, rSELF @ r1 := Thread::Current 2335 mov r2, sp @ r2 := SP 2336 bl artInvokePolymorphic @ artInvokePolymorphic(receiver, Thread*, SP) 2337 str r1, [sp, 72] @ r0:r1 := Result. Copy r1 to context. 2338 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2339 REFRESH_MARKING_REGISTER 2340 vmov d0, r0, r1 @ Put result r0:r1 into floating point return register. 
2341 RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r2 2342END art_quick_invoke_polymorphic 2343 2344.extern artInvokeCustom 2345ENTRY art_quick_invoke_custom 2346 SETUP_SAVE_REFS_AND_ARGS_FRAME r1 2347 @ r0 := call_site_idx 2348 mov r1, rSELF @ r1 := Thread::Current 2349 mov r2, sp @ r2 := SP 2350 bl artInvokeCustom @ artInvokeCustom(call_site_idx, Thread*, SP) 2351 str r1, [sp, #72] @ Save r1 to context (r0:r1 = result) 2352 RESTORE_SAVE_REFS_AND_ARGS_FRAME 2353 REFRESH_MARKING_REGISTER 2354 vmov d0, r0, r1 @ Put result r0:r1 into floating point return register. 2355 RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r2 2356END art_quick_invoke_custom 2357 2358// r0 contains the class, r4 contains the inline cache. We can use ip as temporary. 2359ENTRY art_quick_update_inline_cache 2360#if (INLINE_CACHE_SIZE != 5) 2361#error "INLINE_CACHE_SIZE not as expected." 2362#endif 2363#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER) 2364 // Don't update the cache if we are marking. 2365 cmp rMR, #0 2366 bne .Ldone 2367#endif 2368.Lentry1: 2369 ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET] 2370 cmp ip, r0 2371 beq .Ldone 2372 cmp ip, #0 2373 bne .Lentry2 2374 ldrex ip, [r4, #INLINE_CACHE_CLASSES_OFFSET] 2375 cmp ip, #0 2376 bne .Lentry1 2377 strex ip, r0, [r4, #INLINE_CACHE_CLASSES_OFFSET] 2378 cmp ip, #0 2379 bne .Ldone 2380 b .Lentry1 2381.Lentry2: 2382 ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+4] 2383 cmp ip, r0 2384 beq .Ldone 2385 cmp ip, #0 2386 bne .Lentry3 2387 ldrex ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+4] 2388 cmp ip, #0 2389 bne .Lentry2 2390 strex ip, r0, [r4, #INLINE_CACHE_CLASSES_OFFSET+4] 2391 cmp ip, #0 2392 bne .Ldone 2393 b .Lentry2 2394.Lentry3: 2395 ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+8] 2396 cmp ip, r0 2397 beq .Ldone 2398 cmp ip, #0 2399 bne .Lentry4 2400 ldrex ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+8] 2401 cmp ip, #0 2402 bne .Lentry3 2403 strex ip, r0, [r4, #INLINE_CACHE_CLASSES_OFFSET+8] 2404 cmp ip, #0 2405 bne .Ldone 2406 b .Lentry3 2407.Lentry4: 
2408 ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+12] 2409 cmp ip, r0 2410 beq .Ldone 2411 cmp ip, #0 2412 bne .Lentry5 2413 ldrex ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+12] 2414 cmp ip, #0 2415 bne .Lentry4 2416 strex ip, r0, [r4, #INLINE_CACHE_CLASSES_OFFSET+12] 2417 cmp ip, #0 2418 bne .Ldone 2419 b .Lentry4 2420.Lentry5: 2421 // Unconditionally store, the inline cache is megamorphic. 2422 str r0, [r4, #INLINE_CACHE_CLASSES_OFFSET+16] 2423.Ldone: 2424 blx lr 2425END art_quick_update_inline_cache 2426 2427// On entry, method is at the bottom of the stack. 2428ENTRY art_quick_compile_optimized 2429 SETUP_SAVE_EVERYTHING_FRAME r0 2430 ldr r0, [sp, FRAME_SIZE_SAVE_EVERYTHING] @ pass ArtMethod 2431 mov r1, rSELF @ pass Thread::Current 2432 bl artCompileOptimized @ (ArtMethod*, Thread*) 2433 RESTORE_SAVE_EVERYTHING_FRAME 2434 // We don't need to restore the marking register here, as 2435 // artCompileOptimized doesn't allow thread suspension. 2436 blx lr 2437END art_quick_compile_optimized 2438 2439// On entry, method is at the bottom of the stack. 
ENTRY art_quick_method_entry_hook
    SETUP_SAVE_EVERYTHING_FRAME r0
    ldr     r0, [sp, FRAME_SIZE_SAVE_EVERYTHING]   @ pass ArtMethod
    mov     r1, rSELF                              @ pass Thread::Current
    mov     r2, sp                                 @ pass SP
    bl      artMethodEntryHook                     @ (ArtMethod*, Thread*, SP)

    CFI_REMEMBER_STATE
    @ Non-zero result (a Context*) means the hook requested deoptimization.
    cbnz    r0, .Lentryhook_deopt

    RESTORE_SAVE_EVERYTHING_FRAME
    REFRESH_MARKING_REGISTER
    blx     lr

.Lentryhook_deopt:
    // Deoptimize
    CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
    bl      art_quick_do_long_jump                 @ (Context*)
    bkpt                                           // Unreached
END art_quick_method_entry_hook

ENTRY art_quick_method_exit_hook
    SETUP_SAVE_EVERYTHING_FRAME r5

    @ The frame size (r2 on entry) is passed on the stack as the 5th argument.
    INCREASE_FRAME 4                               @ align stack
    push    {r2}                                   @ pass frame_size stack
    .cfi_adjust_cfa_offset 4
    add     r3, sp, #(8 + 8)                       @ store fpr_res pointer, in kSaveEverything frame
    add     r2, sp, #(136 + 8)                     @ store gpr_res pointer, in kSaveEverything frame
    add     r1, sp, #(FRAME_SIZE_SAVE_EVERYTHING + 8)  @ pass ArtMethod**
    mov     r0, rSELF                              @ pass Thread::Current
    blx     artMethodExitHook                      @ (Thread*, ArtMethod**, gpr_res*, fpr_res*,
                                                   @  frame_size)
    DECREASE_FRAME 8                               @ pop arguments on stack

    CFI_REMEMBER_STATE
    @ Non-zero result (a Context*) means deoptimization or a pending exception.
    cbnz    r0, .Lexithook_deopt_or_exception

    RESTORE_SAVE_EVERYTHING_FRAME
    REFRESH_MARKING_REGISTER
    blx     lr

.Lexithook_deopt_or_exception:
    // Deoptimize or exception thrown.
    CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
    bl      art_quick_do_long_jump                 @ (Context*)
    bkpt                                           // Unreached
END art_quick_method_exit_hook