Searched refs:VG_IS_8_ALIGNED (Results 1 – 7 of 7) sorted by relevance
  99  CHECK(   VG_IS_8_ALIGNED(0x0) );   in test_VG_IS_XYZ_ALIGNED()
 100  CHECK( ! VG_IS_8_ALIGNED(0x1) );   in test_VG_IS_XYZ_ALIGNED()
 101  CHECK( ! VG_IS_8_ALIGNED(0x2) );   in test_VG_IS_XYZ_ALIGNED()
 102  CHECK( ! VG_IS_8_ALIGNED(0x3) );   in test_VG_IS_XYZ_ALIGNED()
 103  CHECK( ! VG_IS_8_ALIGNED(0x4) );   in test_VG_IS_XYZ_ALIGNED()
 104  CHECK( ! VG_IS_8_ALIGNED(0x5) );   in test_VG_IS_XYZ_ALIGNED()
 105  CHECK( ! VG_IS_8_ALIGNED(0x6) );   in test_VG_IS_XYZ_ALIGNED()
 106  CHECK( ! VG_IS_8_ALIGNED(0x7) );   in test_VG_IS_XYZ_ALIGNED()
 107  CHECK(   VG_IS_8_ALIGNED(0x8) );   in test_VG_IS_XYZ_ALIGNED()
 108  CHECK( ! VG_IS_8_ALIGNED(0x9) );   in test_VG_IS_XYZ_ALIGNED()
[all …]
1146  && nBits == 64 && VG_IS_8_ALIGNED(a))) {                     in mc_LOADVn_slow()
1281  && nBits == 64 && VG_IS_8_ALIGNED(a))) {                     in mc_STOREVn_slow()
1479  if (VG_IS_8_ALIGNED(a)) break;                               in set_address_range_perms()
2614  if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {     in mc_new_mem_stack_8_w_ECU()
2628  if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {     in mc_new_mem_stack_8()
2642  if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {     in mc_die_mem_stack_8()
2659  if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {     in mc_new_mem_stack_12_w_ECU()
2677  if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {     in mc_new_mem_stack_12()
2696  if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {  in mc_die_mem_stack_12()
2719  if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {     in mc_new_mem_stack_16_w_ECU()
[all …]
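In the memcheck stack hits above, VG_IS_8_ALIGNED gates a fast path: when the adjusted stack pointer is 8-aligned, the newly exposed stack area can be updated in aligned 8-byte chunks, otherwise a generic byte-granular routine handles it. A rough sketch of that dispatch shape follows; mark_aligned_8 and mark_generic are hypothetical stand-ins (not Valgrind functions), and the redzone offset seen in the real code is omitted.

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t Addr;
    #define IS_8_ALIGNED(p) (0 == (((Addr)(p)) & ((Addr)0x7)))

    /* Hypothetical stand-ins for the fast aligned update and the generic path. */
    static void mark_aligned_8(Addr a)           { (void)a; }
    static void mark_generic(Addr a, size_t len) { (void)a; (void)len; }

    /* Shape of a "stack grew by 8 bytes" handler: take the fast path only
       when the new area starts on an 8-byte boundary. */
    static void new_mem_stack_8(Addr new_SP)
    {
       if (IS_8_ALIGNED(new_SP)) {
          mark_aligned_8(new_SP);    /* one aligned 8-byte update */
       } else {
          mark_generic(new_SP, 8);   /* fall back to the general routine */
       }
    }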
 735  vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestX86State,guest_FPREG)));    in do_pre_run_checks()
 750  vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_FPREG)));  in do_pre_run_checks()
 752  vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RAX)));    in do_pre_run_checks()
 753  vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RIP)));    in do_pre_run_checks()
 775  vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D1));                  in do_pre_run_checks()
 776  vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D1));          in do_pre_run_checks()
 777  vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D1));          in do_pre_run_checks()
 162  #define VG_IS_8_ALIGNED(aaa_p)   (0 == (((Addr)(aaa_p)) & ((Addr)0x7)))   macro
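The definition above tests alignment by masking the low three address bits: an address is 8-aligned exactly when those bits are zero. A minimal standalone sketch of the same idea (the Addr typedef and the IS_8_ALIGNED name here are illustrative stand-ins, not Valgrind's headers):

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for Valgrind's Addr type: an unsigned, pointer-sized integer. */
    typedef uintptr_t Addr;

    /* Same test as the macro: 8-aligned iff the low 3 bits are 0. */
    #define IS_8_ALIGNED(p) (0 == (((Addr)(p)) & ((Addr)0x7)))

    int main(void)
    {
       assert(   IS_8_ALIGNED(0x0) );
       assert( ! IS_8_ALIGNED(0x7) );
       assert(   IS_8_ALIGNED(0x8) );
       assert( ! IS_8_ALIGNED(0x9) );
       return 0;
    }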
194 vg_assert(VG_IS_8_ALIGNED(sp)); in VG_()
3390  while (UNLIKELY(!VG_IS_8_ALIGNED(a)) && LIKELY(len > 0)) {   in Filter__clear_range()
3414  if (UNLIKELY( !VG_IS_8_ALIGNED(a) ))                         in Filter__ok_to_skip_crd64()
3529  if (UNLIKELY( !VG_IS_8_ALIGNED(a) ))                         in Filter__ok_to_skip_cwr64()
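The Filter__clear_range() hit shows the usual alignment-prologue pattern: process bytes one at a time until the address becomes 8-aligned, then switch to 8-byte steps, then finish the tail byte-wise. A hedged sketch of that pattern outside Valgrind; byte_op and word8_op are hypothetical placeholders for whatever per-byte and per-8-byte work the real code does.

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t Addr;
    #define IS_8_ALIGNED(p) (0 == (((Addr)(p)) & ((Addr)0x7)))

    /* Hypothetical per-byte and per-8-byte operations. */
    static void byte_op(Addr a)  { (void)a; }
    static void word8_op(Addr a) { (void)a; }

    static void clear_range(Addr a, size_t len)
    {
       /* Prologue: advance byte-wise until 'a' is 8-aligned (or len runs out). */
       while (!IS_8_ALIGNED(a) && len > 0) {
          byte_op(a);
          a++; len--;
       }
       /* Main loop: handle 8 bytes per iteration on aligned addresses. */
       while (len >= 8) {
          word8_op(a);
          a += 8; len -= 8;
       }
       /* Epilogue: remaining unaligned tail bytes. */
       while (len > 0) {
          byte_op(a);
          a++; len--;
       }
    }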
460 if (!VG_IS_8_ALIGNED(*sym_avma_out)) { in get_elf_symbol_info()