| /external/vixl/examples/aarch64/ | 
| D | simulated-runtime-calls.cc | 71   __ Lsl(w0, w0, 2);  in GenerateRuntimeCallExamples()  local
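
For context, this hit is the immediate-shift form: it left-shifts w0 by two bits, i.e. multiplies it by 4. A minimal standalone sketch of the same call, assuming the standard VIXL headers and the examples' usual `#define __ masm->` convention (GenerateShiftDemo is a hypothetical name, not from the source):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    #define __ masm->

    // Hypothetical helper mirroring the call at line 71 above.
    void GenerateShiftDemo(MacroAssembler* masm) {
      // w0 = w0 << 2, i.e. w0 * 4 (immediate-shift overload).
      __ Lsl(w0, w0, 2);
    }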
  | 
| /external/vixl/test/aarch64/ | 
| D | test-assembler-aarch64.cc | 6166   __ Lsl(x16, x0, x1);  in TEST()  local
|   | 6167   __ Lsl(x17, x0, x2);  in TEST()  local
|   | 6168   __ Lsl(x18, x0, x3);  in TEST()  local
|   | 6169   __ Lsl(x19, x0, x4);  in TEST()  local
|   | 6170   __ Lsl(x20, x0, x5);  in TEST()  local
|   | 6171   __ Lsl(x21, x0, x6);  in TEST()  local
|   | 6173   __ Lsl(w22, w0, w1);  in TEST()  local
|   | 6174   __ Lsl(w23, w0, w2);  in TEST()  local
|   | 6175   __ Lsl(w24, w0, w3);  in TEST()  local
|   | 6176   __ Lsl(w25, w0, w4);  in TEST()  local
|   | [all …]
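
These hits exercise the register-shift form, which assembles to LSLV: the shift amount comes from a register and is taken modulo the register width (64 for x registers, 32 for w registers). A minimal sketch under the same assumptions as the example above (GenerateRegisterShiftDemo is a hypothetical name):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    #define __ masm->

    void GenerateRegisterShiftDemo(MacroAssembler* masm) {
      // x16 = x0 << (x1 % 64): register-shift overload, emits LSLV.
      __ Lsl(x16, x0, x1);
      // w22 = w0 << (w1 % 32): 32-bit variant.
      __ Lsl(w22, w0, w1);
    }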
 | 
| D | test-assembler-sve-aarch64.cc | 10710     __ Lsl(zn_s, zn_s, kSRegSize);  in sve_st1_scalar_plus_vector_helper()  local
|   | 12701   __ Lsl(zd_lsl, zn, shift - 1);  // Lsl supports 0 - lane_size-1.  in BitwiseShiftImmHelper()  local
|   | 12995   __ Lsl(z3.VnB(), p0.Merging(), z0.VnB(), z1.VnB());  in TEST_SVE()  local
|   | 13001   __ Lsl(z6.VnH(), p3.Merging(), z0.VnH(), z1.VnH());  in TEST_SVE()  local
|   | 13007   __ Lsl(z9.VnS(), p0.Merging(), z0.VnS(), z1.VnS());  in TEST_SVE()  local
|   | 13012   __ Lsl(z12.VnD(), p0.Merging(), z0.VnD(), z1.VnD());  in TEST_SVE()  local
|   | 13016   __ Lsl(z14.VnD(), p0.Merging(), z1.VnD(), z11.VnD());  in TEST_SVE()  local
|   | 13070   __ Lsl(z3.VnB(), p0.Merging(), z0.VnB(), z1.VnD());  in TEST_SVE()  local
|   | 13075   __ Lsl(z6.VnH(), p3.Merging(), z6.VnH(), z1.VnD());  in TEST_SVE()  local
|   | 13080   __ Lsl(z9.VnS(), p0.Merging(), z0.VnS(), z1.VnD());  in TEST_SVE()  local
|   | [all …]
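
The SVE overloads come in three shapes visible above: an unpredicated immediate shift (valid amounts are 0 to lane_size-1, per the comment at line 12701), a predicated element-wise shift, and a predicated shift by wide .D elements. A minimal sketch of each, assuming an SVE-enabled VIXL build (GenerateSveShiftDemo is a hypothetical name; the predicated calls mirror the test lines above):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    #define __ masm->

    void GenerateSveShiftDemo(MacroAssembler* masm) {
      // Unpredicated immediate form: every .H lane of z2 = z0 << 3.
      __ Lsl(z2.VnH(), z0.VnH(), 3);
      // Predicated element-wise form: active .B lanes of z3 = z0 << z1.
      __ Lsl(z3.VnB(), p0.Merging(), z0.VnB(), z1.VnB());
      // Predicated wide-element form: .S lanes shifted by .D elements of z1.
      __ Lsl(z9.VnS(), p0.Merging(), z0.VnS(), z1.VnD());
    }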
 | 
| /external/vixl/src/aarch64/ | 
| D | macro-assembler-aarch64.h | 2017   void Lsl(const Register& rd, const Register& rn, unsigned shift) {  in Lsl()  function
|   | 2024   void Lsl(const Register& rd, const Register& rn, const Register& rm) {  in Lsl()  function
|   | 5233   void Lsl(const ZRegister& zd,  in Lsl()  function
|   | 5245   void Lsl(const ZRegister& zd, const ZRegister& zn, int shift) {  in Lsl()  function
|   | 5250   void Lsl(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) {  in Lsl()  function
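
Taken together, the header declares two scalar overloads (immediate and register shift) and three SVE overloads. A minimal standalone sketch of driving the scalar pair through a MacroAssembler, assuming VIXL's standard build setup:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    int main() {
      MacroAssembler masm;
      masm.Lsl(x0, x1, 8);   // Immediate overload (line 2017): x0 = x1 << 8.
      masm.Lsl(x0, x1, x2);  // Register overload (line 2024): emits LSLV.
      masm.FinalizeCode();   // Seal the generated code buffer.
      return 0;
    }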
  | 
| /external/vixl/test/aarch32/ | 
| D | test-assembler-aarch32.cc | 783   __ Lsl(r3, r1, 4);  in TEST()  local
|   | 807   __ Lsl(r3, r1, r9);  in TEST()  local
|   | 2795   __ Lsl(r4, r3, 28);  in TEST()  local
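
On AArch32 the same macro name covers LSL with either an immediate or a register shift amount; the register form reads the shift from the bottom byte of the source register. A minimal sketch, assuming the standard VIXL AArch32 headers (GenerateA32ShiftDemo is a hypothetical name):

    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    #define __ masm->

    void GenerateA32ShiftDemo(MacroAssembler* masm) {
      // r3 = r1 << 4 (immediate form, as at line 783 above).
      __ Lsl(r3, r1, 4);
      // r3 = r1 << r9, shift taken from the bottom byte of r9 (line 807).
      __ Lsl(r3, r1, r9);
    }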
  | 
| /external/swiftshader/third_party/subzero/src/ | 
| D | IceInstARM32.h | 397     Lsl,  enumerator
  | 
| /external/vixl/src/aarch32/ | 
| D | macro-assembler-aarch32.h | 2254   void Lsl(Condition cond, Register rd, Register rm, const Operand& operand) {  in Lsl()  function
|   | 2271   void Lsl(Register rd, Register rm, const Operand& operand) {  in Lsl()  function
|   | 2274   void Lsl(FlagsUpdate flags,  in Lsl()  function
|   | 2300   void Lsl(FlagsUpdate flags,  in Lsl()  function
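
The AArch32 overloads add Condition and FlagsUpdate parameters on top of the plain form, so the one name covers conditional and flag-setting (LSLS) encodings. A minimal sketch of each variant under the same assumptions as above (GenerateA32ShiftVariants is a hypothetical name; eq and SetFlags are VIXL's Condition and FlagsUpdate constants):

    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    #define __ masm->

    void GenerateA32ShiftVariants(MacroAssembler* masm) {
      __ Lsl(r3, r1, 4);            // Plain form (line 2271).
      __ Lsl(eq, r3, r1, 4);        // Conditional form (line 2254).
      __ Lsl(SetFlags, r3, r1, 4);  // Flag-setting form, i.e. LSLS (line 2274).
    }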
  |