/* Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * File: OP_SHR_LONG_2ADDR.S
 *
 * Code: Performs a shift right long
 *
 * For: shr-long/2addr
 *
 * Description: Perform a binary shift operation using two source registers
 *              where the first is the value to shift and the second is the
 *              shift amount. Store the result in the first source register.
 *
 * Format: B|A|op (12x)
 *
 * Syntax: op vA, vB
 */

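/*
 * Semantics, as a C-style sketch (an illustrative assumption; vA and vB
 * stand for the Dalvik virtual registers, not real C variables):
 *
 *     int64_t value = vA;          // 64-bit value to shift
 *     uint32_t shift = vB & 0x3f;  // only the low six bits are used
 *     vA = value >> shift;         // arithmetic (sign-preserving) shift
 */
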
    movl        rINST, %edx             # %edx<- BA
    shr         $$4, %edx               # %edx<- B
    andl        $$15, rINST             # rINST<- A
    movss       (rFP, %edx, 4),  %xmm0  # %xmm0<- vB
    movq        (rFP, rINST, 4), %xmm1  # %xmm1<- vA
    movq        .LshiftMask, %xmm2      # %xmm2<- mask for the shift bits
    pand        %xmm2, %xmm0            # %xmm0<- masked shift amount
    psrlq       %xmm0, %xmm1            # %xmm1<- shifted vA
    cmpl        $$0, 4(rFP, rINST, 4)   # check if we need to consider sign
    jl          .L${opcode}_finish      # consider sign
    jmp         .L${opcode}_final       # sign is fine, finish
%break

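/*
 * psrlq is a logical shift, so for a negative vA the bits vacated at the
 * top must be filled back in: the fix-up below builds a mask of ones
 * shifted left by (64 - shift amount) and ORs it into the result to
 * restore the sign bits.
 */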
.L${opcode}_finish:
    movq        .Lvalue64, %xmm3        # %xmm3<- 64
    psubq       %xmm0, %xmm3            # %xmm3<- 64 - shift amount
    movq        .L64bits, %xmm4         # %xmm4<- lower 64 bits set
    psllq       %xmm3, %xmm4            # %xmm4<- correct mask for sign bits
    por         %xmm4, %xmm1            # %xmm1<- signed and shifted vA

.L${opcode}_final:
    movq        %xmm1, (rFP, rINST, 4)  # vA<- shifted vA
    FINISH      1                       # jump to next instruction