/* Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * File: OP_SHR_LONG.S
 *
 * Code: Performs an arithmetic shift right on a long
 *
 * For: shr-long
 *
 * Description: Perform a binary shift operation using two source registers,
 *              where one holds the shift amount and the other the value to
 *              shift. Store the result in a destination register.
 *
 * Format: AA|op CC|BB (23x)
 *
 * Syntax: op vAA, vBB, vCC
 */
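
/*
 * Note on the technique below: SSE2 has no arithmetic 64-bit right shift
 * (there is no psraq), so the handler does a logical shift with psrlq and
 * then ORs the sign bits back in when the source value is negative. A C
 * sketch of the intended semantics (the function name is illustrative,
 * and .LshiftMask is assumed to hold 0x3f):
 *
 *     int64_t shr_long(int64_t vBB, int32_t vCC) {
 *         int32_t shift = vCC & 0x3f;           // only the low 6 bits count
 *         uint64_t r = (uint64_t)vBB >> shift;  // logical shift, like psrlq
 *         if (vBB < 0 && shift != 0)
 *             r |= ~0ULL << (64 - shift);       // replicate the sign bits
 *         return (int64_t)r;
 *     }
 */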

    FETCH_BB    1, %edx                 # %edx<- BB
    FETCH_CC    1, %eax                 # %eax<- CC
    movq        (rFP, %edx, 4), %xmm1   # %xmm1<- vBB
    movss       (rFP, %eax, 4), %xmm0   # %xmm0<- vCC
    movq        .LshiftMask, %xmm2      # %xmm2<- mask for the shift bits
    pand        %xmm2, %xmm0            # %xmm0<- masked shift amount
    psrlq       %xmm0, %xmm1            # %xmm1<- logically shifted vBB
    cmpl        $$0, 4(rFP, %edx, 4)    # check if we need to consider sign
    jl          .L${opcode}_finish      # negative: fix up the sign bits
    jmp         .L${opcode}_final       # sign is fine, finish
%break
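
/*
 * Code after the %break marker is emitted out of line by the mterp
 * template generator, outside the fixed-size handler stub above; it is
 * reached only through the .L${opcode}_finish branch taken when vBB is
 * negative.
 */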

.L${opcode}_finish:
    movq        .Lvalue64, %xmm3        # %xmm3<- 64
    psubq       %xmm0, %xmm3            # %xmm3<- 64 - shift amount
    movq        .L64bits, %xmm4         # %xmm4<- lower 64 bits set
    psllq       %xmm3, %xmm4            # %xmm4<- mask for the sign bits
    por         %xmm4, %xmm1            # %xmm1<- sign-extended and shifted vBB
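
/*
 * Worked example with assumed values: vBB = 0xFFFFFFFFFFFFFFF8 (-8) and a
 * shift amount of 1. psrlq leaves 0x7FFFFFFFFFFFFFFC in %xmm1; the mask
 * built above is ~0 << (64 - 1) = 0x8000000000000000, and OR-ing it in
 * yields 0xFFFFFFFFFFFFFFFC (-4), the correct arithmetic result.
 */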

.L${opcode}_final:
    movq        %xmm1, (rFP, rINST, 4)  # vAA<- shifted vBB
    FINISH      2                       # jump to next instruction