• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 
16 #include "x64_proepilog.h"
17 #include "x64_memlayout.h"
18 #include "x64_isa.h"
19 #include "isel.h"
20 #include "x64_cg.h"
21 
22 namespace maplebe {
23 using namespace maple;
NeedProEpilog()24 bool X64GenProEpilog::NeedProEpilog()
25 {
26     return true;
27 }
GenerateCalleeSavedRegs(bool isPush)28 void X64GenProEpilog::GenerateCalleeSavedRegs(bool isPush)
29 {
30     X64CGFunc &x64cgFunc = static_cast<X64CGFunc &>(cgFunc);
31     const auto &calleeSavedRegs = x64cgFunc.GetCalleeSavedRegs();
32     if (calleeSavedRegs.empty()) {
33         return;
34     }
35     /* CalleeSave(0) = -(FrameSize + CalleeReg - ArgsStk) */
36     X64MemLayout *memLayout = static_cast<X64MemLayout *>(cgFunc.GetMemlayout());
37     int64 offset = -static_cast<int64>((memLayout->StackFrameSize() +
38         static_cast<X64CGFunc &>(cgFunc).SizeOfCalleeSaved() - memLayout->SizeOfArgsToStackPass()));
39     RegOperand &baseReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
40     std::vector<std::pair<uint16, int32>> calleeRegAndOffsetVec;
41     for (const auto &reg : calleeSavedRegs) {
42         RegType regType = IsGPRegister(reg) ? kRegTyInt : kRegTyFloat;
43         uint32 regByteSize = IsGPRegister(reg) ? kX64IntregBytelen : kX64FpregBytelen;
44         uint32 regSize = regByteSize * kBitsPerByte;
45         DEBUG_ASSERT((regSize == k32BitSize || regSize == k64BitSize), "only supported 32/64-bits");
46         RegOperand &calleeReg = cgFunc.GetOpndBuilder()->CreatePReg(reg, regSize, regType);
47         calleeRegAndOffsetVec.push_back(
48             std::pair<uint16, int32>(static_cast<uint16>(reg) - 1, static_cast<int32>(offset)));
49         MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(baseReg, offset, regSize);
50         if (isPush) {
51             GeneratePushCalleeSavedRegs(calleeReg, memOpnd, regSize);
52         } else {
53             GeneratePopCalleeSavedRegs(calleeReg, memOpnd, regSize);
54         }
55         offset += static_cast<int64>(regByteSize);
56     }
57     const auto &emitMemoryManager = CGOptions::GetInstance().GetEmitMemoryManager();
58     if (emitMemoryManager.codeSpace != nullptr) {
59         emitMemoryManager.funcCalleeOffsetSaver(emitMemoryManager.codeSpace, cgFunc.GetName(), calleeRegAndOffsetVec);
60         const int32 fp2SPDelta = 16;  // FP + return address;
61         emitMemoryManager.funcFpSPDeltaSaver(emitMemoryManager.codeSpace, cgFunc.GetName(), fp2SPDelta);
62     }
63 }
64 
GeneratePushCalleeSavedRegs(RegOperand & regOpnd,MemOperand & memOpnd,uint32 regSize)65 void X64GenProEpilog::GeneratePushCalleeSavedRegs(RegOperand &regOpnd, MemOperand &memOpnd, uint32 regSize)
66 {
67     MOperator mMovrmOp = (regSize == k32BitSize) ? x64::MOP_movl_r_m : x64::MOP_movq_r_m;
68     Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]);
69     copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd);
70     cgFunc.GetCurBB()->AppendInsn(copyInsn);
71 }
72 
GeneratePopCalleeSavedRegs(RegOperand & regOpnd,MemOperand & memOpnd,uint32 regSize)73 void X64GenProEpilog::GeneratePopCalleeSavedRegs(RegOperand &regOpnd, MemOperand &memOpnd, uint32 regSize)
74 {
75     MOperator mMovrmOp = (regSize == k32BitSize) ? x64::MOP_movl_m_r : x64::MOP_movq_m_r;
76     Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]);
77     copyInsn.AddOpndChain(memOpnd).AddOpndChain(regOpnd);
78     cgFunc.GetCurBB()->AppendInsn(copyInsn);
79 }
80 
GeneratePushUnnamedVarargRegs()81 void X64GenProEpilog::GeneratePushUnnamedVarargRegs()
82 {
83     if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) {
84         X64MemLayout *memlayout = static_cast<X64MemLayout *>(cgFunc.GetMemlayout());
85         uint8 size = GetPointerSize();
86         uint32 dataSizeBits = size * kBitsPerByte;
87         int64 offset = -memlayout->GetGRSaveAreaBaseLoc();
88         if (memlayout->GetSizeOfGRSaveArea() % kX64StackPtrAlignment) {
89             offset += size; /* End of area should be aligned. Hole between VR and GR area */
90         }
91         uint32 start_regno = k6BitSize - (memlayout->GetSizeOfGRSaveArea() / size);
92         DEBUG_ASSERT(start_regno <= k6BitSize, "Incorrect starting GR regno for GR Save Area");
93 
94         /* Parameter registers in x86: %rdi, %rsi, %rdx, %rcx, %r8, %r9 */
95         std::vector<X64reg> paramRegs = {RDI, RSI, RDX, RCX, R8, R9};
96         for (uint32 i = start_regno; i < paramRegs.size(); i++) {
97             MOperator mMovrmOp = x64::MOP_movq_r_m;
98             RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
99             MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(opndFpReg, offset, dataSizeBits);
100             Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]);
101             RegOperand &regOpnd = cgFunc.GetOpndBuilder()->CreatePReg(paramRegs[i], k64BitSize, kRegTyInt);
102             copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd);
103             cgFunc.GetCurBB()->AppendInsn(copyInsn);
104             offset += size;
105         }
106 
107         if (!CGOptions::UseGeneralRegOnly()) {
108             offset = -memlayout->GetVRSaveAreaBaseLoc();
109             start_regno = k6BitSize - (memlayout->GetSizeOfVRSaveArea() / (size * k2BitSize));
110             DEBUG_ASSERT(start_regno <= k6BitSize, "Incorrect starting GR regno for VR Save Area");
111             for (uint32 i = start_regno + static_cast<uint32>(V0); i < static_cast<uint32>(V6); i++) {
112                 MOperator mMovrmOp = x64::MOP_movq_r_m;
113                 RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
114                 MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(opndFpReg, offset, dataSizeBits);
115                 Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]);
116                 RegOperand &regOpnd =
117                     cgFunc.GetOpndBuilder()->CreatePReg(static_cast<X64reg>(i), k64BitSize, kRegTyInt);
118                 copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd);
119 
120                 cgFunc.GetCurBB()->AppendInsn(copyInsn);
121                 offset += (size * k2BitSize);
122             }
123         }
124     }
125 }
126 
GenerateProlog(BB & bb)127 void X64GenProEpilog::GenerateProlog(BB &bb)
128 {
129     auto &x64CGFunc = static_cast<X64CGFunc &>(cgFunc);
130     BB *formerCurBB = cgFunc.GetCurBB();
131     x64CGFunc.GetDummyBB()->ClearInsns();
132     x64CGFunc.GetDummyBB()->SetIsProEpilog(true);
133     cgFunc.SetCurBB(*x64CGFunc.GetDummyBB());
134 
135     /* push %rbp */
136     MOperator mPushrOp = x64::MOP_pushq_r;
137     Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mPushrOp, X64CG::kMd[mPushrOp]);
138     RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
139     pushInsn.AddOpndChain(opndFpReg);
140     cgFunc.GetCurBB()->AppendInsn(pushInsn);
141 
142     /* mov %rsp, %rbp */
143     MOperator mMovrrOp = x64::MOP_movq_r_r;
144     Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrrOp, X64CG::kMd[mMovrrOp]);
145     RegOperand &opndSpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt);
146     copyInsn.AddOpndChain(opndSpReg).AddOpndChain(opndFpReg);
147     cgFunc.GetCurBB()->AppendInsn(copyInsn);
148 
149     /* sub $framesize, %rsp */
150     if (cgFunc.GetFunction().HasCall() || cgFunc.HasVLAOrAlloca()) {
151         MOperator mSubirOp = x64::MOP_subq_i_r;
152         Insn &subInsn = cgFunc.GetInsnBuilder()->BuildInsn(mSubirOp, X64CG::kMd[mSubirOp]);
153         auto *memLayout = static_cast<X64MemLayout *>(cgFunc.GetMemlayout());
154         int64 trueFrameSize =
155             static_cast<int64>(memLayout->StackFrameSize() + static_cast<X64CGFunc &>(cgFunc).SizeOfCalleeSaved());
156         ImmOperand &opndImm = cgFunc.GetOpndBuilder()->CreateImm(k32BitSize, trueFrameSize);
157         subInsn.AddOpndChain(opndImm).AddOpndChain(opndSpReg);
158         cgFunc.GetCurBB()->AppendInsn(subInsn);
159     }
160 
161     GenerateCalleeSavedRegs(true);
162     GeneratePushUnnamedVarargRegs();
163 
164     bb.InsertAtBeginning(*x64CGFunc.GetDummyBB());
165     x64CGFunc.GetDummyBB()->SetIsProEpilog(false);
166     cgFunc.SetCurBB(*formerCurBB);
167 }
168 
GenerateEpilog(BB & bb)169 void X64GenProEpilog::GenerateEpilog(BB &bb)
170 {
171     auto &x64CGFunc = static_cast<X64CGFunc &>(cgFunc);
172     BB *formerCurBB = cgFunc.GetCurBB();
173     x64CGFunc.GetDummyBB()->ClearInsns();
174     x64CGFunc.GetDummyBB()->SetIsProEpilog(true);
175     cgFunc.SetCurBB(*x64CGFunc.GetDummyBB());
176 
177     GenerateCalleeSavedRegs(false);
178 
179     if (cgFunc.GetFunction().HasCall() || cgFunc.HasVLAOrAlloca()) {
180         /*
181          * leave  equal with
182          * mov rsp rbp
183          * pop rbp
184          */
185         MOperator mLeaveOp = x64::MOP_leaveq;
186         Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mLeaveOp, X64CG::kMd[mLeaveOp]);
187         cgFunc.GetCurBB()->AppendInsn(popInsn);
188     } else {
189         /* pop %rbp */
190         MOperator mPopOp = x64::MOP_popq_r;
191         Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mPopOp, X64CG::kMd[mPopOp]);
192         RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
193         pushInsn.AddOpndChain(opndFpReg);
194         cgFunc.GetCurBB()->AppendInsn(pushInsn);
195     }
196     /* ret */
197     MOperator mRetOp = x64::MOP_retq;
198     Insn &retInsn = cgFunc.GetInsnBuilder()->BuildInsn(mRetOp, X64CG::kMd[mRetOp]);
199     cgFunc.GetCurBB()->AppendInsn(retInsn);
200 
201     bb.AppendBBInsns(*x64CGFunc.GetDummyBB());
202     x64CGFunc.GetDummyBB()->SetIsProEpilog(false);
203     cgFunc.SetCurBB(*formerCurBB);
204 }
205 
Run()206 void X64GenProEpilog::Run()
207 {
208     GenerateProlog(*(cgFunc.GetFirstBB()));
209     GenerateEpilog(*(cgFunc.GetLastBB()));
210 }
211 } /* namespace maplebe */
212