//===-- VECallingConv.td - Calling Conventions VE ----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the VE architectures.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Aurora VE
//===----------------------------------------------------------------------===//
def CC_VE_C_Stack: CallingConv<[
  // F128 are assigned to the stack in 16-byte aligned units
  CCIfType<[f128], CCAssignToStackWithShadow<16, 16, [SX7]>>,

  // All of the rest are assigned to the stack in 8-byte aligned units.
  CCAssignToStack<0, 8>
]>;
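
// Note on the catch-all rule above: in CCAssignToStack<size, align>, a size of
// 0 means "use the ABI size of the value's type" (see CCAssignToStack in
// llvm/include/llvm/Target/TargetCallingConv.td), so CCAssignToStack<0, 8>
// gives each remaining scalar a slot of its own size at 8-byte alignment.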

///// C Calling Convention (VE ABI v2.1) /////
//
// Reference: https://www.nec.com/en/global/prod/hpc/aurora/document/VE-ABI_v2.1.pdf
//
def CC_VE_C : CallingConv<[
  // All arguments get passed in generic registers if there is space.

  // Promote i1/i8/i16/i32 arguments to i64.
  CCIfType<[i1, i8, i16, i32], CCPromoteToType<i64>>,

  // Convert float arguments to i64 with padding.
  //     63     31   0
  //    +------+------+
  //    | float|   0  |
  //    +------+------+
  CCIfType<[f32], CCBitConvertToType<i64>>,

  // bool, char, int, enum, long, long long, float, double
  //     --> generic 64 bit registers
  CCIfType<[i64, f64],
           CCAssignToReg<[SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>,

  // long double --> pair of generic 64 bit registers
  //
  // NOTE: If Q1 is allocated while SX1 is still free, LLVM tries to allocate
  // SX1 for the following operands; shadowing SX1 here avoids that behavior.
  CCIfType<[f128],
           CCAssignToRegWithShadow<[Q0, Q1, Q2, Q3],
                                   [SX0, SX1, SX3, SX5]>>,

  // Alternatively, they are assigned to the stack in 8-byte aligned units.
  CCDelegateTo<CC_VE_C_Stack>
]>;

///// Standard vararg C Calling Convention (VE ABI v2.1) /////
// All arguments are passed on the stack for varargs or non-prototyped
// functions.
def CC_VE2 : CallingConv<[
  // Promote i1/i8/i16/i32 arguments to i64.
  CCIfType<[i1, i8, i16, i32], CCPromoteToType<i64>>,

  // Convert float arguments to i64 with padding.
  //     63     31   0
  //    +------+------+
  //    | float|   0  |
  //    +------+------+
  CCIfType<[f32], CCBitConvertToType<i64>>,

  // F128 are assigned to the stack in 16-byte aligned units
  CCIfType<[f128], CCAssignToStack<16, 16>>,

  CCAssignToStack<0, 8>
]>;

def RetCC_VE_C : CallingConv<[
  // Promote i1/i8/i16/i32 return values to i64.
  CCIfType<[i1, i8, i16, i32], CCPromoteToType<i64>>,

  // Convert float return values to i64 with padding.
  //     63     31   0
  //    +------+------+
  //    | float|   0  |
  //    +------+------+
  CCIfType<[f32], CCBitConvertToType<i64>>,

  // bool, char, int, enum, long, long long, float, double
  //     --> generic 64 bit registers
  CCIfType<[i64, f64],
           CCAssignToReg<[SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>,

  // long double --> pair of generic 64 bit registers
  CCIfType<[f128],
           CCAssignToRegWithShadow<[Q0, Q1, Q2, Q3],
                                   [SX0, SX1, SX3, SX5]>>
]>;

///// Custom fastcc /////
//
// This passes vector params and return values in registers. Scalar values are
// handled according to the standard C calling convention.
def CC_VE_Fast : CallingConv<[
  // vector --> generic vector registers
  CCIfType<[v256i32, v256f32, v256i64, v256f64],
           CCAssignToReg<[V0, V1, V2, V3, V4, V5, V6, V7]>>,
  // TODO: make this conditional on packed mode
  CCIfType<[v512i32, v512f32],
           CCAssignToReg<[V0, V1, V2, V3, V4, V5, V6, V7]>>,

  // vector mask --> generic vector mask registers
  CCIfType<[v256i1],
           CCAssignToReg<[VM1, VM2, VM3, VM4, VM5, VM6, VM7]>>,

  // pair of vector masks --> generic vector mask registers
  CCIfType<[v512i1],
           CCAssignToRegWithShadow<[VMP1, VMP2, VMP3],
                                   [VM1, VM1, VM3]>>,

  // Follow the standard C CC for scalars.
  CCDelegateTo<CC_VE_C>
]>;

def RetCC_VE_Fast : CallingConv<[
  // vector --> generic vector registers
  CCIfType<[v256i32, v256f32, v256i64, v256f64],
           CCAssignToReg<[V0, V1, V2, V3, V4, V5, V6, V7]>>,
  // TODO: make this conditional on packed mode
  CCIfType<[v512i32, v512f32],
           CCAssignToReg<[V0, V1, V2, V3, V4, V5, V6, V7]>>,

  // vector mask --> generic vector mask registers
  CCIfType<[v256i1],
           CCAssignToReg<[VM1, VM2, VM3, VM4, VM5, VM6, VM7]>>,

  // pair of vector masks --> generic vector mask registers
  CCIfType<[v512i1],
           CCAssignToRegWithShadow<[VMP1, VMP2, VMP3],
                                   [VM1, VM1, VM3]>>,

  // Follow the standard C CC for scalars.
  CCDelegateTo<RetCC_VE_C>
]>;

// Callee-saved registers
def CSR : CalleeSavedRegs<(add (sequence "SX%u", 18, 33))>;
def CSR_NoRegs : CalleeSavedRegs<(add)>;

// PreserveAll (clobbers s62, s63) - used for ve_grow_stack
def CSR_preserve_all : CalleeSavedRegs<(add (sequence "SX%u", 0, 61),
                                            (sequence "V%u", 0, 63),
                                            (sequence "VM%u", 1, 15))>;
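
// The CallingConv<> records above are expanded by TableGen's -gen-callingconv
// backend into CC_* / RetCC_* assignment functions (VEGenCallingConv.inc) that
// the argument/return lowering code passes to CCState; each CalleeSavedRegs<>
// record becomes a ..._SaveList array and ..._RegMask in VEGenRegisterInfo.inc.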