Home
last modified time | relevance | path

Searched refs:vsl (Results 1 – 25 of 71) sorted by relevance

123

/external/tensorflow/tensorflow/compiler/xla/service/cpu/
Dllvm_ir_runtime.cc164 VectorSupportLibrary vsl(F32, vector_width, b, "exp_f32"); in GenerateVF32Exp() local
206 input = vsl.Clamp(input, GetIeeeF32(-87.8), GetIeeeF32(88.8)); in GenerateVF32Exp()
211 llvm::Value* n = vsl.Floor(vsl.MulAdd(input, cephes_LOG2EF, half)); in GenerateVF32Exp()
252 n = vsl.Clamp(n, GetIeeeF32(-127), GetIeeeF32(127)); in GenerateVF32Exp()
255 x = vsl.Sub(x, vsl.Mul(cephes_exp_C1, n)); in GenerateVF32Exp()
256 x = vsl.Sub(x, vsl.Mul(cephes_exp_C2, n)); in GenerateVF32Exp()
259 llvm::Value* z = vsl.MulAdd(x, cephes_exp_p0, cephes_exp_p1); in GenerateVF32Exp()
260 z = vsl.MulAdd(z, x, cephes_exp_p2); in GenerateVF32Exp()
261 z = vsl.MulAdd(z, x, cephes_exp_p3); in GenerateVF32Exp()
262 z = vsl.MulAdd(z, x, cephes_exp_p4); in GenerateVF32Exp()
[all …]
Dtiled_dot_emitter.cc36 MemoryTile(VectorSupportLibrary* vsl, llvm::IRBuilder<>* b, in MemoryTile() argument
39 : vsl_(vsl), b_(b) { in MemoryTile()
723 void HandleResiduesOnK(VectorSupportLibrary* vsl, llvm::Value* n_start,
725 void HandleResiduesOnM(VectorSupportLibrary* vsl, int64 tile_size_k,
731 void EmitTiledGemm(VectorSupportLibrary* vsl, int64 tile_size_k,
780 VectorSupportLibrary vsl(scalar_type(), current_vectorization_width, b_, in HandleResiduesOnN() local
782 HandleResiduesOnK(&vsl, GetInt64(n_start), GetInt64(n_end)); in HandleResiduesOnN()
795 VectorSupportLibrary vsl(scalar_type(), 1, b_, "gemm"); in HandleResiduesOnN() local
798 HandleResiduesOnK(&vsl, n_i, n_i_next); in HandleResiduesOnN()
803 void TiledSmallGemmEmitter::HandleResiduesOnK(VectorSupportLibrary* vsl, in HandleResiduesOnK() argument
[all …]
/external/llvm-project/clang/test/CodeGen/SystemZ/
Dbuiltins-systemz-zvector-error.c11 volatile vector signed long long vsl; variable
70 vsl = vec_permi(vsl, vsl, idx); // expected-error {{no matching function}} expected-error {{argume… in test_core()
73 vsl = vec_permi(vsl, vsl, -1); // expected-error {{no matching function}} in test_core()
76 vsl = vec_permi(vsl, vsl, 4); // expected-error {{no matching function}} in test_core()
134 vsl = vec_gather_element(vsl, vul, cptrsl, idx); // expected-error {{no matching function}} in test_core()
137 vsl = vec_gather_element(vsl, vul, cptrsl, -1); // expected-error {{no matching function}} in test_core()
140 vsl = vec_gather_element(vsl, vul, cptrsl, 2); // expected-error {{no matching function}} in test_core()
198 vec_scatter_element(vsl, vul, ptrsl, idx); // expected-error {{no matching function}} in test_core()
201 vec_scatter_element(vsl, vul, ptrsl, -1); // expected-error {{no matching function}} in test_core()
204 vec_scatter_element(vsl, vul, ptrsl, 2); // expected-error {{no matching function}} in test_core()
[all …]
Dbuiltins-systemz-zvector.c14 volatile vector signed long long vsl; variable
115 sl = vec_extract(vsl, idx); in test_core()
155 vsl = vec_insert(sl, vsl, idx); in test_core()
186 vsl = vec_promote(sl, idx); in test_core()
214 vsl = vec_insert_and_zero(cptrsl); in test_core()
251 vsl = vec_perm(vsl, vsl, vuc); in test_core()
264 vsl = vec_permi(vsl, vsl, 0); in test_core()
267 vsl = vec_permi(vsl, vsl, 1); in test_core()
270 vsl = vec_permi(vsl, vsl, 2); in test_core()
273 vsl = vec_permi(vsl, vsl, 3); in test_core()
[all …]
Dbuiltins-systemz-zvector3.c14 volatile vector signed long long vsl; variable
84 vsl += vec_revb(vec_xl(idx, cptrsl)); in test_core()
101 vec_xst(vec_revb(vsl), idx, ptrsl); in test_core()
118 vsl += vec_revb(vec_insert_and_zero(cptrsl)); in test_core()
135 vsl += vec_revb(vec_splats(sl)); in test_core()
163 vsl2 = vsl; in test_core()
164 vsl += vec_revb(vec_insert(sl, vec_revb(vsl2), 0)); in test_core()
194 sl = vec_extract(vec_revb(vsl), 0); in test_core()
222 vsl += vec_reve(vec_xl(idx, cptrsl)); in test_core()
243 vec_xst(vec_reve(vsl), idx, ptrsl); in test_core()
[all …]
Dbuiltins-systemz-vector.c18 volatile vec_slong vsl; variable
70 vsi = __builtin_s390_vpksg(vsl, vsl); in test_core()
77 vsi = __builtin_s390_vpksgs(vsl, vsl, &cc); in test_core()
87 vsl = __builtin_s390_vuphf(vsi); in test_core()
94 vsl = __builtin_s390_vuplf(vsi); in test_core()
137 vsl = __builtin_s390_vavgg(vsl, vsl); in test_integer()
240 vsl = __builtin_s390_vmaef(vsi, vsi, vsl); in test_integer()
253 vsl = __builtin_s390_vmaof(vsi, vsi, vsl); in test_integer()
279 vsl = __builtin_s390_vmef(vsi, vsi); in test_integer()
292 vsl = __builtin_s390_vmof(vsi, vsi); in test_integer()
[all …]
Dbuiltins-systemz-vector-constrained.c8 volatile vec_slong vsl; variable
14 vsl = __builtin_s390_vfcedbs(vd, vd, &cc); in test_float()
16 vsl = __builtin_s390_vfchdbs(vd, vd, &cc); in test_float()
18 vsl = __builtin_s390_vfchedbs(vd, vd, &cc); in test_float()
21 vsl = __builtin_s390_vftcidb(vd, 0, &cc); in test_float()
23 vsl = __builtin_s390_vftcidb(vd, 4095, &cc); in test_float()
Dbuiltins-systemz-zvector3-error.c11 volatile vector signed long long vsl; variable
87 vsl = vec_sldb(vsl, vsl, idx); // expected-error {{no matching function}} \ in test_integer()
128 vsl = vec_srdb(vsl, vsl, idx); // expected-error {{no matching function}} \ in test_integer()
Dbuiltins-systemz-zvector-constrained.c13 volatile vector signed long long vsl; variable
232 vd = vec_ctd(vsl, 0); in test_float()
238 vd = vec_ctd(vsl, 1); in test_float()
246 vd = vec_ctd(vsl, 31); in test_float()
255 vsl = vec_ctsl(vd, 0); in test_float()
261 vsl = vec_ctsl(vd, 1); in test_float()
269 vsl = vec_ctsl(vd, 31); in test_float()
278 vd = vec_double(vsl); in test_float()
285 vsl = vec_signed(vd); in test_float()
Dbuiltins-systemz-zvector2.c14 volatile vector signed long long vsl; variable
510 vsl = vec_nand(vsl, vsl); in test_integer()
539 vsl = vec_orc(vsl, vsl); in test_integer()
568 vsl = vec_eqv(vsl, vsl); in test_integer()
597 vul = vec_popcnt(vsl); in test_integer()
610 vd = vec_slb(vd, vsl); in test_integer()
636 vd = vec_srab(vd, vsl); in test_integer()
649 vd = vec_srb(vd, vsl); in test_integer()
760 vd = vec_double(vsl); in test_float()
767 vsl = vec_signed(vd); in test_float()
Dbuiltins-systemz-zvector3-constrained.c14 volatile vector signed long long vsl; variable
82 vd = vec_double(vsl); in test_float()
95 vsl = vec_signed(vd); in test_float()
Dbuiltins-systemz-zvector2-constrained.c13 volatile vector signed long long vsl; variable
457 vd = vec_double(vsl); in test_float()
464 vsl = vec_signed(vd); in test_float()
Dbuiltins-systemz-vector3-error.c19 volatile vec_slong vsl; variable
Dbuiltins-systemz-vector3.c19 volatile vec_slong vsl; variable
Dbuiltins-systemz-vector2-error.c19 volatile vec_slong vsl; variable
Dbuiltins-systemz-zvector2-error.c11 volatile vector signed long long vsl; variable
Dbuiltins-systemz-vector2.c19 volatile vec_slong vsl; variable
/external/clang/test/CodeGen/
Dbuiltins-systemz-zvector-error.c11 volatile vector signed long long vsl; variable
70 vsl = vec_permi(vsl, vsl, idx); // expected-error {{no matching function}} in test_core()
73 vsl = vec_permi(vsl, vsl, -1); // expected-error {{no matching function}} in test_core()
76 vsl = vec_permi(vsl, vsl, 4); // expected-error {{no matching function}} in test_core()
134 vsl = vec_gather_element(vsl, vul, cptrsl, idx); // expected-error {{no matching function}} in test_core()
137 vsl = vec_gather_element(vsl, vul, cptrsl, -1); // expected-error {{no matching function}} in test_core()
140 vsl = vec_gather_element(vsl, vul, cptrsl, 2); // expected-error {{no matching function}} in test_core()
198 vec_scatter_element(vsl, vul, ptrsl, idx); // expected-error {{no matching function}} in test_core()
201 vec_scatter_element(vsl, vul, ptrsl, -1); // expected-error {{no matching function}} in test_core()
204 vec_scatter_element(vsl, vul, ptrsl, 2); // expected-error {{no matching function}} in test_core()
[all …]
Dbuiltins-systemz-zvector.c11 volatile vector signed long long vsl; variable
94 sl = vec_extract(vsl, idx); in test_core()
121 vsl = vec_insert(sl, vsl, idx); in test_core()
142 vsl = vec_promote(sl, idx); in test_core()
161 vsl = vec_insert_and_zero(cptrsl); in test_core()
186 vsl = vec_perm(vsl, vsl, vuc); in test_core()
195 vsl = vec_permi(vsl, vsl, 0); in test_core()
197 vsl = vec_permi(vsl, vsl, 1); in test_core()
199 vsl = vec_permi(vsl, vsl, 2); in test_core()
201 vsl = vec_permi(vsl, vsl, 3); in test_core()
[all …]
Dbuiltins-systemz-vector.c18 volatile vec_slong vsl; variable
70 vsi = __builtin_s390_vpksg(vsl, vsl); in test_core()
77 vsi = __builtin_s390_vpksgs(vsl, vsl, &cc); in test_core()
87 vsl = __builtin_s390_vuphf(vsi); in test_core()
94 vsl = __builtin_s390_vuplf(vsi); in test_core()
137 vsl = __builtin_s390_vavgg(vsl, vsl); in test_integer()
240 vsl = __builtin_s390_vmaef(vsi, vsi, vsl); in test_integer()
253 vsl = __builtin_s390_vmaof(vsi, vsi, vsl); in test_integer()
279 vsl = __builtin_s390_vmef(vsi, vsi); in test_integer()
292 vsl = __builtin_s390_vmof(vsi, vsi); in test_integer()
[all …]
/external/llvm-project/llvm/test/CodeGen/VE/VELIntrinsics/
Dvbrd.ll19 %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdd.vsl(double %0, i32 256)
25 declare <256 x double> @llvm.ve.vl.vbrdd.vsl(double, i32)
95 %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 %0, i32 256)
101 declare <256 x double> @llvm.ve.vl.vbrdl.vsl(i64, i32)
154 %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 31, i32 256)
204 %3 = tail call fast <256 x double> @llvm.ve.vl.vbrds.vsl(float %0, i32 256)
210 declare <256 x double> @llvm.ve.vl.vbrds.vsl(float, i32)
274 %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 %0, i32 256)
280 declare <256 x double> @llvm.ve.vl.vbrdw.vsl(i32, i32)
336 %3 = tail call fast <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 31, i32 256)
[all …]
/external/llvm-project/llvm/test/CodeGen/PowerPC/
Dshift128.ll52 ; P8-NOT: {{\b}}vsl
55 ; P9: vsl
/external/llvm/test/CodeGen/PowerPC/
Dp8-scalar_vector_conversions.ll1265 define i64 @getsl0(<2 x i64> %vsl) {
1267 %vsl.addr = alloca <2 x i64>, align 16
1268 store <2 x i64> %vsl, <2 x i64>* %vsl.addr, align 16
1269 %0 = load <2 x i64>, <2 x i64>* %vsl.addr, align 16
1280 define i64 @getsl1(<2 x i64> %vsl) {
1282 %vsl.addr = alloca <2 x i64>, align 16
1283 store <2 x i64> %vsl, <2 x i64>* %vsl.addr, align 16
1284 %0 = load <2 x i64>, <2 x i64>* %vsl.addr, align 16
1325 define i64 @getvelsl(<2 x i64> %vsl, i32 signext %i) {
1327 %vsl.addr = alloca <2 x i64>, align 16
[all …]
/external/capstone/suite/MC/PowerPC/
Dppc64-encoding-vmx.s.cs43 0x10,0x43,0x21,0xc4 = vsl 2, 3, 4
/external/boringssl/linux-ppc64le/crypto/modes/
Dghashp8-ppc.S27 vsl 9,9,5

123