/external/eigen/lapack/ |
D | svd.cpp |
    14 …*jobz, int *m, int* n, Scalar* a, int *lda, RealScalar *s, Scalar *u, int *ldu, Scalar *vt, int *l…
    27 else if(*ldu <1 || (*jobz=='A' && *ldu <*m)
    28 || (*jobz=='O' && *m<*n && *ldu<*m)) *info = -8;
    62 matrix(u,*m,*m,*ldu) = svd.matrixU();
    67 matrix(u,*m,diag_size,*ldu) = svd.matrixU();
    77 matrix(u,*m,*m,*ldu) = svd.matrixU();
    85 …*jobv, int *m, int* n, Scalar* a, int *lda, RealScalar *s, Scalar *u, int *ldu, Scalar *vt, int *l…
    99 else if(*ldu <1 || ((*jobu=='A' || *jobu=='S') && *ldu<*m)) *info = -9;
    128 if(*jobu=='A') matrix(u,*m,*m,*ldu) = svd.matrixU();
    129 else if(*jobu=='S') matrix(u,*m,diag_size,*ldu) = svd.matrixU();
|
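In these hits, ldu is the LAPACK-style leading dimension of the caller's output buffer for U, and matrix(u,*m,*m,*ldu) maps that buffer as an Eigen matrix. A minimal C++ sketch of the same idea using public Eigen types rather than the file's internal matrix() helper (function and parameter names here are illustrative, not from svd.cpp):

    #include <Eigen/Dense>
    #include <cassert>

    // Copy the U factor of an SVD into a caller-provided, column-major buffer
    // whose leading dimension ldu may exceed m (as the *ldu >= *m checks above require).
    void copy_matrix_u(double* u, int m, int ldu, const Eigen::MatrixXd& a) {
      assert(m == a.rows() && ldu >= m);
      Eigen::BDCSVD<Eigen::MatrixXd> svd(a, Eigen::ComputeFullU | Eigen::ComputeFullV);
      Eigen::Map<Eigen::MatrixXd, 0, Eigen::OuterStride<> >
          U(u, m, m, Eigen::OuterStride<>(ldu));
      U = svd.matrixU();  // column j of U starts at u + j*ldu
    }
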
/external/llvm/test/CodeGen/NVPTX/ |
D | ldu-ldg.ll |
    4 declare i8 @llvm.nvvm.ldu.global.i.i8.p1i8(i8 addrspace(1)* %ptr, i32 %align)
    5 declare i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %ptr, i32 %align)
    12 ; ldu.global.u8
    13 %val = tail call i8 @llvm.nvvm.ldu.global.i.i8.p1i8(i8 addrspace(1)* %ptr, i32 4)
    19 ; ldu.global.u32
    20 %val = tail call i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %ptr, i32 4)
|
D | ldu-reg-plus-offset.ll |
    7 ; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+32];
    8 ; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+36];
    10 %t1 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p2, i32 4)
    12 %t2 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p3, i32 4)
    18 declare i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32*, i32)
|
D | ldu-i8.ll |
    5 declare i8 @llvm.nvvm.ldu.global.i.i8.p0i8(i8*, i32)
    9 ; CHECK: ldu.global.u8
    12 %val = tail call i8 @llvm.nvvm.ldu.global.i.i8.p0i8(i8* %a, i32 4)
|
D | bug26185-2.ll |
    3 ; Verify that we correctly emit code for extending ldg/ldu. We do not expose
    4 ; extending variants in the backend, but the ldg/ldu selection code may pick
|
/external/llvm-project/llvm/test/CodeGen/NVPTX/ |
D | ldu-ldg.ll |
    4 declare i8 @llvm.nvvm.ldu.global.i.i8.p1i8(i8 addrspace(1)* %ptr, i32 %align)
    5 declare i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %ptr, i32 %align)
    12 ; ldu.global.u8
    13 %val = tail call i8 @llvm.nvvm.ldu.global.i.i8.p1i8(i8 addrspace(1)* %ptr, i32 4)
    19 ; ldu.global.u32
    20 %val = tail call i32 @llvm.nvvm.ldu.global.i.i32.p1i32(i32 addrspace(1)* %ptr, i32 4)
|
D | ldu-reg-plus-offset.ll |
    7 ; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+32];
    8 ; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+36];
    10 %t1 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p2, i32 4)
    12 %t2 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p3, i32 4)
    18 declare i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32*, i32)
|
D | ldu-i8.ll |
    5 declare i8 @llvm.nvvm.ldu.global.i.i8.p0i8(i8*, i32)
    9 ; CHECK: ldu.global.u8
    12 %val = tail call i8 @llvm.nvvm.ldu.global.i.i8.p0i8(i8* %a, i32 4)
|
D | bug26185-2.ll |
    3 ; Verify that we correctly emit code for extending ldg/ldu. We do not expose
    4 ; extending variants in the backend, but the ldg/ldu selection code may pick
|
/external/eigen/Eigen/src/SVD/ |
D | JacobiSVD_LAPACKE.h |
    53 lapack_int lda = internal::convert_index<lapack_int>(matrix.outerStride()), ldu, ldvt; \
    60 ldu = internal::convert_index<lapack_int>(m_matrixU.outerStride()); \
    62 } else { ldu=1; u=&dummy; }\
    71 …ACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.d…
|
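The hits above take ldu from m_matrixU.outerStride(); when U is not requested, ldu is set to 1 and a dummy pointer is passed. A small sketch of the correspondence (helper name is illustrative, not from the header):

    #include <Eigen/Dense>
    #include <cassert>

    // For a column-major Eigen matrix, outerStride() is the element distance
    // between consecutive columns -- exactly the value LAPACKE expects as ldu.
    int leading_dimension(const Eigen::MatrixXf& U) {
      assert(U.outerStride() >= U.rows());
      return static_cast<int>(U.outerStride());
    }
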
/external/eigen/Eigen/src/SparseLU/ |
D | SparseLU_panel_bmod.h |
    104 Index ldu = internal::first_multiple<Index>(u_rows, PacketSize); in panel_bmod() local
    105 … Map<ScalarMatrix, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu)); in panel_bmod()
    145 eigen_assert(tempv.size()>w*ldu + nrow*w + 1); in panel_bmod()
    149 MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl)); in panel_bmod()
|
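Here ldu is not a caller argument but a padded row count: first_multiple rounds u_rows up to the next multiple of the SIMD packet size so each column of the temporary dense panel U starts at an aligned offset. A sketch of that rounding, assuming first_multiple has the usual round-up meaning:

    #include <cstddef>

    // Smallest multiple of `base` that is not less than `n`,
    // e.g. u_rows = 13, PacketSize = 4  ->  ldu = 16.
    std::ptrdiff_t round_up_to_multiple(std::ptrdiff_t n, std::ptrdiff_t base) {
      return ((n + base - 1) / base) * base;
    }
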
/external/llvm-project/llvm/test/MC/VE/ |
D | LD.s |
    22 # CHECK-INST: ldu %s11, 20(%s10, %s11)
    24 ldu %s11, 20(%s10, %s11) label
|
/external/llvm-project/llvm/test/Instrumentation/MemorySanitizer/ |
D | msan_x86intrinsics.ll |
    33 %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
    37 declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
    44 ; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
|
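@llvm.x86.sse3.ldu.dq is the IR-level form of the SSE3 LDDQU unaligned 128-bit load, which clang in practice emits for _mm_lddqu_si128. A minimal C++ usage sketch (function name is illustrative):

    #include <pmmintrin.h>  // SSE3
    #include <cstdint>

    // Unaligned 128-bit load; compiles down to lddqu, i.e. the "ldu.dq" intrinsic above.
    __m128i load_16_bytes(const std::uint8_t* p) {
      return _mm_lddqu_si128(reinterpret_cast<const __m128i*>(p));
    }
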
/external/llvm-project/llvm/test/CodeGen/VE/Scalar/ |
D | callee.ll |
    22 ; CHECK-NEXT: ldu %s0, 252(, %s11)
    38 ; CHECK-NEXT: ldu %s0, 252(, %s11)
|
D | va_callee.ll |
    11 ; CHECK: ldu %s23, 236(, %s9)
    52 ; CHECK: ldu %s23,
    99 ; CHECK: ldu %s23,
|
/external/boringssl/linux-ppc64le/crypto/test/ |
D | trampoline-ppc.S |
    187 ldu 3, 8(11)
    189 ldu 4, 8(11)
    191 ldu 5, 8(11)
    193 ldu 6, 8(11)
    195 ldu 7, 8(11)
    197 ldu 8, 8(11)
    199 ldu 9, 8(11)
    201 ldu 10, 8(11)
|
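On 64-bit PowerPC, ldu rD, 8(rA) is "load doubleword with update": it loads 8 bytes from rA+8 into rD and writes the effective address back into rA, so here r11 advances through a block of 8-byte values as the argument registers r3 through r10 are filled. A C++ model of one such step (a sketch of the instruction's semantics, not of the trampoline itself):

    #include <cstdint>
    #include <cstring>

    // Emulate `ldu rD, disp(rA)` against a flat byte image of memory.
    std::uint64_t ldu_step(std::uint64_t& rA, std::int64_t disp, const std::uint8_t* mem) {
      std::uint64_t ea = rA + disp;           // effective address
      std::uint64_t rD;
      std::memcpy(&rD, mem + ea, sizeof rD);  // the doubleword load
      rA = ea;                                // the "with update" write-back
      return rD;
    }
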
/external/openscreen/third_party/boringssl/linux-ppc64le/crypto/test/ |
D | trampoline-ppc.S |
    187 ldu 3, 8(11)
    189 ldu 4, 8(11)
    191 ldu 5, 8(11)
    193 ldu 6, 8(11)
    195 ldu 7, 8(11)
    197 ldu 8, 8(11)
    199 ldu 9, 8(11)
    201 ldu 10, 8(11)
|
/external/rust/crates/quiche/deps/boringssl/linux-ppc64le/crypto/test/ |
D | trampoline-ppc.S |
    187 ldu 3, 8(11)
    189 ldu 4, 8(11)
    191 ldu 5, 8(11)
    193 ldu 6, 8(11)
    195 ldu 7, 8(11)
    197 ldu 8, 8(11)
    199 ldu 9, 8(11)
    201 ldu 10, 8(11)
|
/external/tensorflow/tensorflow/stream_executor/cuda/ |
D | cusolver_dense_9_0.inc |
    1262 int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt,
    1269 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1275 int n, double *A, int lda, double *S, double *U, int ldu, double *VT,
    1282 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1289 int ldu, cuComplex *VT, int ldvt, cuComplex *work, int lwork,
    1297 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1304 cuDoubleComplex *U, int ldu, cuDoubleComplex *VT, int ldvt,
    1312 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1956 const float *A, int lda, const float *S, const float *U, int ldu,
    1965 return func_ptr(handle, jobz, m, n, A, lda, S, U, ldu, V, ldv, lwork, params,
    [all …]
|
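These wrappers forward to cuSOLVER's gesvd family, where ldu is the leading dimension of the device matrix U. A minimal host-side sketch of the underlying cusolverDnSgesvd call (assumes m >= n, jobu = jobvt = 'A', inputs already on the device; status checks omitted):

    #include <cuda_runtime.h>
    #include <cusolverDn.h>

    // Full SVD of an m x n single-precision matrix. A is overwritten; S receives
    // n singular values; U is m x m with leading dimension ldu = m; VT is n x n
    // with leading dimension ldvt = n.
    void gesvd_full(cusolverDnHandle_t handle, int m, int n,
                    float* A, float* S, float* U, float* VT, int* dev_info) {
      int lwork = 0;
      cusolverDnSgesvd_bufferSize(handle, m, n, &lwork);
      float* work = nullptr;
      cudaMalloc(&work, sizeof(float) * lwork);
      cusolverDnSgesvd(handle, 'A', 'A', m, n, A, /*lda=*/m, S, U, /*ldu=*/m,
                       VT, /*ldvt=*/n, work, lwork, /*rwork=*/nullptr, dev_info);
      cudaFree(work);
    }
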
D | cusolver_dense_10_0.inc |
    1360 int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt,
    1367 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1373 int n, double *A, int lda, double *S, double *U, int ldu, double *VT,
    1380 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1387 int ldu, cuComplex *VT, int ldvt, cuComplex *work, int lwork,
    1395 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1402 cuDoubleComplex *U, int ldu, cuDoubleComplex *VT, int ldvt,
    1410 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    2054 const float *A, int lda, const float *S, const float *U, int ldu,
    2063 return func_ptr(handle, jobz, m, n, A, lda, S, U, ldu, V, ldv, lwork, params,
    [all …]
|
D | cusolver_dense_10_1.inc |
    1838 int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt,
    1845 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1851 int n, double *A, int lda, double *S, double *U, int ldu, double *VT,
    1858 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1865 int ldu, cuComplex *VT, int ldvt, cuComplex *work, int lwork,
    1873 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    1880 cuDoubleComplex *U, int ldu, cuDoubleComplex *VT, int ldvt,
    1888 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    2771 const float *A, int lda, const float *S, const float *U, int ldu,
    2780 return func_ptr(handle, jobz, m, n, A, lda, S, U, ldu, V, ldv, lwork, params,
    [all …]
|
D | cusolver_dense_10_2.inc |
    2366 int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt,
    2373 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    2379 int n, double *A, int lda, double *S, double *U, int ldu, double *VT,
    2386 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    2393 int ldu, cuComplex *VT, int ldvt, cuComplex *work, int lwork,
    2401 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    2408 cuDoubleComplex *U, int ldu, cuDoubleComplex *VT, int ldvt,
    2416 return func_ptr(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work,
    3299 const float *A, int lda, const float *S, const float *U, int ldu,
    3308 return func_ptr(handle, jobz, m, n, A, lda, S, U, ldu, V, ldv, lwork, params,
    [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | sse3-intrinsics-x86.ll |
    54 %res = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %a0) ; <<16 x i8>> [#uses=1]
    57 declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8*) nounwind readonly
|
/external/tensorflow/tensorflow/core/util/ |
D | cuda_solvers.cc |
    674 Scalar* S, Scalar* U, int ldu, Scalar* VT, int ldvt, int* dev_lapack_info) { in GesvdImpl() argument
    684 ldu, CUDAComplex(VT), ldvt, in GesvdImpl()
    694 int lda, Scalar* dev_S, Scalar* dev_U, int ldu, Scalar* dev_VT, \
    699 dev_S, dev_U, ldu, dev_VT, ldvt, dev_lapack_info); \
    711 int ldu, Scalar* V, int ldv, in GesvdjBatchedImpl() argument
    721 ldu, CUDAComplex(V), ldv, &lwork, svdj_info, batch_size)); in GesvdjBatchedImpl()
    727 ldu, CUDAComplex(V), ldv, CUDAComplex(dev_workspace.mutable_data()), in GesvdjBatchedImpl()
    737 Scalar* dev_S, Scalar* dev_U, int ldu, Scalar* dev_V, int ldv, \
    742 lda, dev_S, dev_U, ldu, dev_V, ldv, \
|
/external/tensorflow/tensorflow/python/autograph/operators/ |
D | variables.py |
    29 def ldu(load_v, name): function
|