/external/eigen/Eigen/src/Core/ |
D | DenseStorage.h |
    251  Index m_rows;
    254  EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}
    256  : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
    257  …NC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(ot…
    263  m_rows = other.m_rows;
    268  EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {}
    270  …{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
    271  EIGEN_DEVICE_FUNC Index rows() const {return m_rows;}
    273  …EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols =…
    274  EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; }
    [all …]
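The matches above are from the fully dynamic specialization of DenseStorage, where the row and column counts are plain runtime members that resize() and swap() keep in step with the payload. A minimal standalone sketch of that bookkeeping pattern (illustrative only; the DynamicStorage name is invented here, this is not Eigen's actual DenseStorage):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Sketch: a fully dynamic 2-D storage type that carries its extents as
    // members, mirroring the m_rows/m_cols bookkeeping in the matches above.
    template <typename Scalar>
    class DynamicStorage {
     public:
      DynamicStorage() : m_rows(0), m_cols(0) {}
      DynamicStorage(std::ptrdiff_t rows, std::ptrdiff_t cols)
          : m_data(static_cast<std::size_t>(rows * cols)), m_rows(rows), m_cols(cols) {}

      std::ptrdiff_t rows() const { return m_rows; }
      std::ptrdiff_t cols() const { return m_cols; }

      // As at line 274 above: resizing updates the cached extents together
      // with the payload.
      void resize(std::ptrdiff_t rows, std::ptrdiff_t cols) {
        m_data.resize(static_cast<std::size_t>(rows * cols));
        m_rows = rows;
        m_cols = cols;
      }

      // As at line 270 above: swap exchanges data and both extents.
      void swap(DynamicStorage& other) {
        std::swap(m_data, other.m_data);
        std::swap(m_rows, other.m_rows);
        std::swap(m_cols, other.m_cols);
      }

     private:
      std::vector<Scalar> m_data;
      std::ptrdiff_t m_rows;
      std::ptrdiff_t m_cols;
    };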
|
D | MapBase.h |
    89   EIGEN_DEVICE_FUNC inline Index rows() const { return m_rows.value(); }   in rows()
    149  …explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(…   in MapBase()
    159  m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),   in MapBase()
    171  : m_data(dataPtr), m_rows(rows), m_cols(cols)   in MapBase()
    201  const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;   variable
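MapBase stores m_rows as internal::variable_if_dynamic<Index, RowsAtCompileTime> (line 201), so a fixed-size map pays no storage for the count while a dynamic one keeps it at runtime. A simplified sketch of that idea (the VariableIfDynamic type below is a hypothetical stand-in, not Eigen's internal class):

    #include <cassert>

    constexpr int kDynamic = -1;

    // Fixed-extent case: the value is a compile-time constant, no member needed.
    template <typename T, int Value>
    struct VariableIfDynamic {
      explicit VariableIfDynamic(T v) { assert(v == Value); (void)v; }
      static constexpr T value() { return Value; }
    };

    // Dynamic case: the value is stored and read back at runtime.
    template <typename T>
    struct VariableIfDynamic<T, kDynamic> {
      explicit VariableIfDynamic(T v) : m_value(v) {}
      T value() const { return m_value; }
     private:
      T m_value;
    };

    // Usage:
    //   VariableIfDynamic<long, 3> fixed(3);       // fixed.value() == 3, zero-size payload
    //   VariableIfDynamic<long, kDynamic> dyn(7);  // dyn.value() == 7, stored at runtime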
|
D | BandMatrix.h |
    209  m_rows(rows), m_supers(supers), m_subs(subs)
    214  inline Index rows() const { return m_rows.value(); }
    231  internal::variable_if_dynamic<Index, Rows> m_rows;
    271  m_rows(rows), m_supers(supers), m_subs(subs)
    278  inline Index rows() const { return m_rows.value(); }
    294  internal::variable_if_dynamic<Index, _Rows> m_rows;
|
D | CoreEvaluators.h |
    1191  m_rows(replicate.nestedExpression().rows()),
    1201  : row % m_rows.value();
    1215  : (RowFactor==1 ? index : index%m_rows.value());
    1226  : row % m_rows.value();
    1240  : (RowFactor==1 ? index : index%m_rows.value());
    1248  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
    1448  m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
    1455  return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
    1462  return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
    1468  return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
    [all …]
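These evaluator matches show two index remappings: the replicate evaluator folds an output row back into the nested expression with row % m_rows (lines 1201, 1226), and the reverse evaluator mirrors it as m_rows - row - 1 (lines 1455, 1468). A standalone restatement of both mappings (hypothetical helper functions, not the evaluator code itself):

    #include <cstddef>

    // Replicate: output row 7 of a 3-row argument tiled 3x maps to argument row 1.
    inline std::ptrdiff_t replicate_arg_row(std::ptrdiff_t row, std::ptrdiff_t arg_rows) {
      return row % arg_rows;
    }

    // Reverse: the first output row reads the last argument row, and so on.
    inline std::ptrdiff_t reverse_arg_row(std::ptrdiff_t row, std::ptrdiff_t arg_rows) {
      return arg_rows - row - 1;
    }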
|
D | CwiseNullaryOp.h |
    69  : m_rows(rows), m_cols(cols), m_functor(func)
    78  EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
    87  const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
|
/external/python/pybind11/tests/ |
D | test_buffers.cpp |
    18  Matrix(py::ssize_t rows, py::ssize_t cols) : m_rows(rows), m_cols(cols) {   in TEST_SUBMODULE()
    19  print_created(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");   in TEST_SUBMODULE()
    24  Matrix(const Matrix &s) : m_rows(s.m_rows), m_cols(s.m_cols) {   in TEST_SUBMODULE()
    25  … print_copy_created(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");   in TEST_SUBMODULE()
    26  m_data = new float[(size_t) (m_rows * m_cols)];   in TEST_SUBMODULE()
    27  memcpy(m_data, s.m_data, sizeof(float) * (size_t) (m_rows * m_cols));   in TEST_SUBMODULE()
    30  Matrix(Matrix &&s) : m_rows(s.m_rows), m_cols(s.m_cols), m_data(s.m_data) {   in TEST_SUBMODULE()
    32  s.m_rows = 0;   in TEST_SUBMODULE()
    38  … print_destroyed(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");   in TEST_SUBMODULE()
    43  … print_copy_assigned(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");   in TEST_SUBMODULE()
    [all …]
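The test's Matrix type owns a raw float buffer: the copy constructor deep-copies it (lines 24-27) and the move constructor steals it, zeroing the source's m_rows/m_cols/m_data (lines 30-32). A condensed sketch of that ownership pattern, with the pybind11 buffer registration and the print_* logging helpers from the test left out:

    #include <cstddef>
    #include <cstring>
    #include <utility>

    class Matrix {
     public:
      Matrix(long rows, long cols)
          : m_rows(rows), m_cols(cols),
            m_data(new float[static_cast<std::size_t>(rows * cols)]{}) {}

      Matrix(const Matrix& s)  // deep copy, as at lines 24-27 above
          : m_rows(s.m_rows), m_cols(s.m_cols),
            m_data(new float[static_cast<std::size_t>(s.m_rows * s.m_cols)]) {
        std::memcpy(m_data, s.m_data,
                    sizeof(float) * static_cast<std::size_t>(m_rows * m_cols));
      }

      Matrix(Matrix&& s) noexcept  // steal the buffer, zero out the source
          : m_rows(s.m_rows), m_cols(s.m_cols), m_data(s.m_data) {
        s.m_rows = 0;
        s.m_cols = 0;
        s.m_data = nullptr;
      }

      ~Matrix() { delete[] m_data; }

      Matrix& operator=(Matrix s) {  // copy-and-swap covers both assignments
        std::swap(m_rows, s.m_rows);
        std::swap(m_cols, s.m_cols);
        std::swap(m_data, s.m_data);
        return *this;
      }

      long rows() const { return m_rows; }
      long cols() const { return m_cols; }
      float* data() { return m_data; }

     private:
      long m_rows, m_cols;
      float* m_data;
    };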
|
/external/pdfium/third_party/agg23/ |
D | agg_rendering_buffer.h |
    46   FX_Free(m_rows);   in ~rendering_buffer()
    50   m_rows(0),   in rendering_buffer()
    59   m_rows(0),   in rendering_buffer()
    74   FX_Free(m_rows);   in attach()
    75   m_rows = FX_Alloc(int8u*, m_max_height = height);   in attach()
    81   int8u** rows = m_rows;   in attach()
    115  return m_rows[y];   in row()
    119  return m_rows[y];   in row()
    131  return m_rows;   in rows()
    138  int8u** m_rows;   variable
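rendering_buffer keeps m_rows as an array of per-scanline pointers rebuilt in attach(), so row(y) is a single indexed load. A simplified illustration of the same idea using std::vector instead of FX_Alloc/FX_Free, and assuming a non-negative stride (the real class also supports bottom-up buffers via a negative stride, omitted here):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class RowBuffer {
     public:
      // Precompute one pointer per scanline; stride is assumed non-negative here.
      void attach(std::uint8_t* buf, unsigned width, unsigned height, int stride) {
        m_buf = buf;
        m_width = width;
        m_height = height;
        m_rows.assign(height, nullptr);
        for (unsigned y = 0; y < height; ++y) {
          m_rows[y] = buf + static_cast<std::ptrdiff_t>(y) * stride;
        }
      }

      std::uint8_t* row(unsigned y) { return m_rows[y]; }  // one indexed load

     private:
      std::uint8_t* m_buf = nullptr;
      unsigned m_width = 0, m_height = 0;
      std::vector<std::uint8_t*> m_rows;  // per-scanline pointer table
    };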
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | sse_tensor_utils.cc |
    97   const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in SseMatrixBatchVectorMultiplyAccumulateImpl() argument
    106  for (std::intptr_t row = 0; row < m_rows; ++row) {   in SseMatrixBatchVectorMultiplyAccumulateImpl()
    209  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in SseMatrixBatchVectorMultiplyAccumulate() argument
    214  matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,   in SseMatrixBatchVectorMultiplyAccumulate()
    220  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in SseMatrixBatchVectorMultiplyAccumulate() argument
    224  if (m_rows % 4 == 0) {   in SseMatrixBatchVectorMultiplyAccumulate()
    226  SseCpuBackendGemm(vectors, bias, matrix, n_batch, m_cols, m_rows,   in SseMatrixBatchVectorMultiplyAccumulate()
    232  const int total_size = n_batch * m_rows;   in SseMatrixBatchVectorMultiplyAccumulate()
    235  const float batch_scaling_factor0 = scaling_factors[i / m_rows];   in SseMatrixBatchVectorMultiplyAccumulate()
    236  const float batch_scaling_factor1 = scaling_factors[(i + 4) / m_rows];   in SseMatrixBatchVectorMultiplyAccumulate()
    [all …]
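In the hybrid path above, results are laid out as n_batch consecutive blocks of m_rows values, which is why scaling_factors[i / m_rows] recovers the per-batch scale for a flattened index (lines 232-236). A hedged sketch of that scale-and-accumulate step as a standalone helper (hypothetical function, not the TFLite source):

    #include <cstdint>

    // For flattened index i, the owning batch is i / m_rows; scale the raw
    // integer accumulator by that batch's factor and accumulate into result.
    void ScaleAndAccumulate(const std::int32_t* raw, const float* scaling_factors,
                            int n_batch, int m_rows, float* result) {
      const int total_size = n_batch * m_rows;
      for (int i = 0; i < total_size; ++i) {
        const float batch_scale = scaling_factors[i / m_rows];
        result[i] += batch_scale * static_cast<float>(raw[i]);
      }
    }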
|
D | sse_tensor_utils.h |
    37  void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,   in MatrixBatchVectorMultiplyAccumulate() argument
    40  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,   in MatrixBatchVectorMultiplyAccumulate()
    45  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    49  SSE_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,   in MatrixBatchVectorMultiplyAccumulate()
    54  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    59  SSE_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,   in MatrixBatchVectorMultiplyAccumulate()
    65  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    70  SSE_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,   in MatrixBatchVectorMultiplyAccumulate()
    76  const int32_t* __restrict__ indices, int m_rows, int m_cols,   in SparseMatrixBatchVectorMultiplyAccumulate1x4() argument
    79  segments, indices, m_rows, m_cols, vector, n_batch, result);   in SparseMatrixBatchVectorMultiplyAccumulate1x4()
    [all …]
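The header wrappers above simply forward every call through SSE_OR_PORTABLE / NEON_OR_PORTABLE so callers get the fastest kernel available on the host. A hypothetical illustration of that dispatch idea with invented names (DetectSse41, MatVec, SseMatVec, PortableMatVec); this is not the actual TFLite macro definition:

    // Assumed to be implemented elsewhere (e.g. via cpuid); invented for this sketch.
    bool DetectSse41();

    void PortableMatVec(const float* matrix, int m_rows, int m_cols,
                        const float* vector, int n_batch, float* result);
    void SseMatVec(const float* matrix, int m_rows, int m_cols,
                   const float* vector, int n_batch, float* result);

    // Thin wrapper: pick the optimized kernel when the CPU supports it,
    // otherwise fall back to the portable reference implementation.
    inline void MatVec(const float* matrix, int m_rows, int m_cols,
                       const float* vector, int n_batch, float* result) {
      if (DetectSse41()) {
        SseMatVec(matrix, m_rows, m_cols, vector, n_batch, result);
      } else {
        PortableMatVec(matrix, m_rows, m_cols, vector, n_batch, result);
      }
    }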
|
D | neon_tensor_utils.h |
    27  void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,   in MatrixBatchVectorMultiplyAccumulate() argument
    30  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,   in MatrixBatchVectorMultiplyAccumulate()
    35  const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    40  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,   in MatrixBatchVectorMultiplyAccumulate()
    45  const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    51  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,   in MatrixBatchVectorMultiplyAccumulate()
    56  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    61  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,   in MatrixBatchVectorMultiplyAccumulate()
    68  const int32_t* __restrict__ indices, int m_rows, int m_cols,   in SparseMatrixBatchVectorMultiplyAccumulate1x4() argument
    71  segments, indices, m_rows, m_cols, vector, n_batch, result);   in SparseMatrixBatchVectorMultiplyAccumulate1x4()
    [all …]
|
D | neon_tensor_utils.cc |
    200  void NeonMatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,   in NeonMatrixBatchVectorMultiplyAccumulate() argument
    210  float* result_in_batch = result + b * m_rows;   in NeonMatrixBatchVectorMultiplyAccumulate()
    215  for (int r = 0; r < m_rows; r++) {   in NeonMatrixBatchVectorMultiplyAccumulate()
    305  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
    313  for (int row = 0; row < m_rows; row += 2) {   in DotprodMatrixBatchFourVectorMultiplyAccumulate()
    315  float* result_ptr = result + (batch * m_rows) + row;   in DotprodMatrixBatchFourVectorMultiplyAccumulate()
    321  const uint64_t wide_rows = m_rows * sizeof(float);   in DotprodMatrixBatchFourVectorMultiplyAccumulate()
    433  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
    441  for (int row = 0; row < m_rows; row += 2) {   in DotprodMatrixBatchFourVectorMultiplyAccumulate()
    445  float* result_ptr = result + (batch * m_rows) + row;   in DotprodMatrixBatchFourVectorMultiplyAccumulate()
    [all …]
|
D | neon_tensor_utils_impl.h |
    32   void NeonMatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
    38   const int m_rows, const int m_cols,
    47   const int m_rows, const int m_cols,
    56   const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    108  const int32_t* __restrict__ indices, int m_rows, int m_cols,
    115  int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    121  const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
|
D | sse_tensor_utils_impl.h |
    33  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    41  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    48  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    59  const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,
|
/external/eigen/debug/msvc/ |
D | eigen_autoexp_part.dat |
    127  rows: $c.m_storage.m_rows,
    134  …_storage.m_data)[($i % $c.m_storage.m_rows)*$c.m_storage.m_cols + (($i- $i % $c.m_storage.m_rows)/…
    135  size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols
    142  size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols
    152  $c.m_storage.m_rows,
    158  size : $c.m_storage.m_rows*$c.m_storage.m_cols
    215  rows: $c.m_storage.m_rows,
    222  …storage.m_data)[($i % $c.m_storage.m_rows)*$c.ColsAtCompileTime + (($i- $i % $c.m_storage.m_rows)/…
    223  size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime
    230  size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime
    [all …]
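The watch expressions at lines 134 and 222 turn the visualizer's flat element counter into a storage offset. Restated as plain C++ for readability (an illustration of the arithmetic only, not part of the .dat file):

    // The counter i is split into row = i % m_rows and col = i / m_rows
    // (written as (i - i % m_rows) / m_rows in the .dat syntax), and the
    // element is then read at the row-major offset row * cols + col.
    inline long AutoexpOffset(long i, long m_rows, long m_cols) {
      const long row = i % m_rows;
      const long col = (i - i % m_rows) / m_rows;  // same value as i / m_rows
      return row * m_cols + col;
    }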
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | portable_tensor_utils.h |
    58   void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,   in MatrixBatchVectorMultiplyAccumulate() argument
    61   PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector,   in MatrixBatchVectorMultiplyAccumulate()
    66   const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    71   PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector,   in MatrixBatchVectorMultiplyAccumulate()
    76   const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    82   matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,   in MatrixBatchVectorMultiplyAccumulate()
    88   const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    94   PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector,   in MatrixBatchVectorMultiplyAccumulate()
    100  const int32_t* __restrict__ indices, int m_rows, int m_cols,   in SparseMatrixBatchVectorMultiplyAccumulate1x4() argument
    103  matrix, segments, indices, m_rows, m_cols, vector, n_batch, result);   in SparseMatrixBatchVectorMultiplyAccumulate1x4()
    [all …]
|
D | portable_tensor_utils_impl.h |
    58  int m_rows, int m_cols,
    63  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    68  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    75  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    82  const int32_t* __restrict__ indices, int m_rows, int m_cols,
    87  int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    91  const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
|
D | portable_tensor_utils.cc |
    120  int m_rows, int m_cols,   in PortableMatrixBatchVectorMultiplyAccumulate() argument
    126  for (int r = 0; r < m_rows; r++) {   in PortableMatrixBatchVectorMultiplyAccumulate()
    139  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in PortableMatrixBatchVectorMultiplyAccumulate() argument
    146  for (int row = 0; row < m_rows; ++row) {   in PortableMatrixBatchVectorMultiplyAccumulate()
    164  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in PortableMatrixBatchVectorMultiplyAccumulate() argument
    171  matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result);   in PortableMatrixBatchVectorMultiplyAccumulate()
    175  PortableReductionSumVector(matrix, row_sums, m_rows, m_cols);   in PortableMatrixBatchVectorMultiplyAccumulate()
    185  for (int row = 0; row < m_rows; ++row) {   in PortableMatrixBatchVectorMultiplyAccumulate()
    208  const int32_t* __restrict__ indices, int m_rows, int m_cols,   in PortableSparseMatrixBatchVectorMultiplyAccumulate1x4() argument
    214  for (int row = 0; row < m_rows; row++) {   in PortableSparseMatrixBatchVectorMultiplyAccumulate1x4()
    [all …]
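The float overload above (lines 120-126) walks every batch and every matrix row and accumulates a dot product into result[b * m_rows + r]. A plain restatement of those reference semantics (for illustration; not a verbatim copy of portable_tensor_utils.cc):

    // For each batch b and matrix row r:
    //   result[b * m_rows + r] += dot(matrix row r, vector of batch b)
    // Note the +=: the kernel accumulates into result, it does not overwrite it.
    void ReferenceMatrixBatchVectorMultiplyAccumulate(
        const float* matrix, int m_rows, int m_cols,
        const float* vector, int n_batch, float* result) {
      for (int b = 0; b < n_batch; ++b) {
        const float* vector_in_batch = vector + b * m_cols;
        float* result_in_batch = result + b * m_rows;
        for (int r = 0; r < m_rows; ++r) {
          const float* row_ptr = matrix + r * m_cols;
          float acc = 0.0f;
          for (int c = 0; c < m_cols; ++c) {
            acc += row_ptr[c] * vector_in_batch[c];
          }
          result_in_batch[r] += acc;
        }
      }
    }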
|
/external/eigen/Eigen/src/SVD/ |
D | SVDBase.h |
    192  inline Index rows() const { return m_rows; }   in rows()
    236  Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;   variable
    248  m_rows(-1), m_cols(-1), m_diagSize(0)   in SVDBase()
    280  rows == m_rows &&   in allocate()
    287  m_rows = rows;   in allocate()
    301  m_diagSize = (std::min)(m_rows, m_cols);   in allocate()
    304  m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0);   in allocate()
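allocate() caches the last problem shape in m_rows/m_cols and returns early when the same decomposition size is requested again (lines 280-287), only then recomputing m_diagSize and resizing U. A minimal sketch of that caching pattern (illustrative types and names, not Eigen's SVDBase):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    class SvdWorkspace {
     public:
      // Returns true if buffers were (re)allocated, false if the cached ones fit.
      bool allocate(long rows, long cols, bool compute_full_u) {
        if (rows == m_rows && cols == m_cols && compute_full_u == m_full_u) {
          return false;  // same problem shape: reuse the existing workspace
        }
        m_rows = rows;
        m_cols = cols;
        m_full_u = compute_full_u;
        m_diag_size = std::min(rows, cols);
        m_singular_values.assign(static_cast<std::size_t>(m_diag_size), 0.0);
        // Full U is rows x rows; thin U is rows x min(rows, cols).
        const long u_cols = compute_full_u ? rows : m_diag_size;
        m_u.assign(static_cast<std::size_t>(rows * u_cols), 0.0);
        return true;
      }

     private:
      long m_rows = -1, m_cols = -1, m_diag_size = 0;
      bool m_full_u = false;
      std::vector<double> m_singular_values, m_u;
    };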
|
D | JacobiSVD.h |
    596  using Base::m_rows;
    618  rows == m_rows &&
    625  m_rows = rows;
    644  m_diagSize = (std::min)(m_rows, m_cols);
    647  m_matrixU.resize(m_rows, m_computeFullU ? m_rows
    656  if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
    657  if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
    658  if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
    681  if(m_rows!=m_cols)
    690  if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
    [all …]
|
D | JacobiSVD_LAPACKE.h |
    71  …X##gesvd( matrix_order, jobu, jobvt, internal::convert_index<lapack_int>(m_rows), internal::conver…
|
/external/eigen/doc/examples/ |
D | make_circulant.cpp.evaluator |
    18  : m_argImpl(xpr.m_arg), m_rows(xpr.rows())
    24  if (index < 0) index += m_rows;
    29  const Index m_rows;
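This evaluator from the make_circulant example computes a row-minus-column index and wraps it by adding m_rows when it goes negative (line 24), so every entry of the circulant matrix is read from its generating vector. A standalone restatement of that lookup (illustrative helper, not the expression-template code):

    #include <cstddef>
    #include <vector>

    // Entry (row, col) of a circulant matrix built from first_column is
    // first_column[(row - col) mod rows], with the wrap done by hand.
    double circulant_coeff(const std::vector<double>& first_column,
                           long row, long col) {
      long index = row - col;
      if (index < 0) index += static_cast<long>(first_column.size());  // wrap into [0, rows)
      return first_column[static_cast<std::size_t>(index)];
    }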
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | tensor_utils.h |
    43  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    51  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    61  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,   in MatrixBatchVectorMultiplyAccumulate() argument
    72  MatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors,   in MatrixBatchVectorMultiplyAccumulate()
|
D | tensor_utils_common.h |
    88   void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
    98   const int32_t* __restrict__ indices, int m_rows, int m_cols,
    113  int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    124  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    133  const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    151  const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,
|
/external/eigen/Eigen/src/SPQRSupport/ |
D | SuiteSparseQRSupport.h |
    124  m_rows = matrix.rows();
    142  inline Index rows() const {return m_rows; }
    248  Index m_rows;
|
/external/OpenCL-CTS/test_conformance/spir/ |
D | run_services.cpp |
    318  return m_rows.size();   in getNumRows()
    323  m_rows.push_back(dr);   in addTableRow()
    328  assert((index > -1 && (size_t)index < m_rows.size()) && "Index out of bound");   in operator []()
    329  return *m_rows[index];   in operator []()
    334  assert((index > -1 && (size_t)index < m_rows.size()) && "Index out of bound");   in operator []()
    335  return *m_rows[index];   in operator []()
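Here m_rows is a container of row records: getNumRows() reports its size, addTableRow() appends, and both operator[] overloads assert that the signed index is in range before dereferencing the stored row. A simplified sketch of that access pattern with hypothetical row/table types (the real test harness defines its own row record type):

    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <string>
    #include <vector>

    struct DataRow {
      std::vector<std::string> cells;
    };

    class DataTable {
     public:
      std::size_t getNumRows() const { return m_rows.size(); }

      void addTableRow(std::unique_ptr<DataRow> row) {
        m_rows.push_back(std::move(row));
      }

      // Signed index is validated before the stored row is dereferenced.
      const DataRow& operator[](int index) const {
        assert(index > -1 && static_cast<std::size_t>(index) < m_rows.size() &&
               "Index out of bound");
        return *m_rows[static_cast<std::size_t>(index)];
      }

     private:
      std::vector<std::unique_ptr<DataRow>> m_rows;
    };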
|