
Searched refs:m_rows (Results 1 – 19 of 19) sorted by relevance

/external/eigen/Eigen/src/Core/
DenseStorage.h 251 Index m_rows;
254 EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}
256 : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
257 …NC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(ot…
263 m_rows = other.m_rows;
268 EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {}
270 …{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
271 EIGEN_DEVICE_FUNC Index rows() const {return m_rows;}
273 …EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols =…
274 EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; }
[all …]
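
The DenseStorage hits above come from the fully dynamic specialization, where the row and column counts are plain runtime members kept next to the data pointer, and resize/swap simply update them. A minimal sketch of that pattern, with a simplified heap-allocated float buffer standing in for Eigen's aligned storage (illustrative only, not the real class):

    #include <cstddef>
    #include <utility>

    // Simplified dynamic-size dense storage: the dimensions are runtime
    // members (m_rows, m_cols) sitting alongside the data pointer.
    struct DynamicStorage {
      float*      m_data = nullptr;
      std::size_t m_rows = 0;
      std::size_t m_cols = 0;

      DynamicStorage() = default;
      DynamicStorage(const DynamicStorage&) = delete;             // keep the sketch single-owner
      DynamicStorage& operator=(const DynamicStorage&) = delete;

      void resize(std::size_t rows, std::size_t cols) {
        delete[] m_data;                   // the real code reuses/aligns memory
        m_data = new float[rows * cols];
        m_rows = rows;
        m_cols = cols;
      }

      void swap(DynamicStorage& other) {   // mirrors the swap shown in the hits above
        std::swap(m_data, other.m_data);
        std::swap(m_rows, other.m_rows);
        std::swap(m_cols, other.m_cols);
      }

      std::size_t rows() const { return m_rows; }
      std::size_t cols() const { return m_cols; }

      ~DynamicStorage() { delete[] m_data; }
    };
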
MapBase.h 89 EIGEN_DEVICE_FUNC inline Index rows() const { return m_rows.value(); } in rows()
149 …explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(… in MapBase()
159 m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)), in MapBase()
171 : m_data(dataPtr), m_rows(rows), m_cols(cols) in MapBase()
201 const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows; variable
BandMatrix.h 209 m_rows(rows), m_supers(supers), m_subs(subs)
214 inline Index rows() const { return m_rows.value(); }
231 internal::variable_if_dynamic<Index, Rows> m_rows;
271 m_rows(rows), m_supers(supers), m_subs(subs)
278 inline Index rows() const { return m_rows.value(); }
294 internal::variable_if_dynamic<Index, _Rows> m_rows;
CoreEvaluators.h 1191 m_rows(replicate.nestedExpression().rows()),
1201 : row % m_rows.value();
1215 : (RowFactor==1 ? index : index%m_rows.value());
1226 : row % m_rows.value();
1240 : (RowFactor==1 ? index : index%m_rows.value());
1248 const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
1448 m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
1455 return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
1462 return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
1468 return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
[all …]
CwiseNullaryOp.h 69 : m_rows(rows), m_cols(cols), m_functor(func)
78 EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
87 const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
/external/pdfium/third_party/agg23/
agg_rendering_buffer.h 46 FX_Free(m_rows); in ~rendering_buffer()
50 m_rows(0), in rendering_buffer()
59 m_rows(0), in rendering_buffer()
74 FX_Free(m_rows); in attach()
75 m_rows = FX_Alloc(int8u*, m_max_height = height); in attach()
81 int8u** rows = m_rows; in attach()
115 return m_rows[y]; in row()
119 return m_rows[y]; in row()
131 return m_rows; in rows()
138 int8u** m_rows; variable
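
In the agg23 rendering_buffer hits, m_rows is not a matrix dimension but a table of per-scanline pointers: attach() allocates one int8u* per row of the pixel buffer and row(y) simply returns m_rows[y]. A rough sketch of that row-pointer-table idea (simplified; std::vector instead of pdfium's FX_Alloc, and no negative-stride handling):

    #include <cstdint>
    #include <vector>

    // Simplified row-pointer table: instead of computing y * stride on every
    // access, precompute a pointer to the start of each scanline once.
    class RenderingBuffer {
     public:
      void attach(std::uint8_t* buf, unsigned width, unsigned height, int stride) {
        m_buf = buf;
        m_width = width;
        m_height = height;
        m_rows.resize(height);
        std::uint8_t* p = buf;
        for (unsigned y = 0; y < height; ++y) {
          m_rows[y] = p;   // pointer to the first byte of scanline y
          p += stride;     // stride may exceed width * bytes-per-pixel
        }
      }

      std::uint8_t* row(unsigned y) { return m_rows[y]; }

     private:
      std::uint8_t* m_buf = nullptr;
      unsigned m_width = 0, m_height = 0;
      std::vector<std::uint8_t*> m_rows;  // one entry per scanline
    };
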
/external/eigen/debug/msvc/
eigen_autoexp_part.dat 127 rows: $c.m_storage.m_rows,
134 …_storage.m_data)[($i % $c.m_storage.m_rows)*$c.m_storage.m_cols + (($i- $i % $c.m_storage.m_rows)/…
135 size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols
142 size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols
152 $c.m_storage.m_rows,
158 size : $c.m_storage.m_rows*$c.m_storage.m_cols
215 rows: $c.m_storage.m_rows,
222 …storage.m_data)[($i % $c.m_storage.m_rows)*$c.ColsAtCompileTime + (($i- $i % $c.m_storage.m_rows)/…
223 size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime
230 size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h 41 int m_rows, int m_cols,
47 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
52 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
56 const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
161 void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows, in MatrixBatchVectorMultiplyAccumulate() argument
165 PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, in MatrixBatchVectorMultiplyAccumulate()
170 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, in MatrixBatchVectorMultiplyAccumulate() argument
173 PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, in MatrixBatchVectorMultiplyAccumulate()
179 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols, in SparseMatrixBatchVectorMultiplyAccumulate() argument
182 matrix, ledger, m_rows, m_cols, vector, n_batch, result, result_stride); in SparseMatrixBatchVectorMultiplyAccumulate()
[all …]
portable_tensor_utils.cc 69 int m_rows, int m_cols, in PortableMatrixBatchVectorMultiplyAccumulate() argument
76 for (int r = 0; r < m_rows; r++) { in PortableMatrixBatchVectorMultiplyAccumulate()
89 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, in PortableMatrixBatchVectorMultiplyAccumulate() argument
97 for (row = 0; row < m_rows; ++row, result += result_stride) { in PortableMatrixBatchVectorMultiplyAccumulate()
114 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols, in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
123 for (int r = 0; r < m_rows; r++) { in PortableSparseMatrixBatchVectorMultiplyAccumulate()
144 const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows, in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
157 for (row = 0; row < m_rows; ++row, result += result_stride) { in PortableSparseMatrixBatchVectorMultiplyAccumulate()
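
The reference (portable) hits show the fallback loop for MatrixBatchVectorMultiplyAccumulate: m_rows and m_cols are the matrix dimensions, and for every batch the routine accumulates one dot product per matrix row into the output, stepping by result_stride. A free-standing sketch of that loop structure, using the parameter names from the hits (not the library's exact signature):

    // Accumulate matrix (m_rows x m_cols, row-major) times each batch's vector
    // into result, advancing the output pointer by result_stride between rows.
    void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
                                             int m_cols, const float* vectors,
                                             int n_batch, float* result,
                                             int result_stride) {
      float* result_in_batch = result;
      for (int b = 0; b < n_batch; ++b) {
        const float* matrix_ptr = matrix;
        const float* vector_in_batch = vectors + b * m_cols;
        for (int r = 0; r < m_rows; ++r) {
          float dot = 0.0f;
          for (int c = 0; c < m_cols; ++c) {
            dot += *matrix_ptr++ * vector_in_batch[c];
          }
          *result_in_batch += dot;           // accumulate, do not overwrite
          result_in_batch += result_stride;  // next output slot
        }
      }
    }

The NEON and dotprod hits under optimized/ below implement the same contract with vectorized inner loops.
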
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
tensor_utils_impl.h 38 int m_rows, int m_cols,
42 void NeonMatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
49 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
53 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
58 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
61 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
66 const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
71 const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
neon_tensor_utils.h 27 void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows, in MatrixBatchVectorMultiplyAccumulate() argument
31 NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols, in MatrixBatchVectorMultiplyAccumulate()
36 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, in MatrixBatchVectorMultiplyAccumulate() argument
39 NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols, in MatrixBatchVectorMultiplyAccumulate()
44 const float* matrix, const uint8_t* ledger, const int m_rows, in SparseMatrixBatchVectorMultiplyAccumulate() argument
48 matrix, ledger, m_rows, m_cols, vector, n_batch, result, result_stride); in SparseMatrixBatchVectorMultiplyAccumulate()
52 const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows, in SparseMatrixBatchVectorMultiplyAccumulate() argument
56 NeonSparseMatrixBatchVectorMultiplyAccumulate(matrix, ledger, m_rows, m_cols, in SparseMatrixBatchVectorMultiplyAccumulate()
neon_tensor_utils.cc 97 void NeonMatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows, in NeonMatrixBatchVectorMultiplyAccumulate() argument
108 float* result_in_batch = result + b * m_rows * result_stride; in NeonMatrixBatchVectorMultiplyAccumulate()
113 for (int r = 0; r < m_rows; r++) { in NeonMatrixBatchVectorMultiplyAccumulate()
197 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
205 for (int row = 0; row < m_rows; row += 2) { in DotprodMatrixBatchFourVectorMultiplyAccumulate()
207 float* result_ptr = result + (batch * m_rows) + row; in DotprodMatrixBatchFourVectorMultiplyAccumulate()
213 const uint64_t wide_rows = m_rows * sizeof(float); in DotprodMatrixBatchFourVectorMultiplyAccumulate()
314 const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows, in DotprodSparseMatrixBatchVectorMultiplyAccumulate() argument
321 for (int row = 0; row < m_rows; row++) { in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
371 result[(batch * m_rows + row) * result_stride] += in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
[all …]
/external/eigen/Eigen/src/SVD/
SVDBase.h 192 inline Index rows() const { return m_rows; } in rows()
236 Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; variable
248 m_rows(-1), m_cols(-1), m_diagSize(0) in SVDBase()
280 rows == m_rows && in allocate()
287 m_rows = rows; in allocate()
301 m_diagSize = (std::min)(m_rows, m_cols); in allocate()
304 m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0); in allocate()
JacobiSVD.h 596 using Base::m_rows;
618 rows == m_rows &&
625 m_rows = rows;
644 m_diagSize = (std::min)(m_rows, m_cols);
647 m_matrixU.resize(m_rows, m_computeFullU ? m_rows
656 if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
657 if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
658 if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
681 if(m_rows!=m_cols)
690 if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
[all …]
JacobiSVD_LAPACKE.h 71 …X##gesvd( matrix_order, jobu, jobvt, internal::convert_index<lapack_int>(m_rows), internal::conver…
/external/tensorflow/tensorflow/lite/kernels/internal/
tensor_utils.h 53 void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
70 const float* matrix, const uint8_t* ledger, int m_rows, int m_cols,
81 const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
97 const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
/external/eigen/doc/examples/
make_circulant.cpp.evaluator 18 : m_argImpl(xpr.m_arg), m_rows(xpr.rows())
24 if (index < 0) index += m_rows;
29 const Index m_rows;
/external/eigen/Eigen/src/SPQRSupport/
SuiteSparseQRSupport.h 124 m_rows = matrix.rows();
142 inline Index rows() const {return m_rows; }
248 Index m_rows;
/external/eigen/doc/
InsideEigenExample.dox 99 inline DenseStorage(int size, int rows, int) : m_data(internal::aligned_new<T>(size)), m_rows(rows)…
104 As you can see, the constructor also sets the \a m_rows member to \a size. Notice that there is no …
108 …ctorXf::rows(), which returns DenseStorage::rows(), which returns the \a m_rows member, which was …
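
The InsideEigenExample.dox excerpt describes the chain VectorXf::rows() → DenseStorage::rows() → m_rows, where the constructor recorded the size. A toy illustration of that layering (hypothetical ToyStorage/ToyVector names, only to show the forwarding):

    #include <iostream>

    // Toy version of the layering the documentation describes: the storage
    // class owns m_rows, and the user-facing type forwards rows() to it.
    struct ToyStorage {
      int m_rows = 0;
      explicit ToyStorage(int rows) : m_rows(rows) {}  // constructor records the size
      int rows() const { return m_rows; }
    };

    struct ToyVector {
      ToyStorage m_storage;
      explicit ToyVector(int size) : m_storage(size) {}
      int rows() const { return m_storage.rows(); }    // forwards to storage
    };

    int main() {
      ToyVector v(50);
      std::cout << v.rows() << "\n";  // prints 50, read straight from m_rows
    }
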