/external/ceres-solver/internal/ceres/
compressed_row_sparse_matrix_test.cc
   83  vector<int>* row_blocks = crsm->mutable_row_blocks();  in SetUp() local
   84  row_blocks->resize(num_rows);  in SetUp()
   85  std::fill(row_blocks->begin(), row_blocks->end(), 1);  in SetUp()
  180  const vector<int> pre_row_blocks = crsm->row_blocks();  in TEST_F()
  186  LOG(INFO) << appendage->row_blocks().size();  in TEST_F()
  190  const vector<int> post_row_blocks = crsm->row_blocks();  in TEST_F()
  200  EXPECT_EQ(expected_row_blocks, crsm->row_blocks());  in TEST_F()
  204  EXPECT_EQ(crsm->row_blocks(), pre_row_blocks);  in TEST_F()
  255  EXPECT_EQ(blocks, matrix->row_blocks());  in TEST()
  391  ASSERT_EQ(transpose->row_blocks().size(), matrix.col_blocks().size());  in TEST()
  [all …]

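These test hits all lean on the same invariant: row_blocks partitions the matrix rows into contiguous blocks whose sizes sum to the scalar row count (the SetUp() hits build the trivial all-ones partition), and transposition swaps the row and column partitions. A minimal standalone sketch of that invariant, using plain std::vector rather than the Ceres types; RowBlocksArePartitionOf is an illustrative name, not a Ceres helper:

#include <cassert>
#include <numeric>
#include <vector>

// Checks the partition invariant the tests rely on: the row block sizes
// must sum to the scalar row count of the matrix.
bool RowBlocksArePartitionOf(const std::vector<int>& row_blocks, int num_rows) {
  return std::accumulate(row_blocks.begin(), row_blocks.end(), 0) == num_rows;
}

int main() {
  const int num_rows = 5;
  // The SetUp() hits above build the trivial partition: one size-1 block
  // per scalar row.
  std::vector<int> row_blocks(num_rows, 1);
  assert(RowBlocksArePartitionOf(row_blocks, num_rows));

  // A coarser partition of the same 5 rows, like the {1, 2, 2} partition
  // pushed in compressed_col_sparse_matrix_utils_test.cc below.
  std::vector<int> blocks = {1, 2, 2};
  assert(RowBlocksArePartitionOf(blocks, num_rows));
  return 0;
}
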
compressed_col_sparse_matrix_utils_test.cc
   87  int FillBlock(const vector<int>& row_blocks,  in FillBlock() argument
   95  row_pos += row_blocks[i];  in FillBlock()
  104  for (int r = 0; r < row_blocks[row_block_id]; ++r) {  in FillBlock()
  128  vector<int> row_blocks;  in TEST() local
  129  row_blocks.push_back(1);  in TEST()
  130  row_blocks.push_back(2);  in TEST()
  131  row_blocks.push_back(2);  in TEST()
  140  offset += FillBlock(row_blocks, col_blocks, \  in TEST()
  177  row_blocks,  in TEST()

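The FillBlock() hits show the standard way to go from a block id to a scalar row: accumulate the sizes of the preceding blocks. A self-contained sketch of that prefix-sum idiom; RowBlockStart is a hypothetical name, not the test's helper:

#include <iostream>
#include <vector>

// The scalar row at which a given row block starts is the prefix sum of
// the sizes of the blocks before it (the row_pos accumulation at line 95).
int RowBlockStart(const std::vector<int>& row_blocks, int row_block_id) {
  int row_pos = 0;
  for (int i = 0; i < row_block_id; ++i) {
    row_pos += row_blocks[i];
  }
  return row_pos;
}

int main() {
  // The same {1, 2, 2} partition built in the TEST() hits above.
  const std::vector<int> row_blocks = {1, 2, 2};
  std::cout << RowBlockStart(row_blocks, 2) << "\n";  // prints 3 (= 1 + 2)
  return 0;
}
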
compressed_row_sparse_matrix.cc
  235  CHECK(row_blocks_.size() == 0 || m.row_blocks().size() != 0)  in AppendRows()
  238  << "The matrix being appended has: " << m.row_blocks().size()  in AppendRows()
  260  row_blocks_.insert(row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());  in AppendRows()
  488  const vector<int>& row_blocks = m.row_blocks();  in CreateOuterProductMatrixAndProgram() local
  491  for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {  in CreateOuterProductMatrixAndProgram()
  492  const int row_block_end = row_block_begin + row_blocks[row_block];  in CreateOuterProductMatrixAndProgram()
  514  const vector<int>& row_blocks = m.row_blocks();  in ComputeOuterProduct() local
  521  for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {  in ComputeOuterProduct()
  522  const int row_block_end = row_block_begin + row_blocks[row_block];  in ComputeOuterProduct()

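Two idioms are visible here. AppendRows() concatenates the appendage's row partition onto row_blocks_, after a CHECK (line 235) that either both matrices carry block structure or neither does; and the outer-product routines walk the scalar rows one block at a time with a running [row_block_begin, row_block_end) window. A minimal sketch of both, not the Ceres implementation:

#include <cassert>
#include <vector>

// Stacking a block matrix under another just concatenates the two row
// partitions. The assert mirrors the CHECK at line 235.
void AppendRowBlocks(std::vector<int>* row_blocks,
                     const std::vector<int>& appendage) {
  assert(row_blocks->empty() || !appendage.empty());
  row_blocks->insert(row_blocks->end(), appendage.begin(), appendage.end());
}

// The block-iteration idiom from the outer-product hits: advance a
// half-open window of scalar rows one block at a time.
void VisitRowBlocks(const std::vector<int>& row_blocks) {
  int row_block_begin = 0;
  for (int row_block = 0; row_block < static_cast<int>(row_blocks.size());
       ++row_block) {
    const int row_block_end = row_block_begin + row_blocks[row_block];
    // ... process scalar rows [row_block_begin, row_block_end) here ...
    row_block_begin = row_block_end;
  }
}

int main() {
  std::vector<int> row_blocks = {1, 2};
  AppendRowBlocks(&row_blocks, {2});  // row partition becomes {1, 2, 2}
  VisitRowBlocks(row_blocks);
  return 0;
}
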
suitesparse.cc
  153  const vector<int>& row_blocks,  in BlockAnalyzeCholesky() argument
  157  if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {  in BlockAnalyzeCholesky()
  207  const vector<int>& row_blocks,  in BlockAMDOrdering() argument
  210  const int num_row_blocks = row_blocks.size();  in BlockAMDOrdering()
  220  row_blocks,  in BlockAMDOrdering()
  244  BlockOrderingToScalarOrdering(row_blocks, block_ordering, ordering);  in BlockAMDOrdering()

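BlockAMDOrdering() computes a fill-reducing ordering on the block sparsity structure and then expands it back to scalar indices with BlockOrderingToScalarOrdering(). A hedged sketch of what that expansion plausibly does with row_blocks; the real Ceres routine may differ in detail:

#include <iostream>
#include <vector>

// Given a permutation of the blocks, emit the corresponding permutation of
// scalar indices: walk the blocks in their new order and expand each one to
// its run of scalar rows. Illustrative only, not the Ceres implementation.
void BlockOrderingToScalarOrderingSketch(const std::vector<int>& blocks,
                                         const std::vector<int>& block_ordering,
                                         std::vector<int>* scalar_ordering) {
  const int num_blocks = static_cast<int>(blocks.size());
  // Scalar offset at which each block starts in the original ordering.
  std::vector<int> block_starts(num_blocks, 0);
  for (int i = 1; i < num_blocks; ++i) {
    block_starts[i] = block_starts[i - 1] + blocks[i - 1];
  }
  scalar_ordering->clear();
  for (int i = 0; i < num_blocks; ++i) {
    const int block = block_ordering[i];
    for (int j = 0; j < blocks[block]; ++j) {
      scalar_ordering->push_back(block_starts[block] + j);
    }
  }
}

int main() {
  std::vector<int> ordering;
  // Put the last of the {1, 2, 2} blocks first: scalar order is 3 4 0 1 2.
  BlockOrderingToScalarOrderingSketch({1, 2, 2}, {2, 0, 1}, &ordering);
  for (int idx : ordering) std::cout << idx << " ";
  std::cout << "\n";
  return 0;
}
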
compressed_col_sparse_matrix_utils.cc
   43  const vector<int>& row_blocks,  in CompressedColumnScalarMatrixToBlockMatrix() argument
   49  const int num_row_blocks = row_blocks.size();  in CompressedColumnScalarMatrixToBlockMatrix()
   55  cursor += row_blocks[i];  in CompressedColumnScalarMatrixToBlockMatrix()

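The cursor loop at line 55 suggests how CompressedColumnScalarMatrixToBlockMatrix() relates scalar rows back to blocks: block i starts at the running sum of the preceding block sizes. An illustrative sketch, assuming a hash map from block start row to block id (the Ceres internals may differ):

#include <iostream>
#include <unordered_map>
#include <vector>

// Record the starting scalar row of each block, so a scalar row index that
// begins a block can be mapped back to that block's id.
std::unordered_map<int, int> RowStartToBlockId(const std::vector<int>& row_blocks) {
  std::unordered_map<int, int> row_to_block;
  int cursor = 0;
  for (int i = 0; i < static_cast<int>(row_blocks.size()); ++i) {
    row_to_block[cursor] = i;  // block i starts at scalar row 'cursor'
    cursor += row_blocks[i];   // the increment seen at line 55 above
  }
  return row_to_block;
}

int main() {
  const auto map = RowStartToBlockId({1, 2, 2});
  std::cout << map.at(3) << "\n";  // scalar row 3 starts block 2
  return 0;
}
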
cxsparse.cc
  107  const vector<int>& row_blocks,  in BlockAnalyzeCholesky() argument
  109  const int num_row_blocks = row_blocks.size();  in BlockAnalyzeCholesky()
  116  row_blocks,  in BlockAnalyzeCholesky()
  135  BlockOrderingToScalarOrdering(row_blocks, block_ordering, &scalar_ordering);  in BlockAnalyzeCholesky()

compressed_row_jacobian_writer.cc
   55  vector<int>& row_blocks = *(jacobian->mutable_row_blocks());  in PopulateJacobianRowAndColumnBlockVectors() local
   56  row_blocks.resize(residual_blocks.size());  in PopulateJacobianRowAndColumnBlockVectors()
   58  row_blocks[i] = residual_blocks[i]->NumResiduals();  in PopulateJacobianRowAndColumnBlockVectors()

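PopulateJacobianRowAndColumnBlockVectors() gives the Jacobian one row block per residual block, sized by that block's residual count. A self-contained sketch of that mapping; ResidualBlockLike is a hypothetical stand-in for the Ceres ResidualBlock class:

#include <iostream>
#include <vector>

// Hypothetical stand-in exposing only the NumResiduals() call used above.
struct ResidualBlockLike {
  int num_residuals;
  int NumResiduals() const { return num_residuals; }
};

// One row block per residual block, sized by its residual count, mirroring
// the assignment at line 58.
std::vector<int> JacobianRowBlocks(const std::vector<ResidualBlockLike>& residual_blocks) {
  std::vector<int> row_blocks(residual_blocks.size());
  for (size_t i = 0; i < residual_blocks.size(); ++i) {
    row_blocks[i] = residual_blocks[i].NumResiduals();
  }
  return row_blocks;
}

int main() {
  const std::vector<ResidualBlockLike> residual_blocks = {{2}, {3}, {1}};
  const std::vector<int> row_blocks = JacobianRowBlocks(residual_blocks);
  std::cout << row_blocks[1] << "\n";  // prints 3: block 1 has 3 residuals
  return 0;
}
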
suitesparse.h
  150  const vector<int>& row_blocks,
  218  const vector<int>& row_blocks,

cxsparse.h
  110  const vector<int>& row_blocks,

compressed_row_sparse_matrix.h
  112  const vector<int>& row_blocks() const { return row_blocks_; }  in row_blocks() function

compressed_col_sparse_matrix_utils.h
   52  const vector<int>& row_blocks,

sparse_normal_cholesky_solver.cc
  338  A->row_blocks(),  in SolveImplUsingSuiteSparse()