/external/eigen/doc/ |
D | LeastSquares.dox |
       3  /** \eigenManualPage LeastSquares Solving linear least squares systems
       5  This page describes how to solve linear least squares systems using %Eigen. An overdetermined system
      20  solve linear squares systems. It is not enough to compute only the singular values (the default for
      22  computing least squares solutions:
      37  The solve() method in QR decomposition classes also computes the least squares solution. There are
      53  Finding the least squares solution of \a Ax = \a b is equivalent to solving the normal equation
|
D | TutorialLinearAlgebra.dox |
     184  \section TutorialLinAlgLeastsquares Least squares solving
     186  The most accurate method to do least squares solving is with a SVD decomposition. Eigen provides one
     187  as the JacobiSVD class, and its solve() is doing least-squares solving.
     199  normal matrix or a QR decomposition. Our page on \link LeastSquares least squares solving \endlink
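Note: the two Eigen documentation hits above describe three routes to a least-squares solution: an SVD, a QR decomposition, or the normal equations. The sketch below is an illustration of those calls against made-up data; it is not an excerpt from either page.

    // Illustrative only: the three least-squares paths named in the Eigen docs above.
    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // Made-up overdetermined system: 6 equations, 2 unknowns.
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 2);
      Eigen::VectorXd b = Eigen::VectorXd::Random(6);

      // SVD path: solve() needs the thin U and V factors; computing only the
      // singular values (the default) is not enough.
      Eigen::VectorXd x_svd =
          A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b);

      // QR path: solve() on a QR decomposition also returns the least-squares solution.
      Eigen::VectorXd x_qr = A.colPivHouseholderQr().solve(b);

      // Normal equations: solve A^T A x = A^T b (fast, but squares the condition number of A).
      Eigen::VectorXd x_ne = (A.transpose() * A).ldlt().solve(A.transpose() * b);

      std::cout << x_svd.transpose() << "\n" << x_qr.transpose() << "\n"
                << x_ne.transpose() << "\n";
    }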
|
/external/skia/gm/ |
D | thinrects.cpp |
     100  constexpr SkRect squares[] = { in DrawSquares() local
     110  for (size_t j = 0; j < SK_ARRAY_COUNT(squares); ++j) { in DrawSquares()
     111  canvas->drawRect(squares[j], p); in DrawSquares()
|
/external/skqp/gm/ |
D | thinrects.cpp |
     100  constexpr SkRect squares[] = { in DrawSquares() local
     110  for (size_t j = 0; j < SK_ARRAY_COUNT(squares); ++j) { in DrawSquares()
     111  canvas->drawRect(squares[j], p); in DrawSquares()
|
/external/python/cpython2/Lib/test/ |
D | test_generators.py |
    1127  squares = [" " for j in range(n)]
    1128  squares[row2col[i]] = "Q"
    1129  print "|" + "|".join(squares) + "|"
    1314  squares = [[None] * n for i in range(m)]
    1318  squares[i1][j1] = format % k
    1324  row = squares[i]
|
/external/tensorflow/tensorflow/core/graph/ |
D | graph_partition_test.cc |
     575  std::vector<ops::Square> squares; in TEST() local
     577  squares.emplace_back(root.WithOpName(strings::StrCat("s", i)), in TEST()
     579  squares.back().node()->AddAttr("_start_time", 50 - (i + 1)); in TEST()
     584  for (const auto& s : squares) inputs.push_back(s); in TEST()
     598  ASSERT_EQ(1 + squares.size() + placeholders.size(), nodes.size()); in TEST()
     605  for (int i = 0; i < squares.size(); ++i) { in TEST()
     670  std::vector<Square> squares; in TEST() local
     671  squares.reserve(indexes.size()); in TEST()
     673  squares.emplace_back(root.WithOpName(strings::StrCat("s", i)), in TEST()
     675  squares.back().node()->AddAttr("_start_time", 500 - (i + 1)); in TEST()
    [all …]
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_MatrixSolveLs.pbtxt |
      31  summary: "Solves one or more linear least-squares problems."
      39  in the least squares sense.
      51  \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
      64  least-squares solution, even when \\(A\\) is rank deficient. This path is
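Note: the regularized solution \\(X = (A^H A + \lambda I)^{-1} A^H B\\) quoted above can be written out directly. The helper below is an illustrative sketch for real-valued A using Eigen; the name and signature are assumptions, not the TensorFlow kernel.

    #include <Eigen/Dense>

    // Illustrative: regularized least squares X = (A^T A + lambda*I)^{-1} A^T B for real A.
    Eigen::MatrixXd RegularizedLeastSquares(const Eigen::MatrixXd& A,
                                            const Eigen::MatrixXd& B,
                                            double lambda) {
      const Eigen::Index n = A.cols();
      const Eigen::MatrixXd gram =
          A.transpose() * A + lambda * Eigen::MatrixXd::Identity(n, n);
      // Cholesky is valid here: for lambda > 0 the regularized Gram matrix is
      // symmetric positive definite.
      return gram.llt().solve(A.transpose() * B);
    }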
|
/external/ImageMagick/MagickCore/ |
D | segment.c |
     282  *squares; in Classify() local
     509  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares)); in Classify()
     510  if (squares == (double *) NULL) in Classify()
     513  squares+=255; in Classify()
     515  squares[i]=(double) i*(double) i; in Classify()
     609  distance_squared=squares[(ssize_t) ScaleQuantumToChar( in Classify()
     611  ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t) in Classify()
     613  ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t) in Classify()
     620  distance_squared=squares[(ssize_t) ScaleQuantumToChar( in Classify()
     622  ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[ in Classify()
    [all …]
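Note: the hits above show ImageMagick precomputing a 513-entry table of squares and shifting the pointer by 255 so it can be indexed directly by a signed channel difference in [-255, 255]. A stand-alone illustration of that trick follows; the SquaredDiff helper is hypothetical, not ImageMagick code.

    #include <array>

    // Squared difference of two 8-bit channel values via a shifted lookup table.
    inline double SquaredDiff(int a, int b) {
      static const std::array<double, 513> table = [] {
        std::array<double, 513> t{};
        for (int i = -255; i <= 255; ++i) t[255 + i] = double(i) * i;  // t[255+i] == i*i
        return t;
      }();
      const double* squares = table.data() + 255;  // legal indices: -255 .. 255
      return squares[a - b];
    }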
|
/external/tensorflow/tensorflow/examples/android/jni/object_tracking/ |
D | object_detector.h |
      59  std::vector<BoundingSquare>* const squares) { in FillWithSquares() argument
      68  squares->push_back(descriptor_area); in FillWithSquares()
      74  squares->size(), starting_square_size, smallest_square_size, in FillWithSquares()
|
/external/python/cpython3/Lib/test/ |
D | test_generators.py |
    1423  squares = [" " for j in range(n)]
    1424  squares[row2col[i]] = "Q"
    1425  print("|" + "|".join(squares) + "|")
    1610  squares = [[None] * n for i in range(m)]
    1614  squares[i1][j1] = format % k
    1620  row = squares[i]
|
/external/lmfit/man/ |
D | lmfit.pod |
      11  lmfit - Levenberg-Marquardt least-squares minimization
      16  B<lmfit> is a C library for Levenberg-Marquardt least-squares minimization and curve fitting. It is…
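Note: lmfit implements Levenberg-Marquardt least-squares minimization. The update it iterates can be sketched as a damped Gauss-Newton step; the function below is an illustration using Eigen, not lmfit's actual API.

    #include <Eigen/Dense>

    // One illustrative Levenberg-Marquardt step: given the Jacobian J and the
    // residual vector r at the current parameters, solve
    //   (J^T J + lambda * diag(J^T J)) * delta = J^T r
    // and update the parameters with p -= delta.
    Eigen::VectorXd LevenbergMarquardtStep(const Eigen::MatrixXd& J,
                                           const Eigen::VectorXd& r,
                                           double lambda) {
      const Eigen::MatrixXd JtJ = J.transpose() * J;
      Eigen::MatrixXd damped = JtJ;
      damped.diagonal() += lambda * JtJ.diagonal();   // Marquardt's diagonal damping
      return damped.ldlt().solve(J.transpose() * r);  // parameter update delta
    }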
|
/external/fec/ |
D | sumsq_mmx_assist.s |
       7  # Evaluate sum of squares of signed 16-bit input samples
      49  # Evaluate sum of squares of signed 16-bit input samples
|
D | sumsq_sse2_assist.s | 6 # Evaluate sum of squares of signed 16-bit input samples
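Note: both assembly files above evaluate the sum of squares of signed 16-bit samples. A plain scalar equivalent, shown only to document what the MMX/SSE2 routines compute; the function name and signature are assumptions.

    #include <cstddef>
    #include <cstdint>

    // Sum of squares of signed 16-bit samples, accumulated in 64 bits so long
    // buffers cannot overflow the total.
    uint64_t SumOfSquares(const int16_t* samples, size_t count) {
      uint64_t sum = 0;
      for (size_t i = 0; i < count; ++i) {
        const int32_t s = samples[i];       // widen before multiplying: 16x16 -> at most 30 bits
        sum += static_cast<uint32_t>(s * s);
      }
      return sum;
    }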
|
/external/tensorflow/tensorflow/contrib/tensor_forest/kernels/ |
D | tree_utils_test.cc |
      44  Tensor squares = test::AsTensor<float>({29, 12}, {2}); in TEST() local
      47  squares.unaligned_flat<float>(), 3), in TEST()
|
D | tree_utils.h |
      98  float WeightedVariance(const T1& sums, const T2& squares, float count) { in WeightedVariance() argument
     100  const auto e_x2 = squares / count; in WeightedVariance()
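Note: WeightedVariance() above relies on the identity Var(x) = E[x^2] - (E[x])^2, computed from a running sum and a running sum of squares. A scalar sketch of that identity; the real helper is templated over tensor-like arguments, and the scalar name is an assumption.

    // Variance from an accumulated sum and sum of squares of `count` samples.
    float WeightedVarianceScalar(float sum, float square_sum, float count) {
      const float e_x = sum / count;          // E[x]
      const float e_x2 = square_sum / count;  // E[x^2]
      return e_x2 - e_x * e_x;                // Var(x) = E[x^2] - E[x]^2
    }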
|
/external/lmfit/ |
D | lmfit.pc.in | 7 Description: Levenberg-Marquardt mean-squares minimization and curve fitting
|
D | configure.ac | 2 # Levenberg-Marquardt least squares fitting
|
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_error_sse2.asm |
      36  ; individual errors are max. 15bit+sign, so squares are 30bit, and
      92  ; individual errors are max. 15bit+sign, so squares are 30bit, and
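Note: the assembly comment above is a bit-width argument: each coefficient error fits in 15 bits plus sign, so its square fits in 30 bits, a pair of squares still fits in a 32-bit lane, and only the running total needs 64 bits. A scalar sketch of that reasoning; this is illustrative only, not the SIMD kernel, and the names are assumptions.

    #include <cstdint>

    // Sum of squared errors between two int16 coefficient blocks of even length n,
    // assuming each difference fits in 15 bits plus sign as the asm comment states.
    uint64_t BlockErrorScalar(const int16_t* coeff, const int16_t* dqcoeff, int n) {
      uint64_t sse = 0;
      for (int i = 0; i < n; i += 2) {
        const int32_t d0 = coeff[i] - dqcoeff[i];      // <= 15 bits + sign
        const int32_t d1 = coeff[i + 1] - dqcoeff[i + 1];
        const uint32_t pair = static_cast<uint32_t>(d0 * d0) +
                              static_cast<uint32_t>(d1 * d1);  // <= 31 bits, fits a 32-bit lane
        sse += pair;                                           // widen only when accumulating
      }
      return sse;
    }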
|
/external/python/cpython3/Doc/tutorial/ |
D | introduction.rst |
     382  >>> squares = [1, 4, 9, 16, 25]
     383  >>> squares
     389  >>> squares[0] # indexing returns the item
     391  >>> squares[-1]
     393  >>> squares[-3:] # slicing returns a new list
     399  >>> squares[:]
     404  >>> squares + [36, 49, 64, 81, 100]
|
D | datastructures.rst |
     197  For example, assume we want to create a list of squares, like::
     199  >>> squares = []
     201  ... squares.append(x**2)
     203  >>> squares
     207  after the loop completes. We can calculate the list of squares without any
     210  squares = list(map(lambda x: x**2, range(10)))
     214  squares = [x**2 for x in range(10)]
|
/external/python/cpython2/Doc/tutorial/ |
D | introduction.rst |
     476  >>> squares = [1, 4, 9, 16, 25]
     477  >>> squares
     483  >>> squares[0] # indexing returns the item
     485  >>> squares[-1]
     487  >>> squares[-3:] # slicing returns a new list
     493  >>> squares[:]
     498  >>> squares + [36, 49, 64, 81, 100]
|
D | datastructures.rst |
     252  For example, assume we want to create a list of squares, like::
     254  >>> squares = []
     256  ... squares.append(x**2)
     258  >>> squares
     263  squares = [x**2 for x in range(10)]
     265  This is also equivalent to ``squares = map(lambda x: x**2, range(10))``,
|
/external/tensorflow/tensorflow/contrib/solvers/ |
D | BUILD | 2 # Contains ops for iterative solvers for linear systems, linear least-squares
|
/external/tensorflow/tensorflow/contrib/tensor_forest/kernels/v4/ |
D | grow_stats.cc |
     819  const auto& squares = cand.left_stats().regression().mean_output_squares(); in ExtractFromProto() local
     822  left_square(split_num, i) = squares.value(i).float_value(); in ExtractFromProto()
     851  auto* squares = cand->mutable_left_stats() in PackToProto() local
     856  squares->add_value()->set_float_value(left_square(split_num, i)); in PackToProto()
|
/external/libaom/libaom/av1/encoder/x86/ |
D | error_sse2.asm | 38 ; individual errors are max. 15bit+sign, so squares are 30bit, and
|