
Searched refs:predictions (Results 1 – 25 of 199) sorted by relevance


/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
model_fn_test.py
47 def create_model_fn_ops(self, predictions, output_alternatives, argument
52 predictions=predictions,
70 self.assertEqual(model_fn_ops.predictions, estimator_spec.predictions)
82 predictions = self.create_predictions()
84 predictions, None, mode=model_fn.ModeKeys.INFER)
90 predictions = self.create_predictions()
92 constants.ProblemType.LINEAR_REGRESSION, predictions)}
94 predictions, output_alternatives, mode=model_fn.ModeKeys.INFER)
103 self.assertAllEqual(predictions["scores"].eval(),
107 predictions = self.create_predictions()
[all …]
logistic_regressor.py
49 predictions, loss, train_op = model_fn(features, labels, mode)
53 predictions=predictions,
59 predictions=predictions,
65 'predictions': predictions
124 def _make_logistic_eval_metric_ops(labels, predictions, thresholds): argument
143 predictions)
152 labels=labels_tensor, predictions=predictions)
156 math_ops.greater_equal(predictions, threshold),
160 predictions=predictions_at_threshold))
164 predictions=predictions_at_threshold))
[all …]
model_fn.py
72 predictions=None, argument
130 get_graph_from_inputs((predictions, loss, train_op))
153 if predictions is None:
157 if isinstance(predictions, dict):
158 predictions = {
160 for k, v in six.iteritems(predictions)
163 predictions = sparse_tensor.convert_to_tensor_or_sparse_tensor(
164 predictions)
186 predictions=predictions,
236 def _export_output(problem_type, predictions): # pylint: disable=missing-docstring argument
[all …]
/external/tensorflow/tensorflow/python/ops/
confusion_matrix.py
38 labels, predictions, expected_rank_diff=0, name=None): argument
64 [labels, predictions]):
65 predictions = ops.convert_to_tensor(predictions)
67 predictions_shape = predictions.get_shape()
75 predictions = array_ops.squeeze(predictions, [-1])
78 return labels, predictions
81 rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
84 predictions = control_flow_ops.cond(
86 lambda: array_ops.squeeze(predictions, [-1]),
87 lambda: predictions)
[all …]
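
The remove_squeezable_dimensions helper above backs the public confusion-matrix op. A minimal sketch of that op (TF 1.x API; the label/prediction values are the same ones used in confusion_matrix_test.py further down):

    import tensorflow as tf

    labels = tf.constant([1, 2, 4])
    predictions = tf.constant([2, 2, 4])
    # Rows index labels, columns index predictions; entry (i, j) counts pairs.
    cm = tf.confusion_matrix(labels=labels, predictions=predictions, num_classes=5)

    with tf.Session() as sess:
        print(sess.run(cm))  # 5x5 counts; cm[1, 2] == 1, cm[2, 2] == 1, cm[4, 4] == 1
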
metrics_impl.py
53 def _remove_squeezable_dimensions(predictions, labels, weights): argument
76 predictions = ops.convert_to_tensor(predictions)
78 labels, predictions = confusion_matrix.remove_squeezable_dimensions(
79 labels, predictions)
80 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
83 return predictions, labels, None
89 return predictions, labels, weights
91 predictions_shape = predictions.get_shape()
102 rank_diff = weights_rank_tensor - array_ops.rank(predictions)
126 return predictions, labels, weights
[all …]
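
_remove_squeezable_dimensions is shared plumbing for the core tf.metrics ops. A hedged sketch of the labels-first calling convention it serves (values illustrative):

    import tensorflow as tf

    labels = tf.constant([1, 0, 1, 1])
    predictions = tf.constant([1, 1, 1, 0])
    accuracy, update_op = tf.metrics.accuracy(labels=labels, predictions=predictions)

    with tf.Session() as sess:
        # Metric accumulators are local variables and must be initialized.
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(accuracy))  # 2 of 4 match -> 0.5
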
/external/tensorflow/tensorflow/contrib/metrics/python/ops/
metric_ops.py
66 def streaming_true_positives(predictions, argument
102 predictions=predictions,
110 def streaming_true_negatives(predictions, argument
146 predictions=predictions,
154 def streaming_false_positives(predictions, argument
190 predictions=predictions,
198 def streaming_false_negatives(predictions, argument
233 predictions=predictions,
346 def streaming_accuracy(predictions, argument
395 predictions=predictions,
[all …]
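
Note the argument order: these contrib wrappers take predictions first, while the core tf.metrics ops take labels first (compare metric_ops_test.py below with metrics_test.py under python/kernel_tests/). A minimal sketch of the contrib call, assuming TF 1.x with contrib available:

    import tensorflow as tf

    predictions = tf.ones((10, 1))
    labels = tf.ones((10, 1))
    # Contrib wrapper around tf.metrics.accuracy; note the swapped argument order.
    acc, update_op = tf.contrib.metrics.streaming_accuracy(predictions, labels)
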
metric_ops_test.py
487 predictions=array_ops.ones((10, 1)),
496 predictions=array_ops.ones((10, 1)),
504 predictions=array_ops.ones((10, 1)),
510 predictions = array_ops.ones((10, 3))
513 metrics.streaming_accuracy(predictions, labels)
516 predictions = array_ops.ones((10, 3))
520 metrics.streaming_accuracy(predictions, labels, weights)
523 predictions = random_ops.random_uniform(
527 accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
550 predictions = preds_queue.dequeue()
[all …]
/external/tensorflow/tensorflow/contrib/learn/python/learn/
metric_spec_test.py
35 def _fn0(predictions, labels, weights=None): argument
36 self.assertEqual("p1_value", predictions)
41 def _fn1(predictions, targets, weights=None): argument
42 self.assertEqual("p1_value", predictions)
150 def _fn(predictions): argument
151 self.assertEqual(predictions_, predictions)
191 def _fn0(predictions, labels): argument
192 self.assertEqual("p1_value", predictions)
196 def _fn1(predictions, targets): argument
197 self.assertEqual("p1_value", predictions)
[all …]
metric_spec.py
156 _sentinel=None, labels=None, predictions=None, weights=None): argument
160 predictions_arg: predictions,
172 _sentinel=None, labels=None, predictions=None, weights=None): argument
178 return metric_fn(labels, predictions)
179 return metric_fn(labels, predictions, **{weights_arg: weights})
185 _sentinel=None, labels=None, predictions=None, weights=None):
192 return metric_fn(predictions, **kwargs)
198 _sentinel=None, labels=None, predictions=None, weights=None): argument
202 predictions_arg: predictions,
213 _sentinel=None, labels=None, predictions=None, weights=None): argument
[all …]
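
A hedged sketch of how the MetricSpec adapter above is typically used (metric_fn and key names are illustrative): it maps an estimator's predictions and labels onto whatever argument names the wrapped metric_fn declares (predictions, targets, weights, ...).

    import tensorflow as tf

    def my_accuracy(labels, predictions, weights=None):
        return tf.metrics.accuracy(labels, predictions, weights=weights)

    spec = tf.contrib.learn.MetricSpec(
        metric_fn=my_accuracy,
        prediction_key="classes",  # which entry of the predictions dict to pass
        label_key=None)            # None: pass labels through unchanged
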
/external/tensorflow/tensorflow/contrib/lite/models/smartreply/
predictor_test.cc
67 std::vector<PredictorResponse> predictions; in TEST_F() local
69 GetSegmentPredictions({"Welcome"}, *model_, /*config=*/{{}}, &predictions); in TEST_F()
70 EXPECT_GT(predictions.size(), 0); in TEST_F()
73 for (const auto &item : predictions) { in TEST_F()
81 &predictions, in TEST_F()
86 std::vector<PredictorResponse> predictions; in TEST_F() local
89 &predictions); in TEST_F()
90 EXPECT_GT(predictions.size(), 0); in TEST_F()
93 for (const auto &item : predictions) { in TEST_F()
100 EXPECT_THAT(&predictions, IncludeAnyResponesIn(std::unordered_set<string>( in TEST_F()
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/
metrics_test.py
509 predictions=array_ops.ones((10, 1)),
518 predictions=array_ops.ones((10, 1)),
526 predictions=array_ops.ones((10, 1)),
532 predictions = array_ops.ones((10, 3))
535 metrics.accuracy(labels, predictions)
538 predictions = array_ops.ones((10, 3))
542 metrics.accuracy(labels, predictions, weights)
545 predictions = random_ops.random_uniform(
549 accuracy, update_op = metrics.accuracy(labels, predictions)
572 predictions = preds_queue.dequeue()
[all …]
in_topk_op_test.py
31 def _validateInTopK(self, predictions, target, k, expected): argument
34 precision = nn_ops.in_top_k(predictions, target, k)
40 predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
42 self._validateInTopK(predictions, target, 1, [True, False])
45 predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
47 self._validateInTopK(predictions, target, 2, [False, True])
51 predictions = [[0.1, 0.3, 0.2, 0.2], [0.1, 0.3, 0.2, 0.2]]
53 self._validateInTopK(predictions, target, 2, [True, True])
56 predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
58 self._validateInTopK(predictions, target, 2, [False, True])
[all …]
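
The op under test, sketched with the same prediction rows (targets here are illustrative): tf.nn.in_top_k reports, per row, whether the target class id is among the k highest-scoring predictions.

    import tensorflow as tf

    predictions = tf.constant([[0.1, 0.3, 0.2, 0.4],
                               [0.1, 0.2, 0.3, 0.4]])
    targets = tf.constant([3, 2])
    in_top_2 = tf.nn.in_top_k(predictions, targets, k=2)

    with tf.Session() as sess:
        print(sess.run(in_top_2))  # [ True  True]
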
confusion_matrix_test.py
47 labels=[1, 2, 4], predictions=[2, 2, 4])))
49 def _testConfMatrix(self, labels, predictions, truth, weights=None, argument
52 dtype = predictions.dtype
54 labels, predictions, dtype=dtype, weights=weights,
61 predictions = np.arange(5, dtype=dtype)
71 self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
125 predictions = np.asarray([1, 2, 3], dtype=dtype)
137 self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
147 predictions = np.asarray([1, 1, 2, 3, 5, 6, 1, 2, 3, 4], dtype=dtype)
159 self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
[all …]
/external/tensorflow/tensorflow/python/ops/losses/
losses_impl.py
219 labels, predictions, weights=1.0, scope=None, argument
253 if predictions is None:
256 (predictions, labels, weights)) as scope:
257 predictions = math_ops.to_float(predictions)
259 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
260 losses = math_ops.abs(math_ops.subtract(predictions, labels))
268 labels, predictions, axis=None, weights=1.0, scope=None, argument
305 if predictions is None:
308 (predictions, labels, weights)) as scope:
309 predictions = math_ops.to_float(predictions)
[all …]
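
A minimal sketch of the core losses API implemented above (values illustrative): labels come first, the element-wise loss is |predictions - labels|, and the default reduction averages over non-zero weights.

    import tensorflow as tf

    labels = tf.constant([1.0, 2.0, 3.0])
    predictions = tf.constant([1.5, 2.0, 2.0])
    loss = tf.losses.absolute_difference(labels=labels, predictions=predictions)

    with tf.Session() as sess:
        print(sess.run(loss))  # (0.5 + 0.0 + 1.0) / 3 = 0.5
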
/external/tensorflow/tensorflow/contrib/learn/python/learn/utils/
export.py
89 def generic_signature_fn(examples, unused_features, predictions): argument
110 if not isinstance(predictions, dict):
111 predictions = {'outputs': predictions}
112 tensors.update(predictions)
121 def classification_signature_fn(examples, unused_features, predictions): argument
139 if isinstance(predictions, dict):
141 examples, classes_tensor=predictions['classes'])
144 examples, classes_tensor=predictions)
153 examples, unused_features, predictions): argument
171 if isinstance(predictions, dict):
[all …]
/external/tensorflow/tensorflow/python/estimator/
model_fn_test.py
61 predictions = {'loss': loss}
65 predictions=predictions,
206 predictions={'loss': loss},
213 predictions = {'loss': loss}
217 predictions=predictions,
255 predictions={'loss': loss},
264 predictions={'loss': constant_op.constant(1.)},
272 predictions={'loss': constant_op.constant(1.)})
280 predictions={'loss': loss},
293 predictions={'prediction': constant_op.constant(1.)},
[all …]
model_fn.py
71 predictions=None, argument
191 if predictions is None:
194 predictions = {}
196 if isinstance(predictions, dict):
197 predictions = {
199 for k, v in six.iteritems(predictions)
202 predictions = _check_is_tensor(predictions, 'predictions')
263 if isinstance(predictions, dict):
264 for key, value in six.iteritems(predictions):
269 elif predictions is not None:
[all …]
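
The checks above enforce the EstimatorSpec contract: in PREDICT mode predictions must be a Tensor or a dict of Tensors. A hedged sketch of a model_fn that satisfies it (layer sizes, feature key, and prediction keys are illustrative):

    import tensorflow as tf

    def model_fn(features, labels, mode):
        logits = tf.layers.dense(features["x"], units=2)
        predictions = {
            "classes": tf.argmax(logits, axis=1),
            "probabilities": tf.nn.softmax(logits),
        }
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, train_op=train_op, predictions=predictions)
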
/external/tensorflow/tensorflow/contrib/linear_optimizer/python/kernel_tests/
sdca_ops_test.py
163 def get_binary_predictions_for_logistic(predictions, cutoff=0.5): argument
165 math_ops.greater_equal(predictions,
166 array_ops.ones_like(predictions) * cutoff),
170 def get_binary_predictions_for_hinge(predictions): argument
172 math_ops.greater_equal(predictions, array_ops.zeros_like(predictions)),
217 predictions = lr.predictions(examples)
233 predicted_labels = get_binary_predictions_for_logistic(predictions)
267 predictions = lr.predictions(examples)
296 predicted_labels = get_binary_predictions_for_logistic(predictions)
329 predictions = lr.predictions(examples)
[all …]
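
The test helper above turns logistic outputs into hard labels by thresholding at a cutoff. An equivalent sketch (cutoff default as in the test; float outputs assumed):

    import tensorflow as tf

    def binary_predictions_for_logistic(predictions, cutoff=0.5):
        # 1.0 where the predicted probability reaches the cutoff, else 0.0.
        return tf.cast(tf.greater_equal(predictions, cutoff), tf.float32)

    hard_labels = binary_predictions_for_logistic(tf.constant([0.2, 0.5, 0.9]))  # -> [0., 1., 1.]
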
/external/tensorflow/tensorflow/contrib/losses/python/losses/
loss_ops.py
264 def absolute_difference(predictions, labels=None, weights=1.0, scope=None): argument
290 [predictions, labels, weights]) as scope:
291 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
292 predictions = math_ops.to_float(predictions)
294 losses = math_ops.abs(math_ops.subtract(predictions, labels))
439 def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None): argument
466 [predictions, labels, weights]) as scope:
467 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
468 predictions = math_ops.to_float(predictions)
471 labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(
[all …]
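
The truncated log_loss snippet computes the usual binary cross-entropy with an epsilon guard against log(0). A sketch of the element-wise term it accumulates (tf.losses.log_loss is the non-contrib counterpart):

    import tensorflow as tf

    def elementwise_log_loss(labels, predictions, epsilon=1e-7):
        # -labels*log(p + eps) - (1 - labels)*log(1 - p + eps)
        return -(labels * tf.log(predictions + epsilon) +
                 (1.0 - labels) * tf.log(1.0 - predictions + epsilon))
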
/external/tensorflow/tensorflow/tools/api/golden/
tensorflow.metrics.pbtxt
5 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_colle…
9 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'num_thresholds\', \'metrics_collection…
13 …argspec: "args=[\'labels\', \'predictions\', \'k\', \'weights\', \'metrics_collections\', \'update…
17 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_colle…
21 …argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\',…
25 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_colle…
29 …argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\',…
37 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_colle…
41 …argspec: "args=[\'labels\', \'predictions\', \'dim\', \'weights\', \'metrics_collections\', \'upda…
45 …argspec: "args=[\'labels\', \'predictions\', \'num_classes\', \'weights\', \'metrics_collections\'…
[all …]
/external/tensorflow/tensorflow/python/estimator/canned/
head.py
474 def _predictions_mean(predictions, weights=None, name=None): argument
476 name, 'predictions_mean', (predictions, weights)) as scope:
477 predictions = math_ops.to_float(predictions, name='predictions')
479 weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
480 return metrics_lib.mean(predictions, weights=weights, name=scope)
483 def _auc(labels, predictions, weights=None, curve='ROC', name=None): argument
484 with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
485 predictions = math_ops.to_float(predictions, name='predictions')
487 weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
489 labels=labels, predictions=predictions, weights=weights, curve=curve,
[all …]
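
A hedged sketch of the weighted-metric pattern in head.py (values illustrative): weights are broadcast against predictions and passed through to the underlying metric, here tf.metrics.auc with the default ROC curve.

    import tensorflow as tf

    labels = tf.constant([0., 1., 1., 0.])
    predictions = tf.constant([0.1, 0.8, 0.6, 0.4])
    weights = tf.constant([1., 1., 2., 1.])
    auc, update_op = tf.metrics.auc(labels=labels, predictions=predictions,
                                    weights=weights, curve='ROC')
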
/external/tensorflow/tensorflow/contrib/estimator/python/estimator/
extenders.py
90 new_metrics = _call_metric_fn(metric_fn, features, labels, spec.predictions,
202 def verify_keys_and_predictions(features, predictions): argument
203 if not isinstance(predictions, dict):
206 'Given: {}'.format(type(predictions)))
213 if key in predictions:
217 'of forward_features.'.format(key, predictions.keys()))
223 predictions = spec.predictions
224 if predictions is None:
226 verify_keys_and_predictions(features, predictions)
235 predictions[key] = feature
[all …]
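
A hedged sketch of the two extenders referenced above (the estimator, feature column, and key names are illustrative): add_metrics attaches extra eval metrics computed from the predictions dict, and forward_features copies a named input feature (for example a record id) into predictions so it comes back from Estimator.predict.

    import tensorflow as tf

    def extra_metric_fn(labels, predictions):
        # metric_fn may declare any subset of (features, labels, predictions, config).
        return {"mean_logistic": tf.metrics.mean(predictions["logistic"])}

    feature_col = tf.feature_column.numeric_column("x")
    estimator = tf.estimator.LinearClassifier(feature_columns=[feature_col])
    estimator = tf.contrib.estimator.add_metrics(estimator, extra_metric_fn)
    estimator = tf.contrib.estimator.forward_features(estimator, keys="id")
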
extenders_test.py
82 def metric_fn(features, predictions, labels, config): argument
85 self.assertIn('logistic', predictions)
98 def metric_fn(labels, config, features, predictions): argument
101 self.assertIn('logistic', predictions)
176 predictions = next(estimator.predict(input_fn=input_fn))
177 self.assertIn('id', predictions)
178 self.assertEqual(101, predictions['id'])
190 predictions = next(estimator.predict(input_fn=input_fn))
191 self.assertIn('id', predictions)
192 self.assertIn('x', predictions)
[all …]
replicate_model_fn_test.py
187 predictions = math_ops.multiply(features, c)
190 labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)
194 'accuracy': metrics_lib.accuracy(labels, predictions),
195 'auc': metrics_lib.auc(labels, predictions)
205 predictions={'probabilities': predictions},
377 }, session.run(estimator_spec.predictions))
442 }, session.run(estimator_spec.predictions))
499 predictions = math_ops.multiply(features, c)
502 labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)
506 'accuracy': metrics_lib.accuracy(labels, predictions),
[all …]
/external/tensorflow/tensorflow/contrib/tensor_forest/client/
eval_metrics.py
45 def _accuracy(predictions, targets, weights=None): argument
46 return metric_ops.streaming_accuracy(predictions, targets, weights=weights)
80 def _predictions(predictions, unused_targets, **unused_kwargs): argument
81 return predictions
92 def _precision(predictions, targets, weights=None): argument
93 return metric_ops.streaming_precision(predictions, targets, weights=weights)
96 def _precision_at_thresholds(predictions, targets, weights=None): argument
98 array_ops.slice(predictions, [0, 1], [-1, 1]),
105 def _recall(predictions, targets, weights=None): argument
106 return metric_ops.streaming_recall(predictions, targets, weights=weights)
[all …]
