/external/tensorflow/tensorflow/core/kernels/

  sdca_ops_test.cc
      88  const int num_examples) {  (in SparseExampleIndices(), argument)
      89  const int x_size = num_examples * 4;  (in SparseExampleIndices())
      96  const int num_examples) {  (in SparseFeatureIndices(), argument)
      97  const int x_size = num_examples * 4;  (in SparseFeatureIndices())
     122  void GetGraphs(const int32 num_examples, const int32 num_sparse_feature_groups,  (in GetGraphs(), argument)
     179  SparseExampleIndices(g, sparse_features_per_group, num_examples)));  (in GetGraphs())
     184  SparseFeatureIndices(g, sparse_features_per_group, num_examples)));  (in GetGraphs())
     189  NodeBuilder::NodeOut(RandomZeroOrOne(g, num_examples * 4)));  (in GetGraphs())
     196  RandomZeroOrOneMatrix(g, num_examples, dense_features_per_group)));  (in GetGraphs())
     199  Node* const weights = Ones(g, num_examples);  (in GetGraphs())
     [all …]

  sdca_internal.cc
     247  for (int example_id = 0; example_id < num_examples(); ++example_id) {  (in SampleAdaptiveProbabilities())
     270  auto local_gen = generator.ReserveSamples32(num_examples());  (in SampleAdaptiveProbabilities())
     284  while (id < num_examples() && num_retries < num_examples()) {  (in SampleAdaptiveProbabilities())
     295  examples_not_seen.reserve(num_examples());  (in SampleAdaptiveProbabilities())
     296  for (int i = 0; i < num_examples(); ++i) {  (in SampleAdaptiveProbabilities())
     305  for (int i = id; i < num_examples(); ++i) {  (in SampleAdaptiveProbabilities())
     350  const int num_examples = static_cast<int>(example_weights.size());  (in Initialize(), local)
     360  examples_.resize(num_examples);  (in Initialize())
     361  probabilities_.resize(num_examples);  (in Initialize())
     362  sampled_index_.resize(num_examples);  (in Initialize())
     [all …]

  sdca_internal.h
     337  int num_examples() const { return examples_.size(); }  (in num_examples(), function)
     352  const DeviceBase::CpuWorkerThreads& worker_threads, int num_examples,
     363  const DeviceBase::CpuWorkerThreads& worker_threads, int num_examples,
     371  const DeviceBase::CpuWorkerThreads& worker_threads, int num_examples,

  sdca_ops.cc
     144  TensorShape expected_example_state_shape({examples.num_examples(), 4});  (in DoCompute())
     228  examples.num_examples(), kCostPerUnit, train_step);  (in DoCompute())
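The SampleAdaptiveProbabilities hits above show the shape of that routine: draw candidate example ids in proportion to per-example probabilities, give up after num_examples retries, and append whatever was never drawn. A minimal NumPy sketch of that sampling pattern (the probabilities themselves come from quantities the hits do not show and are taken here as an input; names such as sampled_index and examples_not_seen mirror the snippet):

    import numpy as np

    def sample_adaptive(probabilities, rng=None):
        """Order examples by adaptive sampling, appending the ones never drawn."""
        rng = rng or np.random.default_rng()
        p = np.asarray(probabilities, dtype=np.float64)
        p = p / p.sum()                      # normalize to a distribution
        num_examples = len(p)

        sampled_index = []
        seen = np.zeros(num_examples, dtype=bool)
        num_retries = 0
        # Mirrors `while (id < num_examples() && num_retries < num_examples())`.
        while len(sampled_index) < num_examples and num_retries < num_examples:
            candidate = rng.choice(num_examples, p=p)
            if seen[candidate]:
                num_retries += 1
                continue
            seen[candidate] = True
            sampled_index.append(candidate)

        # examples_not_seen: anything never drawn is processed at the end.
        examples_not_seen = [i for i in range(num_examples) if not seen[i]]
        return sampled_index + examples_not_seen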
/external/tensorflow/tensorflow/tools/gcs_test/python/

  gcs_smoke.py
      39  def create_examples(num_examples, input_mean):  (argument)
      41  ids = np.arange(num_examples).reshape([num_examples, 1])
      42  inputs = np.random.randn(num_examples, 1) + input_mean
      45  for row in range(num_examples):
     208  example_data = create_examples(FLAGS.num_examples, 5)
     223  if read_count != FLAGS.num_examples:
     226  FLAGS.num_examples))
     241  for _ in range(FLAGS.num_examples):
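The create_examples hits show how the GCS smoke test builds its payload: sequential ids, Gaussian inputs shifted by input_mean, and one tf.train.Example per row. A hedged reconstruction of that helper (the feature names "id" and "input" are illustrative; only the shapes and the per-row loop appear in the hits):

    import numpy as np
    import tensorflow as tf

    def create_examples(num_examples, input_mean):
        """Build num_examples tf.train.Example protos with an id and a noisy input."""
        ids = np.arange(num_examples).reshape([num_examples, 1])
        inputs = np.random.randn(num_examples, 1) + input_mean
        examples = []
        for row in range(num_examples):
            ex = tf.train.Example(features=tf.train.Features(feature={
                "id": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(v) for v in ids[row]])),
                "input": tf.train.Feature(
                    float_list=tf.train.FloatList(value=[float(v) for v in inputs[row]])),
            }))
            examples.append(ex)
        return examples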
/external/tensorflow/tensorflow/python/debug/examples/

  debug_keras.py
      32  num_examples = 8
      36  xs = np.zeros([num_examples, input_dims])
      37  ys = np.zeros([num_examples, output_dims])
      39  (xs, ys)).repeat(num_examples).batch(int(num_examples / steps_per_epoch))
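Taken together, the debug_keras.py hits describe a tiny input pipeline: zero-filled features and labels for eight examples, repeated and batched so that an epoch yields steps_per_epoch batches. A runnable sketch with assumed values for the dimensions (only num_examples = 8 and the dataset line are in the hits):

    import numpy as np
    import tensorflow as tf

    num_examples = 8
    input_dims = 3        # assumed; not shown in the hits
    output_dims = 1       # assumed; not shown in the hits
    steps_per_epoch = 2   # assumed; not shown in the hits

    xs = np.zeros([num_examples, input_dims])
    ys = np.zeros([num_examples, output_dims])
    dataset = tf.data.Dataset.from_tensor_slices(
        (xs, ys)).repeat(num_examples).batch(int(num_examples / steps_per_epoch))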
/external/tensorflow/tensorflow/examples/how_tos/reading_data/

  convert_to_records.py
      44  num_examples = data_set.num_examples
      46  if images.shape[0] != num_examples:
      48  (images.shape[0], num_examples))
      56  for index in range(num_examples):
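Here num_examples gates a consistency check against images.shape[0] and then sizes the TFRecord write loop. A hedged sketch of that pattern using tf.io.TFRecordWriter (the original is TF1-era and also serializes rows, cols, depth, and a label; the feature layout below is simplified):

    import tensorflow as tf

    def convert_to(images, labels, num_examples, filename):
        """Write each (image, label) pair as one serialized tf.train.Example."""
        if images.shape[0] != num_examples:
            raise ValueError("Images size %d does not match label size %d." %
                             (images.shape[0], num_examples))
        with tf.io.TFRecordWriter(filename) as writer:
            for index in range(num_examples):
                image_raw = images[index].tobytes()
                example = tf.train.Example(features=tf.train.Features(feature={
                    "label": tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[int(labels[index])])),
                    "image_raw": tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[image_raw])),
                }))
                writer.write(example.SerializeToString())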
/external/tensorflow/tensorflow/contrib/crf/

  README.md
      15  num_examples = 10
      21  x = np.random.rand(num_examples, num_words, num_features).astype(np.float32)
      24  y = np.random.randint(num_tags, size=[num_examples, num_words]).astype(np.int32)
      27  sequence_lengths = np.full(num_examples, num_words - 1, dtype=np.int32)
      42  [num_examples, num_words, num_tags])
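The README hits are the synthetic-data setup for the CRF walkthrough: random observations of shape [num_examples, num_words, num_features], integer tag sequences of shape [num_examples, num_words], and a shared sequence length of num_words - 1. A sketch of just that step, with the remaining sizes assumed (line 42 then implies unary scores of shape [num_examples, num_words, num_tags]):

    import numpy as np

    num_examples = 10
    num_words = 20       # assumed; not shown in the hits
    num_features = 100   # assumed; not shown in the hits
    num_tags = 5         # assumed; not shown in the hits

    # Random observations and random gold tag sequences.
    x = np.random.rand(num_examples, num_words, num_features).astype(np.float32)
    y = np.random.randint(num_tags, size=[num_examples, num_words]).astype(np.int32)

    # Every toy sequence uses the same length, num_words - 1.
    sequence_lengths = np.full(num_examples, num_words - 1, dtype=np.int32)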
/external/tensorflow/tensorflow/contrib/linear_optimizer/python/

  sdca_estimator_test.py
     519  num_examples = 40
     522  constant_op.constant([str(x + 1) for x in range(num_examples)]),
     526  constant_op.constant([[0.0]] * num_examples),
     528  for i in range(num_examples)])
     563  num_examples = 200
     564  half = int(num_examples / 2)
     567  constant_op.constant([str(x + 1) for x in range(num_examples)]),
     616  num_examples = 200
     617  half = int(num_examples / 2)
     620  constant_op.constant([str(x + 1) for x in range(num_examples)]),
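The estimator-test hits (and the near-identical linear_test.py hits further down) follow one input_fn recipe: string example ids "1".."num_examples", a constant dense column, and labels derived from the example index. A hedged sketch of that recipe with tf.constant standing in for constant_op.constant (the exact feature names are not in the hits):

    import tensorflow as tf

    num_examples = 40

    def input_fn():
        features = {
            "example_id": tf.constant([str(x + 1) for x in range(num_examples)]),
            "some_feature": tf.constant([[0.0]] * num_examples),
        }
        # Every fourth example is labeled positive, as in the linear_test.py hit.
        labels = tf.constant([[1 if i % 4 == 0 else 0] for i in range(num_examples)])
        return features, labels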
/external/tensorflow/tensorflow/contrib/factorization/examples/

  mnist.py
     106  batch_size = min(FLAGS.batch_size, data_set.num_examples)
     107  steps_per_epoch = data_set.num_examples // batch_size
     108  num_examples = steps_per_epoch * batch_size
     115  precision = true_count / num_examples
     117  (num_examples, true_count, precision))
/external/tensorflow/tensorflow/examples/tutorials/mnist/

  fully_connected_feed.py
     104  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
     105  num_examples = steps_per_epoch * FLAGS.batch_size
     111  precision = float(true_count) / num_examples
     113  (num_examples, true_count, precision))
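This entry and the factorization mnist.py entry above share one evaluation idiom: truncate the dataset to whole batches, accumulate a correct-prediction count, and report precision over the truncated example count. A condensed sketch of that bookkeeping (the session, the eval_correct op, and the feed-dict helper are assumed):

    def do_eval(sess, eval_correct, batch_size, dataset_num_examples, fill_feed_dict):
        """Count correct predictions over whole batches and report precision."""
        steps_per_epoch = dataset_num_examples // batch_size
        num_examples = steps_per_epoch * batch_size   # drop the final partial batch
        true_count = 0
        for _ in range(steps_per_epoch):
            true_count += sess.run(eval_correct, feed_dict=fill_feed_dict())
        precision = float(true_count) / num_examples
        print("Num examples: %d  Num correct: %d  Precision @ 1: %0.04f" %
              (num_examples, true_count, precision))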
/external/tensorflow/tensorflow/contrib/factorization/python/ops/

  gmm_ops_test.py
      39  self.num_examples = 1000
      44  self.data, self.true_assignments = self.make_data(self.num_examples)
      48  self.num_examples, self.centers)
     136  self.assertEqual((self.num_examples, 1), scores.shape)

  gmm_ops.py
     405  num_examples = math_ops.cast(math_ops.reduce_sum(final_points_in_k),
     408  (num_examples + MEPS))
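The gmm_ops.py hits show only the denominator guard in the GMM update: the per-cluster example count is a reduced sum cast to float, and divisions add a tiny MEPS so empty clusters do not produce NaNs. A NumPy sketch of that guard (the value of MEPS and the surrounding update are assumptions; only the count-and-guard pattern comes from the hits):

    import numpy as np

    MEPS = 1e-15  # assumed value; the constant's definition is not in the hits

    def cluster_means(points, assignments, num_clusters):
        """Mean of the points assigned to each cluster, safe for empty clusters."""
        dim = points.shape[1]
        means = np.zeros((num_clusters, dim))
        for k in range(num_clusters):
            points_in_k = (assignments == k)
            num_examples = float(np.sum(points_in_k))
            # Adding MEPS keeps the division finite when no point falls in cluster k.
            means[k] = points[points_in_k].sum(axis=0) / (num_examples + MEPS)
        return means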
/external/tensorflow/tensorflow/core/ops/

  parsing_ops.cc
     173  shape_inference::DimensionHandle num_examples = c->Dim(input, 0);  (in __anon952516500402(), local)
     197  TF_RETURN_IF_ERROR(c->Concatenate(c->Vector(num_examples), s, &s));  (in __anon952516500402())
     219  c->Concatenate(c->Matrix(num_examples, c->UnknownDim()), s, &s));  (in __anon952516500402())
     225  c->set_output(output_idx++, c->Vector(num_examples));  (in __anon952516500402())
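These shape-inference hits encode one invariant of ParseExample: num_examples is the leading dimension of the serialized input, and it is prepended to every dense output shape (and used for the [num_examples, ?] sparse index matrices). The same invariant is visible from Python; a small sketch, assuming eager TF 2.x:

    import tensorflow as tf

    def make_example(values):
        # One Example with a fixed-length float feature "x".
        return tf.train.Example(features=tf.train.Features(feature={
            "x": tf.train.Feature(float_list=tf.train.FloatList(value=values)),
        })).SerializeToString()

    serialized = tf.constant([make_example([1., 2., 3.]), make_example([4., 5., 6.])])
    parsed = tf.io.parse_example(
        serialized, {"x": tf.io.FixedLenFeature([3], tf.float32)})

    # num_examples (= 2) is prepended to the declared per-feature shape [3].
    print(parsed["x"].shape)  # (2, 3)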
/external/tensorflow/tensorflow/contrib/linear_optimizer/python/kernel_tests/

  sdca_ops_test.py
     113  def make_random_examples_and_variables_dicts(num_examples, dim, num_non_zero):  (argument)
     118  [i for i in range(num_examples) for _ in range(num_non_zero)], [
     119  i for _ in range(num_examples)
     122  [num_non_zero**(-0.5) for _ in range(num_examples * num_non_zero)])
     127  example_weights=[random.random() for _ in range(num_examples)],
     129  1. if random.random() > 0.5 else 0. for _ in range(num_examples)
     131  example_ids=[str(i) for i in range(num_examples)])
     395  num_examples = 1000
     401  num_examples, dim, non_zeros)
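The kernel-test hits outline how random SDCA examples are assembled: each of num_examples examples carries num_non_zero sparse features with the constant value num_non_zero ** -0.5 (unit L2 norm per example), a random weight, a random 0/1 label, and a string id. A hedged sketch that gathers those pieces into a plain dict; the real helper returns the framework's examples/variables structures, and the choice of feature columns below is purely illustrative:

    import random

    def make_random_examples(num_examples, dim, num_non_zero):
        """Collect the per-example pieces shown in the hits into one dict."""
        # Example index of every non-zero entry: example i contributes num_non_zero entries.
        example_indices = [i for i in range(num_examples)
                           for _ in range(num_non_zero)]
        # Feature column of every non-zero entry (random here, for illustration).
        feature_indices = [random.randrange(dim)
                           for _ in range(num_examples * num_non_zero)]
        feature_values = [num_non_zero ** (-0.5)
                          for _ in range(num_examples * num_non_zero)]
        return dict(
            sparse_indices=list(zip(example_indices, feature_indices)),
            sparse_values=feature_values,
            example_weights=[random.random() for _ in range(num_examples)],
            example_labels=[1. if random.random() > 0.5 else 0.
                            for _ in range(num_examples)],
            example_ids=[str(i) for i in range(num_examples)],
        )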
/external/tensorflow/tensorflow/contrib/gan/python/features/python/

  virtual_batchnorm_test.py
     130  num_examples = 4
     132  range(num_examples)]
     138  for i in range(num_examples):
/external/tensorflow/tensorflow/core/util/

  example_proto_fast_parsing.cc
    1763  int num_examples = serialized.size();  (in FastParseSequenceExample(), local)
    1774  if (!example_names.empty() && example_names.size() != num_examples) {  (in FastParseSequenceExample())
    1826  all_context_features(num_examples);  (in FastParseSequenceExample())
    1828  all_sequence_features(num_examples);  (in FastParseSequenceExample())
    1830  for (int d = 0; d < num_examples; d++) {  (in FastParseSequenceExample())
    2013  dense_shape.AddDim(num_examples);  (in FastParseSequenceExample())
    2041  for (int e = 0; e < num_examples; e++) {  (in FastParseSequenceExample())
    2154  for (int e = 0; e < num_examples; e++) {  (in FastParseSequenceExample())
    2192  out_shape(0) = num_examples;  (in FastParseSequenceExample())
    2196  TensorShape dense_length_shape({num_examples});  (in FastParseSequenceExample())
    [all …]
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/

  linear_test.py
    1739  num_examples = 40
    1742  constant_op.constant([str(x + 1) for x in range(num_examples)]),
    1746  constant_op.constant([[0.0]] * num_examples),
    1748  [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
    1785  num_examples = 200
    1786  half = int(num_examples / 2)
    1789  constant_op.constant([str(x + 1) for x in range(num_examples)]),
    1839  num_examples = 200
    1840  half = int(num_examples / 2)
    1843  constant_op.constant([str(x + 1) for x in range(num_examples)]),
/external/tensorflow/tensorflow/contrib/model_pruning/examples/cifar10/

  cifar10_eval.py
      80  num_iter = int(math.ceil(FLAGS.num_examples / 128))
/external/tensorflow/tensorflow/contrib/eager/python/examples/nmt_with_attention/

  nmt_with_attention.ipynb
     175  "def create_dataset(path, num_examples):\n",
     178  … word_pairs = [[preprocess_sentence(w) for w in l.split('\\t')] for l in lines[:num_examples]]\n",
     232  "def load_dataset(path, num_examples):\n",
     234  " pairs = create_dataset(path, num_examples)\n",
     287  "num_examples = 30000\n",
     288  …r, inp_lang, targ_lang, max_length_inp, max_length_targ = load_dataset(path_to_file, num_examples)"
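In the notebook, num_examples only caps how many lines of the tab-separated translation file are used: create_dataset preprocesses the first num_examples lines into word pairs, and load_dataset turns those pairs into tensors. A hedged sketch of the capping step (preprocess_sentence is simplified to a placeholder):

    import io

    def preprocess_sentence(w):
        # Stand-in for the notebook's cleaning/tokenization step.
        return "<start> " + w.lower().strip() + " <end>"

    def create_dataset(path, num_examples):
        """Return [input, target] sentence pairs for the first num_examples lines."""
        lines = io.open(path, encoding="UTF-8").read().strip().split("\n")
        word_pairs = [[preprocess_sentence(w) for w in l.split("\t")]
                      for l in lines[:num_examples]]
        return word_pairs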
/external/tensorflow/tensorflow/tools/docker/notebooks/

  2_getting_started.ipynb
     158  "num_examples = 50\n",
     159  "X = np.array([np.linspace(-2, 4, num_examples), np.linspace(-6, 6, num_examples)])\n",
     160  "X += np.random.randn(2, num_examples)\n",
     286  "num_examples = 50\n",
     287  "X = np.array([np.linspace(-2, 4, num_examples), np.linspace(-6, 6, num_examples)])\n",
     350  "X += np.random.randn(2, num_examples)\n",
     579  "num_examples = 50\n",
     580  "X = np.array([np.linspace(-2, 4, num_examples), np.linspace(-6, 6, num_examples)])\n",
     582  "X += np.random.randn(2, num_examples)\n",
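The getting-started notebook repeats one synthetic-data recipe in several cells: two linspace rows stacked into a 2 x num_examples matrix, then perturbed with unit Gaussian noise. The recipe in isolation:

    import numpy as np

    num_examples = 50
    # Two input rows: a linspace over [-2, 4] and one over [-6, 6].
    X = np.array([np.linspace(-2, 4, num_examples),
                  np.linspace(-6, 6, num_examples)])
    # Perturb every column with unit Gaussian noise.
    X += np.random.randn(2, num_examples)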
/external/tensorflow/tensorflow/lite/kernels/

  bidirectional_sequence_rnn_test.cc
    1027  const int num_examples = 64;  (in TEST(), local)
    1028  for (int k = 0; k < num_examples; k++) {  (in TEST())
/external/tensorflow/tensorflow/contrib/learn/python/learn/datasets/

  mnist.py
     176  def num_examples(self):  (member in DataSet)
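Here num_examples is a read-only accessor on the contrib.learn DataSet wrapper rather than a free variable. A minimal sketch of that accessor pattern, assuming the class records the example count from its arrays at construction time:

    class DataSet(object):
        """Minimal stand-in for the contrib.learn mnist DataSet wrapper."""

        def __init__(self, images, labels):
            assert images.shape[0] == labels.shape[0]
            self._images = images
            self._labels = labels
            self._num_examples = images.shape[0]

        @property
        def num_examples(self):
            return self._num_examples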
/external/tensorflow/tensorflow/contrib/slim/

  README.md
     890  num_examples = 10000
     892  num_batches = math.ceil(num_examples / float(batch_size))
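The slim README (like the cifar10_eval.py hit further up) uses num_examples only to size an evaluation loop: the batch count is the example count divided by the batch size, rounded up so the tail batch is not dropped. As a standalone snippet, with an assumed batch size:

    import math

    num_examples = 10000
    batch_size = 32  # assumed; the README's batch_size is not in the hit
    num_batches = math.ceil(num_examples / float(batch_size))  # 313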