/external/tensorflow/tensorflow/core/kernels/data/ |
D | repeat_dataset_op.cc |
  111  bool* end_of_sequence) override {  in GetNextInternal() argument
  112  *end_of_sequence = true;  in GetNextInternal()
  143  bool* end_of_sequence) override {  in GetNextInternal() argument
  146  *end_of_sequence = true;  in GetNextInternal()
  151  input_impl_->GetNext(ctx, out_tensors, end_of_sequence));  in GetNextInternal()
  152  if (!*end_of_sequence) {  in GetNextInternal()
  159  *end_of_sequence = true;  in GetNextInternal()
  215  bool* end_of_sequence) override {  in GetNextInternal() argument
  222  Status s = input_impl_->GetNext(ctx, out_tensors, end_of_sequence);  in GetNextInternal()
  223  if (first_call_ && *end_of_sequence) {  in GetNextInternal()
  [all …]
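
The repeat matches outline the finite-repeat pattern: GetNextInternal forwards to the wrapped input iterator and, whenever the input reports end_of_sequence, rebuilds it and starts the next epoch until the repeat count runs out. A condensed sketch of that control flow, assuming members i_, input_impl_ and dataset()->count_ as the snippets suggest (not the literal source):

    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      mutex_lock l(mu_);
      while (i_ < dataset()->count_) {
        // Forward to the current epoch's input iterator.
        TF_RETURN_IF_ERROR(
            input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
        if (!*end_of_sequence) {
          return Status::OK();  // Got an element within this epoch.
        }
        // Input exhausted: advance the epoch counter and rebuild the input.
        ++i_;
        TF_RETURN_IF_ERROR(
            dataset()->input_->MakeIterator(ctx, prefix(), &input_impl_));
      }
      *end_of_sequence = true;  // All repetitions consumed.
      return Status::OK();
    }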
|
D | batch_dataset_op.cc |
  134  bool* end_of_sequence) override {  in GetNextInternal() argument
  141  *end_of_sequence = true;  in GetNextInternal()
  145  *end_of_sequence = false;  in GetNextInternal()
  146  for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence;  in GetNextInternal()
  150  end_of_sequence));  in GetNextInternal()
  151  if (!*end_of_sequence) {  in GetNextInternal()
  160  DCHECK(*end_of_sequence);  in GetNextInternal()
  166  *end_of_sequence = true;  in GetNextInternal()
  210  *end_of_sequence = false;  in GetNextInternal()
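
The batch matches show the standard gather loop: pull up to dataset()->batch_size_ elements from the input, stop early if it ends, and only report end_of_sequence to the caller when nothing at all was collected. Roughly (a sketch of the loop body only, omitting the copy of the collected elements into the batched out_tensors):

    std::vector<std::vector<Tensor>> batch_elements;
    *end_of_sequence = false;
    for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence; ++i) {
      std::vector<Tensor> batch_element;
      TF_RETURN_IF_ERROR(
          input_impl_->GetNext(ctx, &batch_element, end_of_sequence));
      if (!*end_of_sequence) {
        batch_elements.emplace_back(std::move(batch_element));
      }
    }
    if (batch_elements.empty()) {
      DCHECK(*end_of_sequence);  // Nothing collected implies the input ended.
      *end_of_sequence = true;
      return Status::OK();
    }
    // ... build the batched tensors from batch_elements, then report success:
    *end_of_sequence = false;
    return Status::OK();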
|
D | skip_dataset_op.cc |
  98   bool* end_of_sequence) override {  in GetNextInternal() argument
  99   *end_of_sequence = true;  in GetNextInternal()
  131  bool* end_of_sequence) override {  in GetNextInternal() argument
  135  *end_of_sequence = true;  in GetNextInternal()
  146  input_impl_->GetNext(ctx, &dummy_out_tensors, end_of_sequence));  in GetNextInternal()
  147  if (*end_of_sequence) {  in GetNextInternal()
  158  input_impl_->GetNext(ctx, out_tensors, end_of_sequence));  in GetNextInternal()
  159  if (*end_of_sequence) {  in GetNextInternal()
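
The skip matches imply a two-phase iterator: first drain and discard elements until the skip count is met (line 146 uses a throwaway dummy_out_tensors), then pass GetNext through unchanged. A rough sketch, assuming a member i_ that counts the elements skipped so far:

    while (i_ < dataset()->count_) {
      std::vector<Tensor> dummy_out_tensors;  // Skipped elements are discarded.
      TF_RETURN_IF_ERROR(
          input_impl_->GetNext(ctx, &dummy_out_tensors, end_of_sequence));
      if (*end_of_sequence) {
        return Status::OK();  // Input ran out before the skip count was reached.
      }
      ++i_;
    }
    // Skipping done: elements now pass straight through to the caller.
    TF_RETURN_IF_ERROR(
        input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
    return Status::OK();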
|
D | take_dataset_op.cc |
  47   bool* end_of_sequence) override {  in GetNextInternal() argument
  48   *end_of_sequence = true;  in GetNextInternal()
  79   bool* end_of_sequence) override {  in GetNextInternal() argument
  82   *end_of_sequence = true;  in GetNextInternal()
  87   input_impl_->GetNext(ctx, out_tensors, end_of_sequence));  in GetNextInternal()
  88   if (!*end_of_sequence) {  in GetNextInternal()
  94   *end_of_sequence = true;  in GetNextInternal()
|
D | padded_batch_dataset_op.cc |
  224  bool* end_of_sequence) override {  in GetNextInternal() argument
  231  *end_of_sequence = true;  in GetNextInternal()
  234  *end_of_sequence = false;  in GetNextInternal()
  236  for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence;  in GetNextInternal()
  240  end_of_sequence));  in GetNextInternal()
  241  if (!*end_of_sequence) {  in GetNextInternal()
  245  if (*end_of_sequence) {  in GetNextInternal()
  252  DCHECK(*end_of_sequence);  in GetNextInternal()
  258  *end_of_sequence = true;  in GetNextInternal()
  344  *end_of_sequence = false;  in GetNextInternal()
|
D | concatenate_dataset_op_test.cc |
  187  bool end_of_sequence = false;  in TEST_P() local
  189  while (!end_of_sequence) {  in TEST_P()
  191  &end_of_sequence));  in TEST_P()
  192  if (!end_of_sequence) {  in TEST_P()
  314  bool end_of_sequence = false;  in TEST_P() local
  328  &end_of_sequence));  in TEST_P()
  329  if (!end_of_sequence) {  in TEST_P()
  340  EXPECT_TRUE(end_of_sequence);  in TEST_P()
  343  EXPECT_FALSE(end_of_sequence);  in TEST_P()
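
The test matches follow the usual drain pattern for dataset iterator tests: call GetNext in a loop, collect tensors while end_of_sequence stays false, and assert the flag afterwards. Approximately (iterator and iterator_ctx stand in for the fixture members; the names here are placeholders):

    bool end_of_sequence = false;
    std::vector<Tensor> out_tensors;
    while (!end_of_sequence) {
      std::vector<Tensor> next;
      TF_EXPECT_OK(
          iterator->GetNext(iterator_ctx.get(), &next, &end_of_sequence));
      if (!end_of_sequence) {
        // Keep everything produced before the iterator reported exhaustion.
        out_tensors.insert(out_tensors.end(), next.begin(), next.end());
      }
    }
    EXPECT_TRUE(end_of_sequence);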
|
D | filter_by_component_dataset_op.cc |
  112  bool* end_of_sequence) override {  in GetNextInternal() argument
  121  *end_of_sequence = true;  in GetNextInternal()
  125  input_impl_->GetNext(ctx, out_tensors, end_of_sequence));  in GetNextInternal()
  127  if (*end_of_sequence) {  in GetNextInternal()
  140  *end_of_sequence = false;  in GetNextInternal()
|
D | zip_dataset_op_test.cc |
  172  bool end_of_sequence = false;  in TEST_P() local
  174  while (!end_of_sequence) {  in TEST_P()
  176  &end_of_sequence));  in TEST_P()
  177  if (!end_of_sequence) {  in TEST_P()
  314  bool end_of_sequence = false;  in TEST_P() local
  327  &end_of_sequence));  in TEST_P()
  328  if (!end_of_sequence) {  in TEST_P()
  339  EXPECT_TRUE(end_of_sequence);  in TEST_P()
  342  EXPECT_FALSE(end_of_sequence);  in TEST_P()
|
D | prefetch_dataset_op.cc |
  107  bool* end_of_sequence) override {  in GetNextInternal() argument
  128  return Consume(ctx, out_tensors, end_of_sequence);  in GetNextInternal()
  132  *end_of_sequence = true;  in GetNextInternal()
  149  return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);  in GetNextInternal()
  232  bool* end_of_sequence) EXCLUSIVE_LOCKS_REQUIRED(mu_) {  in Consume() argument
  255  *end_of_sequence = false;  in Consume()
  306  bool end_of_sequence;  in PrefetchThread() local
  309  ctx.get(), &buffer_element.value, &end_of_sequence);  in PrefetchThread()
  310  if (buffer_element.status.ok() && end_of_sequence) {  in PrefetchThread()
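
In prefetch the flag crosses a thread boundary: the background PrefetchThread records the input's end_of_sequence alongside each buffered element, while Consume reports false to the caller as long as buffered values remain. A sketch of the producer side around lines 306-310; the members prefetch_thread_finished_ and cond_var_ are assumptions here, not visible in the matches:

    // Inside PrefetchThread(): one iteration of the buffer-filling loop.
    bool end_of_sequence;
    BufferElement buffer_element;
    buffer_element.status = input_impl_->GetNext(
        ctx.get(), &buffer_element.value, &end_of_sequence);
    if (buffer_element.status.ok() && end_of_sequence) {
      mutex_lock l(mu_);
      prefetch_thread_finished_ = true;  // Tell Consume() no more data is coming.
      cond_var_.notify_all();
      return;
    }
    // Otherwise enqueue buffer_element and wake any waiting consumer.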
|
D | cache_dataset_ops.cc |
  133  bool* end_of_sequence) override {  in GetNextInternal() argument
  135  return iterator_->GetNext(ctx, out_tensors, end_of_sequence);  in GetNextInternal()
  213  bool* end_of_sequence) override {  in GetNextInternal() argument
  229  input_impl_->GetNext(ctx, out_tensors, end_of_sequence));  in GetNextInternal()
  230  if (*end_of_sequence && out_tensors->empty()) {  in GetNextInternal()
  247  if (*end_of_sequence) {  in GetNextInternal()
  449  bool* end_of_sequence) override {  in GetNextInternal() argument
  451  *end_of_sequence = false;  in GetNextInternal()
  474  *end_of_sequence = true;  in GetNextInternal()
  718  bool* end_of_sequence) override {  in GetNextInternal() argument
  [all …]
|
D | zip_dataset_op.cc |
  128  bool* end_of_sequence) override {  in GetNextInternal() argument
  131  *end_of_sequence = true;  in GetNextInternal()
  139  input_impl->GetNext(ctx, &input_tensors, end_of_sequence));  in GetNextInternal()
  140  if (*end_of_sequence) {  in GetNextInternal()
  146  if (*end_of_sequence) {  in GetNextInternal()
|
D | concatenate_dataset_op.cc |
  121  bool* end_of_sequence) override {  in GetNextInternal() argument
  124  *end_of_sequence = true;  in GetNextInternal()
  129  input_impl_->GetNext(ctx, out_tensors, end_of_sequence));  in GetNextInternal()
  130  if (!*end_of_sequence) {  in GetNextInternal()
  138  *end_of_sequence = true;  in GetNextInternal()
|
D | repeat_dataset_op_test.cc |
  165  bool end_of_sequence = false;  in TEST_P() local
  172  &end_of_sequence));  in TEST_P()
  184  EXPECT_FALSE(end_of_sequence);  in TEST_P()
  186  while (!end_of_sequence) {  in TEST_P()
  188  &end_of_sequence));  in TEST_P()
  189  if (!end_of_sequence) {  in TEST_P()
  511  bool end_of_sequence = repeat_dataset->Cardinality() == 0;  in TEST_P() local
  525  &end_of_sequence));  in TEST_P()
  526  if (!end_of_sequence) {  in TEST_P()
  542  EXPECT_FALSE(end_of_sequence);  in TEST_P()
  [all …]
|
D | window_dataset_op.cc |
  144  bool* end_of_sequence) override {  in GetNextInternal() argument
  153  *end_of_sequence = true;  in GetNextInternal()
  160  *end_of_sequence = false;  in GetNextInternal()
  162  i < target_size && !*end_of_sequence; ++i) {  in GetNextInternal()
  165  input_impl_->GetNext(ctx, &element, end_of_sequence);  in GetNextInternal()
  166  if (!*end_of_sequence) {  in GetNextInternal()
  179  DCHECK(*end_of_sequence);  in GetNextInternal()
  224  *end_of_sequence = false;  in GetNextInternal()
|
D | iterator_ops.cc |
  71   bool* end_of_sequence) {  in GetNext() argument
  85   IteratorContext(std::move(params)), out_tensors, end_of_sequence);  in GetNext()
  95   bool* end_of_sequence) {  in GetNext() argument
  96   return GetNext(&ctx, out_tensors, end_of_sequence);  in GetNext()
  602  bool end_of_sequence = false;  in ComputeAsync() local
  605  raw_iterator->GetNext(&iter_ctx, &components, &end_of_sequence);  in ComputeAsync()
  610  if (end_of_sequence) {  in ComputeAsync()
  621  raw_iterator->GetNext(&iter_ctx, &components, &end_of_sequence);  in ComputeAsync()
  626  if (!end_of_sequence) {  in ComputeAsync()
  957  bool end_of_sequence = false;  in ComputeAsync() local
  [all …]
|
D | shard_dataset_op.cc |
  122  bool* end_of_sequence) override {  in GetNextInternal() argument
  126  *end_of_sequence = true;  in GetNextInternal()
  134  input_impl_->GetNext(ctx, &result, end_of_sequence));  in GetNextInternal()
  135  if (*end_of_sequence) {  in GetNextInternal()
|
/external/tensorflow/tensorflow/core/kernels/data/experimental/ |
D | take_while_dataset_op.cc |
  59   const std::vector<Tensor>& args, bool* end_of_sequence) {  in MakeDataset() argument
  69   *end_of_sequence = !result[0].scalar<bool>()();  in MakeDataset()
  77   const std::vector<Tensor>& args, bool* end_of_sequence) {  in MakeDataset() argument
  83   *end_of_sequence = !predicate.scalar<bool>()();  in MakeDataset()
  183  bool* end_of_sequence) override {  in GetNextInternal() argument
  187  *end_of_sequence = true;  in GetNextInternal()
  191  input_impl_->GetNext(ctx, out_tensors, end_of_sequence));  in GetNextInternal()
  193  if (*end_of_sequence) {  in GetNextInternal()
  199  end_of_sequence);  in GetNextInternal()
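
take_while is the one op in this list that derives end_of_sequence from user code rather than from input exhaustion: the captured predicate runs on each element and its boolean output is negated into the flag (lines 69 and 83). A small sketch of that check, assuming result already holds the predicate's output; the error message is illustrative:

    // result holds the output of the user-supplied predicate for one element.
    if (result.size() != 1 || result[0].dtype() != DT_BOOL ||
        result[0].NumElements() != 1) {
      return errors::InvalidArgument("predicate must return a scalar bool");
    }
    // A false predicate ends the sequence; a true one lets the element through.
    *end_of_sequence = !result[0].scalar<bool>()();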
|
D | unbatch_dataset_op.cc |
  99   bool* end_of_sequence) override {  in GetNextInternal() argument
  102  *end_of_sequence = true;  in GetNextInternal()
  105  *end_of_sequence = false;  in GetNextInternal()
  106  while (!*end_of_sequence) {  in GetNextInternal()
  117  *end_of_sequence = false;  in GetNextInternal()
  124  input_impl_->GetNext(ctx, &tensors_, end_of_sequence));  in GetNextInternal()
  125  if (!*end_of_sequence) {  in GetNextInternal()
|
D | ignore_errors_dataset_op.cc |
  87   bool* end_of_sequence) override {  in GetNextInternal() argument
  91   *end_of_sequence = true;  in GetNextInternal()
  94   Status s = input_impl_->GetNext(ctx, out_tensors, end_of_sequence);  in GetNextInternal()
  97   s = input_impl_->GetNext(ctx, out_tensors, end_of_sequence);  in GetNextInternal()
  100  if (*end_of_sequence) {  in GetNextInternal()
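
ignore_errors shows the retry shape: a GetNext that fails is simply retried on the next element until a value comes back clean or the input ends. A plausible reading of the matches (a sketch, not the literal source):

    Status s = input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
    while (!s.ok()) {
      // Drop any partial output from the failed element and try the next one.
      out_tensors->clear();
      s = input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
    }
    if (*end_of_sequence) {
      input_impl_.reset();  // Release the input once it is exhausted.
    }
    return Status::OK();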
|
D | sliding_window_dataset_op.cc |
  143  bool* end_of_sequence) override {  in GetNextInternal() argument
  151  *end_of_sequence = true;  in GetNextInternal()
  158  *end_of_sequence = false;  in GetNextInternal()
  159  for (size_t i = buffer_.size(); i < target_size && !*end_of_sequence;  in GetNextInternal()
  163  input_impl_->GetNext(ctx, &element, end_of_sequence));  in GetNextInternal()
  164  if (!*end_of_sequence) {  in GetNextInternal()
  173  DCHECK(*end_of_sequence);  in GetNextInternal()
  228  *end_of_sequence = false;  in GetNextInternal()
|
D | to_tf_record_op.cc |
  83   bool end_of_sequence;  in ComputeAsync() local
  86   ctx, iterator->GetNext(&iter_ctx, &components, &end_of_sequence),  in ComputeAsync()
  89   if (!end_of_sequence) {  in ComputeAsync()
  94   } while (!end_of_sequence);  in ComputeAsync()
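
ToTFRecordOp is a plain consumer of the iterator: it drives GetNext in a do/while loop and writes each element until end_of_sequence flips to true. Approximately (writer is assumed to be a RecordWriter the kernel opened earlier; the scalar string type alias varies across TF versions):

    bool end_of_sequence;
    do {
      std::vector<Tensor> components;
      OP_REQUIRES_OK_ASYNC(
          ctx, iterator->GetNext(&iter_ctx, &components, &end_of_sequence),
          done);
      if (!end_of_sequence) {
        // Each element is expected to be a single scalar string record.
        OP_REQUIRES_OK_ASYNC(
            ctx, writer->WriteRecord(components[0].scalar<string>()()), done);
      }
    } while (!end_of_sequence);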
|
D | parallel_interleave_dataset_op.cc |
  265  bool* end_of_sequence) override {  in GetNextInternal() argument
  299  *end_of_sequence = false;  in GetNextInternal()
  348  *end_of_sequence = true;  in GetNextInternal()
  568  bool end_of_sequence = false;  member
  744  bool end_of_sequence = false;  in WorkerThread() local
  745  while (!end_of_sequence) {  in WorkerThread()
  753  !worker_thread_states_[thread_index].end_of_sequence) {  in WorkerThread()
  759  &worker_thread_states_[thread_index].end_of_sequence);  in WorkerThread()
  760  end_of_sequence =  in WorkerThread()
  761  worker_thread_states_[thread_index].end_of_sequence;  in WorkerThread()
  [all …]
|
D | stats_dataset_ops.cc |
  104  bool* end_of_sequence) override {  in GetNextInternal() argument
  107  Status s = input_impl_->GetNext(ctx, out_tensors, end_of_sequence);  in GetNextInternal()
  110  if (stats_aggregator && !*end_of_sequence) {  in GetNextInternal()
  214  bool* end_of_sequence) override {  in GetNextInternal() argument
  216  Status s = input_impl_->GetNext(ctx, out_tensors, end_of_sequence);  in GetNextInternal()
  218  if (stats_aggregator && s.ok() && !*end_of_sequence) {  in GetNextInternal()
|
D | dense_to_sparse_batch_dataset_op.cc |
  158  bool* end_of_sequence) override {  in GetNextInternal() argument
  183  *end_of_sequence = false;  in GetNextInternal()
  186  !*end_of_sequence;  in GetNextInternal()
  190  end_of_sequence));  in GetNextInternal()
  191  if (!*end_of_sequence) {  in GetNextInternal()
  225  DCHECK(*end_of_sequence);  in GetNextInternal()
  278  *end_of_sequence = false;  in GetNextInternal()
|
D | directed_interleave_dataset_op.cc |
  153  bool* end_of_sequence) override {  in GetNextInternal() argument
  156  *end_of_sequence = true;  in GetNextInternal()
  162  *end_of_sequence = false;  in GetNextInternal()
  164  ctx, &selector_result, end_of_sequence));  in GetNextInternal()
  165  if (*end_of_sequence) {  in GetNextInternal()
  194  *end_of_sequence = true;  in GetNextInternal()
|