/external/tensorflow/tensorflow/contrib/boosted_trees/python/ops/

  stats_accumulator_ops.py
      44: (stamp_token, num_updates, partition_ids, feature_ids, gradients,
      51: saver.BaseSaverBuilder.SaveSpec(partition_ids, slice_spec,
      72: def deserialize(self, stamp_token, num_updates, partition_ids, feature_ids,  [argument]
      77: self._resource_handle, stamp_token, num_updates, partition_ids,
      81: self._resource_handle, stamp_token, num_updates, partition_ids,
      99: partition_ids=restored_tensors[2],
     196: def add(self, stamp_token, partition_ids, feature_ids, gradients, hessians):  [argument]
     198: partition_ids, feature_ids, gradients, hessians = (self._make_summary(
     199: partition_ids, feature_ids, gradients, hessians))
     202: [self.resource_handle], stamp_token, [partition_ids], [feature_ids],
     [all …]
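The matches above show the accumulator's Python surface: add(stamp_token, partition_ids, feature_ids, gradients, hessians) feeds per-example statistics in, and serialize/deserialize round-trip the state through a saver. A minimal pure-Python model of the scalar accumulation step, assuming stats are keyed by (partition_id, feature_id) and that updates with a stale stamp token are dropped (class and field names here are illustrative, not the op's internals):

    from collections import defaultdict

    class ScalarStatsAccumulator:
        """Toy model of the boosted_trees stats accumulator (scalar case)."""

        def __init__(self, stamp_token=0):
            self.stamp_token = stamp_token
            self.num_updates = 0
            # Stats keyed by (partition_id, feature_id), mirroring the
            # PartitionKey used by the C++ kernel.
            self._stats = defaultdict(lambda: [0.0, 0.0])  # [grad_sum, hess_sum]

        def add(self, stamp_token, partition_ids, feature_ids, gradients, hessians):
            if stamp_token != self.stamp_token:
                return  # stale update against a stamped resource: ignored
            for p, f, g, h in zip(partition_ids, feature_ids, gradients, hessians):
                entry = self._stats[(p, f)]
                entry[0] += g
                entry[1] += h
            self.num_updates += 1

    acc = ScalarStatsAccumulator()
    acc.add(0, partition_ids=[1, 2, 1], feature_ids=[2, 3, 2],
            gradients=[0.1, 0.3, 0.1], hessians=[0.2, 0.4, 0.2])
    print(acc._stats[(1, 2)])  # [0.2, 0.4]: stats for the same key are summed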
/external/tensorflow/tensorflow/contrib/boosted_trees/python/kernel_tests/

  split_handler_ops_test.py
      42: partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32)
      51: partition_ids=partition_ids,
     115: partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32)
     125: partition_ids=partition_ids,
     157: partition_ids = array_ops.constant([], dtype=dtypes.int32)
     165: partition_ids=partition_ids,
     194: partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32)
     206: partition_ids=partition_ids,
     282: partition_ids = array_ops.constant([0, 1], dtype=dtypes.int32)
     291: partition_ids=partition_ids,
     [all …]
  stats_accumulator_ops_test.py
      40: partition_ids=[1, 2],
      68: partition_ids=[1, 2, 1],
      97: partition_ids=[1, 2],
     103: partition_ids=[1],
     129: partition_ids=[1, 2],
     171: partition_ids=[1, 2],
     181: partition_ids=[3, 4],
     205: partition_ids=[1, 2, 1],
     229: partition_ids=[1, 2],
     238: partition_ids=[1],
     [all …]
  training_ops_test.py
      344: partition_ids=[
      458: partition_ids=[
      610: partition_ids=[
      796: partition_ids=[
      936: partition_ids=[handler1_partitions, handler2_partitions],
     1006: partition_ids=[
     1111: partition_ids=[handler1_partitions, handler2_partitions],
     1194: partition_ids=[handler1_partitions],
     1262: partition_ids=[handler1_partitions, handler2_partitions],
     1343: partition_ids=[handler1_partitions],
     [all …]
/external/tensorflow/tensorflow/contrib/boosted_trees/kernels/

  split_handler_ops.cc
     152: const auto& partition_ids = partition_ids_t->vec<int32>();  [in Compute(), local]
     172: for (int i = 1; i < partition_ids.size(); ++i) {  [in Compute()]
     173: if (partition_ids(i) != partition_ids(i - 1)) {  [in Compute()]
     175: OP_REQUIRES(context, partition_ids(i) >= partition_ids(i - 1),  [in Compute()]
     180: if (partition_ids.size() > 0) {  [in Compute()]
     181: partition_boundaries.push_back(partition_ids.size());  [in Compute()]
     227: bucket_boundaries, partition_ids, bucket_ids, gradients_t,  [in Compute()]
     234: bucket_boundaries, partition_ids, bucket_ids, gradients_t,  [in Compute()]
     246: const tensorflow::TTypes<int32>::ConstVec& partition_ids,  [in ComputeNormalDecisionTree(), argument]
     299: (*output_partition_ids)(root_idx) = partition_ids(start_index);  [in ComputeNormalDecisionTree()]
     [all …]
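The Compute() lines above enforce that partition_ids arrives sorted ascending and record the index where each partition's run of rows begins, with the vector length pushed as a final sentinel (line 181). A small Python sketch of that boundary scan, grounded in the lines shown but illustrative rather than the kernel itself:

    def partition_boundaries(partition_ids):
        """Start index of each partition run, plus a final sentinel.

        Assumes partition_ids is sorted ascending, as the kernel requires.
        """
        boundaries = []
        if partition_ids:
            boundaries.append(0)
        for i in range(1, len(partition_ids)):
            if partition_ids[i] < partition_ids[i - 1]:
                raise ValueError("partition_ids must be sorted")  # OP_REQUIRES analogue
            if partition_ids[i] != partition_ids[i - 1]:
                boundaries.append(i)  # a new partition starts here
        if partition_ids:
            boundaries.append(len(partition_ids))  # sentinel, as on line 181
        return boundaries

    print(partition_boundaries([0, 0, 0, 1, 1]))  # [0, 3, 5]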
  stats_accumulator_ops.cc
     140: auto partition_ids = partition_ids_t->vec<int32>();  [in SerializeScalarAccumulatorToOutput(), local]
     163: partition_ids(i) = iter.first.partition_id;  [in SerializeScalarAccumulatorToOutput()]
     181: auto partition_ids = partition_ids_t->vec<int32>();  [in SerializeTensorAccumulatorToOutput(), local]
     208: partition_ids(i) = iter.first.partition_id;  [in SerializeTensorAccumulatorToOutput()]
     229: const auto& partition_ids = partition_ids_t.vec<int32>();  [in AddToScalarAccumulator(), local]
     238: PartitionKey(partition_ids(i), feature_ids_and_dimensions(i, 0),  [in AddToScalarAccumulator()]
     274: const auto& partition_ids = partition_ids_t.vec<int32>();  [in AddToTensorAccumulator(), local]
     301: PartitionKey(partition_ids(i), feature_ids_and_dimensions(i, 0),  [in AddToTensorAccumulator()]
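SerializeScalarAccumulatorToOutput() walks the accumulator map and writes each PartitionKey component back out into parallel output vectors (line 163). A hedged Python sketch of that flattening, assuming a three-part key of (partition_id, feature_id, dimension) as suggested by feature_ids_and_dimensions above:

    def serialize_accumulator(stats):
        """Flatten {(partition_id, feature_id, dimension): (grad, hess)}
        into parallel arrays, the layout the serialize op emits."""
        partition_ids, feature_ids_and_dimensions = [], []
        gradients, hessians = [], []
        for (partition_id, feature_id, dim), (g, h) in sorted(stats.items()):
            partition_ids.append(partition_id)  # cf. partition_ids(i) = iter.first.partition_id
            feature_ids_and_dimensions.append((feature_id, dim))
            gradients.append(g)
            hessians.append(h)
        return partition_ids, feature_ids_and_dimensions, gradients, hessians

    stats = {(1, 2, 0): (0.2, 0.4), (2, 3, 0): (0.3, 0.5)}
    print(serialize_accumulator(stats)[0])  # [1, 2]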
  training_ops.cc
     448: const auto& partition_ids = partition_ids_list[handler_id].vec<int32>();  [in FindBestSplitsPerPartitionNormal(), local]
     451: OP_REQUIRES(context, partition_ids.size() == gains.size(),  [in FindBestSplitsPerPartitionNormal()]
     454: partition_ids.size(), " != ", gains.size()));  [in FindBestSplitsPerPartitionNormal()]
     455: OP_REQUIRES(context, partition_ids.size() == splits.size(),  [in FindBestSplitsPerPartitionNormal()]
     458: partition_ids.size(), " != ", splits.size()));  [in FindBestSplitsPerPartitionNormal()]
     462: const auto& partition_id = partition_ids(candidate_idx);  [in FindBestSplitsPerPartitionNormal()]
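FindBestSplitsPerPartitionNormal() first checks that each handler reports aligned partition_ids, gains, and splits vectors, then walks the candidates keeping the best one per partition. A minimal sketch of that selection (function and variable names are illustrative):

    def find_best_splits_per_partition(partition_ids, gains, splits):
        """Keep, per partition, the candidate split with the highest gain."""
        if not (len(partition_ids) == len(gains) == len(splits)):
            raise ValueError("partition_ids, gains and splits must align")  # OP_REQUIRES analogue
        best = {}
        for partition_id, gain, split in zip(partition_ids, gains, splits):
            if partition_id not in best or gain > best[partition_id][0]:
                best[partition_id] = (gain, split)
        return best

    print(find_best_splits_per_partition(
        [0, 0, 1], [0.3, 0.7, 0.2], ["f0<=3", "f1<=5", "f0<=1"]))
    # {0: (0.7, 'f1<=5'), 1: (0.2, 'f0<=1')}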
/external/tensorflow/tensorflow/contrib/boosted_trees/lib/learner/batch/

  ordinal_split_handler.py
     270: are_splits_ready, partition_ids, gains, split_infos = (
     278: return are_splits_ready, partition_ids, gains, split_infos
     307: num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
     311: num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
     320: with ops.control_dependencies([flush_quantiles, partition_ids]):
     322: partition_ids, gains, split_infos = (
     326: partition_ids=partition_ids,
     338: return are_splits_ready, partition_ids, gains, split_infos
     450: are_splits_ready, partition_ids, gains, split_infos = (
     457: return are_splits_ready, partition_ids, gains, split_infos
     [all …]
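The ordinal handler flushes accumulated (partition, bucket) statistics and turns them into per-partition threshold splits with gains. As a model of that computation (not the handler's actual code), here is a pure-Python scan over sorted buckets using the standard regularized squared-gradient gain, G^2/(H + l2) for each side minus the unsplit root:

    def best_threshold_split(bucket_stats, l2=1.0):
        """bucket_stats: [(bucket_id, grad_sum, hess_sum)], sorted by bucket.

        Scans cumulative sums left to right; returns (best_gain, bucket_id)
        where bucket_id is the last bucket sent to the left child.
        """
        total_g = sum(g for _, g, _ in bucket_stats)
        total_h = sum(h for _, _, h in bucket_stats)
        root = total_g ** 2 / (total_h + l2)
        best_gain, best_bucket = float("-inf"), None
        left_g = left_h = 0.0
        for bucket_id, g, h in bucket_stats[:-1]:  # last bucket cannot be a threshold
            left_g += g
            left_h += h
            right_g, right_h = total_g - left_g, total_h - left_h
            gain = (left_g ** 2 / (left_h + l2)
                    + right_g ** 2 / (right_h + l2) - root)
            if gain > best_gain:
                best_gain, best_bucket = gain, bucket_id
        return best_gain, best_bucket

    print(best_threshold_split([(0, -4.0, 2.0), (1, -1.0, 2.0), (2, 5.0, 2.0)]))
    # gain ≈ 13.33, threshold after bucket 1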
  categorical_split_handler.py
     165: partition_ids = array_ops.concat(
     177: return (partition_ids, feature_ids_and_dimensions, filtered_gradients,
     180: partition_ids, feature_ids, gradients_out, hessians_out = (
     182: result = self._stats_accumulator.schedule_add(partition_ids, feature_ids,
     190: num_minibatches, partition_ids, feature_ids, gradients, hessians = (
     198: partition_ids, gains, split_infos = (
     201: partition_ids=partition_ids,
     217: return (are_splits_ready, partition_ids, gains, split_infos)
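The categorical handler accumulates per-(partition, feature value) stats and emits equality splits rather than thresholds. A hedged sketch of that selection, reusing the gain form above; names are illustrative:

    def best_equality_split(value_stats, l2=1.0):
        """value_stats: {feature_value: (grad_sum, hess_sum)} for one partition.

        Tries 'feature == value' for each value: that value goes left,
        every other value goes right.
        """
        total_g = sum(g for g, _ in value_stats.values())
        total_h = sum(h for _, h in value_stats.values())
        root = total_g ** 2 / (total_h + l2)
        best_gain, best_value = float("-inf"), None
        for value, (g, h) in value_stats.items():
            rest_g, rest_h = total_g - g, total_h - h
            gain = g ** 2 / (h + l2) + rest_g ** 2 / (rest_h + l2) - root
            if gain > best_gain:
                best_gain, best_value = gain, value
        return best_gain, best_value

    print(best_equality_split({7: (-4.0, 2.0), 9: (1.0, 2.0), 11: (3.0, 2.0)}))
    # gain ≈ 8.53 for 'feature == 7'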
  categorical_split_handler_test.py
      59: partition_ids = [0, 0, 0, 1]
      86: partition_ids,
      95: partition_ids,
     182: partition_ids = [1, 1, 1, 2]
     210: partition_ids,
     219: partition_ids,
     293: partition_ids = [0, 0, 0, 1]
     321: partition_ids,
     330: partition_ids,
     419: partition_ids = [0, 0, 0, 1]
     [all …]
  ordinal_split_handler_test.py
      63: partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
      89: partition_ids,
     103: partition_ids,
     197: partition_ids = array_ops.constant([1, 1, 1, 2], dtype=dtypes.int32)
     224: partition_ids,
     241: partition_ids,
     333: partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
     360: partition_ids,
     374: partition_ids,
     383: partition_ids,
     [all …]
/external/tensorflow/tensorflow/contrib/boosted_trees/python/training/functions/

  gbdt_batch_test.py
     241: partition_ids = array_ops.zeros([4], dtypes.int32)
     251: "partition_ids": partition_ids,
     505: partition_ids = array_ops.zeros([4], dtypes.int32)
     515: "partition_ids": partition_ids,
     609: partition_ids = array_ops.zeros([4], dtypes.int32)
     619: "partition_ids": partition_ids,
     713: partition_ids = array_ops.zeros([4], dtypes.int32)
     723: "partition_ids": partition_ids,
     785: partition_ids = array_ops.zeros([4], dtypes.int32)
     795: "partition_ids": partition_ids,
     [all …]
  gbdt_batch.py
       86: partition_ids,  [argument]
      109: result[PARTITION_IDS] = partition_ids
      527: partition_ids = prediction_ops.gradient_trees_partition_examples(
      538: return _make_predictions_dict(ensemble_stamp, predictions, partition_ids,
      651: partition_ids = predictions_dict[PARTITION_IDS]
      924: ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians,
     1062: partition_ids, gains, split_info) = handler.make_splits(
     1065: partition_ids_list.append(partition_ids)
     1073: partition_ids = array_ops.concat(partition_ids_list, axis=0)
     1099: partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
     [all …]
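gbdt_batch.py collects each handler's partition_ids via make_splits, concatenates them for one batched op, then splits the result back per handler by size (lines 1062-1099 above). The same round-trip sketched with NumPy; the sizes and example data are illustrative:

    import numpy as np

    # One partition_ids vector per split handler, as collected around line 1065.
    partition_ids_list = [np.array([0, 1]), np.array([0]), np.array([1, 2, 2])]
    sizes = [len(p) for p in partition_ids_list]

    # Concatenate for a single batched call (cf. array_ops.concat, line 1073)...
    partition_ids = np.concatenate(partition_ids_list, axis=0)

    # ...then split back per handler (cf. array_ops.split, line 1099).
    # Note array_ops.split takes sizes; np.split takes cut indices, so
    # accumulate the sizes first.
    cuts = np.cumsum(sizes)[:-1]
    restored = np.split(partition_ids, cuts, axis=0)
    assert all((a == b).all() for a, b in zip(restored, partition_ids_list))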