/external/tensorflow/tensorflow/python/keras/

model_subclassing_test.py
    49: def __init__(self, use_bn=False, use_dp=False, num_classes=10):
    53: self.num_classes = num_classes
    56: self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
    73: def __init__(self, num_classes=10):
    75: self.num_classes = num_classes
    79: self.dense1 = keras.layers.Dense(num_classes, activation='softmax')
    89: def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
    93: self.num_classes = num_classes
    96: self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax')
    97: self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax')
    [all …]
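Every subclassed model in this test stores num_classes in __init__ and uses it to size its final Dense layer. A minimal sketch of that pattern, assuming tf.keras is importable (the class name is made up for illustration, not taken from the test file):

    import tensorflow as tf

    class SmallSubclassModel(tf.keras.Model):
      """Minimal subclassed model sized by num_classes, mirroring the tests above."""

      def __init__(self, num_classes=10):
        super(SmallSubclassModel, self).__init__()
        self.num_classes = num_classes
        self.dense1 = tf.keras.layers.Dense(32, activation='relu')
        self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

      def call(self, inputs):
        return self.dense2(self.dense1(inputs))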
testing_utils.py
    43: num_classes,
    51: num_classes: Integer, number of classes for the data and targets.
    60: templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
    61: y = np.random.randint(0, num_classes, size=(num_sample,))
    288: def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
    295: activation = 'sigmoid' if num_classes == 1 else 'softmax'
    296: model.add(keras.layers.Dense(num_classes, activation=activation))
    300: def get_small_functional_mlp(num_hidden, num_classes, input_dim):
    303: activation = 'sigmoid' if num_classes == 1 else 'softmax'
    304: outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
    [all …]
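testing_utils.py uses num_classes in two ways: to synthesize labeled test data around one random template per class, and to size the output layer of the small helper MLPs (a single sigmoid unit when num_classes == 1, otherwise a softmax). A rough NumPy/Keras sketch of both, where the helper names and the Gaussian noise around the templates are my assumptions rather than the actual implementation:

    import numpy as np
    from tensorflow import keras

    def make_test_data(num_samples, num_classes, input_shape):
      # One random template per class, then noisy samples drawn around it,
      # following the template/label lines shown above.
      templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
      y = np.random.randint(0, num_classes, size=(num_samples,))
      x = templates[y] + np.random.normal(scale=1.0, size=(num_samples,) + input_shape)
      return x, y

    def small_sequential_mlp(num_hidden, num_classes, input_dim):
      # Binary targets get a single sigmoid unit; otherwise a softmax over classes.
      activation = 'sigmoid' if num_classes == 1 else 'softmax'
      return keras.Sequential([
          keras.layers.Dense(num_hidden, activation='relu', input_dim=input_dim),
          keras.layers.Dense(num_classes, activation=activation),
      ])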
callbacks_v1_test.py
    38: NUM_CLASSES = 2
    57: num_classes=NUM_CLASSES)
    85: model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    166: num_classes=NUM_CLASSES)
    192: output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
    193: output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
    270: num_classes=NUM_CLASSES)
    281: model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    321: num_hidden=10, num_classes=10, input_dim=100)
    366: num_classes=NUM_CLASSES)
    [all …]
/external/tensorflow/tensorflow/contrib/slim/python/slim/nets/

inception_v3_test.py
    41: num_classes = 1000
    44: logits, end_points = inception_v3.inception_v3(inputs, num_classes)
    47: [batch_size, num_classes])
    50: [batch_size, num_classes])
    135: num_classes = 1000
    138: _, end_points = inception_v3.inception_v3(inputs, num_classes)
    142: [batch_size, num_classes])
    146: [batch_size, num_classes])
    159: num_classes = 1000
    162: _, end_points = inception_v3.inception_v3(inputs, num_classes)
    [all …]

vgg_test.py
    36: num_classes = 1000
    39: logits, _ = vgg.vgg_a(inputs, num_classes)
    42: [batch_size, num_classes])
    47: num_classes = 1000
    50: logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
    53: [batch_size, 2, 2, num_classes])
    58: num_classes = 1000
    62: _, end_points = vgg.vgg_a(inputs, num_classes, is_training=is_training)
    75: num_classes = 1000
    78: vgg.vgg_a(inputs, num_classes)
    [all …]

inception_v2_test.py
    41: num_classes = 1000
    44: logits, end_points = inception_v2.inception_v2(inputs, num_classes)
    47: [batch_size, num_classes])
    50: [batch_size, num_classes])
    129: num_classes = 1000
    132: _, end_points = inception_v2.inception_v2(inputs, num_classes)
    140: inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5)
    150: num_classes = 1000
    153: _, end_points = inception_v2.inception_v2(inputs, num_classes)
    161: inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)
    [all …]

overfeat_test.py
    35: num_classes = 1000
    38: logits, _ = overfeat.overfeat(inputs, num_classes)
    41: [batch_size, num_classes])
    46: num_classes = 1000
    49: logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
    52: [batch_size, 2, 2, num_classes])
    57: num_classes = 1000
    60: _, end_points = overfeat.overfeat(inputs, num_classes)
    72: num_classes = 1000
    75: overfeat.overfeat(inputs, num_classes)
    [all …]

alexnet_test.py
    35: num_classes = 1000
    38: logits, _ = alexnet.alexnet_v2(inputs, num_classes)
    41: [batch_size, num_classes])
    46: num_classes = 1000
    49: logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
    52: [batch_size, 4, 7, num_classes])
    57: num_classes = 1000
    60: _, end_points = alexnet.alexnet_v2(inputs, num_classes)
    72: num_classes = 1000
    75: alexnet.alexnet_v2(inputs, num_classes)
    [all …]

inception_v1_test.py
    41: num_classes = 1000
    44: logits, end_points = inception_v1.inception_v1(inputs, num_classes)
    47: [batch_size, num_classes])
    50: [batch_size, num_classes])
    144: num_classes = 1000
    149: logits, end_points = inception_v1.inception_v1(inputs, num_classes)
    152: [batch_size, num_classes])
    162: num_classes = 1000
    165: logits, _ = inception_v1.inception_v1(inputs, num_classes)
    167: self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
    [all …]
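The six slim test files above all check the same contract: the net constructor takes (inputs, num_classes) and the logits it returns end in a num_classes dimension (or keep a spatial [batch, H, W, num_classes] shape when spatial_squeeze=False). A minimal sketch of that static shape check, assuming a TensorFlow 1.x checkout where tensorflow.contrib.slim and these nets are still present:

    import tensorflow as tf
    from tensorflow.contrib.slim.python.slim.nets import vgg

    batch_size, height, width = 5, 224, 224
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, _ = vgg.vgg_a(inputs, num_classes)

    # The classifier head emits one logit per class for every example.
    assert logits.get_shape().as_list() == [batch_size, num_classes]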
/external/google-fruit/extras/benchmark/tables/

fruit_wiki.yml
    10: dimension: "num_classes"
    92: num_classes: 100
    111: num_classes: 1000
    130: num_classes: 100
    149: num_classes: 1000
    168: num_classes: 100
    187: num_classes: 1000
    206: num_classes: 100
    225: num_classes: 1000
    244: num_classes: 100
    [all …]
/external/tensorflow/tensorflow/contrib/tensor_forest/kernels/v4/

stat_utils.cc
    24: // num_classes for smoothing each class, then Gini looks more like this:
    33: float GiniImpurity(const LeafStat& stats, int32 num_classes) {
    34: const float smoothed_sum = num_classes + stats.weight_sum();
    36: 2 * stats.weight_sum() + num_classes) /
    40: float WeightedGiniImpurity(const LeafStat& stats, int32 num_classes) {
    41: return stats.weight_sum() * GiniImpurity(stats, num_classes);
    74: float SmoothedGini(float sum, float square, int num_classes) {
    76: const float smoothed_sum = num_classes + sum;
    77: return 1.0 - (square + 2 * sum + num_classes) / (smoothed_sum * smoothed_sum);
    80: float WeightedSmoothedGini(float sum, float square, int num_classes) {
    [all …]
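SmoothedGini at lines 74-77 applies add-one smoothing: each of the num_classes classes gets one extra unit of weight before the usual 1 - sum(p^2) Gini computation, which is why num_classes shows up in both the numerator and the smoothed total. A small Python sketch of the same arithmetic (the helper name is mine, not from the file):

    def smoothed_gini(class_weights):
        """Gini impurity with +1 smoothing per class, mirroring SmoothedGini()."""
        num_classes = len(class_weights)
        total = sum(class_weights)                   # "sum" in the C++ signature
        square = sum(w * w for w in class_weights)   # "square" in the C++ signature
        smoothed_sum = num_classes + total
        # sum((w_i + 1)^2) expands to square + 2 * total + num_classes.
        return 1.0 - (square + 2 * total + num_classes) / (smoothed_sum * smoothed_sum)

    # Example: a pure leaf still has nonzero smoothed impurity.
    print(smoothed_gini([10.0, 0.0]))  # ~0.153 rather than 0.0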
/external/tensorflow/tensorflow/python/ops/

confusion_matrix.py
    96: num_classes=None,
    108: If `num_classes` is `None`, then `num_classes` will be set to one plus the
    110: start at 0. For example, if `num_classes` is 3, then the possible labels
    133: num_classes: The possible number of labels the classification task can
    151: (predictions, labels, num_classes, weights)) as name:
    169: if num_classes is None:
    170: num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
    173: num_classes_int64 = math_ops.cast(num_classes, dtypes.int64)
    189: shape = array_ops.stack([num_classes, num_classes])
    206: num_classes=None,
    [all …]
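Lines 169-189 implement the behaviour the docstring describes: when num_classes is None it is inferred as one plus the larger of the maximum label and the maximum prediction, and the result is a [num_classes, num_classes] matrix with rows indexed by true labels and columns by predictions. A NumPy sketch of that inference and accumulation (a standalone illustration, not the TensorFlow op itself):

    import numpy as np

    def confusion_matrix(labels, predictions, num_classes=None):
        labels = np.asarray(labels)
        predictions = np.asarray(predictions)
        if num_classes is None:
            # One plus the maximum value seen in either argument.
            num_classes = int(max(labels.max(), predictions.max())) + 1
        cm = np.zeros((num_classes, num_classes), dtype=np.int64)
        # Rows index true labels, columns index predictions.
        np.add.at(cm, (labels, predictions), 1)
        return cm

    print(confusion_matrix([1, 2, 4], [2, 2, 4]))  # 5x5 matrix, ones at (1,2), (2,2), (4,4)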
nn_test.py
    507: def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels,
    514: num_classes: An int. The number of embedding classes in the test case.
    519: sampled: A list of indices in [0, num_classes).
    525: of shape [num_classes, dim]
    527: of shape [num_classes].
    537: weights = np.random.randn(num_classes, dim).astype(np.float32)
    538: biases = np.random.randn(num_classes).astype(np.float32)
    593: num_classes = 5
    598: low=0, high=num_classes, size=batch_size * num_true)
    601: num_classes=num_classes,
    [all …]
/external/tensorflow/tensorflow/contrib/nn/python/ops/

sampling_ops.py
    116: num_classes,
    175: weights: A `Tensor` or `PartitionedVariable` of shape `[num_classes, dim]`,
    177: has shape [num_classes, dim]. The (possibly-sharded) class embeddings.
    178: biases: A `Tensor` or `PartitionedVariable` of shape `[num_classes]`.
    189: num_classes: An `int`. The number of possible classes.
    209: if num_sampled > num_classes:
    210: raise ValueError("num_sampled ({}) cannot be greater than num_classes ({})".
    211: format(num_sampled, num_classes))
    227: range_max=num_classes)
    239: num_classes=num_classes,
    [all …]
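In this sampled-softmax helper, num_classes is both the size of the class embedding tables (weights is [num_classes, dim], biases is [num_classes]) and the range_max for candidate sampling, which is why num_sampled may not exceed it. A toy NumPy sketch of that validation plus drawing negative candidates from [0, num_classes); the uniform np.random.choice here stands in for the log-uniform candidate sampler the real code uses:

    import numpy as np

    def sample_candidates(num_sampled, num_classes, rng=np.random):
        # Mirrors the guard at lines 209-211 before any candidates are drawn.
        if num_sampled > num_classes:
            raise ValueError(
                "num_sampled ({}) cannot be greater than num_classes ({})".format(
                    num_sampled, num_classes))
        # Distinct candidate class ids from [0, num_classes).
        return rng.choice(num_classes, size=num_sampled, replace=False)

    print(sample_candidates(num_sampled=4, num_classes=10))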
/external/tensorflow/tensorflow/python/keras/utils/

np_utils_test.py
    30: num_classes = 5
    32: expected_shapes = [(1, num_classes),
    33: (3, num_classes),
    34: (4, 3, num_classes),
    35: (5, 4, 3, num_classes),
    36: (3, num_classes)]
    37: labels = [np.random.randint(0, num_classes, shape) for shape in shapes]
    39: keras.utils.to_categorical(label, num_classes) for label in labels]
np_utils.py
    25: def to_categorical(y, num_classes=None, dtype='float32'):
    32: (integers from 0 to num_classes).
    33: num_classes: total number of classes.
    45: if not num_classes:
    46: num_classes = np.max(y) + 1
    48: categorical = np.zeros((n, num_classes), dtype=dtype)
    50: output_shape = input_shape + (num_classes,)
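to_categorical infers num_classes as np.max(y) + 1 when it is not given, flattens the input, scatters ones into an (n, num_classes) matrix, and restores the original shape with a trailing class axis. A compact NumPy sketch of that behaviour (standalone, not the Keras implementation itself):

    import numpy as np

    def to_categorical(y, num_classes=None, dtype='float32'):
        y = np.asarray(y, dtype='int64')
        input_shape = y.shape
        y = y.ravel()
        if not num_classes:
            num_classes = np.max(y) + 1
        n = y.shape[0]
        categorical = np.zeros((n, num_classes), dtype=dtype)
        categorical[np.arange(n), y] = 1
        # Original shape plus a trailing axis of length num_classes.
        return categorical.reshape(input_shape + (num_classes,))

    print(to_categorical([0, 2, 1], num_classes=3))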
/external/tensorflow/tensorflow/core/util/ctc/

ctc_loss_calculator.h
    94: int num_classes, const Vector& seq_len,
    123: auto num_classes = inputs[0].cols();
    136: if (inputs[t].cols() != num_classes) {
    138: " to be: ", num_classes,
    161: batch_size, num_classes, seq_len, labels, &max_u_prime, &l_primes);
    167: auto ComputeLossAndGradients = [this, num_classes, &labels, &l_primes,
    196: Matrix y(num_classes, seq_len(b));
    206: // y, prob are in num_classes x seq_len(b)
    262: max_seq_len * num_classes *
    264: max_seq_len * 2 * (2 * num_classes + 1) *
    [all …]
/external/tensorflow/tensorflow/contrib/tensor_forest/kernels/

tree_utils_test.cc
    98: const int32 num_classes = 4;
    102: {num_accumulators, num_classes});
    108: {num_accumulators, num_splits, num_classes});
    116: const int32 num_classes = 4;
    121: {num_accumulators, num_classes});
    127: {num_accumulators, num_splits, num_classes});
    135: const int32 num_classes = 4;
    139: {num_accumulators, num_classes});
    143: {num_accumulators, num_classes});
    150: {num_accumulators, num_splits, num_classes});
    [all …]
tree_utils.cc
    68: const Eigen::Tensor<float, 1, Eigen::RowMajor>& rights, int32 num_classes,
    72: // count vector is num_classes + 1.
    73: offsets[0] = i * (num_classes + 1) + 1;
    75: extents[0] = num_classes;
    86: const int32 num_classes =
    106: std::bind(ClassificationSplitScore, splits, rights, num_classes,
    238: const int32 num_classes =
    246: float denom = static_cast<float>(n) + static_cast<float>(num_classes);
    248: weights->resize(num_classes * 2);
    249: for (int i = 0; i < num_classes; i++) {
    [all …]
/external/tensorflow/tensorflow/core/kernels/

multinomial_op_gpu.cu.cc
    42: __global__ void MultinomialKernel(int32 nthreads, const int32 num_classes,
    46: const int maxima_idx = index / num_classes;
    50: static_cast<UnsignedOutputType>(index % num_classes));
    62: int num_classes, int num_samples,
    74: bsc.set(2, num_classes);
    78: boc.set(2, num_classes);
    83: Eigen::array<int, 3> bsc{batch_size, num_samples, num_classes};
    84: Eigen::array<int, 3> boc{batch_size, 1, num_classes};
    106: /*in_dim1=*/num_classes, /*in_dim2=*/1, /*out_rank=*/1,
    112: const int32 work_items = batch_size * num_samples * num_classes;
    [all …]
multinomial_op.cc
    51: int num_classes, int num_samples,
    77: int num_classes, int num_samples,
    86: auto DoWork = [ctx, num_samples, num_classes, &gen, &output, &logits](
    99: ctx->allocate_temp(DT_DOUBLE, TensorShape({num_classes}),
    107: for (int64 j = 0; j < num_classes; ++j) {
    119: for (int64 j = 0; j < num_classes; ++j) {
    127: const double* cdf_end = cdf.data() + num_classes;
    137: 50 * (num_samples * std::log(num_classes) / std::log(2) + num_classes);
    176: const int num_classes = static_cast<int>(logits_t.dim_size(1));
    177: OP_REQUIRES(ctx, num_classes > 0,
    [all …]
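The CPU kernel reads num_classes from the second dimension of the logits, requires it to be positive, builds a per-row CDF of length num_classes, and inverts that CDF for each sample (lines 99-127 above). A NumPy sketch of the same idea, written as a plain function rather than the TensorFlow op (the real kernel uses its own RNG and parallelizes the work):

    import numpy as np

    def multinomial_sample(logits, num_samples, rng=np.random):
        """Draw class ids from a [batch_size, num_classes] matrix of logits."""
        logits = np.asarray(logits, dtype=np.float64)
        batch_size, num_classes = logits.shape
        assert num_classes > 0, "num_classes should be positive"
        # Softmax per row, then a running CDF over the classes.
        probs = np.exp(logits - logits.max(axis=1, keepdims=True))
        probs /= probs.sum(axis=1, keepdims=True)
        cdf = np.cumsum(probs, axis=1)
        out = np.empty((batch_size, num_samples), dtype=np.int64)
        for b in range(batch_size):
            # Invert the CDF: first class whose cumulative mass exceeds u.
            u = rng.random_sample(num_samples)
            out[b] = np.minimum(np.searchsorted(cdf[b], u, side='right'),
                                num_classes - 1)
        return out

    print(multinomial_sample([[0.0, 0.0, 5.0]], num_samples=8))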
/external/tensorflow/tensorflow/python/kernel_tests/random/

multinomial_op_test.py
    154: logits: Numpy ndarray of shape [batch_size, num_classes].
    156: sampler: A sampler function that takes (1) a [batch_size, num_classes]
    160: Frequencies from sampled classes; shape [batch_size, num_classes].
    167: batch_size, num_classes = logits.shape
    173: self.assertLess(max(cnts.keys()), num_classes)
    177: for k in range(num_classes)]
    203: with self.assertRaisesOpError("num_classes should be positive"):
    216: def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
    218: shape = [batch_size, num_classes]
    247: for num_classes in [10000, 100000]:
    [all …]
/external/tensorflow/tensorflow/contrib/boosted_trees/estimator_batch/

estimator_test.py
    146: learner_config.num_classes = 2
    165: learner_config.num_classes = 2
    187: learner_config.num_classes = 2
    212: learner_config.num_classes = 2
    232: learner_config.num_classes = 2
    252: learner_config.num_classes = 2
    280: learner_config.num_classes = 2
    300: learner_config.num_classes = 2
    320: learner_config.num_classes = 3
    330: n_classes=learner_config.num_classes,
    [all …]
/external/google-fruit/extras/benchmark/suites/

fruit_full.yml
    24: num_classes: &num_classes
    48: num_classes: *num_classes
    62: num_classes: *num_classes
/external/tensorflow/tensorflow/python/keras/engine/

sequential_test.py
    67: num_classes = 2
    70: num_hidden, num_classes, input_dim)
    76: y = np.random.random((batch_size, num_classes))
    105: num_classes = 2
    107: model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    118: y = np.random.random((batch_size, num_classes))
    128: num_classes = 2
    132: model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    143: y = array_ops.zeros((num_samples, num_classes))
    227: num_classes = 2
    [all …]