/external/tensorflow/tensorflow/python/keras/tests/ |
D | model_subclassing_compiled_test.py |
    42  num_classes = 2
    47  num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
    55  y = np.zeros((num_samples, num_classes))
    61  num_classes = (2, 3)
    66  num_classes=num_classes, use_dp=True, use_bn=True)
    75  y1 = np.zeros((num_samples, num_classes[0]))
    76  y2 = np.zeros((num_samples, num_classes[1]))
    82  num_classes = 2
    88  num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
    95  y = np.zeros((num_samples, num_classes), dtype=np.float32)
    [all …]
|
D | model_subclassing_test_util.py |
    28  def __init__(self, num_classes=10): argument
    30  self.num_classes = num_classes
    34  self.dense1 = keras.layers.Dense(num_classes, activation='softmax')
    42  def get_multi_io_subclass_model(use_bn=False, use_dp=False, num_classes=(2, 3)): argument
    48  branch_a.append(keras.layers.Dense(num_classes[0], activation='softmax'))
    53  branch_b.append(keras.layers.Dense(num_classes[1], activation='softmax'))
    65  def __init__(self, num_classes=2): argument
    67  self.num_classes = num_classes
    69  self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    72  num_hidden=32, num_classes=4, use_bn=True, use_dp=True)
    [all …]
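The helpers excerpted here build small subclassed models whose final Dense layer is `num_classes` wide. As a rough sketch of the same pattern (class and layer names are illustrative, not the actual test utility):

    import numpy as np
    import tensorflow as tf

    class SmallSubclassModel(tf.keras.Model):
      """Minimal sketch of a subclassed model with a num_classes-way output."""

      def __init__(self, num_classes=10):
        super(SmallSubclassModel, self).__init__()
        self.num_classes = num_classes
        self.dense1 = tf.keras.layers.Dense(32, activation='relu')
        self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

      def call(self, inputs):
        return self.dense2(self.dense1(inputs))

    model = SmallSubclassModel(num_classes=4)
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    x = np.random.random((8, 16)).astype('float32')
    y = np.zeros((8, 4), dtype='float32')
    model.fit(x, y, epochs=1, verbose=0)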
|
D | model_subclassing_test.py |
    134  num_classes = 2
    138  num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
    210  num_classes = 2
    215  num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
    227  num_classes = 2
    232  num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
    244  num_classes = 10
    249  model = model_util.SimpleConvTestModel(num_classes)
    262  num_classes = 10
    267  model = model_util.SimpleConvTestModel(num_classes)
    [all …]
|
/external/google-fruit/extras/benchmark/tables/ |
D | fruit_wiki.yml |
    10  dimension: "num_classes"
    81  num_classes: 100
    107  num_classes: 250
    133  num_classes: 1000
    159  num_classes: 100
    185  num_classes: 250
    211  num_classes: 1000
    237  num_classes: 100
    263  num_classes: 250
    289  num_classes: 1000
    [all …]
|
/external/google-fruit/extras/benchmark/suites/ |
D | fruit_full.yml |
    24  num_classes: &num_classes
    51  num_classes: *num_classes
    62  num_classes: *num_classes
    79  num_classes: *num_classes
    91  num_classes: *num_classes
|
D | debug.yml |
    64  num_classes:
    78  num_classes:
    96  num_classes:
    110  num_classes:
    130  num_classes:
    142  num_classes:
    159  num_classes:
    171  num_classes:
|
D | fruit_mostly_full.yml |
    24  num_classes: &num_classes
    48  num_classes: *num_classes
    59  num_classes: *num_classes
|
D | simple_di_full.yml |
    27  num_classes: &num_classes
    54  num_classes: *num_classes
    67  num_classes: *num_classes
|
/external/tensorflow/tensorflow/python/keras/utils/ |
D | np_utils_test.py |
    30  num_classes = 5
    32  expected_shapes = [(1, num_classes), (3, num_classes), (4, 3, num_classes),
    33  (5, 4, 3, num_classes), (3, num_classes),
    34  (3, 2, num_classes)]
    35  labels = [np.random.randint(0, num_classes, shape) for shape in shapes]
    37  np_utils.to_categorical(label, num_classes) for label in labels]
|
D | np_utils.py |
    25  def to_categorical(y, num_classes=None, dtype='float32'): argument
    32  (integers from 0 to num_classes).
    33  num_classes: total number of classes. If `None`, this would be inferred
    43  >>> a = tf.keras.utils.to_categorical([0, 1, 2, 3], num_classes=4)
    74  if not num_classes:
    75  num_classes = np.max(y) + 1
    77  categorical = np.zeros((n, num_classes), dtype=dtype)
    79  output_shape = input_shape + (num_classes,)
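The excerpt shows the behaviour that matters for `num_classes`: `tf.keras.utils.to_categorical` one-hot encodes integer labels, and when `num_classes` is omitted it is inferred as `np.max(y) + 1`. A minimal usage sketch:

    import numpy as np
    import tensorflow as tf

    labels = np.array([0, 1, 2, 3])
    # Explicit num_classes: one-hot rows of width 4.
    one_hot = tf.keras.utils.to_categorical(labels, num_classes=4)
    print(one_hot.shape)        # (4, 4)

    # num_classes omitted: inferred as max(labels) + 1.
    inferred = tf.keras.utils.to_categorical(np.array([0, 2]))
    print(inferred.shape)       # (2, 3)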
|
/external/tensorflow/tensorflow/python/ops/ |
D | confusion_matrix.py |
    98  num_classes=None, argument
    110  If `num_classes` is `None`, then `num_classes` will be set to one plus the
    112  start at 0. For example, if `num_classes` is 3, then the possible labels
    135  num_classes: The possible number of labels the classification task can
    153  (predictions, labels, num_classes, weights)) as name:
    171  if num_classes is None:
    172  num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
    175  num_classes_int64 = math_ops.cast(num_classes, dtypes.int64)
    191  shape = array_ops.stack([num_classes, num_classes])
    206  num_classes=None, argument
    [all …]
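As the docstring excerpt says, when `num_classes` is `None` it is computed as one plus the maximum value in either `predictions` or `labels`. A short illustration with the public `tf.math.confusion_matrix` wrapper:

    import tensorflow as tf

    labels      = tf.constant([1, 2, 4])
    predictions = tf.constant([2, 2, 4])

    # num_classes inferred as max(labels, predictions) + 1 = 5 -> 5x5 matrix.
    print(tf.math.confusion_matrix(labels, predictions).shape)                  # (5, 5)

    # num_classes given explicitly -> fixed 6x6 matrix.
    print(tf.math.confusion_matrix(labels, predictions, num_classes=6).shape)   # (6, 6)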
|
/external/tensorflow/tensorflow/core/kernels/ |
D | in_topk_op_gpu.cu.cc |
    40  const T* __restrict__ predictions, // dims: [ num_targets x num_classes ] in ComputePredictionMaskKernel()
    42  int64* __restrict__ mask, // dims: [ num_targets x num_classes ] in ComputePredictionMaskKernel()
    43  int num_targets, int num_classes) { in ComputePredictionMaskKernel() argument
    44  GPU_1D_KERNEL_LOOP(i, num_targets * num_classes) { in ComputePredictionMaskKernel()
    45  const int batch_index = i / num_classes; in ComputePredictionMaskKernel()
    48  if (!FastBoundsCheck(target_idx, num_classes)) { in ComputePredictionMaskKernel()
    55  ldg(predictions + batch_index * num_classes + target_idx); in ComputePredictionMaskKernel()
    96  const Eigen::Index num_classes = predictions.dimension(1); in operator ()() local
    99  context, num_targets * num_classes < std::numeric_limits<int>::max(), in operator ()()
    103  if (num_targets == 0 || num_classes == 0) { in operator ()()
    [all …]
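The GPU kernel walks a [num_targets x num_classes] prediction matrix and bounds-checks each target index against `num_classes`. The same computation is reachable from Python through `tf.math.in_top_k`; a small sketch with toy scores:

    import tensorflow as tf

    # predictions: [batch_size, num_classes] scores; targets: [batch_size] class ids.
    predictions = tf.constant([[0.1, 0.8, 0.1],
                               [0.6, 0.3, 0.1]])
    targets = tf.constant([1, 2])

    # True where the target class is among the top-k scores for that row.
    print(tf.math.in_top_k(targets, predictions, k=1))  # [True, False]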
|
D | multinomial_op_gpu.cu.cc |
    42  __global__ void MultinomialKernel(int32 nthreads, const int32 num_classes, in MultinomialKernel() argument
    48  const int maxima_idx = index / num_classes; in MultinomialKernel()
    52  static_cast<UnsignedOutputType>(index % num_classes)); in MultinomialKernel()
    64  int num_classes, int num_samples, in operator ()()
    77  bsc.set(2, num_classes); in operator ()()
    81  boc.set(2, num_classes); in operator ()()
    86  Eigen::array<int, 3> bsc{batch_size, num_samples, num_classes}; in operator ()()
    87  Eigen::array<int, 3> boc{batch_size, 1, num_classes}; in operator ()()
    110  /*in_dim1=*/num_classes, /*in_dim2=*/1, /*out_rank=*/1, in operator ()()
    116  const int32 work_items = batch_size * num_samples * num_classes; in operator ()()
    [all …]
|
D | multinomial_op.cc |
    51  int num_classes, int num_samples,
    77  int num_classes, int num_samples, in operator ()()
    86  auto DoWork = [ctx, num_samples, num_classes, &gen, &output, &logits]( in operator ()()
    99  ctx->allocate_temp(DT_DOUBLE, TensorShape({num_classes}), in operator ()()
    107  for (int64 j = 0; j < num_classes; ++j) { in operator ()()
    119  for (int64 j = 0; j < num_classes; ++j) { in operator ()()
    127  const double* cdf_end = cdf.data() + num_classes; in operator ()()
    137  50 * (num_samples * std::log(num_classes) / std::log(2) + num_classes); in operator ()()
    176  const int num_classes = static_cast<int>(logits_t.dim_size(1)); in DoCompute() local
    177  OP_REQUIRES(ctx, num_classes > 0, in DoCompute()
    [all …]
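On the CPU path `num_classes` is read from `logits_t.dim_size(1)` and must be positive. From Python the multinomial sampler is exposed as `tf.random.categorical`, which draws class ids in [0, num_classes); a brief sketch:

    import tensorflow as tf

    # logits: [batch_size, num_classes]; here batch_size=2, num_classes=3.
    logits = tf.math.log([[0.5, 0.25, 0.25],
                          [0.1, 0.1, 0.8]])

    # samples: [batch_size, num_samples] with values in [0, num_classes).
    samples = tf.random.categorical(logits, num_samples=5)
    print(samples.shape)  # (2, 5)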
|
D | ctc_decoder_ops.cc |
    208  errors::InvalidArgument("num_classes cannot exceed max int")); in Compute()
    209  const int num_classes = static_cast<const int>(num_classes_raw); in Compute() local
    215  input_list_t.emplace_back(inputs_t.data() + t * batch_size * num_classes, in Compute()
    216  batch_size, num_classes); in Compute()
    223  // Assumption: the blank index is num_classes - 1 in Compute()
    224  int blank_index = num_classes - 1; in Compute()
    246  const int64 kCostPerUnit = 50 * max_time * num_classes; in Compute()
    309  errors::InvalidArgument("num_classes cannot exceed max int")); in Compute()
    310  const int num_classes = static_cast<const int>(num_classes_raw); in Compute() local
    318  input_list_t.emplace_back(inputs_t.data() + t * batch_size * num_classes, in Compute()
    [all …]
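Note the comment in the excerpt: the decoder assumes the blank label is `num_classes - 1`, i.e. the last channel of the logits. A hedged sketch with `tf.nn.ctc_greedy_decoder` and random logits of shape [max_time, batch_size, num_classes]:

    import numpy as np
    import tensorflow as tf

    max_time, batch_size, num_classes = 6, 1, 5  # class index 4 acts as the blank label

    inputs = tf.constant(
        np.random.randn(max_time, batch_size, num_classes).astype(np.float32))
    seq_len = tf.constant([max_time])

    decoded, log_prob = tf.nn.ctc_greedy_decoder(inputs, seq_len)
    # decoded[0] is a SparseTensor of label ids; the blank (num_classes - 1) is excluded.
    print(decoded[0].values.numpy())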
|
/external/tensorflow/tensorflow/python/kernel_tests/random/ |
D | multinomial_op_test.py |
    154  logits: Numpy ndarray of shape [batch_size, num_classes].
    156  sampler: A sampler function that takes (1) a [batch_size, num_classes]
    160  Frequencies from sampled classes; shape [batch_size, num_classes].
    167  batch_size, num_classes = logits.shape
    173  self.assertLess(max(cnts.keys()), num_classes)
    177  for k in range(num_classes)]
    203  with self.assertRaisesOpError("num_classes should be positive"):
    216  def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters): argument
    218  shape = [batch_size, num_classes]
    247  for num_classes in [10000, 100000]:
    [all …]
|
/external/tensorflow/tensorflow/python/keras/ |
D | callbacks_v1_test.py |
    45  NUM_CLASSES = 2 variable
    63  num_classes=NUM_CLASSES)
    91  model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
    171  num_classes=NUM_CLASSES)
    197  output1 = layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
    198  output2 = layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
    274  num_classes=NUM_CLASSES)
    285  model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
    324  num_hidden=10, num_classes=10, input_dim=100)
    369  num_classes=NUM_CLASSES)
    [all …]
|
D | regularizers_test.py |
    35  NUM_CLASSES = 2 variable
    43  model.add(keras.layers.Dense(NUM_CLASSES,
    54  num_classes=NUM_CLASSES)
    55  y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
    56  y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
    137  NUM_CLASSES,
    155  NUM_CLASSES,
    177  NUM_CLASSES,
    184  NUM_CLASSES, kernel_regularizer=regularizer)
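These tests attach a weight regularizer to the final NUM_CLASSES-wide Dense layer. A minimal sketch of that pattern (layer sizes and regularization strength are arbitrary):

    import tensorflow as tf

    NUM_CLASSES = 2
    regularizer = tf.keras.regularizers.l2(0.01)

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(16, activation='relu', input_shape=(10,)),
        tf.keras.layers.Dense(NUM_CLASSES, activation='softmax',
                              kernel_regularizer=regularizer),
    ])
    model.compile(optimizer='sgd', loss='categorical_crossentropy')
    # model.losses now includes the L2 penalty on the last layer's kernel.
    print(model.losses)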
|
D | testing_utils.py |
    65  num_classes, argument
    73  num_classes: Integer, number of classes for the data and targets.
    82  templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
    83  y = np.random.randint(0, num_classes, size=(num_sample,))
    450  def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None): argument
    456  activation = 'sigmoid' if num_classes == 1 else 'softmax'
    457  model.add(layers.Dense(num_classes, activation=activation))
    461  def get_small_functional_mlp(num_hidden, num_classes, input_dim): argument
    464  activation = 'sigmoid' if num_classes == 1 else 'softmax'
    465  outputs = layers.Dense(num_classes, activation=activation)(outputs)
    [all …]
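`get_small_sequential_mlp` chooses a sigmoid head for a single class and softmax otherwise. A sketch written to mirror the excerpt (the real utility may differ in details):

    import tensorflow as tf

    def small_sequential_mlp(num_hidden, num_classes, input_dim=None):
      """Sketch of a tiny MLP whose head width and activation follow num_classes."""
      model = tf.keras.Sequential()
      if input_dim:
        model.add(tf.keras.layers.Dense(num_hidden, activation='relu',
                                        input_dim=input_dim))
      else:
        model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
      activation = 'sigmoid' if num_classes == 1 else 'softmax'
      model.add(tf.keras.layers.Dense(num_classes, activation=activation))
      return model

    binary_model = small_sequential_mlp(num_hidden=32, num_classes=1, input_dim=8)
    multi_model = small_sequential_mlp(num_hidden=32, num_classes=5, input_dim=8)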
|
/external/tensorflow/tensorflow/python/keras/preprocessing/ |
D | image_dataset_test.py |
    56  num_classes=2, argument
    68  for class_index in range(num_classes):
    101  directory = self._prepare_directory(count=7, num_classes=2)
    125  directory = self._prepare_directory(num_classes=2)
    157  directory = self._prepare_directory(num_classes=2)
    173  directory = self._prepare_directory(num_classes=4, count=15)
    185  directory = self._prepare_directory(num_classes=4, count=15)
    222  directory = self._prepare_directory(num_classes=4, color_mode='rgba')
    230  directory = self._prepare_directory(num_classes=4, color_mode='grayscale')
    242  directory = self._prepare_directory(num_classes=2, count=10)
    [all …]
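The test prepares a directory with one sub-folder per class, so `num_classes` equals the number of sub-directories the loader discovers. Roughly, the public API under test is `tf.keras.preprocessing.image_dataset_from_directory`; the path below is a placeholder:

    import tensorflow as tf

    # Expects a layout like:
    #   images/class_0/*.jpg
    #   images/class_1/*.jpg
    # Each sub-directory becomes one class, so num_classes = number of sub-dirs.
    dataset = tf.keras.preprocessing.image_dataset_from_directory(
        'images/',               # placeholder path
        labels='inferred',
        label_mode='int',        # integer labels in [0, num_classes)
        image_size=(64, 64),
        batch_size=8)
    print(dataset.class_names)   # one entry per class sub-directory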
|
D | text_dataset_test.py |
    35  num_classes=2, argument
    46  for class_index in range(num_classes):
    72  directory = self._prepare_directory(count=7, num_classes=2)
    96  directory = self._prepare_directory(num_classes=2)
    126  directory = self._prepare_directory(num_classes=4, count=15)
    135  directory = self._prepare_directory(num_classes=4, count=15)
    169  directory = self._prepare_directory(num_classes=2, count=10)
    184  directory = self._prepare_directory(num_classes=2, count=2)
    192  directory = self._prepare_directory(num_classes=2, count=25,
    202  directory = self._prepare_directory(num_classes=2, count=0)
    [all …]
|
/external/tensorflow/tensorflow/python/keras/wrappers/ |
D | scikit_learn_test.py |
    32  NUM_CLASSES = 2 variable
    43  model.add(keras.layers.Dense(NUM_CLASSES))
    56  num_classes=NUM_CLASSES)
    66  assert prediction in range(NUM_CLASSES)
    69  assert proba.shape == (TEST_SAMPLES, NUM_CLASSES)
    92  num_classes=NUM_CLASSES)
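The scikit-learn wrapper infers the class set from `y` at fit time, so predictions land in `range(NUM_CLASSES)` and `predict_proba` returns one column per class. A hedged sketch with synthetic data (layer sizes and hyperparameters are arbitrary):

    import numpy as np
    import tensorflow as tf
    from tensorflow.keras.wrappers.scikit_learn import KerasClassifier

    NUM_CLASSES = 2

    def build_fn():
      model = tf.keras.Sequential([
          tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
          tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'),
      ])
      model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
      return model

    x = np.random.random((20, 4))
    y = np.random.randint(0, NUM_CLASSES, size=(20,))

    clf = KerasClassifier(build_fn=build_fn, epochs=1, batch_size=4, verbose=0)
    clf.fit(x, y)
    proba = clf.predict_proba(x)
    print(proba.shape)  # (20, NUM_CLASSES)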
|
/external/tensorflow/tensorflow/core/util/ctc/ |
D | ctc_loss_calculator.h |
    99  int num_classes, const Vector& seq_len,
    131  auto num_classes = inputs[0].cols(); in CalculateLoss() local
    144  if (inputs[t].cols() != num_classes) { in CalculateLoss()
    146  " to be: ", num_classes, in CalculateLoss()
    169  batch_size, num_classes, seq_len, labels, &max_u_prime, &l_primes); in CalculateLoss()
    175  auto ComputeLossAndGradients = [this, num_classes, &labels, &l_primes, in CalculateLoss()
    204  Matrix y(num_classes, seq_len(b)); in CalculateLoss()
    214  // y, prob are in num_classes x seq_len(b) in CalculateLoss()
    270  max_seq_len * num_classes * in CalculateLoss()
    272  max_seq_len * 2 * (2 * num_classes + 1) * in CalculateLoss()
    [all …]
|
/external/tensorflow/tensorflow/python/data/experimental/ops/ |
D | resampling.py |
    45  `tf.int32` tensor. Values should be in `[0, num_classes)`.
    46  target_dist: A floating point type tensor, shaped `[num_classes]`.
    48  `[num_classes]`. If not provided, the true class distribution is
    141  `tf.int32` tensor. Values should be in `[0, num_classes)`.
    180  num_classes = (target_dist_t.shape[0] or array_ops.shape(target_dist_t)[0])
    182  [num_classes], np.int64(smoothing_constant))
    210  num_examples_per_class_seen: Type `int64`, shape `[num_classes]`,
    215  `[num_classes]`.
    216  dist: The updated distribution. Type `float32`, shape `[num_classes]`.
    218  num_classes = num_examples_per_class_seen.get_shape()[0]
    [all …]
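In `rejection_resample`, `num_classes` is simply the length of `target_dist`, and `class_func` must map each element to an id in [0, num_classes). A hedged sketch that rebalances a skewed two-class dataset:

    import tensorflow as tf

    # A skewed two-class dataset: ~90% zeros, ~10% ones.
    labels = tf.data.Dataset.from_tensor_slices(
        tf.constant([0] * 90 + [1] * 10, dtype=tf.int32))

    # target_dist has shape [num_classes]; class_func must return ids in [0, num_classes).
    resampler = tf.data.experimental.rejection_resample(
        class_func=lambda label: label,
        target_dist=[0.5, 0.5])

    # The transformation yields (class, example) pairs; keep only the examples.
    balanced = labels.apply(resampler).map(lambda cls, example: example)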
|
/external/tensorflow/tensorflow/python/ops/numpy_ops/integration_test/benchmarks/ |
D | numpy_mlp.py |
    22  NUM_CLASSES = 3 variable
    34  def __init__(self, num_classes=NUM_CLASSES, input_size=INPUT_SIZE, argument
    38  self.w2 = np.random.uniform(size=[hidden_units, num_classes]).astype(
    42  self.b2 = np.random.uniform(size=[1, num_classes]).astype(
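In this benchmark the second weight matrix `w2` has shape [hidden_units, num_classes], so the forward pass ends in a `num_classes`-wide output. A minimal NumPy sketch of the same shape arithmetic (names and sizes are illustrative, not the benchmark's):

    import numpy as np

    NUM_CLASSES, INPUT_SIZE, HIDDEN_UNITS = 3, 10, 8
    BATCH = 4

    w1 = np.random.uniform(size=[INPUT_SIZE, HIDDEN_UNITS]).astype(np.float32)
    b1 = np.random.uniform(size=[1, HIDDEN_UNITS]).astype(np.float32)
    w2 = np.random.uniform(size=[HIDDEN_UNITS, NUM_CLASSES]).astype(np.float32)
    b2 = np.random.uniform(size=[1, NUM_CLASSES]).astype(np.float32)

    x = np.random.uniform(size=[BATCH, INPUT_SIZE]).astype(np.float32)
    hidden = np.maximum(x @ w1 + b1, 0.0)        # ReLU hidden layer
    logits = hidden @ w2 + b2                    # shape [BATCH, NUM_CLASSES]
    print(logits.shape)                          # (4, 3)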
|