/external/tensorflow/tensorflow/python/keras/engine/ |
D | training_generator_v1.py |
      45  validation_data=None,  [argument]
     144  do_validation = validation_data is not None
     147  steps_per_epoch, validation_data, validation_steps, mode,
     300  validation_data,
     366  steps_per_epoch, validation_data, validation_steps,  [argument]
     411  data_utils.is_generator_or_sequence(validation_data) or
     412  isinstance(validation_data, iterator_ops.IteratorBase))
     413  if (val_gen and not isinstance(validation_data, data_utils.Sequence) and
     560  validation_data=None,  [argument]
     581  validation_data=validation_data,
     [all …]
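The checks at lines 411–413 accept a plain generator, a `keras.utils.Sequence`, or a dataset iterator as `validation_data`. A minimal, hedged sketch of the `Sequence` form (the `ValSequence` class, the toy model, and the shapes below are illustrative assumptions, not taken from the file):

```python
import numpy as np
import tensorflow as tf

class ValSequence(tf.keras.utils.Sequence):
    """Yields (x, y) batches; fit() pulls one batch per validation step."""

    def __init__(self, x, y, batch_size=32):
        self.x, self.y, self.batch_size = x, y, batch_size

    def __len__(self):
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        s = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[s], self.y[s]

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer='adam', loss='mse')

x_train, y_train = np.random.rand(256, 8), np.random.rand(256, 1)
x_val, y_val = np.random.rand(64, 8), np.random.rand(64, 1)

model.fit(x_train, y_train, epochs=2,
          validation_data=ValSequence(x_val, y_val))
```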
|
D | training_utils_v1.py |
    1861  def unpack_validation_data(validation_data, raise_if_ambiguous=True):  [argument]
    1877  if (isinstance(validation_data, (iterator_ops.Iterator,
    1881  or not hasattr(validation_data, '__len__')):
    1882  val_x = validation_data
    1885  elif len(validation_data) == 2:
    1887  val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
    1890  val_x, val_y, val_sample_weight = validation_data, None, None
    1891  elif len(validation_data) == 3:
    1893  val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
    1895  val_x, val_y, val_sample_weight = validation_data, None, None
    [all …]
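`unpack_validation_data` normalises whatever was passed as `validation_data` into an `(x, y, sample_weight)` triple. A simplified re-implementation of the rules visible in the matches above; `unpack_validation_data_sketch` is a hypothetical stand-in, and the real helper also special-cases dataset/iterator types and honours the `raise_if_ambiguous` flag:

```python
def unpack_validation_data_sketch(validation_data):
    """Normalise validation_data into an (x, y, sample_weight) triple."""
    # Iterators, datasets and anything without a length pass through as x.
    if not hasattr(validation_data, '__len__'):
        return validation_data, None, None
    if len(validation_data) == 2:    # (x, y)
        val_x, val_y = validation_data
        return val_x, val_y, None
    if len(validation_data) == 3:    # (x, y, sample_weight)
        return tuple(validation_data)
    # Anything else is treated as a bare x with no targets or weights.
    return validation_data, None, None

print(unpack_validation_data_sketch((['x'], ['y'])))          # (['x'], ['y'], None)
print(unpack_validation_data_sketch((['x'], ['y'], [1.0])))   # (['x'], ['y'], [1.0])
```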
|
D | training.py |
     888  validation_data=None,  [argument]
    1122  (x, y, sample_weight), validation_data = (
    1126  if validation_data:
    1128  data_adapter.unpack_x_y_sample_weight(validation_data))
    1202  if validation_data and self._should_eval(epoch, validation_freq):
    1946  validation_data=None,  [argument]
    1970  validation_data=validation_data,
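Lines 1126–1202 show the v2 `Model.fit` path: `validation_data` is unpacked once, then evaluated only on epochs selected by `validation_freq`. A minimal sketch of that behaviour from the caller's side (the toy model and random data are assumptions):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='adam', loss='mse')

x, y = np.random.rand(128, 4), np.random.rand(128, 1)
x_val, y_val = np.random.rand(32, 4), np.random.rand(32, 1)

# validation_data may be (x, y) or (x, y, sample_weight); with
# validation_freq=2 the evaluation pass only runs on every second epoch.
model.fit(x, y, epochs=4,
          validation_data=(x_val, y_val),
          validation_freq=2)
```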
|
D | training_arrays_v1.py |
     607  validation_data=None,  [argument]
     631  if validation_data:
     633  validation_data, batch_size, validation_steps)
|
D | training_distributed_v1.py |
     586  validation_data=None,  [argument]
     628  if validation_data:
     630  training_utils_v1.unpack_validation_data(validation_data))
|
D | training_v1.py |
     623  validation_data=None,  [argument]
     802  validation_data=validation_data,
    1227  validation_data=None,  [argument]
    1251  validation_data=validation_data,
    1375  def _prepare_validation_data(self, validation_data, batch_size,  [argument]
    1379  validation_data)
|
/external/libopus/training/ |
D | rnn_train.py |
     111  epochs=10, validation_data=(x_train, y_train))
     151  epochs=202, initial_epoch=201, validation_data=(x_train, y_train))
     157  epochs=203, initial_epoch=202, validation_data=(x_train, y_train))
     163  epochs=204, initial_epoch=203, validation_data=(x_train, y_train))
     169  epochs=205, initial_epoch=204, validation_data=(x_train, y_train))
     175  epochs=206, initial_epoch=205, validation_data=(x_train, y_train))
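These calls reuse the training arrays as `validation_data` and resume training by advancing `epochs`/`initial_epoch`, so the reported validation loss tracks the training set rather than held-out data. A toy sketch of the same pattern (the dense model and shapes are placeholders for the RNN in `rnn_train.py`):

```python
import numpy as np
from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, input_shape=(10,))])
model.compile(optimizer='adam', loss='mse')

x_train = np.random.rand(512, 10)
y_train = np.random.rand(512, 1)

# Initial run: the training arrays double as the "validation" set.
model.fit(x_train, y_train, batch_size=32, epochs=10,
          validation_data=(x_train, y_train))

# Later runs continue from a given epoch; each call trains exactly one more
# epoch (epochs - initial_epoch = 1), matching the pattern above.
model.fit(x_train, y_train, batch_size=32,
          epochs=202, initial_epoch=201,
          validation_data=(x_train, y_train))
```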
|
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | dataset_creator_model_fit_test.py |
      70  validation_data=dataset_creator.DatasetCreator(dataset_fn),
      77  validation_data=dataset_creator.DatasetCreator(dataset_fn),
      88  validation_data=(x, y),
     100  validation_data=(x, y),
     223  validation_data = dataset_creator.DatasetCreator(fit_dataset_fn)
     225  model = self._model_fit(strategy, x=x, validation_data=validation_data)
|
D | dataset_creator_model_fit_test_base.py |
     114  validation_data=None,  [argument]
     137  if validation_data is None:
     138  validation_data = dataset_creator.DatasetCreator(
     149  validation_data=validation_data,
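When no `validation_data` is supplied, the test base builds a `DatasetCreator` and forwards it to `Model.fit`. A hedged sketch of that usage with the public `tf.keras.utils.experimental.DatasetCreator`; the `dataset_fn` contents, shapes, and step counts below are assumptions, and `steps_per_epoch`/`validation_steps` are given explicitly because fit cannot infer them from a creator:

```python
import tensorflow as tf

def dataset_fn(input_context):
    # Each (potential) worker builds, batches and repeats its own dataset.
    x = tf.random.uniform((64, 10))
    y = tf.random.uniform((64, 1))
    return tf.data.Dataset.from_tensor_slices((x, y)).batch(8).repeat()

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])
model.compile(optimizer='adam', loss='mse')

model.fit(
    tf.keras.utils.experimental.DatasetCreator(dataset_fn),
    epochs=2,
    steps_per_epoch=4,
    validation_data=tf.keras.utils.experimental.DatasetCreator(dataset_fn),
    validation_steps=2)
```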
|
/external/libopus/dnn/torch/lpcnet/ |
D | train_lpcnet.py |
     180  validation_data = LPCNetDataset( setup['validation_dataset'],  [variable]
     189  …validation_dataloader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size, drop_l…
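The PyTorch training scripts wrap the validation set in a standard `DataLoader`. A self-contained sketch with a stand-in dataset: `ToyValSet` replaces `LPCNetDataset`, whose constructor arguments come from the setup file, and the truncated `drop_l…` above is assumed to be `drop_last`:

```python
import torch

class ToyValSet(torch.utils.data.Dataset):
    """Stand-in for LPCNetDataset: fixed-size random feature/target pairs."""

    def __init__(self, n=256):
        self.features = torch.randn(n, 36)
        self.targets = torch.randn(n, 1)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx], self.targets[idx]

validation_data = ToyValSet()
validation_dataloader = torch.utils.data.DataLoader(
    validation_data, batch_size=32, drop_last=True, shuffle=False)

for features, targets in validation_dataloader:
    ...  # run the model in eval mode and accumulate validation metrics
```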
|
/external/rnnoise/src/ |
D | rnn_train.py | 65 validation_data=(x_train, y_train))
|
/external/libopus/scripts/ |
D | rnn_train.py | 66 validation_data=(x_train, y_train))
|
/external/libopus/dnn/torch/osce/ |
D | train_vocoder.py |
     153  validation_data = LPCNetVocodingDataset(setup['validation_dataset'], **data_config)  [variable]
     155  …validation_dataloader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size, drop_l…
|
D | train_model.py |
     169  validation_data = SilkEnhancementSet(setup['validation_dataset'], **data_config)  [variable]
     171  …validation_dataloader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size, drop_l…
|
D | adv_train_vocoder.py |
     163  validation_data = LPCNetVocodingDataset(setup['validation_dataset'], **data_config)  [variable]
     165  …validation_dataloader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size, drop_l…
|
D | adv_train_model.py |
     166  validation_data = SilkEnhancementSet(setup['validation_dataset'], **data_config)  [variable]
     168  …validation_dataloader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size, drop_l…
|
/external/tensorflow/tensorflow/lite/g3doc/examples/modify/model_maker/ |
D | image_classification.ipynb |
     401  "validation_data, test_data = rest_data.split(0.5)"
     451  "model = image_classifier.create(train_data, validation_data=validation_data)"
     751  …ier.create(train_data, model_spec=model_spec.get('mobilenet_v2'), validation_data=validation_data)"
     846  "* `validation_data`: Validation data. If None, skips validation process. None by default.\n",
     870  "model = image_classifier.create(train_data, validation_data=validation_data, epochs=10)"
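A condensed, hedged version of the notebook's flow: load a folder of labelled images, carve out validation and test splits, and pass the validation split to `image_classifier.create`. The `image_path` folder and split ratios are placeholders; as the line-846 match notes, `validation_data=None` simply skips validation:

```python
from tflite_model_maker import image_classifier
from tflite_model_maker.image_classifier import DataLoader

image_path = 'images/'   # placeholder: folder with one subfolder per label

data = DataLoader.from_folder(image_path)
train_data, rest_data = data.split(0.8)
validation_data, test_data = rest_data.split(0.5)

# validation_data defaults to None, in which case validation is skipped.
model = image_classifier.create(train_data,
                                validation_data=validation_data,
                                epochs=10)
loss, accuracy = model.evaluate(test_data)
```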
|
D | question_answer.ipynb |
     134  "validation_data = DataLoader.from_squad(validation_data_path, spec, is_training=False)\n",
     140  "metric = model.evaluate(validation_data)\n",
     311  "validation_data = DataLoader.from_squad(validation_data_path, spec, is_training=False)"
     378  "model.evaluate(validation_data)"
     458  "model.evaluate_tflite('model.tflite', validation_data)"
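The question-answering notebook builds its validation split from SQuAD-format JSON and reuses it for both Keras-model and exported-TFLite evaluation. A condensed, hedged sketch; the two `*_data_path` values are placeholders, and the `mobilebert_qa_squad` spec name comes from the tutorial rather than the matches above:

```python
from tflite_model_maker import model_spec, question_answer
from tflite_model_maker.question_answer import DataLoader

train_data_path = 'train.json'             # placeholder SQuAD-format file
validation_data_path = 'validation.json'   # placeholder SQuAD-format file

spec = model_spec.get('mobilebert_qa_squad')
train_data = DataLoader.from_squad(train_data_path, spec, is_training=True)
validation_data = DataLoader.from_squad(validation_data_path, spec, is_training=False)

model = question_answer.create(train_data, model_spec=spec)
metric = model.evaluate(validation_data)            # evaluate the Keras model

model.export(export_dir='.')                        # writes model.tflite
model.evaluate_tflite('model.tflite', validation_data)   # evaluate the export
```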
|
D | object_detection.ipynb |
     277  …"train_data, validation_data, test_data = object_detector.DataLoader.from_csv('gs://cloud-ml-data/…
     301  …rain_data, model_spec=spec, batch_size=8, train_whole_model=True, validation_data=validation_data)"
     743  …object_detector.create(train_data, model_spec=spec, epochs=10, validation_data=validation_data)\n",
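A condensed, hedged sketch of the object-detection flow: `DataLoader.from_csv` returns train/validation/test splits in one call, and the validation split is passed straight to `create`. The CSV path is truncated in the listing, so `csv_path` is a placeholder, and the `efficientdet_lite0` spec name is taken from the tutorial rather than the matches:

```python
from tflite_model_maker import model_spec, object_detector

csv_path = 'annotations.csv'   # placeholder for the truncated gs:// CSV above

spec = model_spec.get('efficientdet_lite0')
train_data, validation_data, test_data = object_detector.DataLoader.from_csv(csv_path)

model = object_detector.create(train_data,
                               model_spec=spec,
                               batch_size=8,
                               train_whole_model=True,
                               epochs=10,
                               validation_data=validation_data)
model.evaluate(test_data)
```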
|
D | speech_recognition.ipynb |
     557  " train_data, validation_data = train_data.split(train_data_ratio)\n",
     593  " train_data, validation_data = train_data.split(train_data_ratio)\n",
     632  "model = audio_classifier.create(train_data, spec, validation_data, batch_size, epochs)"
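Here the validation split is carved out of `train_data` itself and handed to `create` positionally after the spec. A hedged sketch; the `BrowserFftSpec` and `DataLoader.from_folder(spec, …)` loading step are recalled from the tutorial rather than shown in the matches, and `dataset_dir` plus the ratios and hyperparameters are placeholders:

```python
from tflite_model_maker import audio_classifier

dataset_dir = 'speech_dataset'   # placeholder: folder of per-class WAV subfolders
train_data_ratio = 0.8

# Assumed loading step (spec and from_folder() are not part of the matches above).
spec = audio_classifier.BrowserFftSpec()
data = audio_classifier.DataLoader.from_folder(spec, dataset_dir, cache=True)

# Carve the validation split out of the training data, as in the notebook.
train_data, test_data = data.split(0.9)
train_data, validation_data = train_data.split(train_data_ratio)

# create() accepts the validation set positionally after the spec.
model = audio_classifier.create(train_data, spec, validation_data,
                                batch_size=32, epochs=10)
model.evaluate(test_data)
```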
|
D | audio_classification.ipynb |
     330  "train_data, validation_data = train_data.split(0.8)\n",
     367  " validation_data,\n",
|
/external/tensorflow/tensorflow/lite/g3doc/tutorials/ |
D | model_maker_object_detection.ipynb |
     273  …"train_data, validation_data, test_data = object_detector.DataLoader.from_csv('gs://cloud-ml-data/…
     297  …rain_data, model_spec=spec, batch_size=8, train_whole_model=True, validation_data=validation_data)"
     750  …object_detector.create(train_data, model_spec=spec, epochs=10, validation_data=validation_data)\n",
|
D | model_maker_audio_classification.ipynb |
     331  "train_data, validation_data = train_data.split(0.8)\n",
     368  " validation_data,\n",
|
/external/tensorflow/tensorflow/compiler/xla/g3doc/tutorials/ |
D | autoclustering_xla.ipynb | 202 …" model.fit(x_train, y_train, batch_size=256, epochs=epochs, validation_data=(x_test, y_test), sh…
|
/external/vulkan-validation-layers/layers/ |
D | object_tracker_utils.cpp |
     338  …ValidationObject *validation_data = GetValidationObject(instance_data->object_dispatch, LayerObjec…  in PreCallRecordDestroyDevice()  [local]
     339  ObjectLifetimes *object_lifetimes = static_cast<ObjectLifetimes *>(validation_data);  in PreCallRecordDestroyDevice()
|