/third_party/mindspore/tests/st/quantization/resnet50_quant/ |
D | test_resnet50_quant.py |
    71   epoch_size = config.epoch_size
    102  total_epochs=config.epoch_size,
    119  model.train(epoch_size, dataset, callbacks=callbacks,
|
/third_party/mindspore/tests/st/quantization/mobilenetv2_quant/ |
D | test_mobilenetv2_quant.py |
    68   epoch_size = config.epoch_size
    97   total_epochs=epoch_size + config.start_epoch,
    110  model.train(epoch_size, dataset, callbacks=callback,
|
D | test_mobilenetv2_quant_gpu.py |
    66   epoch_size = config.epoch_size
    95   total_epochs=epoch_size + config.start_epoch,
    108  model.train(epoch_size, dataset, callbacks=callback,
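In both mobilenetv2_quant tests the learning-rate schedule is built over total_epochs = epoch_size + config.start_epoch while model.train is still called with only epoch_size, the usual resume/fine-tune arrangement: the schedule covers the full training history and the optimizer gets only its tail. A rough sketch of that arithmetic; linear_warmup_lr, the config values, and the slicing are illustrative assumptions, not the tests' actual helper:

import numpy as np

def linear_warmup_lr(base_lr, total_epochs, steps_per_epoch, warmup_epochs=1):
    """One learning-rate value per training step over total_epochs (assumed shape of the real helper)."""
    total_steps = total_epochs * steps_per_epoch
    warmup_steps = warmup_epochs * steps_per_epoch
    lr = []
    for step in range(total_steps):
        if step < warmup_steps:
            lr.append(base_lr * (step + 1) / warmup_steps)    # warm up
        else:
            lr.append(base_lr * (1.0 - step / total_steps))   # decay
    return np.array(lr, dtype=np.float32)

epoch_size = 3          # epochs actually trained by the test (assumed value)
start_epoch = 200       # epochs already covered by the pretrained checkpoint (assumed value)
steps_per_epoch = 100   # dataset size in batches (assumed value)

# The schedule spans epoch_size + start_epoch epochs; training resumes at start_epoch,
# so only the tail is handed to the optimizer before model.train(epoch_size, ...).
full_lr = linear_warmup_lr(0.05, epoch_size + start_epoch, steps_per_epoch)
lr = full_lr[start_epoch * steps_per_epoch:]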
|
/third_party/mindspore/tests/ut/python/exec/ |
D | test_train_with_lars.py |
    74   def lr_gen(fn, epoch_size):  argument
    75   for i in range(epoch_size):
    79   def me_train_tensor(net, input_np, label_np, epoch_size=2):  argument
    84   …opt = Momentum(get_net_trainable_reordered_params(net)[2], lr_gen(lambda i: 0.1, epoch_size), 0.9,…
|
D | test_train.py |
    27   def lr_gen(fn, epoch_size):  argument
    28   for i in range(epoch_size):
    32   def me_train_tensor(net, input_np, label_np, epoch_size=2):  argument
    35   …um(filter(lambda x: x.requires_grad, net.get_parameters()), lr_gen(lambda i: 0.1, epoch_size), 0.9,
    42   for epoch in range(0, epoch_size):
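test_train.py and test_train_with_lars.py share the small lr_gen generator visible above: it yields one learning-rate value per epoch from a callable and is handed to Momentum as a dynamic schedule. A minimal sketch of that pattern, assuming a toy Dense network in place of the tests' real models:

import mindspore.nn as nn

def lr_gen(fn, epoch_size):
    # Yield fn(i) once per epoch; the resulting sequence acts as a dynamic LR schedule.
    for i in range(epoch_size):
        yield fn(i)

net = nn.Dense(4, 3)   # toy stand-in for the test network (assumption)
params = list(filter(lambda x: x.requires_grad, net.get_parameters()))

# Constant 0.1 as in the tests; Momentum accepts an iterable of learning-rate values.
lrs = list(lr_gen(lambda i: 0.1, epoch_size=2))
opt = nn.Momentum(params, lrs, 0.9, weight_decay=0.0001)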
|
/third_party/mindspore/tests/ut/python/parallel/ |
D | test_full_batch.py |
    70   epoch_size = 2
    87   model.train(epoch_size, dataset, dataset_sink_mode=False)
    98   epoch_size = 2
    111  model.train(epoch_size, dataset, dataset_sink_mode=False)
|
D | test_reshape.py |
    88   epoch_size = 2
    102  model.train(epoch_size, dataset, dataset_sink_mode=False)
    429  epoch_size = 2
    440  model.train(epoch_size, dataset, dataset_sink_mode=False)
    571  epoch_size = 2
    583  model.train(epoch_size, dataset, dataset_sink_mode=False)
    590  epoch_size = 2
    603  model.train(epoch_size, dataset, dataset_sink_mode=False)
    632  epoch_size = 2
    644  model.train(epoch_size, dataset, dataset_sink_mode=False)
    [all …]
|
D | test_bool_grad.py |
    65   epoch_size = 1
    77   model.train(epoch_size, dataset, dataset_sink_mode=False)
|
D | test_topk.py |
    68   epoch_size = 2
    72   model.train(epoch_size, dataset, dataset_sink_mode=False)
|
D | test_transpose.py |
    72   epoch_size = 2
    89   model.train(epoch_size, dataset, dataset_sink_mode=False)
|
D | test_gathernd.py |
    69   epoch_size = 2
    73   model.train(epoch_size, dataset, dataset_sink_mode=False)
|
D | test_auto_parallel_for_loop_simplify.py |
    88   epoch_size = 2
    93   model.train(epoch_size, dataset, dataset_sink_mode=False)
|
D | test_range.py |
    79   epoch_size = 2
    83   model.train(epoch_size, dataset, dataset_sink_mode=False)
|
D | test_select.py |
    70   epoch_size = 2
    74   model.train(epoch_size, dataset, dataset_sink_mode=False)
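All of the parallel tests listed above reduce to the same skeleton: pick a tiny epoch_size, wrap the network in a Model, and call model.train with dataset_sink_mode=False so every step runs through the Python frontend. A self-contained sketch of that skeleton, using a toy Dense network and random data instead of the tests' parallel constructs:

import numpy as np
import mindspore.nn as nn
import mindspore.dataset as ds
from mindspore import Model

# Random stand-in data; the real tests build synthetic datasets of their own.
data = {"data": np.random.randn(32, 16).astype(np.float32),
        "label": np.random.randint(0, 4, size=(32,)).astype(np.int32)}
dataset = ds.NumpySlicesDataset(data, shuffle=False).batch(8)

net = nn.Dense(16, 4)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
model = Model(net, loss_fn=loss, optimizer=opt)

epoch_size = 2
model.train(epoch_size, dataset, dataset_sink_mode=False)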
|
/third_party/mindspore/tests/st/auto_parallel/ |
D | soft_entropy_loss_expand_parallel.py |
    198  epoch_size = 6
    200  model.train(epoch_size, dataset, callbacks=single_callback, dataset_sink_mode=False)
    210  epoch_size = 6
    212  model.train(epoch_size, dataset, callbacks=parallel_callback, dataset_sink_mode=False)
    236  epoch_size = 6
    238  model.train(epoch_size, dataset, callbacks=parallel_callback, dataset_sink_mode=False)
    262  epoch_size = 6
    264  model.train(epoch_size, dataset, callbacks=parallel_callback, dataset_sink_mode=False)
|
/third_party/mindspore/tests/st/tbe_networks/ |
D | test_resnet_cifar_1p.py |
    134  def train_process(epoch_size, num_classes, batch_size):  argument
    145  model.train(epoch_size, dataset, callbacks=[loss_cb])
    155  epoch_size = 1
    158  acc = train_process(epoch_size, num_classes, batch_size)
|
D | test_resnet_cifar_8p.py |
    145  def train_process(q, device_id, epoch_size, num_classes, device_num, batch_size, enable_hccl):  argument
    170  model.train(epoch_size, dataset, callbacks=[loss_cb])
    177  epoch_size = 1
    185  … args=(q, device_id, epoch_size, num_classes, device_num, batch_size, enable_hccl)))
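The 8p variant above drives training through one Python process per device: each process runs train_process(...), trains for epoch_size epochs, and reports its result through a shared queue. A stripped-down sketch of that launch pattern; the body of train_process here is a placeholder, not the test's real training code:

from multiprocessing import Process, Queue

def train_process(q, device_id, epoch_size, num_classes, device_num, batch_size, enable_hccl):
    # ... set the device context, build dataset and model, then:
    # model.train(epoch_size, dataset, callbacks=[loss_cb])
    q.put({"device_id": device_id, "acc": 0.0})   # placeholder result

if __name__ == "__main__":
    q = Queue()
    epoch_size, num_classes, device_num, batch_size, enable_hccl = 1, 10, 8, 32, True
    procs = [Process(target=train_process,
                     args=(q, device_id, epoch_size, num_classes, device_num, batch_size, enable_hccl))
             for device_id in range(device_num)]
    for p in procs:
        p.start()
    results = [q.get() for _ in range(device_num)]   # one result per device
    for p in procs:
        p.join()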
|
D | resnet_cifar.py |
    131  epoch_size = args_opt.epoch_size  variable
    145  model.train(epoch_size, dataset, callbacks=[ckpoint_cb, loss_cb, time_cb])
|
/third_party/mindspore/tests/st/mem_reuse/ |
D | resnet_cifar_memreuse.py |
    130  epoch_size = args_opt.epoch_size  variable
    138  dataset = create_dataset(epoch_size)
    143  model.train(epoch_size, dataset, callbacks=[ckpoint_cb, loss_cb])
|
D | resnet_cifar_normal.py |
    130  epoch_size = args_opt.epoch_size  variable
    138  dataset = create_dataset(epoch_size)
    143  model.train(epoch_size, dataset, callbacks=[ckpoint_cb, loss_cb])
|
/third_party/mindspore/tests/st/ps/part_ps/ |
D | test_ps_embedding_heterogeneous_conv2d_adam.py |
    127  epoch_size=1, target='CPU', sparse=True):  argument
    134  self.epoch_size = epoch_size
    150  model.train(self.epoch_size, dataset, dataset_sink_mode=False)
    167  model.train(self.epoch_size, dataset, dataset_sink_mode=False)
|
/third_party/mindspore/tests/st/networks/models/resnet50/ |
D | test_resnet50_imagenet.py |
    132  def train_process(q, device_id, epoch_size, device_num, enable_hccl):  argument
    176  … warmup_epochs=config.warmup_epochs, total_epochs=config.epoch_size,
    215  for epoch_idx in range(0, int(epoch_size / eval_interval)):
    230  def train_process_thor(q, device_id, epoch_size, device_num, enable_hccl):  argument
    295  for epoch_idx in range(0, int(epoch_size / eval_interval)):
    325  epoch_size = 2
    333  args=(q, device_id, epoch_size, device_num, enable_hccl)))
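Lines 215 and 295 above show the ImageNet test interleaving training and evaluation: epoch_size is split into chunks of eval_interval epochs, with an evaluation pass after each chunk. A hedged sketch of that loop, written as a helper so the model and datasets stay abstract; they are assumptions, not the test's actual objects:

from mindspore import Model

def train_with_interleaved_eval(model: Model, dataset, eval_dataset,
                                epoch_size=2, eval_interval=1):
    """Train in chunks of eval_interval epochs, evaluating after each chunk."""
    history = []
    for epoch_idx in range(0, int(epoch_size / eval_interval)):
        model.train(eval_interval, dataset, dataset_sink_mode=True)
        history.append(model.eval(eval_dataset))   # dict of whatever metrics the Model was built with
    return history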
|
/third_party/mindspore/tests/st/pynative/ |
D | test_pynative_lenet.py |
    140  epoch_size = 20
    154  for epoch in range(0, epoch_size):
    178  epoch_size = 20
    193  for epoch in range(0, epoch_size):
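test_pynative_lenet.py does not go through Model.train at all: in PyNative mode it iterates over epoch_size itself and drives a one-step training cell directly. A minimal sketch of that hand-written loop, with a toy Dense network and random tensors standing in for LeNet and its dataset:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context

context.set_context(mode=context.PYNATIVE_MODE)

net = nn.Dense(32, 10)                          # stand-in for LeNet (assumption)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
train_net = nn.TrainOneStepCell(nn.WithLossCell(net, loss_fn), opt)
train_net.set_train()

data = Tensor(np.random.randn(16, 32).astype(np.float32))
label = Tensor(np.random.randint(0, 10, size=(16,)).astype(np.int32))

epoch_size = 20
for epoch in range(0, epoch_size):
    loss = train_net(data, label)               # one forward/backward/update per iteration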
|
/third_party/mindspore/tests/st/networks/models/bert/src/ |
D | dataset.py |
    26   def create_bert_dataset(epoch_size=1, device_num=1, rank=0, do_shuffle="true", data_dir=None, schem…  argument
    29   repeat_count = epoch_size
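In the BERT dataset helper above the epoch count is folded into the data pipeline itself: repeat_count = epoch_size and the dataset is repeated that many times, so a single pass over the pipeline covers all epochs. A toy sketch of the same idea, with NumpySlicesDataset standing in for the real record reader:

import numpy as np
import mindspore.dataset as ds

def create_toy_dataset(epoch_size=1, batch_size=4, num_samples=16):
    repeat_count = epoch_size
    data = {"input_ids": np.random.randint(0, 100, size=(num_samples, 8)).astype(np.int32),
            "label": np.random.randint(0, 2, size=(num_samples,)).astype(np.int32)}
    dataset = ds.NumpySlicesDataset(data, shuffle=True)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.repeat(repeat_count)   # one pipeline pass now spans all epochs
    return dataset

dataset = create_toy_dataset(epoch_size=2)
print(dataset.get_dataset_size())            # batches across all repeated epochs: 8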
|
/third_party/mindspore/tests/st/networks/models/bert/bert_performance/ |
D | test_bert_thor.py |
    143  def train_process_bert_thor(q, device_id, epoch_size, device_num):  argument
    164  new_repeat_count = epoch_size * data_set.get_dataset_size() // data_sink_steps
    199  epoch_size = 2
    203  …process.append(Process(target=train_process_bert_thor, args=(q, device_id, epoch_size, device_num)…
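Line 164 above recomputes how many sink iterations are needed when data sinking executes data_sink_steps steps per reported "epoch". The same arithmetic with assumed numbers:

epoch_size = 2            # real epochs to train
dataset_size = 320        # steps per epoch, i.e. data_set.get_dataset_size() (assumed value)
data_sink_steps = 100     # steps executed per sink iteration (assumed value)

new_repeat_count = epoch_size * dataset_size // data_sink_steps
print(new_repeat_count)   # 6 sink iterations cover 600 of the 640 total steps

With data sinking enabled and a sink size set, the first argument to model.train counts sink iterations rather than dataset epochs, which is presumably why the repeat count is recomputed this way.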
|