# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np

import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.nn import Cell, Momentum
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.ops import operations as P
from mindspore.train import Model
from tests.dataset_mock import MindData


class Dataset(MindData):
    # Minimal mock dataset that yields the same (predict, label) pair `length` times.
    def __init__(self, predict, label, length=3):
        super(Dataset, self).__init__(size=length)
        self.predict = predict
        self.label = label
        self.index = 0
        self.length = length

    def __iter__(self):
        return self

    def __next__(self):
        if self.index >= self.length:
            raise StopIteration
        self.index += 1
        return self.predict, self.label

    def reset(self):
        self.index = 0


class Net(Cell):
    # Element-wise Mul followed by Neg, with configurable shard strategies for each op.
    def __init__(self, mul_weight, strategy1=None, strategy2=None):
        super().__init__()
        self.mul = P.Mul().shard(strategy1)
        self.neg = P.Neg().shard(strategy2)
        self.mul_weight = Parameter(mul_weight, "w1")

    def construct(self, x):
        out = self.mul(x, self.mul_weight)
        out = self.neg(out)
        return out


_x = Tensor(np.ones([32, 128]), dtype=ms.float32)
_b = Tensor(np.ones([32]), dtype=ms.int32)
# The weight shares the input shape so the element-wise Mul in Net.construct is valid.
_w1 = Tensor(np.ones([32, 128]), dtype=ms.float32)


def compile_net(net):
    # Compile (and run two epochs of) the network under the current auto-parallel context.
    learning_rate = 0.1
    momentum = 0.9
    epoch_size = 2
    dataset = Dataset(_x, _b)
    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss, optimizer=opt)
    model.train(epoch_size, dataset, dataset_sink_mode=False)
    context.reset_auto_parallel_context()


def test_neg_data_parallel():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((16, 1), (16, 1))
    strategy2 = ((16, 1),)
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)


def test_neg_model_parallel():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((1, 16), (1, 16))
    strategy2 = ((1, 16),)
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)


def test_neg_hybrid_parallel():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((4, 4), (4, 4))
    strategy2 = ((4, 4),)
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)


def test_neg_auto_parallel():
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0)
    net = Net(_w1)
    compile_net(net)


def test_neg_repeat_calc():
    # Neg uses only 4 of the 16 devices, exercising repeated-calculation handling.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((4, 4), (4, 4))
    strategy2 = ((2, 2),)
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)


def test_neg_repeat_calc2():
    # Mul uses only 8 of the 16 devices while Neg uses all 16.
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
    strategy1 = ((4, 2), (4, 2))
    strategy2 = ((4, 4),)
    net = Net(_w1, strategy1, strategy2)
    compile_net(net)