# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test compiling a Conv2d + Mul network under semi-auto-parallel sharding strategies."""

import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss


grad_all = C.GradOperation(get_all=True)


class NetWithLoss(nn.Cell):
    """Wraps a network with a virtual loss so the compiled graph has a scalar output."""

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, w1, w2):
        predict = self.network(x, w1, w2)
        return self.loss(predict)


class GradWrap(nn.Cell):
    """Wraps a network so that compiling it also builds the backward graph."""

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, w1, w2):
        return grad_all(self.network)(x, w1, w2)


class NetConv(nn.Cell):
    """Conv2d cell whose underlying conv2d primitive is sharded with the given strategy."""

    def __init__(self,
                 cin,
                 cout,
                 kernel_size,
                 stride=1,
                 pad_mode='pad',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros',
                 strategy=None):
        super(NetConv, self).__init__()
        self.conv = nn.Conv2d(cin,
                              cout,
                              kernel_size,
                              stride,
                              pad_mode,
                              padding,
                              dilation,
                              group,
                              has_bias,
                              weight_init,
                              bias_init)
        self.conv.conv2d.shard(strategy)

    def construct(self, input_x):
        return self.conv(input_x)


def test_batch():
    """Compile a conv/mul pipeline with batch, width, and mixed sharding strategies."""

    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.conv1 = NetConv(16, 8, (3, 3), bias_init='zeros', strategy=strategy1)
            self.mul1 = P.Mul().shard(strategy2)
            self.conv2 = NetConv(8, 64, (9, 9), bias_init='zeros', strategy=strategy1)
            self.mul2 = P.Mul().shard(strategy3)

        def construct(self, x, w1, w2):
            out1 = self.conv1(x)
            out2 = self.mul1(out1, w1)
            out3 = self.conv2(out2)
            out4 = self.mul2(out3, w2)

            return out4

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    # strategy1 splits the conv input along the batch dimension across all 8
    # devices and replicates the weight; strategy2 splits both Mul inputs along
    # the width dimension; strategy3 splits across batch (4) and width (2).
    strategy1 = ((8, 1, 1, 1), (1, 1, 1, 1))
    strategy2 = ((1, 1, 1, 8), (1, 1, 1, 8))
    strategy3 = ((4, 1, 1, 2), (4, 1, 1, 2))

    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    net.set_auto_parallel()

    x = Tensor(np.ones([128, 16, 34, 34]), dtype=ms.float32)
    w1 = Tensor(np.ones([128, 8, 32, 32]), dtype=ms.float32)
    w2 = Tensor(np.ones([128, 64, 24, 24]), dtype=ms.float32)
    net.set_train()
    _cell_graph_executor.compile(net, x, w1, w2)


if __name__ == '__main__':
    test_batch()