# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
15"""test checking for some ops"""
import functools
import logging
import numpy as np
import pytest

import mindspore.context as context
from mindspore import Tensor
from mindspore import nn
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config

logging.basicConfig(level=logging.WARNING)


# pylint: disable=abstract-method
class NetMissConstruct(nn.Cell):
    """ NetMissConstruct definition """

    def __init__(self):
        super(NetMissConstruct, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')
        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
        self.fc1 = nn.Dense(16 * 5 * 5, 120)
        self.fc2 = nn.Dense(120, 84)
        self.fc3 = nn.Dense(84, 10)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2)
        self.flatten = P.Flatten()

    # Test case: 'construct' is deliberately misspelled as 'construtc'
    def construtc(self, x):
        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.max_pool2d(self.relu(self.conv2(x)))
        x = self.flatten(x)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def test_net_without_construct():
    """ test_net_without_construct """
    net = NetMissConstruct()
    inp = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))
    _cell_graph_executor.compile(net, inp)


class NetWithRaise(nn.Cell):
    """ NetWithRaise definition """

    def __init__(self):
        super(NetWithRaise, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')

    # 'construct' deliberately raises; graph compilation is expected to reject
    # the 'Raise' statement (see test_net_with_raise below).
    def construct(self, x):
        raise 'exception in construct'


def test_net_with_raise():
    """ test_net_with_raise """
    net = NetWithRaise()
    inp = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))
    with pytest.raises(RuntimeError) as err:
        _cell_graph_executor.compile(net, inp)
    assert "Unsupported statement 'Raise'." in str(err.value)


class NetAddN(nn.Cell):
    """net for test AddN"""

    def __init__(self):
        super(NetAddN, self).__init__()
        self.net = P.AddN()

    def construct(self, x):
        return self.net(x)


class NetSplit(nn.Cell):
    """net for test Split"""

    def __init__(self):
        super(NetSplit, self).__init__()
        self.net = P.Split(1, 2)

    def construct(self, x):
        return self.net(x)


class NetBatchMatMul(nn.Cell):
    """net for test BatchMatMul"""

    def __init__(self):
        super(NetBatchMatMul, self).__init__()
        self.op = P.BatchMatMul()

    def construct(self, x, y):
        return self.op(x, y)


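# Each entry in test_case_check_ops is a (case_name, config) pair: 'block' is the
# Cell or primitive under test and 'desc_inputs' are the inputs it is compiled
# with. The mindspore_test pipeline applied to test_exec below is expected to
# expand every entry into one forward-compile check.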
test_case_check_ops = [
    ('Conv_Padding_1', {
        'block': nn.Conv2d(1, 6, 5, pad_mode='same', padding=0),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv_Padding_2', {
        'block': nn.Conv2d(1, 6, 5, pad_mode='valid', padding=0),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv_Padding_3', {
        'block': nn.Conv2d(1, 6, 5, pad_mode='pad', padding=0),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv_Padding_4', {
        'block': nn.Conv2d(1, 6, 5, pad_mode='pad', padding=7),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv_Bias_1', {
        'block': nn.Conv2d(1, 6, 5, has_bias=True, bias_init=Tensor(np.ones([6]).astype(np.float32))),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv_Bias_2', {
        'block': nn.Conv2d(1, 6, 5, has_bias=True, bias_init='zeros'),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv_Bias_3', {
        'block': nn.Conv2d(1, 6, 5, has_bias=False, bias_init='zeros'),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv_Bias_4', {
        'block': nn.Conv2d(1, 6, 5, has_bias=False, bias_init=Tensor(np.ones([6]).astype(np.float32))),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Dense_Bias_1', {
        'block': nn.Dense(1, 6, has_bias=True, bias_init=Tensor(np.ones([6]).astype(np.float32))),
        'desc_inputs': [Tensor(np.ones(shape=[6, 1]).astype(np.float32))]}),
    ('Dense_Bias_2', {
        'block': nn.Dense(1, 6, has_bias=True, bias_init='zeros'),
        'desc_inputs': [Tensor(np.ones(shape=[6, 1]).astype(np.float32))]}),
    ('Dense_Bias_3', {
        'block': nn.Dense(1, 6, has_bias=False, bias_init='zeros'),
        'desc_inputs': [Tensor(np.ones(shape=[6, 1]).astype(np.float32))]}),
    ('Dense_Bias_4', {
        'block': nn.Dense(1, 6, has_bias=False, bias_init=Tensor(np.ones([6]).astype(np.float32))),
        'desc_inputs': [Tensor(np.ones(shape=[6, 1]).astype(np.float32))]}),
    ('MaxPool2d_1', {
        'block': nn.MaxPool2d(5, pad_mode='same'),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}),
    ('MaxPool2d_2', {
        'block': nn.MaxPool2d(5, pad_mode='valid'),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}),
    ('AvgPool2d_1', {
        'block': nn.AvgPool2d(5, pad_mode='same'),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}),
    ('AvgPool2d_2', {
        'block': nn.AvgPool2d(5, pad_mode='valid'),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}),
    ('Conv2D_1', {
        'block': P.Conv2D(1, 6, pad_mode='same', pad=0),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                        Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
    ('Conv2D_2', {
        'block': P.Conv2D(1, 6, pad_mode='valid', pad=0),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                        Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
    ('Conv2D_3', {
        'block': P.Conv2D(1, 6, pad_mode='pad', pad=0),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                        Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
    ('Conv2D_4', {
        'block': P.Conv2D(1, 6, pad_mode='pad', pad=7),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                        Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
    ('MatMul_1', {
        'block': P.MatMul(),
        'desc_inputs': [Tensor(np.ones(shape=[1, 3])), Tensor(np.ones(shape=[3, 4]))]}),
    ('MatMul_2', {
        'block': P.BatchMatMul(),
        'desc_inputs': [Tensor(np.ones(shape=[5, 1, 5])), Tensor(np.ones(shape=[5, 5, 4]))]}),
    ('MatMul_Transpose_1', {
        'block': P.MatMul(transpose_a=True),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1])), Tensor(np.ones(shape=[3, 4]))]}),
    ('MatMul_Transpose_2', {
        'block': P.MatMul(transpose_b=True),
        'desc_inputs': [Tensor(np.ones(shape=[3, 2])), Tensor(np.ones(shape=[5, 2]))]}),
    ('MatMul_Transpose_3', {
        'block': P.MatMul(transpose_a=True, transpose_b=True),
        'desc_inputs': [Tensor(np.ones(shape=[3, 2])), Tensor(np.ones(shape=[5, 3]))]}),
    ('BatchMatMul', {
        'block': NetBatchMatMul(),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5])), Tensor(np.ones(shape=[3, 5, 4]))]}),
]

test_case_lists = [test_case_check_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select a certain test case, e.g.:
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
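# Similarly, cases generated from this file can be selected by name (the exact
# test ids depend on how mindspore_test parametrizes the cases), e.g.:
# pytest <path_to_this_file>::test_exec -k MatMul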


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


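# Each 'block' below is a (callable_or_cell, {'exception': ExpectedError}) pair;
# the exception-verification pipeline applied to test_check_exception at the end
# of this file is expected to assert that building/compiling the block with
# 'desc_inputs' raises the given exception type.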
raise_set = [
    ('Conv_Padding_1_Error', {
        'block': (lambda x: nn.Conv2d(1, 6, 5, pad_mode='same', padding=7), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv_Padding_2_Error', {
        'block': (lambda x: nn.Conv2d(1, 6, 5, pad_mode='same', padding=7), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))]}),
    ('Conv2D_1_Error', {
        'block': (lambda x, y: P.Conv2D(1, 6, pad_mode='same', pad=7), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                        Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
    ('Conv2D_2_Error', {
        'block': (lambda x, y: P.Conv2D(1, 6, pad_mode='valid', pad=7), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)),
                        Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32))]}),
    ('NetAddN_Error', {
        'block': (NetAddN(), {'exception': TypeError}),
        'desc_inputs': [(np.random.randn(1, 2, 3, 4).astype(np.float32),
                         np.random.randn(1, 2, 3, 4).astype(np.float32))]}),
    ('AddN_Error', {
        'block': (P.AddN(), {'exception': TypeError}),
        'desc_inputs': [(np.random.randn(1, 2, 3, 4).astype(np.float32),
                         np.random.randn(1, 2, 3, 4).astype(np.float32))]}),
    ('Split_Error', {
        'block': (NetSplit(), {'exception': TypeError}),
        'desc_inputs': [None]}),
    ('MatMul_1_Error', {
        'block': (P.MatMul(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[5])), Tensor(np.ones(shape=[4]))]}),
    ('MatMul_2_Error', {
        'block': (P.MatMul(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[1, 5])), Tensor(np.ones(shape=[3, 4]))]}),
    ('MatMul_3_Error', {
        'block': (P.MatMul(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[1, 5])), Tensor(np.ones(shape=[5, 5, 4]))]}),
    ('MatMul_Transpose_1_Error', {
        'block': (P.MatMul(transpose_a=True), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[1, 3])), Tensor(np.ones(shape=[3, 4]))]}),
    ('MatMul_Transpose_2_Error', {
        'block': (P.MatMul(transpose_b=True), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 2])), Tensor(np.ones(shape=[2, 5]))]}),
    ('MatMul_Transpose_3_Error', {
        'block': (P.MatMul(transpose_a=True, transpose_b=True), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 2])), Tensor(np.ones(shape=[3, 5]))]}),
    ('BatchMatMul_1_Error', {
        'block': (P.BatchMatMul(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[5])), Tensor(np.ones(shape=[4]))]}),
    ('BatchMatMul_2_Error', {
        'block': (P.BatchMatMul(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[1, 5])), Tensor(np.ones(shape=[3, 4]))]}),
    ('BatchMatMul_3_Error', {
        'block': (P.BatchMatMul(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5])), Tensor(np.ones(shape=[3, 3, 4]))]}),
    ('BatchMatMul_4_Error', {
        'block': (P.BatchMatMul(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5])), Tensor(np.ones(shape=[1, 3, 5, 4]))]}),
    ('BatchMatMul_5_Error', {
        'block': (P.BatchMatMul(), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5])), Tensor(np.ones(shape=[2, 5, 4]))]}),
]


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set
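

# Illustrative sketches: hand-rolled versions of one entry from each case list
# above, assuming the pipelines roughly compile each 'block' with its
# 'desc_inputs'. The '_sketch' test names are additions for illustration, and the
# expected exception type simply mirrors the 'MatMul_2_Error' entry.
def test_conv_padding_valid_compile_sketch():
    """Sketch of the 'Conv_Padding_2' case: compile a valid Conv2d block directly."""
    net = nn.Conv2d(1, 6, 5, pad_mode='valid', padding=0)
    inp = Tensor(np.ones(shape=[1, 1, 6, 5]).astype(np.float32))
    _cell_graph_executor.compile(net, inp)


def test_matmul_shape_mismatch_sketch():
    """Sketch of the 'MatMul_2_Error' case: mismatched inner dimensions should raise."""

    class MatMulNet(nn.Cell):
        def __init__(self):
            super(MatMulNet, self).__init__()
            self.matmul = P.MatMul()

        def construct(self, x, y):
            return self.matmul(x, y)

    x = Tensor(np.ones(shape=[1, 5]))
    y = Tensor(np.ones(shape=[3, 4]))
    with pytest.raises(ValueError):
        _cell_graph_executor.compile(MatMulNet(), x, y)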