# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

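"""Tests for the GPU Concat operator (P.Concat): fixed-axis concatenation of
parameter tensors and multi-input concatenation of runtime tensors across dtypes."""
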
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P


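# Concat along axis 2 of two 3-D parameters, shapes (2, 2, 1) and (2, 2, 2).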
class ConcatV32(nn.Cell):
    def __init__(self, nptype):
        super(ConcatV32, self).__init__()

        self.cat = P.Concat(axis=2)
        self.x1 = Parameter(initializer(
            Tensor(np.arange(2 * 2 * 1).reshape(2, 2, 1).astype(nptype)), [2, 2, 1]), name='x1')
        self.x2 = Parameter(initializer(
            Tensor(np.arange(2 * 2 * 2).reshape(2, 2, 2).astype(nptype)), [2, 2, 2]), name='x2')

    @ms_function
    def construct(self):
        return self.cat((self.x1, self.x2))


def axis32(nptype):
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')

    cat = ConcatV32(nptype)
    output = cat()
    expect = np.array([[[0., 0., 1.],
                        [1., 2., 3.]],
                       [[2., 4., 5.],
                        [3., 6., 7.]]]).astype(nptype)
    assert (output.asnumpy() == expect).all()

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_float64():
    axis32(np.float64)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_float32():
    axis32(np.float32)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_int16():
    axis32(np.int16)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_uint8():
    axis32(np.uint8)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis32_bool():
    axis32(np.bool_)


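# Concat along axis 3 of two 4-D parameters, shapes (2, 2, 2, 2) and (2, 2, 2, 3).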
class ConcatV43(nn.Cell):
    def __init__(self, nptype):
        super(ConcatV43, self).__init__()

        self.cat = P.Concat(axis=3)
        self.x1 = Parameter(initializer(
            Tensor(np.arange(2 * 2 * 2 * 2).reshape(2, 2, 2, 2).astype(nptype)), [2, 2, 2, 2]), name='x1')
        self.x2 = Parameter(initializer(
            Tensor(np.arange(2 * 2 * 2 * 3).reshape(2, 2, 2, 3).astype(nptype)), [2, 2, 2, 3]), name='x2')

    @ms_function
    def construct(self):
        return self.cat((self.x1, self.x2))


def axis43(nptype):
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')

    cat = ConcatV43(nptype)
    output = cat()
    expect = np.array([[[[0., 1., 0., 1., 2.],
                         [2., 3., 3., 4., 5.]],
                        [[4., 5., 6., 7., 8.],
                         [6., 7., 9., 10., 11.]]],
                       [[[8., 9., 12., 13., 14.],
                         [10., 11., 15., 16., 17.]],
                        [[12., 13., 18., 19., 20.],
                         [14., 15., 21., 22., 23.]]]]).astype(nptype)
    assert (output.asnumpy() == expect).all()

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_float64():
    axis43(np.float64)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_float32():
    axis43(np.float32)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_int16():
    axis43(np.int16)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_uint8():
    axis43(np.uint8)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis43_bool():
    axis43(np.bool_)


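# Concat along axis 1 of two 2-D parameters, shapes (2, 2) and (2, 3).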
class ConcatV21(nn.Cell):
    def __init__(self, nptype):
        super(ConcatV21, self).__init__()

        self.cat = P.Concat(axis=1)
        self.x1 = Parameter(initializer(
            Tensor(np.arange(2 * 2).reshape(2, 2).astype(nptype)), [2, 2]), name='x1')
        self.x2 = Parameter(initializer(
            Tensor(np.arange(2 * 3).reshape(2, 3).astype(nptype)), [2, 3]), name='x2')

    @ms_function
    def construct(self):
        return self.cat((self.x1, self.x2))


def axis21(nptype):
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')

    cat = ConcatV21(nptype)
    output = cat()
    expect = np.array([[0., 1., 0., 1., 2.],
                       [2., 3., 3., 4., 5.]]).astype(nptype)
    assert (output.asnumpy() == expect).all()

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_float64():
    axis21(np.float64)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_float32():
    axis21(np.float32)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_int16():
    axis21(np.int16)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_uint8():
    axis21(np.uint8)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_axis21_bool():
    axis21(np.bool_)


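# Concat along axis 1 of three runtime inputs; results are checked against numpy.concatenate.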
class Concat3INet(nn.Cell):
    def __init__(self):
        super(Concat3INet, self).__init__()
        self.cat = P.Concat(axis=1)

    def construct(self, x1, x2, x3):
        return self.cat((x1, x2, x3))


def concat_3i(nptype):
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')

    cat = Concat3INet()
    x1_np = np.random.randn(32, 4, 224, 224).astype(nptype)
    x2_np = np.random.randn(32, 8, 224, 224).astype(nptype)
    x3_np = np.random.randn(32, 10, 224, 224).astype(nptype)
    output_np = np.concatenate((x1_np, x2_np, x3_np), axis=1)

    x1_ms = Tensor(x1_np)
    x2_ms = Tensor(x2_np)
    x3_ms = Tensor(x3_np)
    output_ms = cat(x1_ms, x2_ms, x3_ms)

    # Concat is expected to be exact; compare against numpy within a tiny tolerance.
    error = np.ones(shape=output_np.shape) * 10e-6
    diff = np.abs(output_ms.asnumpy() - output_np)
    assert np.all(diff < error)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_float64():
    concat_3i(np.float64)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_float32():
    concat_3i(np.float32)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_int16():
    concat_3i(np.int16)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_uint8():
    concat_3i(np.uint8)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_3i_bool():
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    cat = Concat3INet()

    x1_np = np.random.choice([True, False], (32, 4, 224, 224)).astype(np.bool_)
    x2_np = np.random.choice([True, False], (32, 8, 224, 224)).astype(np.bool_)
    x3_np = np.random.choice([True, False], (32, 10, 224, 224)).astype(np.bool_)
    output_np = np.concatenate((x1_np, x2_np, x3_np), axis=1)

    x1_ms = Tensor(x1_np)
    x2_ms = Tensor(x2_np)
    x3_ms = Tensor(x3_np)
    output_ms = cat(x1_ms, x2_ms, x3_ms)

    assert (output_ms.asnumpy() == output_np).all()


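# Concat along axis 1 of four runtime inputs; results are checked against numpy.concatenate.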
class Concat4INet(nn.Cell):
    def __init__(self):
        super(Concat4INet, self).__init__()
        self.cat = P.Concat(axis=1)

    def construct(self, x1, x2, x3, x4):
        return self.cat((x1, x2, x3, x4))


def concat_4i(nptype):
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')

    cat = Concat4INet()
    x1_np = np.random.randn(32, 4, 224, 224).astype(nptype)
    x2_np = np.random.randn(32, 8, 224, 224).astype(nptype)
    x3_np = np.random.randn(32, 10, 224, 224).astype(nptype)
    x4_np = np.random.randn(32, 5, 224, 224).astype(nptype)
    output_np = np.concatenate((x1_np, x2_np, x3_np, x4_np), axis=1)

    x1_ms = Tensor(x1_np)
    x2_ms = Tensor(x2_np)
    x3_ms = Tensor(x3_np)
    x4_ms = Tensor(x4_np)
    output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms)

    error = np.ones(shape=output_np.shape) * 10e-6
    diff = np.abs(output_ms.asnumpy() - output_np)
    assert np.all(diff < error)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_float64():
    concat_4i(np.float64)

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_float32():
    concat_4i(np.float32)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_int16():
    concat_4i(np.int16)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_uint8():
    concat_4i(np.uint8)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_concat_4i_bool():
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')

    cat = Concat4INet()
    x1_np = np.random.choice([True, False], (32, 4, 224, 224)).astype(np.bool_)
    x2_np = np.random.choice([True, False], (32, 8, 224, 224)).astype(np.bool_)
    x3_np = np.random.choice([True, False], (32, 10, 224, 224)).astype(np.bool_)
    x4_np = np.random.choice([True, False], (32, 5, 224, 224)).astype(np.bool_)
    output_np = np.concatenate((x1_np, x2_np, x3_np, x4_np), axis=1)

    x1_ms = Tensor(x1_np)
    x2_ms = Tensor(x2_np)
    x3_ms = Tensor(x3_np)
    x4_ms = Tensor(x4_np)
    output_ms = cat(x1_ms, x2_ms, x3_ms, x4_ms)

    assert (output_ms.asnumpy() == output_np).all()