# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
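
"""Tests for the GPU Unique operator.

Covers 1-D inputs in float32, float16, int32 and int64 (unsorted, pre-sorted,
all-zero, and large repeated ranges), plus a dynamic-shape path that feeds the
unique values into Split.
"""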

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner


class NetUnique(nn.Cell):
    """Thin Cell wrapper that returns the outputs of P.Unique."""

    def __init__(self):
        super(NetUnique, self).__init__()
        self.unique = P.Unique()

    def construct(self, x):
        x_unique, x_idx = self.unique(x)
        return x_unique, x_idx


class NetUniqueDynamic(nn.Cell):
    """Converts the input to a dynamic shape, runs Unique, then splits the unique values in two."""

    def __init__(self):
        super(NetUniqueDynamic, self).__init__()
        self.convert = inner.GpuConvertToDynamicShape()
        self.unique = P.Unique()
        self.split = P.Split(0, 2)

    def construct(self, x):
        x_convert = self.convert(x)
        x_unique, x_idx = self.unique(x_convert)
        x_split = self.split(x_unique)
        return x_unique, x_idx, x_split


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d():
    x = Tensor(np.array([4, 5, 1, 2, 3, 3, 4, 5]).astype(np.float32))
    exp_output = np.array([1, 2, 3, 4, 5]).astype(np.float32)
    exp_idx = np.array([3, 4, 0, 1, 2, 2, 3, 4]).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d_float():
    x = Tensor(np.array([0.4, 0.5, 1.23, 2.2, 12.43, 12.43, 0.4, 0.5]).astype(np.float32))
    exp_output = np.array([0.4, 0.5, 1.23, 2.2, 12.43]).astype(np.float32)
    exp_idx = np.array([0, 1, 2, 3, 4, 4, 0, 1]).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d_sorted():
    x = Tensor(np.array([1, 1, 2, 4, 4, 4, 7, 8, 8]).astype(np.float32))
    exp_output = np.array([1, 2, 4, 7, 8]).astype(np.float32)
    exp_idx = np.array([0, 0, 1, 2, 2, 2, 3, 4, 4]).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_zeros():
    x = Tensor(np.zeros(1000).astype(np.float32))
    exp_output = np.zeros(1).astype(np.float32)
    exp_idx = np.zeros(1000).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_large():
    x_np1 = np.arange(100)
    x_np2 = np.arange(100, 200)
    x_np3 = np.arange(200, 300)
    x_np = np.concatenate((x_np1, x_np2, x_np3, x_np1, x_np2, x_np3, x_np1, x_np2, x_np3))
    x = Tensor(x_np.astype(np.float32))
    exp_output = np.arange(300).astype(np.float32)
    exp_idx = np.concatenate((x_np1, x_np2, x_np3, x_np1, x_np2, x_np3, x_np1, x_np2, x_np3)).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


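# The tests below repeat the float32 cases with float16 inputs.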
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d_half():
    x = Tensor(np.array([0.4, 0.5, 1.23, 2.2, 12.43, 12.43, 0.4, 0.5]).astype(np.float16))
    exp_output = np.array([0.4, 0.5, 1.23, 2.2, 12.43]).astype(np.float16)
    exp_idx = np.array([0, 1, 2, 3, 4, 4, 0, 1]).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d_sorted_half():
    x = Tensor(np.array([1, 1, 2, 4, 4, 4, 7, 8, 8]).astype(np.float16))
    exp_output = np.array([1, 2, 4, 7, 8]).astype(np.float16)
    exp_idx = np.array([0, 0, 1, 2, 2, 2, 3, 4, 4]).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_zeros_half():
    x = Tensor(np.zeros(1000).astype(np.float16))
    exp_output = np.zeros(1).astype(np.float16)
    exp_idx = np.zeros(1000).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_large_half():
    x_np1 = np.arange(100)
    x_np2 = np.arange(100, 200)
    x_np3 = np.arange(200, 300)
    x_np = np.concatenate((x_np1, x_np2, x_np3, x_np1, x_np2, x_np3, x_np1, x_np2, x_np3))
    x = Tensor(x_np.astype(np.float16))
    exp_output = np.arange(300).astype(np.float16)
    exp_idx = np.concatenate((x_np1, x_np2, x_np3, x_np1, x_np2, x_np3, x_np1, x_np2, x_np3)).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


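# int32 input variants of the cases above.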
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d_int32():
    x = Tensor(np.array([4, 5, 1, 2, 3, 3, 4, 5]).astype(np.int32))
    exp_output = np.array([1, 2, 3, 4, 5]).astype(np.int32)
    exp_idx = np.array([3, 4, 0, 1, 2, 2, 3, 4]).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d_sorted_int32():
    x = Tensor(np.array([1, 1, 2, 4, 4, 4, 7, 8, 8]).astype(np.int32))
    exp_output = np.array([1, 2, 4, 7, 8]).astype(np.int32)
    exp_idx = np.array([0, 0, 1, 2, 2, 2, 3, 4, 4]).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_zeros_int32():
    x = Tensor(np.zeros(1000).astype(np.int32))
    exp_output = np.zeros(1).astype(np.int32)
    exp_idx = np.zeros(1000).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_large_int32():
    x_np1 = np.arange(100)
    x_np2 = np.arange(100, 200)
    x_np3 = np.arange(200, 300)
    x_np = np.concatenate((x_np1, x_np2, x_np3, x_np1, x_np2, x_np3, x_np1, x_np2, x_np3))
    x = Tensor(x_np.astype(np.int32))
    exp_output = np.arange(300).astype(np.int32)
    exp_idx = np.concatenate((x_np1, x_np2, x_np3, x_np1, x_np2, x_np3, x_np1, x_np2, x_np3)).astype(np.int32)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


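# Dynamic-shape path: the input is converted to a dynamic shape before Unique
# and Split run, and the same compiled net is reused on inputs of different lengths.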
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_dynamic():
    x = Tensor(np.array([4, 5, 1, 2, 3, 3, 4, 5, 6]).astype(np.float32))
    expt_unique = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
    expt_index = np.array([3, 4, 0, 1, 2, 2, 3, 4, 5]).astype(np.int32)
    expt_split = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)

    x2 = Tensor(np.array([1, 1, 4, 4, 7, 8, 8]).astype(np.float32))
    expt_unique2 = np.array([1, 4, 7, 8]).astype(np.float32)
    expt_index2 = np.array([0, 0, 1, 1, 2, 3, 3]).astype(np.int32)
    expt_split2 = np.array([[1, 4], [7, 8]]).astype(np.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUniqueDynamic()
    x_unique, x_idx, x_split = net(x)
    assert (x_unique.asnumpy() == expt_unique).all()
    assert (x_idx.asnumpy() == expt_index).all()
    for i, out in enumerate(x_split):
        assert (out.asnumpy() == expt_split[i]).all()

    x_unique2, x_idx2, x_split2 = net(x2)
    assert (x_unique2.asnumpy() == expt_unique2).all()
    assert (x_idx2.asnumpy() == expt_index2).all()
    for i, out in enumerate(x_split2):
        assert (out.asnumpy() == expt_split2[i]).all()


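# int64 input variants; the expected index dtype is int64 in these cases.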
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d_int64():
    x = Tensor(np.array([4, 5, 1, 2, 3, 3, 4, 5]).astype(np.int64))
    exp_output = np.array([1, 2, 3, 4, 5]).astype(np.int64)
    exp_idx = np.array([3, 4, 0, 1, 2, 2, 3, 4]).astype(np.int64)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    print(x_unique)
    print(x_idx)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_1d_sorted_int64():
    x = Tensor(np.array([1, 1, 2, 4, 4, 4, 7, 8, 8]).astype(np.int64))
    exp_output = np.array([1, 2, 4, 7, 8]).astype(np.int64)
    exp_idx = np.array([0, 0, 1, 2, 2, 2, 3, 4, 4]).astype(np.int64)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_zeros_int64():
    x = Tensor(np.zeros(1000).astype(np.int64))
    exp_output = np.zeros(1).astype(np.int64)
    exp_idx = np.zeros(1000).astype(np.int64)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unique_large_int64():
    x_np1 = np.arange(100)
    x_np2 = np.arange(100, 200)
    x_np3 = np.arange(200, 300)
    x_np = np.concatenate((x_np1, x_np2, x_np3, x_np1, x_np2, x_np3, x_np1, x_np2, x_np3))
    x = Tensor(x_np.astype(np.int64))
    exp_output = np.arange(300).astype(np.int64)
    exp_idx = np.concatenate((x_np1, x_np2, x_np3, x_np1, x_np2, x_np3, x_np1, x_np2, x_np3)).astype(np.int64)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = NetUnique()
    x_unique, x_idx = net(x)
    assert (x_unique.asnumpy() == exp_output).all()
    assert (x_idx.asnumpy() == exp_idx).all()