# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_vm """
import numpy as np

from .....vm_impl import vm


def test_avg_pooling():
    """Average pooling of a 4x4 single-channel map (2x2 window, stride 1)."""
    feature_map = np.array([[[[-4., -3., 1., 9.],
                              [-9., -1., 3., 4.],
                              [1., -1., -3., -6.],
                              [-2., -1., -2., -15.]]]], dtype=np.float32)
    pooled = vm.avg_pooling(feature_map, pool_h=2, pool_w=2, stride=1)
    # Hand-computed 3x3 result of averaging each 2x2 window.
    expected = [[[[-4.25, 0.0, 4.25],
                  [-2.5, -0.5, -0.5],
                  [-0.75, -1.75, -6.5]]]]
    assert (pooled == expected).all()


def test_avg_pool_grad():
    """Backward pass of average pooling.

    Previously this test ended with a vacuous ``assert True`` (marked
    "To do"); it now checks that the gradient propagated back through
    ``avg_pool_grad`` has the shape of the original pooling input, which
    is the basic contract of a pooling backward pass.
    """
    input_data = np.array([[[[1., 2, 3, 4],
                             [5, 6, 7, 8],
                             [9, 10, 11, 12],
                             [13, 14, 15, 16]]]]).astype(np.float32)
    dout = vm.avg_pooling(input_data, pool_h=2, pool_w=2, stride=1)
    print("vm.avg_pooling dout: ", dout)
    out = vm.avg_pool_grad(dout, input_data.shape, 2, 2, 1)
    print("vm.avg_pool_grad: ", out)
    # The input gradient must match the forward input's shape.
    assert out.shape == input_data.shape


def test_batch_norm():
    """Batch normalization smoke test.

    Previously this test asserted nothing at all; it now checks that
    normalization preserves the input shape (batch norm is an
    element-wise transform — TODO confirm against vm.batch_norm's
    signature for any extra returns).
    """
    input_data = np.random.randint(0, 255, [1, 3, 224, 224])
    print("input_data.shape: ", input_data.shape)
    print("input_data: ", input_data)
    output = vm.batch_norm(input_data)
    print("vm.batch_norm: ", output)
    # Element-wise normalization must not change the tensor shape.
    assert output.shape == input_data.shape


def test_conv2d():
    """2-D convolution of a 6x6 image with a 3x3 vertical-edge kernel."""
    image = np.array([[[
        [3, 0, 1, 2, 7, 4],
        [1, 5, 8, 9, 3, 1],
        [2, 7, 2, 5, 1, 3],
        [0, 1, 3, 1, 7, 8],
        [4, 2, 1, 6, 2, 8],
        [2, 4, 5, 2, 3, 9]]]], dtype=np.float32)
    # Three identical rows [1, 0, -1] — a vertical edge detector.
    kernel = np.array([[[[1, 0, -1]] * 3]], dtype=np.float32)
    result = vm.conv2d(image, kernel)
    expected = np.array([[[
        [-5., -4., 0., 8.],
        [-10., -2., 2., 3.],
        [0., -2., -4., -7.],
        [-3., -2., -3., -16.]]]], dtype=np.float32)
    assert (result == expected).all()


def test_conv2d_with_bias():
    """Same convolution as test_conv2d, but with a bias of 1 added."""
    image = np.array([[[
        [3, 0, 1, 2, 7, 4],
        [1, 5, 8, 9, 3, 1],
        [2, 7, 2, 5, 1, 3],
        [0, 1, 3, 1, 7, 8],
        [4, 2, 1, 6, 2, 8],
        [2, 4, 5, 2, 3, 9]]]], dtype=np.float32)
    kernel = np.array([[[[1, 0, -1]] * 3]], dtype=np.float32)
    bias = np.ones(1, dtype=np.float32)
    result = vm.conv2d(image, kernel, bias)
    # Expected values are the unbiased convolution output plus 1.
    expected = np.array([[[
        [-4., -3., 1., 9.],
        [-9., -1., 3., 4.],
        [1., -1., -3., -6.],
        [-2., -1., -2., -15.]]]], dtype=np.float32)
    assert (result == expected).all()


def test_conv2d_backprop_filter():
    """Filter gradient of a 2-D convolution.

    Previously this test ended with a vacuous ``assert True``; it now
    checks that the filter gradient has the same shape as the filter,
    which is the shape explicitly requested via ``weight.shape``.
    """
    x = np.array([[[
        [3, 0, 1, 2, 7, 4],
        [1, 5, 8, 9, 3, 1],
        [2, 7, 2, 5, 1, 3],
        [0, 1, 3, 1, 7, 8],
        [4, 2, 1, 6, 2, 8],
        [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)
    weight = np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)
    out = vm.conv2d(x, weight)
    backprop_filter = vm.conv2d_backprop_filter(out, x, weight.shape)
    print(backprop_filter)
    # The filter gradient must come back in the requested filter shape.
    assert backprop_filter.shape == weight.shape


def test_conv2d_backprop_input():
    """Input gradient of a 2-D convolution.

    Previously this test ended with a vacuous ``assert True``; it now
    checks that the input gradient has the same shape as the forward
    input, which is the shape explicitly requested via ``x.shape``.
    """
    x = np.array([[[
        [3, 0, 1, 2, 7, 4],
        [1, 5, 8, 9, 3, 1],
        [2, 7, 2, 5, 1, 3],
        [0, 1, 3, 1, 7, 8],
        [4, 2, 1, 6, 2, 8],
        [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)
    weight = np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)
    out = vm.conv2d(x, weight)
    grad = vm.conv2d_backprop_input(out, x.shape, weight)
    print(grad)
    # The input gradient must come back in the requested input shape.
    assert grad.shape == x.shape


def test_flatten():
    """Flattening a 2x3 matrix yields a column whose transpose is 1..6."""
    matrix = np.arange(1, 7, dtype=np.float32).reshape(2, 3)
    flat = vm.flatten(matrix)
    assert (flat.T == [1, 2, 3, 4, 5, 6]).all()
    # dtype must survive the flatten.
    assert flat.dtype == np.float32


def test_flatten2():
    """flatten2 produces a single-row (1, 6) array from a 2x3 matrix."""
    matrix = np.arange(1, 7, dtype=np.float32).reshape(2, 3)
    flat = vm.flatten2(matrix)
    assert (flat == [1, 2, 3, 4, 5, 6]).all()
    # Row-vector shape, not a 1-D array.
    assert flat.shape == (1, 6)
    assert flat.dtype == np.float32


def test_flatten_batch():
    """flatten_batch keeps the leading batch axis and flattens the rest."""
    batch = np.array([[[9, 4, 14, 1],
                       [7, 10, 14, 13],
                       [1, 9, 16, 7],
                       [15, 16, 0, 4]],
                      [[16, 13, 13, 10],
                       [0, 12, 5, 9],
                       [15, 0, 11, 1],
                       [4, 16, 4, 1]],
                      [[2, 8, 1, 13],
                       [5, 15, 4, 11],
                       [8, 2, 17, 16],
                       [5, 13, 0, 2]],
                      [[14, 8, 6, 8],
                       [0, 8, 6, 15],
                       [9, 1, 8, 5],
                       [12, 6, 13, 8]],
                      [[13, 11, 6, 3],
                       [8, 6, 16, 5],
                       [7, 10, 0, 8],
                       [17, 17, 17, 3]]], dtype=np.float32)
    flat = vm.flatten_batch(batch)
    # Each 4x4 sample becomes one row of 16 values, batch order preserved.
    expected = np.array(
        [[9, 4, 14, 1, 7, 10, 14, 13, 1, 9, 16, 7, 15, 16, 0, 4],
         [16, 13, 13, 10, 0, 12, 5, 9, 15, 0, 11, 1, 4, 16, 4, 1],
         [2, 8, 1, 13, 5, 15, 4, 11, 8, 2, 17, 16, 5, 13, 0, 2],
         [14, 8, 6, 8, 0, 8, 6, 15, 9, 1, 8, 5, 12, 6, 13, 8],
         [13, 11, 6, 3, 8, 6, 16, 5, 7, 10, 0, 8, 17, 17, 17, 3]],
        dtype=np.float32)
    assert (flat == expected).all()
    assert flat.shape == expected.shape
    assert flat.dtype == np.float32


def test_im2col():
    """im2col on a constant 32x32 image keeps the float32 dtype."""
    img = np.full([1, 1, 32, 32], 0.01, dtype=np.float32)
    print("input img: ", img)
    col = vm.im2col(img, 2, 3, 1, 1)
    print("output col.shape : ", col.shape)
    print("output col: ", col)
    print("output col.dtype: ", col.dtype)
    assert col.dtype == np.float32


def test_matmul():
    """Dot product of two length-3 vectors: 1*0 + 2*1 + 3*0.5 = 3.5."""
    vec = np.array([1, 2, 3], dtype=np.float32)
    weights = np.array([0, 1, 0.5], dtype=np.float32)
    product = vm.matmul(vec, weights)
    assert product == 3.5
    assert product.dtype == np.float32


def test_max_pooling():
    """Max pooling of a 4x4 single-channel map (2x2 window, stride 1)."""
    feature_map = np.array([[[
        [-4., -3., 1., 9.],
        [-9., -1., 3., 4.],
        [1., -1., -3., -6.],
        [-2., -1., -2., -15.]]]], dtype=np.float32)
    pooled = vm.max_pooling(feature_map, pool_h=2, pool_w=2, stride=1)
    # Hand-computed 3x3 result: maximum of each 2x2 window.
    expected = [[[[-1., 3., 9.],
                  [1., 3., 4.],
                  [1., -1., -2.]]]]
    assert (pooled == expected).all()
    assert pooled.dtype == np.float32


def test_np_convolve():
    """np.convolve in default 'full' mode returns len(a)+len(v)-1 values."""
    result = np.convolve([1, 2, 3], [0, 1, 0.5]).astype(np.float32)
    expected = np.array([0.0, 1.0, 2.5, 4.0, 1.5], dtype=np.float32)
    assert (result == expected).all()
    assert result.dtype == np.float32


def test_np_convolve_same():
    """np.convolve 'same' mode returns max(len(a), len(v)) centered values."""
    result = np.convolve([1, 2, 3], [0, 1, 0.5], 'same').astype(np.float32)
    expected = np.array([1.0, 2.5, 4.0], dtype=np.float32)
    assert (result == expected).all()
    assert result.dtype == np.float32


def test_np_convolve_valid():
    """np.convolve 'valid' mode keeps only fully-overlapping positions."""
    result = np.convolve([1, 2, 3], [0, 1, 0.5], 'valid').astype(np.float32)
    expected = np.array([2.5], dtype=np.float32)
    assert (result == expected).all()
    assert result.dtype == np.float32


def test_relu():
    """ReLU forward clamps negatives to 0; its grad is 0/1 per element."""
    activations = np.array([-0.32208174, 0.33999891], dtype=np.float32)
    forward = vm.relu(activations)
    assert np.allclose([-0., 0.33999891], forward)
    assert forward.dtype == np.float32

    # Gradient evaluated on the forward output: 0 where clamped, 1 elsewhere.
    backward = vm.relu_grad(forward)
    assert (backward == [0., 1.]).all()
    assert backward.dtype == np.float32


def test_softmax():
    """Softmax: uniform logits give uniform probabilities; axis=1 rows sum to 1."""
    uniform_logits = 2.84806275 * np.ones([1, 10]).astype(np.float32)
    probs = vm.softmax(uniform_logits)
    # Ten equal logits -> ten equal probabilities of 0.1.
    assert np.allclose([0.1] * 10, probs)
    assert probs.dtype == np.float32

    row_logits = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    probs = vm.softmax(row_logits, axis=1)
    # Softmax is shift-invariant, so both rows give the same distribution.
    reference = [[0.09003057, 0.24472847, 0.66524096],
                 [0.09003057, 0.24472847, 0.66524096]]
    assert np.allclose(reference, probs)
    assert probs.dtype == np.float32


def test_softmax_cross_entropy_with_logit():
    """Softmax cross-entropy returns a float32 loss and float32 gradient."""
    logits = np.array([[1, 2, 3, 4, 2, 1, 0, 2, 1, 1],
                       [1, 2, 4, 1, 0, 5, 0, 2, 1, 3]], dtype=np.float32)
    # One-hot targets: class 3 for the first sample, class 2 for the second.
    labels = np.array([[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                       [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)
    loss, dx = vm.softmax_cross_entropy_with_logits(logits, labels)
    print("logits.shape: ", logits.shape)
    print("logits: ", logits)
    print("softmax: ", vm.softmax(logits))
    print("labels: ", labels)
    print("loss: ", loss)
    print("dx: ", dx)
    assert loss.dtype == np.float32
    assert dx.dtype == np.float32