• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
26
class BatchMatMulNet(nn.Cell):
    """Minimal Cell wrapping P.BatchMatMul so the op can be executed as a network.

    The transpose flags are fixed at construction time and applied to the
    first/second operand respectively on every call.
    """

    def __init__(self, transpose_a=False, transpose_b=False):
        super(BatchMatMulNet, self).__init__()
        self.matmul_op = P.BatchMatMul(transpose_a, transpose_b)

    def construct(self, x, y):
        """Return the batched matrix product of x and y."""
        return self.matmul_op(x, y)
34
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4d():
    """BatchMatMul on 4-D float32 inputs without transposition."""
    x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
    y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    result = BatchMatMulNet()(x, y)
    # Hand-computed (2, 4, 1, 4) product of the arange inputs above.
    expected = np.array([[[[20, 23, 26, 29]],
                          [[200, 212, 224, 236]],
                          [[596, 617, 638, 659]],
                          [[1208, 1238, 1268, 1298]]],
                         [[[2036, 2075, 2114, 2153]],
                          [[3080, 3128, 3176, 3224]],
                          [[4340, 4397, 4454, 4511]],
                          [[5816, 5882, 5948, 6014]]]])
    assert np.array_equal(result.asnumpy(), expected)
55
56
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4d_float64():
    """BatchMatMul on 4-D float64 inputs without transposition."""
    x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float64)
    y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float64)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    result = BatchMatMulNet()(x, y)
    # Same exact integer-valued product as the float32 case; float64 holds it exactly.
    expected = np.array([[[[20, 23, 26, 29]],
                          [[200, 212, 224, 236]],
                          [[596, 617, 638, 659]],
                          [[1208, 1238, 1268, 1298]]],
                         [[[2036, 2075, 2114, 2153]],
                          [[3080, 3128, 3176, 3224]],
                          [[4340, 4397, 4454, 4511]],
                          [[5816, 5882, 5948, 6014]]]])
    assert np.array_equal(result.asnumpy(), expected)
77
78
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4d_transpose_a():
    """BatchMatMul with transpose_a: x is (2, 4, 3, 1) and transposed to (2, 4, 1, 3)."""
    x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
    y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    result = BatchMatMulNet(transpose_a=True)(x, y)
    # Transposing a trailing size-1 axis leaves the data identical to the
    # non-transposed test, so the expected product is the same.
    expected = np.array([[[[20, 23, 26, 29]],
                          [[200, 212, 224, 236]],
                          [[596, 617, 638, 659]],
                          [[1208, 1238, 1268, 1298]]],
                         [[[2036, 2075, 2114, 2153]],
                          [[3080, 3128, 3176, 3224]],
                          [[4340, 4397, 4454, 4511]],
                          [[5816, 5882, 5948, 6014]]]])
    assert np.array_equal(result.asnumpy(), expected)
99
100
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4d_transpose_b():
    """BatchMatMul with transpose_b: y is (2, 4, 4, 3) and transposed to (2, 4, 3, 4)."""
    x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
    y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    result = BatchMatMulNet(transpose_b=True)(x, y)
    # Hand-computed (2, 4, 1, 4) product of x against y's transposed matrices.
    expected = np.array([[[[5, 14, 23, 32]],
                          [[158, 194, 230, 266]],
                          [[527, 590, 653, 716]],
                          [[1112, 1202, 1292, 1382]]],
                         [[[1913, 2030, 2147, 2264]],
                          [[2930, 3074, 3218, 3362]],
                          [[4163, 4334, 4505, 4676]],
                          [[5612, 5810, 6008, 6206]]]])
    assert np.array_equal(result.asnumpy(), expected)
121
122
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4d_transpose_ab():
    """BatchMatMul with both operands transposed."""
    x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
    y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    result = BatchMatMulNet(transpose_a=True, transpose_b=True)(x, y)
    # x's transpose only swaps a size-1 axis, so the product matches the
    # transpose_b-only case.
    expected = np.array([[[[5, 14, 23, 32]],
                          [[158, 194, 230, 266]],
                          [[527, 590, 653, 716]],
                          [[1112, 1202, 1292, 1382]]],
                         [[[1913, 2030, 2147, 2264]],
                          [[2930, 3074, 3218, 3362]],
                          [[4163, 4334, 4505, 4676]],
                          [[5612, 5810, 6008, 6206]]]])
    assert np.array_equal(result.asnumpy(), expected)
143
144
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_4D_fp16():
    """BatchMatMul on 4-D float16 inputs without transposition."""
    x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16)
    y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    result = BatchMatMulNet()(x, y)
    # NOTE: several entries (e.g. 2076 instead of the exact 2075) reflect
    # float16 rounding of the accumulated products.
    expected = np.array([[[[20, 23, 26, 29]],
                          [[200, 212, 224, 236]],
                          [[596, 617, 638, 659]],
                          [[1208, 1238, 1268, 1298]]],

                         [[[2036, 2076, 2114, 2152]],
                          [[3080, 3128, 3176, 3224]],
                          [[4340, 4396, 4456, 4510]],
                          [[5816, 5880, 5948, 6016]]]]).astype(np.float16)
    assert np.array_equal(result.asnumpy(), expected)
165
166
class BatchMatMul_d(nn.Cell):
    """BatchMatMul whose operands are first routed through the GPU
    dynamic-shape conversion op, exercising the dynamic-shape kernel path.
    """

    def __init__(self, transpose_a=False, transpose_b=False):
        super(BatchMatMul_d, self).__init__()
        self.batch_matmul = P.BatchMatMul(transpose_a, transpose_b)
        self.test_dynamic = inner.GpuConvertToDynamicShape()

    def construct(self, x, y):
        """Convert both inputs to dynamic shape, then multiply them."""
        return self.batch_matmul(self.test_dynamic(x), self.test_dynamic(y))
177
178
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_batchmatmul_dynamic():
    """Dynamic-shape BatchMatMul agrees with np.matmul for 3-D and 4-D inputs."""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = BatchMatMul_d()

    # One 3-D case and one 4-D case, run through the same dynamic-shape net.
    cases = (((2, 2, 2), (2, 2, 7)),
             ((2, 4, 1, 3), (2, 4, 3, 4)))
    for x_shape, y_shape in cases:
        x = np.arange(np.prod(x_shape)).reshape(x_shape).astype(np.float32)
        y = np.arange(np.prod(y_shape)).reshape(y_shape).astype(np.float32)
        out = net(Tensor(x), Tensor(y))
        assert np.array_equal(out.asnumpy(), np.matmul(x, y))
200