# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P


class Net(nn.Cell):
    """Wraps P.FloatStatus: returns a one-element tensor, 1 if the input contains NaN/Inf, else 0."""

    def __init__(self):
        super(Net, self).__init__()
        self.status = P.FloatStatus()

    def construct(self, x):
        return self.status(x)


class Netnan(nn.Cell):
    """Wraps P.IsNan: element-wise NaN check."""

    def __init__(self):
        super(Netnan, self).__init__()
        self.isnan = P.IsNan()

    def construct(self, x):
        return self.isnan(x)


class Netinf(nn.Cell):
    """Wraps P.IsInf: element-wise Inf check."""

    def __init__(self):
        super(Netinf, self).__init__()
        self.isinf = P.IsInf()

    def construct(self, x):
        return self.isinf(x)


class Netfinite(nn.Cell):
    """Wraps P.IsFinite: element-wise finiteness check."""

    def __init__(self):
        super(Netfinite, self).__init__()
        self.isfinite = P.IsFinite()

    def construct(self, x):
        return self.isfinite(x)


context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
x1 = np.array([[1.2, 2, np.nan, 88]]).astype(np.float32)       # contains a NaN
x2 = np.array([[np.inf, 1, 88.0, 0]]).astype(np.float32)       # contains an Inf
x3 = np.array([[1, 2], [3, 4], [5.0, 88.0]]).astype(np.float32)  # all finite


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_status():
    ms_status = Net()
    output1 = ms_status(Tensor(x1))
    expect1 = 1
    assert output1.asnumpy()[0] == expect1

    output2 = ms_status(Tensor(x2))
    expect2 = 1
    assert output2.asnumpy()[0] == expect2

    output3 = ms_status(Tensor(x3))
    expect3 = 0
    assert output3.asnumpy()[0] == expect3


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nan():
    ms_isnan = Netnan()
    output1 = ms_isnan(Tensor(x1))
    expect1 = [[False, False, True, False]]
    assert (output1.asnumpy() == expect1).all()

    output2 = ms_isnan(Tensor(x2))
    expect2 = [[False, False, False, False]]
    assert (output2.asnumpy() == expect2).all()

    output3 = ms_isnan(Tensor(x3))
    expect3 = [[False, False], [False, False], [False, False]]
    assert (output3.asnumpy() == expect3).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_inf():
    ms_isinf = Netinf()
    output1 = ms_isinf(Tensor(x1))
    expect1 = [[False, False, False, False]]
    assert (output1.asnumpy() == expect1).all()

    output2 = ms_isinf(Tensor(x2))
    expect2 = [[True, False, False, False]]
    assert (output2.asnumpy() == expect2).all()

    output3 = ms_isinf(Tensor(x3))
    expect3 = [[False, False], [False, False], [False, False]]
    assert (output3.asnumpy() == expect3).all()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_finite():
    ms_isfinite = Netfinite()
    output1 = ms_isfinite(Tensor(x1))
    expect1 = [[True, True, False, True]]
    assert (output1.asnumpy() == expect1).all()

    output2 = ms_isfinite(Tensor(x2))
    expect2 = [[False, True, True, True]]
    assert (output2.asnumpy() == expect2).all()

    output3 = ms_isfinite(Tensor(x3))
    expect3 = [[True, True], [True, True], [True, True]]
    assert (output3.asnumpy() == expect3).all()