# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test loss """
import numpy as np
import pytest
from mindspore.common import dtype as mstype
from mindspore import nn
from mindspore import Tensor
from ..ut_filter import non_graph_engine


def test_L1Loss():
    """Smoke test: L1Loss runs on a 2x3 float32 input/target pair."""
    loss = nn.L1Loss()
    input_data = Tensor(np.array([[1, 2, 3], [2, 3, 4]]).astype(np.float32))
    target_data = Tensor(np.array([[0, 2, 5], [3, 1, 1]]).astype(np.float32))
    loss(input_data, target_data)


def test_MSELoss():
    """Smoke test: MSELoss runs on a 2x3 float32 input/target pair."""
    loss = nn.MSELoss()
    input_data = Tensor(np.array([[1, 2, 3], [2, 3, 2]]).astype(np.float32))
    target_data = Tensor(np.array([[0, 0, 5], [1, 2, 3]]).astype(np.float32))
    loss(input_data, target_data)


@non_graph_engine
def test_SoftmaxCrossEntropyWithLogits():
    """ test_SoftmaxCrossEntropyWithLogits """
    loss = nn.SoftmaxCrossEntropyWithLogits()

    logits = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32))
    labels = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32))
    # Deliberately calls construct() directly (not __call__) to exercise
    # the cell's construct path under the non-graph engine.
    loss.construct(logits, labels)


def test_SoftmaxCrossEntropyWithLogits_reduce():
    """ test_SoftmaxCrossEntropyWithLogits """
    loss = nn.SoftmaxCrossEntropyWithLogits(reduction="mean")

    logits = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32))
    labels = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32))
    loss(logits, labels)


def test_BCELoss():
    """ test_BCELoss """
    loss = nn.BCELoss()

    inputs_data = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]).astype(np.float32))
    target_data = Tensor(np.array([[0, 1, 0], [0, 0, 1]]).astype(np.float32))
    loss(inputs_data, target_data)


def test_BCELoss_reduce():
    """ test_BCELoss """
    loss = nn.BCELoss(reduction='mean')

    inputs_data = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]).astype(np.float32))
    target_data = Tensor(np.array([[0, 1, 0], [0, 0, 1]]).astype(np.float32))
    loss(inputs_data, target_data)


def test_BCELoss_weight():
    """ test_BCELoss """
    # Per-element rescaling weight; must match the input shape.
    weight = Tensor(np.array([[1.0, 2.0, 3.0], [2.2, 2.6, 3.9]]).astype(np.float32))
    loss = nn.BCELoss(weight=weight)

    inputs_data = Tensor(np.array([[0.1, 0.2, 0.3], [0.5, 0.7, 0.9]]).astype(np.float32))
    target_data = Tensor(np.array([[0, 1, 0], [0, 0, 1]]).astype(np.float32))
    loss(inputs_data, target_data)


def test_cosine_embedding_loss():
    """ test CosineEmbeddingLoss """
    loss = nn.CosineEmbeddingLoss()
    x1 = Tensor(np.array([[0.3, 0.8], [0.4, 0.3]]).astype(np.float32))
    x2 = Tensor(np.array([[0.4, 1.2], [-0.4, -0.9]]).astype(np.float32))
    # label 1 = similar pair, -1 = dissimilar pair
    label = Tensor(np.array([1, -1]).astype(np.int32))
    loss(x1, x2, label)


def test_focal_loss():
    """ test_FocalLoss """
    x1 = Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], mstype.float32)
    x2 = Tensor([[1], [1], [0]], mstype.int32)
    focalloss = nn.FocalLoss()
    focalloss(x1, x2)


def test_focal_loss_gamma():
    """ test_FocalLoss """
    # gamma must be numeric; a string should raise TypeError at construction.
    x1 = Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], mstype.float32)
    x2 = Tensor([[1], [1], [0]], mstype.int32)
    with pytest.raises(TypeError):
        focalloss = nn.FocalLoss(weight=None, gamma="mmm", reduction='mean')
        focalloss(x1, x2)


def test_focal_loss_weight():
    """ test_FocalLoss """
    # weight must be a Tensor (or None); a string should raise TypeError.
    x1 = Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], mstype.float32)
    x2 = Tensor([[1], [1]], mstype.int32)
    with pytest.raises(TypeError):
        focalloss = nn.FocalLoss(weight='a', gamma=2.0, reduction='mean')
        focalloss(x1, x2)


def test_focal_loss_reduction():
    """ test_FocalLoss """
    # reduction must be one of the supported keywords; 'm' should raise ValueError.
    x1 = Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], mstype.float32)
    x2 = Tensor([[1], [1], [0]], mstype.int32)
    with pytest.raises(ValueError):
        focalloss = nn.FocalLoss(weight=None, gamma=2.0, reduction='m')
        focalloss(x1, x2)


def test_focal_loss_input():
    """ test_FocalLoss """
    # Mismatched batch sizes (3 predictions vs 1 target) should raise ValueError
    # at call time.
    x1 = Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], mstype.float32)
    x2 = Tensor([[1]], mstype.int32)
    focalloss = nn.FocalLoss(weight=None, gamma=2.0, reduction='mean')
    with pytest.raises(ValueError):
        focalloss(x1, x2)


def test_dice_loss():
    """ test_dice_loss """
    loss = nn.DiceLoss()
    y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
    y = Tensor(np.array([[0, 1], [1, 0], [0, 1]]), mstype.float32)
    # Pass the test if no error is reported
    loss(y_pred, y)


def test_dice_loss_check_shape():
    """ test_dice_loss """
    # Prediction (3x2) and label (2x2) shapes disagree -> ValueError expected.
    loss = nn.DiceLoss()
    y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
    y = Tensor(np.array([[1, 0], [0, 1]]), mstype.float32)
    with pytest.raises(ValueError):
        loss(y_pred, y)


def test_multi_class_dice_loss():
    """ test_multi_class_dice_loss """
    # NOTE: 'ignore_indiex' is the keyword spelling used by the MindSpore
    # MultiClassDiceLoss API; do not "correct" it here.
    loss = nn.MultiClassDiceLoss(weights=None, ignore_indiex=None, activation="softmax")
    y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
    y = Tensor(np.array([[0, 1], [1, 0], [0, 1]]), mstype.float32)
    loss(y_pred, y)


def test_multi_class_dice_loss_check_shape():
    """ test_multi_class_dice_loss """
    # Prediction (3x2) and label (2x2) shapes disagree -> ValueError expected.
    loss = nn.MultiClassDiceLoss(weights=None, ignore_indiex=None, activation="softmax")
    y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
    y = Tensor(np.array([[1, 0], [0, 1]]), mstype.float32)
    with pytest.raises(ValueError):
        loss(y_pred, y)


def test_multi_class_dice_loss_init_weight():
    """ test_multi_class_dice_loss """
    # weights must not be a string -> TypeError expected at construction.
    with pytest.raises(TypeError):
        loss = nn.MultiClassDiceLoss(weights='1', ignore_indiex=None, activation="softmax")
        y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
        y = Tensor(np.array([[1, 0], [0, 1]]), mstype.float32)
        loss(y_pred, y)


def test_multi_class_dice_loss_init_ignore_indiex():
    """ test_multi_class_dice_loss """
    # ignore_indiex must not be a string -> TypeError expected at construction.
    with pytest.raises(TypeError):
        loss = nn.MultiClassDiceLoss(weights=None, ignore_indiex="2", activation="softmax")
        y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
        y = Tensor(np.array([[1, 0], [0, 1]]), mstype.float32)
        loss(y_pred, y)


def test_multi_class_dice_loss_init_activation():
    """ test_multi_class_dice_loss """
    # activation must be a string or Cell, not an int -> TypeError expected.
    with pytest.raises(TypeError):
        loss = nn.MultiClassDiceLoss(weights=None, ignore_indiex=None, activation=2)
        y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
        y = Tensor(np.array([[1, 0], [0, 1]]), mstype.float32)
        loss(y_pred, y)


def test_multi_class_dice_loss_init_activation2():
    """ test_multi_class_dice_loss """
    # An unknown activation name -> ValueError expected at construction.
    with pytest.raises(ValueError):
        loss = nn.MultiClassDiceLoss(weights=None, ignore_indiex=None, activation='www')
        y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
        y = Tensor(np.array([[1, 0], [0, 1]]), mstype.float32)
        loss(y_pred, y)


def test_rmse_loss():
    """Smoke test: RMSELoss runs on a 2x3 float32 input/target pair."""
    loss = nn.RMSELoss()
    input_data = Tensor(np.array([[1, 2, 3], [2, 3, 2]]).astype(np.float32))
    target_data = Tensor(np.array([[0, 0, 5], [1, 2, 3]]).astype(np.float32))
    loss(input_data, target_data)


def test_mae_loss():
    """Smoke test: MAELoss runs on a 2x3 float32 input/target pair."""
    loss = nn.MAELoss()
    input_data = Tensor(np.array([[1, 2, 3], [2, 3, 2]]).astype(np.float32))
    target_data = Tensor(np.array([[0, 0, 5], [1, 2, 3]]).astype(np.float32))
    loss(input_data, target_data)