# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test nn ops """
import numpy as np
import pytest

import mindspore
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common.initializer import initializer
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
from mindspore._c_expression import security
from tests.security_utils import security_off_wrap
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config

context.set_context(mode=context.GRAPH_MODE)


def conv3x3(in_channels, out_channels, stride=1, padding=1):
    """3x3 convolution"""
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=3, stride=stride, padding=padding)


def conv1x1(in_channels, out_channels, stride=1, padding=0):
    """1x1 convolution"""
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=1, stride=stride, padding=padding)


grad = C.GradOperation()
grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


class ResidualBlock(nn.Cell):
    """
    Residual block.
    """
    expansion = 4

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 down_sample=False):
        super(ResidualBlock, self).__init__()

        out_chls = out_channels // self.expansion
        self.conv1 = conv1x1(in_channels, out_chls, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(out_chls)

        self.conv2 = conv3x3(out_chls, out_chls, stride=stride, padding=0)
        self.bn2 = nn.BatchNorm2d(out_chls)

        self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
        self.bn3 = nn.BatchNorm2d(out_channels)

        self.relu = nn.ReLU()
        self.downsample = down_sample

        self.conv_down_sample = conv1x1(in_channels, out_channels,
                                        stride=stride, padding=0)
        self.bn_down_sample = nn.BatchNorm2d(out_channels)
        self.add = P.Add()

    def construct(self, x):
        """
        :param x: input feature map
        :return: output feature map
        """
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample:
            identity = self.conv_down_sample(identity)
            identity = self.bn_down_sample(identity)

        out = self.add(out, identity)
        out = self.relu(out)

        return out

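# Minimal usage sketch for ResidualBlock (illustrative only, not wired into the
# pipeline tables below; the input shape is an assumption chosen so the 1x1
# down-sample branch and the residual sum line up).
def _example_residual_block_forward():
    block = ResidualBlock(64, 256, stride=1, down_sample=True)
    x = Tensor(np.ones([1, 64, 8, 8], np.float32))
    # with the default pad_mode "same", spatial dims are preserved, so the
    # expected output shape is (1, 256, 8, 8)
    return block(x)
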
class VirtualLossGrad(PrimitiveWithInfer):
    """ VirtualLossGrad definition """

    @prim_attr_register
    def __init__(self):
        """init VirtualLossGrad"""

    def __call__(self, x, out, dout):
        raise NotImplementedError

    def infer_shape(self, x_shape, out_shape, dout_shape):
        return x_shape

    def infer_dtype(self, x_dtype, out_dtype, dout_dtype):
        return x_dtype


class VirtualLoss(PrimitiveWithInfer):
    """ VirtualLoss definition """

    @prim_attr_register
    def __init__(self):
        """init VirtualLoss"""

    def __call__(self, x):
        raise NotImplementedError

    def get_bprop(self):
        loss_grad = VirtualLossGrad()

        def bprop(x, out, dout):
            # pylint: disable=unused-argument
            dx = loss_grad(x, out, dout)
            return (dx,)

        return bprop

    def infer_shape(self, x_shape):
        return []

    def infer_dtype(self, x_dtype):
        return x_dtype


class VirtualNetWithLoss(nn.Cell):
    """ VirtualNetWithLoss definition """

    def __init__(self, network):
        super(VirtualNetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x):
        predict = self.network(x)
        return self.loss(predict)


class SoftMaxGrad(nn.Cell):
    """ SoftMaxGrad definition """

    def __init__(self, network):
        super(SoftMaxGrad, self).__init__()
        self.network = network

    def construct(self, x):
        return grad(self.network)(x)


class DropoutGrad(nn.Cell):
    """ DropoutGrad definition """

    def __init__(self, network):
        super(DropoutGrad, self).__init__()
        self.network = network

    def construct(self, x):
        return grad(self.network)(x)


class ScalarSummaryNet(nn.Cell):
    """ ScalarSummaryNet definition """

    def __init__(self):
        super(ScalarSummaryNet, self).__init__()
        self.summary = P.ScalarSummary()

    def construct(self, scalar):
        string_in = "bias_value"
        out = self.summary(string_in, scalar)
        return out


class L2NormalizeNet(nn.Cell):
    """ L2NormalizeNet definition """

    def __init__(self):
        super(L2NormalizeNet, self).__init__()
        self.l2_normalize = P.L2Normalize()

    def construct(self, x):
        out = self.l2_normalize(x)
        return out


class HistogramSummaryNet(nn.Cell):
    """HistogramSummaryNet definition"""

    def __init__(self):
        super(HistogramSummaryNet, self).__init__()
        self.summary = P.HistogramSummary()

    def construct(self, tensor):
        string_in = "weight_value"
        out = self.summary(string_in, tensor)
        return out


class FusedBatchNormGrad(nn.Cell):
    """ FusedBatchNormGrad definition """

    def __init__(self, network):
        super(FusedBatchNormGrad, self).__init__()
        self.grad = C.GradOperation(get_all=True, sens_param=True)
        self.network = network

    def construct(self, inp, output_grad):
        return self.grad(self.network)(inp, output_grad)


class NetWithLoss(nn.Cell):
    """ NetWithLoss definition """

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = P.SmoothL1Loss()
        self.network = network

    def construct(self, x, label):
        predict = self.network(x)
        return self.loss(predict, label)

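# Note on the Virtual* helpers above (summary of existing behavior, nothing
# new): VirtualLoss infers a scalar-shaped output and registers a custom bprop
# that forwards the incoming sensitivity through VirtualLossGrad, so wrapping a
# network in VirtualNetWithLoss lets `grad` differentiate it without computing
# a real loss. Compile-only sketch (these primitives have no backend kernel):
#
#   net = SoftMaxGrad(VirtualNetWithLoss(P.Softmax()))
#   # net(x) is exercised through the 'SoftMaxGrad' entry in test_cases below.
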
class Grad(nn.Cell):
    """ Grad definition """

    def __init__(self, network):
        super(Grad, self).__init__()
        self.network = network
        self.network.set_train()

    def construct(self, x, label):
        return grad(self.network)(x, label)


class BatchnormNet(nn.Cell):
    """ BatchnormNet definition """

    def __init__(self):
        super(BatchnormNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 4, kernel_size=8, stride=2, pad_mode="pad", padding=3)
        self.bn1 = nn.BatchNorm2d(4)
        self.flatten = P.Flatten()
        self.weight = Parameter(Tensor(np.ones([64, 10], np.float32)), name="weight")
        self.bias = Parameter(Tensor(np.ones([10], np.float32)), name="bias")
        self.fc = P.MatMul()
        self.biasAdd = P.BiasAdd()

    def construct(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.flatten(x)
        x = self.biasAdd(self.fc(x, self.weight), self.bias)
        return x


class NetWithLossClass(nn.Cell):
    """ NetWithLossClass definition """

    def __init__(self, network):
        super(NetWithLossClass, self).__init__(auto_prefix=False)
        self.loss = nn.SoftmaxCrossEntropyWithLogits()
        self.network = network

    def construct(self, x, label):
        predict = self.network(x)
        return self.loss(predict, label)


class BlockNet(nn.Cell):
    """ BlockNet definition """

    def __init__(self):
        super(BlockNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, pad_mode="pad", padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
        self.block_down_sample = ResidualBlock(
            64, 256, stride=1, down_sample=True
        )
        self.flatten = P.Flatten()
        self.weight = Parameter(Tensor(np.ones([1024, 10]).astype(np.float32)), name="weight")
        self.bias = Parameter(Tensor(np.ones([10]).astype(np.float32)), name="bias")
        self.fc = P.MatMul()
        self.biasAdd = P.BiasAdd()

    def construct(self, x):
        x = self.conv1(x)
        return x


class Conv2dWithBiasNet(nn.Cell):
    """ Conv2dWithBiasNet definition """

    def __init__(self):
        super(Conv2dWithBiasNet, self).__init__()
        self.conv = nn.Conv2d(3, 10, 1, bias_init='zeros')
        self.flatten = P.Flatten()

    def construct(self, input_x):
        return self.flatten(self.conv(input_x))


class Conv2dNativeNet(nn.Cell):
    """ Conv2dNativeNet definition """

    def __init__(self):
        super(Conv2dNativeNet, self).__init__()
        self.conv = P.DepthwiseConv2dNative(channel_multiplier=3, kernel_size=(3, 3))
        self.flatten = P.Flatten()
        channel_multipliers = 1
        in_channels = 3
        kernel_size = (3, 3)
        self.weight = Parameter(initializer(
            Tensor(np.ones([channel_multipliers, in_channels, *kernel_size], dtype=np.float32)),
            [channel_multipliers, in_channels, *kernel_size]), name='weight')

    def construct(self, input_x):
        return self.flatten(self.conv(input_x, self.weight))


class StateNet(nn.Cell):
    """ StateNet definition """

    def __init__(self):
        super(StateNet, self).__init__()
        weight = Tensor(np.ones([2, 1, 2, 2], np.float32))
        self.s1 = Parameter(weight, name="s1")
        self.s2 = Parameter(weight, name="s2")
        self.sub = P.Sub()
        self.loss = nn.SoftmaxCrossEntropyWithLogits()
        self.assign = P.Assign()

    def construct(self, x):
        x = F.depend(x, self.assign(self.s1, x + self.s1))
        self.s1 = self.sub(self.s1, x)
        self.s2 = self.sub(self.s2, x)
        return x

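# Note on StateNet (behavioral summary, nothing new): F.depend(x, w) returns x
# while forcing the side-effecting Assign `w` to execute first, so the
# parameter updates in construct are ordered deterministically in graph mode.
# The 'StateTest' and 'StateGrad' entries in test_cases below exercise this.
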
def test_conv2d_same_primitive():
    class Conv2DSameNet(nn.Cell):
        def __init__(self):
            super(Conv2DSameNet, self).__init__()
            self.conv1 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)
            self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)

        def construct(self, x, y):
            r1 = self.conv1(x)
            r2 = self.conv2(y)
            return (r1, r2)

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    net = Conv2DSameNet()
    net(t1, t2)


class ComparisonNet(nn.Cell):
    def __init__(self):
        """ ComparisonNet definition """
        super(ComparisonNet, self).__init__()

    def construct(self, x, y):
        ret = x <= y
        return ret


def test_max_pool_with_arg_max():
    class NetMaxPoolWithArgMax(nn.Cell):
        def __init__(self):
            """ NetMaxPoolWithArgMax definition """
            super(NetMaxPoolWithArgMax, self).__init__()
            self.max_pool_with_arg_max = P.MaxPoolWithArgmax(pad_mode="valid", kernel_size=2, strides=1)

        def construct(self, x):
            ret = self.max_pool_with_arg_max(x)
            return ret

    x = Tensor(np.ones([1, 1, 3, 3], np.float32))
    net = NetMaxPoolWithArgMax()
    context.set_context(mode=context.GRAPH_MODE)
    ret = net(x)
    print(ret)


class GradWrapUnfold(nn.Cell):
    """ GradWrapUnfold definition """

    def __init__(self, network):
        super(GradWrapUnfold, self).__init__()
        self.network = network
        self.sens = Tensor(np.ones([1, 4, 2, 2], np.float32))

    def construct(self, x):
        return grad_all_with_sens(self.network)(x, self.sens)


class UnfoldNetValid(nn.Cell):
    """ UnfoldNetValid definition """

    def __init__(self):
        super(UnfoldNetValid, self).__init__()
        self.unfold = nn.Unfold(ksizes=[1, 2, 2, 1],
                                strides=[1, 1, 1, 1],
                                rates=[1, 1, 1, 1],
                                padding='VALID')

    def construct(self, x):
        return self.unfold(x)


class UnfoldNetSame(nn.Cell):
    """ UnfoldNetSame definition """

    def __init__(self):
        super(UnfoldNetSame, self).__init__()
        self.unfold = nn.Unfold(ksizes=[1, 2, 2, 1],
                                strides=[1, 1, 1, 1],
                                rates=[1, 1, 1, 1],
                                padding='SAME')

    def construct(self, x):
        return self.unfold(x)


class FlattenNet(nn.Cell):
    """ FlattenNet definition """

    def __init__(self):
        super(FlattenNet, self).__init__()
        self.flatten = P.Flatten()

    def construct(self, x):
        return self.flatten(x)


class PReLUNet(nn.Cell):
    """ PReLUNet definition """

    def __init__(self):
        super(PReLUNet, self).__init__()
        self.prelu = P.PReLU()
        self.w = Tensor(np.ones(3, np.float32))

    def construct(self, x):
        return self.prelu(x, self.w)


class PReLUGradNet(nn.Cell):
    """ PReLUGradNet definition """

    def __init__(self):
        super(PReLUGradNet, self).__init__()
        self.prelu_grad = G.PReLUGrad()

    def construct(self, dout, x, w):
        return self.prelu_grad(dout, x, w)


class LRNNet(nn.Cell):
    """ LRNNet definition """

    def __init__(self):
        super(LRNNet, self).__init__()
        self.lrn = P.LRN()

    def construct(self, x):
        return self.lrn(x)


class LRNGradNet(nn.Cell):
    """ LRNGradNet definition """

    def __init__(self):
        super(LRNGradNet, self).__init__()
        self.lrn_grad = G.LRNGrad()

    def construct(self, dout, x, out):
        return self.lrn_grad(dout, x, out)

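# Shape bookkeeping for the gradient cases in test_cases below (derived from
# the nets above, stated here so the label shapes are not magic numbers):
#   Conv2dWithBiasNet: Conv2d(3, 10, 1) on [1, 3, 16, 16] -> [1, 10, 16, 16],
#       flattened to [1, 10 * 16 * 16] = [1, 2560].
#   Conv2dNativeNet: DepthwiseConv2dNative(channel_multiplier=3, 3x3, valid)
#       on [1, 3, 16, 16] -> [1, 3 * 3, 14, 14], flattened to [1, 1764].
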
test_cases = [
    ('SoftMaxGrad', {
        'block': SoftMaxGrad(VirtualNetWithLoss(P.Softmax())),
        'desc_inputs': [[128, 32, 32, 64]],
        'desc_bprop': [[128, 32, 32, 64]],
    }),
    ('DropoutGrad', {
        'block': DropoutGrad(VirtualNetWithLoss(nn.Dropout())),
        'desc_inputs': [[128, 32, 32, 64]],
        'desc_bprop': [[128, 32, 32, 64]],
    }),
    ('L2Normalize', {
        'block': L2NormalizeNet(),
        'desc_inputs': [Tensor(np.array([[1.0, 2, 3], [4.0, 5, 6], [7.0, 8, 9]]), mindspore.float32)],
    }),
    ('FusedBatchNormGrad', {
        'block': FusedBatchNormGrad(nn.BatchNorm2d(num_features=512, eps=1e-5, momentum=0.1)),
        'desc_inputs': [[64, 512, 7, 7], [64, 512, 7, 7]],
        'desc_bprop': [[64, 512, 7, 7]],
    }),
    ('BatchnormGrad', {
        'block': Grad(NetWithLoss(BatchnormNet())),
        'desc_inputs': [Tensor(np.ones([1, 3, 8, 8], np.float32)), Tensor(np.zeros([1, 10], np.float32))],
    }),
    ('BlockGrad', {
        'block': Grad(NetWithLossClass(BlockNet())),
        'desc_inputs': [Tensor(np.ones([1, 3, 8, 8], np.float32)), Tensor(np.zeros([1, 64, 4, 4], np.float32))],
    }),
    ('Conv2dWithBiasGrad', {
        'block': Grad(NetWithLossClass(Conv2dWithBiasNet())),
        'desc_inputs': [Tensor(np.ones([1, 3, 16, 16], np.float32)), Tensor(np.zeros([1, 2560], np.float32))],
    }),
    ('Conv2dNativeGrad', {
        'block': Grad(NetWithLossClass(Conv2dNativeNet())),
        'desc_inputs': [Tensor(np.ones([1, 3, 16, 16], np.float32)), Tensor(np.zeros([1, 1764], np.float32))],
    }),
    ('StateTest', {
        'block': StateNet(),
        'desc_inputs': [Tensor(np.ones([2, 1, 2, 2]).astype(np.float32))],
    }),
    ('StateGrad', {
        'block': Grad(NetWithLossClass(StateNet())),
        'desc_inputs': [Tensor(np.ones([2, 1, 2, 2], np.float32)), Tensor(np.ones([2, 1, 2, 2], np.float32))],
    }),
    ('ComparisonTest', {
        'block': ComparisonNet(),
        'desc_inputs': [Tensor(np.ones([6, 9, 10], np.int32)), Tensor(np.ones([6, 9, 10], np.int32))],
    }),
    ('UnfoldValid', {
        'block': UnfoldNetValid(),
        'desc_inputs': [Tensor(np.ones([1, 1, 3, 3], np.float32))],
        'desc_bprop': [Tensor(np.ones([1, 4, 2, 2], np.float32))],
        'skip': ['backward']}),
    ('UnfoldSame', {
        'block': UnfoldNetSame(),
        'desc_inputs': [Tensor(np.ones([1, 1, 3, 3], np.float32))],
        'desc_bprop': [Tensor(np.ones([1, 4, 3, 3], np.float32))],
        'skip': ['backward']}),
    ('UnfoldGrad', {
        'block': GradWrapUnfold(UnfoldNetValid()),
        'desc_inputs': [Tensor(np.ones([1, 1, 3, 3], np.float32))],
        'desc_bprop': [Tensor(np.ones([1, 4, 2, 2], np.float32))],
        'skip': ['backward']}),
    ('LogSigmoid', {
        'block': nn.LogSigmoid(),
        'desc_inputs': [Tensor(np.array([1, 2, 3, 4]).astype(np.float32))],
        'desc_bprop': [Tensor(np.array([1, 2, 3, 4]).astype(np.float32))],
        'skip': ['backward']}),
    ('ReduceLogSumExp', {
        'block': nn.ReduceLogSumExp((0,), False),
        'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))],
        'skip': ['backward']}),
    ('LGamma', {
        'block': nn.LGamma(),
        'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))],
        'skip': ['backward']}),
    ('IGamma', {
        'block': nn.IGamma(),
        'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32)),
                        Tensor(np.array([3, 4, 5, 6]).astype(np.float32))],
        'skip': ['backward']}),
    ('DiGamma', {
        'block': nn.DiGamma(),
        'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))],
        'skip': ['backward']}),
    ('LBeta', {
        'block': nn.LBeta(),
        'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32)),
                        Tensor(np.array([3, 4, 5, 6]).astype(np.float32))],
        'skip': ['backward']}),
    ('FlattenNet', {
        'block': FlattenNet(),
        'desc_inputs': [Tensor(np.ones([1, 2, 3, 4], np.float32))],
    }),
    ('PReLUNet', {
        'block': PReLUNet(),
        'desc_inputs': [Tensor(np.ones([1, 3, 4, 4], np.float32))],
    }),
    ('PReLUGradNet', {
        'block': PReLUGradNet(),
        'desc_inputs': [Tensor(np.ones([1, 3, 4, 4], np.float32)),
                        Tensor(np.ones([1, 3, 4, 4], np.float32)),
                        Tensor(np.ones(3, np.float32))],
    }),
    ('MatrixDiag', {
        'block': nn.MatrixDiag(),
        'desc_inputs': [Tensor(np.array([1, 2, 3]).astype(np.float32))],
        'skip': ['backward']
    }),
    ('MatrixDiagPart', {
        'block': nn.MatrixDiagPart(),
        'desc_inputs': [Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32))],
        'skip': ['backward']
    }),
    ('MatrixSetDiag', {
        'block': nn.MatrixSetDiag(),
        'desc_inputs': [Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)),
                        Tensor(np.array([1, 2]).astype(np.float32))],
        'skip': ['backward']
    }),
    ('MatInverse', {
        'block': nn.MatInverse(),
        'desc_inputs': [Tensor(np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]]).astype(np.float32))],
        'skip': ['backward']
    }),
    ('MatDet', {
        'block': nn.MatDet(),
        'desc_inputs': [Tensor(np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]]).astype(np.float32))],
        'skip': ['backward']
    }),
    ('LRNNet', {
        'block': LRNNet(),
        'desc_inputs': [Tensor(np.ones([1, 5, 4, 4], np.float32))],
    }),
    ('LRNGradNet', {
        'block': LRNGradNet(),
        'desc_inputs': [Tensor(np.ones([1, 5, 4, 4], np.float32)),
                        Tensor(np.ones([1, 5, 4, 4], np.float32)),
                        Tensor(np.ones([1, 5, 4, 4], np.float32))],
    }),
]

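# Case schema shared by the tables in this file (as consumed by the
# mindspore_test_framework pipelines; summary inferred from usage here):
#   'block'       -> the Cell/primitive under test (or a (block, config) pair
#                    for the exception table below),
#   'desc_inputs' -> forward inputs; plain shape lists are materialized as
#                    tensors by the framework,
#   'desc_bprop'  -> output sensitivities fed to the backward pass,
#   'skip'        -> pipeline stages to skip, e.g. ['backward'].
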
test_cases_for_verify_exception = [
    ('ApplyMomentum_Error', {
        'block': (P.ApplyMomentum(), {'exception': TypeError}),
        'desc_inputs': [[2], [128, 32, 32, 64], [128, 32, 32, 64], [128, 32, 32, 64], [128, 32, 32, 64]],
        'desc_bprop': [[128, 32, 32, 64]],
        'skip': ['backward']
    }),
    ('Conv2d_ValueError_1', {
        'block': (lambda _: P.Conv2D(3, 4, mode=-2.0), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('Conv2d_ValueError_2', {
        'block': (lambda _: P.Conv2D(3, 4, mode=-2), {'exception': ValueError}),
        'desc_inputs': [0],
    }),
    ('MaxPoolWithArgmax_ValueError_1', {
        'block': (lambda _: P.MaxPoolWithArgmax(pad_mode='sane'), {'exception': ValueError}),
        'desc_inputs': [0],
    }),
    ('MaxPoolWithArgmax_ValueError_2', {
        'block': (lambda _: P.MaxPoolWithArgmax(kernel_size='1'), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('MaxPoolWithArgmax_ValueError_3', {
        'block': (lambda _: P.MaxPoolWithArgmax(kernel_size=-2), {'exception': ValueError}),
        'desc_inputs': [0],
    }),
    ('MaxPoolWithArgmax_ValueError_4', {
        'block': (lambda _: P.MaxPoolWithArgmax(strides=-1), {'exception': ValueError}),
        'desc_inputs': [0],
    }),
    ('Softmax_ValueError_1', {
        'block': (lambda _: P.Softmax("1"), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('Softmax_ValueError_2', {
        'block': (lambda _: P.Softmax(1.1), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('Softmax_ValueError_3', {
        'block': (lambda _: P.Softmax(axis="1"), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('DropoutGenMask_ValueError_1', {
        'block': (lambda _: P.DropoutGenMask(Seed0="seed0"), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('DropoutGenMask_ValueError_2', {
        'block': (lambda _: P.DropoutGenMask(Seed0=1.0), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('DropoutGenMask_ValueError_3', {
        'block': (lambda _: P.DropoutGenMask(Seed1="seed1"), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('DropoutGenMask_ValueError_4', {
        'block': (lambda _: P.DropoutGenMask(Seed1=2.0), {'exception': TypeError}),
        'desc_inputs': [0],
    }),
    ('MaxPool2d_ValueError_1', {
        'block': (nn.MaxPool2d(kernel_size=120, stride=1, pad_mode="valid"), {'exception': ValueError}),
        'desc_inputs': [Tensor(np.random.randn(32, 3, 112, 112).astype(np.float32).transpose(0, 3, 1, 2))],
    }),
    ('MaxPool2d_ValueError_2', {
        'block': (
            lambda _: nn.MaxPool2d(kernel_size=120, stride=True, pad_mode="valid"),
            {'exception': TypeError},
        ),
        'desc_inputs': [Tensor(np.random.randn(32, 3, 112, 112).astype(np.float32).transpose(0, 3, 1, 2))],
    }),
    ('MaxPool2d_ValueError_3', {
        'block': (
            lambda _: nn.MaxPool2d(kernel_size=3, stride=True, pad_mode="valid"),
            {'exception': TypeError},
        ),
        'desc_inputs': [Tensor(np.random.randn(32, 3, 112, 112).astype(np.float32).transpose(0, 3, 1, 2))],
    }),
    ('ReduceLogsumexp_TypeError_1', {
        'block': (
            lambda _: nn.ReduceLogSumExp(axis=(0,), keep_dims=2),
            {'exception': TypeError},
        ),
        'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))],
    }),
    ('ReduceLogsumexp_TypeError_2', {
        'block': (
            lambda _: nn.ReduceLogSumExp(axis=1.2, keep_dims=True),
            {'exception': TypeError},
        ),
        'desc_inputs': [Tensor(np.array([3, 4, 5, 6]).astype(np.float32))],
    }),
]

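# Hand-written equivalent of one entry above (illustrative sketch only; the
# pipeline invokes each lambda and asserts the declared exception type):
def _example_conv2d_mode_type_error():
    with pytest.raises(TypeError):
        P.Conv2D(3, 4, mode=-2.0)  # non-integer `mode` is rejected at construction
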
@security_off_wrap
@non_graph_engine
@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_summary_nn_ops():
    if security.enable_security():
        return []
    test_cases_for_summary_ops = [
        ('ScalarSummary', {
            'block': ScalarSummaryNet(),
            'desc_inputs': [Tensor(2.2)],
        }),
        ('HistogramSummary', {
            'block': HistogramSummaryNet(),
            'desc_inputs': [[1, 2, 3]],
        }),
    ]
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    return test_cases_for_summary_ops


def test_summary_nn_ops_security_on():
    if security.enable_security():
        with pytest.raises(ValueError) as exc:
            ScalarSummaryNet()
        assert str(exc.value) == 'The Summary is not supported, please without `-s on` and recompile source.'


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_compile():
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    return test_cases


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return test_cases_for_verify_exception