# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""activation"""
import numpy as np

from mindspore._checkparam import Validator as validator
from mindspore._extends import cell_attr_register
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from ..cell import Cell

__all__ = ['Softmax',
           'LogSoftmax',
           'ReLU',
           'ReLU6',
           'Tanh',
           'GELU',
           'FastGelu',
           'Sigmoid',
           'PReLU',
           'get_activation',
           'LeakyReLU',
           'HSigmoid',
           'HSwish',
           'ELU',
           'LogSigmoid',
           'SoftShrink',
           'HShrink',
           ]


class Softmax(Cell):
    r"""
    Softmax activation function.

    Applies the Softmax function to an n-dimensional input Tensor.

    The input is a Tensor of logits, which is transformed with the exponential function and then
    normalized so that the values lie in the range [0, 1] and sum up to 1.

    Softmax is defined as:

    .. math::
        \text{softmax}(x_{i}) =  \frac{\exp(x_i)}{\sum_{j=0}^{n-1}\exp(x_j)},

    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

    Args:
        axis (Union[int, tuple[int]]): The axis on which to apply the Softmax operation; -1 means the last dimension.
            Default: -1.

    Inputs:
        - **x** (Tensor) - The input of Softmax with data type of float16 or float32.

    Outputs:
        Tensor, which has the same type and shape as `x`, with values in the range [0, 1].

    Raises:
        TypeError: If `axis` is neither an int nor a tuple.
        TypeError: If dtype of `x` is neither float16 nor float32.
        ValueError: If `axis` is a tuple whose length is less than 1.
        ValueError: If `axis` is a tuple whose elements are not all in range [-len(x), len(x)).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> softmax = nn.Softmax()
        >>> output = softmax(x)
        >>> print(output)
        [0.03168 0.01166 0.0861  0.636   0.2341 ]
    """

    def __init__(self, axis=-1):
        """Initialize Softmax."""
        super(Softmax, self).__init__()
        self.softmax = P.Softmax(axis)

    def construct(self, x):
        return self.softmax(x)


class LogSoftmax(Cell):
    r"""
    LogSoftmax activation function.

    Applies the LogSoftmax function to an n-dimensional input Tensor.

    The input is transformed by the Softmax function and then by the log function to lie in the range [-inf, 0).

    LogSoftmax is defined as:

    .. math::

        \text{logsoftmax}(x_i) = \log \left(\frac{\exp(x_i)}{\sum_{j=0}^{n-1} \exp(x_j)}\right),

    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

    Args:
        axis (int): The axis on which to apply the LogSoftmax operation; -1 means the last dimension. Default: -1.

    Inputs:
        - **x** (Tensor) - The input of LogSoftmax, with float16 or float32 data type.

    Outputs:
        Tensor, which has the same type and shape as `x`, with values in the range [-inf, 0).

    Raises:
        TypeError: If `axis` is not an int.
        TypeError: If dtype of `x` is neither float16 nor float32.
        ValueError: If `axis` is not in range [-len(x), len(x)).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> log_softmax = nn.LogSoftmax()
        >>> output = log_softmax(x)
        >>> print(output)
        [[-5.00672150e+00 -6.72150636e-03 -1.20067215e+01]
         [-7.00091219e+00 -1.40009127e+01 -9.12250078e-04]]
    """

    def __init__(self, axis=-1):
        """Initialize LogSoftmax."""
        super(LogSoftmax, self).__init__()
        self.log_softmax = P.LogSoftmax(axis)

    def construct(self, x):
        return self.log_softmax(x)


class ELU(Cell):
    r"""
    Exponential Linear Unit activation function.

    Applies the exponential linear unit function element-wise.
    The activation function is defined as:

    .. math::
        E_{i} =
        \begin{cases}
        x_i, &\text{if } x_i \geq 0; \cr
        \text{alpha} * (\exp(x_i) - 1), &\text{otherwise.}
        \end{cases}

    For an illustration of the function, see `ELU <https://en.wikipedia.org/wiki/
    Activation_function#/media/File:Activation_elu.svg>`_.

    Args:
        alpha (float): The alpha coefficient applied to the negative part of the input. Only 1.0 is supported
            currently. Default: 1.0.

    Inputs:
        - **x** (Tensor) - The input of ELU with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If `alpha` is not a float.
        TypeError: If dtype of `x` is neither float16 nor float32.
        ValueError: If `alpha` is not equal to 1.0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
        >>> elu = nn.ELU()
        >>> result = elu(x)
        >>> print(result)
        [-0.63212055  -0.86466473  0.  2.  1.]
    """

    def __init__(self, alpha=1.0):
        """Initialize ELU."""
        super(ELU, self).__init__()
        self.elu = P.Elu(alpha)

    def construct(self, x):
        return self.elu(x)


class ReLU(Cell):
    r"""
    Rectified Linear Unit activation function.

    Applies the rectified linear unit function element-wise.

    .. math::

        \text{ReLU}(x) = (x)^+ = \max(0, x),

    It returns :math:`\max(0, x)` element-wise. In other words, neurons with a negative output
    are suppressed to 0, while the active neurons are kept unchanged.

    For an illustration of the function, see `ReLU <https://en.wikipedia.org/wiki/
    Activation_function#/media/File:Activation_rectified_linear.svg>`_.

    Inputs:
        - **x** (Tensor) - The input of ReLU. The data type is Number.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is not a number.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
        >>> relu = nn.ReLU()
        >>> output = relu(x)
        >>> print(output)
        [0. 2. 0. 2. 0.]
    """

    def __init__(self):
        """Initialize ReLU."""
        super(ReLU, self).__init__()
        self.relu = P.ReLU()

    def construct(self, x):
        return self.relu(x)


class ReLU6(Cell):
    r"""
    Compute ReLU6 activation function.

    ReLU6 is similar to ReLU, but with an upper limit of 6: inputs greater than 6 are
    clipped to 6.
    It computes element-wise as

    .. math::

        \min(\max(0, x), 6).

    The input is a Tensor of any valid shape.

    Inputs:
        - **x** (Tensor) - The input of ReLU6 with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, which has the same type as `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> relu6 = nn.ReLU6()
        >>> output = relu6(x)
        >>> print(output)
        [0. 0. 0. 2. 1.]
    """

    def __init__(self):
        """Initialize ReLU6."""
        super(ReLU6, self).__init__()
        self.relu6 = P.ReLU6()

    def construct(self, x):
        return self.relu6(x)


class LeakyReLU(Cell):
    r"""
    Leaky ReLU activation function.

    LeakyReLU is similar to ReLU, but it has a non-zero slope at x < 0 instead of being 0.
    The activation function is defined as:

    .. math::
            \text{leaky_relu}(x) = \begin{cases}x, &\text{if } x \geq 0; \cr
            \text{alpha} * x, &\text{otherwise.}\end{cases}

    See https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf

    Args:
        alpha (Union[int, float]): Slope of the activation function at x < 0. Default: 0.2.

    Inputs:
        - **x** (Tensor) - The input of LeakyReLU.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the same type and shape as the `x`.

    Raises:
        TypeError: If `alpha` is not a float or an int.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> leaky_relu = nn.LeakyReLU()
        >>> output = leaky_relu(x)
        >>> print(output)
        [[-0.2  4.  -1.6]
         [ 2.  -1.   9. ]]
    """

    def __init__(self, alpha=0.2):
        """Initialize LeakyReLU."""
        super(LeakyReLU, self).__init__()
        validator.check_value_type('alpha', alpha, [float, int], self.cls_name)
        self.greater_equal = P.GreaterEqual()
        self.mul = P.Mul()
        self.alpha = alpha
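        # For alpha <= 1, leaky_relu(x) = max(alpha * x, x); for alpha > 1 the two
        # branches swap, so Minimum is selected instead of Maximum below.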
        self.select_op = P.Maximum()
        if self.alpha > 1:
            self.select_op = P.Minimum()

    def construct(self, x):
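        # Broadcast the scalar alpha to the dtype of x, then pick between
        # alpha * x and x element-wise with the pre-selected Maximum/Minimum op.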
        alpha_array = P.Cast()(F.scalar_to_array(self.alpha), P.DType()(x))
        out = self.select_op(alpha_array * x, x)
        return out


class Tanh(Cell):
    r"""
    Tanh activation function.

    Applies the Tanh function element-wise and returns a new tensor with the hyperbolic tangent of the
    elements of the input. The input is a Tensor with any valid shape.

    Tanh function is defined as:

    .. math::
        tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},

    where :math:`x_i` is an element of the input Tensor.

    Inputs:
        - **x** (Tensor) - The input of Tanh with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1, 2, 3, 2, 1]), mindspore.float16)
        >>> tanh = nn.Tanh()
        >>> output = tanh(x)
        >>> print(output)
        [0.7617 0.964  0.995  0.964  0.7617]
    """

    def __init__(self):
        """Initialize Tanh."""
        super(Tanh, self).__init__()
        self.tanh = P.Tanh()

    def construct(self, x):
        return self.tanh(x)


class GELU(Cell):
    r"""
    Gaussian error linear unit activation function.

    Applies the GELU function to each element of the input. The input is a Tensor with any valid shape.

    GELU is defined as:

    .. math::

        GELU(x_i) = x_i*P(X < x_i),

    where :math:`P` is the cumulative distribution function
    of the standard Gaussian distribution and :math:`x_i` is the element of the input.

    For an illustration of the function, see `GELU <https://en.wikipedia.org/wiki/
    Activation_function#/media/File:Activation_gelu.png>`_.

    Inputs:
        - **x** (Tensor) - The input of GELU with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> gelu = nn.GELU()
        >>> output = gelu(x)
        >>> print(output)
        [[-1.5880802e-01  3.9999299e+00 -3.1077917e-21]
         [ 1.9545976e+00 -2.2918017e-07  9.0000000e+00]]
    """

    def __init__(self):
        """Initialize GELU."""
        super(GELU, self).__init__()
        self.gelu = P.GeLU()

    def construct(self, x):
        return self.gelu(x)


class FastGelu(Cell):
    r"""
    Fast Gaussian error linear unit activation function.

    Applies the FastGelu function to each element of the input. The input is a Tensor with any valid shape.

    FastGelu is defined as:

    .. math::
        FastGelu(x_i) = \frac {x_i} {1 + \exp(-1.702 * \left| x_i \right|)} *
                           \exp(0.851 * (x_i - \left| x_i \right|))

    where :math:`x_i` is the element of the input.

    Inputs:
        - **x** (Tensor) - The input of FastGelu with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> fast_gelu = nn.FastGelu()
        >>> output = fast_gelu(x)
        >>> print(output)
        [[-1.5418735e-01  3.9921875e+00 -9.7473649e-06]
         [ 1.9375000e+00 -1.0052517e-03  8.9824219e+00]]
    """

    def __init__(self):
        """Initialize FastGelu."""
        super(FastGelu, self).__init__()
        self.fast_gelu = P.FastGeLU()

    def construct(self, x):
        return self.fast_gelu(x)


class Sigmoid(Cell):
    r"""
    Sigmoid activation function.

    Applies sigmoid-type activation element-wise.

    Sigmoid function is defined as:

    .. math::

        \text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)},

    where :math:`x_i` is the element of the input.

    For an illustration of the function, see `Sigmoid <https://en.wikipedia.org/wiki/
    Sigmoid_function#/media/File:Logistic-curve.svg>`_.

    Inputs:
        - **x** (Tensor) - The input of Sigmoid with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> sigmoid = nn.Sigmoid()
        >>> output = sigmoid(x)
        >>> print(output)
        [0.2688  0.11914 0.5     0.881   0.7305 ]
    """

    def __init__(self):
        """Initialize Sigmoid."""
        super(Sigmoid, self).__init__()
        self.sigmoid = P.Sigmoid()

    def construct(self, x):
        return self.sigmoid(x)


class PReLU(Cell):
    r"""
    PReLU activation function.

    Applies the PReLU function element-wise.

    PReLU is defined as:

    .. math::

        prelu(x_i)= \max(0, x_i) + w * \min(0, x_i),

    where :math:`x_i` is an element of a channel of the input.

    Here :math:`w` is a learnable parameter with a default initial value of 0.25.
    The parameter :math:`w` has `channel` elements. If `channel` is 1 (the default),
    a single parameter :math:`w` is shared across all channels.

    For an illustration of the function, see `PReLU <https://en.wikipedia.org/wiki/
    Activation_function#/media/File:Activation_prelu.svg>`_.

    Args:
        channel (int): The number of elements of the parameter :math:`w`.
          It must be 1 or the number of channels of the input tensor `x`. Default: 1.
        w (Union[float, list, Tensor]): The initial value of the parameter. It can be a float, a list of floats,
          or a Tensor with the same dtype as the input tensor `x`. Default: 0.25.

    Inputs:
        - **x** (Tensor) - The input of PReLU with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same dtype and shape as the `x`.

    Raises:
        TypeError: If `channel` is not an int.
        TypeError: If `w` is not a float, a list of floats or a float Tensor.
        TypeError: If dtype of `x` is neither float16 nor float32.
        ValueError: If the `x` is a 0-D or 1-D Tensor on Ascend.
        ValueError: If `channel` is less than 1.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
        >>> prelu = nn.PReLU()
        >>> output = prelu(x)
        >>> print(output)
        [[[[0.1 0.6]
           [0.9 0.9]]]]

    """
    @cell_attr_register(attrs="")
    def __init__(self, channel=1, w=0.25):
        """Initialize PReLU."""
        super(PReLU, self).__init__()
        validator.check_positive_int(channel, 'channel', self.cls_name)
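        # Validate and convert 'w' (float, list of floats, or Tensor) into a
        # 1-D tensor with 'channel' elements before wrapping it in a Parameter below.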
        if isinstance(w, (float, np.float32)):
            tmp = np.empty((channel,), dtype=np.float32)
            tmp.fill(w)
            w = Tensor(tmp, dtype=mstype.float32)
        elif isinstance(w, list):
            if len(w) != channel:
                raise ValueError(f"For '{self.cls_name}', the length of 'w' should be equal to the 'channel' when "
                                 f"the 'w' is a list, but got the length of 'w': {len(w)}, the 'channel': {channel}.")

            for i in w:
                if not isinstance(i, (float, np.float32)):
                    raise ValueError(f"For '{self.cls_name}', all elements in 'w' should be "
                                     f"float when the 'w' is a list, but got {i}.")
            w = Tensor(w, dtype=mstype.float32)
        elif isinstance(w, Tensor):
            if w.dtype not in (mstype.float16, mstype.float32):
                raise ValueError(f"For '{self.cls_name}', the dtype of 'w' should be float16 or "
                                 f"float32 when the 'w' is a tensor, but got {w.dtype}.")
            if len(w.shape) != 1 or w.shape[0] != channel:
                raise ValueError(f"For '{self.cls_name}', the dimension of 'w' should be 1, and the elements number "
                                 f"should be equal to the 'channel' when the 'w' is a tensor, "
                                 f"but got 'w' shape {w.shape}, the 'channel' {channel}.")
        else:
            raise TypeError(f"For '{self.cls_name}', the 'w' only supported float, list and tensor, "
                            f"but got {type(w).__name__}.")
        self.w = Parameter(w, name='a')
        self.prelu = P.PReLU()
        self.relu = P.ReLU()
        self.assign = P.Assign()

    def construct(self, x):
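        # Clamp the learnable weight to be non-negative with ReLU; during training,
        # the clamped value is written back to the parameter.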
        u = self.relu(self.w)
        v = self.prelu(x, F.cast(u, x.dtype))
        if self.training:
            self.assign(self.w, u)
        return v


class HSwish(Cell):
    r"""
    Hard swish activation function.

    Applies hswish-type activation element-wise. The input is a Tensor with any valid shape.

    Hard swish is defined as:

    .. math::
        \text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},

    where :math:`x_{i}` is an element of the input Tensor.

    Inputs:
        - **x** (Tensor) - The input of HSwish, data type must be float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> hswish = nn.HSwish()
        >>> result = hswish(x)
        >>> print(result)
        [-0.3333  -0.3333  0  1.666  0.6665]
    """

    def __init__(self):
        """Initialize HSwish."""
        super(HSwish, self).__init__()
        self.hswish = P.HSwish()

    def construct(self, x):
        return self.hswish(x)


class HSigmoid(Cell):
    r"""
    Hard sigmoid activation function.

    Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.

    Hard sigmoid is defined as:

    .. math::
        \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),

    where :math:`x_{i}` is an element of the input Tensor.

    Inputs:
        - **input_x** (Tensor) - The input of HSigmoid. The shape is :math:`(N,*)` where :math:`*` means, any number of
          additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `input_x`.

    Raises:
        TypeError: If `input_x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> hsigmoid = nn.HSigmoid()
        >>> result = hsigmoid(x)
        >>> print(result)
        [0.3333 0.1666 0.5    0.8335 0.6665]
    """

    def __init__(self):
        """Initialize HSigmoid."""
        super(HSigmoid, self).__init__()
        self.hsigmoid = P.HSigmoid()

    def construct(self, input_x):
        return self.hsigmoid(input_x)


class LogSigmoid(Cell):
    r"""
    Logsigmoid activation function.

    Applies logsigmoid activation element-wise. The input is a Tensor with any valid shape.

    Logsigmoid is defined as:

    .. math::
        \text{logsigmoid}(x_{i}) = log(\frac{1}{1 + \exp(-x_i)}),

    where :math:`x_{i}` is the element of the input.

    Inputs:
        - **x** (Tensor) - The input of LogSigmoid with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, with the same type and shape as the `x`.

    Raises:
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> net = nn.LogSigmoid()
        >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> output = net(x)
        >>> print(output)
        [-0.31326166 -0.12692806 -0.04858734]
    """

    def __init__(self):
        """Initialize LogSigmoid."""
        super(LogSigmoid, self).__init__()
        self.mul = P.Mul()
        self.exp = P.Exp()
        self.add = P.Add()
        self.rec = P.Reciprocal()
        self.log = P.Log()

    def construct(self, input_x):
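        # Compute log(1 / (1 + exp(-x))) step by step using primitive ops.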
        neg_input = self.mul(input_x, -1)
        exp_neg_input = self.exp(neg_input)
        exp_neg_input_1 = self.add(exp_neg_input, 1)
        rec_exp_neg_input_1 = self.rec(exp_neg_input_1)
        ret = self.log(rec_exp_neg_input_1)
        return ret


class SoftShrink(Cell):
    r"""
    Applies the soft shrinkage function element-wise.

    .. math::
        \text{SoftShrink}(x) =
        \begin{cases}
        x - \lambda, & \text{ if } x > \lambda \\
        x + \lambda, & \text{ if } x < -\lambda \\
        0, & \text{ otherwise }
        \end{cases}

    Args:
        lambd (float): The :math:`\lambda` value for the SoftShrink formulation, which must be no less than zero.
            Default: 0.5.

    Inputs:
        - **input_x** (Tensor) - The input of SoftShrink with data type of float16 or float32.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the same shape and data type as `input_x`.

    Raises:
        TypeError: If `lambd` is not a float.
        TypeError: If `input_x` is not a Tensor.
        TypeError: If dtype of `input_x` is neither float16 nor float32.
        ValueError: If `lambd` is less than 0.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([[ 0.5297,  0.7871,  1.1754], [ 0.7836,  0.6218, -1.1542]]), mstype.float16)
        >>> softshrink = nn.SoftShrink()
        >>> output = softshrink(input_x)
        >>> print(output)
        [[ 0.02979  0.287    0.676  ]
         [ 0.2837   0.1216  -0.6543 ]]
    """

    def __init__(self, lambd=0.5):
        super(SoftShrink, self).__init__()
        self.softshrink = P.SoftShrink(lambd)

    def construct(self, input_x):
        output = self.softshrink(input_x)
        return output


class HShrink(Cell):
    r"""
    Applies the hard shrinkage function element-wise. Each element complies with the following function:

    .. math::
        \text{HardShrink}(x) =
        \begin{cases}
        x, & \text{ if } x > \lambda \\
        x, & \text{ if } x < -\lambda \\
        0, & \text{ otherwise }
        \end{cases}

    Args:
        lambd (float): The :math:`\lambda` value for the HardShrink formulation. Default: 0.5.

    Inputs:
        - **input_x** (Tensor) - The input of HardShrink with data type of float16 or float32.

    Outputs:
        Tensor, the same shape and data type as the input.

    Supported Platforms:
        ``Ascend``

    Raises:
        TypeError: If `lambd` is not a float.
        TypeError: If dtype of `input_x` is neither float16 nor float32.

    Examples:
        >>> input_x = Tensor(np.array([[ 0.5,  1,  2.0], [0.0533, 0.0776, -2.1233]]), mstype.float32)
        >>> hshrink = nn.HShrink()
        >>> output = hshrink(input_x)
        >>> print(output)
        [[ 0.      1.      2.    ]
         [ 0.      0.     -2.1233]]
    """

    def __init__(self, lambd=0.5):
        super(HShrink, self).__init__()
        self.hshrink = P.HShrink(lambd)

    def construct(self, input_x):
        return self.hshrink(input_x)


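# Mapping from lower-case activation names to their Cell classes, used by get_activation().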
_activation = {
    'softmax': Softmax,
    'logsoftmax': LogSoftmax,
    'relu': ReLU,
    'relu6': ReLU6,
    'tanh': Tanh,
    'gelu': GELU,
    'fast_gelu': FastGelu,
    'elu': ELU,
    'sigmoid': Sigmoid,
    'prelu': PReLU,
    'leakyrelu': LeakyReLU,
    'hswish': HSwish,
    'hsigmoid': HSigmoid,
    'logsigmoid': LogSigmoid,
    'softshrink': SoftShrink,
    'hshrink': HShrink,
}


def get_activation(name, prim_name=None):
    """
    Gets the activation function.

    Args:
        name (str): The name of the activation function.
        prim_name (str): The name of the primitive or cell that calls this function, which is only used
            as a prefix in error messages. Default: None.

    Returns:
        Cell, an instance of the activation function, or None if `name` is None.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> sigmoid = nn.get_activation('sigmoid')
        >>> print(sigmoid)
        Sigmoid<>
    """
    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
    if name is None:
        return None

    if name not in _activation:
        raise KeyError(f"{msg_prefix} 'name' should be in {list(_activation.keys())}, but got {name}.")
    return _activation[name]()