------ ArmNN for Android NNAPI supported operations ------

This release of ArmNN for Android supports use as a driver for the Android Neural Networks API. It implements the
android.hardware.neuralnetworks@1.0, android.hardware.neuralnetworks@1.1, android.hardware.neuralnetworks@1.2 and
android.hardware.neuralnetworks@1.3 HAL interfaces.

For more information on the Android Neural Networks API, see https://developer.android.com/ndk/guides/neuralnetworks/index.html

For integration and usage documentation, please see README.md.
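
Applications do not call the driver directly; they go through the NNAPI runtime, which dispatches work to whichever
drivers are installed on the device. As a rough illustration of that application-side flow, the sketch below builds
and runs a single ADD operation (one of the operations listed in the table further down) through the NNAPI NDK C API.
It is a minimal, generic NNAPI example rather than ArmNN-specific code, the helper name RunAddExample is only for
illustration, and error checking of the API return codes is omitted for brevity.

// Minimal NNAPI sketch: out = a + b on two FLOAT32 tensors of shape {4}.
// Generic NNAPI NDK usage; the runtime decides whether a driver such as
// ArmNN or the framework CPU implementation executes the operation.
#include <android/NeuralNetworks.h>
#include <cstdio>

int RunAddExample()
{
    const uint32_t dims[1] = {4};

    // Tensor operand type shared by the two inputs and the output.
    ANeuralNetworksOperandType tensorType{};
    tensorType.type = ANEURALNETWORKS_TENSOR_FLOAT32;
    tensorType.dimensionCount = 1;
    tensorType.dimensions = dims;

    // Scalar INT32 operand holding the fused activation code required by ADD.
    ANeuralNetworksOperandType activationType{};
    activationType.type = ANEURALNETWORKS_INT32;

    ANeuralNetworksModel* model = nullptr;
    ANeuralNetworksModel_create(&model);

    // Operand indices: 0 = a, 1 = b, 2 = activation, 3 = output.
    ANeuralNetworksModel_addOperand(model, &tensorType);     // 0
    ANeuralNetworksModel_addOperand(model, &tensorType);     // 1
    ANeuralNetworksModel_addOperand(model, &activationType); // 2
    ANeuralNetworksModel_addOperand(model, &tensorType);     // 3

    const int32_t fuseCode = ANEURALNETWORKS_FUSED_NONE;
    ANeuralNetworksModel_setOperandValue(model, 2, &fuseCode, sizeof(fuseCode));

    const uint32_t addInputs[3]  = {0, 1, 2};
    const uint32_t addOutputs[1] = {3};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, addInputs, 1, addOutputs);

    const uint32_t modelInputs[2]  = {0, 1};
    const uint32_t modelOutputs[1] = {3};
    ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs, 1, modelOutputs);
    ANeuralNetworksModel_finish(model);

    ANeuralNetworksCompilation* compilation = nullptr;
    ANeuralNetworksCompilation_create(model, &compilation);
    ANeuralNetworksCompilation_finish(compilation);

    const float a[4]   = {1.f, 2.f, 3.f, 4.f};
    const float b[4]   = {10.f, 20.f, 30.f, 40.f};
    float       out[4] = {};

    ANeuralNetworksExecution* execution = nullptr;
    ANeuralNetworksExecution_create(compilation, &execution);
    ANeuralNetworksExecution_setInput(execution, 0, nullptr, a, sizeof(a));
    ANeuralNetworksExecution_setInput(execution, 1, nullptr, b, sizeof(b));
    ANeuralNetworksExecution_setOutput(execution, 0, nullptr, out, sizeof(out));

    // Run asynchronously and wait for completion.
    ANeuralNetworksEvent* event = nullptr;
    ANeuralNetworksExecution_startCompute(execution, &event);
    ANeuralNetworksEvent_wait(event);
    ANeuralNetworksEvent_free(event);

    printf("out = %f %f %f %f\n", out[0], out[1], out[2], out[3]);

    ANeuralNetworksExecution_free(execution);
    ANeuralNetworksCompilation_free(compilation);
    ANeuralNetworksModel_free(model);
    return 0;
}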

--- Support for Android Neural Networks HAL operations ---

The following AndroidNN HAL 1.0, 1.1, 1.2 and 1.3 operations are currently supported:

AndroidNN operator           Tensor type supported
ABS                          (FLOAT32)
ADD                          (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
ARGMAX                       (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
ARGMIN                       (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
AVERAGE_POOL_2D              (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
BATCH_TO_SPACE_ND            (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CONCATENATION                (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CONV_2D                      (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTH_TO_SPACE               (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTHWISE_CONV_2D            (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEQUANTIZE                   (FLOAT32 (output only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (input only))
DIV                          (FLOAT32, QUANT8_ASYMM)
ELU                          (FLOAT32, QUANT8_ASYMM)
EQUAL                        (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
EXP                          (FLOAT32, FLOAT16)
EXPAND_DIMS                  (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
FILL                         (FLOAT32, FLOAT16, INT32)
FLOOR                        (FLOAT32)
FULLY_CONNECTED              (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GREATER                      (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GREATER_EQUAL                (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GROUPED_CONV_2D              (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
HARD_SWISH                   (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
INSTANCE_NORMALIZATION       (FLOAT32)
L2_NORMALIZATION             (FLOAT32)
L2_POOL_2D                   (FLOAT32, QUANT8_ASYMM)
LESS                         (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LESS_EQUAL                   (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
LOGISTIC                     (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOG_SOFTMAX                  (FLOAT32)
LSTM                         (FLOAT32)
MAXIMUM                      (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MAX_POOL_2D                  (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MEAN                         (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MINIMUM                      (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MUL                          (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
NEG                          (FLOAT32)
NOT_EQUAL                    (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PAD                          (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PAD_V2                       (FLOAT32, FLOAT16, QUANT8_ASYMM)
PRELU                        (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
QUANTIZE                     (FLOAT32 (input only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (output only))
QUANTIZED_16BIT_LSTM         (QUANT8_ASYMM)
QUANTIZED_LSTM               (QUANT8_ASYMM)
RELU                         (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RELU1                        (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RELU6                        (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESHAPE                      (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESIZE_BILINEAR              (FLOAT32, QUANT8_ASYMM)
RESIZE_NEAREST_NEIGHBOR      (FLOAT32, QUANT8_ASYMM)
RSQRT                        (FLOAT32)
SOFTMAX                      (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_BATCH_ND            (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_DEPTH               (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SQRT                         (FLOAT32)
SQUEEZE                      (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
STRIDED_SLICE                (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SUB                          (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TANH                         (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE                    (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE_CONV_2D            (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)

Where an operation is not supported by the ArmNN Android NN Driver, the driver reports it as unsupported to the
framework, and the framework falls back to its own CPU implementation for that operation.
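
From API level 29 onwards, NNAPI exposes a similar per-operation support query to applications, which can be used to
see how much of a given model a particular driver will accept before it is compiled. The sketch below enumerates the
available NNAPI devices and asks each one which operations of an already-built, finished model it supports. It is a
generic NNAPI sketch, not driver code; QueryOperationSupport and the numOperations parameter are illustrative names,
and a real caller would also check the API return codes.

// Sketch (API level 29+): ask each NNAPI device which operations of a
// finished model it supports. This mirrors the query the framework performs
// when partitioning a model between drivers and its CPU fallback.
#include <android/NeuralNetworks.h>
#include <cstdio>
#include <memory>

void QueryOperationSupport(const ANeuralNetworksModel* model, uint32_t numOperations)
{
    uint32_t deviceCount = 0;
    ANeuralNetworks_getDeviceCount(&deviceCount);

    for (uint32_t i = 0; i < deviceCount; ++i)
    {
        ANeuralNetworksDevice* device = nullptr;
        ANeuralNetworks_getDevice(i, &device);

        const char* name = nullptr;
        ANeuralNetworksDevice_getName(device, &name);

        // One flag per operation, in the order the operations were added to the model.
        std::unique_ptr<bool[]> supported(new bool[numOperations]());
        const ANeuralNetworksDevice* devices[] = { device };
        ANeuralNetworksModel_getSupportedOperationsForDevices(model, devices, 1, supported.get());

        uint32_t supportedCount = 0;
        for (uint32_t op = 0; op < numOperations; ++op)
        {
            if (supported[op])
            {
                ++supportedCount;
            }
        }
        printf("%s supports %u of %u operations\n",
               name ? name : "(unnamed device)", supportedCount, numOperations);
    }
}

Note that when a compilation is pinned to an explicit device list with ANeuralNetworksCompilation_createForDevices,
the framework does not apply the CPU fallback described above, so a query of this kind is useful before restricting
a model to a particular driver.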

NOTE: By convention, only tensor types that are fully supported across all ArmNN backends are listed above.
    - FLOAT16 input tensors are partially supported for most HAL 1.2 operators on the GpuAcc and
      CpuRef backends, but not on CpuAcc.