
Searched full:relu (Results 1 – 25 of 1510) sorted by relevance


/external/pytorch/torch/ao/nn/intrinsic/modules/
fused.py
11 ReLU,
43 r"""This is a sequential container which calls the Conv1d and ReLU modules.
46 def __init__(self, conv, relu): argument
49 and type_before_parametrizations(relu) == ReLU
50 …ct types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}"
51 super().__init__(conv, relu)
55 r"""This is a sequential container which calls the Conv2d and ReLU modules.
58 def __init__(self, conv, relu): argument
61 and type_before_parametrizations(relu) == ReLU
62 …ct types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}"
[all …]
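
Note: these hits are the fused Conv+ReLU containers in torch.ao.nn.intrinsic, which simply wrap an existing conv and relu in one sequential module. A minimal sketch of constructing and using such a container (assuming a recent PyTorch; layer sizes are arbitrary):

    import torch
    import torch.nn as nn
    from torch.ao.nn.intrinsic import ConvReLU2d  # the 2d counterpart of the container above

    conv = nn.Conv2d(3, 8, kernel_size=3)
    relu = nn.ReLU()
    fused = ConvReLU2d(conv, relu)  # sequential container: conv followed by relu

    x = torch.randn(1, 3, 32, 32)
    assert torch.allclose(fused(x), relu(conv(x)))
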
/external/pytorch/test/fx/
test_source_matcher_utils.py
32 self.relu = torch.nn.ReLU()
38 x = self.relu(x)
47 gm.graph, [torch.nn.Linear, torch.nn.ReLU]
52 self.assertEqual(len(module_partitions[torch.nn.ReLU]), 1)
57 module_partitions[torch.nn.ReLU][0],
63 module_partitions[torch.nn.ReLU][0],
69 module_partitions[torch.nn.ReLU][0],
88 self.relu = torch.nn.ReLU()
96 return self.maxpool(self.relu(z))
105 gm.graph, [torch.nn.Conv2d, torch.nn.ReLU, torch.nn.MaxPool2d]
[all …]
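
Note: test_source_matcher_utils.py exercises get_source_partitions, which groups the nodes of a captured graph by the nn module or function that produced them. A rough sketch of that usage, assuming a torch.export-captured graph that carries the source metadata the test relies on (module sizes are illustrative):

    import torch
    from torch.fx.passes.utils.source_matcher_utils import get_source_partitions

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            return self.relu(self.linear(x))

    ep = torch.export.export(M(), (torch.randn(2, 4),))
    # Group the exported graph's nodes by their originating module class.
    partitions = get_source_partitions(ep.graph, [torch.nn.Linear, torch.nn.ReLU])
    print(len(partitions[torch.nn.ReLU]))  # expect one ReLU partition if the metadata is recorded
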
/external/pytorch/torch/ao/quantization/backend_config/
_common_operator_config_utils.py
153 (op_with_quantized_bop_scalar_variant, nn.ReLU),
154 (op_with_quantized_bop_scalar_variant, F.relu),
155 (op_with_quantized_bop_scalar_variant, torch.relu),
209 # (2) Linear + relu
211 # 2.1 linear module + relu fusion config
212 # linear relu, linear module + relu module
214 BackendPatternConfig((torch.nn.Linear, torch.nn.ReLU))
219 # linear relu, linear module + functional relu
221 BackendPatternConfig((torch.nn.Linear, torch.nn.functional.relu))
227 # 2.2 linear module + relu, fused module configs
[all …]
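
Note: these backend_config entries declare which (op, activation) pairs a backend can fuse and quantize. A minimal sketch of a comparable linear + relu BackendPatternConfig; the dtype values and the lambda fuser below are illustrative stand-ins, not the exact helpers used in the file above:

    import torch
    import torch.ao.nn.intrinsic as nni
    from torch.ao.quantization.backend_config import (
        BackendPatternConfig,
        DTypeConfig,
        ObservationType,
    )

    # Dtype constraints for the quantized pattern (illustrative values).
    dtype_config = DTypeConfig(
        input_dtype=torch.quint8,
        output_dtype=torch.quint8,
        weight_dtype=torch.qint8,
        bias_dtype=torch.float,
    )

    # "linear module + relu module" fusion pattern, mirroring item 2.1 above.
    linear_relu_config = (
        BackendPatternConfig((torch.nn.Linear, torch.nn.ReLU))
        .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)
        .set_dtype_configs([dtype_config])
        .set_fuser_method(lambda is_qat, linear, relu: nni.LinearReLU(linear, relu))
        .set_fused_module(nni.LinearReLU)
    )
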
executorch.py
179 # (2) Conv + relu
181 # conv module + relu module
183 BackendPatternConfig((convs.root, nn.ReLU))
188 # conv module + functional relu
190 BackendPatternConfig((convs.root, F.relu))
195 # fused conv relu module
204 # conv relu, qat fused module
212 # functional conv + relu module
214 BackendPatternConfig((convs.func, nn.ReLU))
218 # functional conv + functional relu
[all …]
onednn.py
309 # (2) Conv2d + Add + Relu
315 # relu
318 def _fuse_conv_add_relu_left(is_qat, relu, add_pattern): argument
320 return nni.ConvAddReLU2d(conv, add, relu)
324 relu, add_pattern = pattern
333 relu, add_pattern = pattern
344 # relu
347 def _fuse_conv_bn_add_relu_left(is_qat, relu, add_pattern): argument
351 raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add, relu)}")
354 return nni.ConvAddReLU2d(fused_conv, add, relu)
[all …]
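
Note: the onednn config also fuses the three-op Conv2d + Add + ReLU chain. Purely as an illustration of the graph shape those fuser functions target (not the fuser itself), a hypothetical eager-mode module with that pattern:

    import torch
    import torch.nn as nn

    class ConvAddRelu(nn.Module):  # hypothetical example module
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 3, kernel_size=3, padding=1)
            self.relu = nn.ReLU()

        def forward(self, x, y):
            # conv -> add (extra input) -> relu, the chain fused into ConvAddReLU2d above
            return self.relu(self.conv(x) + y)

    m = ConvAddRelu()
    out = m(torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8))
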
/external/ComputeLibrary/examples/
graph_vgg19.cpp
85 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_1/Relu") in do_setup()
92 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_2/Relu") in do_setup()
101 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_1/Relu") in do_setup()
108 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_2/Relu") in do_setup()
117 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_1/Relu") in do_setup()
124 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_2/Relu") in do_setup()
131 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_3/Relu") in do_setup()
138 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_4/Relu") in do_setup()
147 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_1/Relu") in do_setup()
154 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_2/Relu") in do_setup()
[all …]
graph_inception_v4.cpp
93 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") in do_setup()
105 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") in do_setup()
117 …ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu"); in do_setup()
217 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_3a/Branch_1/Conv2d_0a_3x3… in get_mixed_3a()
237 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_0/Conv2d_0a_1x1… in get_mixed_4a()
248 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_0/Conv2d_1a_3x3… in get_mixed_4a()
261 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_1/Conv2d_0a_1x1… in get_mixed_4a()
272 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_1/Conv2d_0b_1x7… in get_mixed_4a()
283 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_1/Conv2d_0c_7x1… in get_mixed_4a()
294 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_4a/Branch_1/Conv2d_1a_3x3… in get_mixed_4a()
[all …]
graph_inception_v3.cpp
92 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") in do_setup()
105 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") in do_setup()
119 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu") in do_setup()
135 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_3b_1x1/Relu") in do_setup()
149 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4a_3x3/Relu") in do_setup()
258 …rInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_0a_1… in get_inception_node_A()
274 …ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d" + conv_id… in get_inception_node_A()
288 …ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d" + conv_id… in get_inception_node_A()
304 …rInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0a_1… in get_inception_node_A()
318 …rInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0b_3… in get_inception_node_A()
[all …]
graph_inception_resnet_v2.cpp
104 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") in do_setup()
117 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") in do_setup()
130 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu") in do_setup()
145 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_3b_1x1/Relu") in do_setup()
158 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4a_3x3/Relu") in do_setup()
182 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_7b_1x1/Relu") in do_setup()
233 …LayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5b/Branch_0/Conv2d_1x1/R… in block_mixed_5b()
248 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5b/Branch_1/Conv2d_0a_1x1… in block_mixed_5b()
260 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5b/Branch_1/Conv2d_0b_5x5… in block_mixed_5b()
275 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Mixed_5b/Branch_2/Conv2d_0a_1x1… in block_mixed_5b()
[all …]
graph_vgg16.cpp
87 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_1/Relu") in do_setup()
95 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1_2/Relu") in do_setup()
104 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_1/Relu") in do_setup()
112 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv2_2/Relu") in do_setup()
121 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_1/Relu") in do_setup()
129 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_2/Relu") in do_setup()
137 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3_3/Relu") in do_setup()
146 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_1/Relu") in do_setup()
154 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_2/Relu") in do_setup()
162 …nLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv4_3/Relu") in do_setup()
[all …]
graph_inception_resnet_v1.cpp
120 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") in do_setup()
133 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") in do_setup()
146 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu") in do_setup()
161 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_3b_1x1/Relu") in do_setup()
174 …(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4a_3x3/Relu") in do_setup()
187 …ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4b_3x3/Relu"); in do_setup()
268 …ayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Branch_0/Conv2d_1x1… in block35_repeat()
283 …erInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Branch_1/Conv2d_0a_1x… in block35_repeat()
295 …erInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Branch_1/Conv2d_0b_3x… in block35_repeat()
310 …erInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "Branch_2/Conv2d_0a_1x… in block35_repeat()
[all …]
/external/pytorch/test/quantization/eager/
test_fuse_eager.py
51 msg="Fused Conv + BN + Relu first layer")
53 msg="Fused Conv + BN + Relu (skipped BN)")
55 msg="Fused Conv + BN + Relu (skipped Relu)")
63 self.assertEqual(type(model.sub2.relu), torch.nn.ReLU,
64 msg="Non-fused submodule ReLU")
75 self.assertEqual(type(model.sub2.relu), nn.ReLU)
88 self.assertEqual(type(model.sub2.relu), nn.ReLU)
116 msg="Fused Conv + BN + Relu first layer (BN is folded)")
118 msg="Fused Conv + BN + Relu (Conv + folded BN only)")
119 self.assertEqual(type(model.conv1[1]), nn.ReLU,
[all …]
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
gpu_fusion.mlir
10 %relu = "tf.Relu"(%y#0) : (tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32>
11 func.return %relu : tensor<8x8x8x8xf32>
20 %relu = "tf.Relu"(%add) : (tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32>
21 func.return %relu : tensor<8x8x8x8xf32>
27 // Relu activation and we only fuse the add.
29 // CHECK-NEXT: %[[relu:[a-z0-9]*]] ={{.*}}Relu"(%[[Y]]
30 // CHECK-NEXT: return %[[relu]]
33 %relu = "tf.Relu"(%add) : (tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32>
34 func.return %relu, %add : tensor<8x8x8x8xf32>, tensor<8x8x8x8xf32>
41 // CHECK-NEXT: %[[relu:[a-z0-9]*]] ={{.*}}Relu"(%[[Y]]
[all …]
/external/pytorch/torch/fx/passes/utils/
matcher_with_name_node_map_utils.py
55 relu = F.relu(conv)
56 return relu, {"conv": conv, "relu": relu}
60 relu = F.relu(conv)
61 relu *= 2
62 return relu
94 {"conv": target_conv_ndoe, "relu": target_relu_node}
101 return relu
107 return relu, {"conv": conv, "relu": relu}
/external/XNNPACK/scripts/
generate-f32-vbinary.sh
26 ….in -D OP=ADD -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vadd-relu-sc…
27 ….in -D OP=ADD -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vadd-relu-sc…
28 ….in -D OP=ADD -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vadd-relu-sc…
29 ….in -D OP=ADD -D BATCH_TILE=8 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vadd-relu-sc…
30 ….in -D OP=DIV -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vdiv-relu-sc…
31 ….in -D OP=DIV -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vdiv-relu-sc…
32 ….in -D OP=DIV -D BATCH_TILE=4 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vdiv-relu-sc…
33 ….in -D OP=DIV -D BATCH_TILE=8 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vdiv-relu-sc…
34 ….in -D OP=MUL -D BATCH_TILE=1 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vmul-relu-sc…
35 ….in -D OP=MUL -D BATCH_TILE=2 -D WASM=0 -D ACTIVATION=RELU -o src/f32-vbinary/gen/vmul-relu-sc…
[all …]
/external/tensorflow/tensorflow/core/kernels/mlir_generated/
gpu_op_relu.cc
21 GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Relu, DT_HALF);
22 GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Relu, DT_FLOAT);
23 GENERATE_AND_REGISTER_UNARY_GPU_KERNEL(Relu, DT_DOUBLE);
26 GENERATE_AND_REGISTER_UNARY_JIT_GPU_KERNEL(Relu, DT_INT8);
27 GENERATE_AND_REGISTER_UNARY_JIT_GPU_KERNEL(Relu, DT_INT16);
28 GENERATE_AND_REGISTER_UNARY_JIT_GPU_KERNEL(Relu, DT_INT64);
29 GENERATE_AND_REGISTER_UNARY_JIT_GPU_KERNEL(Relu, DT_UINT8);
30 GENERATE_AND_REGISTER_UNARY_JIT_GPU_KERNEL(Relu, DT_UINT16);
31 GENERATE_AND_REGISTER_UNARY_JIT_GPU_KERNEL(Relu, DT_UINT32);
32 GENERATE_AND_REGISTER_UNARY_JIT_GPU_KERNEL(Relu, DT_UINT64);
/external/pytorch/benchmarks/operator_benchmark/pt/
qactivation_test.py
11 # VGG-16 relu's with original shape: (-1, 3, 224, 224)
12 (64, 224, 224), # ReLU-1 # noqa: E201
13 (128, 112, 112), # ReLU-6
14 (256, 56, 56), # ReLU-11 # noqa: E241
15 (512, 28, 28), # ReLU-18 # noqa: E241
16 (512, 14, 14), # ReLU-25 # noqa: E241
18 (16, 64, 224, 224), # ReLU-1 # noqa: E241
19 (16, 128, 112, 112), # ReLU-6
20 (16, 256, 56, 56), # ReLU-11 # noqa: E241
21 (16, 512, 28, 28), # ReLU-18 # noqa: E241
[all …]
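
Note: these are the activation shapes benchmarked for quantized ReLU. As a small sketch of the operation being timed, ReLU applies directly to a quint8 tensor (the scale and zero_point below are arbitrary):

    import torch

    # One of the VGG-16 activation shapes listed above, quantized to quint8.
    x = torch.randn(64, 224, 224)
    xq = torch.quantize_per_tensor(x, scale=0.05, zero_point=64, dtype=torch.quint8)

    yq = torch.nn.functional.relu(xq)  # ReLU on the quantized tensor
    print(yq.dtype, yq.shape)
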
/external/tensorflow/tensorflow/core/common_runtime/
quantize_training_test.cc
82 Relu Identity in TEST_F()
92 Node* relu = test::graph::Relu(g, a); in TEST_F() local
94 Node* m1 = test::graph::Matmul(g, relu, identity, false, false); in TEST_F()
102 Relu Identity in TEST_F()
118 // Quantize_and_dequantize node for relu should have signed_input==false. in TEST_F()
121 FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"), in TEST_F()
133 Relu Relu6 in TEST_F()
143 Node* relu = test::graph::Relu(g, a); in TEST_F() local
145 Node* m1 = test::graph::Matmul(g, relu, relu6, false, false); in TEST_F()
153 Relu Relu6 in TEST_F()
[all …]
/external/pytorch/torch/ao/quantization/
fuser_method_mappings.py
64 def fuse_conv_bn_relu(is_qat, conv, bn, relu): argument
79 >>> r1 = nn.ReLU(inplace=False)
84 conv.training == bn.training == relu.training
102 return fused_module(conv, bn, relu)
104 raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, relu)}")
114 return fused_module(fused_conv, relu)
116 raise NotImplementedError(f"Cannot fuse eval modules: {(conv, bn, relu)}")
196 (nn.Conv1d, nn.BatchNorm1d, nn.ReLU): fuse_conv_bn_relu,
198 (nn.Conv2d, nn.BatchNorm2d, nn.ReLU): fuse_conv_bn_relu,
200 (nn.Conv3d, nn.BatchNorm3d, nn.ReLU): fuse_conv_bn_relu,
[all …]
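
Note: fuse_conv_bn_relu is normally reached through the top-level fuse_modules API. A minimal sketch of that flow (layer sizes are arbitrary); in eval mode the BatchNorm is folded into the conv and the ReLU is appended:

    import torch
    import torch.nn as nn
    from torch.ao.quantization import fuse_modules

    class M(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 8, kernel_size=3)
            self.bn = nn.BatchNorm2d(8)
            self.relu = nn.ReLU()

        def forward(self, x):
            return self.relu(self.bn(self.conv(x)))

    m = M().eval()  # eval mode triggers the fuse_conv_bn(_relu) path shown above
    fused = fuse_modules(m, [["conv", "bn", "relu"]])
    print(type(fused.conv))  # e.g. a ConvReLU2d intrinsic module with the BN folded in
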
/external/pytorch/test/cpp/tensorexpr/
test_memplanning.cpp
107 Compute("relu", {M, N}, [&](const ExprHandle& m, const ExprHandle& n) { in TEST()
123 // Intermediate buffers and their liveness ranges: gemm [0, 1], relu [1, 2], in TEST()
139 // relu[i_3, i_4] = (gemm[i_3, i_4])<0.f ? 0.f : (gemm[i_3, i_4]); in TEST()
144 // E[i_5, i_6] = quint8((relu[i_5, i_6]) + (relu[i_5, i_6])); in TEST()
160 # CHECK: Allocate(relu); // dtype=float, dims=[4, 4] in TEST()
162 # CHECK: Free(relu); in TEST()
189 # CHECK: Allocate(relu); // dtype=float, dims=[4, 4] in TEST()
191 # CHECK: Free(relu); in TEST()
219 Compute("relu", {M, N}, [&](const ExprHandle& m, const ExprHandle& n) { in TEST()
235 // Intermediate buffers and their liveness ranges: gemm [0, 1], relu [1, 2], in TEST()
[all …]
/external/pytorch/test/jit/
test_custom_operators.py
61 output = torch.ops.aten.relu(input)
62 self.assertEqual(output, input.relu())
71 r"aten::relu\(\) expected at most 1 argument\(s\) but received 2 argument\(s\)",
74 torch.ops.aten.relu(1, 2)
78 RuntimeError, r"aten::relu\(\) is missing value for argument 'self'.", ""
80 torch.ops.aten.relu()
108 return torch.ops.aten.relu(x)
111 self.assertEqual(func(input), input.relu())
115 func = torch.jit.trace(torch.ops.aten.relu, [input])
116 self.assertEqual(func(input), input.relu())
[all …]
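
Note: the test above checks the dispatcher-level op against the Tensor method. The equivalence it relies on, as a one-line sketch:

    import torch

    x = torch.randn(4)
    assert torch.equal(torch.ops.aten.relu(x), x.relu())  # same kernel, same result
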
/external/pytorch/benchmarks/dynamo/microbenchmarks/
matmul_relu.py
17 return torch.nn.functional.relu(torch.mm(a, b))
57 time_with_torch_timer(torch_mm_relu, (a, b), string_id="torch mm + relu")
67 torch mm + relu mean: 0.0759 ms
71 torch mm + relu mean: 0.0316 ms
75 torch mm + relu mean: 0.0277 ms
79 torch mm + relu mean: 0.0290 ms
83 torch mm + relu mean: 0.0234 ms
87 torch mm + relu mean: 0.0322 ms
91 torch mm + relu mean: 0.0289 ms
95 torch mm + relu mean: 0.7896 ms
[all …]
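
Note: the benchmark script times torch.mm followed by relu. A rough sketch of the same measurement with torch.utils.benchmark (sizes are arbitrary and the result is not comparable to the numbers listed above, which come from the script itself):

    import torch
    from torch.utils.benchmark import Timer

    a = torch.randn(256, 256)
    b = torch.randn(256, 256)

    t = Timer(
        stmt="torch.relu(torch.mm(a, b))",
        globals={"torch": torch, "a": a, "b": b},
    )
    print(t.blocked_autorange())  # wall-time measurement for mm + relu
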
/external/pytorch/test/distributed/fsdp/
test_fsdp_fx.py
22 torch.nn.ReLU(),
25 self.relu = torch.nn.ReLU()
28 z = self.relu(self.layer0(x))
29 z = self.relu(self.layer2(z))
32 z = self.relu(self.layer1(z))
36 z = self.relu(self.layer0(x))
61 model.relu,
66 model.relu,
68 model.relu,
70 model.relu,
[all …]
/external/armnn/docs/
05_03_delegate.dox
44 - AVERAGE_POOL_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
46 - AVERAGE_POOL_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, SIGN_BIT, TANH, …
54 - CONCATENATION, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
56 - CONV_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
58 - CONV_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
62 - DEPTHWISE_CONV_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
82 - FULLY_CONNECTED, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
120 - MAX_POOL_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
122 - MAX_POOL_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, SIGN_BIT, TANH, NONE
154 - RELU
/external/pytorch/test/
test_fx_passes.py
43 relu = add_6.relu()
45 return add_4, add_6, relu
69 relu_1 = add_2.relu()
72 relu_2 = add_4.relu()
81 relu_1 = add_1.relu() # blocked by this
105 relu = add.relu()
108 return relu, add_1
115 relu = add.relu()
117 relu_1 = add.relu()
118 return relu, relu_1
[all …]
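
Note: test_fx_passes.py partitions FX graphs containing add/relu chains like the ones shown. A small sketch of producing such a graph with symbolic_trace (the traced function below is illustrative):

    import torch
    from torch import fx

    def fn(x, y):
        add = x + y
        relu = add.relu()
        return relu, add

    gm = fx.symbolic_trace(fn)
    # The traced graph holds the add/relu call pattern the partitioner tests exercise.
    print([f"{n.op}:{n.target}" for n in gm.graph.nodes])
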
