Searched full:conv2d (Results 1 – 25 of 928) sorted by relevance

/external/pytorch/torch/ao/pruning/_experimental/pruner/
base_structured_sparsifier.py
33 nn.Conv2d,
100 …Returns the patterns for conv2d / linear conversion for each element in the activation functions/m…
109 # conv2d -> conv2d
110 (nn.Conv2d, "output"): prune_conv2d,
111 (nn.Conv2d, nn.Conv2d): prune_conv2d_conv2d,
128 # conv2d -> activation -> conv2d
129 (nn.Conv2d, activation, nn.Conv2d): prune_conv2d_activation_conv2d,
130 # conv2d -> activation -> pool -> conv2d
132 nn.Conv2d,
135 nn.Conv2d,
[all …]
prune_functions.py
3 Collection of conversion functions for linear / conv2d structured pruning
43 if isinstance(next_layer, nn.Conv2d): # checking for Conv2d
45 # involves more steps since the Conv2d scaling weight has extra dimensions,
171 # CONV2D
172 def _prune_conv2d_helper(conv2d: nn.Conv2d) -> Tensor: argument
173 parametrization_dict = cast(nn.ModuleDict, conv2d.parametrizations)
180 parametrize.remove_parametrizations(conv2d, "weight", leave_parametrized=True)
181 conv2d.weight = nn.Parameter(conv2d.weight[mask]) # type: ignore[possibly-undefined]
182 conv2d.out_channels = conv2d.weight.shape[0]
184 _remove_bias_handles(conv2d)
[all …]
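
The helper above removes the weight parametrization, slices the weight by a boolean mask, and records the new channel count. A rough standalone sketch of just that masking step (hypothetical helper name; the real code also handles bias handles and fusion with the next layer):

    import torch
    import torch.nn as nn

    def prune_conv2d_out_channels(conv2d: nn.Conv2d, mask: torch.Tensor) -> nn.Conv2d:
        # Keep only the output channels selected by the boolean mask, then
        # update out_channels, mirroring the weight-mask step shown above.
        conv2d.weight = nn.Parameter(conv2d.weight[mask])
        if conv2d.bias is not None:
            conv2d.bias = nn.Parameter(conv2d.bias[mask])
        conv2d.out_channels = int(mask.sum())
        return conv2d

    conv = nn.Conv2d(3, 8, 3)
    pruned = prune_conv2d_out_channels(conv, torch.tensor([True] * 6 + [False] * 2))
    assert pruned.weight.shape[0] == pruned.out_channels == 6
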
DREADME.md85 - conv2d -> conv2d
86 - conv2d -> activation -> conv2d
87 - conv2d -> activation -> pool -> conv2d
88 - conv2d -> pool -> activation -> conv2d
89 - conv2d -> adaptive pool -> flatten -> linear
226 If you're working with linear/conv2d layers, it's very probable that you just need to add an entry …
234 c1: nn.Conv2d,
237 c2: nn.Conv2d,
242 my_patterns = {(nn.Conv2d, nn.MaxPool2d, nn.ReLU, nn.Conv2d): prune_conv2d_activation_conv2d}
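
Following the README excerpt above, registering a custom pattern amounts to adding a dict entry that maps a module sequence to a pruning function. A minimal sketch, assuming prune_conv2d_activation_conv2d is importable from the prune_functions module shown earlier:

    import torch.nn as nn
    from torch.ao.pruning._experimental.pruner.prune_functions import (
        prune_conv2d_activation_conv2d,
    )

    # conv2d -> pool -> activation -> conv2d, reusing the stock pruning function
    my_patterns = {
        (nn.Conv2d, nn.MaxPool2d, nn.ReLU, nn.Conv2d): prune_conv2d_activation_conv2d,
    }
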
/external/pytorch/torch/testing/_internal/
common_pruning.py
150 r"""Model with only Conv2d layers, all without bias, some in a Sequential and some following.
151 Used to test pruned Conv2d-Conv2d fusion."""
156 nn.Conv2d(1, 32, 3, 1, bias=False),
157 nn.Conv2d(32, 64, 3, 1, bias=False),
159 self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False)
160 self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False)
170 r"""Model with only Conv2d layers, some with bias, some in a Sequential and some outside.
171 Used to test pruned Conv2d-Bias-Conv2d fusion."""
176 nn.Conv2d(1, 32, 3, 1, bias=True),
177 nn.Conv2d(32, 32, 3, 1, bias=True),
[all …]
/external/mesa3d/src/etnaviv/ci/
etnaviv-vipnano-fails.txt
260 Conv2D.Op/input_size_112_weight_size_1_input_channels_128_output_channels_256_stride_1_padding_same…
261 Conv2D.Op/input_size_112_weight_size_5_input_channels_256_output_channels_120_stride_1_padding_same…
262 Conv2D.Op/input_size_112_weight_size_5_input_channels_256_output_channels_120_stride_1_padding_same…
263 Conv2D.Op/input_size_5_weight_size_5_input_channels_1_output_channels_120_stride_1_padding_same_0_i…
264 Conv2D.Op/input_size_5_weight_size_5_input_channels_1_output_channels_120_stride_1_padding_same_1_i…
265 Conv2D.Op/input_size_5_weight_size_5_input_channels_1_output_channels_128_stride_1_padding_same_0_i…
266 Conv2D.Op/input_size_5_weight_size_5_input_channels_1_output_channels_128_stride_1_padding_same_1_i…
267 Conv2D.Op/input_size_5_weight_size_5_input_channels_1_output_channels_160_stride_1_padding_same_0_i…
268 Conv2D.Op/input_size_5_weight_size_5_input_channels_1_output_channels_160_stride_1_padding_same_1_i…
269 Conv2D.Op/input_size_5_weight_size_5_input_channels_1_output_channels_1_stride_1_padding_same_1_is_…
[all …]
/external/tensorflow/tensorflow/core/grappler/optimizers/
generic_layout_optimizer_transposer_test.cc
111 auto conv2d = ops::Conv2D( in SimpleConv2D() local
112 scope->WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter, in SimpleConv2D()
113 {1, kStride1, kStride2, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat)); in SimpleConv2D()
115 return conv2d; in SimpleConv2D()
121 auto conv2d = SimpleConv2D(&scope, data_type); in CreateSimpleConv2DGraph() local
122 auto output = ops::Identity(scope.WithOpName("output"), conv2d); in CreateSimpleConv2DGraph()
296 Output conv2d = ops::Conv2D( in CreateSimpleAddN() local
297 scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter, in CreateSimpleAddN()
298 {1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat)); in CreateSimpleAddN()
306 {a, b, c, conv2d}); in CreateSimpleAddN()
[all …]
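
The test builds a Conv2D pinned to a source data format so the layout optimizer has something to transpose. For orientation, the same idea in the Python API (shapes assumed; the NCHW call generally needs a GPU kernel):

    import tensorflow as tf

    x_nhwc = tf.random.normal([1, 32, 32, 3])  # batch, height, width, channels
    w = tf.random.normal([3, 3, 3, 16])        # HWIO filter layout either way
    y = tf.nn.conv2d(x_nhwc, w, strides=1, padding="SAME", data_format="NHWC")

    # The optimizer may rewrite this to NCHW on GPU, wrapping it in transposes:
    x_nchw = tf.transpose(x_nhwc, [0, 3, 1, 2])
    y_nchw = tf.nn.conv2d(x_nchw, w, strides=1, padding="SAME", data_format="NCHW")
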
/external/executorch/backends/xnnpack/test/ops/
conv2d.py
20 class Conv2d(torch.nn.Module): class
44 self.conv = torch.nn.Conv2d(
70 self.first = torch.nn.Conv2d(
77 self.second = torch.nn.Conv2d(
96 self.conv1 = torch.nn.Conv2d(
106 self.conv2 = torch.nn.Conv2d(
131 self.conv = torch.nn.Conv2d(
168 .check_count({"torch.ops.aten.conv2d": conv_count})
184 self._test(Conv2d(bias=has_bias, dtype=torch.float16))
188 self._test(Conv2d(bias=has_bias))
[all …]
/external/pytorch/test/quantization/pt2e/
test_duplicate_dq.py
35 self.conv = torch.nn.Conv2d(3, 3, 3)
51 self.conv1 = torch.nn.Conv2d(3, 3, 3)
52 self.conv2 = torch.nn.Conv2d(3, 3, 1)
69 self.conv1 = torch.nn.Conv2d(3, 3, 3)
70 self.conv2 = torch.nn.Conv2d(3, 3, 1)
124 conv2d -> avgpool -> hardtanh -> linear
125 Check quantization tags on conv2d, avgpool and linear are correctly set
151 conv2d -> conv2d -> add
159 first conv2d is fed to next conv2d, add, and view_copy + linear.
187 conv2d -> conv2d -> add
[all …]
test_graph_utils.py
21 self.conv1 = torch.nn.Conv2d(3, 3, 3)
23 self.conv2 = torch.nn.Conv2d(3, 3, 3)
41 m, [torch.nn.Conv2d, torch.nn.BatchNorm2d]
45 m, [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.ReLU]
53 torch.nn.Conv2d,
56 torch.nn.functional.conv2d,
68 self.conv2 = torch.nn.Conv2d(3, 3, 3)
85 m, [torch.nn.Conv2d, torch.nn.BatchNorm2d]
89 m, [torch.nn.BatchNorm2d, torch.nn.Conv2d]
102 self.conv = torch.nn.Conv2d(3, 3, 3)
[all …]
/external/pytorch/torch/ao/quantization/
fuser_method_mappings.py
27 conv: Module instance of type conv2d/conv3d
32 >>> m1 = nn.Conv2d(10, 20, 3)
43 nn.Conv2d: nni.ConvBn2d,
50 ), "Output channel of Conv2d must match num_features of BatchNorm2d"
72 conv: Module instance of type conv2d/conv3d
77 >>> m1 = nn.Conv2d(10, 20, 3)
90 nn.Conv2d: nni.ConvBnReLU2d,
108 nn.Conv2d: nni.ConvReLU2d,
197 (nn.Conv2d, nn.BatchNorm2d): fuse_conv_bn,
198 (nn.Conv2d, nn.BatchNorm2d, nn.ReLU): fuse_conv_bn_relu,
[all …]
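
The mapping table above pairs (Conv2d, BatchNorm2d) and (Conv2d, BatchNorm2d, ReLU) with fuser functions. In practice these are reached through the public fuse_modules API; a minimal sketch with made-up layer names:

    import torch.nn as nn
    from torch.ao.quantization import fuse_modules

    class M(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(10, 20, 3)
            self.bn = nn.BatchNorm2d(20)  # num_features must match conv out_channels

        def forward(self, x):
            return self.bn(self.conv(x))

    m = M().eval()                             # eval mode folds BN into the conv
    fused = fuse_modules(m, [["conv", "bn"]])
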
/external/pytorch/torch/ao/quantization/backend_config/
onednn.py
122 # (1) Conv2d + Add
124 # conv2d Y
129 # conv2d conv2d
151 # conv2d
192 (add_op, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode)
205 (add_op, nn.Conv2d, MatchAllNode)
215 # Y conv2d
237 # conv2d
278 (add_op, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d))
291 (add_op, MatchAllNode, nn.Conv2d)
[all …]
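
The ASCII diagrams in onednn.py describe a conv2d whose output feeds one operand of an add, with MatchAllNode standing for the other. Written as an eager module for illustration (names invented), the matched shape is:

    import torch

    class ConvAdd(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 3, 3, padding=1)

        def forward(self, x, y):
            return self.conv(x) + y  # add(conv2d(X), Y): the "(1) Conv2d + Add" pattern
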
/external/executorch/backends/arm/test/ops/
test_conv_combos.py
40 # 1. 1x1 CONV2d + ReLU6 (Pointwise)
41 self.pointwise_conv2d = torch.nn.Conv2d(
48 self.depthwise_conv2d = torch.nn.Conv2d(
57 # 3. Linear 1x1 Conv2d
58 self.pointwise_conv2d_linear = torch.nn.Conv2d(
67 # 1x1 CONV2d + ReLU6 (Pointwise)
77 # Linear 1x1 Conv2d
94 self.conv2d = torch.nn.Conv2d(
104 x = self.conv2d(x)
117 self.conv2d = torch.nn.Conv2d(
[all …]
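
For reference, the conv flavors named in the test differ only in kernel size and grouping; an illustrative sketch with assumed channel counts:

    import torch.nn as nn

    pointwise = nn.Conv2d(32, 64, kernel_size=1)                        # 1x1, mixes channels only
    depthwise = nn.Conv2d(32, 32, kernel_size=3, padding=1, groups=32)  # one filter per channel
    linear_pw = nn.Conv2d(64, 32, kernel_size=1)                        # 1x1 with no activation after
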
test_conv2d.py
19 class Conv2d(torch.nn.Module): class
91 torch.nn.Conv2d(
114 conv2d_2x2_3x2x40x40_nobias = Conv2d(
126 conv2d_3x3_1x3x256x256_st1 = Conv2d(
137 conv2d_3x3_1x3x12x12_st2_pd1 = Conv2d(
148 conv2d_1x1_1x2x128x128_st1 = Conv2d(
159 conv2d_2x2_1x1x14x13_st2 = Conv2d(
170 conv2d_5x5_3x2x128x128_st1 = Conv2d(
181 conv2d_3x3_1x3x224x224_st2_pd1 = Conv2d(
192 conv2d_5x5_1x3x14x15_st3_pd1 = Conv2d(
[all …]
/external/pytorch/test/fx/
test_source_matcher_utils.py
79 self.conv1 = torch.nn.Conv2d(
82 self.conv2 = torch.nn.Conv2d(
85 self.conv3 = torch.nn.Conv2d(
105 gm.graph, [torch.nn.Conv2d, torch.nn.ReLU, torch.nn.MaxPool2d]
109 self.assertEqual(len(module_partitions[torch.nn.Conv2d]), 3)
115 module_partitions[torch.nn.Conv2d][0],
121 module_partitions[torch.nn.Conv2d][1],
127 module_partitions[torch.nn.Conv2d][2],
155 return torch.nn.functional.conv2d(
182 gm.graph, [torch.nn.functional.conv2d]
[all …]
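
The test exercises get_source_partitions, which groups nodes of a traced graph by the nn.Module or function that produced them. A hedged sketch of typical usage (export API assumed):

    import torch
    from torch.fx.passes.utils.source_matcher_utils import get_source_partitions

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 3, 3)

        def forward(self, x):
            return torch.relu(self.conv(x))

    gm = torch.export.export(M(), (torch.randn(1, 3, 8, 8),)).module()
    parts = get_source_partitions(gm.graph, [torch.nn.Conv2d, torch.nn.ReLU])
    assert len(parts[torch.nn.Conv2d]) == 1
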
/external/pytorch/test/inductor/
test_mkldnn_pattern_matcher.py
88 # while testing conv2d/3d/deconv2d
276 self.conv = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1)
509 self.conv1 = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1)
510 self.conv2 = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1)
666 self.conv = torch.nn.Conv2d(3, 128, kernel_size=3, stride=1)
667 self.conv2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1)
676 # 1. Dequant-Conv2D pattern matched in QConv2D weight prepack * 1
701 This testcase will quantize a single Conv2d module.
711 This testcase will quantize a single Conv2d module with int8_mixed_bf16 quantization.
727 self.conv = torch.nn.Conv2d(3, 128, kernel_size=3, stride=1)
[all …]
/external/executorch/exir/backend/test/
test_graph_partition.py
90 self.conv1 = torch.nn.Conv2d(32, 32, 1)
91 self.conv2 = torch.nn.Conv2d(32, 32, 1)
92 self.conv3 = torch.nn.Conv2d(32, 32, 1)
107 "torch.nn.modules.conv.Conv2d",
122 self.conv1 = torch.nn.Conv2d(32, 32, 1)
123 self.conv2 = torch.nn.Conv2d(32, 32, 1)
124 self.conv3 = torch.nn.Conv2d(32, 32, 1)
139 "torch.nn.modules.conv.Conv2d",
192 self.conv1 = torch.nn.Conv2d(32, 32, 1)
193 self.conv2 = torch.nn.Conv2d(32, 32, 1)
[all …]
/external/executorch/backends/arm/test/misc/
test_dim_order_guards.py
16 class Conv2D(torch.nn.Module): class
20 self.conv2d = torch.nn.Conv2d(in_channels=2, out_channels=3, kernel_size=(3, 3))
23 return self.conv2d(x.to(memory_format=torch.channels_last))
32 module = Conv2D()
46 module = Conv2D()
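
The guard under test converts the input to channels-last before the conv; the conversion itself is a single call:

    import torch

    conv = torch.nn.Conv2d(in_channels=2, out_channels=3, kernel_size=(3, 3))
    x = torch.randn(1, 2, 8, 8)                        # contiguous NCHW
    y = conv(x.to(memory_format=torch.channels_last))  # same shape, NHWC strides
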
/external/ComputeLibrary/examples/
graph_inception_v4.cpp
86 .set_name("Conv2d_1a_3x3/Conv2D") in do_setup()
98 .set_name("Conv2d_2a_3x3/Conv2D") in do_setup()
110 .set_name("Conv2d_2b_3x3/Conv2D") in do_setup()
210 .set_name("Mixed_3a/Branch_1/Conv2d_0a_3x3/Conv2D") in get_mixed_3a()
230 .set_name("Mixed_4a/Branch_0/Conv2d_0a_1x1/Conv2D") in get_mixed_4a()
241 .set_name("Mixed_4a/Branch_0/Conv2d_1a_3x3/Conv2D") in get_mixed_4a()
254 .set_name("Mixed_4a/Branch_1/Conv2d_0a_1x1/Conv2D") in get_mixed_4a()
265 .set_name("Mixed_4a/Branch_1/Conv2d_0b_1x7/Conv2D") in get_mixed_4a()
276 .set_name("Mixed_4a/Branch_1/Conv2d_0c_7x1/Conv2D") in get_mixed_4a()
287 .set_name("Mixed_4a/Branch_1/Conv2d_1a_3x3/Conv2D") in get_mixed_4a()
[all …]
/external/tensorflow/tensorflow/core/kernels/
conv_ops_benchmark_test.cc
46 Node* conv2d; member
51 Node* conv2d; member
57 Node* conv2d; member
64 Node* conv2d; member
70 Node* conv2d; member
82 // Creates a simple Tensorflow graph with single Conv2D node.
84 static Conv2DGraph Conv2D(int batch, int height, int width, int in_depth, in Conv2D() function
100 Node* conv2d; in Conv2D() local
104 : NodeBuilder(graph->NewName("conv"), "Conv2D"); in Conv2D()
111 .Finalize(graph, &conv2d)); in Conv2D()
[all …]
/external/pytorch/torch/ao/nn/intrinsic/quantized/modules/
conv_add.py
12 class ConvAdd2d(nnq.Conv2d):
14 A ConvAdd2d module is a fused module of Conv2d and Add
16 We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.
19 Same as torch.ao.nn.quantized.Conv2d
80 class ConvAddReLU2d(nnq.Conv2d):
82 A ConvAddReLU2d module is a fused module of Conv2d, Add and Relu
84 We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.
87 Same as torch.ao.nn.quantized.Conv2d
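
In float terms (not the quantized implementation), the two fused modules compute conv(x) + y and relu(conv(x) + y); an illustrative sketch:

    import torch.nn as nn
    import torch.nn.functional as F

    class FloatConvAdd2d(nn.Module):
        def __init__(self, in_ch, out_ch, k):
            super().__init__()
            self.conv = nn.Conv2d(in_ch, out_ch, k)

        def forward(self, x, y):
            return self.conv(x) + y               # ConvAdd2d

    class FloatConvAddReLU2d(FloatConvAdd2d):
        def forward(self, x, y):
            return F.relu(super().forward(x, y))  # ConvAddReLU2d
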
/external/pytorch/test/quantization/jit/
test_quantize_jit.py
91 self.conv = torch.nn.Conv2d(3, 5, 3).float()
122 "aten::conv2d"
133 conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
176 conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
220 conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
263 conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
323 # This test case attempt to try combinations of conv2d/conv3d with bias/nobias
328 conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
447 self.conv = torch.nn.Conv2d(3, 5, 3)
480 self.conv = torch.nn.Conv2d(3, 5, 3)
[all …]
/external/tensorflow/tensorflow/python/layers/
convolutional.py
22 Conv2D = convolutional.Conv2D variable
23 conv2d = convolutional.conv2d variable
38 Convolution2D = Conv2D
44 convolution2d = conv2d
/external/pytorch/docs/source/
mobile_optimizer.rst
15 Conv2D + BatchNorm fusion** (blocklisting option `mobile_optimizer.MobileOptimizerType.CONV_BN_FUS…
16 …ile`` pass rewrites the graph to replace ``Conv2D/Linear`` with 1) op that pre-packs weight for XN…
17 …th previous ``Conv2D`` or ``linear`` op in XNNPACK. This pass rewrites graph by finding ``ReLU/har…
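
A usage sketch of the pass described above, including the blocklist option the doc names (the model here is a placeholder):

    import torch
    from torch.utils.mobile_optimizer import MobileOptimizerType, optimize_for_mobile

    model = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8), torch.nn.ReLU()
    ).eval()
    scripted = torch.jit.script(model)
    opt = optimize_for_mobile(scripted)  # Conv2d+BatchNorm fusion, XNNPACK prepacking
    # Skip just the fusion pass:
    opt_no_fuse = optimize_for_mobile(
        scripted, optimization_blocklist={MobileOptimizerType.CONV_BN_FUSION}
    )
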
/external/pytorch/benchmarks/operator_benchmark/pt/
conv_test.py
50 Microbenchmarks for Conv2d, ConvTranspose2d, and Conv2dPointwise operators.
57 self.conv2d = nn.Conv2d(
60 self.set_module_name("Conv2d")
63 return self.conv2d(input)
82 self.conv2d = nn.Conv2d(IC, OC, 1, stride=stride, groups=G, padding=pad).to(
88 return self.conv2d(input)
/external/tensorflow/tensorflow/lite/toco/graph_transformations/
identify_dilated_conv.cc
28 // SpaceToBatchND -> Conv2D -> BatchToSpaceND
30 // This method was common before Conv2D fully supported dilated convolution in
39 // SpaceToBatchND -> Expand -> Conv2D -> Squeeze -> BatchToSpaceND -> BiasAdd
41 // Pad -> SpaceToBatchND -> Expand -> Conv2D -> Squeeze -> BatchToSpaceND ->
44 // SpaceToBatchND -> Expand -> Conv2D -> Squeeze -> Pad -> BatchToSpaceND ->
47 // SpaceToBatchND -> Expand -> Conv2D -> Squeeze -> BiasAdd -> BatchToSpaceND
49 // SpaceToBatchND -> Conv2D -> Pad -> BatchToSpaceND -> BiasAdd
51 // SpaceToBatchND -> Conv2D -> BatchToSpaceND -> BiasAdd
55 // WaveNet) to the 4D arrays that Conv2D requires. Padding and BiasAdd are
126 // before STB Op like below Pad -> SpaceToBatchND -> Expand -> Conv2D -> in ResolveDilatedConv()
[all …]
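
The rewrite is sound because a dilated convolution equals an ordinary convolution with a zero-inflated kernel, which is what the SpaceToBatchND/BatchToSpaceND sandwich emulates on backends without native dilation. A quick numerical check of that identity (PyTorch used here only for the check, not the TOCO pass):

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 1, 16, 16)
    w = torch.randn(1, 1, 3, 3)
    w_inflated = torch.zeros(1, 1, 5, 5)   # 3x3 taps spread out with zeros between
    w_inflated[:, :, ::2, ::2] = w
    assert torch.allclose(F.conv2d(x, w, dilation=2), F.conv2d(x, w_inflated))
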
