Searched refs:xnn_params (Results 1 – 25 of 38) sorted by relevance

/external/XNNPACK/src/
init.c
56 struct xnn_parameters xnn_params = { variable
76 xnn_params.q8.gemm = (struct gemm_parameters) { in init()
84 xnn_params.q8.dwconv[0] = (struct dwconv_parameters) { in init()
90 xnn_params.q8.dwconv[0] = (struct dwconv_parameters) { in init()
96 xnn_params.q8.avgpool = (struct avgpool_parameters) { in init()
102 xnn_params.q8.gavgpool = (struct gavgpool_parameters) { in init()
107 xnn_params.q8.vadd = (xnn_vadd_ukernel_function) xnn_q8_vadd_ukernel__neon; in init()
112 xnn_params.u8.maxpool = (struct maxpool_parameters) { in init()
117 xnn_params.u8.clamp = (xnn_univector_ukernel_function) xnn_u8_clamp_ukernel__neon; in init()
118 xnn_params.u8.rmax = xnn_u8_rmax_ukernel__neon; in init()
[all …]
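The init.c hits above show a single process-wide `struct xnn_parameters xnn_params` being filled with CPU-specific micro-kernel pointers and tile sizes inside init(), then guarded by its `initialized` flag before operators use it. Below is a minimal sketch of that pattern; the struct layout, field names, and kernels are invented for illustration and are not XNNPACK's real definitions.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical micro-kernel signature and parameter structs; the real
 * XNNPACK types (struct gemm_parameters, struct dwconv_parameters, ...)
 * are more elaborate. */
typedef void (*vadd_ukernel_fn)(size_t n, const float* a, const float* b, float* out);

struct gemm_parameters {
  void* gemm;      /* pointer to the selected GEMM micro-kernel */
  unsigned mr;     /* rows of the register tile */
  unsigned nr;     /* columns of the register tile */
};

struct parameters {
  bool initialized;
  struct {
    struct gemm_parameters gemm;
    vadd_ukernel_fn vadd;
  } f32;
};

/* Single process-wide parameter block, mirroring `struct xnn_parameters xnn_params`. */
static struct parameters params = { .initialized = false };

static void scalar_vadd(size_t n, const float* a, const float* b, float* out) {
  for (size_t i = 0; i < n; i++) {
    out[i] = a[i] + b[i];
  }
}

/* init() picks micro-kernels for the current CPU and marks the block ready;
 * a NEON or AVX build would install SIMD kernels here instead. */
static void init(void) {
  params.f32.gemm = (struct gemm_parameters) { .gemm = NULL, .mr = 4, .nr = 8 };
  params.f32.vadd = scalar_vadd;
  params.initialized = true;
}

int main(void) {
  init();
  const float a[4] = {1, 2, 3, 4};
  const float b[4] = {5, 6, 7, 8};
  float out[4];
  if (!params.initialized) {
    return 1;  /* operators bail out when the library was never initialized */
  }
  params.f32.vadd(4, a, b, out);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}
```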
softmax-nc.c
35 if (!xnn_params.initialized) { in xnn_create_softmax_nc_q8()
146 if (!xnn_params.initialized) { in xnn_setup_softmax_nc_q8()
167 .rmax_ukernel = xnn_params.u8.rmax, in xnn_setup_softmax_nc_q8()
168 .lut_norm_ukernel = xnn_params.u8.lut32norm, in xnn_setup_softmax_nc_q8()
188 if (!xnn_params.initialized) { in xnn_create_softmax_nc_f32()
255 if (!xnn_params.initialized) { in xnn_setup_softmax_nc_f32()
275 .rmax_ukernel = xnn_params.f32.rmax, in xnn_setup_softmax_nc_f32()
276 .raddstoreexpminusmax_ukernel = xnn_params.f32.raddstoreexpminusmax, in xnn_setup_softmax_nc_f32()
277 .vmulc_ukernel = xnn_params.f32.vmul.opc_ukernel, in xnn_setup_softmax_nc_f32()
fully-connected-nc.c
47 if (!xnn_params.initialized) { in xnn_create_fully_connected_nc_q8()
132 const uint32_t nr = xnn_params.q8.gemm.nr; in xnn_create_fully_connected_nc_q8()
133 const uint32_t kr = UINT32_C(1) << xnn_params.q8.gemm.log2_kr; in xnn_create_fully_connected_nc_q8()
178 .default_function = xnn_params.q8.gemm.gemm, in xnn_create_fully_connected_nc_q8()
179 .mr = xnn_params.q8.gemm.mr, in xnn_create_fully_connected_nc_q8()
209 if (!xnn_params.initialized) { in xnn_create_fully_connected_nc_f32()
273 const uint32_t nr = xnn_params.f32.gemm.nr; in xnn_create_fully_connected_nc_f32()
274 const uint32_t kr = UINT32_C(1) << xnn_params.f32.gemm.log2_kr; in xnn_create_fully_connected_nc_f32()
275 const uint32_t sr = UINT32_C(1) << xnn_params.f32.gemm.log2_sr; in xnn_create_fully_connected_nc_f32()
313 .default_function = xnn_params.f32.gemm.gemm, in xnn_create_fully_connected_nc_f32()
[all …]
convolution-nchw.c
61 if (!xnn_params.initialized) { in xnn_create_convolution2d_nchw_f32()
157 if (is_1x1 && !any_padding && !nhwc_input && groups == 1 && xnn_params.f32.spmm.ukernel != NULL) { in xnn_create_convolution2d_nchw_f32()
161 …nhwc_input && groups == 1 && xnn_params.f32.hwc2spchw_dconv3x3c3s2.ukernel_with_symm_padding != NU… in xnn_create_convolution2d_nchw_f32()
166 …!nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.spchw_dwc… in xnn_create_convolution2d_nchw_f32()
169 dwconv_parameters = &xnn_params.f32.spchw_dwconv3x3; in xnn_create_convolution2d_nchw_f32()
172 …!nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.spchw_dwc… in xnn_create_convolution2d_nchw_f32()
175 dwconv_parameters = &xnn_params.f32.spchw_dwconv3x3s2; in xnn_create_convolution2d_nchw_f32()
178 …!nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.spchw_dwc… in xnn_create_convolution2d_nchw_f32()
181 dwconv_parameters = &xnn_params.f32.spchw_dwconv5x5; in xnn_create_convolution2d_nchw_f32()
184 …!nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.spchw_dwc… in xnn_create_convolution2d_nchw_f32()
[all …]
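The convolution-nchw.c hits illustrate fast-path selection: a specialized kernel is chosen only when its pointer in xnn_params is non-NULL and the layer shape qualifies (1x1 with no padding and a single group for the sparse path, one input and output channel per group for the depthwise paths). The sketch below is a hedged reconstruction of that dispatch idea with invented names and only two of the paths, not the actual xnn_create_convolution2d_nchw_f32 logic.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*conv_ukernel_fn)(void);

/* Hypothetical per-algorithm kernel slots; NULL means "not available on this CPU". */
static struct {
  struct { conv_ukernel_fn ukernel; } spmm;             /* sparse 1x1 path */
  struct { conv_ukernel_fn ukernel; } spchw_dwconv3x3;  /* depthwise 3x3 path */
} f32_params = { { NULL }, { NULL } };

enum conv_algorithm { CONV_ALGO_IGEMM, CONV_ALGO_SPMM, CONV_ALGO_DWCONV };

/* Mirrors the shape-plus-availability checks seen in the hits above. */
static enum conv_algorithm select_algorithm(
    bool is_1x1, bool any_padding, size_t groups,
    size_t group_input_channels, size_t group_output_channels) {
  if (is_1x1 && !any_padding && groups == 1 && f32_params.spmm.ukernel != NULL) {
    return CONV_ALGO_SPMM;
  }
  if (group_input_channels == 1 && group_output_channels == 1 &&
      f32_params.spchw_dwconv3x3.ukernel != NULL) {
    return CONV_ALGO_DWCONV;
  }
  return CONV_ALGO_IGEMM;  /* generic fallback always exists */
}

int main(void) {
  /* With no specialized kernels installed, the generic path (0) is selected. */
  printf("%d\n", (int) select_algorithm(true, false, 1, 64, 64));
  return 0;
}
```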
average-pooling-nhwc.c
60 if (!xnn_params.initialized) { in xnn_create_average_pooling2d_nhwc_q8()
165 const uint32_t mr = xnn_params.q8.avgpool.mr; in xnn_create_average_pooling2d_nhwc_q8()
166 const uint32_t qr = xnn_params.q8.avgpool.qr; in xnn_create_average_pooling2d_nhwc_q8()
232 if (!xnn_params.initialized) { in xnn_create_average_pooling2d_nhwc_f32()
313 const uint32_t mr = xnn_params.f32.avgpool.mr; in xnn_create_average_pooling2d_nhwc_f32()
314 const uint32_t qr = xnn_params.f32.avgpool.qr; in xnn_create_average_pooling2d_nhwc_f32()
376 if (!xnn_params.initialized) { in xnn_setup_average_pooling2d_nhwc_q8()
428 const uint32_t mr = xnn_params.q8.avgpool.mr; in xnn_setup_average_pooling2d_nhwc_q8()
444 const uint32_t qr = xnn_params.q8.avgpool.qr; in xnn_setup_average_pooling2d_nhwc_q8()
473 average_pooling_op->context.average_pooling.unipass_ukernel = xnn_params.q8.avgpool.up; in xnn_setup_average_pooling2d_nhwc_q8()
[all …]
global-average-pooling-nwc.c
39 if (!xnn_params.initialized) { in xnn_create_global_average_pooling_nwc_q8()
155 if (!xnn_params.initialized) { in xnn_create_global_average_pooling_nwc_f32()
253 if (!xnn_params.initialized) { in xnn_setup_global_average_pooling_nwc_q8()
297 if (width <= xnn_params.q8.gavgpool.mr) { in xnn_setup_global_average_pooling_nwc_q8()
299 …global_average_pooling_op->context.global_average_pooling_nwc.unipass_ukernel = xnn_params.q8.gavg… in xnn_setup_global_average_pooling_nwc_q8()
302 …global_average_pooling_op->context.global_average_pooling_nwc.multipass_ukernel = xnn_params.q8.ga… in xnn_setup_global_average_pooling_nwc_q8()
323 if (!xnn_params.initialized) { in xnn_setup_global_average_pooling_nwc_f32()
361 if (width <= xnn_params.f32.gavgpool.mr) { in xnn_setup_global_average_pooling_nwc_f32()
363 …global_average_pooling_op->context.global_average_pooling_nwc.unipass_ukernel = xnn_params.f32.gav… in xnn_setup_global_average_pooling_nwc_f32()
366 …global_average_pooling_op->context.global_average_pooling_nwc.multipass_ukernel = xnn_params.f32.g… in xnn_setup_global_average_pooling_nwc_f32()
sigmoid-nc.c
37 if (!xnn_params.initialized) { in xnn_create_sigmoid_nc_q8()
160 if (!xnn_params.initialized) { in xnn_create_sigmoid_nc_f32()
191 if (xnn_params.f32.sigmoid == NULL) { in xnn_create_sigmoid_nc_f32()
236 if (!xnn_params.initialized) { in xnn_setup_sigmoid_nc_q8()
261 .ukernel = xnn_params.x8.lut, in xnn_setup_sigmoid_nc_q8()
275 .ukernel = xnn_params.x8.lut, in xnn_setup_sigmoid_nc_q8()
300 if (!xnn_params.initialized) { in xnn_setup_sigmoid_nc_f32()
320 .ukernel = xnn_params.f32.sigmoid, in xnn_setup_sigmoid_nc_f32()
333 .ukernel = xnn_params.f32.sigmoid, in xnn_setup_sigmoid_nc_f32()
clamp-nc.c
34 if (!xnn_params.initialized) { in xnn_create_clamp_nc_u8()
108 if (!xnn_params.initialized) { in xnn_create_clamp_nc_f32()
195 if (!xnn_params.initialized) { in xnn_setup_clamp_nc_u8()
215 .ukernel = xnn_params.u8.clamp, in xnn_setup_clamp_nc_u8()
229 .ukernel = xnn_params.u8.clamp, in xnn_setup_clamp_nc_u8()
255 if (!xnn_params.initialized) { in xnn_setup_clamp_nc_f32()
275 .ukernel = xnn_params.f32.clamp, in xnn_setup_clamp_nc_f32()
289 .ukernel = xnn_params.f32.clamp, in xnn_setup_clamp_nc_f32()
convolution-nhwc.c
93 if (!xnn_params.initialized) { in xnn_create_convolution2d_nhwc_q8()
235 …(dwconv_parameters = find_dwigemm_ukernel(kernel_size, xnn_params.q8.dwconv, XNN_MAX_Q8_DWCONV_UKE… in xnn_create_convolution2d_nhwc_q8()
285 const uint32_t nr = xnn_params.q8.gemm.nr; in xnn_create_convolution2d_nhwc_q8()
286 const uint32_t kr = UINT32_C(1) << xnn_params.q8.gemm.log2_kr; in xnn_create_convolution2d_nhwc_q8()
307 .mr = xnn_params.q8.gemm.mr, in xnn_create_convolution2d_nhwc_q8()
310 .default_function = xnn_params.q8.gemm.gemm, in xnn_create_convolution2d_nhwc_q8()
328 .mr = xnn_params.q8.gemm.mr, in xnn_create_convolution2d_nhwc_q8()
331 .default_function = xnn_params.q8.gemm.igemm, in xnn_create_convolution2d_nhwc_q8()
422 if (!xnn_params.initialized) { in xnn_create_convolution2d_nhwc_f32()
547 … find_dwigemm_ukernel(kernel_size, xnn_params.f32.dwconv, XNN_MAX_F32_DWCONV_UKERNELS)) != NULL) in xnn_create_convolution2d_nhwc_f32()
[all …]
add-nc.c
42 if (!xnn_params.initialized) { in xnn_create_add_nc_q8()
167 if (!xnn_params.initialized) { in xnn_create_add_nc_f32()
264 if (!xnn_params.initialized) { in xnn_setup_add_nc_q8()
285 .ukernel = xnn_params.q8.vadd, in xnn_setup_add_nc_q8()
301 .ukernel = xnn_params.q8.vadd, in xnn_setup_add_nc_q8()
327 if (!xnn_params.initialized) { in xnn_setup_add_nc_f32()
348 .ukernel = xnn_params.f32.vadd.op_ukernel, in xnn_setup_add_nc_f32()
364 .ukernel = xnn_params.f32.vadd.op_ukernel, in xnn_setup_add_nc_f32()
global-average-pooling-ncw.c
30 if (!xnn_params.initialized) { in xnn_create_global_average_pooling_ncw_f32()
65 if (xnn_params.f32.spchw_gavgpool.ukernel == NULL) { in xnn_create_global_average_pooling_ncw_f32()
110 if (!xnn_params.initialized) { in xnn_setup_global_average_pooling_ncw_f32()
136 .ukernel = xnn_params.f32.spchw_gavgpool.ukernel, in xnn_setup_global_average_pooling_ncw_f32()
channel-pad-nc.c
32 if (!xnn_params.initialized) { in xnn_create_channel_pad_nc_x32()
104 if (!xnn_params.initialized) { in xnn_setup_channel_pad_nc_x32()
127 .ukernel = xnn_params.x32.pad.ukernel, in xnn_setup_channel_pad_nc_x32()
133 channel_pad_op->compute.tile[0] = xnn_params.x32.pad.mr; in xnn_setup_channel_pad_nc_x32()
hardswish-nc.c
29 if (!xnn_params.initialized) { in xnn_create_hardswish_nc_f32()
97 if (!xnn_params.initialized) { in xnn_setup_hardswish_nc_f32()
117 .ukernel = xnn_params.f32.hswish, in xnn_setup_hardswish_nc_f32()
131 .ukernel = xnn_params.f32.hswish, in xnn_setup_hardswish_nc_f32()
binary-elementwise-nd.c
30 if (!xnn_params.initialized) { in create_binary_elementwise_nd_f32()
156 if (!xnn_params.initialized) { in setup_binary_elementwise_nd_f32()
322 &xnn_params.f32.vadd, in xnn_setup_add_nd_f32()
342 &xnn_params.f32.vdiv, in xnn_setup_divide_nd_f32()
362 &xnn_params.f32.vmax, in xnn_setup_maximum_nd_f32()
382 &xnn_params.f32.vmin, in xnn_setup_minimum_nd_f32()
402 &xnn_params.f32.vmul, in xnn_setup_multiply_nd_f32()
422 &xnn_params.f32.vsub, in xnn_setup_subtract_nd_f32()
prelu-nc.c
33 if (!xnn_params.initialized) { in xnn_create_prelu_nc_f32()
117 if (!xnn_params.initialized) { in xnn_setup_prelu_nc_f32()
135 .ukernel = xnn_params.f32.prelu.ukernel, in xnn_setup_prelu_nc_f32()
145 const uint32_t row_tile = xnn_params.f32.prelu.row_tile; in xnn_setup_prelu_nc_f32()
deconvolution-nhwc.c
73 if (!xnn_params.initialized) { in xnn_create_deconvolution2d_nhwc_q8()
190 const uint32_t mr = xnn_params.q8.gemm.mr; in xnn_create_deconvolution2d_nhwc_q8()
191 const uint32_t nr = xnn_params.q8.gemm.nr; in xnn_create_deconvolution2d_nhwc_q8()
192 const uint32_t kr = UINT32_C(1) << xnn_params.q8.gemm.log2_kr; in xnn_create_deconvolution2d_nhwc_q8()
193 const xnn_igemm_ukernel_function ukernel_function = xnn_params.q8.gemm.igemm; in xnn_create_deconvolution2d_nhwc_q8()
331 if (!xnn_params.initialized) { in xnn_create_deconvolution2d_nhwc_f32()
428 uint32_t mr = xnn_params.f32.gemm.mr; in xnn_create_deconvolution2d_nhwc_f32()
429 uint32_t nr = xnn_params.f32.gemm.nr; in xnn_create_deconvolution2d_nhwc_f32()
430 uint32_t kr = UINT32_C(1) << xnn_params.f32.gemm.log2_kr; in xnn_create_deconvolution2d_nhwc_f32()
431 uint32_t sr = UINT32_C(1) << xnn_params.f32.gemm.log2_sr; in xnn_create_deconvolution2d_nhwc_f32()
[all …]
subgraph.c
27 if (!xnn_params.initialized) { in xnn_create_subgraph()
136 if (!xnn_params.initialized) { in xnn_define_convolution_2d()
288 if (!xnn_params.initialized) { in xnn_define_depthwise_convolution_2d()
420 if (!xnn_params.initialized) { in xnn_define_add2()
493 if (!xnn_params.initialized) { in xnn_define_multiply2()
564 if (!xnn_params.initialized) { in xnn_define_prelu()
614 if (!xnn_params.initialized) { in xnn_define_clamp()
656 if (!xnn_params.initialized) { in xnn_define_hardswish()
696 if (!xnn_params.initialized) { in xnn_define_sigmoid()
736 if (!xnn_params.initialized) { in xnn_define_softmax()
/external/XNNPACK/test/
prelu-nc.cc
14 …r (size_t channels = 1; channels < xnn_params.f32.prelu.channel_tile * 10; channels += std::max<si… in TEST()
24 …r (size_t channels = 1; channels < xnn_params.f32.prelu.channel_tile * 10; channels += std::max<si… in TEST()
35 …r (size_t channels = 1; channels < xnn_params.f32.prelu.channel_tile * 10; channels += std::max<si… in TEST()
46 …r (size_t channels = 1; channels < xnn_params.f32.prelu.channel_tile * 10; channels += std::max<si… in TEST()
48 .batch_size(xnn_params.f32.prelu.row_tile) in TEST()
56 …r (size_t channels = 1; channels < xnn_params.f32.prelu.channel_tile * 10; channels += std::max<si… in TEST()
58 .batch_size(xnn_params.f32.prelu.row_tile) in TEST()
67 …r (size_t channels = 1; channels < xnn_params.f32.prelu.channel_tile * 10; channels += std::max<si… in TEST()
69 .batch_size(xnn_params.f32.prelu.row_tile) in TEST()
78 …r (size_t channels = 1; channels < xnn_params.f32.prelu.channel_tile * 10; channels += std::max<si… in TEST()
[all …]
max-pooling-nhwc.cc
19 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
35 for (size_t pool_size = 3; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
57 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
74 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
91 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
107 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
129 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
146 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
163 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
189 for (size_t pool_size = 2; pool_size <= xnn_params.u8.maxpool.mr; pool_size++) { in TEST()
[all …]
global-average-pooling-nwc.cc
19 for (size_t width = 1; width <= xnn_params.q8.gavgpool.mr; width++) { in TEST()
32 for (size_t width = 1; width <= xnn_params.q8.gavgpool.mr; width++) { in TEST()
46 for (size_t width = 1; width <= xnn_params.q8.gavgpool.mr; width++) { in TEST()
62 for (size_t width = 1; width <= xnn_params.q8.gavgpool.mr; width++) { in TEST()
78 for (size_t width = 1; width <= xnn_params.q8.gavgpool.mr; width++) { in TEST()
94 for (size_t width = 1; width <= xnn_params.q8.gavgpool.mr; width++) { in TEST()
110 for (size_t width = 1; width <= xnn_params.q8.gavgpool.mr; width++) { in TEST()
124 for (size_t width = 1; width <= xnn_params.q8.gavgpool.mr; width++) { in TEST()
138 … for (size_t width = xnn_params.q8.gavgpool.mr; width <= 4 * xnn_params.q8.gavgpool.mr; width++) { in TEST()
151 … for (size_t width = xnn_params.q8.gavgpool.mr; width <= 4 * xnn_params.q8.gavgpool.mr; width++) { in TEST()
[all …]
deconvolution-nhwc.cc
27 .group_output_channels(xnn_params.q8.gemm.nr * 2 + 3)
39 .group_output_channels(xnn_params.q8.gemm.nr * 2 + 3)
52 .group_output_channels(xnn_params.q8.gemm.nr * 2 + 3)
65 .group_output_channels(xnn_params.q8.gemm.nr * 2 + 3)
73 …for (size_t output_channels = 1; output_channels <= xnn_params.q8.gemm.nr * 2; output_channels *= …
90 .group_output_channels(xnn_params.q8.gemm.nr * 2 + 3)
102 .group_output_channels(xnn_params.q8.gemm.nr * 2 + 3)
103 .output_pixel_stride(xnn_params.q8.gemm.nr * 2 + 13)
114 .group_output_channels(xnn_params.q8.gemm.nr * 2 + 3)
126 .group_output_channels(xnn_params.q8.gemm.nr * 2 + 3)
[all …]
average-pooling-nhwc.cc
19 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
35 for (size_t pool_size = 3; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
57 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
74 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
90 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
112 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
129 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
155 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
181 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
209 for (size_t pool_size = 2; pool_size <= xnn_params.q8.avgpool.mr; pool_size++) { in TEST()
[all …]
argmax-pooling-nhwc.cc
32 …for (size_t pool_size = 2; pool_size <= FindMaxSinglePassPoolingSize(xnn_params.f32.argmaxpool); p… in TEST()
48 …for (size_t pool_size = 3; pool_size <= FindMaxSinglePassPoolingSize(xnn_params.f32.argmaxpool); p… in TEST()
70 …for (size_t pool_size = 2; pool_size <= FindMaxSinglePassPoolingSize(xnn_params.f32.argmaxpool); p… in TEST()
86 …for (size_t pool_size = 2; pool_size <= FindMaxSinglePassPoolingSize(xnn_params.f32.argmaxpool); p… in TEST()
108 …for (size_t pool_size = 2; pool_size <= FindMaxSinglePassPoolingSize(xnn_params.f32.argmaxpool); p… in TEST()
134 …for (size_t pool_size = 2; pool_size <= FindMaxSinglePassPoolingSize(xnn_params.f32.argmaxpool); p… in TEST()
160 …for (size_t pool_size = 2; pool_size <= FindMaxSinglePassPoolingSize(xnn_params.f32.argmaxpool); p… in TEST()
186 …for (size_t pool_size = 2; pool_size <= FindMaxSinglePassPoolingSize(xnn_params.f32.argmaxpool); p… in TEST()
211 const auto multipass = FindMultiPassMicroKernel(xnn_params.f32.argmaxpool); in TEST()
228 const auto multipass = FindMultiPassMicroKernel(xnn_params.f32.argmaxpool); in TEST()
[all …]
channel-pad-nc.cc
67 .batch_size(xnn_params.x32.pad.mr) in TEST()
81 .batch_size(xnn_params.x32.pad.mr) in TEST()
96 .batch_size(xnn_params.x32.pad.mr) in TEST()
111 .batch_size(xnn_params.x32.pad.mr) in TEST()
127 .batch_size(3 * xnn_params.x32.pad.mr + 1) in TEST()
141 .batch_size(3 * xnn_params.x32.pad.mr + 1) in TEST()
156 .batch_size(3 * xnn_params.x32.pad.mr + 1) in TEST()
/external/XNNPACK/src/xnnpack/
allocator.h
25 return xnn_params.allocator.allocate(xnn_params.allocator.context, memory_size); in xnn_allocate_memory()
29 void* memory_pointer = xnn_params.allocator.allocate(xnn_params.allocator.context, memory_size); in xnn_allocate_zero_memory()
37 return xnn_params.allocator.reallocate(xnn_params.allocator.context, memory_pointer, memory_size); in xnn_reallocate_memory()
41 xnn_params.allocator.deallocate(xnn_params.allocator.context, memory_pointer); in xnn_release_memory()
45 …return xnn_params.allocator.aligned_allocate(xnn_params.allocator.context, XNN_ALLOCATION_ALIGNMEN… in xnn_allocate_simd_memory()
49 void* memory_pointer = xnn_params.allocator.aligned_allocate( in xnn_allocate_zero_simd_memory()
50 xnn_params.allocator.context, XNN_ALLOCATION_ALIGNMENT, memory_size); in xnn_allocate_zero_simd_memory()
58 xnn_params.allocator.aligned_deallocate(xnn_params.allocator.context, memory_pointer); in xnn_release_simd_memory()
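The allocator.h hits show that every allocation in the library is routed through function pointers held in `xnn_params.allocator`, so an embedder can swap in its own memory manager once, globally. The sketch below reproduces that shape with hypothetical names and only the plain allocate/deallocate pair; the hits above show the real struct also carries reallocate and aligned-allocation entry points.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical allocator vtable resembling the one the hits above dereference. */
struct allocator {
  void* context;                                    /* opaque user data */
  void* (*allocate)(void* context, size_t size);
  void  (*deallocate)(void* context, void* pointer);
};

static void* default_allocate(void* context, size_t size) {
  (void) context;
  return malloc(size);
}

static void default_deallocate(void* context, void* pointer) {
  (void) context;
  free(pointer);
}

/* Global parameter block holding the active allocator, mirroring xnn_params.allocator. */
static struct { struct allocator allocator; } params = {
  .allocator = { NULL, default_allocate, default_deallocate },
};

/* Thin wrappers in the spirit of xnn_allocate_memory / xnn_release_memory. */
static void* allocate_memory(size_t size) {
  return params.allocator.allocate(params.allocator.context, size);
}

static void release_memory(void* pointer) {
  params.allocator.deallocate(params.allocator.context, pointer);
}

int main(void) {
  int* buffer = allocate_memory(16 * sizeof(int));
  if (buffer != NULL) {
    buffer[0] = 42;
    printf("%d\n", buffer[0]);
    release_memory(buffer);
  }
  return 0;
}
```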
