Searched refs:functor (Results 1 – 25 of 748) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
unary_ops_composition.cc
160 #define REGISTER_COMPUTE_FN_HELPER(name, functor) \ argument
161 static_assert(std::is_same<functor::in_type, functor::out_type>::value, \
165 *out = in.unaryExpr(functor::func()); \
168 return Eigen::internal::functor_traits<functor::func>::Cost; \
177 auto relu = functor::Relu<Eigen::DefaultDevice, T>(); \
186 auto relu6 = functor::Relu6<Eigen::DefaultDevice, T>(); \
195 auto elu = functor::Elu<Eigen::DefaultDevice, T>(); \
204 auto selu = functor::Selu<Eigen::DefaultDevice, T>(); \
261 REGISTER_COMPUTE_FN_HELPER(Abs, functor::abs<T>);
262 REGISTER_COMPUTE_FN_HELPER(Acos, functor::acos<T>);
[all …]
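
The unary_ops_composition.cc hits show the core pattern behind these kernels: an element-wise functor is applied to the whole input through Eigen's unaryExpr, and Eigen::internal::functor_traits<func>::Cost feeds the cost estimate. Below is a minimal standalone sketch of that pattern, not the real macro expansion; Relu6Like is a hypothetical stand-in for TensorFlow's functor::Relu6.

#include <Eigen/Dense>
#include <algorithm>
#include <iostream>

// Hypothetical element-wise functor, shaped like the functors registered above.
struct Relu6Like {
  float operator()(float x) const { return std::min(std::max(x, 0.0f), 6.0f); }
};

int main() {
  Eigen::ArrayXf in(4);
  in << -1.0f, 2.0f, 5.0f, 9.0f;
  // Mirrors "*out = in.unaryExpr(functor::func());" from the hit above.
  Eigen::ArrayXf out = in.unaryExpr(Relu6Like());
  std::cout << out.transpose() << "\n";  // prints: 0 2 5 6
}
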
strided_slice_op_gpu_impl.h
34 template struct functor::StridedSlice<GPUDevice, T, 1>; \
35 template struct functor::StridedSlice<GPUDevice, T, 2>; \
36 template struct functor::StridedSlice<GPUDevice, T, 3>; \
37 template struct functor::StridedSlice<GPUDevice, T, 4>; \
38 template struct functor::StridedSlice<GPUDevice, T, 5>; \
39 template struct functor::StridedSlice<GPUDevice, T, 6>; \
40 template struct functor::StridedSlice<GPUDevice, T, 7>; \
41 template struct functor::StridedSlice<GPUDevice, T, 8>; \
42 template struct functor::StridedSliceGrad<GPUDevice, T, 1>; \
43 template struct functor::StridedSliceGrad<GPUDevice, T, 2>; \
[all …]
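
strided_slice_op_gpu_impl.h (like pad_op_gpu.cu.cc and slice_op_gpu.cu.cc further down) uses explicit template instantiation so the CUDA-compiled translation unit emits the functor for every supported rank. A small sketch of that idiom follows, with a hypothetical FillLike functor and a CpuDevice stand-in rather than the real StridedSlice functor and GPUDevice.

#include <cstddef>
#include <iostream>

namespace functor {

// Hypothetical device- and rank-parameterized functor.
template <typename Device, typename T, int NDIMS>
struct FillLike {
  void operator()(const Device& /*d*/, T* data, std::size_t n, T value) const {
    for (std::size_t i = 0; i < n; ++i) data[i] = value;
  }
};

}  // namespace functor

struct CpuDevice {};  // stand-in for GPUDevice in this sketch

// One explicit instantiation per rank, which is what the macro in the hit
// expands to for ranks 1 through 8.
template struct functor::FillLike<CpuDevice, float, 1>;
template struct functor::FillLike<CpuDevice, float, 2>;
template struct functor::FillLike<CpuDevice, float, 3>;

int main() {
  CpuDevice d;
  float buf[4];
  functor::FillLike<CpuDevice, float, 1>()(d, buf, 4, 2.5f);
  std::cout << buf[0] << "\n";  // 2.5
}
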
segment_reduction_ops_gpu_1.cu.cc
22 namespace functor { namespace
25 template struct SegmentReductionFunctor<T, Index, functor::Zero<T>, \
26 functor::NonAtomicSumOpGpu<T>, \
27 functor::AtomicSumOpGpu<T>>; \
28 template struct SegmentReductionFunctor<T, Index, functor::One<T>, \
29 functor::NonAtomicProdOpGpu<T>, \
30 functor::AtomicProdOpGpu<T>>; \
31 template struct SegmentReductionFunctor<T, Index, functor::Highest<T>, \
32 functor::NonAtomicMinOpGpu<T>, \
33 functor::AtomicMinOpGpu<T>>; \
[all …]
segment_reduction_ops_gpu_0.cu.cc
34 namespace functor { namespace
37 template struct SegmentReductionFunctor<T, Index, functor::Zero<T>, \
38 functor::NonAtomicSumOpGpu<T>, \
39 functor::AtomicSumOpGpu<T>>; \
40 template struct SegmentReductionFunctor<T, Index, functor::One<T>, \
41 functor::NonAtomicProdOpGpu<T>, \
42 functor::AtomicProdOpGpu<T>>; \
43 template struct SegmentReductionFunctor<T, Index, functor::Highest<T>, \
44 functor::NonAtomicMinOpGpu<T>, \
45 functor::AtomicMinOpGpu<T>>; \
[all …]
segment_reduction_ops_impl_3.cc
30 functor::UnsortedSegmentFunctor<CPUDevice, type, index_type, \
36 functor::Zero<type>, \
37 functor::SumOp<type>); \
39 functor::Lowest<type>, \
40 functor::MaxOp<type>); \
42 functor::Highest<type>, \
43 functor::MinOp<type>); \
45 functor::One<type>, \
46 functor::ProdOp<type>);
50 functor::Zero<type>, \
[all …]
segment_reduction_ops_impl_4.cc
30 functor::UnsortedSegmentFunctor<CPUDevice, type, index_type, \
36 functor::Zero<type>, \
37 functor::SumOp<type>); \
39 functor::Lowest<type>, \
40 functor::MaxOp<type>); \
42 functor::Highest<type>, \
43 functor::MinOp<type>); \
45 functor::One<type>, \
46 functor::ProdOp<type>);
50 functor::Zero<type>, \
[all …]
segment_reduction_ops_impl_2.cc
21 #define REGISTER_CPU_KERNEL_SEGMENT(name, functor, type, index_type, \ argument
28 SegmentReductionOp<CPUDevice, type, index_type, functor, default_value>)
76 functor::SegmentReductionFunctor< \
82 "SegmentSum", type, index_type, functor::Zero<type>, \
83 functor::NonAtomicSumOpGpu<type>, functor::AtomicSumOpGpu<type>); \
85 "SegmentProd", type, index_type, functor::One<type>, \
86 functor::NonAtomicProdOpGpu<type>, functor::AtomicProdOpGpu<type>); \
88 "SegmentMin", type, index_type, functor::Highest<type>, \
89 functor::NonAtomicMinOpGpu<type>, functor::AtomicMinOpGpu<type>); \
91 "SegmentMax", type, index_type, functor::Lowest<type>, \
[all …]
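
The segment_reduction hits all instantiate one SegmentReductionFunctor template with policy functors: an initial value (Zero, One, Lowest, Highest) plus non-atomic and atomic reduction ops. Here is a small sketch of that policy-based design under assumed names (no GPU or atomic variants, and a much simpler signature than the real functor).

#include <cstddef>
#include <iostream>
#include <limits>
#include <vector>

namespace functor {

// Hypothetical policy functors mirroring functor::Zero / functor::Highest and
// functor::SumOp / functor::MinOp from the hits above.
template <typename T> struct Zero    { T operator()() const { return T(0); } };
template <typename T> struct Highest { T operator()() const { return std::numeric_limits<T>::max(); } };
template <typename T> struct SumOp   { void operator()(T& acc, T v) const { acc += v; } };
template <typename T> struct MinOp   { void operator()(T& acc, T v) const { if (v < acc) acc = v; } };

// Hypothetical sorted-segment reduction: segment_ids must be non-decreasing.
template <typename T, typename Index, typename InitialValueF, typename ReductionF>
std::vector<T> SegmentReduce(const std::vector<T>& data,
                             const std::vector<Index>& segment_ids) {
  std::vector<T> out;
  if (segment_ids.empty()) return out;
  out.assign(static_cast<std::size_t>(segment_ids.back()) + 1, InitialValueF()());
  for (std::size_t i = 0; i < data.size(); ++i)
    ReductionF()(out[static_cast<std::size_t>(segment_ids[i])], data[i]);
  return out;
}

}  // namespace functor

int main() {
  std::vector<float> data = {1, 2, 3, 4};
  std::vector<int> ids = {0, 0, 1, 1};
  auto sums = functor::SegmentReduce<float, int, functor::Zero<float>, functor::SumOp<float>>(data, ids);
  auto mins = functor::SegmentReduce<float, int, functor::Highest<float>, functor::MinOp<float>>(data, ids);
  std::cout << sums[0] << ' ' << sums[1] << ' ' << mins[0] << ' ' << mins[1] << "\n";  // 3 7 1 3
}
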
relu_op.h
39 functor::Relu<Device, T> functor; in Operate() local
40 functor(context->eigen_device<Device>(), input.flat<T>(), in Operate()
87 functor::ReluGrad<Device, T> functor; in OperateNoTemplate() local
88 functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(), in OperateNoTemplate()
98 functor::Relu6<Device, T> functor; in Operate() local
99 functor(context->eigen_device<Device>(), input.flat<T>(), in Operate()
129 functor::Relu6Grad<Device, T> functor; in OperateNoTemplate() local
130 functor(context->eigen_device<Device>(), g.flat<T>(), a.flat<T>(), in OperateNoTemplate()
145 functor::LeakyRelu<Device, T> functor; in Operate() local
146 functor({context->eigen_device<Device>(), input.flat<T>(), alpha_, in Operate()
[all …]
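
In relu_op.h the kernel body is essentially two lines: declare the device-templated functor as a local, then invoke it with the Eigen device and the flattened tensors. A plain-C++ sketch of that call shape, with Device, ReluLike, and std::vector standing in for the TensorFlow types:

#include <cstddef>
#include <iostream>
#include <vector>

struct Device {};  // stand-in for the Eigen device handle

namespace functor {
// Hypothetical functor with the same "device, input, output" call shape.
template <typename D, typename T>
struct ReluLike {
  void operator()(const D& /*d*/, const std::vector<T>& in, std::vector<T>& out) const {
    out.resize(in.size());
    for (std::size_t i = 0; i < in.size(); ++i) out[i] = in[i] > T(0) ? in[i] : T(0);
  }
};
}  // namespace functor

int main() {
  Device d;
  std::vector<float> in = {-1.f, 0.5f, 2.f}, out;
  functor::ReluLike<Device, float> relu;  // functor declared as a local, as in Operate()
  relu(d, in, out);                       // invoked with the device and flat buffers
  for (float v : out) std::cout << v << ' ';
  std::cout << '\n';  // 0 0.5 2
}
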
cwise_op_div.cc
20 REGISTER6(BinaryOp, CPU, "Div", functor::div, float, Eigen::half, double,
22 REGISTER8(BinaryOp, CPU, "Div", functor::safe_div, uint8, uint16, uint32,
24 REGISTER8(BinaryOp, CPU, "TruncateDiv", functor::safe_div, uint8, uint16,
26 REGISTER6(BinaryOp, CPU, "RealDiv", functor::div, float, Eigen::half, double,
28 REGISTER6(BinaryOp, CPU, "DivNoNan", functor::div_no_nan, Eigen::half, float,
37 BinaryOp<CPUDevice, functor::safe_div<int32>>);
41 REGISTER9(BinaryOp, GPU, "Div", functor::div, float, Eigen::half, double, uint8,
43 REGISTER4(BinaryOp, GPU, "TruncateDiv", functor::div, uint8, uint16, int16,
45 REGISTER5(BinaryOp, GPU, "RealDiv", functor::div, float, Eigen::half, double,
47 REGISTER5(BinaryOp, GPU, "DivNoNan", functor::div_no_nan, Eigen::half, float,
[all …]
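
cwise_op_div.cc (and the other cwise_op_*.cc hits below) contains only REGISTER* macro calls that stamp out one kernel registration per element type, each binding BinaryOp to a functor such as functor::div. A toy sketch of that idea with a hypothetical string-keyed registry and a single-type REGISTER1_LIKE macro; the real REGISTER6/REGISTER8 macros and the TensorFlow kernel registry are far more involved.

#include <functional>
#include <iostream>
#include <map>
#include <string>

namespace functor {
template <typename T>
struct div { T operator()(T a, T b) const { return a / b; } };
}  // namespace functor

// Hypothetical registry keyed by "OpName_type".
std::map<std::string, std::function<double(double, double)>>& Registry() {
  static std::map<std::string, std::function<double(double, double)>> r;
  return r;
}

template <typename F>
bool RegisterBinaryOp(const std::string& key) {
  Registry()[key] = [](double a, double b) { return static_cast<double>(F()(a, b)); };
  return true;
}

// Hypothetical single-type registration macro ("functr" is the functor template).
#define REGISTER1_LIKE(name, functr, type) \
  static bool registered_##type = RegisterBinaryOp<functr<type>>(std::string(name) + "_" + #type);

REGISTER1_LIKE("Div", functor::div, float)
REGISTER1_LIKE("RealDiv", functor::div, double)

int main() {
  std::cout << Registry()["Div_float"](6.0, 3.0) << "\n";  // 2
}
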
pad_op_gpu.cu.cc
31 template struct functor::Pad<GPUDevice, T, Tpadding, 0>; \
32 template struct functor::Pad<GPUDevice, T, Tpadding, 1>; \
33 template struct functor::Pad<GPUDevice, T, Tpadding, 2>; \
34 template struct functor::Pad<GPUDevice, T, Tpadding, 3>; \
35 template struct functor::Pad<GPUDevice, T, Tpadding, 4>; \
36 template struct functor::Pad<GPUDevice, T, Tpadding, 5>; \
37 template struct functor::Pad<GPUDevice, T, Tpadding, 6>; \
38 template struct functor::Pad<GPUDevice, T, Tpadding, 7>; \
39 template struct functor::Pad<GPUDevice, T, Tpadding, 8>;
segment_reduction_ops_gpu_2.cu.cc
22 namespace functor { namespace
26 GPUDevice, T, Index, functor::Lowest<T>, functor::AtomicMaxOpGpu<T>>; \
28 GPUDevice, T, Index, functor::Highest<T>, functor::AtomicMinOpGpu<T>>; \
29 template struct UnsortedSegmentFunctor<GPUDevice, T, Index, functor::One<T>, \
30 functor::AtomicProdOpGpu<T>>;
35 GPUDevice, T, Index, functor::Zero<T>, functor::AtomicSumOpGpu<T>>;
segment_reduction_ops_impl_1.cc
104 #define REGISTER_CPU_KERNEL_SEGMENT(name, functor, type, index_type, \ argument
111 SegmentReductionOp<CPUDevice, type, index_type, functor, default_value>)
159 functor::SegmentReductionFunctor< \
165 "SegmentSum", type, index_type, functor::Zero<type>, \
166 functor::NonAtomicSumOpGpu<type>, functor::AtomicSumOpGpu<type>); \
168 "SegmentProd", type, index_type, functor::One<type>, \
169 functor::NonAtomicProdOpGpu<type>, functor::AtomicProdOpGpu<type>); \
171 "SegmentMin", type, index_type, functor::Highest<type>, \
172 functor::NonAtomicMinOpGpu<type>, functor::AtomicMinOpGpu<type>); \
174 "SegmentMax", type, index_type, functor::Lowest<type>, \
[all …]
training_ops_gpu.cu.cc
28 namespace functor { namespace
1079 template struct functor::ApplyGradientDescent<GPUDevice, Eigen::half>;
1080 template struct functor::ApplyGradientDescent<GPUDevice, float>;
1081 template struct functor::ApplyGradientDescent<GPUDevice, double>;
1082 template struct functor::ApplyGradientDescent<GPUDevice, complex64>;
1083 template struct functor::ApplyGradientDescent<GPUDevice, complex128>;
1085 template struct functor::ApplyAdagrad<GPUDevice, Eigen::half>;
1086 template struct functor::ApplyAdagrad<GPUDevice, float>;
1087 template struct functor::ApplyAdagrad<GPUDevice, double>;
1088 template struct functor::ApplyAdagrad<GPUDevice, complex64>;
[all …]
slice_op_gpu.cu.cc
31 template struct functor::Slice<GPUDevice, T, 1>; \
32 template struct functor::Slice<GPUDevice, T, 2>; \
33 template struct functor::Slice<GPUDevice, T, 3>; \
34 template struct functor::Slice<GPUDevice, T, 4>; \
35 template struct functor::Slice<GPUDevice, T, 5>; \
36 template struct functor::Slice<GPUDevice, T, 6>; \
37 template struct functor::Slice<GPUDevice, T, 7>; \
38 template struct functor::Slice<GPUDevice, T, 8>;
cwise_op_add_1.cc
19 REGISTER6(BinaryOp, CPU, "Add", functor::add, float, Eigen::half, double, int32,
24 REGISTER6(BinaryOp, CPU, "AddV2", functor::add, float, Eigen::half, double,
27 REGISTER(BinaryOp, CPU, "AddV2", functor::add, bfloat16);
33 REGISTER3(BinaryOp, GPU, "Add", functor::add, float, Eigen::half, double);
34 REGISTER3(BinaryOp, GPU, "AddV2", functor::add, float, Eigen::half, double);
46 BinaryOp<CPUDevice, functor::add<int32>>);
53 BinaryOp<CPUDevice, functor::add<int32>>);
61 BinaryOp<CPUDevice, functor::add<int32>>);
68 BinaryOp<CPUDevice, functor::add<int32>>);
cwise_op_reciprocal.cc
19 REGISTER5(UnaryOp, CPU, "Inv", functor::inverse, float, Eigen::half, double,
23 REGISTER6(UnaryOp, GPU, "Inv", functor::inverse, float, Eigen::half, double,
28 REGISTER5(SimpleBinaryOp, CPU, "InvGrad", functor::inverse_grad, float,
31 REGISTER3(SimpleBinaryOp, GPU, "InvGrad", functor::inverse_grad, float,
35 REGISTER6(UnaryOp, CPU, "Reciprocal", functor::inverse, float, Eigen::half,
39 REGISTER6(UnaryOp, GPU, "Reciprocal", functor::inverse, float, Eigen::half,
44 REGISTER6(SimpleBinaryOp, CPU, "ReciprocalGrad", functor::inverse_grad, float,
47 REGISTER3(SimpleBinaryOp, GPU, "ReciprocalGrad", functor::inverse_grad, float,
cwise_op_mod.cc
19 REGISTER2(BinaryOp, CPU, "Mod", functor::safe_mod, int32, int64);
20 REGISTER2(BinaryOp, CPU, "Mod", functor::fmod, float, double);
21 REGISTER2(BinaryOp, CPU, "TruncateMod", functor::safe_mod, int32, int64);
22 REGISTER2(BinaryOp, CPU, "TruncateMod", functor::fmod, float, double);
34 BinaryOp<CPUDevice, functor::safe_mod<int32>>);
41 BinaryOp<CPUDevice, functor::safe_mod<int32>>);
49 BinaryOp<CPUDevice, functor::safe_mod<int32>>);
56 BinaryOp<CPUDevice, functor::safe_mod<int32>>);
cwise_op_abs.cc
22 REGISTER8(UnaryOp, CPU, "Abs", functor::abs, Eigen::half, bfloat16, float,
25 REGISTER(UnaryOp, CPU, "Abs", functor::abs, bfloat16);
28 REGISTER2(UnaryOp, CPU, "ComplexAbs", functor::abs, complex64, complex128);
32 REGISTER4(UnaryOp, GPU, "Abs", functor::abs, Eigen::half, float, double, int64);
33 REGISTER2(UnaryOp, GPU, "ComplexAbs", functor::abs, complex64, complex128);
44 UnaryOp<CPUDevice, functor::abs<int32>>);
51 UnaryOp<CPUDevice, functor::abs<int32>>);
cwise_op_mul_1.cc
20 REGISTER6(BinaryOp, CPU, "Mul", functor::mul, float, Eigen::half, double, uint8,
22 REGISTER6(BinaryOp, CPU, "MulNoNan", functor::mul_no_nan, Eigen::half, float,
29 REGISTER(BinaryOp, CPU, "Mul", functor::mul, int32);
34 REGISTER4(BinaryOp, GPU, "Mul", functor::mul, Eigen::half, float, double,
46 BinaryOp<CPUDevice, functor::mul<int32>>);
54 BinaryOp<CPUDevice, functor::mul<int32>>);
58 REGISTER5(BinaryOp, GPU, "MulNoNan", functor::mul_no_nan, Eigen::half, float,
/external/tensorflow/tensorflow/core/kernels/special_math/
special_math_op_bessel.cc
20 REGISTER3(UnaryOp, CPU, "BesselI0", functor::bessel_i0, Eigen::half, float,
22 REGISTER3(UnaryOp, CPU, "BesselI1", functor::bessel_i1, Eigen::half, float,
24 REGISTER3(UnaryOp, CPU, "BesselI0e", functor::bessel_i0e, Eigen::half, float,
26 REGISTER3(UnaryOp, CPU, "BesselI1e", functor::bessel_i1e, Eigen::half, float,
29 REGISTER3(UnaryOp, CPU, "BesselK0", functor::bessel_k0, Eigen::half, float,
31 REGISTER3(UnaryOp, CPU, "BesselK1", functor::bessel_k1, Eigen::half, float,
33 REGISTER3(UnaryOp, CPU, "BesselK0e", functor::bessel_k0e, Eigen::half, float,
35 REGISTER3(UnaryOp, CPU, "BesselK1e", functor::bessel_k1e, Eigen::half, float,
38 REGISTER3(UnaryOp, CPU, "BesselJ0", functor::bessel_j0, Eigen::half, float,
40 REGISTER3(UnaryOp, CPU, "BesselJ1", functor::bessel_j1, Eigen::half, float,
[all …]
/external/tensorflow/tensorflow/core/kernels/image/
mirror_pad_op_gpu.cu.cc
28 template struct functor::MirrorPad<GpuDevice, T, int32, 1>; \
29 template struct functor::MirrorPad<GpuDevice, T, int32, 2>; \
30 template struct functor::MirrorPad<GpuDevice, T, int32, 3>; \
31 template struct functor::MirrorPad<GpuDevice, T, int32, 4>; \
32 template struct functor::MirrorPad<GpuDevice, T, int32, 5>; \
33 template struct functor::MirrorPad<GpuDevice, T, int64, 1>; \
34 template struct functor::MirrorPad<GpuDevice, T, int64, 2>; \
35 template struct functor::MirrorPad<GpuDevice, T, int64, 3>; \
36 template struct functor::MirrorPad<GpuDevice, T, int64, 4>; \
37 template struct functor::MirrorPad<GpuDevice, T, int64, 5>; \
[all …]
adjust_contrast_op_gpu.cu.cc
29 template struct functor::AdjustContrastv2<GPUDevice, float>;
30 template struct functor::AdjustContrastv2<GPUDevice, Eigen::half>;
33 template struct functor::AdjustContrast<GPUDevice, uint8>;
34 template struct functor::AdjustContrast<GPUDevice, int8>;
35 template struct functor::AdjustContrast<GPUDevice, int16>;
36 template struct functor::AdjustContrast<GPUDevice, int32>;
37 template struct functor::AdjustContrast<GPUDevice, int64>;
38 template struct functor::AdjustContrast<GPUDevice, float>;
39 template struct functor::AdjustContrast<GPUDevice, double>;
/external/webrtc/rtc_base/
callback.h
77 Callback0(const T& functor) in Callback0() argument
78 : helper_(new RefCountedObject<HelperImpl<T> >(functor)) {} in Callback0()
93 explicit HelperImpl(const T& functor) : functor_(functor) {} in HelperImpl()
106 Callback1(const T& functor) in Callback1() argument
107 : helper_(new RefCountedObject<HelperImpl<T> >(functor)) {} in Callback1()
122 explicit HelperImpl(const T& functor) : functor_(functor) {} in HelperImpl()
135 Callback2(const T& functor) in Callback2() argument
136 : helper_(new RefCountedObject<HelperImpl<T> >(functor)) {} in Callback2()
151 explicit HelperImpl(const T& functor) : functor_(functor) {} in HelperImpl()
164 Callback3(const T& functor) in Callback3() argument
[all …]
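
The rtc_base/callback.h hits show CallbackN wrappers that copy an arbitrary functor into a ref-counted HelperImpl and invoke it through a virtual call. Below is a minimal type-erasure sketch of the same idea (std::shared_ptr instead of RefCountedObject, and only the zero-argument case); it is not the actual rtc::Callback0 implementation.

#include <iostream>
#include <memory>

// Hypothetical zero-argument callback, loosely shaped like rtc::Callback0<R>.
template <typename R>
class Callback0 {
 public:
  Callback0() = default;
  template <typename T>
  Callback0(const T& functor) : helper_(std::make_shared<HelperImpl<T>>(functor)) {}
  R operator()() { return helper_->Run(); }

 private:
  struct Helper {
    virtual ~Helper() = default;
    virtual R Run() = 0;
  };
  template <typename T>
  struct HelperImpl : Helper {
    explicit HelperImpl(const T& functor) : functor_(functor) {}
    R Run() override { return functor_(); }
    T functor_;  // the functor is stored by value, as in the hit above
  };
  std::shared_ptr<Helper> helper_;
};

int main() {
  int calls = 0;
  Callback0<int> cb([&calls] { return ++calls; });
  std::cout << cb() << "\n";  // 1
  std::cout << cb() << "\n";  // 2
}
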
/external/armnn/profiling/common/src/
CommandHandlerRegistry.cpp
18 void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor, in RegisterFunctor() argument
23 ARM_PIPE_ASSERT_MSG(functor, "Provided functor should not be a nullptr"); in RegisterFunctor()
26 registry[key] = functor; in RegisterFunctor()
29 void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor) in RegisterFunctor() argument
31 ARM_PIPE_ASSERT_MSG(functor, "Provided functor should not be a nullptr"); in RegisterFunctor()
33 RegisterFunctor(functor, functor->GetFamilyId(), functor->GetPacketId(), functor->GetVersion()); in RegisterFunctor()
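
CommandHandlerRegistry.cpp registers a CommandHandlerFunctor pointer under a key built from its family id, packet id, and version, asserting it is not null; the one-argument overload simply reads those three values off the functor itself. A minimal sketch of that registry shape, with hypothetical types and a std::tuple key instead of the real CommandHandlerKey:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <map>
#include <tuple>

// Hypothetical handler interface, loosely shaped like CommandHandlerFunctor.
struct HandlerFunctor {
  virtual ~HandlerFunctor() = default;
  virtual std::uint32_t GetFamilyId() const = 0;
  virtual std::uint32_t GetPacketId() const = 0;
  virtual std::uint32_t GetVersion() const = 0;
  virtual void operator()() = 0;
};

class HandlerRegistry {
 public:
  void RegisterFunctor(HandlerFunctor* functor, std::uint32_t family,
                       std::uint32_t packet, std::uint32_t version) {
    assert(functor != nullptr && "Provided functor should not be a nullptr");
    registry_[std::make_tuple(family, packet, version)] = functor;
  }
  // Convenience overload: read the key fields off the functor, as in the hit.
  void RegisterFunctor(HandlerFunctor* functor) {
    assert(functor != nullptr && "Provided functor should not be a nullptr");
    RegisterFunctor(functor, functor->GetFamilyId(), functor->GetPacketId(),
                    functor->GetVersion());
  }

 private:
  std::map<std::tuple<std::uint32_t, std::uint32_t, std::uint32_t>,
           HandlerFunctor*> registry_;
};

struct PingHandler : HandlerFunctor {
  std::uint32_t GetFamilyId() const override { return 0; }
  std::uint32_t GetPacketId() const override { return 5; }
  std::uint32_t GetVersion() const override { return 1; }
  void operator()() override { std::cout << "ping handled\n"; }
};

int main() {
  HandlerRegistry registry;
  PingHandler ping;
  registry.RegisterFunctor(&ping);  // keyed by (family, packet, version)
}
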
/external/tensorflow/tensorflow/core/kernels/sparse/
transpose_op.h
24 namespace functor {
34 functor::UnaryFunctor<Device, functor::conj<complex64>> conj;
43 functor::UnaryFunctor<Device, functor::conj<complex128>> conj;
57 functor::UnaryFunctor<Device, functor::conj<complex64>> conj;
65 functor::UnaryFunctor<Device, functor::conj<complex128>> conj;
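
In sparse/transpose_op.h the hits show the conjugate-transpose path: a functor::UnaryFunctor parameterized with functor::conj runs over the complex64/complex128 value buffer while the indices are transposed. A tiny sketch of just the conjugation step, using std::conj over a std::vector rather than TensorFlow's UnaryFunctor:

#include <complex>
#include <iostream>
#include <vector>

// Hypothetical element-wise conj functor, shaped like functor::conj<T>.
template <typename T>
struct ConjFunctor {
  std::complex<T> operator()(const std::complex<T>& v) const { return std::conj(v); }
};

int main() {
  std::vector<std::complex<float>> values = {{1.f, 2.f}, {3.f, -4.f}};
  ConjFunctor<float> conj;
  for (auto& v : values) v = conj(v);  // conjugate the values of the transposed matrix
  std::cout << values[0] << ' ' << values[1] << "\n";  // (1,-2) (3,4)
}
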