/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AVR/ |
D | AVRDevices.td |
      2 // AVR Device Definitions
    217 class Device<string Name, Family Fam, ELFArch Arch,
    225 def : Device<"avr1", FamilyAVR1, ELFArchAVR1>;
    226 def : Device<"avr2", FamilyAVR2, ELFArchAVR2>;
    227 def : Device<"avr25", FamilyAVR25, ELFArchAVR25>;
    228 def : Device<"avr3", FamilyAVR3, ELFArchAVR3>;
    229 def : Device<"avr31", FamilyAVR31, ELFArchAVR31>;
    230 def : Device<"avr35", FamilyAVR35, ELFArchAVR35>;
    231 def : Device<"avr4", FamilyAVR4, ELFArchAVR4>;
    232 def : Device<"avr5", FamilyAVR5, ELFArchAVR5>;
    [all …]
|
/external/llvm/lib/Target/AVR/ |
D | AVR.td |
    235 class Device<string Name, Family Fam, ELFArch Arch,
    243 def : Device<"avr1", FamilyAVR1, ELFArchAVR1>;
    244 def : Device<"avr2", FamilyAVR2, ELFArchAVR2>;
    245 def : Device<"avr25", FamilyAVR25, ELFArchAVR25>;
    246 def : Device<"avr3", FamilyAVR3, ELFArchAVR3>;
    247 def : Device<"avr31", FamilyAVR31, ELFArchAVR31>;
    248 def : Device<"avr35", FamilyAVR35, ELFArchAVR35>;
    249 def : Device<"avr4", FamilyAVR4, ELFArchAVR4>;
    250 def : Device<"avr5", FamilyAVR5, ELFArchAVR5>;
    251 def : Device<"avr51", FamilyAVR51, ELFArchAVR51>;
    [all …]
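Both AVR backend copies above declare the supported device families as TableGen Device<Name, Family, Arch> records. As a rough, hedged illustration of what those records boil down to, and not the code TableGen actually emits, here is a C++ sketch of a name-to-family/ELF-arch lookup table; the enum, struct, and numeric values are assumptions made for this example.

    // Illustrative sketch only: a hand-written table in the spirit of the
    // TableGen Device<Name, Family, Arch> records listed above. Names, enum
    // members, and ELF arch numbers are assumptions for this example.
    #include <cstring>

    enum class AVRFamily { AVR1, AVR2, AVR25, AVR3, AVR31, AVR35, AVR4, AVR5, AVR51 };

    struct AVRDeviceInfo {
      const char* Name;   // generic family name, e.g. "avr25"
      AVRFamily Family;   // feature set the device belongs to
      unsigned ELFArch;   // architecture value recorded in the ELF header
    };

    static const AVRDeviceInfo kAVRDevices[] = {
        {"avr1", AVRFamily::AVR1, 1},    {"avr2", AVRFamily::AVR2, 2},
        {"avr25", AVRFamily::AVR25, 25}, {"avr3", AVRFamily::AVR3, 3},
        {"avr31", AVRFamily::AVR31, 31}, {"avr35", AVRFamily::AVR35, 35},
        {"avr4", AVRFamily::AVR4, 4},    {"avr5", AVRFamily::AVR5, 5},
        {"avr51", AVRFamily::AVR51, 51},
    };

    // Linear lookup by device name; returns nullptr for unknown devices.
    inline const AVRDeviceInfo* LookupAVRDevice(const char* name) {
      for (const AVRDeviceInfo& d : kAVRDevices)
        if (std::strcmp(d.Name, name) == 0) return &d;
      return nullptr;
    }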
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_device_ops.h |
     66 .Device(DEVICE) \
     73 .Device(DEVICE) \
     81 REGISTER_KERNEL_BUILDER(Name("_XlaRun").Device(DEVICE), KERNEL);
     84 REGISTER_KERNEL_BUILDER(Name("_Send").Device(DEVICE), SendOp); \
     85 REGISTER_KERNEL_BUILDER(Name("_Recv").Device(DEVICE), RecvOp); \
     87 Name("_HostSend").Device(DEVICE).HostMemory("tensor"), SendOp); \
     89 Name("_HostRecv").Device(DEVICE).HostMemory("tensor"), RecvOp); \
     91 Name("_HostCast").Device(DEVICE).HostMemory("x").HostMemory("y"), \
     93 REGISTER_KERNEL_BUILDER(Name("NoOp").Device(DEVICE), NoOp); \
     95 Name("Const").Device(DEVICE).TypeConstraint("dtype", TYPES), \
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | relu_op.h |
     33 template <typename Device, typename T>
     34 class ReluOp : public UnaryElementWiseOp<T, ReluOp<Device, T>> {
     36 using UnaryElementWiseOp<T, ReluOp<Device, T>>::UnaryElementWiseOp;
     39 functor::Relu<Device, T> functor; in Operate()
     40 functor(context->eigen_device<Device>(), input.flat<T>(), in Operate()
     61 template <typename Device, typename T>
     62 class ReluGradOp : public BinaryElementWiseOp<T, ReluGradOp<Device, T>> {
     64 using BinaryElementWiseOp<T, ReluGradOp<Device, T>>::BinaryElementWiseOp;
     82 template <typename Device, typename T>
     83 void ReluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context, in OperateNoTemplate()
    [all …]
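relu_op.h shows TensorFlow's usual split: a thin OpKernel (ReluOp/ReluGradOp) that forwards to a Device-templated functor, which in turn is a single Eigen expression evaluated on context->eigen_device<Device>(). A minimal stand-alone sketch of that pattern, assuming plain Eigen tensors instead of TensorFlow's TTypes<T> views and an invented functor name:

    // Minimal sketch of the Device-templated functor pattern (assumes plain
    // Eigen tensors; the real functor in relu_op_functor.h works on TF flats).
    #include <unsupported/Eigen/CXX11/Tensor>

    template <typename Device, typename T>
    struct ReluSketch {
      template <typename In, typename Out>
      void operator()(const Device& d, const In& features, Out& activations) const {
        // Elementwise max(x, 0); .device(d) decides where the loop runs.
        activations.device(d) = features.cwiseMax(static_cast<T>(0));
      }
    };

    int main() {
      Eigen::Tensor<float, 1> x(4), y(4);
      x.setValues({-1.f, 0.f, 2.f, -3.f});
      Eigen::DefaultDevice dev;  // single-threaded CPU evaluation
      ReluSketch<Eigen::DefaultDevice, float>()(dev, x, y);  // y = {0, 0, 2, 0}
      return 0;
    }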
|
D | shape_ops.cc |
     26 .Device(DEVICE_CPU)
     31 .Device(DEVICE_CPU)
     39 .Device(DEVICE_SYCL) \
     45 .Device(DEVICE_SYCL) \
     56 .Device(DEVICE_SYCL)
     63 .Device(DEVICE_SYCL)
     74 .Device(DEVICE_GPU) \
     80 .Device(DEVICE_GPU) \
     95 .Device(DEVICE_GPU)
    102 .Device(DEVICE_GPU)
    [all …]
|
D | stack_ops.cc |
     43 REGISTER_KERNEL_BUILDER(Name("Stack").Device(DEVICE_CPU), StackOp);
     44 REGISTER_KERNEL_BUILDER(Name("Stack").Device(DEVICE_GPU).HostMemory("handle"),
     46 REGISTER_KERNEL_BUILDER(Name("StackV2").Device(DEVICE_CPU), StackOp);
     48 .Device(DEVICE_GPU)
     53 REGISTER_KERNEL_BUILDER(Name("Stack").Device(DEVICE_SYCL).HostMemory("handle"),
     56 .Device(DEVICE_SYCL)
     62 REGISTER_KERNEL_BUILDER(Name("StackPush").Device(DEVICE_CPU),
     64 REGISTER_KERNEL_BUILDER(Name("StackPushV2").Device(DEVICE_CPU),
     69 .Device(DEVICE_GPU) \
     74 .Device(DEVICE_GPU) \
    [all …]
|
D | identity_op.cc |
     26 REGISTER_KERNEL_BUILDER(Name("Identity").Device(DEVICE_CPU), IdentityOp);
     29 REGISTER_KERNEL_BUILDER(Name("StopGradient").Device(DEVICE_CPU), IdentityOp);
     32 REGISTER_KERNEL_BUILDER(Name("PreventGradient").Device(DEVICE_CPU), IdentityOp);
     36 REGISTER_KERNEL_BUILDER(Name("PlaceholderWithDefault").Device(DEVICE_CPU),
     39 REGISTER_KERNEL_BUILDER(Name("RefIdentity").Device(DEVICE_CPU), IdentityOp);
     43 REGISTER_KERNEL_BUILDER(Name("DebugGradientIdentity").Device(DEVICE_CPU),
     45 REGISTER_KERNEL_BUILDER(Name("DebugGradientRefIdentity").Device(DEVICE_CPU),
     51 Name("Identity").Device(DEVICE_SYCL).TypeConstraint<type>("T"), \
     54 Name("PreventGradient").Device(DEVICE_SYCL).TypeConstraint<type>("T"), \
     57 Name("RefIdentity").Device(DEVICE_SYCL).TypeConstraint<type>("T"), \
    [all …]
|
D | control_flow_ops.cc |
     44 .Device(DEVICE_CPU) \
     51 .Device(DEVICE_CPU) \
     58 .Device(DEVICE_GPU) \
     65 .Device(DEVICE_GPU) \
     93 .Device(DEVICE_GPU) \
    103 .Device(DEVICE_GPU) \
    125 .Device(DEVICE_SYCL) \
    133 .Device(DEVICE_SYCL) \
    144 .Device(DEVICE_SYCL) \
    158 .Device(DEVICE_SYCL) \
    [all …]
|
D | relu_op.cc |
     38 Name("Relu").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     41 Name("ReluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     44 Name("Relu6").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     47 Name("Relu6Grad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     50 Name("LeakyRelu").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     53 Name("LeakyReluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     61 Name("Elu").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     64 Name("EluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     67 Name("Selu").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
     70 Name("SeluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
    [all …]
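relu_op.cc, like most of the kernel files above, stamps out one REGISTER_KERNEL_BUILDER call per (op, device, dtype) combination through a local macro. A hedged sketch of that registration idiom for a hypothetical "MySquare" op: the op name and kernel are invented, only the Name/Device/TypeConstraint builder calls mirror the registration API shown above, and a matching REGISTER_OP("MySquare") definition with a "T" attr is assumed to exist elsewhere.

    // Sketch of the per-type registration macro pattern. "MySquare" and
    // MySquareOp are hypothetical; a REGISTER_OP("MySquare") definition is
    // assumed to exist elsewhere.
    #include <cstdint>
    #include "tensorflow/core/framework/op_kernel.h"

    using namespace tensorflow;

    template <typename T>
    class MySquareOp : public OpKernel {
     public:
      explicit MySquareOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
      void Compute(OpKernelContext* ctx) override {
        const Tensor& in = ctx->input(0);
        Tensor* out = nullptr;
        OP_REQUIRES_OK(ctx, ctx->allocate_output(0, in.shape(), &out));
        auto x = in.flat<T>();
        auto y = out->flat<T>();
        for (int64_t i = 0; i < x.size(); ++i) y(i) = x(i) * x(i);  // x^2
      }
    };

    // One registration per dtype on the CPU device, keyed on the "T" attr.
    #define REGISTER_MYSQUARE_CPU(type)                                  \
      REGISTER_KERNEL_BUILDER(                                           \
          Name("MySquare").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
          MySquareOp<type>)

    REGISTER_MYSQUARE_CPU(float);
    REGISTER_MYSQUARE_CPU(double);
    #undef REGISTER_MYSQUARE_CPU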
|
D | queue_ops.cc |
     27 REGISTER_KERNEL_BUILDER(Name("QueueEnqueue").Device(DEVICE_CPU), EnqueueOp);
     28 REGISTER_KERNEL_BUILDER(Name("QueueEnqueueV2").Device(DEVICE_CPU), EnqueueOp);
     30 REGISTER_KERNEL_BUILDER(Name("QueueEnqueueMany").Device(DEVICE_CPU),
     32 REGISTER_KERNEL_BUILDER(Name("QueueEnqueueManyV2").Device(DEVICE_CPU),
     35 REGISTER_KERNEL_BUILDER(Name("QueueDequeue").Device(DEVICE_CPU), DequeueOp);
     36 REGISTER_KERNEL_BUILDER(Name("QueueDequeueV2").Device(DEVICE_CPU), DequeueOp);
     38 REGISTER_KERNEL_BUILDER(Name("QueueDequeueMany").Device(DEVICE_CPU),
     40 REGISTER_KERNEL_BUILDER(Name("QueueDequeueManyV2").Device(DEVICE_CPU),
     43 REGISTER_KERNEL_BUILDER(Name("QueueDequeueUpTo").Device(DEVICE_CPU),
     45 REGISTER_KERNEL_BUILDER(Name("QueueDequeueUpToV2").Device(DEVICE_CPU),
    [all …]
|
D | aggregate_ops.cc |
     41 template <typename Device, typename T>
     88 functor::Add2Functor<Device, T> functor2; in Compute()
     89 functor2(ctx->template eigen_device<Device>(), To, I(0), I(1)); in Compute()
     96 functor::Add2Functor<Device, T> functor2; in Compute()
     97 functor2(ctx->template eigen_device<Device>(), To, I(0), I(1)); in Compute()
    101 functor::Add3Functor<Device, T> functor3; in Compute()
    102 functor3(ctx->template eigen_device<Device>(), To, I(0), I(1), I(2)); in Compute()
    106 functor::Add4Functor<Device, T> functor4; in Compute()
    107 functor4(ctx->template eigen_device<Device>(), To, I(0), I(1), I(2), in Compute()
    112 functor::Add5Functor<Device, T> functor5; in Compute()
    [all …]
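aggregate_ops.cc dispatches AddN on the input count so that small sums become a single fused Eigen expression (Add2Functor, Add3Functor, ...) rather than a chain of pairwise additions. A simplified sketch of that idea with plain Eigen tensors; the names and the fixed arities here are illustrative, not the real functor definitions:

    // Simplified sketch of the fixed-arity Add functors: each one evaluates a
    // single fused Eigen expression, so the inputs are read once per sum.
    #include <unsupported/Eigen/CXX11/Tensor>

    template <typename Device, typename T>
    struct Add2Sketch {
      template <typename Out, typename In>
      void operator()(const Device& d, Out& out, const In& a, const In& b) const {
        out.device(d) = a + b;
      }
    };

    template <typename Device, typename T>
    struct Add3Sketch {
      template <typename Out, typename In>
      void operator()(const Device& d, Out& out, const In& a, const In& b,
                      const In& c) const {
        out.device(d) = a + b + c;  // one pass instead of two pairwise adds
      }
    };

    int main() {
      Eigen::Tensor<float, 1> a(3), b(3), c(3), sum(3);
      a.setConstant(1.f);
      b.setConstant(2.f);
      c.setConstant(3.f);
      Eigen::DefaultDevice dev;
      Add3Sketch<Eigen::DefaultDevice, float>()(dev, sum, a, b, c);  // sum == 6
      return 0;
    }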
|
D | transpose_functor.h |
     34 template <typename Device>
     35 Status DoTranspose(const Device& device, const Tensor& in,
     45 template <typename Device>
     46 Status DoConjugateTranspose(const Device& device, const Tensor& in,
     51 template <typename Device>
     52 Status DoMatrixTranspose(const Device& device, const Tensor& in, Tensor* out);
     56 template <typename Device>
     57 Status DoConjugateMatrixTranspose(const Device& device, const Tensor& in,
     61 template <typename Device, typename T, bool conjugate = false>
     63 static void run(const Device& d, const Tensor& in,
    [all …]
|
D | tile_functor.h |
     30 template <typename Device, typename T>
     31 void TileSimple(const Device& d, Tensor* out, const Tensor& in);
     33 template <typename Device, typename T, typename Tmultiples, int NDIM>
     34 void TileUsingEigen(const Device& d, Tensor* out, const Tensor& in, in TileUsingEigen()
     43 if (use_32bit && Eigen::internal::is_same<Device, Eigen::GpuDevice>::value) { in TileUsingEigen()
     51 template <typename Device, typename T, typename Tmultiples>
     52 void TileUsingEigen(const Device& d, Tensor* out, const Tensor& in, in TileUsingEigen()
     64 template <typename Device, typename T, typename Tmultiples>
     66 void operator()(const Device& d, Tensor* out, const Tensor& in, in operator()
     70 internal::TileUsingEigen<Device, T, Tmultiples>(d, out, in, in operator()
    [all …]
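TileUsingEigen maps the Tile op onto Eigen's broadcast() expression, which repeats the input the requested number of times along each dimension. A small hedged sketch of that call with fixed shapes; the shapes and multiples are illustrative only:

    // Sketch: tiling a rank-2 tensor by repeating it multiples[i] times along
    // each dimension, which is what Eigen's broadcast() expression computes.
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<int, 2> in(2, 3);
      in.setValues({{1, 2, 3}, {4, 5, 6}});

      Eigen::array<Eigen::Index, 2> multiples = {2, 2};  // repeat 2x per dim
      Eigen::Tensor<int, 2> out(4, 6);

      Eigen::DefaultDevice dev;
      out.device(dev) = in.broadcast(multiples);  // out is the 4x6 tiled copy
      return 0;
    }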
|
D | debug_ops.cc |
     27 REGISTER_KERNEL_BUILDER(Name("Copy").Device(DEVICE_CPU), CopyOp);
     29 REGISTER_KERNEL_BUILDER(Name("CopyHost").Device(DEVICE_CPU), CopyOp);
     32 REGISTER_KERNEL_BUILDER(Name("Copy").Device(DEVICE_GPU), CopyOp);
     35 .Device(DEVICE_GPU)
     42 REGISTER_KERNEL_BUILDER(Name("Copy").Device(DEVICE_SYCL), CopyOp);
     45 .Device(DEVICE_SYCL)
     52 REGISTER_KERNEL_BUILDER(Name("DebugIdentity").Device(DEVICE_CPU),
     57 .Device(DEVICE_GPU)
     65 .Device(DEVICE_SYCL)
     74 Name("DebugNanCount").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
    [all …]
|
D | training_ops.h |
     30 template <typename Device, typename T>
     32 void operator()(const Device& d, typename TTypes<T>::Flat var,
     37 template <typename Device, typename T>
     39 void operator()(const Device& d, typename TTypes<T>::Flat var,
     48 template <typename Device, typename T>
     50 void operator()(const Device& d, typename TTypes<T>::Flat var,
     57 template <typename Device, typename T>
     59 void operator()(const Device& d, typename TTypes<T>::Flat var,
     66 template <typename Device, typename T>
     68 void operator()(const Device& d, typename TTypes<T>::Flat var,
    [all …]
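Every functor in training_ops.h is one in-place Eigen update parameterized on Device; the simplest case is a plain gradient-descent step. A sketch of that shape with an invented functor name and plain Eigen tensors, not the exact ApplyGradientDescent signature:

    // Sketch of a gradient-descent style update functor: var -= lr * grad,
    // evaluated in a single pass on whichever Eigen device is supplied.
    #include <unsupported/Eigen/CXX11/Tensor>

    template <typename Device, typename T>
    struct ApplySGDSketch {
      template <typename Var, typename Grad>
      void operator()(const Device& d, Var& var, T lr, const Grad& grad) const {
        var.device(d) -= grad * lr;  // in-place update on device d
      }
    };

    int main() {
      Eigen::Tensor<float, 1> var(3), grad(3);
      var.setConstant(1.0f);
      grad.setConstant(0.5f);
      Eigen::DefaultDevice dev;
      ApplySGDSketch<Eigen::DefaultDevice, float>()(dev, var, 0.1f, grad);  // var == 0.95
      return 0;
    }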
|
D | softsign_op.cc |
     33 template <typename Device, typename T>
     34 class SoftsignOp : public UnaryElementWiseOp<T, SoftsignOp<Device, T>> {
     37 : UnaryElementWiseOp<T, SoftsignOp<Device, T>>(context) {} in SoftsignOp()
     40 functor::Softsign<Device, T> functor; in Operate()
     41 functor(context->eigen_device<Device>(), input.flat<T>(), in Operate()
     46 template <typename Device, typename T>
     48 : public BinaryElementWiseOp<T, SoftsignGradOp<Device, T>> {
     51 : BinaryElementWiseOp<T, SoftsignGradOp<Device, T>>(context) {} in SoftsignGradOp()
     68 template <typename Device, typename T>
     69 void SoftsignGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context, in OperateNoTemplate()
    [all …]
|
D | softplus_op.cc |
     33 template <typename Device, typename T>
     34 class SoftplusOp : public UnaryElementWiseOp<T, SoftplusOp<Device, T>> {
     37 : UnaryElementWiseOp<T, SoftplusOp<Device, T>>(context) {} in SoftplusOp()
     40 functor::Softplus<Device, T> functor; in Operate()
     41 functor(context->eigen_device<Device>(), input.flat<T>(), in Operate()
     46 template <typename Device, typename T>
     48 : public BinaryElementWiseOp<T, SoftplusGradOp<Device, T>> {
     51 : BinaryElementWiseOp<T, SoftplusGradOp<Device, T>>(context) {} in SoftplusGradOp()
     67 template <typename Device, typename T>
     68 void SoftplusGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context, in OperateNoTemplate()
    [all …]
|
D | relu_op_functor.h |
     27 template <typename Device, typename T>
     33 void operator()(const Device& d, typename TTypes<T>::ConstTensor features, in operator()
     40 template <typename Device, typename T>
     48 void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients, in operator()
     60 template <typename Device, typename T>
     66 void operator()(const Device& d, typename TTypes<T>::ConstTensor features, in operator()
     74 template <typename Device, typename T>
     81 void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients, in operator()
     95 template <typename Device, typename T>
    101 void operator()(const Device& d, typename TTypes<T>::ConstTensor features, in operator()
    [all …]
|
D | constant_op.cc |
     93 REGISTER_KERNEL_BUILDER(Name("Const").Device(DEVICE_CPU), ConstantOp);
     98 Name("Const").Device(DEVICE_##D).TypeConstraint<TYPE>("dtype"), \
    125 Name("Const").Device(DEVICE_##D).TypeConstraint<TYPE>("dtype"), \
    150 template <typename Device, typename T, typename Index>
    171 functor::FillFunctor<Device, T> functor; in Compute()
    172 functor(context->eigen_device<Device>(), out->flat<T>(), in Compute()
    179 .Device(DEVICE_##D) \
    183 FillOp<D##Device, TYPE, int32>); \
    185 .Device(DEVICE_##D) \
    189 FillOp<D##Device, TYPE, int64>);
    [all …]
|
/external/tensorflow/tensorflow/compiler/xrt/kernels/ |
D | xrt_state_ops.cc |
     30 .Device(DEVICE_XLA_GPU)
     35 .Device(DEVICE_XLA_CPU)
     41 .Device(DEVICE_XLA_GPU)
     46 .Device(DEVICE_XLA_CPU)
     52 .Device(DEVICE_XLA_GPU)
     58 .Device(DEVICE_XLA_CPU)
     65 .Device(DEVICE_XLA_GPU)
     71 .Device(DEVICE_XLA_CPU)
     78 .Device(DEVICE_XLA_GPU)
     84 .Device(DEVICE_XLA_CPU)
    [all …]
|
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorEvaluator.h |
     27 template<typename Derived, typename Device>
     33 typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
     48 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) in TensorEvaluator()
    114 const Device& device() const{return m_device;} in device()
    119 const Device& m_device;
    147 template<typename Derived, typename Device>
    148 struct TensorEvaluator<const Derived, Device>
    153 typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
    171 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device)
    213 const Device& device() const{return m_device;}
    [all …]
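TensorEvaluator<Expr, Device> is the internal piece Eigen instantiates for every tensor assignment; in user code the Device type only surfaces through .device(). A short sketch evaluating the same expression on the default single-threaded device and on a thread-pool device; the sizes and thread count are arbitrary choices for the example:

    // Sketch: one Eigen tensor expression evaluated on two device types.
    // Each assignment below instantiates a TensorEvaluator for that device.
    #define EIGEN_USE_THREADS
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<float, 2> a(256, 256), b(256, 256), c(256, 256);
      a.setRandom();
      b.setRandom();

      // Single-threaded evaluation.
      Eigen::DefaultDevice default_dev;
      c.device(default_dev) = a + b * 0.5f;

      // Multi-threaded evaluation on a 4-thread pool.
      Eigen::ThreadPool pool(4);
      Eigen::ThreadPoolDevice tp_dev(&pool, 4);
      c.device(tp_dev) = a + b * 0.5f;
      return 0;
    }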
|
/external/dynamic_depth/internal/dynamic_depth/ |
D | device.cc |
     34 std::unique_ptr<Device> ParseFields(const xmlDocPtr& xmlDoc) { in ParseFields()
     58 auto pose = Pose::FromDeserializer(deserializer, DynamicDepthConst::Device()); in ParseFields()
     61 VendorInfo::FromDeserializer(deserializer, DynamicDepthConst::Device()); in ParseFields()
     63 AppInfo::FromDeserializer(deserializer, DynamicDepthConst::Device()); in ParseFields()
     74 return Device::FromData(std::move(params)); in ParseFields()
     78 std::unique_ptr<Device> ParseFields(const XmpData& xmp) { in ParseFields()
     90 Device::Device(std::unique_ptr<DeviceParams> params) { in Device() function in dynamic_depth::Device
     95 std::unique_ptr<Device> Device::FromData(std::unique_ptr<DeviceParams> params) { in FromData()
    103 return std::unique_ptr<Device>(new Device(std::move(params))); // NOLINT in FromData()
    106 std::unique_ptr<Device> Device::FromXmp(const XmpData& xmp) { in FromXmp()
    [all …]
|
/external/tensorflow/tensorflow/core/common_runtime/eager/ |
D | tensor_handle.h |
     60 TensorHandle(const Tensor& t, Device* d, Device* op_device,
     62 TensorHandle(uint64 node_id, Device* d, Device* op_device,
     63 Device* resource_device, DataType dtype, EagerContext* ctx);
     67 DataType dtype, std::function<void()> call_on_destroy, Device* d,
     68 Device* op_device, Device* resource_device, EagerContext* ctx);
     84 tensorflow::Device* device() const { return device_; } in device()
     85 tensorflow::Device* op_device() const { return op_device_; } in op_device()
     86 tensorflow::Device* resource_device() const { return resource_device_; } in resource_device()
     89 tensorflow::Device** device,
     90 tensorflow::Device** op_device);
    [all …]
|
/external/webrtc/talk/media/devices/ |
D | devicemanager.h |
     69 virtual bool GetAudioInputDevices(std::vector<Device>* devices) = 0;
     70 virtual bool GetAudioOutputDevices(std::vector<Device>* devices) = 0;
     72 virtual bool GetAudioInputDevice(const std::string& name, Device* out) = 0;
     73 virtual bool GetAudioOutputDevice(const std::string& name, Device* out) = 0;
     75 virtual bool GetVideoCaptureDevices(std::vector<Device>* devs) = 0;
     76 virtual bool GetVideoCaptureDevice(const std::string& name, Device* out) = 0;
     97 virtual VideoCapturer* CreateVideoCapturer(const Device& device) const = 0;
    140 virtual bool GetAudioInputDevices(std::vector<Device>* devices);
    141 virtual bool GetAudioOutputDevices(std::vector<Device>* devices);
    143 virtual bool GetAudioInputDevice(const std::string& name, Device* out);
    [all …]
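DeviceManagerInterface exposes enumeration through bool-returning methods that fill a caller-owned std::vector<Device>. A hedged usage sketch based only on the signatures visible above; the cricket namespace and the Device::name field are assumed from the libjingle headers, and the surrounding function and logging are inventions for the example.

    // Usage sketch for the enumeration methods shown above. Only the
    // GetVideoCaptureDevices / CreateVideoCapturer signatures come from the
    // header; the Device::name member and this wrapper are assumptions.
    #include <iostream>
    #include <vector>

    #include "talk/media/devices/devicemanager.h"

    bool ListCaptureDevices(cricket::DeviceManagerInterface* manager) {
      std::vector<cricket::Device> devices;
      if (!manager->GetVideoCaptureDevices(&devices)) {
        std::cerr << "Video capture device enumeration failed\n";
        return false;
      }
      for (const cricket::Device& device : devices) {
        std::cout << "capture device: " << device.name << "\n";
        // A capturer could then be created per device:
        //   cricket::VideoCapturer* capturer = manager->CreateVideoCapturer(device);
      }
      return true;
    }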
|
D | fakedevicemanager.h |
     52 std::vector<Device> devices; in GetCapabilities()
     65 virtual bool GetAudioInputDevices(std::vector<Device>* devs) { in GetAudioInputDevices()
     69 virtual bool GetAudioOutputDevices(std::vector<Device>* devs) { in GetAudioOutputDevices()
     73 virtual bool GetAudioInputDevice(const std::string& name, Device* out) { in GetAudioInputDevice()
     76 virtual bool GetAudioOutputDevice(const std::string& name, Device* out) { in GetAudioOutputDevice()
     79 virtual bool GetVideoCaptureDevices(std::vector<Device>* devs) { in GetVideoCaptureDevices()
    105 virtual VideoCapturer* CreateVideoCapturer(const Device& device) const { in CreateVideoCapturer()
    151 virtual bool GetDefaultVideoCaptureDevice(Device* device) { in GetDefaultVideoCaptureDevice()
    160 bool QtKitToSgDevice(const std::string& qtkit_name, Device* out) { in QtKitToSgDevice()
    170 input_devices_.push_back(Device(devices[i], in SetAudioInputDevices()
    [all …]
|