Searched refs:GPU (Results 1 – 25 of 872) sorted by relevance


/external/tensorflow/tensorflow/core/kernels/
constant_op.cc 105 REGISTER_KERNEL(GPU, Eigen::half);
106 REGISTER_KERNEL(GPU, bfloat16);
107 REGISTER_KERNEL(GPU, float);
108 REGISTER_KERNEL(GPU, double);
109 REGISTER_KERNEL(GPU, uint8);
110 REGISTER_KERNEL(GPU, int8);
111 REGISTER_KERNEL(GPU, qint8);
112 REGISTER_KERNEL(GPU, uint16);
113 REGISTER_KERNEL(GPU, int16);
114 REGISTER_KERNEL(GPU, qint16);
[all …]
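
The constant_op.cc hits above show TensorFlow's per-dtype registration pattern: one REGISTER_KERNEL(GPU, T) line per type the GPU Const kernel supports. As an illustrative sketch only (the real macro lives in constant_op.cc and may differ in detail), such a file-local macro usually wraps REGISTER_KERNEL_BUILDER:

    // Sketch of the registration pattern; ConstantOp is the kernel class
    // defined earlier in constant_op.cc, and the exact macro body there may
    // differ from this reconstruction.
    #include "tensorflow/core/framework/op_kernel.h"

    namespace tensorflow {

    #define REGISTER_KERNEL(D, TYPE)                                      \
      REGISTER_KERNEL_BUILDER(                                            \
          Name("Const").Device(DEVICE_##D).TypeConstraint<TYPE>("dtype"), \
          ConstantOp)

    REGISTER_KERNEL(GPU, float);   // one invocation per supported dtype
    REGISTER_KERNEL(GPU, double);  // as in the matches listed above

    #undef REGISTER_KERNEL

    }  // namespace tensorflow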
cwise_op_div.cc 34 REGISTER9(BinaryOp, GPU, "Div", functor::div, float, Eigen::half, double, uint8,
36 REGISTER5(BinaryOp, GPU, "RealDiv", functor::div, float, Eigen::half, double,
38 REGISTER4(BinaryOp, GPU, "TruncateDiv", functor::div, uint8, uint16, int16,
41 REGISTER4(BinaryOp, GPU, "Div", functor::div, uint8, uint16, complex64,
43 REGISTER2(BinaryOp, GPU, "RealDiv", functor::div, complex64, complex128);
44 REGISTER2(BinaryOp, GPU, "TruncateDiv", functor::div, uint8, uint16);
46 REGISTER5(BinaryOp, GPU, "DivNoNan", functor::div_no_nan, Eigen::half, float,
depthwise_conv_ops_test.cc 38 enum class Device { CPU, GPU }; enumerator
42 if (device == Device::GPU) { in Run()
106 TEST_F(DepthwiseConvOpTest, DepthwiseConvFloatGpu) { Run<float>(Device::GPU); } in TEST_F()
108 Run<double>(Device::GPU); in TEST_F()
111 Run<Eigen::half>(Device::GPU); in TEST_F()
cwise_op_add_2.cc 34 REGISTER6(BinaryOp, GPU, "Add", functor::add, uint8, uint16, uint64, int64,
37 REGISTER7(BinaryOp, GPU, "AddV2", functor::add, uint8, uint16, uint32, uint64,
41 REGISTER5(BinaryOp, GPU, "Add", functor::add, uint8, uint16, uint64, complex64,
44 REGISTER6(BinaryOp, GPU, "AddV2", functor::add, uint8, uint16, uint32, uint64,
cwise_op_reciprocal.cc 22 REGISTER4(UnaryOp, GPU, "Inv", functor::inverse, float, Eigen::half, double,
29 REGISTER3(SimpleBinaryOp, GPU, "InvGrad", functor::inverse_grad, float,
36 REGISTER4(UnaryOp, GPU, "Reciprocal", functor::inverse, float, Eigen::half,
43 REGISTER3(SimpleBinaryOp, GPU, "ReciprocalGrad", functor::inverse_grad, float,
cwise_op_igammas.cc 24 REGISTER2(BinaryOp, GPU, "Igamma", functor::igamma, float, double);
25 REGISTER2(BinaryOp, GPU, "IgammaGradA", functor::igamma_grad_a, float, double);
26 REGISTER2(BinaryOp, GPU, "Igammac", functor::igammac, float, double);
/external/perfetto/test/trace_processor/graphics/
gpu_mem_total.out 2 "GPU Memory","7","Total GPU memory used by the entire system",0,"[NULL]",123
3 "GPU Memory","7","Total GPU memory used by this process",0,1,100
4 "GPU Memory","7","Total GPU memory used by the entire system",5,"[NULL]",256
5 "GPU Memory","7","Total GPU memory used by this process",5,1,233
6 "GPU Memory","7","Total GPU memory used by the entire system",10,"[NULL]",123
7 "GPU Memory","7","Total GPU memory used by this process",10,1,0
gpu_log.out 2 "gpu_log","GPU Log",1,0,"VERBOSE","message","message0"
3 "gpu_log","GPU Log",1,0,"VERBOSE","tag","tag0"
4 "gpu_log","GPU Log",2,0,"DEBUG","message","message1"
5 "gpu_log","GPU Log",2,0,"DEBUG","tag","tag0"
6 "gpu_log","GPU Log",3,0,"INFO","message","message2"
7 "gpu_log","GPU Log",3,0,"INFO","tag","tag0"
8 "gpu_log","GPU Log",4,0,"ERROR","message","message4"
9 "gpu_log","GPU Log",4,0,"ERROR","tag","tag0"
10 "gpu_log","GPU Log",4,0,"WARNING","message","message3"
11 "gpu_log","GPU Log",4,0,"WARNING","tag","tag0"
[all …]
/external/tensorflow/tensorflow/compiler/tests/
lstm_layer_inference.pbtxt 6 device: "/device:GPU:*"
31 device: "/device:GPU:*"
53 device: "/device:GPU:*"
76 device: "/device:GPU:*"
107 device: "/device:GPU:*"
120 device: "/device:GPU:*"
133 device: "/device:GPU:*"
144 device: "/device:GPU:*"
182 device: "/device:GPU:*"
214 device: "/device:GPU:*"
[all …]
/external/tensorflow/tensorflow/core/kernels/mlir_generated/
base_gpu_op.h 24 GENERATE_AND_REGISTER_UNARY_KERNEL(tf_op, GPU, input_type)
27 GENERATE_UNARY_KERNEL(tf_op, GPU, input_type)
30 GENERATE_UNARY_KERNEL2(tf_op, GPU, input_type, output_type)
33 REGISTER_ALIASED_KERNEL(tf_op, mlir_op, GPU, input_type, output_type)
36 REGISTER_KERNEL(tf_op, GPU, input_type, output_type)
39 REGISTER_COMPLEX_KERNEL(tf_op, GPU, input_type, output_type)
42 REGISTER_KERNEL_NO_TYPE_CONSTRAINT(tf_op, GPU, input_type)
45 GENERATE_AND_REGISTER_BINARY_KERNEL(tf_op, GPU, input_type)
49 GENERATE_AND_REGISTER_BINARY_KERNEL2(tf_op, GPU, input_type, output_type)
52 GENERATE_BINARY_KERNEL(tf_op, GPU, input_type)
[all …]
/external/tensorflow/tensorflow/core/kernels/special_math/
special_math_op_bessel.cc 50 REGISTER3(UnaryOp, GPU, "BesselI0", functor::bessel_i0, Eigen::half, float,
52 REGISTER3(UnaryOp, GPU, "BesselI1", functor::bessel_i1, Eigen::half, float,
54 REGISTER3(UnaryOp, GPU, "BesselI0e", functor::bessel_i0e, Eigen::half, float,
56 REGISTER3(UnaryOp, GPU, "BesselI1e", functor::bessel_i1e, Eigen::half, float,
59 REGISTER3(UnaryOp, GPU, "BesselK0", functor::bessel_k0, Eigen::half, float,
61 REGISTER3(UnaryOp, GPU, "BesselK1", functor::bessel_k1, Eigen::half, float,
63 REGISTER3(UnaryOp, GPU, "BesselK0e", functor::bessel_k0e, Eigen::half, float,
65 REGISTER3(UnaryOp, GPU, "BesselK1e", functor::bessel_k1e, Eigen::half, float,
68 REGISTER3(UnaryOp, GPU, "BesselJ0", functor::bessel_j0, Eigen::half, float,
70 REGISTER3(UnaryOp, GPU, "BesselJ1", functor::bessel_j1, Eigen::half, float,
[all …]
/external/tensorflow/tensorflow/compiler/jit/tests/
keras_imagenet_main.pbtxt 4 device: "/job:localhost/replica:0/task:0/device:GPU:0"
29 device: "/job:localhost/replica:0/task:0/device:GPU:0"
51 device: "/job:localhost/replica:0/task:0/device:GPU:0"
88 device: "/job:localhost/replica:0/task:0/device:GPU:0"
128 device: "/job:localhost/replica:0/task:0/device:GPU:0"
153 device: "/job:localhost/replica:0/task:0/device:GPU:0"
176 device: "/job:localhost/replica:0/task:0/device:GPU:0"
213 device: "/job:localhost/replica:0/task:0/device:GPU:0"
250 device: "/job:localhost/replica:0/task:0/device:GPU:0"
296 device: "/job:localhost/replica:0/task:0/device:GPU:0"
[all …]
keras_imagenet_main_graph_mode.pbtxt 4 device: "/job:localhost/replica:0/task:0/device:GPU:0"
54 device: "/job:localhost/replica:0/task:0/device:GPU:0"
98 device: "/job:localhost/replica:0/task:0/device:GPU:0"
139 device: "/job:localhost/replica:0/task:0/device:GPU:0"
180 device: "/job:localhost/replica:0/task:0/device:GPU:0"
221 device: "/job:localhost/replica:0/task:0/device:GPU:0"
262 device: "/job:localhost/replica:0/task:0/device:GPU:0"
303 device: "/job:localhost/replica:0/task:0/device:GPU:0"
344 device: "/job:localhost/replica:0/task:0/device:GPU:0"
385 device: "/job:localhost/replica:0/task:0/device:GPU:0"
[all …]
opens2s_gnmt_mixed_precision.pbtxt.gz
/external/llvm/lib/Target/AMDGPU/
AMDGPUSubtarget.cpp 38 StringRef GPU, StringRef FS) { in initializeSubtargetDependencies() argument
53 ParseSubtargetFeatures(GPU, FullFS); in initializeSubtargetDependencies()
70 AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS, in AMDGPUSubtarget() argument
72 : AMDGPUGenSubtargetInfo(TT, GPU, FS), in AMDGPUSubtarget()
119 InstrItins(getInstrItineraryForCPU(GPU)) { in AMDGPUSubtarget()
120 initializeSubtargetDependencies(TT, GPU, FS); in AMDGPUSubtarget()
181 R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS, in R600Subtarget() argument
183 AMDGPUSubtarget(TT, GPU, FS, TM), in R600Subtarget()
188 SISubtarget::SISubtarget(const Triple &TT, StringRef GPU, StringRef FS, in SISubtarget() argument
190 AMDGPUSubtarget(TT, GPU, FS, TM), in SISubtarget()
/external/skia/site/docs/dev/gardening/
gpu.md 3 title: "GPU Gardener Documentation"
4 linkTitle: "GPU Gardener Documentation"
11 * [What does a GPU Gardener do?](#what_is_a_gpu_gardener)
12 * [Tracking GPU Gardener Work](#tracking)
15 * [Tips for GPU Gardeners](#tips)
19 What does a GPU Gardener do?
22 The GPU Gardener has three main jobs:
24 1) Stay on top of incoming GPU-related bugs from clients in various bug trackers. This means triagi…
27 2) Improve the reliability of the GPU bots. This includes dealing with flaky images, crashing bots,…
33 The GPU Gardener should always prioritize dealing with incoming bugs. The balance of a gardener's t…
[all …]
/external/skia/src/gpu/vk/
GrVkUtil.h 24 #define GR_VK_CALL_RESULT(GPU, RESULT, X) \ argument
26 (RESULT) = GR_VK_CALL(GPU->vkInterface(), X); \
28 if (RESULT != VK_SUCCESS && !GPU->isDeviceLost()) { \
31 GPU->checkVkResult(RESULT); \
34 #define GR_VK_CALL_RESULT_NOCHECK(GPU, RESULT, X) \ argument
36 (RESULT) = GR_VK_CALL(GPU->vkInterface(), X); \
37 GPU->checkVkResult(RESULT); \
41 #define GR_VK_CALL_ERRCHECK(GPU, X) \ argument
43 GR_VK_CALL_RESULT(GPU, SK_MACRO_APPEND_LINE(ret), X) \
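
The GrVkUtil.h macros above wrap a raw Vulkan call made through the gpu's GrVkInterface, capture the VkResult, and route failures through the gpu's device-lost/result-checking path. A hypothetical call site might look like the sketch below (the GrVkGpu* named gpu and the samplerInfo setup are assumed, not taken from this file):

    // Hypothetical usage of GR_VK_CALL_RESULT; not code from GrVkUtil.h.
    // The macro dispatches CreateSampler(...) through gpu->vkInterface(),
    // stores the VkResult, and, unless the device is lost, reports failures
    // via gpu->checkVkResult(result).
    VkSampler sampler = VK_NULL_HANDLE;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result,
                      CreateSampler(gpu->device(), &samplerInfo, nullptr, &sampler));
    if (result != VK_SUCCESS) {
        return nullptr;  // creation failed; caller handles the error
    }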
/external/tensorflow/tensorflow/lite/g3doc/performance/
gpu_advanced.md 1 # TensorFlow Lite on GPU
4 hardware accelerators. This document describes how to use the GPU backend using
8 ## Benefits of GPU acceleration
17 on the GPU may run fast enough to become suitable for real-time applications
25 neural network on a GPU may eliminate this concern.
29 Another benefit that comes with GPU inference is its power efficiency. A GPU
35 TensorFlow Lite on GPU supports the following ops in 16-bit and 32-bit float
86 Then run TensorFlow Lite on GPU with `TfLiteDelegate`. In Java, you can specify
102 // if the device has a supported GPU, add the GPU delegate
106 // if the GPU is not supported, run on 4 threads
[all …]
gpu.md 1 # TensorFlow Lite GPU delegate
4 accelerators. This document describes how to use the GPU backend using the
11 resulting in lower latency. In the best scenario, inference on the GPU may now
19 Another benefit with GPU inference is its power efficiency. GPUs carry out the
25 The easiest way to try out the GPU delegate is to follow the below tutorials,
26 which go through building our classification demo applications with GPU support.
27 The GPU code is only binary for now; it will be open-sourced soon. Once you
34 [GPU Delegate for Android](https://youtu.be/Xkhgre8r5G0) video.
44 #### Step 2. Edit `app/build.gradle` to use the nightly GPU AAR
60 the GPU. Change from quantized to a float model and then click GPU to run on the
[all …]
/external/angle/doc/
GPUMemoryAnalysis.md 1 # GPU Memory Reporting and Analysis
6 GPU memory usage data can be reported when using the Vulkan back-end with drivers that support the
8 based on every allocation, free, import, unimport, and failed allocation of GPU memory. This
12 ## GPU Memory Reporting
16 each of the following GPU memory events:
18 - Allocation of GPU memory by ANGLE
19 - Free of GPU memory by ANGLE
20 - Import of GPU memory provided by another process (e.g. Android SurfaceFlinger)
21 - Unimport of GPU memory provided by another process
55 Note: At this time, GPU memory reporting has only been tested and used on Android, where the logged
[all …]
/external/perfetto/docs/data-sources/
gpu.md 1 # GPU chapter
5 ## GPU Frequency
7 GPU frequency can be included in the trace by adding the ftrace category.
20 ## GPU Counters
22 GPU counters can be configured by adding the data source to the trace config as follows:
/external/tensorflow/tensorflow/lite/delegates/gpu/
README.md 1 # TFLite on GPU
4 describes how to use the GPU backend using the TFLite delegate APIs on Android
11 resulting in lower latency. In the best scenario, inference on the GPU may now
19 net models on the GPU.
21 Another benefit that comes with GPU inference is its power efficiency. GPUs
26 TFLite on GPU supports the following ops in 16-bit and 32-bit float precision:
54 **Note:** Following section describes the example usage for Android GPU delegate
58 Using TFLite on GPU is as simple as getting the GPU delegate via
72 // NEW: Prepare GPU delegate.
96 TFLite GPU backend uses OpenGL ES 3.1 compute shaders or OpenCL.
[all …]
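
The README snippet above is truncated, but the flow it describes is: build the interpreter, create the GPU delegate, attach it with ModifyGraphWithDelegate, run inference, then release the delegate. A minimal sketch of that flow with the TFLite C++ API follows (the function name and the omission of tensor I/O are simplifications, not part of the README):

    // Minimal sketch of the GPU-delegate flow; error handling and tensor
    // I/O are trimmed, and RunOnGpu is an illustrative name.
    #include <memory>

    #include "tensorflow/lite/delegates/gpu/delegate.h"
    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"

    bool RunOnGpu(const char* model_path) {
      auto model = tflite::FlatBufferModel::BuildFromFile(model_path);
      if (!model) return false;

      tflite::ops::builtin::BuiltinOpResolver resolver;
      std::unique_ptr<tflite::Interpreter> interpreter;
      tflite::InterpreterBuilder(*model, resolver)(&interpreter);
      if (!interpreter) return false;

      // Prepare the GPU delegate with default options and attach it.
      TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
      TfLiteDelegate* delegate = TfLiteGpuDelegateV2Create(&options);
      bool ok = interpreter->ModifyGraphWithDelegate(delegate) == kTfLiteOk &&
                interpreter->AllocateTensors() == kTfLiteOk &&
                interpreter->Invoke() == kTfLiteOk;

      interpreter.reset();                  // tear down the interpreter first
      TfLiteGpuDelegateV2Delete(delegate);  // then release the delegate
      return ok;
    }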
/external/tensorflow/tensorflow/python/eager/
benchmarks_test.py 70 GPU = "/device:GPU:0" variable
161 if device == GPU:
274 self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
281 GPU)
287 self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
294 np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)
336 with context.device(GPU):
348 with context.device(GPU):
361 with context.device(GPU):
645 with context.device(GPU):
[all …]
/external/llvm-project/llvm/unittests/Frontend/
OpenMPContextTest.cpp 115 VariantMatchInfo GPU; in TEST_F() local
116 GPU.addTrait(TraitProperty::device_kind_gpu, ""); in TEST_F()
117 EXPECT_FALSE(isVariantApplicableInContext(GPU, HostLinux)); in TEST_F()
118 EXPECT_FALSE(isVariantApplicableInContext(GPU, DeviceLinux)); in TEST_F()
119 EXPECT_TRUE(isVariantApplicableInContext(GPU, HostNVPTX)); in TEST_F()
120 EXPECT_TRUE(isVariantApplicableInContext(GPU, DeviceNVPTX)); in TEST_F()
206 VariantMatchInfo GPU; in TEST_F() local
207 GPU.addTrait(TraitProperty::device_kind_gpu, ""); in TEST_F()
208 EXPECT_FALSE(isVariantApplicableInContext(GPU, HostLinuxParallelParallel)); in TEST_F()
209 EXPECT_FALSE(isVariantApplicableInContext(GPU, DeviceLinuxTargetParallel)); in TEST_F()
[all …]
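
The OpenMPContextTest.cpp hits exercise LLVM's variant-matching logic: a VariantMatchInfo carrying only the device_kind_gpu trait matches the NVPTX contexts but not the plain Linux host/device ones. In user-facing terms this is the machinery behind OpenMP declare variant context selectors; a hedged sketch of such a selector follows (function names are invented for illustration):

    // Illustrative OpenMP source, not taken from the unit test: calls to
    // base() resolve to base_gpu() only when the compilation context's
    // device kind is a GPU (e.g. an NVPTX offload target) -- the trait the
    // test encodes as TraitProperty::device_kind_gpu.
    int base_gpu(void) { return 1; }

    #pragma omp declare variant(base_gpu) match(device = {kind(gpu)})
    int base(void) { return 0; }

    int main(void) {
      int picked = -1;
      #pragma omp target map(from: picked)
      picked = base();  // 1 if the GPU variant was selected, 0 otherwise
      return picked;
    }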
/external/angle/src/feature_support_util/
feature_support_util.cpp 409 class GPU class
412 GPU(StringPart vendor, IntegerPart deviceId, Version version) in GPU() function in angle::GPU
418 GPU(std::string vendor, uint32_t deviceId, Version version) in GPU() function in angle::GPU
419 : GPU(StringPart(std::move(vendor)), IntegerPart(deviceId), std::move(version)) in GPU()
421 GPU() = default;
422 ~GPU() = default;
423 bool match(const GPU &toCheck) const in match()
433 static bool CreateGpuFromJson(const Json::Value &jObject, GPU *out) in CreateGpuFromJson()
445 *out = GPU{std::move(vendor), std::move(deviceId), std::move(version)}; in CreateGpuFromJson()
506 void addGPU(GPU &&gpu) { mGpuList.addItem(std::move(gpu)); } in addGPU()
[all …]
