
Searched +full:- +full:- +full:batch +full:- +full:mode (Results 1 – 25 of 1039) sorted by relevance


/external/mesa3d/src/gallium/drivers/crocus/
crocus_draw.c
48 prim_is_points_or_lines(enum mesa_prim mode) in prim_is_points_or_lines() argument
50 /* We don't need to worry about adjacency - it can only be used with in prim_is_points_or_lines()
53 return mode == MESA_PRIM_POINTS || in prim_is_points_or_lines()
54 mode == MESA_PRIM_LINES || in prim_is_points_or_lines()
55 mode == MESA_PRIM_LINE_LOOP || in prim_is_points_or_lines()
56 mode == MESA_PRIM_LINE_STRIP; in prim_is_points_or_lines()
63 switch (draw->index_size) { in can_cut_index_handle_restart_index()
65 return draw->restart_index == 0xff; in can_cut_index_handle_restart_index()
67 return draw->restart_index == 0xffff; in can_cut_index_handle_restart_index()
69 return draw->restart_index == 0xffffffff; in can_cut_index_handle_restart_index()
[all …]
/external/tensorflow/tensorflow/python/keras/
callbacks.py
7 # http://www.apache.org/licenses/LICENSE-2.0
15 # pylint: disable=g-import-not-at-top
16 # pylint: disable=g-classes-have-attributes
83 mode=ModeKeys.TRAIN): argument
90 batch_size: Number of samples per batch.
95 count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
96 mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
97 Which loop mode to configure callbacks for.
110 if mode == ModeKeys.TRAIN:
118 callback_model = model._get_callback_model() # pylint: disable=protected-access
[all …]
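The callbacks.py fragment above configures Keras' internal callback loop, where count_mode selects per-step or per-sample progress counting and mode is one of ModeKeys.TRAIN/TEST/PREDICT. A minimal sketch of the same choice through public API, assuming a tiny synthetic model (the model and data below are illustrative, not from the snippet):

```python
import numpy as np
import tensorflow as tf

# Illustrative data: 64 samples, 8 features, binary labels.
x = np.random.rand(64, 8).astype("float32")
y = np.random.randint(0, 2, size=(64, 1)).astype("float32")

model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="sigmoid")])
model.compile(optimizer="adam", loss="binary_crossentropy")

# ProgbarLogger exposes the 'steps' vs 'samples' count_mode described above.
progbar = tf.keras.callbacks.ProgbarLogger(count_mode="steps")
model.fit(x, y, batch_size=16, epochs=1, callbacks=[progbar])
```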
/external/mesa3d/src/gallium/drivers/zink/
zink_draw.c
19 struct zink_batch *batch, in allocate_descriptor_set() argument
22 assert(batch->descs_left >= prog->num_descriptors); in allocate_descriptor_set()
27 dsai.descriptorPool = batch->descpool; in allocate_descriptor_set()
29 dsai.pSetLayouts = &prog->dsl; in allocate_descriptor_set()
32 if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) { in allocate_descriptor_set()
37 batch->descs_left -= prog->num_descriptors; in allocate_descriptor_set()
50 * - from VK_EXT_transform_feedback spec in zink_emit_xfb_counter_barrier()
55 for (unsigned i = 0; i < ctx->num_so_targets; i++) { in zink_emit_xfb_counter_barrier()
56 struct zink_so_target *t = zink_so_target(ctx->so_targets[i]); in zink_emit_xfb_counter_barrier()
57 if (t->counter_buffer_valid) { in zink_emit_xfb_counter_barrier()
[all …]
/external/tensorflow/tensorflow/core/kernels/image/
image_ops.h
7 http://www.apache.org/licenses/LICENSE-2.0
33 enum Mode { FILL_REFLECT, FILL_WRAP, FILL_CONSTANT, FILL_NEAREST }; enum
40 template <typename Device, Mode M>
46 struct MapCoordinate<Device, Mode::FILL_REFLECT> {
57 in_coord = sz2 * static_cast<DenseIndex>(-in_coord / sz2) + in_coord;
59 in_coord = (in_coord < -len) ? in_coord + sz2 : -in_coord - 1;
61 } else if (in_coord > len - 1) {
66 in_coord -= sz2 * static_cast<DenseIndex>(in_coord / sz2);
68 in_coord = sz2 - in_coord - 1;
74 return Eigen::internal::scalar_clamp_op<float>(0.0f, len - 1)(in_coord);
[all …]
/external/tensorflow/tensorflow/python/distribute/
custom_training_loop_input_test.py
7 # http://www.apache.org/licenses/LICENSE-2.0
80 mode=["eager"]
101 mode=["eager"]
123 mode=["eager"]))
125 dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
140 distribution=strategy_combinations.all_strategies, mode=["eager"]))
143 dataset = get_dataset_from_tensor_slices(data).batch(2)
160 distribution=strategy_combinations.all_strategies, mode=["eager"]))
165 8, output_type=dtypes.int32).batch(global_batch_size)
185 distribution=strategy_combinations.tpu_strategies, mode=["eager"]))
[all …]
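These test fragments batch a tensor-slices dataset and run it under a distribution strategy in eager mode. A minimal sketch of that pattern with public tf.distribute APIs, assuming a MirroredStrategy and a trivial per-replica step (both are illustrative):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)

# Let the strategy split each batch across replicas.
dist_dataset = strategy.experimental_distribute_dataset(dataset)

@tf.function
def step(batch):
    return batch * 2.0  # stand-in for real per-replica work

for batch in dist_dataset:
    per_replica = strategy.run(step, args=(batch,))
    print(strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None))
```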
input_lib_test.py
7 # http://www.apache.org/licenses/LICENSE-2.0
61 # The passed input_context is to create a sharded dataset in between-graph
73 # MultiWorkerMirroredStrategy. It doesn't apply to in-graph case where
85 # Note: `input_workers.num_workers` is always 1 in between-graph
227 # After re-initializing the iterator, should be able to iterate again.
258 # re-initializing the iterator
286 mode=["eager"],
312 mode=["graph", "eager"],
338 mode=["eager"],
361 mode=["graph", "eager"],
[all …]
/external/google-cloud-java/java-video-intelligence/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/
ObjectTrackingAnnotationOrBuilder.java
8 * https://www.apache.org/licenses/LICENSE-2.0
30 * Non-streaming batch mode ONLY.
43 * Non-streaming batch mode ONLY.
56 * Non-streaming batch mode ONLY.
68 * Streaming mode ONLY.
69 * In streaming mode, we do not know the end time of a tracked object
85 * Streaming mode ONLY.
86 * In streaming mode, we do not know the end time of a tracked object
152 * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
154 * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
[all …]
ObjectTrackingAnnotation.java
8 * https://www.apache.org/licenses/LICENSE-2.0
123 * Non-streaming batch mode ONLY.
139 * Non-streaming batch mode ONLY.
158 * Non-streaming batch mode ONLY.
177 * Streaming mode ONLY.
178 * In streaming mode, we do not know the end time of a tracked object
197 * Streaming mode ONLY.
198 * In streaming mode, we do not know the end time of a tracked object
294 * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
296 * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
[all …]
/external/google-cloud-java/java-video-intelligence/proto-google-cloud-video-intelligence-v1p3beta1/src/main/java/com/google/cloud/videointelligence/v1p3beta1/
ObjectTrackingAnnotationOrBuilder.java
8 * https://www.apache.org/licenses/LICENSE-2.0
30 * Non-streaming batch mode ONLY.
43 * Non-streaming batch mode ONLY.
56 * Non-streaming batch mode ONLY.
68 * Streaming mode ONLY.
69 * In streaming mode, we do not know the end time of a tracked object
85 * Streaming mode ONLY.
86 * In streaming mode, we do not know the end time of a tracked object
152 * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
154 * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
[all …]
ObjectTrackingAnnotation.java
8 * https://www.apache.org/licenses/LICENSE-2.0
122 * Non-streaming batch mode ONLY.
138 * Non-streaming batch mode ONLY.
157 * Non-streaming batch mode ONLY.
176 * Streaming mode ONLY.
177 * In streaming mode, we do not know the end time of a tracked object
196 * Streaming mode ONLY.
197 * In streaming mode, we do not know the end time of a tracked object
293 * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
295 * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
[all …]
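The generated protobuf classes above distinguish non-streaming batch mode, where each annotation carries a segment and possibly many ObjectTrackingFrame messages, from streaming mode, where each message carries exactly one frame and no known end time. A hedged sketch of reading a non-streaming batch response with the Python google-cloud-videointelligence client (the GCS URI is a placeholder):

```python
from google.cloud import videointelligence

client = videointelligence.VideoIntelligenceServiceClient()
operation = client.annotate_video(
    request={
        "features": [videointelligence.Feature.OBJECT_TRACKING],
        "input_uri": "gs://your-bucket/your-video.mp4",  # placeholder URI
    }
)
result = operation.result(timeout=300)

# Non-streaming batch mode: one annotation may carry multiple frames.
for annotation in result.annotation_results[0].object_annotations:
    print(annotation.entity.description, annotation.confidence)
    for frame in annotation.frames:
        box = frame.normalized_bounding_box
        print(frame.time_offset, box.left, box.top, box.right, box.bottom)
```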
/external/tensorflow/tensorflow/python/keras/distribute/
distributed_training_utils_v1.py
7 # http://www.apache.org/licenses/LICENSE-2.0
16 # pylint:disable=protected-access
136 def unwrap_output_dict(strategy, grouped_outputs, mode): argument
138 if mode == ModeKeys.PREDICT:
156 # We only do this in eager mode for now since this function is used in
157 # both graph and eager mode and in the graph case we currently don't use
204 # We only do this in eager mode for now since this function is used in
205 # both graph and eager mode and in the graph case we currently don't use
229 # pylint: disable=g-complex-comprehension
377 all_variables = backend._get_variables(backend.get_graph()) # pylint: disable=protected-access
[all …]
/external/mesa3d/src/intel/vulkan/
genX_init_state.c
43 struct anv_batch *batch) in genX()
47 for (unsigned i = 2; i < ARRAY_SIZE(device->info->ppipe_subslices); i++) in genX()
48 assert(device->info->ppipe_subslices[i] == 0); in genX()
50 if (device->info->ppipe_subslices[0] == device->info->ppipe_subslices[1]) in genX()
53 if (!device->slice_hash.alloc_size) { in genX()
55 device->slice_hash = in genX()
56 anv_state_pool_alloc(&device->dynamic_state_pool, size, 64); in genX()
58 const bool flip = device->info->ppipe_subslices[0] < in genX()
59 device->info->ppipe_subslices[1]; in genX()
63 GENX(SLICE_HASH_TABLE_pack)(NULL, device->slice_hash.map, &table); in genX()
[all …]
/external/mesa3d/src/gallium/drivers/iris/
iris_draw.c
46 /* We don't need to worry about adjacency - it can only be used with in prim_is_points_or_lines()
49 return draw->mode == MESA_PRIM_POINTS || in prim_is_points_or_lines()
50 draw->mode == MESA_PRIM_LINES || in prim_is_points_or_lines()
51 draw->mode == MESA_PRIM_LINE_LOOP || in prim_is_points_or_lines()
52 draw->mode == MESA_PRIM_LINE_STRIP; in prim_is_points_or_lines()
56 * Record the current primitive mode and restart information, flagging
66 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen; in iris_update_draw_info()
67 const struct intel_device_info *devinfo = screen->devinfo; in iris_update_draw_info()
69 if (ice->state.prim_mode != info->mode) { in iris_update_draw_info()
70 ice->state.prim_mode = info->mode; in iris_update_draw_info()
[all …]
/external/tensorflow/tensorflow/python/compiler/tensorrt/test/
trt_mode_test.py
7 # http://www.apache.org/licenses/LICENSE-2.0
15 """Model script to test TF-TensorRT integration."""
17 from unittest import SkipTest # pylint: disable=g-importing-member
27 """Test squeeze on batch dim and some unary operations in TF-TRT."""
43 "non-calibration")
50 In explicit batch mode, TensorRT can convert the whole graph. In this mode
51 it is possible to manipulate the batch dimension using the squeeze op.
53 In implicit batch mode TensorRT cannot convert the whole graph. We are not
54 allowed to manipulate (squeeze) the first dimension in implicit batch mode.
64 # The first dimension of the input is squeezed and the batch size for the
[all …]
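The docstring above says squeezing the batch dimension is only possible when TF-TRT runs TensorRT in explicit batch mode; in implicit batch mode the first dimension may not be manipulated. A minimal sketch of the kind of graph being described, written against plain TensorFlow ops rather than the converter itself (shapes are illustrative):

```python
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([1, 6, 6], tf.float32)])
def squeeze_batch(x):
    # Drops the leading (batch) dimension -- representable only when the
    # batch dimension is explicit to TensorRT.
    y = tf.squeeze(x, axis=0)         # shape [6, 6]
    y = tf.tanh(y)                    # a unary op, as in the test
    return tf.expand_dims(y, axis=0)  # restore shape [1, 6, 6]

print(squeeze_batch(tf.ones([1, 6, 6])).shape)
```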
/external/cronet/net/third_party/quiche/src/quiche/quic/core/
quic_packet_writer.h
2 // Use of this source code is governed by a BSD-style license that can be
28 // Returns a heap-allocated copy of |this|.
52 // - PassThrough mode. This is the default mode. Caller calls WritePacket with
53 // caller-allocated packet buffer. Unless the writer is blocked, each call to
56 // - Batch mode. In this mode, a call to WritePacket may not cause a packet to
63 // In Batch mode, a writer manages an internal buffer, which is large enough to
65 // caller-allocated packet buffer, the writer will memcpy the buffer into the
74 // PassThrough mode:
75 // Sends the packet out to the peer, with some optional per-packet options.
80 // Batch mode:
[all …]
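The header comments contrast PassThrough mode, where each WritePacket call sends a caller-allocated buffer immediately, with Batch mode, where the writer copies packets into an internal buffer and sends them later. A conceptual Python sketch of the batch-mode idea only; this is not the QuicPacketWriter API, and all names below are illustrative:

```python
class BatchModeWriter:
    """Copies packets into an internal buffer and flushes them together."""

    def __init__(self, sock, addr, capacity=64 * 1024):
        self.sock, self.addr, self.capacity = sock, addr, capacity
        self.pending = []        # copied packet payloads
        self.buffered_bytes = 0

    def write_packet(self, payload: bytes):
        # Batch mode: copy the caller's buffer so it can be reused at once;
        # flush first if this packet would not fit.
        if self.buffered_bytes + len(payload) > self.capacity:
            self.flush()
        self.pending.append(bytes(payload))
        self.buffered_bytes += len(payload)

    def flush(self):
        # Put everything buffered so far on the wire.
        for packet in self.pending:
            self.sock.sendto(packet, self.addr)
        self.pending.clear()
        self.buffered_bytes = 0
```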
/external/tensorflow/tensorflow/python/keras/engine/
training_arrays_v1.py
7 # http://www.apache.org/licenses/LICENSE-2.0
16 # pylint: disable=protected-access
37 from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
58 mode=ModeKeys.TRAIN,
70 batch_size: Integer batch size or None if unknown.
72 verbose: 0, 1, or 2. Verbosity mode.
99 mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
114 - In TRAIN mode: `History` object.
115 - In TEST mode: Evaluation metrics.
116 - In PREDICT mode: Outputs of the Model called on inputs.
[all …]
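The docstring above states the return value depends on the loop mode: a History object in TRAIN mode, evaluation metrics in TEST mode, and model outputs in PREDICT mode. The public Keras entry points behave the same way; a small illustrative sketch with a toy model:

```python
import numpy as np
import tensorflow as tf

x = np.random.rand(32, 4).astype("float32")
y = np.random.rand(32, 1).astype("float32")

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse", metrics=["mae"])

history = model.fit(x, y, batch_size=8, epochs=1, verbose=0)  # TRAIN -> History
metrics = model.evaluate(x, y, batch_size=8, verbose=0)       # TEST  -> [loss, mae]
outputs = model.predict(x, batch_size=8, verbose=0)           # PREDICT -> predictions

print(history.history["loss"], metrics, outputs.shape)
```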
/external/mesa3d/src/gallium/drivers/asahi/
agx_state.c
3 * Copyright 2019-2020 Collabora, Ltd.
4 * Copyright 2014-2017 Broadcom
6 * SPDX-License-Identifier: MIT
73 if (rsrc->layout.tiling != AIL_TILING_TWIDDLED_COMPRESSED) in agx_legalize_compression()
77 enum pipe_format storage = rsrc->layout.format; in agx_legalize_compression()
86 * cases around XR formats, but is well-motivated and seems to work. in agx_legalize_compression()
102 ctx->stage[shader].dirty |= AGX_STAGE_DIRTY_IMAGE; in agx_set_shader_images()
108 pipe_resource_reference(&ctx->stage[shader].images[i].resource, NULL); in agx_set_shader_images()
111 ctx->stage[shader].image_mask &= in agx_set_shader_images()
119 * Driver-internal images are used by the compute blitter and are exempt in agx_set_shader_images()
[all …]
/external/igt-gpu-tools/tests/i915/
gem_streaming_writes.c
24 * Chris Wilson <chris@chris-wilson.co.uk>
55 #define BATCH 2 macro
62 static void test_streaming(int fd, int mode, int sync) in test_streaming() argument
75 } *batch; in test_streaming() local
82 switch (mode) { in test_streaming()
137 exec[BATCH].relocation_count = 2; in test_streaming()
143 batch = malloc(sizeof(*batch) * (OBJECT_SIZE / CHUNK_SIZE / 64)); in test_streaming()
147 batch[i].handle = gem_create(fd, 4096); in test_streaming()
148 batch[i].offset = 0; in test_streaming()
150 base = gem_mmap__cpu(fd, batch[i].handle, 0, 4096, PROT_WRITE); in test_streaming()
[all …]
gem_mocs_settings.c
35 #define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */
36 #define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
147 [3 ... GEN9_NUM_MOCS_ENTRIES - 1] = MOCS_PTE,
151 [0 ... GEN9_NUM_MOCS_ENTRIES - 1] = { 0x00003FFF, 0x003F, 0x1 },
158 [3 ... GEN9_NUM_MOCS_ENTRIES - 1] = MOCS_PTE,
162 [0 ... GEN9_NUM_MOCS_ENTRIES - 1] = { 0x00007FFF, 0x003F, 0x1 },
166 [0 ... GEN9_NUM_MOCS_ENTRIES - 1] = 0xFFFFFFFF,
181 table->size = ARRAY_SIZE(dirty_skylake_mocs_table); in get_mocs_settings()
182 table->table = dirty_skylake_mocs_table; in get_mocs_settings()
184 table->size = ARRAY_SIZE(skylake_mocs_table); in get_mocs_settings()
[all …]
/external/mesa3d/src/gallium/drivers/freedreno/a6xx/
fd6_vsc.cc
35 * https://gitlab.freedesktop.org/freedreno/freedreno/-/wikis/Visibility-Stream-Format
45 * https://gitlab.freedesktop.org/freedreno/freedreno/-/wikis/Visibility-Stream-Format#numbers
52 return n + (n - 1); in number_size_bits()
57 * https://gitlab.freedesktop.org/freedreno/freedreno/-/wikis/Visibility-Stream-Format#bitfields
71 (info->mode == MESA_PRIM_COUNT) ? 2 : mesa_vertices_per_prim(info->mode); in prim_count()
72 return MAX2(1, (draw->count * info->instance_count) / vtx_per_prim); in prim_count()
76 * The primitive stream uses a run-length encoding, where each packet contains a
80 * - The (compressed) bitfield of bins covered
81 * - The number of primitives with this bitset
82 * - Checksum
[all …]
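The comments describe the visibility-stream primitive encoding as run-length packets, each holding a (compressed) bitfield of the bins covered, the number of consecutive primitives sharing that bitfield, and a checksum. A hedged Python sketch of only the run-length grouping step; the bitfield compression and checksum from the wiki format are omitted, and the function name is illustrative:

```python
def rle_prim_stream(per_prim_bin_masks):
    """Group consecutive primitives that cover the same set of bins.

    per_prim_bin_masks: one integer bitmask of covered bins per primitive.
    Returns a list of (bin_mask, prim_count) packets.
    """
    packets = []
    for mask in per_prim_bin_masks:
        if packets and packets[-1][0] == mask:
            prev_mask, count = packets[-1]
            packets[-1] = (prev_mask, count + 1)
        else:
            packets.append((mask, 1))
    return packets

# Three primitives covering bins {0,1}, then two covering bin {2}.
print(rle_prim_stream([0b011, 0b011, 0b011, 0b100, 0b100]))  # [(3, 3), (4, 2)]
```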
/external/mesa3d/src/gallium/drivers/panfrost/
pan_cmdstream.h
104 return info->primitive_restart && in panfrost_is_implicit_prim_restart()
105 info->restart_index == (unsigned)BITFIELD_MASK(info->index_size * 8); in panfrost_is_implicit_prim_restart()
115 unsigned rt_mask = ctx->fb_rt_mask; in pan_allow_forward_pixel_to_kill()
116 uint64_t rt_written = (fs->info.outputs_written >> FRAG_RESULT_DATA0) & in pan_allow_forward_pixel_to_kill()
117 ctx->blend->enabled_mask; in pan_allow_forward_pixel_to_kill()
118 bool blend_reads_dest = (ctx->blend->load_dest_mask & rt_mask); in pan_allow_forward_pixel_to_kill()
119 bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage; in pan_allow_forward_pixel_to_kill()
121 return fs->info.fs.can_fpk && !(rt_mask & ~rt_written) && in pan_allow_forward_pixel_to_kill()
137 const struct panfrost_blend_state *so = ctx->blend; in panfrost_overdraw_alpha()
139 for (unsigned i = 0; i < ctx->pipe_framebuffer.nr_cbufs; ++i) { in panfrost_overdraw_alpha()
[all …]
/external/angle/src/tests/test_utils/runner/
README.md
9 * multi-process execution
11 ## Command-Line Arguments
14 following additional command-line arguments:
16 * `--batch-size` limits the number of tests to run in each batch
17 * `--batch-timeout` limits the amount of time spent in each batch
18 * `--bot-mode` enables multi-process execution and test batching
19 * `--debug-test-groups` dumps the test config categories when using `bot-mode`
20 * `--filter-file` allows passing a larger `gtest_filter` via a file
21 * `--histogram-json-file` outputs a [formatted JSON file][HistogramSet] for perf dashboards
22 * `--max-processes` limits the number of simultaneous processes
[all …]
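The README fragment lists the runner's extra command-line arguments. A hedged example of driving a test binary with a few of them from Python; the binary name and flag values are placeholders, while the flag names come from the list above:

```python
import subprocess

cmd = [
    "./angle_end2end_tests",          # placeholder binary name
    "--bot-mode",                     # multi-process execution and batching
    "--batch-size=64",                # tests per batch
    "--batch-timeout=300",            # time limit per batch (value illustrative)
    "--max-processes=4",              # simultaneous worker processes
    "--histogram-json-file=perf.json",
]
subprocess.run(cmd, check=True)
```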
/external/google-cloud-java/java-batch/proto-google-cloud-batch-v1alpha/src/main/java/com/google/cloud/batch/v1alpha/
VolumeOrBuilder.java
8 * https://www.apache.org/licenses/LICENSE-2.0
17 // source: google/cloud/batch/v1alpha/volume.proto
19 package com.google.cloud.batch.v1alpha;
23 // @@protoc_insertion_point(interface_extends:google.cloud.batch.v1alpha.Volume)
34 * <code>.google.cloud.batch.v1alpha.NFS nfs = 1;</code>
47 * <code>.google.cloud.batch.v1alpha.NFS nfs = 1;</code>
51 com.google.cloud.batch.v1alpha.NFS getNfs(); in getNfs()
60 * <code>.google.cloud.batch.v1alpha.NFS nfs = 1;</code>
62 com.google.cloud.batch.v1alpha.NFSOrBuilder getNfsOrBuilder(); in getNfsOrBuilder()
71 * <code>.google.cloud.batch.v1alpha.PD pd = 2 [deprecated = true];</code>
[all …]
/external/tensorflow/tensorflow/python/ops/
batch_norm_benchmark.py
7 # http://www.apache.org/licenses/LICENSE-2.0
15 """End-to-end benchmark for batch normalization."""
32 import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
37 """Fused kernel for batch normalization."""
40 # pylint: disable=protected-access
43 # pylint: enable=protected-access
47 # batch_norm = (tensor - mean) * tf.math.rsqrt(variance + 0.001)
52 """Python implementation of batch normalization."""
58 batch_norm = (tensor - mean) * math_ops.rsqrt(variance + 0.001)
64 def build_graph(device, input_shape, axes, num_layers, mode, scale, train): argument
[all …]
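The benchmark compares a fused batch-norm kernel against the plain formula batch_norm = (tensor - mean) * rsqrt(variance + 0.001). A small sketch checking that this manual formula matches tf.nn.batch_normalization with the same epsilon (shapes are illustrative):

```python
import tensorflow as tf

x = tf.random.normal([8, 16])
mean, variance = tf.nn.moments(x, axes=[0])

# Manual formula from the benchmark (no scale or offset).
manual = (x - mean) * tf.math.rsqrt(variance + 0.001)

# Library op with the same epsilon and no scale/offset.
library = tf.nn.batch_normalization(
    x, mean, variance, offset=None, scale=None, variance_epsilon=0.001)

print(tf.reduce_max(tf.abs(manual - library)).numpy())  # ~0.0
```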
/external/mesa3d/src/gallium/drivers/freedreno/a5xx/
fd5_emit.h
42 /* grouped together emit-state for prog/vertex/state emit: */
61 * figure that out up-front and stash it in the emit.
77 return fd5_pipe2color(surf->format); in fd5_emit_format()
83 if (!emit->vs) { in fd5_emit_get_vp()
87 if (emit->binning_pass && in fd5_emit_get_vp()
88 !emit->prog->vs->stream_output.num_outputs) in fd5_emit_get_vp()
89 emit->vs = emit->prog->bs; in fd5_emit_get_vp()
91 emit->vs = emit->prog->vs; in fd5_emit_get_vp()
93 return emit->vs; in fd5_emit_get_vp()
99 if (!emit->fs) { in fd5_emit_get_fp()
[all …]
