/external/autotest/frontend/tko/ |
D | rpc_interface.py |
    3   from django.db import models as dbmodels
    6   from autotest_lib.frontend.afe import models as afe_models, readonly_connection
    7   from autotest_lib.frontend.tko import models, tko_rpc_utils
    15  models.TestView.list_objects(filter_data))
    19  return models.TestView.query_count(filter_data)
    47  query = models.TestView.objects.get_query_set_with_joins(filter_data)
    49  query = models.TestView.query_objects(filter_data, initial_query=query,
    51  count_alias, count_sql = models.TestView.objects.get_count_sql(query)
    55  query = models.TestView.apply_presentation(query, filter_data)
    68  query = models.TestView.objects.get_query_set_with_joins(filter_data)
    [all …]
|
D | models_test.py |
    9   from autotest_lib.frontend.tko import models
    16  self.machine1 = models.Machine.objects.create(hostname='myhost')
    17  self.good_status = models.Status.objects.create(word='GOOD')
    19  self.kernel1 = models.Kernel.objects.create(kernel_hash=kernel_name,
    22  self.job1 = models.Job.objects.create(
    26  self.job1_test1 = models.Test.objects.create(
    38  models.Test.objects.populate_relationships(
    39  [test], models.TestAttribute, 'attribute_list')
|
/external/XNNPACK/bench/ |
D | f32-gemm-e2e.cc |
    26   models::ExecutionPlanFactory model_factory, in GEMMEnd2EndBenchmark()
    76   …static void f32_gemm_4x12__aarch64_neonfma_cortex_a53(benchmark::State& state, models::ExecutionPl… in f32_gemm_4x12__aarch64_neonfma_cortex_a53()
    85   …static void f32_gemm_4x8__aarch64_neonfma_cortex_a53(benchmark::State& state, models::ExecutionPla… in f32_gemm_4x8__aarch64_neonfma_cortex_a53()
    94   …static void f32_gemm_4x8__aarch64_neonfma_cortex_a55(benchmark::State& state, models::ExecutionPla… in f32_gemm_4x8__aarch64_neonfma_cortex_a55()
    103  …static void f32_gemm_4x8__aarch64_neonfma_cortex_a57(benchmark::State& state, models::ExecutionPla… in f32_gemm_4x8__aarch64_neonfma_cortex_a57()
    112  …static void f32_gemm_4x8__aarch64_neonfma_cortex_a75(benchmark::State& state, models::ExecutionPla… in f32_gemm_4x8__aarch64_neonfma_cortex_a75()
    121  …static void f32_gemm_4x8__aarch64_neonfma_ld64(benchmark::State& state, models::ExecutionPlanFacto… in f32_gemm_4x8__aarch64_neonfma_ld64()
    130  …static void f32_gemm_4x8__aarch64_neonfma_ld128(benchmark::State& state, models::ExecutionPlanFact… in f32_gemm_4x8__aarch64_neonfma_ld128()
    139  …static void f32_gemm_5x8__aarch64_neonfma_cortex_a57(benchmark::State& state, models::ExecutionPla… in f32_gemm_5x8__aarch64_neonfma_cortex_a57()
    148  …static void f32_gemm_5x8__aarch64_neonfma_cortex_a75(benchmark::State& state, models::ExecutionPla… in f32_gemm_5x8__aarch64_neonfma_cortex_a75()
    [all …]
|
D | qs8-gemm-e2e.cc |
    28   models::ExecutionPlanFactory model_factory, in GEMMEnd2EndBenchmark()
    79   …static void qs8_gemm_minmax_ukernel_1x16c4__aarch64_neondot_ld32(benchmark::State& state, models::… in qs8_gemm_minmax_ukernel_1x16c4__aarch64_neondot_ld32()
    88   …static void qs8_gemm_minmax_ukernel_1x16c4__aarch64_neondot_ld64(benchmark::State& state, models::… in qs8_gemm_minmax_ukernel_1x16c4__aarch64_neondot_ld64()
    99   …nmax_ukernel_4x16c4__aarch64_neondot_cortex_a55(benchmark::State& state, models::ExecutionPlanFact… in qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_cortex_a55()
    108  …static void qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld32(benchmark::State& state, models::… in qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld32()
    117  …static void qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64(benchmark::State& state, models::… in qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64()
    127  …m_minmax_ukernel_2x8c8__aarch64_neon_mull_padal(benchmark::State& state, models::ExecutionPlanFact… in qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mull_padal()
    136  …m_minmax_ukernel_2x8c8__aarch64_neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFact… in qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mlal_padal()
    145  …_minmax_ukernel_2x8c16__aarch64_neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFact… in qs8_gemm_minmax_ukernel_2x8c16__aarch64_neon_mlal_padal()
    169  …static void qs8_gemm_minmax_ukernel_1x8__neon_mlal_lane(benchmark::State& state, models::Execution… in BENCHMARK_QS8_END2END()
    [all …]
|
D | f32-dwconv-e2e.cc |
    25   models::ExecutionPlanFactory model_factory, in DWConvEnd2EndBenchmark()
    74   …static void f32_dwconv_up4x9__aarch64_neonfma(benchmark::State& state, models::ExecutionPlanFactor… in f32_dwconv_up4x9__aarch64_neonfma()
    80   …static void f32_dwconv_up4x9__aarch64_neonfma_cortex_a55(benchmark::State& state, models::Executio… in f32_dwconv_up4x9__aarch64_neonfma_cortex_a55()
    91   static void f32_dwconv_up4x9__neon(benchmark::State& state, models::ExecutionPlanFactory model) { in f32_dwconv_up4x9__neon()
    97   …static void f32_dwconv_up4x9__neon_acc2(benchmark::State& state, models::ExecutionPlanFactory mode… in f32_dwconv_up4x9__neon_acc2()
    103  static void f32_dwconv_up8x9__neon(benchmark::State& state, models::ExecutionPlanFactory model) { in f32_dwconv_up8x9__neon()
    109  …static void f32_dwconv_up8x9__neon_acc2(benchmark::State& state, models::ExecutionPlanFactory mode… in f32_dwconv_up8x9__neon_acc2()
    115  …static void f32_dwconv_up4x9__neonfma(benchmark::State& state, models::ExecutionPlanFactory model)… in f32_dwconv_up4x9__neonfma()
    121  …static void f32_dwconv_up4x9__neonfma_acc2(benchmark::State& state, models::ExecutionPlanFactory m… in f32_dwconv_up4x9__neonfma_acc2()
    127  …static void f32_dwconv_up8x9__neonfma(benchmark::State& state, models::ExecutionPlanFactory model)… in f32_dwconv_up8x9__neonfma()
    [all …]
|
D | end2end.cc |
    22  models::ExecutionPlanFactory model_factory) in End2EndBenchmark()
    56  End2EndBenchmark(state, models::FP32MobileNetV1); in FP32MobileNetV1()
    60  End2EndBenchmark(state, models::FP32MobileNetV2); in FP32MobileNetV2()
    64  End2EndBenchmark(state, models::FP32MobileNetV3Large); in FP32MobileNetV3Large()
    68  End2EndBenchmark(state, models::FP32MobileNetV3Small); in FP32MobileNetV3Small()
    73  return models::FP32SparseMobileNetV1(0.8f, threadpool); in FP32Sparse80MobileNetV1()
    79  return models::FP32SparseMobileNetV2(0.8f, threadpool); in FP32Sparse80MobileNetV2()
    85  return models::FP32SparseMobileNetV3Large(0.8f, threadpool); in FP32Sparse80MobileNetV3Large()
    91  return models::FP32SparseMobileNetV3Small(0.8f, threadpool); in FP32Sparse80MobileNetV3Small()
    96  End2EndBenchmark(state, models::FP16MobileNetV1); in FP16MobileNetV1()
    [all …]
|
D | end2end.h |
    13  …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v1, models::FP32MobileNetV1)->Unit(benchmark::kMicroseco…
    14  …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v2, models::FP32MobileNetV2)->Unit(benchmark::kMicroseco…
    15  …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v3_large, models::FP32MobileNetV3Large)->Unit(benchmark:…
    16  …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v3_small, models::FP32MobileNetV3Small)->Unit(benchmark:…
    19  …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v1, models::QS8MobileNetV1)->Unit(benchmark::kMicrosecon…
    20  …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v2, models::QS8MobileNetV2)->Unit(benchmark::kMicrosecon…
|
/external/autotest/frontend/afe/ |
D | rpc_interface.py |
    45   from django.db.models import Count
    57   from autotest_lib.frontend.afe import models
    59   from autotest_lib.frontend.tko import models as tko_models
    104  label_model = models.Label.smart_get(id)
    124  label_model = models.Label.smart_get(id)
    135  hosts.append(models.Host.smart_get(h.id))
    159  label = models.Label.add_object(name=name, **kwargs)
    181  label = models.Label.smart_get(id)
    183  label = models.StaticLabel.smart_get(label.name)
    185  host_objs = models.Host.smart_get_bulk(hosts)
    [all …]
|
D | models_test.py |
    10  from autotest_lib.frontend.afe import models, model_logic
    31  everyone_acl = models.AclGroup.objects.get(name='Everyone')
    38  models.AclGroup.on_host_membership_change()
    55  models.Host.objects.populate_relationships(
    56  [host], models.HostAttribute, 'attribute_list')
    61  previous_config = models.RESPECT_STATIC_ATTRIBUTES
    62  models.RESPECT_STATIC_ATTRIBUTES = False
    63  host1 = models.Host.objects.create(hostname='test_host1')
    73  models.RESPECT_STATIC_ATTRIBUTES = previous_config
    77  previous_config = models.RESPECT_STATIC_ATTRIBUTES
    [all …]
|
D | rpc_interface_unittest.py |
    18   from autotest_lib.frontend.afe import models
    33   _hqe_status = models.HostQueueEntry.Status
    96   return models.Job.objects.get(id=job_id)
    101  label2 = models.Label.objects.create(name='bluetooth', platform=False)
    111  host2 = models.Host.objects.create(hostname='test_host2', leased=False)
    121  host2 = models.Host.objects.create(hostname='test_host2', leased=False)
    126  self.mox.StubOutWithMock(models.Host, '_assign_to_shard_nothing_helper')
    130  models.Host._assign_to_shard_nothing_helper().WithSideEffects(
    136  host2 = models.Host.smart_get(host2.id)
    144  leased_host = models.Host.objects.create(hostname='leased_host',
    [all …]
|
D | frontend_test_utils.py |
    6   from autotest_lib.frontend.afe import models, model_attributes
    14  if models.DroneSet.drone_sets_enabled():
    15  models.DroneSet.objects.create(
    16  name=models.DroneSet.default_drone_set_name())
    18  acl_group = models.AclGroup.objects.create(name='my_acl')
    19  acl_group.users.add(models.User.current_user())
    21  self.hosts = [models.Host.objects.create(hostname=hostname)
    27  models.AclGroup.smart_get('Everyone').hosts = []
    29  self.labels = [models.Label.objects.create(name=name) for name in
    33  platform = models.Label.objects.create(name='myplatform', platform=True)
    [all …]
|
D | shard_heartbeat_unittest.py |
    14  from autotest_lib.frontend.afe import models
    34  assigned = models.Job.assign_to_shard(shard, [])
    45  assigned = models.Job.assign_to_shard(shard, [])
    54  assigned_jobs = models.Job.assign_to_shard(shard, [known_job.id])
    65  assigned = models.Job.assign_to_shard(shard, [])
    75  assigned = models.Job.assign_to_shard(shard, [])
    81  old = models.Job.SKIP_JOBS_CREATED_BEFORE
    83  models.Job.SKIP_JOBS_CREATED_BEFORE = value
    86  models.Job.SKIP_JOBS_CREATED_BEFORE = old
    96  job = models.Job.objects.create(
    [all …]
|
D | rpc_utils.py |
    20   from autotest_lib.frontend.afe import models, model_logic
    131  % models.HostQueueEntry.Status.QUEUED)
    191  initial_query = models.Host.valid_objects.all()
    193  initial_query = models.Host.objects.all()
    196  hosts = models.Host.get_hosts_with_labels(
    201  return models.Host.query_objects(filter_data, initial_query=hosts)
    202  except models.Label.DoesNotExist:
    203  return models.Host.objects.none()
    256  test_objects = [models.Test.smart_get(test) for test in tests]
    260  profiler_objects = [models.Profiler.smart_get(profiler)
    [all …]
|
/external/tensorflow/tensorflow/lite/g3doc/guide/ |
D | hosted_models.md |
    1   # Hosted models
    3   The following is an incomplete list of pre-trained models optimized to work with
    6   To get started choosing a model, visit <a href="../models">Models</a> page with
    13  models to find the optimal balance between size, performance, and accuracy.
    18  <a href="../models/image_classification/overview.md">Image classification</a>.
    20  [how to integrate image classification models](../inference_with_metadata/task_library/image_classi…
    23  ### Quantized models
    26  classification models offer the smallest model size and fastest performance, at
    31  [quantized models](https://tfhub.dev/s?deployment-format=lite&module-type=image-classification&q=qu…
    36  …pdf), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_…
    [all …]
|
D | get_started.md |
    4   models on mobile, embedded, and IoT devices. The following guide walks through
    15  ways to obtain a TensorFlow model, from using pre-trained models to training
    24  all models can be converted. For details, read about the
    30  The TensorFlow Lite team provides a set of pre-trained models that solve a
    31  variety of machine learning problems. These models have been converted to work
    34  The pre-trained models include:
    36  * [Image classification](../models/image_classification/overview.md)
    37  * [Object detection](../models/object_detection/overview.md)
    38  * [Smart reply](../models/smart_reply/overview.md)
    39  * [Pose estimation](../models/pose_estimation/overview.md)
    [all …]
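The get_started.md lines above describe obtaining a trained TensorFlow model and converting it to the TensorFlow Lite format. As a minimal sketch of that conversion step (the toy model and output path are illustrative only and do not come from the guide):

```python
# Minimal sketch: convert a trained Keras model to a .tflite flatbuffer.
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation="softmax", input_shape=(784,)),
])

converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()  # bytes of the serialized TFLite model

with open("model.tflite", "wb") as f:
    f.write(tflite_model)
```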
|
/external/tensorflow/tensorflow/python/keras/ |
D | keras_parameterized_test.py |
    37   models = []
    47   models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
    62   self.assertTrue(models[0]._is_graph_network)
    63   self.assertFalse(models[1]._is_graph_network)
    64   self.assertNotIsInstance(models[0], keras.models.Sequential)
    65   self.assertNotIsInstance(models[1], keras.models.Sequential)
    66   self.assertIsInstance(models[2], keras.models.Sequential)
    76   models = []
    90   models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))
    111  self.assertTrue(models[0]._is_graph_network)
    [all …]
|
D | models_test.py |
    34   from tensorflow.python.keras import models
    106  keras.models._clone_sequential_model, layer_fn=models.share_weights)
    108  clone_fn = keras.models.clone_model
    111  model = models.Sequential(_get_layers(input_shape, add_input_layer))
    162  keras.models._clone_functional_model, layer_fn=models.share_weights)
    164  clone_fn = keras.models.clone_model
    181  model = keras.models.Model([input_a, input_b], outputs)
    196  new_model = keras.models.clone_model(
    228  keras.models._clone_functional_model, layer_fn=models.share_weights)
    230  clone_fn = keras.models.clone_model
    [all …]
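The public API these tests exercise is `keras.models.clone_model`. A minimal sketch of its use (the tiny functional model is illustrative only):

```python
# Minimal sketch of keras.models.clone_model usage.
import tensorflow as tf
from tensorflow import keras

inputs = keras.Input(shape=(3,))
outputs = keras.layers.Dense(4)(inputs)
model = keras.Model(inputs, outputs)

# clone_model rebuilds the same architecture with freshly initialized weights;
# copy the weights over explicitly if an exact duplicate is wanted.
clone = keras.models.clone_model(model)
clone.set_weights(model.get_weights())
```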
|
/external/tensorflow/tensorflow/lite/delegates/flex/ |
D | build_def.bzl |
    27  models,
    34  models: TFLite models to interpret.
    45  if type(models) != type([]):
    46  models = [models]
    48  # List all flex ops from models.
    50  ["$(location %s)" % f for f in models],
    65  srcs = models,
    68  message = "Listing flex ops from %s..." % ",".join(models),
    91  models = [],
    95  """A rule to generate a flex delegate with only ops to run listed models.
    [all …]
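The build rule above scans the listed TFLite models for the "flex" (select TensorFlow) ops they use and generates a delegate containing only those ops. A sketch of how such a model is produced on the conversion side (the toy model is illustrative; none of this comes from build_def.bzl):

```python
# Sketch: convert a model that may fall back to flex (select TensorFlow) ops.
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # use builtin TFLite ops where possible
    tf.lite.OpsSet.SELECT_TF_OPS,    # fall back to TensorFlow (flex) ops otherwise
]
flex_model = converter.convert()
```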
|
/external/tensorflow/tensorflow/ |
D | workspace0.bzl |
    14  build_file = "//:models.BUILD",
    17  "https://storage.googleapis.com/download.tensorflow.org/models/inception_v1.zip",
    23  build_file = "//:models.BUILD",
    26  …"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_a…
    32  build_file = "//:models.BUILD",
    35  "https://storage.googleapis.com/download.tensorflow.org/models/mobile_multibox_v1a.zip",
    41  build_file = "//:models.BUILD",
    44  "https://storage.googleapis.com/download.tensorflow.org/models/stylize_v1.zip",
    50  build_file = "//:models.BUILD",
    53  …"https://storage.googleapis.com/download.tensorflow.org/models/speech_commands_v0.01.zip",
|
/external/swiftshader/third_party/SPIRV-Tools/source/val/ |
D | validate_mode_setting.cpp |
    290  const auto* models = _.GetExecutionModels(entry_point_id); in ValidateExecutionMode() local
    299  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    309  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    341  models->begin(), models->end(), in ValidateExecutionMode()
    352  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    369  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    407  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    420  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    431  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
|
/external/deqp-deps/SPIRV-Tools/source/val/ |
D | validate_mode_setting.cpp |
    291  const auto* models = _.GetExecutionModels(entry_point_id); in ValidateExecutionMode() local
    300  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    310  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    342  models->begin(), models->end(), in ValidateExecutionMode()
    353  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    370  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    408  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    421  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    432  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
|
/external/angle/third_party/vulkan-deps/spirv-tools/src/source/val/ |
D | validate_mode_setting.cpp |
    291  const auto* models = _.GetExecutionModels(entry_point_id); in ValidateExecutionMode() local
    300  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    310  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    342  models->begin(), models->end(), in ValidateExecutionMode()
    353  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    370  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    408  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    421  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
    432  if (!std::all_of(models->begin(), models->end(), in ValidateExecutionMode()
|
/external/python/oauth2client/tests/contrib/django_util/ |
D | models.py |
    17  from django.contrib.auth.models import User
    18  from django.db import models
    20  from oauth2client.contrib.django_util.models import CredentialsField
    23  class CredentialsModel(models.Model):
    24  user_id = models.OneToOneField(User)
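The matched lines above cover most of this sample model. A sketch of the likely complete form, based only on those lines (the `credential` field name is an assumption, not taken from the listing):

```python
# Sketch of the sample Django model suggested by the matched lines above.
from django.contrib.auth.models import User
from django.db import models

from oauth2client.contrib.django_util.models import CredentialsField


class CredentialsModel(models.Model):
    user_id = models.OneToOneField(User)  # one stored credential per Django user
    credential = CredentialsField()       # assumed: field holding the OAuth2 credentials
```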
|
/external/tensorflow/tensorflow/examples/speech_commands/ |
D | models_test.py |
    23   from tensorflow.examples.speech_commands import models
    31   return models.prepare_model_settings(
    42   models.prepare_model_settings(
    56   logits, dropout_rate = models.create_model(
    68   logits = models.create_model(fingerprint_input, model_settings, "conv",
    78   logits, dropout_rate = models.create_model(
    90   logits, dropout_rate = models.create_model(
    102  models.create_model(fingerprint_input, model_settings,
    111  logits, dropout_rate = models.create_model(
|
/external/tensorflow/tensorflow/python/keras/saving/ |
D | save_test.py |
    173  model = keras.models.Model(input_layers, output)
    216  model = keras.models.Model(input_layers, output)
    401  keras.models.save_model(
    405  loaded_model = keras.models.load_model(saved_model_dir)
    420  model = keras.models.Sequential()
    443  new_model = keras.models.load_model(saved_model_dir)
    455  model = keras.models.Sequential()
    464  keras.models.save_model(model, saved_model_dir, save_format=save_format)
    466  new_model = keras.models.load_model(saved_model_dir)
    485  model = keras.models.Sequential()
    [all …]
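These tests exercise the save/load round trip through `keras.models.save_model` and `keras.models.load_model`. A minimal sketch (the model and path are illustrative only):

```python
# Minimal sketch of the Keras save/load round trip.
import tensorflow as tf
from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

keras.models.save_model(model, "/tmp/saved_model_dir", save_format="tf")
restored = keras.models.load_model("/tmp/saved_model_dir")
```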
|