| /external/tensorflow/tensorflow/lite/g3doc/examples/trained/ |
| D | index.md | 1 # Pre-trained models for TensorFlow Lite 3 There are a variety of already trained, open source models you can use 5 Using pre-trained TensorFlow Lite models lets you add machine learning 8 models for use with TensorFlow Lite. 10 You can start browsing TensorFlow Lite models right away based on general use 12 larger set of models on [TensorFlow Hub](https://tfhub.dev/s?deployment- 15 **Important:** TensorFlow Hub lists both regular TensorFlow models and 16 TensorFlow Lite format models. These model formats are not interchangeable. 17 TensorFlow models can be converted into TensorFlow Lite models, but that process 25 to discover models for use with TensorFlow Lite: [all …]
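The `index.md` excerpt above points readers at TensorFlow Hub for ready-made `.tflite` files. As a minimal sketch of what using one of those pre-trained models looks like in practice (the model file name and dummy input are placeholders, not taken from the doc), inference with the TensorFlow Lite Python interpreter goes roughly like this:

```python
# Minimal sketch: run a downloaded pre-trained .tflite classifier with the
# TensorFlow Lite Python interpreter. The model path is a placeholder.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="mobilenet_v2_1.0_224.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed a dummy batch shaped and typed like the model's input tensor.
dummy = np.zeros(input_details[0]["shape"], dtype=input_details[0]["dtype"])
interpreter.set_tensor(input_details[0]["index"], dummy)
interpreter.invoke()

scores = interpreter.get_tensor(output_details[0]["index"])
print("top class index:", int(np.argmax(scores)))
```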
|
| /external/tensorflow/tensorflow/lite/g3doc/examples/ |
| D | _index.yaml | 3 title: Models 10 Overview of models for TensorFlow Lite 16 TensorFlow Lite uses TensorFlow models converted into a smaller, more efficient machine 17 learning (ML) model format. You can use pre-trained models with TensorFlow Lite, modify 18 existing models, or build your own TensorFlow models and then convert them to 19 TensorFlow Lite format. TensorFlow Lite models can perform almost any task a regular 32 <a href="/lite/models/convert/index"><h3 class="no-link">Have a TensorFlow model?</h3></a> 33 Skip to the <a href="/lite/models/convert/index">Convert</a> section for information about 35 path: /lite/models/convert/index 40 For guidance on getting models for your use case, [all …]
|
| /external/XNNPACK/bench/ |
| D | qs8-gemm-e2e.cc | 18 #include "models/models.h" 29 models::ExecutionPlanFactory model_factory, in GEMMEnd2EndBenchmark() 83 …static void qs8_gemm_4x8c4__aarch32_neondot_cortex_a55(benchmark::State& state, models::ExecutionP… in qs8_gemm_4x8c4__aarch32_neondot_cortex_a55() 93 …static void qs8_gemm_4x8c4__aarch32_neondot_ld64(benchmark::State& state, models::ExecutionPlanFac… in qs8_gemm_4x8c4__aarch32_neondot_ld64() 110 …static void qs8_gemm_4x8__aarch32_neon_mlal_lane_cortex_a53(benchmark::State& state, models::Execu… in BENCHMARK_QS8_END2END() 120 …static void qs8_gemm_4x8__aarch32_neon_mlal_lane_prfm_cortex_a53(benchmark::State& state, models::… in qs8_gemm_4x8__aarch32_neon_mlal_lane_prfm_cortex_a53() 130 …static void qs8_gemm_4x8__aarch32_neon_mlal_lane_cortex_a7(benchmark::State& state, models::Execut… in qs8_gemm_4x8__aarch32_neon_mlal_lane_cortex_a7() 140 …static void qs8_gemm_4x8__aarch32_neon_mlal_lane_prfm_cortex_a7(benchmark::State& state, models::E… in qs8_gemm_4x8__aarch32_neon_mlal_lane_prfm_cortex_a7() 150 …static void qs8_gemm_4x8__aarch32_neon_mlal_lane_ld64(benchmark::State& state, models::ExecutionPl… in qs8_gemm_4x8__aarch32_neon_mlal_lane_ld64() 160 …static void qs8_gemm_4x8__aarch32_neon_mlal_lane_prfm_ld64(benchmark::State& state, models::Execut… in qs8_gemm_4x8__aarch32_neon_mlal_lane_prfm_ld64() [all …]
|
| D | qu8-gemm-e2e.cc | 18 #include "models/models.h" 29 models::ExecutionPlanFactory model_factory, in GEMMEnd2EndBenchmark() 82 …static void qu8_gemm_4x8__aarch32_neon_mlal_lane_cortex_a53(benchmark::State& state, models::Execu… in qu8_gemm_4x8__aarch32_neon_mlal_lane_cortex_a53() 92 …static void qu8_gemm_4x8__aarch32_neon_mlal_lane_prfm_cortex_a53(benchmark::State& state, models::… in qu8_gemm_4x8__aarch32_neon_mlal_lane_prfm_cortex_a53() 102 …static void qu8_gemm_4x8__aarch32_neon_mlal_lane_cortex_a7(benchmark::State& state, models::Execut… in qu8_gemm_4x8__aarch32_neon_mlal_lane_cortex_a7() 112 …static void qu8_gemm_4x8__aarch32_neon_mlal_lane_prfm_cortex_a7(benchmark::State& state, models::E… in qu8_gemm_4x8__aarch32_neon_mlal_lane_prfm_cortex_a7() 122 …static void qu8_gemm_4x8__aarch32_neon_mlal_lane_ld64(benchmark::State& state, models::ExecutionPl… in qu8_gemm_4x8__aarch32_neon_mlal_lane_ld64() 132 …static void qu8_gemm_4x8__aarch32_neon_mlal_lane_prfm_ld64(benchmark::State& state, models::Execut… in qu8_gemm_4x8__aarch32_neon_mlal_lane_prfm_ld64() 152 …static void qu8_gemm_4x16c4__aarch64_neondot_cortex_a55(benchmark::State& state, models::Execution… in BENCHMARK_QU8_END2END() 162 …static void qu8_gemm_4x16c4__aarch64_neondot_ld128(benchmark::State& state, models::ExecutionPlanF… in qu8_gemm_4x16c4__aarch64_neondot_ld128() [all …]
|
| D | qs8-dwconv-e2e.cc | 15 #include "models/models.h" 25 models::ExecutionPlanFactory model_factory, in DWConvEnd2EndBenchmark() 77 …static void qs8_dwconv_up8x9__neon_mul8_ld64(benchmark::State& state, models::ExecutionPlanFactory… in qs8_dwconv_up8x9__neon_mul8_ld64() 83 …static void qs8_dwconv_up16x9__neon_mul8_ld64(benchmark::State& state, models::ExecutionPlanFactor… in qs8_dwconv_up16x9__neon_mul8_ld64() 89 …static void qs8_dwconv_up16x9__neon_mul8_ld128(benchmark::State& state, models::ExecutionPlanFacto… in qs8_dwconv_up16x9__neon_mul8_ld128() 95 …static void qs8_dwconv_up8x9__neon_mla8_ld64(benchmark::State& state, models::ExecutionPlanFactory… in qs8_dwconv_up8x9__neon_mla8_ld64() 101 …static void qs8_dwconv_up16x9__neon_mla8_ld64(benchmark::State& state, models::ExecutionPlanFactor… in qs8_dwconv_up16x9__neon_mla8_ld64() 107 …static void qs8_dwconv_up16x9__neon_mla8_ld128(benchmark::State& state, models::ExecutionPlanFacto… in qs8_dwconv_up16x9__neon_mla8_ld128() 113 …static void qs8_dwconv_up8x9__neon_mul16(benchmark::State& state, models::ExecutionPlanFactory mod… in qs8_dwconv_up8x9__neon_mul16() 119 …static void qs8_dwconv_up16x9__neon_mul16(benchmark::State& state, models::ExecutionPlanFactory mo… in qs8_dwconv_up16x9__neon_mul16() [all …]
|
| D | qu8-dwconv-e2e.cc | 17 #include "models/models.h" 27 models::ExecutionPlanFactory model_factory, in DWConvEnd2EndBenchmark() 79 …static void qu8_dwconv_up8x9__neon_mul8(benchmark::State& state, models::ExecutionPlanFactory mode… in qu8_dwconv_up8x9__neon_mul8() 85 …static void qu8_dwconv_up16x9__neon_mul8(benchmark::State& state, models::ExecutionPlanFactory mod… in qu8_dwconv_up16x9__neon_mul8() 91 …static void qu8_dwconv_up24x9__neon_mul8(benchmark::State& state, models::ExecutionPlanFactory mod… in qu8_dwconv_up24x9__neon_mul8() 97 …static void qu8_dwconv_up32x9__neon_mul8(benchmark::State& state, models::ExecutionPlanFactory mod… in qu8_dwconv_up32x9__neon_mul8() 103 …static void qu8_dwconv_up8x9__neon_mul16(benchmark::State& state, models::ExecutionPlanFactory mod… in qu8_dwconv_up8x9__neon_mul16() 109 …static void qu8_dwconv_up16x9__neon_mul16(benchmark::State& state, models::ExecutionPlanFactory mo… in qu8_dwconv_up16x9__neon_mul16() 115 …static void qu8_dwconv_up24x9__neon_mul16(benchmark::State& state, models::ExecutionPlanFactory mo… in qu8_dwconv_up24x9__neon_mul16() 121 …static void qu8_dwconv_up32x9__neon_mul16(benchmark::State& state, models::ExecutionPlanFactory mo… in qu8_dwconv_up32x9__neon_mul16() [all …]
|
| D | f32-dwconv-e2e.cc | 18 #include "models/models.h" 28 models::ExecutionPlanFactory model_factory, in DWConvEnd2EndBenchmark() 83 …static void f32_dwconv_up4x9__aarch64_neonfma(benchmark::State& state, models::ExecutionPlanFactor… in f32_dwconv_up4x9__aarch64_neonfma() 91 …static void f32_dwconv_up4x9__aarch64_neonfma_cortex_a55(benchmark::State& state, models::Executio… in f32_dwconv_up4x9__aarch64_neonfma_cortex_a55() 104 static void f32_dwconv_up4x9__neon(benchmark::State& state, models::ExecutionPlanFactory model) { in f32_dwconv_up4x9__neon() 112 …static void f32_dwconv_up4x9__neon_acc2(benchmark::State& state, models::ExecutionPlanFactory mode… in f32_dwconv_up4x9__neon_acc2() 120 static void f32_dwconv_up8x9__neon(benchmark::State& state, models::ExecutionPlanFactory model) { in f32_dwconv_up8x9__neon() 128 …static void f32_dwconv_up8x9__neon_acc2(benchmark::State& state, models::ExecutionPlanFactory mode… in f32_dwconv_up8x9__neon_acc2() 136 static void f32_dwconv_up16x9__neon(benchmark::State& state, models::ExecutionPlanFactory model) { in f32_dwconv_up16x9__neon() 144 …static void f32_dwconv_up16x9__neon_acc2(benchmark::State& state, models::ExecutionPlanFactory mod… in f32_dwconv_up16x9__neon_acc2() [all …]
|
| D | f32-gemm-e2e.cc | 18 #include "models/models.h" 29 models::ExecutionPlanFactory model_factory, in GEMMEnd2EndBenchmark() 110 models::ExecutionPlanFactory model_factory, in GEMMEnd2EndBenchmark() 167 …static void f32_gemm_4x2__aarch64_neonfma_cortex_a75(benchmark::State& state, models::ExecutionPla… in f32_gemm_4x2__aarch64_neonfma_cortex_a75() 178 …static void f32_gemm_4x2__aarch64_neonfma_prfm_cortex_a75(benchmark::State& state, models::Executi… in f32_gemm_4x2__aarch64_neonfma_prfm_cortex_a75() 189 …static void f32_gemm_4x2__aarch64_neonfma_ld64(benchmark::State& state, models::ExecutionPlanFacto… in f32_gemm_4x2__aarch64_neonfma_ld64() 200 …static void f32_gemm_4x12__aarch64_neonfma_cortex_a53(benchmark::State& state, models::ExecutionPl… in f32_gemm_4x12__aarch64_neonfma_cortex_a53() 211 …static void f32_gemm_4x8__aarch64_neonfma_cortex_a53(benchmark::State& state, models::ExecutionPla… in f32_gemm_4x8__aarch64_neonfma_cortex_a53() 222 …static void f32_gemm_4x8__aarch64_neonfma_prfm_cortex_a53(benchmark::State& state, models::Executi… in f32_gemm_4x8__aarch64_neonfma_prfm_cortex_a53() 233 …static void f32_gemm_4x8__aarch64_neonfma_cortex_a55(benchmark::State& state, models::ExecutionPla… in f32_gemm_4x8__aarch64_neonfma_cortex_a55() [all …]
|
| D | end2end.cc | 17 #include "models/models.h" 22 models::ExecutionPlanFactory model_factory) in End2EndBenchmark() 56 End2EndBenchmark(state, models::FP32MobileNetV1); in FP32MobileNetV1() 60 End2EndBenchmark(state, models::FP32MobileNetV2); in FP32MobileNetV2() 64 End2EndBenchmark(state, models::FP32MobileNetV3Large); in FP32MobileNetV3Large() 68 End2EndBenchmark(state, models::FP32MobileNetV3Small); in FP32MobileNetV3Small() 73 return models::FP32SparseMobileNetV1(0.8f, threadpool); in FP32Sparse80MobileNetV1() 79 return models::FP32SparseMobileNetV2(0.8f, threadpool); in FP32Sparse80MobileNetV2() 85 return models::FP32SparseMobileNetV3Large(0.8f, threadpool); in FP32Sparse80MobileNetV3Large() 91 return models::FP32SparseMobileNetV3Small(0.8f, threadpool); in FP32Sparse80MobileNetV3Small() [all …]
|
| D | end2end.h | 8 #include "models/models.h" 13 …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v1, models::FP32MobileNetV1)->Unit(benchmark::kMicroseco… 14 …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v2, models::FP32MobileNetV2)->Unit(benchmark::kMicroseco… 15 …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v3_large, models::FP32MobileNetV3Large)->Unit(benchmark:… 16 …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v3_small, models::FP32MobileNetV3Small)->Unit(benchmark:… 19 …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v1, models::QS8MobileNetV1)->Unit(benchmark::kMicrosecon… 20 …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v2, models::QS8MobileNetV2)->Unit(benchmark::kMicrosecon… 23 …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v1, models::QU8MobileNetV1)->Unit(benchmark::kMicrosecon… 24 …BENCHMARK_CAPTURE(benchmark_fn, mobilenet_v2, models::QU8MobileNetV2)->Unit(benchmark::kMicrosecon…
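The XNNPACK benchmark sources above all follow one pattern: each `static void …(benchmark::State& state, models::ExecutionPlanFactory …)` function pins a specific micro-kernel, and `end2end.h` registers it against the MobileNet model factories via `BENCHMARK_CAPTURE`. As a loose Python analogue only (not the XNNPACK/Google Benchmark harness itself; the model path is a placeholder), a comparable end-to-end latency number for a MobileNet `.tflite` file can be sketched with the TFLite interpreter, which applies the XNNPACK delegate by default in recent TensorFlow releases:

```python
# Rough end-to-end latency sketch for a MobileNet .tflite model (placeholder
# path). Illustrative only; it is not the C++ benchmark harness above.
import time
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="mobilenet_v2.tflite", num_threads=4)
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
interpreter.set_tensor(inp["index"], np.zeros(inp["shape"], dtype=inp["dtype"]))

for _ in range(5):          # warm-up runs
    interpreter.invoke()

runs = 50
start = time.perf_counter()
for _ in range(runs):
    interpreter.invoke()
print("mean latency: %.2f ms" % ((time.perf_counter() - start) / runs * 1e3))
```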
|
| /external/tensorflow/tensorflow/lite/g3doc/android/ |
| D | index.md | 3 TensorFlow Lite lets you run TensorFlow machine learning (ML) models in your 5 execution environments for running models on Android quickly and efficiently, 41 <a href="../models"> 42 <h3 class="no-link hide-from-toc" id="ml-models" data-text="ML models">ML models</h3></a> 43 Learn about choosing and using ML models with TensorFlow Lite, see the 44 <a href="../models">Models</a> docs. 53 ## Machine learning models 55 TensorFlow Lite uses TensorFlow models that are converted into a smaller, 57 models with TensorFlow Lite on Android, or build your own TensorFlow models and 60 **Key Point:** TensorFlow Lite models and TensorFlow models have a *different [all …]
|
| /external/autotest/frontend/afe/ |
| D | rpc_interface_unittest.py | 17 models, rpc_interface, rpc_utils) 30 _hqe_status = models.HostQueueEntry.Status 93 return models.Job.objects.get(id=job_id) 98 label2 = models.Label.objects.create(name='bluetooth', platform=False) 108 host2 = models.Host.objects.create(hostname='test_host2', leased=False) 118 host2 = models.Host.objects.create(hostname='test_host2', leased=False) 124 models.Host, 132 host2 = models.Host.smart_get(host2.id) 141 leased_host = models.Host.objects.create(hostname='leased_host', 167 host3 = models.Host.objects.create(hostname='test_host3', leased=False) [all …]
|
| D | shard_heartbeat_unittest.py | 14 from autotest_lib.frontend.afe import models 34 assigned = models.Job.assign_to_shard(shard, []) 45 assigned = models.Job.assign_to_shard(shard, []) 54 assigned_jobs = models.Job.assign_to_shard(shard, [known_job.id]) 65 assigned = models.Job.assign_to_shard(shard, []) 75 assigned = models.Job.assign_to_shard(shard, []) 81 old = models.Job.SKIP_JOBS_CREATED_BEFORE 83 models.Job.SKIP_JOBS_CREATED_BEFORE = value 86 models.Job.SKIP_JOBS_CREATED_BEFORE = old 92 @param host: A models.Host object. [all …]
|
| D | models_test.py | 10 from autotest_lib.frontend.afe import models, model_logic 31 everyone_acl = models.AclGroup.objects.get(name='Everyone') 38 models.AclGroup.on_host_membership_change() 55 models.Host.objects.populate_relationships( 56 [host], models.HostAttribute, 'attribute_list') 61 previous_config = models.RESPECT_STATIC_ATTRIBUTES 62 models.RESPECT_STATIC_ATTRIBUTES = False 63 host1 = models.Host.objects.create(hostname='test_host1') 73 models.RESPECT_STATIC_ATTRIBUTES = previous_config 77 previous_config = models.RESPECT_STATIC_ATTRIBUTES [all …]
|
| D | frontend_test_utils.py | 6 from autotest_lib.frontend.afe import models, model_attributes 14 if models.DroneSet.drone_sets_enabled(): 15 models.DroneSet.objects.create( 16 name=models.DroneSet.default_drone_set_name()) 18 acl_group = models.AclGroup.objects.create(name='my_acl') 19 acl_group.users.add(models.User.current_user()) 21 self.hosts = [models.Host.objects.create(hostname=hostname) 27 models.AclGroup.smart_get('Everyone').hosts = [] 29 self.labels = [models.Label.objects.create(name=name) for name in 33 platform = models.Label.objects.create(name='myplatform', platform=True) [all …]
|
| D | rpc_interface.py | 48 from autotest_lib.frontend.afe import (model_attributes, model_logic, models, 50 from autotest_lib.frontend.tko import models as tko_models 62 from django.db.models import Count 96 label_model = models.Label.smart_get(id) 116 label_model = models.Label.smart_get(id) 127 hosts.append(models.Host.smart_get(h.id)) 146 # models.Label.add_object() throws model_logic.ValidationError 151 label = models.Label.add_object(name=name, **kwargs) 171 @raises models.Label.DoesNotExist: If the label with id doesn't exist. 173 label = models.Label.smart_get(id) [all …]
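The AFE tests and RPC code above lean on Django's ORM: `models.Host`, `models.Label`, and `models.Job` are ordinary Django model classes, so fixtures are built with `objects.create()` and looked up with `objects.get()` or `objects.filter()`. A small sketch of that pattern, using only field names that appear in the excerpts (autotest-specific helpers such as `smart_get` are not reproduced here):

```python
# Sketch of the Django ORM usage visible in the AFE tests. The host and label
# values are illustrative; `models` is autotest_lib.frontend.afe.models.
from autotest_lib.frontend.afe import models

host = models.Host.objects.create(hostname="test_host1", leased=False)
label = models.Label.objects.create(name="bluetooth", platform=False)

same_host = models.Host.objects.get(id=host.id)       # primary-key lookup
unleased = models.Host.objects.filter(leased=False)   # standard queryset filter
```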
|
| /external/autotest/frontend/tko/ |
| D | rpc_interface.py | 3 from django.db import models as dbmodels 6 from autotest_lib.frontend.afe import models as afe_models, readonly_connection 7 from autotest_lib.frontend.tko import models, tko_rpc_utils 15 models.TestView.list_objects(filter_data)) 19 return models.TestView.query_count(filter_data) 47 query = models.TestView.objects.get_query_set_with_joins(filter_data) 49 query = models.TestView.query_objects(filter_data, initial_query=query, 51 count_alias, count_sql = models.TestView.objects.get_count_sql(query) 55 query = models.TestView.apply_presentation(query, filter_data) 68 query = models.TestView.objects.get_query_set_with_joins(filter_data) [all …]
|
| /external/tensorflow/tensorflow/lite/delegates/flex/ |
| D | build_def.bzl | 29 models, 36 models: TFLite models to interpret. 47 if type(models) != type([]): 48 models = [models] 50 # List all flex ops from models. 52 ["$(location %s)" % f for f in models], 67 srcs = models, 70 message = "Listing flex ops from %s..." % ",".join(models), 93 models = [], 98 """A rule to generate a flex delegate with only ops to run listed models. [all …]
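The `build_def.bzl` macro above lists the flex (select TensorFlow) ops used by the given models so the generated delegate links only those kernels. On the conversion side, a model only needs that delegate when it was converted with `SELECT_TF_OPS` enabled; here is a sketch of that converter setting (paths are placeholders, and this is the public converter API rather than anything defined in the Bazel file):

```python
# Sketch: produce a .tflite model containing flex (select TF) ops, i.e. the
# kind of model the Bazel macro above inspects. Paths are placeholders.
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/my_saved_model")
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # built-in TFLite kernels where possible
    tf.lite.OpsSet.SELECT_TF_OPS,    # fall back to TensorFlow (flex) kernels
]
tflite_model = converter.convert()
with open("/tmp/model_with_flex_ops.tflite", "wb") as f:
    f.write(tflite_model)
```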
|
| /external/lzma/CS/7zip/Compress/RangeCoder/ |
| D | RangeCoderBitTree.cs | 7 BitEncoder[] Models; field 13 Models = new BitEncoder[1 << numBitLevels]; in BitTreeEncoder() 19 Models[i].Init(); in Init() 29 Models[m].Encode(rangeEncoder, bit); in Encode() 40 Models[m].Encode(rangeEncoder, bit); in ReverseEncode() 54 price += Models[m].GetPrice(bit); in GetPrice() 68 price += Models[m].GetPrice(bit); in ReverseGetPrice() 74 public static UInt32 ReverseGetPrice(BitEncoder[] Models, UInt32 startIndex, in ReverseGetPrice() 83 price += Models[startIndex + m].GetPrice(bit); in ReverseGetPrice() 89 public static void ReverseEncode(BitEncoder[] Models, UInt32 startIndex, in ReverseEncode() [all …]
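Both this C# class and the Java port further down (`BitTreeEncoder.java`) keep `1 << numBitLevels` adaptive bit models in one flat array and walk it as an implicit binary tree: the index starts at 1 and each coded bit is shifted in to select the next node. A Python sketch of just that index walk, with `encode_bit` left as a placeholder for the range coder's per-bit encode:

```python
# Index walk of an LZMA-style bit-tree encoder. `models` holds
# 1 << num_bit_levels adaptive bit probabilities; `encode_bit(models, m, bit)`
# stands in for the range coder's per-bit encode and is not implemented here.
def bit_tree_encode(models, num_bit_levels, symbol, encode_bit):
    m = 1
    for i in reversed(range(num_bit_levels)):  # most significant bit first
        bit = (symbol >> i) & 1
        encode_bit(models, m, bit)             # code the bit under model m
        m = (m << 1) | bit                     # descend to the chosen child

def bit_tree_reverse_encode(models, num_bit_levels, symbol, encode_bit):
    m = 1
    for _ in range(num_bit_levels):            # least significant bit first
        bit = symbol & 1
        symbol >>= 1
        encode_bit(models, m, bit)
        m = (m << 1) | bit
```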
|
| /external/tensorflow/tensorflow/lite/g3doc/examples/build/ |
| D | index.md | 1 # Build TensorFlow Lite models 4 your TensorFlow models with the intention of converting to the TensorFlow 5 Lite model format. The machine learning (ML) models you use with TensorFlow 12 [Convert models overview](../convert/) 16 see the [Modify models overview](../modify/model_maker) for guidance. 28 constraints for TensorFlow Lite models and build your model with these 34 growing in compute power and specialized hardware compatibility, the models 36 * **Size of models** - The overall complexity of a model, including data 41 with a machine learning model is limited on a mobile or edge device. Models 47 regular TensorFlow models. As you develop a model for use with TensorFlow [all …]
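The build guide excerpted above keeps returning to the same constraints: model size and on-device compute. One standard lever at conversion time, shown here only as a sketch with placeholder paths (the excerpt itself does not prescribe it), is default post-training quantization:

```python
# Sketch: convert a SavedModel with default post-training quantization, which
# typically shrinks the model and speeds up CPU inference. Paths are
# placeholders.
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/my_saved_model")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
with open("/tmp/model_quantized.tflite", "wb") as f:
    f.write(tflite_model)
```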
|
| /external/flatbuffers/grpc/examples/go/greeter/server/ |
| D | main.go | 10 models "github.com/google/flatbuffers/grpc/examples/go/greeter/models" packageName 19 models.UnimplementedGreeterServer 22 func (s *greeterServer) SayHello(ctx context.Context, request *models.HelloRequest) (*flatbuffers.B… 32 models.HelloReplyStart(b) 33 models.HelloReplyAddMessage(b, idx) 34 b.Finish(models.HelloReplyEnd(b)) 38 func (s *greeterServer) SayManyHellos(request *models.HelloRequest, stream models.Greeter_SayManyHe… 50 models.HelloReplyStart(b) 51 models.HelloReplyAddMessage(b, idx) 52 b.Finish(models.HelloReplyEnd(b)) [all …]
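The Go server above assembles its `HelloReply` with the flatc-generated `models` package. Assuming the same greeter schema were also compiled for Python (the `models` import and its `HelloReply*` helpers below are hypothetical mirrors of the generated Go code; only `flatbuffers.Builder` and its methods are known, real APIs), the reply construction would follow the same builder pattern:

```python
# Hypothetical Python mirror of the Go reply construction above. The
# `models.HelloReply*` helpers are assumed to exist via flatc codegen for the
# same schema; flatbuffers.Builder/CreateString/Finish/Output are real APIs.
import flatbuffers
import models  # hypothetical flatc-generated package for the greeter schema

def build_hello_reply(name: str) -> bytes:
    b = flatbuffers.Builder(0)
    idx = b.CreateString("Hello, " + name)
    models.HelloReplyStart(b)
    models.HelloReplyAddMessage(b, idx)
    b.Finish(models.HelloReplyEnd(b))
    return bytes(b.Output())
```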
|
| /external/tensorflow/tensorflow/lite/g3doc/ |
| D | _book.yaml | 21 - title: "Running ML models" 127 - heading: "Models with metadata" 202 - title: "Build and convert models" 206 - name: "Models" 209 path: /lite/models 211 - heading: "Use trained models" 213 path: /lite/models/trained/index 215 - heading: "Modify models" 220 path: /lite/models/modify/model_maker 223 path: /lite/models/modify/model_maker/image_classification [all …]
|
| /external/python/google-api-python-client/docs/dyn/ |
| D | bigquery_v2.models.html | 75 <h1><a href="bigquery_v2.html">BigQuery API</a> . <a href="bigquery_v2.models.html">models</a></h1> 88 …models in the specified dataset. Requires the READER dataset role. After retrieving the list of mo… 132 …models will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the e… 162 …associated with this model. You can use these to organize and group your models. Label keys and va… 187 …r eval data was used during training. These are not present for imported models. # The evaluation … 188 …ics": { # Model evaluation metrics for ARIMA forecasting models. # Populated for ARIMA models. 238 …metrics for binary classification/classifier models. # Populated for binary classification/classif… 239 …s": { # Aggregate metrics for classification/classifier models. For multi-class models, the m… 246 …computed. For binary classification models this is the positive class threshold. For multi-class c… 249 { # Confusion matrix for binary classification models. [all …]
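This is the generated reference page for the `models` collection of the BigQuery v2 discovery-based client. A short usage sketch (project and dataset IDs are placeholders; application-default credentials are assumed to be configured in the environment):

```python
# Sketch: list BigQuery ML models in a dataset via the discovery-based client
# documented above. IDs are placeholders; application-default credentials are
# assumed.
from googleapiclient import discovery

service = discovery.build("bigquery", "v2")
response = service.models().list(projectId="my-project",
                                 datasetId="my_dataset").execute()
for model in response.get("models", []):
    print(model["modelReference"]["modelId"], model.get("modelType"))
```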
|
| /external/python/google-api-python-client/googleapiclient/discovery_cache/documents/ |
| D | firebaseml.v1beta2.json | 15 "description": "Access custom machine learning models hosted via Firebase ML.", 110 "models": { object 114 "flatPath": "v1beta2/projects/{projectsId}/models", 116 "id": "firebaseml.projects.models.create", 129 "path": "v1beta2/{+parent}/models", 142 "flatPath": "v1beta2/projects/{projectsId}/models/{modelsId}", 144 "id": "firebaseml.projects.models.delete", 150 …ame of the model to delete. The name must have the form `projects/{project_id}/models/{model_id}`", 152 "pattern": "^projects/[^/]+/models/[^/]+$", 167 "flatPath": "v1beta2/projects/{projectsId}/models/{modelsId}:download", [all …]
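This discovery document defines the Firebase ML `projects.models` methods (`create`, `delete`, `download`, …) that the generated Python client exposes. A hedged sketch of calling `delete`, whose `name` format the document spells out (IDs are placeholders; application-default credentials are assumed):

```python
# Sketch: delete a hosted Firebase ML model through the discovery-based client
# described above. Project and model IDs are placeholders.
from googleapiclient import discovery

service = discovery.build("firebaseml", "v1beta2")
service.projects().models().delete(
    name="projects/my-project/models/my-model-id"
).execute()
```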
|
| /external/lzma/Java/SevenZip/Compression/RangeCoder/ |
| D | BitTreeEncoder.java | 6 short[] Models; field in BitTreeEncoder 12 Models = new short[1 << numBitLevels]; in BitTreeEncoder() 17 Decoder.InitBitModels(Models); in Init() 27 rangeEncoder.Encode(Models, m, bit); in Encode() 38 rangeEncoder.Encode(Models, m, bit); in ReverseEncode() 52 price += Encoder.GetPrice(Models[m], bit); in GetPrice() 66 price += Encoder.GetPrice(Models[m], bit); in ReverseGetPrice() 72 public static int ReverseGetPrice(short[] Models, int startIndex, in ReverseGetPrice() argument 81 price += Encoder.GetPrice(Models[startIndex + m], bit); in ReverseGetPrice() 87 public static void ReverseEncode(short[] Models, int startIndex, in ReverseEncode() argument [all …]
|