/*
 * Copyright (C) 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.adservices.ondevicepersonalization;

import android.annotation.IntRange;
import android.annotation.NonNull;
import android.os.Parcelable;

import com.android.ondevicepersonalization.internal.util.AnnotationValidations;
import com.android.ondevicepersonalization.internal.util.DataClass;

/**
 * Parcelable version of {@link InferenceInput}.
 *
 * @hide
 */
@DataClass(genAidl = false, genBuilder = false)
public class InferenceInputParcel implements Parcelable {
    /**
     * The location of the TFLite model. The model is usually stored in the REMOTE_DATA or
     * LOCAL_DATA table.
     */
    @NonNull private ModelId mModelId;

    /** The delegate used to run model inference. If not specified, the CPU delegate is used. */
    private @InferenceInput.Params.Delegate int mDelegate;

    /**
     * The number of threads available to the interpreter. Only takes effect when the input
     * tensors are on the CPU. Setting cpuNumThread to 0 disables multithreading, which is
     * equivalent to setting cpuNumThread to 1. If set to -1, the number of threads used is
     * implementation-defined and platform-dependent.
     */
    private @IntRange(from = 1) int mCpuNumThread;

    /**
     * The byte array holding the input data. The inputs should be in the same order as the
     * inputs of the model.
     */
    @NonNull private byte[] mInputData;

    /**
     * The number of input examples. Adopters can set this field to run batch inference. The
     * batch size is 1 by default.
     */
    private int mBatchSize;

    /** The type of the model. Defaults to {@code MODEL_TYPE_TENSORFLOW_LITE}. */
    private @InferenceInput.Params.ModelType int mModelType =
            InferenceInput.Params.MODEL_TYPE_TENSORFLOW_LITE;

    /**
     * An empty InferenceOutput representing the expected output structure. For TFLite, the
     * inference code verifies that this expected output structure matches the model's output
     * signature.
     */
    @NonNull private InferenceOutputParcel mExpectedOutputStructure;
    /** @hide */
    public InferenceInputParcel(@NonNull InferenceInput value) {
        this(
                new ModelId.Builder()
                        .setTableId(value.getParams().getKeyValueStore().getTableId())
                        .setKey(value.getParams().getModelKey())
                        .build(),
                value.getParams().getDelegateType(),
                value.getParams().getRecommendedNumThreads(),
                value.getData(),
                value.getBatchSize(),
                value.getParams().getModelType(),
                new InferenceOutputParcel(value.getExpectedOutputStructure()));
    }
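
    // Illustrative usage sketch (not part of the generated class). Given an InferenceInput
    // obtained elsewhere (how it is built is not shown in this file and is assumed here), the
    // wrapping constructor above flattens its Params into a ModelId plus primitive fields
    // before the object crosses the process boundary:
    //
    //     InferenceInput input = ...; // assumed to be provided by the caller
    //     InferenceInputParcel parcel = new InferenceInputParcel(input);
    //     // parcel.getModelId() now carries the table id and model key from input.getParams().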

    // Code below generated by codegen v1.0.23.
    //
    // DO NOT MODIFY!
    // CHECKSTYLE:OFF Generated code
    //
    // To regenerate run:
    // $ codegen
    // $ANDROID_BUILD_TOP/packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInputParcel.java
    //
    // To exclude the generated code from IntelliJ auto-formatting enable (one-time):
    //   Settings > Editor > Code Style > Formatter Control
    // @formatter:off

    /**
     * Creates a new InferenceInputParcel.
     *
     * @param modelId The location of the TFLite model. The model is usually stored in the
     *     REMOTE_DATA or LOCAL_DATA table.
     * @param delegate The delegate used to run model inference. If not specified, the CPU
     *     delegate is used.
     * @param cpuNumThread The number of threads available to the interpreter. Only takes effect
     *     when the input tensors are on the CPU. Setting cpuNumThread to 0 disables
     *     multithreading, which is equivalent to setting cpuNumThread to 1. If set to -1, the
     *     number of threads used is implementation-defined and platform-dependent.
     * @param inputData The byte array holding the input data. The inputs should be in the same
     *     order as the inputs of the model.
     * @param batchSize The number of input examples. Adopters can set this field to run batch
     *     inference. The batch size is 1 by default.
     * @param modelType The type of the model. Defaults to {@link
     *     InferenceInput.Params#MODEL_TYPE_TENSORFLOW_LITE}.
     * @param expectedOutputStructure An empty InferenceOutput representing the expected output
     *     structure. For TFLite, the inference code verifies that this expected output structure
     *     matches the model's output signature.
     */
    @DataClass.Generated.Member
    public InferenceInputParcel(
            @NonNull ModelId modelId,
            @InferenceInput.Params.Delegate int delegate,
            @IntRange(from = 1) int cpuNumThread,
            @NonNull byte[] inputData,
            int batchSize,
            @InferenceInput.Params.ModelType int modelType,
            @NonNull InferenceOutputParcel expectedOutputStructure) {
        this.mModelId = modelId;
        AnnotationValidations.validate(NonNull.class, null, mModelId);
        this.mDelegate = delegate;
        AnnotationValidations.validate(InferenceInput.Params.Delegate.class, null, mDelegate);
        this.mCpuNumThread = cpuNumThread;
        AnnotationValidations.validate(IntRange.class, null, mCpuNumThread, "from", 1);
        this.mInputData = inputData;
        AnnotationValidations.validate(NonNull.class, null, mInputData);
        this.mBatchSize = batchSize;
        this.mModelType = modelType;
        AnnotationValidations.validate(InferenceInput.Params.ModelType.class, null, mModelType);
        this.mExpectedOutputStructure = expectedOutputStructure;
        AnnotationValidations.validate(NonNull.class, null, mExpectedOutputStructure);

        // onConstructed(); // You can define this method to get a callback
    }

    /**
     * The location of the TFLite model. The model is usually stored in the REMOTE_DATA or
     * LOCAL_DATA table.
     */
    @DataClass.Generated.Member
    public @NonNull ModelId getModelId() {
        return mModelId;
    }

    /** The delegate used to run model inference. If not specified, the CPU delegate is used. */
    @DataClass.Generated.Member
    public @InferenceInput.Params.Delegate int getDelegate() {
        return mDelegate;
    }

    /**
     * The number of threads available to the interpreter. Only takes effect when the input
     * tensors are on the CPU. Setting cpuNumThread to 0 disables multithreading, which is
     * equivalent to setting cpuNumThread to 1. If set to -1, the number of threads used is
     * implementation-defined and platform-dependent.
     */
    @DataClass.Generated.Member
    public @IntRange(from = 1) int getCpuNumThread() {
        return mCpuNumThread;
    }

    /**
     * The byte array holding the input data. The inputs should be in the same order as the
     * inputs of the model.
     */
    @DataClass.Generated.Member
    public @NonNull byte[] getInputData() {
        return mInputData;
    }

    /**
     * The number of input examples. Adopters can set this field to run batch inference. The
     * batch size is 1 by default.
     */
    @DataClass.Generated.Member
    public int getBatchSize() {
        return mBatchSize;
    }

    /** The type of the model. Defaults to {@code MODEL_TYPE_TENSORFLOW_LITE}. */
    @DataClass.Generated.Member
    public @InferenceInput.Params.ModelType int getModelType() {
        return mModelType;
    }

    /**
     * An empty InferenceOutput representing the expected output structure. For TFLite, the
     * inference code verifies that this expected output structure matches the model's output
     * signature.
     */
    @DataClass.Generated.Member
    public @NonNull InferenceOutputParcel getExpectedOutputStructure() {
        return mExpectedOutputStructure;
    }

    @Override
    @DataClass.Generated.Member
    public void writeToParcel(@NonNull android.os.Parcel dest, int flags) {
        // You can override field parcelling by defining methods like:
        // void parcelFieldName(Parcel dest, int flags) { ... }

        dest.writeTypedObject(mModelId, flags);
        dest.writeInt(mDelegate);
        dest.writeInt(mCpuNumThread);
        dest.writeByteArray(mInputData);
        dest.writeInt(mBatchSize);
        dest.writeInt(mModelType);
        dest.writeTypedObject(mExpectedOutputStructure, flags);
    }

    @Override
    @DataClass.Generated.Member
    public int describeContents() {
        return 0;
    }

    /** @hide */
    @SuppressWarnings({"unchecked", "RedundantCast"})
    @DataClass.Generated.Member
    protected InferenceInputParcel(@NonNull android.os.Parcel in) {
        // You can override field unparcelling by defining methods like:
        // static FieldType unparcelFieldName(Parcel in) { ... }

        ModelId modelId = (ModelId) in.readTypedObject(ModelId.CREATOR);
        int delegate = in.readInt();
        int cpuNumThread = in.readInt();
        byte[] inputData = in.createByteArray();
        int batchSize = in.readInt();
        int modelType = in.readInt();
        InferenceOutputParcel expectedOutputStructure =
                (InferenceOutputParcel) in.readTypedObject(InferenceOutputParcel.CREATOR);

        this.mModelId = modelId;
        AnnotationValidations.validate(NonNull.class, null, mModelId);
        this.mDelegate = delegate;
        AnnotationValidations.validate(InferenceInput.Params.Delegate.class, null, mDelegate);
        this.mCpuNumThread = cpuNumThread;
        AnnotationValidations.validate(IntRange.class, null, mCpuNumThread, "from", 1);
        this.mInputData = inputData;
        AnnotationValidations.validate(NonNull.class, null, mInputData);
        this.mBatchSize = batchSize;
        this.mModelType = modelType;
        AnnotationValidations.validate(InferenceInput.Params.ModelType.class, null, mModelType);
        this.mExpectedOutputStructure = expectedOutputStructure;
        AnnotationValidations.validate(NonNull.class, null, mExpectedOutputStructure);

        // onConstructed(); // You can define this method to get a callback
    }

    @DataClass.Generated.Member
    public static final @NonNull Parcelable.Creator<InferenceInputParcel> CREATOR =
            new Parcelable.Creator<InferenceInputParcel>() {
                @Override
                public InferenceInputParcel[] newArray(int size) {
                    return new InferenceInputParcel[size];
                }

                @Override
                public InferenceInputParcel createFromParcel(@NonNull android.os.Parcel in) {
                    return new InferenceInputParcel(in);
                }
            };
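
    // Illustrative round-trip sketch (not part of the generated class). CREATOR pairs with
    // writeToParcel(Parcel, int) above; a caller holding an already-constructed
    // InferenceInputParcel "original" (assumed here) could copy it through a Parcel like this:
    //
    //     android.os.Parcel p = android.os.Parcel.obtain();
    //     original.writeToParcel(p, 0);
    //     p.setDataPosition(0);
    //     InferenceInputParcel copy = InferenceInputParcel.CREATOR.createFromParcel(p);
    //     p.recycle();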

    @DataClass.Generated(
            time = 1730482564983L,
            codegenVersion = "1.0.23",
            sourceFile =
                    "packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInputParcel.java",
            inputSignatures =
                    "private @android.annotation.NonNull android.adservices.ondevicepersonalization.ModelId mModelId\nprivate @android.adservices.ondevicepersonalization.InferenceInput.Params.Delegate int mDelegate\nprivate @android.annotation.IntRange int mCpuNumThread\nprivate @android.annotation.NonNull byte[] mInputData\nprivate  int mBatchSize\nprivate @android.adservices.ondevicepersonalization.InferenceInput.Params.ModelType int mModelType\nprivate @android.annotation.NonNull android.adservices.ondevicepersonalization.InferenceOutputParcel mExpectedOutputStructure\nclass InferenceInputParcel extends java.lang.Object implements [android.os.Parcelable]\n@com.android.ondevicepersonalization.internal.util.DataClass(genAidl=false, genBuilder=false)")
    @Deprecated
    private void __metadata() {}

    // @formatter:on
    // End of generated code

}