#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def test(name, input0, input1, output0, input0_data, input1_data, output_data, do_variations=True):
    model = Model().Operation("GREATER", input0, input1).To(output0)
    example = Example({
        input0: input0_data,
        input1: input1_data,
        output0: output_data,
    }, model=model, name=name)
    if do_variations:
        example.AddVariations("int32", "float16", "relaxed")

test(
    name="simple",
    input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
    input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
    output0=Output("output0", "TENSOR_BOOL8", "{3}"),
    input0_data=[5, 7, 10],
    input1_data=[10, 7, 5],
    output_data=[False, False, True],
)

test(
    name="broadcast",
    input0=Input("input0", "TENSOR_FLOAT32", "{2, 1}"),
    input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
    output0=Output("output0", "TENSOR_BOOL8", "{2, 2}"),
    input0_data=[5, 10],
    input1_data=[10, 5],
    output_data=[False, False, False, True],
)

test(
    name="quantized_different_scale",
    input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
    input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 2.0, 128)),
    output0=Output("output0", "TENSOR_BOOL8", "{3}"),
    input0_data=[129, 130, 131],  # effectively 1, 2, 3
    input1_data=[129],  # effectively 2
    output_data=[False, False, True],
    do_variations=False,
)

test(
    name="quantized_different_zero_point",
    input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
    input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.0, 129)),
    output0=Output("output0", "TENSOR_BOOL8", "{3}"),
    input0_data=[129, 130, 131],  # effectively 1, 2, 3
    input1_data=[131],  # effectively 2
    output_data=[False, False, True],
    do_variations=False,
)

test(
    name="quantized_overflow_second_input_if_requantized",
    input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
    input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
    output0=Output("output0", "TENSOR_BOOL8", "{1}"),
    input0_data=[0],
    input1_data=[200],
    output_data=[True],
    do_variations=False,
)

test(
    name="quantized_overflow_first_input_if_requantized",
    input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
    input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
    output0=Output("output0", "TENSOR_BOOL8", "{1}"),
    input0_data=[200],
    input1_data=[0],
    output_data=[False],
    do_variations=False,
)

test(
    name="boolean",
    input0=Input("input0", "TENSOR_BOOL8", "{4}"),
    input1=Input("input1", "TENSOR_BOOL8", "{4}"),
    output0=Output("output0", "TENSOR_BOOL8", "{4}"),
    input0_data=[False, True, False, True],
    input1_data=[False, False, True, True],
    output_data=[False, True, False, False],
    do_variations=False,
)
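
# The quantized cases above rely on the TENSOR_QUANT8_ASYMM convention
# real_value = (quantized_value - zero_point) * scale. The block below is an
# illustrative sketch, not part of the generated spec: it re-derives the
# expected outputs for the "different scale" and "overflow if requantized"
# cases. The helper name `dequantize` is a hypothetical choice for this sketch.
def dequantize(quantized, scale, zero_point):
    # Map a stored uint8 value back to the real number it represents.
    return (quantized - zero_point) * scale

# quantized_different_scale: input0 dequantizes to [1, 2, 3] and input1 to [2],
# so GREATER with broadcasting yields [False, False, True].
_lhs = [dequantize(q, 1.0, 128) for q in [129, 130, 131]]
_rhs = dequantize(129, 2.0, 128)
assert [x > _rhs for x in _lhs] == [False, False, True]

# quantized_overflow_second_input_if_requantized: compared in the real domain,
# (0 - 31) * 1.64771 is about -51.1 and (200 - 240) * 1.49725 is about -59.9,
# so the expected output is [True]. Requantizing input1 into input0's
# parameters would need round(-59.9 / 1.64771) + 31, roughly -5, which lies
# outside [0, 255]; presumably that is the overflow the test name refers to.
assert dequantize(0, 1.64771, 31) > dequantize(200, 1.49725, 240)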