# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for unidirectional_sequence_lstm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.framework import test_util


@register_make_test_function("make_unidirectional_sequence_lstm_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_lstm_tests(options):
  """Make a set of tests to do unidirectional_sequence_lstm."""

  test_parameters = [{
      "batch_size": [2, 4, 6],
      "seq_length": [1, 3],
      "units": [4, 5],
      "use_peepholes": [False, True],
      "is_dynamic_rnn": [False, True]
  }]

  def build_graph(parameters):
    """Build the graph for unidirectional_sequence_lstm."""
    input_values = []
    if parameters["is_dynamic_rnn"]:
      # Dynamic RNN takes a single time-major input of shape
      # [time, batch, input].
      shape = [
          parameters["seq_length"], parameters["batch_size"],
          parameters["units"]
      ]
      input_value = tf.compat.v1.placeholder(
          dtype=tf.float32, name="input", shape=shape)
      input_values.append(input_value)
      lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
          parameters["units"], use_peepholes=parameters["use_peepholes"])
      outs, _ = tf.lite.experimental.nn.dynamic_rnn(
          lstm_cell, input_value, dtype=tf.float32, time_major=True)
      outs = tf.unstack(outs, axis=1)
    else:
      # Static RNN takes a list of one [batch, input] placeholder per
      # time step.
      shape = [parameters["batch_size"], parameters["units"]]
      for i in range(parameters["seq_length"]):
        input_value = tf.compat.v1.placeholder(
            dtype=tf.float32, name=("input_%d" % i), shape=shape)
        input_values.append(input_value)
      lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
          parameters["units"], use_peepholes=parameters["use_peepholes"])
      outs, _ = tf.nn.static_rnn(lstm_cell, input_values, dtype=tf.float32)

    # Take the last time step's output and wrap it in an identity op so the
    # graph has a well-defined output tensor.
    real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
    real_output = tf.identity(real_output)
    return input_values, [real_output]

  def build_inputs(parameters, sess, inputs, outputs):
    """Build the inputs for unidirectional_sequence_lstm."""
    input_values = []
    if parameters["is_dynamic_rnn"]:
      shape = [
          parameters["seq_length"], parameters["batch_size"],
          parameters["units"]
      ]
      input_value = create_tensor_data(tf.float32, shape)
      input_values.append(input_value)
    else:
      shape = [parameters["batch_size"], parameters["units"]]
      for _ in range(parameters["seq_length"]):
        input_value = create_tensor_data(tf.float32, shape)
        input_values.append(input_value)
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    # The TFLite fused kernel takes its input as [time, batch, input].
    # For the static unidirectional sequence lstm, the input is a list of
    # `time` tensors that get packed together; for time = 1, however, the
    # input is not packed, so reshape it to [1, batch, input] explicitly.
    tflite_input_values = input_values
    if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
      tflite_input_values = [
          input_values[0].reshape(
              (1, parameters["batch_size"], parameters["units"]))
      ]
    return tflite_input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      use_frozen_graph=True)