# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for lstm."""
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import ExtraConvertOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import rnn


@register_make_test_function()
def make_lstm_tests(options):
  """Make a set of tests for the basic LSTM cell."""

  test_parameters = [
      {
          "dtype": [tf.float32],
          "num_batchs": [1],
          "time_step_size": [1],
          "input_vec_size": [3],
          "num_cells": [4],
          "split_tflite_lstm_inputs": [False],
      },
  ]

  def build_graph(parameters):
    """Build a simple graph with BasicLSTMCell."""

    num_batchs = parameters["num_batchs"]
    time_step_size = parameters["time_step_size"]
    input_vec_size = parameters["input_vec_size"]
    num_cells = parameters["num_cells"]
    inputs_after_split = []
    for i in range(time_step_size):
      one_timestamp_input = tf.compat.v1.placeholder(
          dtype=parameters["dtype"],
          name="split_{}".format(i),
          shape=[num_batchs, input_vec_size])
      inputs_after_split.append(one_timestamp_input)
    # Currently the LSTM identifier has a few limitations: it only supports
    # forget_bias == 0 and tanh as the inner state activation.
    # TODO(zhixianyan): Add another test with forget_bias == 1.
    # TODO(zhixianyan): Add another test with relu as activation.
    lstm_cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(
        num_cells, forget_bias=0.0, state_is_tuple=True)
    cell_outputs, _ = rnn.static_rnn(
        lstm_cell, inputs_after_split, dtype=tf.float32)
    out = cell_outputs[-1]
    return inputs_after_split, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed inputs, assign variables, and freeze graph."""

    with tf.compat.v1.variable_scope("", reuse=True):
      kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
      bias = tf.get_variable("rnn/basic_lstm_cell/bias")
      kernel_values = create_tensor_data(parameters["dtype"],
                                         [kernel.shape[0], kernel.shape[1]], -1,
                                         1)
      bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
                                       1)
      sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))

    num_batchs = parameters["num_batchs"]
    time_step_size = parameters["time_step_size"]
    input_vec_size = parameters["input_vec_size"]
    input_values = []
    for _ in range(time_step_size):
      tensor_data = create_tensor_data(parameters["dtype"],
                                       [num_batchs, input_vec_size], 0, 1)
      input_values.append(tensor_data)
    out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
    return input_values, out

  # TODO(zhixianyan): Automatically generate rnn_states for lstm cell.
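  # Each rnn_states entry below ties a zero-initialized state array in the
  # frozen graph to the op output that feeds it back on the next time step;
  # `size` matches num_cells above. For BasicLSTMCell these back edges appear
  # to be the new cell state (rnn/basic_lstm_cell/Add_1) and the new hidden
  # state (rnn/basic_lstm_cell/Mul_2); the exact node names depend on the
  # graph produced by rnn.static_rnn, so treat them as an assumption tied to
  # this TF version.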
  extra_convert_options = ExtraConvertOptions()
  extra_convert_options.rnn_states = (
      "{state_array:rnn/BasicLSTMCellZeroState/zeros,"
      "back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
      "{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
      "back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}")

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      extra_convert_options,
      use_frozen_graph=True)