/external/tensorflow/tensorflow/lite/micro/examples/magic_wand/train/ |
D | data_load.py |
    39  seq_length): argument
    41  self.seq_length = seq_length
    66  def pad(self, data, seq_length, dim): argument
    71  tmp_data = (np.random.rand(seq_length, dim) - 0.5) * noise_level + data[0]
    72  tmp_data[(seq_length -
    73  min(len(data), seq_length)):] = data[:min(len(data), seq_length)]
    76  tmp_data = (np.random.rand(seq_length, dim) - 0.5) * noise_level + data[-1]
    77  tmp_data[:min(len(data), seq_length)] = data[:min(len(data), seq_length)]
    85  features = np.zeros((length, self.seq_length, self.dim))
    89  padded_data = self.pad(data, self.seq_length, self.dim)
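The pad() hits above show how the magic_wand data loader brings each gesture recording up to a fixed seq_length: the missing rows are filled with small random noise centred on the first (or last) real sample, and the real samples are copied into the remaining slots. A minimal standalone sketch of the front-padding case (the function name and noise_level default are illustrative, not taken from data_load.py):

```python
import numpy as np

def pad_front(data, seq_length, dim, noise_level=20):
    """Pad `data` (shape [n, dim]) up to `seq_length` rows.

    Leading rows are filled with small random noise centred on the first
    real sample; the original samples are copied into the tail.
    """
    data = np.asarray(data, dtype=np.float32)
    n = min(len(data), seq_length)
    padded = (np.random.rand(seq_length, dim) - 0.5) * noise_level + data[0]
    padded[seq_length - n:] = data[:n]
    return padded

# A 3-sample accelerometer trace padded to 5 timesteps of 3 axes each.
print(pad_front(np.ones((3, 3)), seq_length=5, dim=3).shape)  # (5, 3)
```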
|
D | train.py |
    51  def build_cnn(seq_length): argument
    58  input_shape=(seq_length, 3, 1)), # output_shape=(batch, 128, 3, 8)
    78  def build_lstm(seq_length): argument
    83  input_shape=(seq_length, 3)), # output_shape=(batch, 44)
    93  def load_data(train_data_path, valid_data_path, test_data_path, seq_length): argument
    95  train_data_path, valid_data_path, test_data_path, seq_length=seq_length)
    101  def build_net(args, seq_length): argument
    103  model, model_path = build_cnn(seq_length)
    105  model, model_path = build_lstm(seq_length)
    185  seq_length = 128 variable
    [all …]
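The build_lstm() hit (line 83, with its output_shape=(batch, 44) comment) corresponds to a recurrent model over (seq_length, 3) accelerometer windows. Below is a hedged Keras sketch of such a model; the layer sizes and class count are assumptions rather than the exact train.py architecture:

```python
import tensorflow as tf

def build_lstm(seq_length, num_classes=4):
    """Bidirectional-LSTM classifier over (seq_length, 3) windows (sketch)."""
    model = tf.keras.Sequential([
        # 22 units per direction -> concatenated output of size 44 per window.
        tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(22),
                                      input_shape=(seq_length, 3)),
        tf.keras.layers.Dense(num_classes, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model

model = build_lstm(seq_length=128)
model.summary()  # Bidirectional output: (None, 44); Dense output: (None, 4)
```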
|
D | train_test.py |
    36  self.seq_length = 128
    40  self.seq_length)
    48  cnn, cnn_path = build_cnn(self.seq_length)
    49  lstm, lstm_path = build_lstm(self.seq_length)
|
D | data_load_test.py |
    34  "./data/train", "./data/valid", "./data/test", seq_length=512)
    57  padding_data1 = self.loader.pad(original_data1, seq_length=5, dim=2)
    58  padding_data2 = self.loader.pad(original_data2, seq_length=5, dim=2)
|
/external/tensorflow/tensorflow/python/feature_column/ |
D | utils.py |
    38  seq_length = math_ops.segment_max(column_ids, segment_ids=row_ids)
    45  seq_length = math_ops.cast(
    46  math_ops.ceil(seq_length / num_elements), dtypes.int64)
    50  n_pad = array_ops.shape(sp_tensor)[:1] - array_ops.shape(seq_length)[:1]
    51  padding = array_ops.zeros(n_pad, dtype=seq_length.dtype)
    52  return array_ops.concat([seq_length, padding], axis=0, name=name_scope)
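These utils.py hits compute a per-row sequence length for a 2-D SparseTensor: the largest column index in each row (plus one) gives that row's length, the result is divided by num_elements for multi-valued features, and empty trailing rows are padded with zeros. A sketch of the same idea using only public TF ops (the function name and example values are mine, not the library's):

```python
import tensorflow as tf

def sequence_length_from_sparse(sp, num_elements=1):
    """Per-row sequence length of a 2-D SparseTensor (illustrative sketch)."""
    row_ids = sp.indices[:, 0]
    # Column index + 1 turns the last occupied position into a length.
    column_ids = sp.indices[:, 1] + 1
    seq_length = tf.math.segment_max(column_ids, segment_ids=row_ids)
    seq_length = tf.cast(tf.math.ceil(seq_length / num_elements), tf.int64)
    # Rows with no entries never show up in row_ids; pad them with zeros.
    n_pad = sp.dense_shape[:1] - tf.shape(seq_length, out_type=tf.int64)[:1]
    padding = tf.zeros(n_pad, dtype=seq_length.dtype)
    return tf.concat([seq_length, padding], axis=0)

sp = tf.sparse.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                            values=[1, 2, 3], dense_shape=[3, 2])
print(sequence_length_from_sparse(sp).numpy())  # [2 1 0]
```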
|
D | sequence_feature_column.py |
    458  seq_length = fc_utils.sequence_length_from_sparse_tensor(
    462  dense_tensor=dense_tensor, sequence_length=seq_length)
|
/external/tensorflow/tensorflow/core/ops/ |
D | cudnn_rnn_ops_test.cc |
    42  int seq_length = 2; in TEST() local
    47  std::vector<int> input_shape = {seq_length, batch_size, num_units}; in TEST()
    50  std::vector<int> output_shape = {seq_length, batch_size, in TEST()
    74  int seq_length = 2; in TEST() local
    79  std::vector<int> input_shape = {seq_length, batch_size, num_units}; in TEST()
    82  std::vector<int> output_shape = {seq_length, batch_size, in TEST()
|
D | cudnn_rnn_ops.cc |
    86  auto seq_length = c->Dim(input_shape, 0); in __anon815c5c9f0302() local
    96  auto output_shape = c->MakeShape({seq_length, batch_size, output_size}); in __anon815c5c9f0302()
    129  auto seq_length = c->Dim(input_shape, 0); in __anon815c5c9f0402() local
    139  auto output_shape = c->MakeShape({seq_length, batch_size, output_size}); in __anon815c5c9f0402()
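Both shape functions read seq_length from dimension 0 of the time-major input and emit an output shape of {seq_length, batch_size, output_size}, where output_size grows with the direction count. A small Python sketch of that shape arithmetic (the helper name and direction handling are illustrative, not the C++ shape-inference code):

```python
def cudnn_rnn_output_shape(input_shape, num_units, direction="unidirectional"):
    """Mirror of the CudnnRNN output-shape logic for time-major inputs (sketch).

    input_shape is [seq_length, batch_size, input_size]; the output keeps
    seq_length and batch_size and widens the last dim to dir * num_units.
    """
    seq_length, batch_size, _input_size = input_shape
    dir_count = 2 if direction == "bidirectional" else 1
    return [seq_length, batch_size, dir_count * num_units]

# Example with arbitrary values: seq_length = 2, batch_size = 3, num_units = 4.
print(cudnn_rnn_output_shape([2, 3, 4], num_units=4))                              # [2, 3, 4]
print(cudnn_rnn_output_shape([2, 3, 4], num_units=4, direction="bidirectional"))   # [2, 3, 8]
```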
|
/external/icu/icu4c/source/i18n/ |
D | csr2022.cpp |
    49  int32_t seq_length = (int32_t)uprv_strlen((const char *) seq); in match_2022() local
    51  if (textLen-i >= seq_length) { in match_2022()
    53  while(j < seq_length) { in match_2022()
    62  i += seq_length-1; in match_2022()
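In match_2022() the recognizer measures each candidate ISO-2022 escape sequence with uprv_strlen and, after a match, advances the scan index past the matched bytes. A loose Python analogue of that scanning loop (it only counts matches and omits ICU's hit/miss confidence scoring):

```python
def count_escape_matches(text: bytes, sequences) -> int:
    """Count occurrences of known ISO-2022 escape sequences in `text`.

    Simplified scanning loop: compare each position against every candidate
    sequence and skip past a match so its bytes are not re-scanned (the
    outer loop supplies the final `+ 1`, as in the C++ `i += seq_length-1`).
    """
    hits = 0
    i = 0
    while i < len(text):
        for seq in sequences:
            seq_length = len(seq)
            if len(text) - i >= seq_length and text[i:i + seq_length] == seq:
                hits += 1
                i += seq_length - 1
                break
        i += 1
    return hits

# ESC $ B and ESC ( B are designation sequences used by ISO-2022-JP.
print(count_escape_matches(b"\x1b$B...\x1b(B", [b"\x1b$B", b"\x1b(B"]))  # 2
```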
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | lstm.py |
    127  def RandomInputs(batch_size, seq_length, num_inputs): argument
    132  for seq in range(seq_length):
    140  def BuildLSTMLayer(batch_size, seq_length, num_inputs, num_nodes): argument
    158  x_seq, pad_seq = RandomInputs(batch_size, seq_length, num_inputs)
|
D | lstm_test.py |
    142  seq_length = 3
    147  x_seq = [constant_op.constant(self._inputs)] * seq_length
    162  seq_length = 3
    167  x_seq = [constant_op.constant(self._inputs)] * seq_length
    169  ] * seq_length
    242  out_seq, weights = lstm.BuildLSTMLayer(FLAGS.batch_size, FLAGS.seq_length,
    252  '%s_%d_%d_%d_%d' % (name, FLAGS.batch_size, FLAGS.seq_length,
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_CudnnRNNV3.pbtxt |
    20  [seq_length, batch_size, input_size]. If time_major is false, the shape is
    21  [batch_size, seq_length, input_size].
    33  [seq_length, batch_size, dir * num_units]. If time_major is false, the
    34  shape is [batch_size, seq_length, dir * num_units].
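The CudnnRNN*V3 api_defs describe the two accepted layouts: time-major [seq_length, batch_size, input_size] and batch-major [batch_size, seq_length, input_size]. A hedged illustration of converting a batch-major tensor to the time-major layout with a plain transpose (the shape values are arbitrary examples):

```python
import tensorflow as tf

batch_size, seq_length, input_size = 8, 20, 32
x_batch_major = tf.random.normal([batch_size, seq_length, input_size])

# time_major layout: [seq_length, batch_size, input_size]
x_time_major = tf.transpose(x_batch_major, perm=[1, 0, 2])
print(x_time_major.shape)  # (20, 8, 32)
```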
|
D | api_def_CudnnRNNBackpropV3.pbtxt |
    20  [seq_length, batch_size, input_size]. If time_major is false, the shape is
    21  [batch_size, seq_length, input_size].
    33  [seq_length, batch_size, dir * num_units]. If time_major is false, the
    34  shape is [batch_size, seq_length, dir * num_units].
|
D | api_def_CudnnRNN.pbtxt |
    18  input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
    27  output: A 3-D tensor with the shape of [seq_length, batch_size,
|
D | api_def_CudnnRNNV2.pbtxt |
    19  input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
    28  output: A 3-D tensor with the shape of [seq_length, batch_size,
|
D | api_def_CudnnRNNBackprop.pbtxt |
    17  input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
    26  output: A 3-D tensor with the shape of [seq_length, batch_size,
|
D | api_def_CudnnRNNBackpropV2.pbtxt |
    20  input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
    29  output: A 3-D tensor with the shape of [seq_length, batch_size,
|
/external/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/ |
D | signal_processing.py |
    206  seq_length = 0
    208  seq_length = seq_length + 1 if b else 0
    209  if seq_length >= min_legth:
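The signal_processing.py hits implement simple run-length bookkeeping: a counter grows while consecutive samples satisfy a predicate and resets on the first failure, and a run is flagged once it reaches a minimum length. A standalone sketch (the function and parameter names are illustrative; the WebRTC module applies this pattern to its own thresholds):

```python
def has_long_true_run(mask, min_length):
    """True if `mask` contains at least `min_length` consecutive True values."""
    seq_length = 0
    for b in mask:
        # Grow the current run on True, reset it on the first False.
        seq_length = seq_length + 1 if b else 0
        if seq_length >= min_length:
            return True
    return False

print(has_long_true_run([True, True, False, True, True, True], min_length=3))  # True
print(has_long_true_run([True, False, True, False], min_length=2))             # False
```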
|
/external/tensorflow/tensorflow/python/keras/feature_column/ |
D | sequence_feature_column_integration_test.py |
    141  seq_input, seq_length = ksfc.SequenceFeatures([shared_seq])(features)
    147  [seq_input, seq_length, non_seq_input])
|
/external/tensorflow/tensorflow/python/data/experimental/ops/ |
D | grouping.py |
    211  seq_length = element_length_func(*args)
    217  math_ops.less_equal(buckets_min, seq_length),
    218  math_ops.less(seq_length, buckets_max))
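grouping.py computes seq_length with the user-supplied element_length_func and assigns each element to the bucket whose [min, max) boundaries contain that length. A hedged usage sketch with the public tf.data API; Dataset.bucket_by_sequence_length is available in recent TF 2.x releases (older versions expose the same transformation as tf.data.experimental.bucket_by_sequence_length), and the sample data is made up:

```python
import tensorflow as tf

sentences = [[1, 2], [3, 4, 5, 6], [7], [8, 9, 10]]
dataset = tf.data.Dataset.from_generator(
    lambda: iter(sentences),
    output_signature=tf.TensorSpec([None], tf.int32))

bucketed = dataset.bucket_by_sequence_length(
    element_length_func=lambda x: tf.shape(x)[0],
    bucket_boundaries=[3],       # bucket 0: length < 3, bucket 1: length >= 3
    bucket_batch_sizes=[2, 2])   # must have len(bucket_boundaries) + 1 entries

for batch in bucketed:
    print(batch.numpy())  # elements in a batch are padded to a common length
```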
|
/external/tensorflow/tensorflow/python/ops/ |
D | ctc_ops.py |
    1067  def collapse_repeated(labels, seq_length, name=None): argument
    1085  with ops.name_scope(name, "collapse_repeated_labels", [labels, seq_length]):
    1087  seq_length = ops.convert_to_tensor(seq_length, name="seq_length")
    1098  seq_mask = array_ops.sequence_mask(seq_length, maxlen=maxlen)
    1126  math_ops.cast(new_seq_len, seq_length.dtype))
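collapse_repeated() in ctc_ops.py merges consecutive duplicate labels within each row, considering only the first seq_length[i] entries of row i, and returns both the collapsed labels and the new lengths. It is an internal helper rather than a documented public symbol, so the NumPy sketch below only mirrors its semantics (the argument names and pad value are assumptions):

```python
import numpy as np

def collapse_repeated(labels, seq_length, pad_value=0):
    """Merge consecutive duplicate labels in each row (NumPy sketch).

    Only the first seq_length[i] entries of row i are considered; rows are
    right-padded with `pad_value`. Returns (collapsed, new_lengths).
    """
    labels = np.asarray(labels)
    rows, lengths = [], []
    for row, length in zip(labels, seq_length):
        row = row[:length]
        keep = (np.concatenate([[True], row[1:] != row[:-1]])
                if length > 0 else np.zeros(0, dtype=bool))
        rows.append(row[keep])
        lengths.append(int(keep.sum()))
    maxlen = max(lengths) if lengths else 0
    out = np.full((len(rows), maxlen), pad_value, dtype=labels.dtype)
    for i, row in enumerate(rows):
        out[i, :len(row)] = row
    return out, np.array(lengths)

labels = [[1, 1, 2, 2, 3], [4, 4, 4, 5, 5]]
print(collapse_repeated(labels, seq_length=[5, 4]))
# -> [[1 2 3] [4 5 0]], lengths [3 2]
```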
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | ctc_loss_op_test.py |
    644  seq_length=[4, 5, 5])
    659  seq_length=constant_op.constant([4, 5, 5], dtype=dtypes.int64))
    674  seq_length=[4, 5, 5])
    687  seq_length=[5, 4, 3])
    700  seq_length=[4, 5, 1])
|
/external/tensorflow/tensorflow/stream_executor/rocm/ |
D | rocm_dnn.cc |
    1864  MIOpenRnnSequenceTensorDescriptor(int seq_length, int batch_size, in MIOpenRnnSequenceTensorDescriptor() argument
    1866  : seq_length_(seq_length), in MIOpenRnnSequenceTensorDescriptor()
    1871  if (seq_length <= 0) { in MIOpenRnnSequenceTensorDescriptor()
    1873  absl::StrCat("sequence length must be positive: ", seq_length); in MIOpenRnnSequenceTensorDescriptor()
    1886  handles_.assign(seq_length, handle); in MIOpenRnnSequenceTensorDescriptor()
    1902  int seq_length() const { return seq_length_; } in seq_length() function in stream_executor::gpu::MIOpenRnnSequenceTensorDescriptor
    1965  int seq_length = 0; member
    1989  model_dims->seq_length = input_desc.seq_length(); in ExtractAndCheckRnnForward()
    2009  if (!(output_desc.seq_length() == model_dims->seq_length && in ExtractAndCheckRnnForward()
    2057  input_desc.seq_length() /*seqLength*/, input_desc.handles() /*xDesc*/, in CreateRnnWorkspace()
    [all …]
|
/external/tensorflow/tensorflow/python/ops/numpy_ops/g3doc/ |
D | TensorFlow_NumPy_Text_Generation.ipynb |
    288  …"Next divide the text into example sequences. Each input sequence will contain `seq_length` charac…
    292  …"So break the text into chunks of `seq_length+1`. For example, say `seq_length` is 4 and our text …
    304  "seq_length = 100\n",
    305  "examples_per_epoch = len(text)//(seq_length+1)\n",
    322  "sequences = char_dataset.batch(seq_length+1, drop_remainder=True)\n",
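The notebook cells split the corpus into chunks of seq_length + 1 characters so that each chunk yields an (input, target) pair shifted by one character. A condensed, hedged version of that pipeline (the tiny stand-in corpus and seq_length value are mine; the notebook uses a much larger text and seq_length = 100):

```python
import tensorflow as tf

text = "Hello TensorFlow text generation"   # stand-in corpus
vocab = sorted(set(text))
char2idx = {c: i for i, c in enumerate(vocab)}
text_as_int = [char2idx[c] for c in text]

seq_length = 4                               # the notebook uses 100
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_target(chunk):
    # Input is the chunk minus its last character; target is shifted by one.
    return chunk[:-1], chunk[1:]

dataset = sequences.map(split_input_target)
for x, y in dataset.take(1):
    print(x.numpy(), y.numpy())  # e.g. indices for "Hell" and "ello"
```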
|
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | custom_training_loop_models_test.py |
    223  seq_length = 10
    225  x_train = np.random.rand(batch_size, seq_length, 1).astype("float32")
|