Searched refs:num_units (Results 1 – 25 of 86) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/internal/
kernel_utils.cc
24 int input_size, int num_units, int batch_size, in RnnBatchStep() argument
31 bias_ptr, input_size, /*aux_input_size=*/0, num_units, in RnnBatchStep()
40 int input_size, int aux_input_size, int num_units, in RnnBatchStep() argument
46 if (output_batch_leading_dim == num_units) { in RnnBatchStep()
48 tensor_utils::VectorBatchVectorAssign(bias_ptr, num_units, batch_size, in RnnBatchStep()
53 input_weights_ptr, num_units, input_size, input_ptr_batch, batch_size, in RnnBatchStep()
59 aux_input_weights_ptr, num_units, aux_input_size, aux_input_ptr_batch, in RnnBatchStep()
65 recurrent_weights_ptr, num_units, num_units, hidden_state_ptr_batch, in RnnBatchStep()
70 output_ptr_batch, num_units * batch_size, activation, output_ptr_batch); in RnnBatchStep()
71 tensor_utils::CopyVector(output_ptr_batch, num_units * batch_size, in RnnBatchStep()
[all …]
kernel_utils.h
41 int input_size, int num_units, int batch_size,
51 int input_size, int aux_input_size, int num_units,
70 int num_units, int batch_size, int output_batch_leading_dim,
80 const float* bias_ptr, int input_size, int aux_input_size, int num_units,
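
Taken together, these matches pin down what num_units means in the TFLite RNN kernels: the input weights form a [num_units, input_size] matrix, the recurrent weights are [num_units, num_units], the bias has num_units entries, and the output/hidden state is num_units wide per batch row. A minimal NumPy sketch of one such batch step follows; the function and variable names are illustrative, not the TFLite API.

import numpy as np

def rnn_batch_step(x, h, w_in, w_rec, b, activation=np.tanh):
    """One basic-RNN step using the shapes seen in RnnBatchStep.

    x:     [batch_size, input_size]   input batch
    h:     [batch_size, num_units]    previous hidden state
    w_in:  [num_units, input_size]    input weights
    w_rec: [num_units, num_units]     recurrent weights
    b:     [num_units]                bias, broadcast across the batch
    Returns the new hidden state / output, shape [batch_size, num_units].
    """
    return activation(x @ w_in.T + h @ w_rec.T + b)

batch_size, input_size, num_units = 2, 8, 16
x = np.random.randn(batch_size, input_size).astype(np.float32)
h = np.zeros((batch_size, num_units), dtype=np.float32)
w_in = np.random.randn(num_units, input_size).astype(np.float32)
w_rec = np.random.randn(num_units, num_units).astype(np.float32)
b = np.zeros(num_units, dtype=np.float32)
h = rnn_batch_step(x, h, w_in, w_rec, b)  # the output doubles as the next hidden state
print(h.shape)  # (2, 16)
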
/external/tensorflow/tensorflow/contrib/grid_rnn/python/ops/
grid_rnn_cell.py
51 num_units, argument
123 num_units)
130 rnn.LSTMCell, num_units=num_units, state_is_tuple=state_is_tuple)
132 my_cell_fn = lambda: cell_fn(num_units)
245 cell_output_size = total_cell_state_size - conf.num_units
263 [-1, conf.num_units])
265 state, [0, start_idx + conf.num_units], [-1, cell_output_size])
268 [-1, conf.num_units])
301 'project_m_{}'.format(j), [input_sz, conf.num_units],
307 'project_c_{}'.format(j), [input_sz, conf.num_units],
[all …]
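
grid_rnn_cell.py does not consume num_units directly; it binds it into a cell factory (lines 130–132) so that each grid dimension can instantiate its own cell. A hedged sketch of that pattern in plain Python, using functools.partial and a stand-in class instead of rnn.LSTMCell:

import functools

class FakeLSTMCell:
    """Stand-in for rnn.LSTMCell, used only to show the factory pattern."""
    def __init__(self, num_units, state_is_tuple=True):
        self.num_units = num_units
        self.state_is_tuple = state_is_tuple

# Bind num_units once, then call the factory wherever a fresh cell is needed.
cell_fn = functools.partial(FakeLSTMCell, num_units=64, state_is_tuple=True)
cells = [cell_fn() for _ in range(3)]  # e.g. one cell per grid dimension
print([c.num_units for c in cells])    # [64, 64, 64]
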
/external/tensorflow/tensorflow/contrib/cudnn_rnn/python/kernel_tests/
cudnn_rnn_ops_test.py
66 num_units, argument
100 num_units).astype(dtype.as_numpy_dtype),
105 num_units).astype(dtype.as_numpy_dtype),
121 shape=[input_size + num_units, num_units * 4],
124 "rnn/lstm_cell/bias", shape=[num_units * 4], dtype=dtype)
127 cell = rnn_cell_impl.LSTMCell(num_units, forget_bias=0., reuse=True)
140 num_layers, num_units, input_size)
345 num_units, argument
360 num_units,
387 def test_training(self, num_units, input_size, batch_size, time, num_layers, argument
[all …]
cudnn_rnn_ops_benchmark.py
66 num_units = config["num_units"]
70 return "y%d_u%d_b%d_q%d" % (num_layers, num_units, batch_size, seq_length)
92 num_units = config["num_units"]
97 model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
100 array_ops.ones([seq_length, batch_size, num_units]))
102 array_ops.ones([num_layers, batch_size, num_units]))
104 array_ops.ones([num_layers, batch_size, num_units]))
124 num_units = config["num_units"]
129 inputs = array_ops.zeros([batch_size, seq_length, num_units],
133 [contrib_rnn.BasicLSTMCell(num_units) for _ in range(num_layers)])
[all …]
cudnn_rnn_test.py
87 num_units, argument
103 dtype=dtype, shape=[None, None, num_units], name="h")
105 dtype=dtype, shape=[None, None, num_units], name="c")
122 num_units,
169 num_units = self._rnn.num_units
176 num_units).astype(np_dtype)
179 num_units).astype(np_dtype)
188 num_units = self._rnn.num_units
192 num_units)).astype(np_dtype)
195 num_units)).astype(np_dtype)
[all …]
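
The variables rebuilt at lines 121–124 of cudnn_rnn_ops_test.py make the role of num_units explicit: a canonical LSTMCell kernel has shape [input_size + num_units, 4 * num_units] and its bias has 4 * num_units entries (one slice per gate). A quick parameter-count check under those shapes; the helper name is made up for illustration:

def lstm_param_count(input_size, num_units):
    """Parameters of one canonical LSTM layer: kernel plus bias for the 4 gates."""
    kernel = (input_size + num_units) * 4 * num_units  # [input_size + num_units, 4 * num_units]
    bias = 4 * num_units                               # [4 * num_units]
    return kernel + bias

print(lstm_param_count(input_size=32, num_units=128))  # 82432
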
/external/tensorflow/tensorflow/contrib/slim/python/slim/nets/
resnet_v1.py
227 def resnet_v1_block(scope, base_depth, num_units, stride): argument
244 }] * (num_units - 1) + [{
260 resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
261 resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
262 resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
263 resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
286 resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
287 resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
288 resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
289 resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
[all …]
resnet_v2.py
240 def resnet_v2_block(scope, base_depth, num_units, stride): argument
257 }] * (num_units - 1) + [{
273 resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
274 resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
275 resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
276 resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
299 resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
300 resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
301 resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
302 resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
[all …]
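
In the slim ResNets, num_units is the number of bottleneck units inside one block, and those counts determine the advertised depth: each bottleneck unit holds three convolutions, plus one stem convolution and one logits layer. A small arithmetic check against the block definitions above (the depth formula is the usual ResNet accounting, stated here as an assumption):

def resnet_depth(units_per_block):
    """Depth of a bottleneck ResNet: 3 convs per unit + stem conv + logits layer."""
    return 3 * sum(units_per_block) + 2

print(resnet_depth([3, 4, 6, 3]))   # 50  -> the 3/4/6/3 configuration above
print(resnet_depth([3, 4, 23, 3]))  # 101 -> the 3/4/23/3 configuration above
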
/external/tensorflow/tensorflow/contrib/rnn/python/kernel_tests/
rnn_cell_test.py
196 num_units = 2
203 c = array_ops.zeros([batch_size, num_units])
207 num_units=num_units,
218 c.name: 0.1 * np.ones((batch_size, num_units)),
225 self.assertEqual(res[1][0].shape, (batch_size, num_units))
368 num_units = 2
369 state_size = num_units * 2
385 num_units=num_units, forget_bias=1.0, state_is_tuple=False)(x, m)
402 num_units = 8
403 state_size = num_units * 2
[all …]
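
Several of these tests lean on state_size = num_units * 2, which is the flattened LSTM state (c and h concatenated) when state_is_tuple=False. A hedged check of that relationship, assuming the TF 1.x rnn_cell API via tf.compat.v1:

import tensorflow as tf

num_units = 8
# With state_is_tuple=False the state is [c; h] concatenated, so
# state_size == 2 * num_units, matching the tests above.
cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(num_units, state_is_tuple=False)
print(cell.state_size)   # 16
print(cell.output_size)  # 8  (== num_units)
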
/external/tensorflow/tensorflow/core/ops/
cudnn_rnn_ops_test.cc
44 int num_units = 4; in TEST() local
47 std::vector<int> input_shape = {seq_length, batch_size, num_units}; in TEST()
49 num_units}; in TEST()
51 num_units * dir_count}; in TEST()
76 int num_units = 4; in TEST() local
79 std::vector<int> input_shape = {seq_length, batch_size, num_units}; in TEST()
81 num_units}; in TEST()
83 num_units * dir_count}; in TEST()
108 int num_units = 4; in TEST() local
111 std::vector<int> input_shape = {max_seq_length, batch_size, num_units}; in TEST()
[all …]
/external/tensorflow/tensorflow/lite/kernels/
unidirectional_sequence_rnn.cc
75 const int num_units = input_weights->dims->data[0]; in Prepare() local
87 TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[1], num_units); in Prepare()
95 output_size_array->data[2] = num_units; in Prepare()
160 const int num_units = input_weights->dims->data[0]; in EvalFloat() local
175 float* output_ptr_batch = output->data.f + s * num_units * batch_size; in EvalFloat()
179 input_size, num_units, batch_size, num_units, params->activation, in EvalFloat()
186 float* hidden_state_ptr_batch = hidden_state->data.f + b * num_units; in EvalFloat()
192 output->data.f + b * num_units * max_time + s * num_units; in EvalFloat()
196 input_size, num_units, /*batch_size=*/1, num_units, in EvalFloat()
215 const int num_units = input_weights->dims->data[0]; in EvalHybrid() local
[all …]
svdf.cc
47 int batch_size, int memory_size, int num_filters, int num_units, int rank, in ApplyTimeWeightsBiasAndActivation() argument
66 tensor_utils::VectorBatchVectorAssign(bias->data.f, num_units, batch_size, in ApplyTimeWeightsBiasAndActivation()
69 tensor_utils::ZeroVector(output->data.f, batch_size * num_units); in ApplyTimeWeightsBiasAndActivation()
74 float* output_ptr_batch = output->data.f + b * num_units; in ApplyTimeWeightsBiasAndActivation()
77 num_units, rank); in ApplyTimeWeightsBiasAndActivation()
82 float* output_ptr_batch = output->data.f + b * num_units; in ApplyTimeWeightsBiasAndActivation()
83 tensor_utils::ApplyActivationToVector(output_ptr_batch, num_units, in ApplyTimeWeightsBiasAndActivation()
150 const int num_units = num_filters / rank; in Prepare() local
158 TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units); in Prepare()
174 output_size_array->data[1] = num_units; in Prepare()
[all …]
unidirectional_sequence_rnn_test.cc
229 int num_units() { return units_; } in num_units() function in tflite::__anon2f8e60700111::UnidirectionalRNNOpModel
292 float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len(); in TEST()
317 float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len(); in TEST()
343 float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len(); in TEST()
372 float* golden_batch_start = rnn_golden_output + i * rnn.num_units(); in TEST()
373 float* golden_batch_end = golden_batch_start + rnn.num_units(); in TEST()
401 float* golden_batch_start = rnn_golden_output + i * rnn.num_units(); in TEST()
402 float* golden_batch_end = golden_batch_start + rnn.num_units(); in TEST()
431 float* golden_batch_start = rnn_golden_output + i * rnn.num_units(); in TEST()
432 float* golden_batch_end = golden_batch_start + rnn.num_units(); in TEST()
basic_rnn_test.cc
217 int num_units() { return units_; } in num_units() function in tflite::__anon35c533a00111::RNNOpModel
275 float* golden_start = rnn_golden_output + i * rnn.num_units(); in TEST()
276 float* golden_end = golden_start + rnn.num_units(); in TEST()
302 float* golden_start = rnn_golden_output + i * rnn.num_units(); in TEST()
303 float* golden_end = golden_start + rnn.num_units(); in TEST()
330 float* golden_start = rnn_golden_output + i * rnn.num_units(); in TEST()
331 float* golden_end = golden_start + rnn.num_units(); in TEST()
fully_connected.cc
110 const int num_units = filter->dims->data[0]; in Prepare() local
167 output_size_array->data[1] = num_units; in Prepare()
184 const int num_units = filter->dims->data[0]; in EvalPie() local
188 tensor_utils::VectorBatchVectorAssign(bias->data.f, num_units, batch_size, in EvalPie()
191 tensor_utils::ZeroVector(output->data.f, batch_size * num_units); in EvalPie()
196 filter->data.f, num_units, input_size, input->data.f, batch_size, in EvalPie()
200 tensor_utils::ApplyActivationToVector(output->data.f, batch_size * num_units, in EvalPie()
227 const int num_units = filter->dims->data[0]; in EvalHybrid() local
231 tensor_utils::VectorBatchVectorAssign(bias->data.f, num_units, batch_size, in EvalHybrid()
234 tensor_utils::ZeroVector(output->data.f, batch_size * num_units); in EvalHybrid()
[all …]
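
Across the TFLite kernels, num_units is not a user-supplied parameter; Prepare() recovers it from tensor shapes: the RNN and fully_connected kernels read it from the weights' first dimension, while svdf.cc derives it as num_filters / rank and sizes the bias and output from it. A short sketch of those shape relationships (plain NumPy, not the TFLite API):

import numpy as np

# fully_connected / RNN kernels: num_units is dim 0 of the weights,
# and the output is [batch_size, num_units].
batch_size, input_size = 4, 20
weights = np.zeros((10, input_size), dtype=np.float32)  # [num_units, input_size]
num_units_fc = weights.shape[0]
output = np.zeros((batch_size, num_units_fc), dtype=np.float32)

# SVDF: feature filters come in groups of `rank`, so num_units = num_filters / rank,
# which is what svdf.cc's Prepare() checks against the bias and output shapes.
rank, num_filters = 2, 24
assert num_filters % rank == 0
num_units_svdf = num_filters // rank
bias = np.zeros(num_units_svdf, dtype=np.float32)
print(num_units_fc, num_units_svdf)  # 10 12
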
/external/tensorflow/tensorflow/contrib/cudnn_rnn/python/layers/
cudnn_rnn.py
159 num_units, argument
209 self._num_units = num_units
225 def num_units(self): member in _CudnnRNN
460 num_units = self._num_units
465 wts_applied_on_inputs = [(num_units, input_size)] * num_gates
468 wts_applied_on_inputs = [(num_units, 2 * num_units)] * num_gates
470 wts_applied_on_inputs = [(num_units, num_units)] * num_gates
471 wts_applied_on_hidden_states = [(num_units, num_units)] * num_gates
489 num_units=self._num_units,
530 num_units=self.num_units,
[all …]
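
Lines 460–471 of cudnn_rnn.py show how num_units sizes the canonical per-gate weights: layer 0 applies (num_units, input_size) matrices to the inputs, later layers use (num_units, 2 * num_units) when bidirectional or (num_units, num_units) otherwise, and the recurrent weights are always (num_units, num_units). A hedged re-statement of that bookkeeping in plain Python (mirroring, not calling, the layer):

def canonical_weight_shapes(num_units, input_size, layer, is_bidirectional, num_gates=4):
    """Per-gate weight shapes for one cuDNN RNN layer (4 gates for LSTM, 3 for GRU)."""
    if layer == 0:
        input_shapes = [(num_units, input_size)] * num_gates     # reads the raw inputs
    elif is_bidirectional:
        input_shapes = [(num_units, 2 * num_units)] * num_gates  # fw+bw outputs concatenated
    else:
        input_shapes = [(num_units, num_units)] * num_gates      # previous layer's output
    hidden_shapes = [(num_units, num_units)] * num_gates         # recurrent weights
    return input_shapes, hidden_shapes

inp, hid = canonical_weight_shapes(num_units=128, input_size=32, layer=0, is_bidirectional=False)
print(inp[0], hid[0])  # (128, 32) (128, 128)
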
/external/tensorflow/tensorflow/contrib/cudnn_rnn/python/ops/
cudnn_rnn_ops.py
69 def __init__(self, num_units, reuse=None): argument
71 num_units, forget_bias=0, cell_clip=None, use_peephole=False,
111 def __init__(self, num_units, reuse=None, kernel_initializer=None): argument
113 num_units,
185 num_units, argument
208 self._num_units = num_units
240 num_units=self._num_units,
261 num_units=self._num_units,
433 num_units = self._num_units
437 input_weight_width = num_units
[all …]
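
The cell wrappers at lines 69–113 exist so that weights trained with the fused cuDNN kernels can be reused by ordinary cell-based RNNs: CudnnCompatibleLSTMCell takes the same num_units but pins forget_bias to 0 to line up with cuDNN's parameterization. A hedged usage sketch, assuming the TF 1.x tf.contrib.cudnn_rnn API (tf.contrib is not available in TF 2.x):

import tensorflow as tf  # TF 1.x

num_units = 128
# Same num_units as the CudnnLSTM that produced the checkpoint; forget_bias is
# fixed to 0 inside the cell so restored weights match cuDNN's math.
cell = tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(num_units)
inputs = tf.placeholder(tf.float32, [None, None, 32])  # [batch, time, input_size]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [batch, time, num_units]; state: LSTMStateTuple of two [batch, num_units] tensors
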
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
rnn_common.py
61 def _get_single_cell(cell_type, num_units): argument
78 return cell_type(num_units=num_units)
81 def construct_rnn_cell(num_units, cell_type='basic_rnn', argument
99 if not isinstance(num_units, (list, tuple)):
100 num_units = (num_units,)
102 cells = [_get_single_cell(cell_type, n) for n in num_units]
state_saving_rnn_estimator_test.py
327 num_units = [4]
341 num_units=num_units,
382 num_units = [4]
417 num_units=num_units,
473 num_units = [4] * num_rnn_layers
499 num_units=num_units,
532 num_units = [4]
556 num_units=num_units,
605 num_units = [4]
639 num_units=num_units,
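
construct_rnn_cell (lines 81–102 of rnn_common.py) accepts either a single integer or a sequence: a scalar num_units is wrapped into a one-element tuple, and each entry becomes one layer of the stacked cell. A simplified sketch of that normalization; the cell_factory argument is made up for illustration:

def construct_rnn_cell(num_units, cell_factory):
    """Simplified version of rnn_common.construct_rnn_cell's num_units handling."""
    if not isinstance(num_units, (list, tuple)):
        num_units = (num_units,)  # scalar -> single-layer stack
    return [cell_factory(n) for n in num_units]

single = construct_rnn_cell(64, cell_factory=lambda n: ("basic_rnn", n))
stacked = construct_rnn_cell([64, 32], cell_factory=lambda n: ("basic_rnn", n))
print(single)   # [('basic_rnn', 64)]
print(stacked)  # [('basic_rnn', 64), ('basic_rnn', 32)]
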
/external/tensorflow/tensorflow/python/kernel_tests/
rnn_test.py
722 num_units=input_size,
744 num_units=input_size,
765 num_units = 512
771 np.random.randn(batch_size, num_units).astype(np.float32)
811 def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu): argument
819 np.random.randn(batch_size, num_units).astype(np.float32)
845 (batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
855 num_units=input_size,
873 def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units, argument
882 np.random.randn(batch_size, num_units).astype(np.float32)
[all …]
rnn_cell_test.py
384 num_units = 3
392 num_units, initializer=initializer, state_is_tuple=False)
399 self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
407 num_units = 3
415 num_units,
426 self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
434 self.assertAllEqual(value, np.zeros((batch_size, num_units)))
438 num_units = 3
445 state_saver = TestStateSaver(batch_size, 2 * num_units)
447 num_units,
[all …]
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_CudnnRNNV3.pbtxt
12 when input_size == num_units; 'auto_select' implies 'skip_input' when
13 input_size == num_units; otherwise, it implies 'linear_input'.
23 [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
24 is [batch_size, num_layer * dir, num_units].
26 [num_layer * dir, batch, num_units]. For other models, it is ignored.
33 [seq_length, batch_size, dir * num_units]. If time_major is false, the
34 shape is [batch_size, seq_length, dir * num_units].
api_def_CudnnRNNBackpropV3.pbtxt
12 when input_size == num_units; 'auto_select' implies 'skip_input' when
13 input_size == num_units; otherwise, it implies 'linear_input'.
23 [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
24 is [batch_size, num_layer * dir, num_units].
26 [num_layer * dir, batch, num_units]. For other models, it is ignored.
33 [seq_length, batch_size, dir * num_units]. If time_major is false, the
34 shape is [batch_size, seq_length, dir * num_units].
api_def_CudnnRNN.pbtxt
11 when input_size == num_units; 'auto_select' implies 'skip_input' when
12 input_size == num_units; otherwise, it implies 'linear_input'.
20 num_units].
22 [num_layer * dir, batch, num_units]. For other models, it is ignored.
28 dir * num_units].
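
The three api_def files describe one shape contract, easiest to read as arithmetic on num_units: the time-major input is [seq_length, batch_size, input_size], the hidden/cell states are [num_layers * dir, batch_size, num_units], and the output's last dimension is dir * num_units, with dir = 2 for bidirectional models and 1 otherwise. A small shape sketch under those assumptions (illustrative helper, not a TensorFlow API):

def cudnn_rnn_shapes(seq_length, batch_size, input_size, num_units, num_layers, bidirectional):
    """Tensor shapes implied by the CudnnRNN api_def text (time-major layout)."""
    dir_count = 2 if bidirectional else 1
    return {
        "input":   (seq_length, batch_size, input_size),
        "input_h": (num_layers * dir_count, batch_size, num_units),
        "input_c": (num_layers * dir_count, batch_size, num_units),  # LSTM only
        "output":  (seq_length, batch_size, dir_count * num_units),
    }

print(cudnn_rnn_shapes(seq_length=10, batch_size=4, input_size=32,
                       num_units=128, num_layers=2, bidirectional=True))
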
/external/tensorflow/tensorflow/contrib/rnn/python/ops/
rnn_cell.py
136 num_units, argument
186 self._num_units = num_units
203 rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
204 if state_is_tuple else num_units + num_proj)
208 rnn_cell_impl.LSTMStateTuple(num_units, num_units)
209 if state_is_tuple else 2 * num_units)
210 self._output_size = num_units
338 num_units, argument
369 self._num_units = num_units
377 self._state_size = 2 * num_units
[all …]
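
Lines 203–210 show the two state layouts a projected LSTM variant exposes: with a projection the state is LSTMStateTuple(num_units, num_proj) (or num_units + num_proj flattened); without one it is LSTMStateTuple(num_units, num_units) (or 2 * num_units) and the output size is simply num_units. The stock LSTMCell behaves the same way, so a hedged TF 1.x check (via tf.compat.v1) looks like:

import tensorflow as tf

projected = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units=64, num_proj=32)
plain = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units=64)

print(projected.state_size)   # LSTMStateTuple(c=64, h=32)
print(projected.output_size)  # 32  (the projection size)
print(plain.state_size)       # LSTMStateTuple(c=64, h=64)
print(plain.output_size)      # 64  (== num_units)
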
