
Searched refs:vocabulary_size (Results 1 – 21 of 21) sorted by relevance

/external/tensorflow/tensorflow/contrib/feature_column/python/feature_column/
sequence_feature_column_test.py:90 vocabulary_size = 3
105 self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
112 key='aaa', num_buckets=vocabulary_size)
118 key='bbb', num_buckets=vocabulary_size)
146 vocabulary_size = 3
155 key='aaa', num_buckets=vocabulary_size)
167 vocabulary_size = 3
191 self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
207 key='aaa', num_buckets=vocabulary_size)
209 key='bbb', num_buckets=vocabulary_size)
[all …]
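
These tests (mirrored under tensorflow/python/feature_column below) follow one pattern: build a sequence identity column over vocabulary_size ids, wrap it in an embedding column, and assert that the embedding table has shape (vocabulary_size, embedding_dimension). A minimal sketch of that pattern, assuming the public tf.feature_column API; embedding_dimension is an assumed value, not taken from the file:

import tensorflow as tf

vocabulary_size = 3       # ids 0, 1, 2, as in the tests
embedding_dimension = 2   # assumed

seq_col = tf.feature_column.sequence_categorical_column_with_identity(
    key='aaa', num_buckets=vocabulary_size)
emb_col = tf.feature_column.embedding_column(
    seq_col, dimension=embedding_dimension)
# The embedding variable created for emb_col has shape
# (vocabulary_size, embedding_dimension), which is what the
# assertAllEqual calls above verify.
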
sequence_feature_column.py:272 key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0, argument
327 vocabulary_size=vocabulary_size,
/external/tensorflow/tensorflow/python/feature_column/
sequence_feature_column_test.py:93 vocabulary_size = 3
108 self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
115 key='aaa', num_buckets=vocabulary_size)
121 key='bbb', num_buckets=vocabulary_size)
148 vocabulary_size = 3
157 key='aaa', num_buckets=vocabulary_size)
171 vocabulary_size = 3
195 self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
211 key='aaa', num_buckets=vocabulary_size)
213 key='bbb', num_buckets=vocabulary_size)
[all …]
feature_column_test.py:3381 key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
3393 key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
3400 vocabulary_size=3,
3413 vocabulary_size=3,
3426 key='aaa', vocabulary_file=None, vocabulary_size=3)
3431 key='aaa', vocabulary_file='', vocabulary_size=3)
3436 key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
3451 vocabulary_size=-1)
3456 vocabulary_size=0)
3463 vocabulary_size=self._wire_vocabulary_size + 1)
[all …]
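
The trailing cases (vocabulary_size=-1, 0, and one past the actual file length) exercise validation: construction requires a positive vocabulary_size. A hedged sketch of the passing and failing calls, using the public tf.feature_column API as the tests do:

import tensorflow as tf

# Valid: declared size matches the (hypothetical) three-line vocabulary file.
column = tf.feature_column.categorical_column_with_vocabulary_file(
    key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)

# Invalid: vocabulary_size < 1 is rejected up front (see the
# `if vocabulary_size < 1` check in feature_column.py below).
try:
    tf.feature_column.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=0)
except ValueError as err:
    print(err)
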
feature_column_v2_test.py:4517 key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
4529 key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
4536 vocabulary_size=3,
4549 vocabulary_size=3,
4562 key='aaa', vocabulary_file=None, vocabulary_size=3)
4567 key='aaa', vocabulary_file='', vocabulary_size=3)
4572 key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
4589 vocabulary_size=-1)
4594 vocabulary_size=0)
4601 vocabulary_size=self._wire_vocabulary_size + 1)
[all …]
sequence_feature_column.py:297 key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0, argument
354 vocabulary_size=vocabulary_size,
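
Both copies of sequence_feature_column.py expose the same signature: vocabulary_size is optional and is simply forwarded to the underlying vocabulary-file column. A sketch of a call; the key and file name are hypothetical:

import tensorflow as tf

seq_col = tf.feature_column.sequence_categorical_column_with_vocabulary_file(
    key='tokens',                  # hypothetical key
    vocabulary_file='tokens.txt',  # hypothetical path
    vocabulary_size=None,          # None: inferred from the file, see below
    num_oov_buckets=0)
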
feature_column.py:1144 vocabulary_size=None, argument
1229 if vocabulary_size is None:
1234 vocabulary_size = sum(1 for _ in f)
1237 'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
1240 if vocabulary_size < 1:
1255 vocabulary_size=vocabulary_size,
2737 vocab_size=self.vocabulary_size,
2745 return self.vocabulary_size + self.num_oov_buckets
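
Lines 1229-1255 are where an omitted vocabulary_size gets filled in: the file is read once and its line count becomes the size, and the column's total bucket count is vocabulary_size + num_oov_buckets. A pure-Python sketch of that logic, with open() standing in for the gfile API the real code uses:

def infer_vocabulary_size(vocabulary_file, vocabulary_size=None,
                          num_oov_buckets=0):
    if vocabulary_size is None:
        # One vocabulary entry per line, as in the real implementation.
        with open(vocabulary_file) as f:
            vocabulary_size = sum(1 for _ in f)
    if vocabulary_size < 1:
        raise ValueError('Invalid vocabulary_size %d.' % vocabulary_size)
    # cf. _num_buckets at line 2745: vocabulary_size + num_oov_buckets.
    return vocabulary_size, vocabulary_size + num_oov_buckets
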
feature_column_v2.py:1485 vocabulary_size=None, argument
1568 key, vocabulary_file, vocabulary_size,
1576 vocabulary_size=None, argument
1661 if vocabulary_size is None:
1666 vocabulary_size = sum(1 for _ in f)
1669 'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
1672 if vocabulary_size < 1:
1687 vocabulary_size=vocabulary_size,
3569 vocab_size=self.vocabulary_size,
3589 return self.vocabulary_size + self.num_oov_buckets
/external/tensorflow/tensorflow/contrib/seq2seq/python/kernel_tests/
basic_decoder_test.py:130 vocabulary_size = 7
131 cell_depth = vocabulary_size # cell's logits must match vocabulary size
133 start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
137 embeddings = np.random.randn(vocabulary_size,
139 cell = rnn_cell.LSTMCell(vocabulary_size)
202 vocabulary_size = 7
203 cell_depth = vocabulary_size # cell's logits must match vocabulary size
206 start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
213 embeddings = np.random.randn(vocabulary_size,
215 cell = rnn_cell.LSTMCell(vocabulary_size)
[all …]
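
The decoder tests pin cell_depth to vocabulary_size because BasicDecoder reads the cell's outputs directly as per-token logits. A sketch of the setup these tests build; batch_size and embedding_dim are assumed values, not taken from the file:

import numpy as np

batch_size = 5                 # assumed
embedding_dim = 10             # assumed
vocabulary_size = 7
cell_depth = vocabulary_size   # cell's logits must match vocabulary size

start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
embeddings = np.random.randn(
    vocabulary_size, embedding_dim).astype(np.float32)
# The tests then build rnn_cell.LSTMCell(vocabulary_size) and wrap it in
# a BasicDecoder with a sampling helper (elided in the snippets above).
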
basic_decoder_v2_test.py:132 vocabulary_size = 7
133 cell_depth = vocabulary_size # cell's logits must match vocabulary size
135 start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
139 embeddings = np.random.randn(vocabulary_size,
142 cell = rnn_cell.LSTMCell(vocabulary_size)
208 vocabulary_size = 7
209 cell_depth = vocabulary_size # cell's logits must match vocabulary size
212 start_tokens = np.random.randint(0, vocabulary_size, size=batch_size)
216 embeddings = np.random.randn(vocabulary_size,
219 cell = rnn_cell.LSTMCell(vocabulary_size)
[all …]
/external/tensorflow/tensorflow/python/tpu/
tpu_embedding.py:52 vocabulary_size, argument
81 if not isinstance(vocabulary_size, int) or vocabulary_size < 1:
82 raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))
96 return super(TableConfig, cls).__new__(cls, vocabulary_size, dimension,
460 table_descriptor.vocabulary_size = table_config.vocabulary_size
526 vocabulary_size=self._table_to_config_dict[table].vocabulary_size,
821 vocabulary_size=table_config.vocabulary_size,
901 vocabulary_size=table_config.vocabulary_size,
909 vocabulary_size=table_config.vocabulary_size,
1087 vocabulary_size, argument
[all …]
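
TableConfig is strict about its first argument: vocabulary_size must be a positive Python int, anything else fails construction. A sketch of the check at lines 81-82 (_validate_vocabulary_size is a hypothetical helper name):

def _validate_vocabulary_size(vocabulary_size):
    # Mirrors the TableConfig check: a positive int or a ValueError.
    if not isinstance(vocabulary_size, int) or vocabulary_size < 1:
        raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))
    return vocabulary_size

_validate_vocabulary_size(50000)  # ok
# _validate_vocabulary_size(0)    # raises ValueError
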
feature_column_test.py:84 vocabulary_size = 3
103 self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
122 key='aaa', num_buckets=vocabulary_size)
223 vocabulary_size = 3
244 self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
265 key='aaa', num_buckets=vocabulary_size)
267 key='bbb', num_buckets=vocabulary_size)
_tpu_estimator_embedding.py:138 vocabulary_size, dimension = column.get_embedding_table_size()
140 vocabulary_size=vocabulary_size,
/external/tensorflow/tensorflow/examples/tutorials/word2vec/
word2vec_basic.py:78 vocabulary_size = 50000
105 vocabulary, vocabulary_size)
175 tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
181 tf.truncated_normal([vocabulary_size, embedding_size],
184 nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
199 num_classes=vocabulary_size))
286 for i in xrange(vocabulary_size):
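
The word2vec tutorial uses vocabulary_size to shape three variables: the embedding table, the NCE weight matrix, and the NCE bias vector, then passes it again as num_classes to the loss. A condensed sketch in the tutorial's TF1 style; embedding_size is an assumed value:

import math
import tensorflow as tf  # TF1-style API, as in the tutorial

vocabulary_size = 50000
embedding_size = 128  # assumed

embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
nce_weights = tf.Variable(
    tf.truncated_normal([vocabulary_size, embedding_size],
                        stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, ...,
#                num_classes=vocabulary_size) then treats each table row
# as one output class.
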
/external/tensorflow/tensorflow/python/training/
warm_starting_util_test.py:597 "sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
633 "sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
678 "sc_vocab", vocabulary_file=current_vocab_path, vocabulary_size=2)
701 new_vocab_size=sc_vocab.vocabulary_size,
761 "sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
821 new_vocab_size=sc_vocab.vocabulary_size,
854 "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
882 new_vocab_size=sc_vocab.vocabulary_size,
922 "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
945 new_vocab_size=sc_vocab.vocabulary_size,
[all …]
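
In these warm-starting tests, the declared vocabulary_size doubles as new_vocab_size when checkpoint rows are remapped onto a changed vocabulary. A small sketch showing where that attribute comes from; vocab.txt is a hypothetical path:

import tensorflow as tf

sc_vocab = tf.feature_column.categorical_column_with_vocabulary_file(
    'sc_vocab', vocabulary_file='vocab.txt',  # hypothetical path
    vocabulary_size=4)
# The tests reuse this attribute as new_vocab_size for checkpoint
# remapping (new_vocab_size=sc_vocab.vocabulary_size above).
print(sc_vocab.vocabulary_size)  # 4
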
/external/tensorflow/tensorflow/tools/api/golden/v2/
tensorflow.feature_column.pbtxt:17 …argspec: "args=[\'key\', \'vocabulary_file\', \'vocabulary_size\', \'dtype\', \'default_value\', \…
53 …argspec: "args=[\'key\', \'vocabulary_file\', \'vocabulary_size\', \'num_oov_buckets\', \'default_…
/external/tensorflow/tensorflow/tools/api/golden/v1/
tensorflow.feature_column.pbtxt:17 …argspec: "args=[\'key\', \'vocabulary_file\', \'vocabulary_size\', \'num_oov_buckets\', \'default_…
61 …argspec: "args=[\'key\', \'vocabulary_file\', \'vocabulary_size\', \'num_oov_buckets\', \'default_…
/external/tensorflow/tensorflow/examples/udacity/
6_lstm.ipynb:300 "vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '\n",
395 " batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)\n",
479 " p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n",
485 " b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n",
522 " ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
526 " fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
530 " cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
534 " ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))\n",
541 " w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))\n",
542 " b = tf.Variable(tf.zeros([vocabulary_size]))\n",
[all …]
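
The LSTM notebook derives vocabulary_size from the character set (26 letters plus space) and one-hot encodes every batch against it; all four gate input matrices and the output projection are then keyed on the same constant. A sketch of the data side; batch_size is assumed:

import string
import numpy as np

vocabulary_size = len(string.ascii_lowercase) + 1  # [a-z] + ' ' == 27

batch_size = 64  # assumed; the notebook's value is not shown here
# One one-hot row per sequence in the batch, as in the notebook's generator.
batch = np.zeros(shape=(batch_size, vocabulary_size), dtype=np.float32)
# Gate parameters ix, fx, cx, ox are (vocabulary_size, num_nodes)
# matrices, and the output layer maps back with a
# (num_nodes, vocabulary_size) weight w and bias b.
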
5_word2vec.ipynb:249 "vocabulary_size = 50000\n",
253 " count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n",
434 " tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n",
436 " tf.truncated_normal([vocabulary_size, embedding_size],\n",
438 " softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))\n",
446 … labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))\n",
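
The notebook caps the dictionary at vocabulary_size entries: index 0 is reserved for the unknown token and the remaining vocabulary_size - 1 slots go to the most frequent words. A sketch of that build step; the toy words list is hypothetical:

import collections

vocabulary_size = 50000
words = ['the', 'quick', 'brown', 'fox', 'the']  # hypothetical corpus

count = [['UNK', -1]]  # slot 0: everything outside the top words
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
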
/external/tensorflow/tensorflow/python/keras/preprocessing/
sequence_test.py:87 np.arange(3), vocabulary_size=3)
94 np.arange(5), vocabulary_size=5, window_size=1, categorical=True)
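
Both test cases call the skipgrams helper, where vocabulary_size bounds the id range that negative samples are drawn from (and, with categorical=True, labels come back one-hot). A sketch of the two calls via the public Keras path:

import numpy as np
from tensorflow.keras.preprocessing.sequence import skipgrams

# (couple, 0/1-label) pairs over ids 0..2; id 0 is skipped as padding.
couples, labels = skipgrams(np.arange(3), vocabulary_size=3)

# window_size=1 narrows the context; categorical=True one-hot encodes labels.
couples, labels = skipgrams(
    np.arange(5), vocabulary_size=5, window_size=1, categorical=True)
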
/external/tensorflow/tensorflow/core/protobuf/tpu/
tpu_embedding_configuration.proto:14 int32 vocabulary_size = 2; field
53 // of hosts, each of the first "table_descriptor.vocabulary_size % num_hosts"
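
The truncated proto comment describes mod sharding of the embedding table across hosts: with num_hosts shards, the first vocabulary_size % num_hosts hosts each hold one extra row. A pure-Python sketch of that split; rows_per_host is a hypothetical helper, not part of the proto:

def rows_per_host(vocabulary_size, num_hosts):
    base, extra = divmod(vocabulary_size, num_hosts)
    # The first `vocabulary_size % num_hosts` hosts get base + 1 rows.
    return [base + 1 if host < extra else base for host in range(num_hosts)]

assert rows_per_host(10, 4) == [3, 3, 2, 2]  # 10 % 4 == 2 larger shards
assert sum(rows_per_host(10, 4)) == 10       # every row placed exactly once
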