Lines matching full:padding_idx
129 padding_idx = 2
133 embeddings[padding_idx] = padding_vec
134 embedding_nn = nn.Embedding.from_pretrained(embeddings, padding_idx=padding_idx)
135 self.assertEqual(embedding_nn.weight[padding_idx], padding_vec)
138 padding_idx = 2
141 embeddings, padding_idx=padding_idx
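
The matches at 129-141 exercise nn.Embedding.from_pretrained with a padding_idx. A minimal sketch of the behavior they pin down, with invented sizes and an invented marker vector:

import torch
import torch.nn as nn

num_embeddings, dim = 4, 3            # illustrative sizes
padding_idx = 2
padding_vec = torch.ones(dim) * 7     # arbitrary, recognizable row

embeddings = torch.rand(num_embeddings, dim)
embeddings[padding_idx] = padding_vec
embedding_nn = nn.Embedding.from_pretrained(embeddings, padding_idx=padding_idx)

# from_pretrained keeps the supplied padding row as-is (it does not
# re-zero it); padding_idx only pins that row's gradient to zero.
assert torch.equal(embedding_nn.weight[padding_idx], padding_vec)
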
179 embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
181 res_F = F.embedding(a, embeddings, padding_idx=2)
206 embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
208 res_F = F.embedding_bag(a, embeddings, padding_idx=2)
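
Lines 179-208 compare the module path against the functional path for the same padding_idx. A hedged sketch of that equivalence, with made-up weights and indices:

import torch
import torch.nn as nn
import torch.nn.functional as F

embeddings = torch.rand(10, 4)
a = torch.tensor([[0, 2, 4], [2, 9, 1]])  # 2D input: one bag per row

# nn.Embedding vs F.embedding with the same padding_idx
embed_mod = nn.Embedding.from_pretrained(embeddings, padding_idx=2)
assert torch.equal(embed_mod(a), F.embedding(a, embeddings, padding_idx=2))

# nn.EmbeddingBag vs F.embedding_bag: entries equal to padding_idx are
# skipped when each bag is reduced (mean by default)
bag_mod = nn.EmbeddingBag.from_pretrained(embeddings, padding_idx=2)
assert torch.allclose(bag_mod(a), F.embedding_bag(a, embeddings, padding_idx=2))
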
212 # Make sure that an error is thrown if padding_idx is out of bounds
219 functional_err_msg = r"padding_idx must be within the number of embeddings"
220 module_err_msg = r"padding_idx must be within num_embeddings"
222 for padding_idx in range(-(num_embeddings + 2), (num_embeddings + 2)):
223 if (padding_idx < -num_embeddings) or (padding_idx >= num_embeddings):
225 F.embedding_bag(a, embeddings, padding_idx=padding_idx)
228 num_embeddings, num_features, padding_idx=padding_idx
231 F.embedding_bag(a, embeddings, padding_idx=padding_idx)
233 num_embeddings, num_features, padding_idx=padding_idx
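
The loop at 222-233 pins down the legal range: padding_idx must satisfy -num_embeddings <= padding_idx < num_embeddings, matching the two error messages captured at 219-220. A compact illustration, hedged on the exact exception types:

import torch
import torch.nn as nn
import torch.nn.functional as F

num_embeddings, num_features = 10, 4
embeddings = torch.rand(num_embeddings, num_features)
a = torch.tensor([[0, 1], [2, 3]])

for padding_idx in (-num_embeddings - 1, num_embeddings):  # just out of range
    try:
        F.embedding_bag(a, embeddings, padding_idx=padding_idx)
    except RuntimeError:
        pass  # "padding_idx must be within the number of embeddings"
    try:
        nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
    except (AssertionError, RuntimeError):
        pass  # "padding_idx must be within num_embeddings"
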
267 embeddingbag = nn.EmbeddingBag(100, 3, include_last_offset=True, padding_idx=61)
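
Line 267 combines padding_idx with include_last_offset=True, under which offsets carries one extra trailing entry equal to len(indices), so bag i spans offsets[i]:offsets[i+1]. A small sketch with invented bag boundaries:

import torch
import torch.nn as nn

bag = nn.EmbeddingBag(100, 3, include_last_offset=True, padding_idx=61)
indices = torch.tensor([0, 61, 5, 61, 7])   # 61 is the padding index
offsets = torch.tensor([0, 2, 5])           # two bags: [0:2] and [2:5]
out = bag(indices, offsets)                 # shape (2, 3); padded entries skipped
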
401 embedding = nn.Embedding(10, 20, padding_idx=0).to(device, dtype)
407 embedding = nn.Embedding(10, 20, padding_idx=0, sparse=True).to(device, dtype)
413 # negative indexing check for padding_idx
414 # padding_idx=-2, num_embeddings=10 ==> index 8 padded
415 embedding = nn.Embedding(10, 20, padding_idx=-2).to(device, dtype)
421 embedding = nn.Embedding(10, 20, padding_idx=-2, sparse=True).to(device, dtype)
429 embedding = nn.Embedding(10, 20, padding_idx=2, sparse=True).to(device, dtype)
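
The matches at 401-429 cover device/dtype placement, sparse gradients, and negative padding_idx, which is normalized at construction: padding_idx=-2 with 10 rows resolves to row 8. A sketch of the resolution and of the zero gradient at the resolved row (sizes taken from the matched lines, the rest invented):

import torch
import torch.nn as nn

embedding = nn.Embedding(10, 20, padding_idx=-2)
assert embedding.padding_idx == 8                          # normalized at construction
assert torch.equal(embedding.weight[8], torch.zeros(20))   # constructor zeroes the row

embedding(torch.tensor([8, 1])).sum().backward()
assert embedding.weight.grad[8].abs().sum() == 0           # no gradient reaches the padding row
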
436 # out-of-bounds check for padding_idx
442 padding_idx=25,
449 padding_idx=-25,
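
The constructor checks at 436-449 apply the same bounds as the functional loop above: for nn.Embedding(10, 20), anything outside [-10, 9] should fail. A sketch, assuming the AssertionError implied by the module message at 220:

import torch.nn as nn

for bad in (25, -25):  # both outside [-10, 9] for 10 embeddings
    try:
        nn.Embedding(10, 20, padding_idx=bad)
    except AssertionError:
        pass  # padding_idx must be within num_embeddings
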
452 padding_idx = 0
453 embedding = nn.Embedding(5, 2, padding_idx=padding_idx).to(device, dtype)
461 other_indices + [padding_idx] * n, dtype=torch.long
463 pre = embedding.weight[padding_idx].clone()
465 after = (embedding.weight + embedding.weight.grad)[padding_idx]
478 after = (embedding.weight + embedding.weight.grad)[padding_idx]
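
Lines 452-478 assert the training-time invariant behind padding_idx: the gradient at that row is exactly zero, so an additive update leaves the row unchanged. A hedged reconstruction using the sizes from the matched lines:

import torch
import torch.nn as nn

padding_idx = 0
embedding = nn.Embedding(5, 2, padding_idx=padding_idx)
indices = torch.tensor([1, 2, padding_idx, padding_idx], dtype=torch.long)

pre = embedding.weight[padding_idx].clone()
embedding(indices).sum().backward()
after = (embedding.weight + embedding.weight.grad)[padding_idx]
assert torch.equal(after, pre)  # the padding row receives no gradient
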
483 # backward functions with padding_idx, given a 1D input separated into bags
535 # indices with padding_idx
537 indices_1D, offsets, include_last_offset, padding_idx
558 indices_in_bag.append(padding_idx)
605 padding_idx=padding_idx_1D,
614 padding_idx=padding_idx_2D,
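
The helper matched at 535-614 exercises embedding_bag's two input forms with a padding_idx. The equivalence the comparison relies on, sketched with invented values: a 2D (B, L) input reduces the same way as its flattened 1D form with offsets [0, L, 2L, ...].

import torch
import torch.nn.functional as F

weights = torch.rand(10, 4)
padding_idx = 2
indices_2D = torch.tensor([[0, 2, 4], [2, 9, 1]])  # shape (B, L) = (2, 3)
indices_1D = indices_2D.reshape(-1)                # flattened, shape (6,)
offsets = torch.tensor([0, 3])                     # bag starts for the 1D form

out_2D = F.embedding_bag(indices_2D, weights, padding_idx=padding_idx)
out_1D = F.embedding_bag(indices_1D, weights, offsets=offsets,
                         padding_idx=padding_idx)
assert torch.allclose(out_2D, out_1D)
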
636 # backward functions with padding_idx, given a 2D indices input. Compare
642 # Use a Python implementation of embedding_bag with padding_idx support
644 def embedding_bag_check(indices, weights, mode, sparse, padding_idx):
645 assert padding_idx is not None
647 indices, weights, padding_idx=padding_idx, sparse=sparse
653 # We must avoid including elements at padding_idx in the
656 per_sample_weights = indices.ne(padding_idx).to(dtype).unsqueeze(-1)
664 # We must avoid allowing elements at padding_idx to be chosen
667 indices.unsqueeze(-1) == padding_idx, -float("inf")
678 indices.eq(padding_idx).all(dim=-1).unsqueeze(-1), 0
710 for padding_idx in list(set(indices.flatten(0, -1).tolist())):
721 f"mode: '{mode}', sparse: {sparse}, padding_idx: {padding_idx}, "
725 # Check forward with a Python implementation of embedding_bag with padding_idx
727 indices, weights_check, mode, sparse, padding_idx
730 indices, weights, padding_idx=padding_idx, mode=mode, sparse=sparse
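
The reference implementation matched at 644-678 is only partially visible here; below is a hedged, self-contained reconstruction of the same idea (sparse is dropped for brevity, 2D indices assumed), followed by the native comparison that the loop at 710-730 performs. For sum and mean, positions equal to padding_idx are excluded via zero weights; for max they are masked to -inf, and bags made entirely of padding are defined as 0.

import torch
import torch.nn.functional as F

def embedding_bag_check(indices, weights, mode, padding_idx):
    # Reference reduction over 2D indices of shape (B, L).
    assert padding_idx is not None
    embedding = F.embedding(indices, weights, padding_idx=padding_idx)
    if mode in ("sum", "mean"):
        # Avoid including elements at padding_idx in the reduction by
        # weighting them with 0.
        mask = indices.ne(padding_idx).to(weights.dtype).unsqueeze(-1)  # (B, L, 1)
        out = (embedding * mask).sum(dim=-2)
        if mode == "mean":
            out = out / mask.sum(dim=-2).clamp(min=1)  # all-padding bags stay 0
        return out
    # mode == "max": avoid letting elements at padding_idx be chosen
    # by masking them to -inf first.
    masked = embedding.masked_fill(
        indices.unsqueeze(-1) == padding_idx, -float("inf")
    )
    out = masked.amax(dim=-2)
    # Bags that are all padding are defined as 0 rather than -inf.
    return out.masked_fill(indices.eq(padding_idx).all(dim=-1).unsqueeze(-1), 0)

weights = torch.rand(10, 4)
indices = torch.tensor([[0, 2, 4], [2, 2, 2]])  # second bag is all padding
for mode in ("sum", "mean", "max"):
    ref = embedding_bag_check(indices, weights, mode, padding_idx=2)
    out = F.embedding_bag(indices, weights, mode=mode, padding_idx=2)
    assert torch.allclose(ref, out), mode
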
788 @parametrize_test("padding_idx", [None, 0])
790 def test_embedding_bag_out_of_bounds_idx(self, device, dtypes, padding_idx, mode):
791 padding_idx = 0
814 padding_idx=padding_idx,
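
The parametrized test at 788-814 checks that out-of-bounds indices (as opposed to an out-of-bounds padding_idx) make embedding_bag raise whether or not a padding_idx is set; note that line 791 appears to shadow the parametrized value with a fixed 0. A hedged sketch with invented sizes:

import torch
import torch.nn.functional as F

weights = torch.rand(10, 4)
bad = torch.tensor([[0, 25]])  # 25 is out of range for 10 rows
for padding_idx in (None, 0):
    try:
        F.embedding_bag(bad, weights, mode="sum", padding_idx=padding_idx)
    except (RuntimeError, IndexError):
        pass  # exact exception type and message vary by device and build
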