import torch.nn as nn
import torch.nn.functional as F


class DummyModel(nn.Module):
    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        dense_input_size: int,
        dense_output_size: int,
        dense_layers_count: int,
        sparse: bool,
    ):
        r"""
        A dummy model with an EmbeddingBag layer followed by a stack of dense
        (Linear) layers.

        Args:
            num_embeddings (int): size of the dictionary of embeddings
            embedding_dim (int): the size of each embedding vector
            dense_input_size (int): size of each input sample
            dense_output_size (int): size of each output sample
            dense_layers_count (int): number of dense layers in the dense
                Sequential module
            sparse (bool): if True, gradient w.r.t. the weight matrix will be
                a sparse tensor
        """
        super().__init__()
        self.embedding = nn.EmbeddingBag(num_embeddings, embedding_dim, sparse=sparse)
        # Note: when dense_layers_count > 1, the stacked Linear layers only
        # compose if dense_input_size == dense_output_size.
        self.dense = nn.Sequential(
            *[
                nn.Linear(dense_input_size, dense_output_size)
                for _ in range(dense_layers_count)
            ]
        )

    def forward(self, x):
        x = self.embedding(x)
        return F.softmax(self.dense(x), dim=1)
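
# A minimal usage sketch (an assumed example, not part of the original file):
# the shapes and hyperparameters below are illustrative. dense_input_size is
# set equal to embedding_dim so the dense stack accepts the EmbeddingBag
# output, and dense_output_size matches dense_input_size so the stacked
# layers compose.
if __name__ == "__main__":
    import torch

    model = DummyModel(
        num_embeddings=100,
        embedding_dim=8,
        dense_input_size=8,
        dense_output_size=8,
        dense_layers_count=2,
        sparse=False,
    )
    # With no offsets argument, EmbeddingBag treats a 2D LongTensor of shape
    # (batch_size, bag_size) as batch_size bags and reduces each bag (mean by
    # default), yielding a (batch_size, embedding_dim) tensor.
    indices = torch.randint(0, 100, (4, 3))
    out = model(indices)
    print(out.shape)  # torch.Size([4, 8])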