Skip to content

Commit

Permalink
GH-17: GPU optimizations for LM embeddings
Browse files Browse the repository at this point in the history
  • Loading branch information
aakbik authored and tabergma committed Jul 31, 2018
1 parent 846392e commit 8986189
Showing 1 changed file with 2 additions and 5 deletions.
7 changes: 2 additions & 5 deletions flair/embeddings.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def embedding_type(self) -> str:


class DocumentEmbeddings(Embeddings):
"""Abstract base class for all token-level embeddings. Ever new type of word embedding must implement these methods."""
"""Abstract base class for all document-level embeddings. Every new type of document embedding must implement these methods."""

@property
@abstractmethod
Expand Down Expand Up @@ -208,9 +208,6 @@ def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
else:
word_embedding = np.zeros(self.embedding_length, dtype='float')

# if torch.cuda.is_available():
# word_embedding = torch.cuda.FloatTensor(word_embedding)
# else:
word_embedding = torch.FloatTensor(word_embedding)

token.set_embedding(self.name, word_embedding)
Expand Down Expand Up @@ -297,7 +294,7 @@ def _add_embeddings_internal(self, sentences: List[Sentence]):
character_embeddings[d[i]] = chars_embeds_temp[i]

for token_number, token in enumerate(sentence.tokens):
token.set_embedding(self.name, character_embeddings[token_number])
token.set_embedding(self.name, character_embeddings[token_number].cpu())


class CharLMEmbeddings(TokenEmbeddings):
Expand Down

0 comments on commit 8986189

Please sign in to comment.