From ae43d592bbc85b4889da3b46fe1799224476ed0a Mon Sep 17 00:00:00 2001
From: pyup-bot
Date: Mon, 20 Apr 2020 09:39:45 +0200
Subject: [PATCH 1/7] Update sphinx from 2.4.4 to 3.0.2

---
 requirements_dev.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements_dev.txt b/requirements_dev.txt
index 9c85453..d13b7ad 100755
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -1,5 +1,5 @@
 # Documentation
-sphinx==2.4.4
+sphinx==3.0.2
 sphinx_rtd_theme==0.4.3
 numpydoc==0.9.2


From bcc7047e0d06a0d7c27aed08a36d68e2ff86dfcd Mon Sep 17 00:00:00 2001
From: armand
Date: Thu, 23 Apr 2020 12:02:08 +0200
Subject: [PATCH 2/7] Documentation update

---
 docs/tutorials/hole.rst           | 40 ++++++++++++++++---------------
 docs/tutorials/linkprediction.rst |  2 +-
 docs/tutorials/transe.rst         | 40 ++++++++++++++++---------------
 3 files changed, 43 insertions(+), 39 deletions(-)

diff --git a/docs/tutorials/hole.rst b/docs/tutorials/hole.rst
index e0cda4d..5f099fb 100755
--- a/docs/tutorials/hole.rst
+++ b/docs/tutorials/hole.rst
@@ -4,64 +4,66 @@ HolE

 To train HolE on FB15k::

-    from torch.optim import SGD
+    from torch.optim import Adam
     from torch import cuda
-    from torch.utils.data import DataLoader

     from torchkge.data.DataLoader import load_fb15k
+    from torchkge.evaluation import LinkPredictionEvaluator
     from torchkge.models import HolEModel
-    from torchkge.utils import LogisticLoss
     from torchkge.sampling import BernoulliNegativeSampler
+    from torchkge.utils import LogisticLoss, get_batches
+
+    from tqdm.autonotebook import tqdm
+

     # Load dataset
-    kg_train, _, _ = load_fb15k()
+    kg_train, kg_val, kg_test = load_fb15k()

     # Define some hyper-parameters for training
-    lr, nb_epochs, batch_size = 0.001, 500, 1024
+    lr = 0.001
     n_ent, n_rel = kg_train.n_ent, kg_train.n_rel
-    ent_emb_dim = 50
+    ent_emb_dim = 150
+    nb_epochs = 50
+    batch_size = 16384

     # Define the model and criterion
     model = HolEModel(ent_emb_dim, n_ent, n_rel)
     criterion = LogisticLoss()
+    sampler = BernoulliNegativeSampler(kg_train, kg_test=kg_test)

     # Move everything to CUDA if available
     if cuda.is_available():
+        cuda.set_device(0)
         cuda.empty_cache()
         model.cuda()
         criterion.cuda()

     # Define the torch optimizer to be used
-    optimizer = SGD(model.parameters(), lr=lr)
+    optimizer = Adam(model.parameters(), lr=lr, weight_decay=1e-5)

-    # Define the sampler useful for negative sampling during training
-    sampler = BernoulliNegativeSampler(kg_train, kg_test=kg_test)
+    h, t, r = kg_train.head_idx.cuda(), kg_train.tail_idx.cuda(), kg_train.relations.cuda()

-    dataloader = DataLoader(kg_train, batch_size=batch_size, shuffle=False,
-                            pin_memory=cuda.is_available())
+    iterator = tqdm(range(nb_epochs), unit='epoch')

-    for epoch in range(nb_epochs):
+    for epoch in iterator:
         running_loss = 0.0
-        for i, batch in enumerate(dataloader):
+
+        for i, batch in enumerate(get_batches(h, t, r, batch_size)):
             # get the input
             heads, tails, rels = batch[0], batch[1], batch[2]

-            if heads.is_pinned():
-                heads, tails, rels = heads.cuda(), tails.cuda(), rels.cuda()
-
-            # Create Negative Samples
             neg_heads, neg_tails = sampler.corrupt_batch(heads, tails, rels)

+            # zero model gradient
             model.zero_grad()

             # forward + backward + optimize
             positive_triplets, negative_triplets = model(heads, tails, neg_heads, neg_tails, rels)
-
             loss = criterion(positive_triplets, negative_triplets)
             loss.backward()
             optimizer.step()

             running_loss += loss.item()
-        print('[%d] loss: %.3f' % (epoch + 1, running_loss / (i + 1)))
+        iterator.set_description('Epoch mean loss: {:.5f}'.format(running_loss / len(kg_train)))

     model.normalize_parameters()
diff --git a/docs/tutorials/linkprediction.rst b/docs/tutorials/linkprediction.rst
index 6e8fa94..07a5527 100755
--- a/docs/tutorials/linkprediction.rst
+++ b/docs/tutorials/linkprediction.rst
@@ -14,6 +14,6 @@ To evaluate a model on link prediction::

     # Link prediction evaluation on test set.
     evaluator = LinkPredictionEvaluator(model, kg_test)
-    evaluator.evaluate(batch_size=32, k_max=10)
+    evaluator.evaluate(batch_size=512, k_max=10)
     evaluator.evaluate(k=10)
     evaluator.print_results()
diff --git a/docs/tutorials/transe.rst b/docs/tutorials/transe.rst
index ffa7b7a..6198902 100755
--- a/docs/tutorials/transe.rst
+++ b/docs/tutorials/transe.rst
@@ -4,64 +4,66 @@ TransE

 To train TransE on FB15k::

-    from torch.optim import SGD
+    from torch.optim import Adam
     from torch import cuda
-    from torch.utils.data import DataLoader

     from torchkge.data.DataLoader import load_fb15k
+    from torchkge.evaluation import LinkPredictionEvaluator
     from torchkge.models import TransEModel
-    from torchkge.utils import MarginLoss
     from torchkge.sampling import BernoulliNegativeSampler
+    from torchkge.utils import MarginLoss, get_batches
+
+    from tqdm.autonotebook import tqdm
+

     # Load dataset
-    kg_train, _, _ = load_fb15k()
+    kg_train, kg_val, kg_test = load_fb15k()

     # Define some hyper-parameters for training
-    lr, nb_epochs, batch_size, margin = 0.01, 500, 1024, 1
+    lr, margin = 0.001, 0.5
     n_ent, n_rel = kg_train.n_ent, kg_train.n_rel
-    ent_emb_dim = 50
+    ent_emb_dim = 150
+    nb_epochs = 50
+    batch_size = 32768

     # Define the model and criterion
     model = TransEModel(ent_emb_dim, n_ent, n_rel, dissimilarity_type='L2')
     criterion = MarginLoss(margin)
+    sampler = BernoulliNegativeSampler(kg_train, kg_test=kg_test)

     # Move everything to CUDA if available
     if cuda.is_available():
+        cuda.set_device(0)
         cuda.empty_cache()
         model.cuda()
         criterion.cuda()

     # Define the torch optimizer to be used
-    optimizer = SGD(model.parameters(), lr=lr)
+    optimizer = Adam(model.parameters(), lr=lr, weight_decay=1e-5)

-    # Define the sampler useful for negative sampling during training
-    sampler = BernoulliNegativeSampler(kg_train, kg_test=kg_test)
+    h, t, r = kg_train.head_idx.cuda(), kg_train.tail_idx.cuda(), kg_train.relations.cuda()

-    dataloader = DataLoader(kg_train, batch_size=batch_size, shuffle=False,
-                            pin_memory=cuda.is_available())
+    iterator = tqdm(range(nb_epochs), unit='epoch')

-    for epoch in range(nb_epochs):
+    for epoch in iterator:
         running_loss = 0.0
-        for i, batch in enumerate(dataloader):
+
+        for i, batch in enumerate(get_batches(h, t, r, batch_size)):
             # get the input
             heads, tails, rels = batch[0], batch[1], batch[2]

-            if heads.is_pinned():
-                heads, tails, rels = heads.cuda(), tails.cuda(), rels.cuda()
-
-            # Create Negative Samples
             neg_heads, neg_tails = sampler.corrupt_batch(heads, tails, rels)

+            # zero model gradient
             model.zero_grad()

             # forward + backward + optimize
             positive_triplets, negative_triplets = model(heads, tails, neg_heads, neg_tails, rels)
-
             loss = criterion(positive_triplets, negative_triplets)
             loss.backward()
             optimizer.step()

             running_loss += loss.item()
-        print('[%d] loss: %.3f' % (epoch + 1, running_loss / (i + 1)))
+        iterator.set_description('Epoch mean loss: {:.5f}'.format(running_loss / len(kg_train)))

     model.normalize_parameters()

From 07fe76618780e9328835a539facb3b80d1cd572e Mon Sep 17 00:00:00 2001
From: armand
Date: Thu, 23 Apr 2020 18:33:06 +0200
Subject: [PATCH 3/7] Doc update

---
 docs/tutorials/hole.rst   | 8 +++++---
 docs/tutorials/transe.rst | 8 +++++---
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/docs/tutorials/hole.rst b/docs/tutorials/hole.rst
index 5f099fb..5732fcb 100755
--- a/docs/tutorials/hole.rst
+++ b/docs/tutorials/hole.rst
@@ -11,7 +11,7 @@ To train HolE on FB15k::
     from torchkge.evaluation import LinkPredictionEvaluator
     from torchkge.models import HolEModel
     from torchkge.sampling import BernoulliNegativeSampler
-    from torchkge.utils import LogisticLoss, get_batches
+    from torchkge.utils import LogisticLoss, DataLoader

     from tqdm.autonotebook import tqdm

@@ -37,14 +37,16 @@ To train HolE on FB15k::
         cuda.empty_cache()
         model.cuda()
         criterion.cuda()
+        cuda_val = 'all'
+    else:
+        cuda_val = None

     # Define the torch optimizer to be used
     optimizer = Adam(model.parameters(), lr=lr, weight_decay=1e-5)

-    h, t, r = kg_train.head_idx.cuda(), kg_train.tail_idx.cuda(), kg_train.relations.cuda()
+    dataloader = DataLoader(kg_train, batch_size=batch_size, use_cuda=cuda_val)

     iterator = tqdm(range(nb_epochs), unit='epoch')
-
     for epoch in iterator:
         running_loss = 0.0

diff --git a/docs/tutorials/transe.rst b/docs/tutorials/transe.rst
index 6198902..18c36ad 100755
--- a/docs/tutorials/transe.rst
+++ b/docs/tutorials/transe.rst
@@ -11,7 +11,7 @@ To train TransE on FB15k::
     from torchkge.evaluation import LinkPredictionEvaluator
     from torchkge.models import TransEModel
     from torchkge.sampling import BernoulliNegativeSampler
-    from torchkge.utils import MarginLoss, get_batches
+    from torchkge.utils import MarginLoss, DataLoader

     from tqdm.autonotebook import tqdm

@@ -37,14 +37,16 @@ To train TransE on FB15k::
         cuda.empty_cache()
         model.cuda()
         criterion.cuda()
+        cuda_val = 'all'
+    else:
+        cuda_val = None

     # Define the torch optimizer to be used
     optimizer = Adam(model.parameters(), lr=lr, weight_decay=1e-5)

-    h, t, r = kg_train.head_idx.cuda(), kg_train.tail_idx.cuda(), kg_train.relations.cuda()
+    dataloader = DataLoader(kg_train, batch_size=batch_size, use_cuda=cuda_val)

     iterator = tqdm(range(nb_epochs), unit='epoch')
-
     for epoch in iterator:
         running_loss = 0.0


From a45cf8c8f98de21a9f241138775f959bee0b188e Mon Sep 17 00:00:00 2001
From: armand
Date: Sat, 25 Apr 2020 00:37:31 +0200
Subject: [PATCH 4/7] Update mistake in the documentation.

---
 docs/tutorials/hole.rst   | 2 +-
 docs/tutorials/transe.rst | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/tutorials/hole.rst b/docs/tutorials/hole.rst
index 5732fcb..8bf6b49 100755
--- a/docs/tutorials/hole.rst
+++ b/docs/tutorials/hole.rst
@@ -56,7 +56,7 @@ To train HolE on FB15k::
             neg_heads, neg_tails = sampler.corrupt_batch(heads, tails, rels)

             # zero model gradient
-            model.zero_grad()
+            optimizer.zero_grad()

             # forward + backward + optimize
             positive_triplets, negative_triplets = model(heads, tails, neg_heads, neg_tails, rels)
diff --git a/docs/tutorials/transe.rst b/docs/tutorials/transe.rst
index 18c36ad..5dc6a29 100755
--- a/docs/tutorials/transe.rst
+++ b/docs/tutorials/transe.rst
@@ -56,7 +56,7 @@ To train TransE on FB15k::
             neg_heads, neg_tails = sampler.corrupt_batch(heads, tails, rels)

             # zero model gradient
-            model.zero_grad()
+            optimizer.zero_grad()

             # forward + backward + optimize
             positive_triplets, negative_triplets = model(heads, tails, neg_heads, neg_tails, rels)

From ceae5bba4a706d3ec7dffb43089a26ab7da16a85 Mon Sep 17 00:00:00 2001
From: pyup-bot
Date: Mon, 27 Apr 2020 08:41:05 +0200
Subject: [PATCH 5/7] Update sphinx from 3.0.2 to 3.0.3

---
 requirements_dev.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements_dev.txt b/requirements_dev.txt
index d13b7ad..191c1cb 100755
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -1,5 +1,5 @@
 # Documentation
-sphinx==3.0.2
+sphinx==3.0.3
 sphinx_rtd_theme==0.4.3
 numpydoc==0.9.2


From 17b532510d5a4cba315e4e7be464b78bc31fe237 Mon Sep 17 00:00:00 2001
From: pyup-bot
Date: Wed, 29 Apr 2020 03:39:12 +0000
Subject: [PATCH 6/7] Update pip from 20.0.2 to 20.1

---
 requirements_dev.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements_dev.txt b/requirements_dev.txt
index d13b7ad..a83f69c 100755
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -10,6 +10,6 @@ pytest-runner==5.2
 pytest==5.4.1

 # Deployement
-pip==20.0.2
+pip==20.1
 bumpversion==0.5.3
 wheel==0.34.2

From 36e67bfd6ec2ee1f292e80afc4d3918f65f4993a Mon Sep 17 00:00:00 2001
From: pyup-bot
Date: Sun, 3 May 2020 09:17:21 +0000
Subject: [PATCH 7/7] Update tox from 3.14.6 to 3.15.0

---
 requirements_dev.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements_dev.txt b/requirements_dev.txt
index 42b3c88..151b9f3 100755
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -5,7 +5,7 @@ numpydoc==0.9.2

 # Tests
 flake8==3.7.9
-tox==3.14.6
+tox==3.15.0
 pytest-runner==5.2
 pytest==5.4.1