Skip to content

Commit

Permalink
GH-212: Add file handler to logger in training.
Browse files Browse the repository at this point in the history
  • Loading branch information
tabergma committed Nov 28, 2018
1 parent cf17507 commit ec6b77c
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 12 deletions.
1 change: 0 additions & 1 deletion flair/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@

import sys
import logging
import warnings

logger = logging.getLogger(__name__)

Expand Down
24 changes: 15 additions & 9 deletions flair/trainers/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,13 @@ def train(self,
**kwargs
) -> dict:

log_line()
fh = logging.FileHandler(base_path / 'training.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)-15s %(message)s')
fh.setFormatter(formatter)
log.addHandler(fh)

log_line(log)
log.info(f'Evaluation method: {evaluation_metric.name}')

# cast string to Path
Expand Down Expand Up @@ -170,7 +176,7 @@ def train(self,
previous_learning_rate = learning_rate

for epoch in range(0 + self.epoch, max_epochs + self.epoch):
log_line()
log_line(log)

# bad_epochs = scheduler.num_bad_epochs
bad_epochs = 0
Expand All @@ -187,9 +193,9 @@ def train(self,

# stop training if learning rate becomes too small
if learning_rate < 0.0001:
log_line()
log_line(log)
log.info('learning rate too small - quitting training!')
log_line()
log_line(log)
break

if not test_mode:
Expand Down Expand Up @@ -227,7 +233,7 @@ def train(self,

self.model.eval()

log_line()
log_line(log)
log.info(f'EPOCH {epoch + 1}: lr {learning_rate:.4f} - bad epochs {bad_epochs}')

dev_metric = None
Expand Down Expand Up @@ -288,7 +294,7 @@ def train(self,
self.model.save(base_path / 'final-model.pt')

except KeyboardInterrupt:
log_line()
log_line(log)
log.info('Exiting from training early.')
if not param_selection_mode:
log.info('Saving model ...')
Expand All @@ -307,7 +313,7 @@ def final_test(self,
evaluation_metric: EvaluationMetric,
mini_batch_size: int):

log_line()
log_line(log)
log.info('Testing using best model ...')

self.model.eval()
Expand All @@ -329,12 +335,12 @@ def final_test(self,
f'{test_metric.precision(class_name):.4f} - recall: {test_metric.recall(class_name):.4f} - '
f'accuracy: {test_metric.accuracy(class_name):.4f} - f1-score: '
f'{test_metric.f_score(class_name):.4f}')
log_line()
log_line(log)

# if we are training over multiple datasets, do evaluation for each
if type(self.corpus) is MultiCorpus:
for subcorpus in self.corpus.corpora:
log_line()
log_line(log)
self._calculate_evaluation_results_for(subcorpus.name, subcorpus.test, evaluation_metric,
embeddings_in_memory, mini_batch_size, base_path / 'test.tsv')

Expand Down
7 changes: 5 additions & 2 deletions flair/training_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,5 +232,8 @@ def convert_labels_to_one_hot(label_list: List[List[str]], label_dict: Dictionar
return [[1 if l in labels else 0 for l in label_dict.get_items()] for labels in label_list]


def log_line():
log.info('-' * 100)
def log_line(logger = None):
    """Emit a horizontal separator line (100 dashes) at INFO level.

    Args:
        logger: optional logger to write the separator to; when omitted,
            the module-level ``log`` is used instead.
    """
    target = log if logger is None else logger
    target.info('-' * 100)

0 comments on commit ec6b77c

Please sign in to comment.