#!/usr/bin/env python
"""
Compute pattern discovery evaluation metrics.
Usage:
./pattern_eval.py REFERENCE.TXT ESTIMATION.TXT
Example:
./pattern_eval.py ../tests/data/pattern/reference-mono.txt \
../tests/data/pattern/estimate-mono.txt
Written by Oriol Nieto ([email protected]), 2014
"""
from __future__ import print_function
import argparse
import os
import sys
import eval_utilities
import mir_eval


def main():
    """Main function to evaluate the pattern discovery task."""
    parser = argparse.ArgumentParser(
        description="mir_eval pattern discovery evaluation",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-o',
                        dest='output_file',
                        default=None,
                        type=str,
                        action='store',
                        help='Store results in json format')
    parser.add_argument("reference_file",
                        action="store",
                        help="Path to the reference file.")
    parser.add_argument("estimated_file",
                        action="store",
                        help="Path to the estimation file.")
    parameters = vars(parser.parse_args(sys.argv[1:]))

    # Load in data
    ref_patterns = mir_eval.io.load_patterns(parameters['reference_file'])
    est_patterns = mir_eval.io.load_patterns(parameters['estimated_file'])

    # Compute all the scores
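    # The returned dict typically includes the standard pattern discovery
    # metrics: establishment precision/recall/F-measure, occurrence metrics
    # (commonly at 0.5 and 0.75 thresholds), three-layer metrics, and
    # first-N variants; see the mir_eval.pattern documentation for the
    # exact keys.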
    scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)
    print("{} vs. {}".format(os.path.basename(parameters['reference_file']),
                             os.path.basename(parameters['estimated_file'])))
    eval_utilities.print_evaluation(scores)

    if parameters['output_file']:
        print('Saving results to: ', parameters['output_file'])
        eval_utilities.save_results(scores, parameters['output_file'])


if __name__ == '__main__':
    main()