#!/usr/bin/env python
'''
CREATED:2014-03-18 by Justin Salamon <[email protected]>

Compute melody extraction evaluation measures

Usage:

./melody_eval.py TRUTH.TXT PREDICTION.TXT
(CSV files also accepted)

For a detailed explanation of the measures please refer to:

J. Salamon, E. Gomez, D. P. W. Ellis and G. Richard, "Melody Extraction
from Polyphonic Music Signals: Approaches, Applications and Challenges",
IEEE Signal Processing Magazine, 31(2):118-134, Mar. 2014.
'''
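
# Input format note: mir_eval.io.load_time_series parses one observation per
# line, a timestamp in seconds followed by a frequency value in Hz, with
# whitespace- or comma-separated columns (hence "CSV files also accepted"
# above). Under the usual MIREX melody convention, a frequency value <= 0
# marks an unvoiced frame. An illustrative (made-up) excerpt:
#
#   0.000    0.0
#   0.058    221.3
#   0.116    223.1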
from __future__ import print_function
import argparse
import sys
import os
import eval_utilities
import mir_eval


def process_arguments():
    '''Argparse function to get the program parameters'''

    parser = argparse.ArgumentParser(description='mir_eval melody extraction '
                                                 'evaluation')

    parser.add_argument('-o',
                        dest='output_file',
                        default=None,
                        type=str,
                        action='store',
                        help='Store results in json format')

    parser.add_argument('reference_file',
                        action='store',
                        help='path to the ground truth annotation')

    parser.add_argument('estimated_file',
                        action='store',
                        help='path to the estimation file')

    parser.add_argument("--hop",
                        dest='hop',
                        type=float,
                        default=None,
                        help="hop size (in seconds) to use for the evaluation"
                             " (optional)")

    return vars(parser.parse_args(sys.argv[1:]))
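

# Example invocation using the flags defined above (file names are purely
# illustrative):
#   ./melody_eval.py --hop 0.01 -o results.json reference.txt estimate.txt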


if __name__ == '__main__':
    # Get the parameters
    parameters = process_arguments()

    # Load in the data from the provided files
    (ref_time,
     ref_freq) = mir_eval.io.load_time_series(parameters['reference_file'])
    (est_time,
     est_freq) = mir_eval.io.load_time_series(parameters['estimated_file'])

    # Compute all the scores
    scores = mir_eval.melody.evaluate(ref_time, ref_freq, est_time, est_freq,
                                      hop=parameters['hop'])
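    # The returned scores dictionary holds the standard melody measures
    # described in the paper cited above; with a stock mir_eval install the
    # keys are expected to include 'Voicing Recall', 'Voicing False Alarm',
    # 'Raw Pitch Accuracy', 'Raw Chroma Accuracy' and 'Overall Accuracy',
    # each a float in [0, 1].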
print("{} vs. {}".format(os.path.basename(parameters['reference_file']),
os.path.basename(parameters['estimated_file'])))
eval_utilities.print_evaluation(scores)
if parameters['output_file']:
print('Saving results to: ', parameters['output_file'])
eval_utilities.save_results(scores, parameters['output_file'])