-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathvqav2_eval.py
58 lines (44 loc) · 2.07 KB
/
vqav2_eval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import json
from pprint import PrettyPrinter
from vqa_eval_tools import VQA, VQAEval
from argparse import ArgumentParser
from pathlib import Path

# NOTE(review): a duplicate `import json` and an unused
# `from tqdm.notebook import tqdm` were removed; re-add tqdm if progress
# bars are reintroduced.

pp = PrettyPrinter()

# The annotations are missing a "question_type" key, so we create a new
# annotation file which does have the key. We just copy the "answer_type" key
# to "question_type"; they appear to be the same thing.
annotation_file = "/net/acadia10a/data/zkhan/direct_answer_evaluations/v2_mscoco_val2014_annotations.json"
question_file = "/net/acadia10a/data/zkhan/direct_answer_evaluations/v2_OpenEnded_mscoco_val2014_questions.json"
# annotation_file = '/home/zkhan/v2_mscoco_val2014_annotations.json'
# question_file = '/home/zkhan/v2_OpenEnded_mscoco_val2014_questions.json'
if __name__ == "__main__":
    # Score a VQAv2 result file with the official VQA accuracy metric.
    parser = ArgumentParser()
    parser.add_argument(
        "result_file", help="Path to a JSON result file generated by an evaluation."
    )
    args = parser.parse_args()
    results_file = args.result_file

    vqa_v2_obj = VQA(annotation_file=annotation_file, question_file=question_file)

    with open(results_file, "r") as f:
        predicted = json.load(f)

    # The evaluation code keys on integer question ids; result files may store
    # them as strings, so normalize before matching against the annotations.
    for element in predicted:
        element["question_id"] = int(element["question_id"])

    # The JSON I use for the VQAv2 validation set is missing
    # these two questions. It shouldn't make a big difference
    # in the evaluations, so we just predict a nonsense answer
    # for them.
    # Only append each placeholder if it is not already present: the original
    # code appended unconditionally, so re-running this script on the same
    # result file accumulated duplicate entries on every run.
    existing_qids = {element["question_id"] for element in predicted}
    missing_qids = (196280004, 362391000)
    for missing_qid in missing_qids:
        if missing_qid not in existing_qids:
            predicted.append(
                {"question_id": missing_qid, "answer": "i forgor lol", "score": -5}
            )

    # Persist the patched predictions so loadRes sees a complete result file.
    with open(results_file, "w") as f:
        json.dump(predicted, f)

    result_obj = vqa_v2_obj.loadRes(resFile=results_file, quesFile=question_file)
    vqa_v2_eval = VQAEval(vqa_v2_obj, result_obj, n=2)
    vqa_v2_eval.evaluate()

    print(f"Completed evaluation of {results_file}")
    pp.pprint(vqa_v2_eval.accuracy)

    # Write the accuracy breakdown next to the scored result file.
    with open(Path(results_file).parent / "vqa_v2_eval.json", "w") as f:
        json.dump(vqa_v2_eval.accuracy, f)