v1.6.5rc1 (#48)
* exclude examples

* update load_json

* add keypoints_crowd

* format

* fix hook

* fix linter

* upd history

* bump version
MiXaiLL76 authored Nov 3, 2024
1 parent 1caac51 commit ca82f54
Showing 14 changed files with 1,828 additions and 107 deletions.
4 changes: 1 addition & 3 deletions .pre-commit-config.yaml

@@ -59,10 +59,8 @@ repos:
     hooks:
       # try to fix what is possible
       - id: ruff
-        args: ["--fix", "--exclude", "examples"]
+        args: ["--fix"]
      # perform formatting updates
      - id: ruff-format
-        args: ["--exclude", "examples"]
      # validate if all is fine with preview mode
      - id: ruff
-        args: ["--exclude", "examples"]
2 changes: 1 addition & 1 deletion Makefile

@@ -20,7 +20,7 @@ format:
 	pre-commit run --all-files
 
 linter:
-	pre-commit check --all-files
+	ruff check --force-exclude
 
 clean:
 	rm -rf build
182 changes: 182 additions & 0 deletions examples/crowdpose_example.ipynb
@@ -0,0 +1,182 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<https://github.com/jin-s13/xtcocoapi/blob/master/demos/demo_crowdpose.py>"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"faster_coco_eval.__version__='1.6.4'\n"
]
}
],
"source": [
"import logging\n",
"import numpy as np\n",
"import faster_coco_eval\n",
"from faster_coco_eval import COCO, COCOeval_faster\n",
"\n",
"print(f\"{faster_coco_eval.__version__=}\")\n",
"\n",
"logging.root.setLevel(\"INFO\")\n",
"logging.debug(\"Запись.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"gt_file = '../tests/dataset/example_crowdpose_val.json'\n",
"preds = '../tests/dataset/example_crowdpose_preds.json'"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"sigmas = np.array([\n",
" .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .79,\n",
" .79\n",
" ]) / 10.0"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:faster_coco_eval.core.cocoeval:Evaluate annotation type *keypoints_crowd*\n",
"INFO:faster_coco_eval.core.cocoeval:COCOeval_opt.evaluate() finished...\n",
"INFO:faster_coco_eval.core.cocoeval:DONE (t=0.00s).\n",
"INFO:faster_coco_eval.core.cocoeval:Accumulating evaluation results...\n",
"INFO:faster_coco_eval.core.cocoeval:COCOeval_opt.accumulate() finished...\n",
"INFO:faster_coco_eval.core.cocoeval:DONE (t=0.00s).\n",
"INFO:faster_coco_eval.core.cocoeval: Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.788\n",
"INFO:faster_coco_eval.core.cocoeval: Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.988\n",
"INFO:faster_coco_eval.core.cocoeval: Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.731\n",
"INFO:faster_coco_eval.core.cocoeval: Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.822\n",
"INFO:faster_coco_eval.core.cocoeval: Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 1.000\n",
"INFO:faster_coco_eval.core.cocoeval: Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.778\n",
"INFO:faster_coco_eval.core.cocoeval: Average Precision (AP) @[ IoU=0.50:0.95 | type= easy | maxDets= 20 ] = 1.000\n",
"INFO:faster_coco_eval.core.cocoeval: Average Precision (AP) @[ IoU=0.50:0.95 | type=medium | maxDets= 20 ] = 0.980\n",
"INFO:faster_coco_eval.core.cocoeval: Average Precision (AP) @[ IoU=0.50:0.95 | type= hard | maxDets= 20 ] = 0.412\n"
]
},
{
"data": {
"text/plain": [
"{'AP_all': 0.7877215935879303,\n",
" 'AP_50': 0.9881188118811886,\n",
" 'AP_75': 0.7314356435643564,\n",
" 'AR_all': 0.8222222222222223,\n",
" 'AR_50': 1.0,\n",
" 'AR_75': 0.7777777777777778,\n",
" 'AP_easy': 1.0,\n",
" 'AP_medium': 0.9802,\n",
" 'AP_hard': 0.4116}"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cocoGt = COCO(gt_file)\n",
"cocoDt = cocoGt.loadRes(preds)\n",
"cocoEval = COCOeval_faster(cocoGt, cocoDt, 'keypoints_crowd', kpt_oks_sigmas=sigmas, use_area=False)\n",
"cocoEval.evaluate()\n",
"cocoEval.accumulate()\n",
"cocoEval.summarize()\n",
"\n",
"cocoEval.stats_as_dict"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Orig Code\n",
"\n",
"```py\n",
"from xtcocotools.coco import COCO\n",
"from xtcocotools.cocoeval import COCOeval\n",
"\n",
"\n",
"cocoGt = COCO(gt_file)\n",
"cocoDt = cocoGt.loadRes(preds)\n",
"cocoEval = COCOeval(cocoGt, cocoDt, 'keypoints_crowd', sigmas, use_area=False)\n",
"cocoEval.evaluate()\n",
"cocoEval.accumulate()\n",
"cocoEval.summarize()\n",
"```\n",
"\n",
"## Orig result\n",
"\n",
"```text\n",
"loading annotations into memory...\n",
"Done (t=0.00s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
"DONE (t=0.00s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *keypoints_crowd*\n",
"DONE (t=0.00s).\n",
"Accumulating evaluation results...\n",
"DONE (t=0.00s).\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.788\n",
" Average Precision (AP) @[ IoU=0.50 | area= all | maxDets= 20 ] = 0.988\n",
" Average Precision (AP) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.731\n",
" Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 20 ] = 0.822\n",
" Average Recall (AR) @[ IoU=0.50 | area= all | maxDets= 20 ] = 1.000\n",
" Average Recall (AR) @[ IoU=0.75 | area= all | maxDets= 20 ] = 0.778\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | type= easy | maxDets= 20 ] = 1.000\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | type=medium | maxDets= 20 ] = 0.980\n",
" Average Precision (AP) @[ IoU=0.50:0.95 | type= hard | maxDets= 20 ] = 0.412\n",
"```"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
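The 14 values in `sigmas` above are the per-joint OKS constants for the CrowdPose skeleton, divided by 10 to match the scale COCO-style evaluators expect. As a reference for what `kpt_oks_sigmas` controls, here is a minimal sketch of the standard OKS computation in the style of pycocotools' `computeOks`; the function name and toy inputs are illustrative, not part of faster_coco_eval:

```python
import numpy as np

def compute_oks(gt_kpts, dt_kpts, scale_area, sigmas):
    """Object Keypoint Similarity for one GT/detection pair (illustrative sketch).

    gt_kpts, dt_kpts: (K, 3) arrays of (x, y, v); v > 0 marks labeled GT keypoints.
    scale_area: object scale; the segment area, or a box-derived proxy
                when use_area=False (the CrowdPose protocol above).
    sigmas: (K,) per-keypoint constants, e.g. the CrowdPose values above.
    """
    k2 = (2 * np.asarray(sigmas)) ** 2  # cocoeval uses vars = (sigmas * 2) ** 2
    d2 = (gt_kpts[:, 0] - dt_kpts[:, 0]) ** 2 + (gt_kpts[:, 1] - dt_kpts[:, 1]) ** 2
    labeled = gt_kpts[:, 2] > 0
    if not labeled.any():
        return 0.0
    # exp(-d^2 / (2 * s^2 * k^2)), averaged over labeled keypoints
    e = d2[labeled] / k2[labeled] / (scale_area + np.spacing(1)) / 2
    return float(np.mean(np.exp(-e)))
```

A detection that lands every labeled keypoint within roughly one sigma-scaled standard deviation of the ground truth scores near 1.0; OKS thresholds then play the role IoU plays for boxes. With `use_area=False`, xtcocoapi-style evaluators substitute a scale derived from the ground-truth box for the annotated area.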
47 changes: 24 additions & 23 deletions faster_coco_eval/core/coco.py
@@ -9,7 +9,7 @@
 import time
 import warnings
 from collections import defaultdict
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, Union
 
 import numpy as np
 
@@ -28,6 +28,7 @@ def __init__(
         self,
         annotation_file: Optional[Union[str, dict, os.PathLike, pathlib.PosixPath]] = None,
         use_deepcopy: bool = False,
+        print_function: Callable = logger.debug,
     ):
         """Constructor of Microsoft COCO helper class.
@@ -45,21 +46,17 @@ def __init__(
             {},
         )
         self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
-        self.print_function = logger.debug
+        self.print_function = print_function
         self.use_deepcopy = use_deepcopy
         self.annotation_file = annotation_file
 
         if annotation_file is not None:
             self._print_function("loading annotations into memory...")
             tic = time.time()
-            if type(annotation_file) in [str, os.PathLike, pathlib.PosixPath]:
-                self.dataset = self.load_json(annotation_file)
-            elif type(annotation_file) is dict:
-                if self.use_deepcopy:
-                    self.dataset = copy.deepcopy(annotation_file)
-                else:
-                    self.dataset = annotation_file.copy()
+            if type(annotation_file) in [str, os.PathLike, pathlib.PosixPath, dict, list]:
+                self.dataset = self.load_json(annotation_file, self.use_deepcopy)
             else:
-                self.dataset = None
+                raise TypeError(f"type {type(annotation_file)} is not supported")
 
             assert type(self.dataset) is dict, f"annotation file format {type(self.dataset)} not supported"
             self.print_function(f"Done (t={time.time() - tic:0.2f}s)")
@@ -290,29 +287,36 @@ def loadImgs(self, ids: Union[List[int], int] = []) -> List[dict]:
         return [self.imgs[ids]]
 
     @staticmethod
-    def load_json(json_file: Optional[Union[str, os.PathLike]]) -> dict:
+    def load_json(
+        json_file: Union[str, os.PathLike, pathlib.PosixPath, dict, list], use_deepcopy: Optional[bool] = False
+    ) -> dict:
         """Load a json file.
 
         Args:
-            json_file (str or os.PathLike): Path to the json file
+            json_file (str or os.PathLike or dict or list): Path to the json file or data dict
 
         Return:
             data (dict): Loaded json data
 
         """
-        with open(json_file) as io:
-            _data = json.load(io)
+        if type(json_file) in [str, os.PathLike]:
+            with open(json_file) as io:
+                _data = json.load(io)
+        else:
+            if use_deepcopy:
+                return copy.deepcopy(json_file)
+            else:
+                return json_file.copy()
         return _data
 
     def loadRes(
         self,
-        resFile: Union[str, os.PathLike, np.ndarray],
+        resFile: Union[str, os.PathLike, pathlib.PosixPath, dict, list, np.ndarray],
         min_score: float = 0.0,
     ) -> "COCO":
         """Load result file and return a result api object.
 
         Args:
-            resFile (str) : file name of result file
+            resFile (str or os.PathLike or dict or list) : file name of result file
             min_score (float) : minimum score to consider a result
 
         Return:
@@ -324,15 +328,12 @@ def loadRes(
 
         self.print_function("Loading and preparing results...")
        tic = time.time()
-        if type(resFile) in [str, os.PathLike]:
-            anns = self.load_json(resFile)
+        if type(resFile) in [str, os.PathLike, pathlib.PosixPath, dict, list]:
+            anns = self.load_json(resFile, self.use_deepcopy)
         elif type(resFile) is np.ndarray:
             anns = self.loadNumpyAnnotations(resFile)
         else:
-            if self.use_deepcopy:
-                anns = copy.deepcopy(resFile)
-            else:
-                anns = resFile.copy()
+            raise TypeError(f"type {type(resFile)} is not supported")
 
         assert type(anns) is list, "results in not an array of objects"
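
The net effect of this diff: every accepted input type is funneled through `load_json`, unsupported types raise `TypeError` instead of silently leaving `self.dataset` as `None`, and the progress logger becomes injectable. A minimal usage sketch of the updated API; the tiny dataset and detection list below are illustrative stubs, not files from the repository:

```python
from faster_coco_eval import COCO

# Minimal in-memory ground-truth stub (illustrative, not a real annotation file).
gt_dict = {
    "images": [{"id": 1, "width": 640, "height": 480}],
    "annotations": [
        {"id": 1, "image_id": 1, "category_id": 1,
         "bbox": [10, 10, 50, 80], "area": 4000, "iscrowd": 0}
    ],
    "categories": [{"id": 1, "name": "person"}],
}

# dicts (and now lists) go through load_json; use_deepcopy=True protects the
# caller's copy, and print_function replaces the hard-coded logger.debug.
cocoGt = COCO(gt_dict, use_deepcopy=True, print_function=print)

# loadRes likewise accepts an in-memory list of detections.
preds = [{"image_id": 1, "category_id": 1, "bbox": [12, 11, 48, 79], "score": 0.9}]
cocoDt = cocoGt.loadRes(preds)

# Unsupported types now raise instead of setting dataset to None.
try:
    COCO(42)  # not a path, dict, or list
except TypeError as err:
    print(err)  # type <class 'int'> is not supported
```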

(diffs for the remaining 10 changed files not shown)
