[Add] Add EvalHook inherited from MMCV EvalHook (#90)
* upgrade eval

* fix lint

* fix lint

* fix lint

* add a unit test: test eval hook

* add unit test

* fix unit test

* fix unit test: remove the requirement for cuda

* use kwargs to receive EvalHook args

* remove useless comments

* create the folder if it does not exist

* add new metric

* fix some bugs

* fix unit test

* remove joint_error metric

* fix unit test

* fix pck thresholds

* fix import error

* fix import error

* remove unused parameter

* add more unit test

* add unit test

* rename p-mpjpe to pa-mpjpe

* fix unit test

* remove `mpjpe` in `__all__`

* fix comments

* add more unit tests

* fix

* rename

* fix docstring

* fix typo

* update `getting_started.md`

* fix docstring

* add evaluation config

* fix unit test

* use mmhuman3d greater/less key
ttxskk authored Feb 11, 2022
1 parent d2052c3 commit 26f821b
Showing 22 changed files with 1,369 additions and 365 deletions.
2 changes: 2 additions & 0 deletions configs/hmr/resnet50_hmr_pw3d.py
@@ -1,6 +1,8 @@
_base_ = ['../_base_/default_runtime.py']
use_adversarial_train = True

# evaluate
evaluation = dict(metric=['pa-mpjpe', 'mpjpe'])
# optimizer
optimizer = dict(
backbone=dict(type='Adam', lr=2.5e-4),
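The `evaluation` dict added to each config is read by `train_model` (see the `mmhuman3d/apis/train.py` diff below) and forwarded as keyword arguments to the new `EvalHook`/`DistEvalHook`. Below is a minimal sketch of what a fuller evaluation config could look like, assuming the MMCV-style keys `interval` and `save_best` exposed by the hook's signature; the values are illustrative, not taken from this commit:

```python
# Hedged sketch only. 'interval' and 'save_best' map onto the new EvalHook
# arguments of the same name; 'metric' travels through **eval_kwargs into
# dataset.evaluate(). The chosen values are assumptions for illustration.
evaluation = dict(
    interval=1,                    # run validation every epoch
    metric=['pa-mpjpe', 'mpjpe'],  # metrics computed by dataset.evaluate()
    save_best='pa-mpjpe',          # keep the checkpoint with the lowest PA-MPJPE
)
```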
15 changes: 13 additions & 2 deletions configs/hybrik/resnet34_hybrik_mixed.py
@@ -1,5 +1,7 @@
_base_ = ['../_base_/default_runtime.py']

# evaluate
evaluation = dict(metric=['pa-mpjpe', 'mpjpe'])
# optimizer
optimizer = dict(type='Adam', lr=1e-3, weight_decay=0)
optimizer_config = dict(grad_clip=None)
@@ -11,7 +13,7 @@
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
# dict(type='TensorboardLoggerHook')
])

img_res = 256
@@ -166,7 +168,16 @@
partition=[0.4, 0.1, 0.5]),
test=dict(
type=dataset_type,
body_model=dict(
type='GenderedSMPL', model_path='data/body_models/smpl'),
dataset_name='pw3d',
data_prefix='data',
pipeline=test_pipeline,
ann_file='hybrik_pw3d_test.npz'))
ann_file='hybrik_pw3d_test.npz'),
val=dict(
type=dataset_type,
dataset_name='pw3d',
data_prefix='data',
pipeline=test_pipeline,
ann_file='hybrik_pw3d_test.npz'),
)
3 changes: 3 additions & 0 deletions configs/spin/resnet50_spin_pw3d.py
@@ -1,6 +1,9 @@
_base_ = ['../_base_/default_runtime.py']
use_adversarial_train = True

# evaluate
evaluation = dict(metric=['pa-mpjpe', 'mpjpe'])

img_res = 224

body_model = dict(
3 changes: 3 additions & 0 deletions configs/vibe/resnet50_vibe_pw3d.py
@@ -1,6 +1,9 @@
_base_ = ['../_base_/default_runtime.py']
use_adversarial_train = True

# evaluate
evaluation = dict(metric=['pa-mpjpe', 'mpjpe'])

# optimizer
optimizer = dict(
neck=dict(type='Adam', lr=2.5e-4), head=dict(type='Adam', lr=2.5e-4))
8 changes: 4 additions & 4 deletions docs/getting_started.md
@@ -110,23 +110,23 @@ We provide pretrained models in the respective method folders in [config](https:
### Evaluate with a single GPU / multiple GPUs

```shell
python tools/test.py ${CONFIG} --work-dir=${WORK_DIR} ${CHECKPOINT}
python tools/test.py ${CONFIG} --work-dir=${WORK_DIR} ${CHECKPOINT} --metrics=${METRICS}
```
Example:
```shell
python tools/test.py configs/hmr/resnet50_hmr_pw3d.py --work-dir=work_dirs/hmr work_dirs/hmr/latest.pth
python tools/test.py configs/hmr/resnet50_hmr_pw3d.py --work-dir=work_dirs/hmr work_dirs/hmr/latest.pth --metrics pa-mpjpe mpjpe
```

### Evaluate with slurm

If you can run MMHuman3D on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_test.sh`.

```shell
./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG} ${WORK_DIR} ${CHECKPOINT}
./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG} ${WORK_DIR} ${CHECKPOINT} --metrics ${METRICS}
```
Example:
```shell
./tools/slurm_test.sh my_partition test_hmr configs/hmr/resnet50_hmr_pw3d.py work_dirs/hmr work_dirs/hmr/latest.pth 8
./tools/slurm_test.sh my_partition test_hmr configs/hmr/resnet50_hmr_pw3d.py work_dirs/hmr work_dirs/hmr/latest.pth 8 --metrics pa-mpjpe mpjpe
```


42 changes: 1 addition & 41 deletions mmhuman3d/apis/test.py
@@ -5,18 +5,12 @@
import time

import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info


def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
**show_kwargs):
def single_gpu_test(model, data_loader):
"""Test with single gpu."""
model.eval()
results = []
@@ -32,40 +26,6 @@ def single_gpu_test(model,
else:
results.append(result)

if show or out_dir:
scores = np.vstack(result)
pred_score = np.max(scores, axis=1)
pred_label = np.argmax(scores, axis=1)
pred_class = [model.CLASSES[lb] for lb in pred_label]

img_metas = data['img_metas'].data[0]
imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)

for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]

ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))

if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None

result_show = {
'pred_score': pred_score[i],
'pred_label': pred_label[i],
'pred_class': pred_class[i]
}
model.module.show_result(
img_show,
result_show,
show=show,
out_file=out_file,
**show_kwargs)

if 'img' in data.keys():
batch_size = data['img'].size(0)
else:
3 changes: 1 addition & 2 deletions mmhuman3d/apis/train.py
@@ -10,9 +10,9 @@
OptimizerHook,
build_runner,
)
from mmcv.runner.hooks import DistEvalHook, EvalHook

from mmhuman3d.core.distributed_wrapper import DistributedDataParallelWrapper
from mmhuman3d.core.evaluation import DistEvalHook, EvalHook
from mmhuman3d.core.optimizer import build_optimizers
from mmhuman3d.data.datasets import build_dataloader, build_dataset
from mmhuman3d.utils import get_root_logger
@@ -156,7 +156,6 @@ def train_model(model,
round_up=True)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
eval_cfg['work_dir'] = cfg.work_dir
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))

15 changes: 12 additions & 3 deletions mmhuman3d/core/evaluation/__init__.py
@@ -1,7 +1,16 @@
from mmhuman3d.core.evaluation import mesh_eval, mpjpe
from mmhuman3d.core.evaluation import mesh_eval
from mmhuman3d.core.evaluation.eval_hooks import DistEvalHook, EvalHook
from mmhuman3d.core.evaluation.eval_utils import (
    keypoint_3d_auc,
    keypoint_3d_pck,
    keypoint_accel_error,
    keypoint_mpjpe,
    vertice_pve,
)
from mmhuman3d.core.evaluation.mesh_eval import compute_similarity_transform
from mmhuman3d.core.evaluation.mpjpe import keypoint_mpjpe

__all__ = [
'compute_similarity_transform', 'keypoint_mpjpe', 'mesh_eval', 'mpjpe'
'compute_similarity_transform', 'keypoint_mpjpe', 'mesh_eval',
'DistEvalHook', 'EvalHook', 'vertice_pve', 'keypoint_3d_pck',
'keypoint_3d_auc', 'keypoint_accel_error'
]
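Besides the hooks, the evaluation package now exports the metric utilities (`keypoint_mpjpe`, `keypoint_3d_pck`, `keypoint_3d_auc`, `keypoint_accel_error`, `vertice_pve`) and `compute_similarity_transform`. A rough sketch of computing PA-MPJPE for a single sample, assuming `compute_similarity_transform(pred, gt)` accepts two (N, 3) numpy arrays and returns the prediction rigidly aligned to the ground truth; the keypoint arrays below are random placeholders:

```python
import numpy as np

from mmhuman3d.core.evaluation import compute_similarity_transform

# Placeholder 3D keypoints (N joints x 3); real data would come from a dataset.
pred = np.random.rand(17, 3)
gt = np.random.rand(17, 3)

# Procrustes-align the prediction to the ground truth (assumed signature),
# then take the mean per-joint Euclidean error: this is PA-MPJPE.
pred_aligned = compute_similarity_transform(pred, gt)
pa_mpjpe = np.linalg.norm(pred_aligned - gt, axis=-1).mean()
print(f'PA-MPJPE: {pa_mpjpe:.4f}')
```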
139 changes: 139 additions & 0 deletions mmhuman3d/core/evaluation/eval_hooks.py
@@ -0,0 +1,139 @@
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import warnings

from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook

MMHUMAN3D_GREATER_KEYS = ['3dpck', 'pa-3dpck', '3dauc', 'pa-3dauc']
MMHUMAN3D_LESS_KEYS = ['mpjpe', 'pa-mpjpe', 'pve']


class EvalHook(BaseEvalHook):

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=MMHUMAN3D_GREATER_KEYS,
                 less_keys=MMHUMAN3D_LESS_KEYS,
                 **eval_kwargs):
        if test_fn is None:
            from mmhuman3d.apis import single_gpu_test
            test_fn = single_gpu_test

        # remove "gpu_collect" from eval_kwargs
        if 'gpu_collect' in eval_kwargs:
            warnings.warn(
                '"gpu_collect" will be deprecated in EvalHook.'
                'Please remove it from the config.', DeprecationWarning)
            _ = eval_kwargs.pop('gpu_collect')

        # update "save_best" according to "key_indicator" and remove the
        # latter from eval_kwargs
        if 'key_indicator' in eval_kwargs or isinstance(save_best, bool):
            warnings.warn(
                '"key_indicator" will be deprecated in EvalHook.'
                'Please use "save_best" to specify the metric key,'
                'e.g., save_best="pa-mpjpe".', DeprecationWarning)

            key_indicator = eval_kwargs.pop('key_indicator', None)
            if save_best is True and key_indicator is None:
                raise ValueError('key_indicator should not be None, when '
                                 'save_best is set to True.')
            save_best = key_indicator

        super().__init__(dataloader, start, interval, by_epoch, save_best,
                         rule, test_fn, greater_keys, less_keys, **eval_kwargs)

    def evaluate(self, runner, results):

        with tempfile.TemporaryDirectory() as tmp_dir:
            eval_res = self.dataloader.dataset.evaluate(
                results,
                res_folder=tmp_dir,
                logger=runner.logger,
                **self.eval_kwargs)

        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True

        if self.save_best is not None:
            if self.key_indicator == 'auto':
                self._init_rule(self.rule, list(eval_res.keys())[0])

            return eval_res[self.key_indicator]

        return None


class DistEvalHook(BaseDistEvalHook):

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=MMHUMAN3D_GREATER_KEYS,
                 less_keys=MMHUMAN3D_LESS_KEYS,
                 broadcast_bn_buffer=True,
                 tmpdir=None,
                 gpu_collect=False,
                 **eval_kwargs):

        if test_fn is None:
            from mmhuman3d.apis import multi_gpu_test
            test_fn = multi_gpu_test

        # update "save_best" according to "key_indicator" and remove the
        # latter from eval_kwargs
        if 'key_indicator' in eval_kwargs or isinstance(save_best, bool):
            warnings.warn(
                '"key_indicator" will be deprecated in EvalHook.'
                'Please use "save_best" to specify the metric key,'
                'e.g., save_best="pa-mpjpe".', DeprecationWarning)

            key_indicator = eval_kwargs.pop('key_indicator', None)
            if save_best is True and key_indicator is None:
                raise ValueError('key_indicator should not be None, when '
                                 'save_best is set to True.')
            save_best = key_indicator

        super().__init__(dataloader, start, interval, by_epoch, save_best,
                         rule, test_fn, greater_keys, less_keys,
                         broadcast_bn_buffer, tmpdir, gpu_collect,
                         **eval_kwargs)

    def evaluate(self, runner, results):
        """Evaluate the results.
        Args:
            runner (:obj:`mmcv.Runner`): The underlying training runner.
            results (list): Output results.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            eval_res = self.dataloader.dataset.evaluate(
                results,
                res_folder=tmp_dir,
                logger=runner.logger,
                **self.eval_kwargs)

        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True

        if self.save_best is not None:
            if self.key_indicator == 'auto':
                # infer from eval_results
                self._init_rule(self.rule, list(eval_res.keys())[0])
            return eval_res[self.key_indicator]

        return None
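The `MMHUMAN3D_GREATER_KEYS` / `MMHUMAN3D_LESS_KEYS` lists tell the hook which direction counts as an improvement when `save_best` is set. Below is a small self-contained sketch of that rule inference, assuming it follows the usual MMCV convention of matching the metric name against the two key lists (this is not the hook's actual implementation):

```python
# Hedged sketch: only illustrates the convention the key lists above encode.
MMHUMAN3D_GREATER_KEYS = ['3dpck', 'pa-3dpck', '3dauc', 'pa-3dauc']
MMHUMAN3D_LESS_KEYS = ['mpjpe', 'pa-mpjpe', 'pve']


def infer_rule(save_best: str) -> str:
    """Return 'greater' or 'less' depending on the metric name."""
    if save_best in MMHUMAN3D_GREATER_KEYS:
        return 'greater'  # higher is better, e.g. 3DPCK / 3DAUC
    if save_best in MMHUMAN3D_LESS_KEYS:
        return 'less'     # lower is better, e.g. MPJPE / PA-MPJPE / PVE
    raise ValueError(f'Cannot infer a rule for metric "{save_best}"')


print(infer_rule('pa-mpjpe'))  # -> 'less'
print(infer_rule('3dpck'))     # -> 'greater'
```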
