Format code with black and isort #45

Merged · 1 commit · Jan 13, 2023
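This PR is mechanical reformatting only: isort normalizes import ordering and grouping, then black normalizes quoting, wrapping, and spacing. Below is a minimal sketch of the same transformation applied programmatically, assuming black and isort are installed (the profile="black" setting is an assumption; the PR does not record the configuration actually used):

import black
import isort

source = 'from typing import Dict, Any\nimport uuid\nimport argparse\n'

# isort groups and alphabetizes imports; the (assumed) black profile
# keeps its output compatible with black's wrapping rules.
sorted_source = isort.code(source, profile="black")

# black then normalizes quotes, line length, and layout.
print(black.format_str(sorted_source, mode=black.Mode()))

From a checkout, the equivalent is typically just running isort . followed by black . at the repository root.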
184 changes: 114 additions & 70 deletions scripts/labelbox/upload.py
@@ -1,153 +1,197 @@
from __future__ import annotations

import argparse
import uuid
from typing import Dict, Any
from pathlib import Path
from typing import Any, Dict

from labelbox import Client, LabelingFrontend, Dataset, Project
from labelbox.schema.ontology import OntologyBuilder, Tool
import pandas as pd
from labelbox import Client, Dataset, LabelingFrontend, Project
from labelbox.schema.media_type import MediaType
from labelbox.schema.ontology import OntologyBuilder, Tool
from tqdm import tqdm

from pathlib import Path
import pandas as pd
import soccertrack
from soccertrack.logging import show_df # This just makes the df viewable in the notebook.
from tqdm import tqdm
import argparse
from soccertrack.logging import ( # This just makes the df viewable in the notebook.
show_df,
)

def create_ndjson(datarow_id: str, schema_id: str,
segments: Dict[str, Any]) -> Dict[str, Any]:

def create_ndjson(
datarow_id: str, schema_id: str, segments: Dict[str, Any]
) -> Dict[str, Any]:
return {
"uuid": str(uuid.uuid4()),
"schemaId": schema_id,
"dataRow": {
"id": datarow_id
},
"segments": segments
"dataRow": {"id": datarow_id},
"segments": segments,
}
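For reference, create_ndjson builds one NDJSON annotation record per feature; its shape looks like the sketch below (the ids are hypothetical placeholders, and segments follows the keyframes structure assembled in get_segment further down):

# Illustrative payload only; the dataRow/schema ids are made up.
payload = {
    "uuid": "5f2c6f0e-8d8a-4f5b-9c3e-1a2b3c4d5e6f",  # fresh uuid4 per record
    "schemaId": "<feature-schema-id>",
    "dataRow": {"id": "<datarow-id>"},
    "segments": [
        {
            "keyframes": [
                {"frame": 1, "bbox": {"top": 10, "left": 20, "height": 30, "width": 40}}
            ]
        }
    ],
}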

def _fix_frame(bbdf_tmp, data_row_list, data_row, input_csv_file, SECOND_START_IDX, THRESHOLD_SECOND_START, THRESHOLD_SECOND_END, DATA_ROW_IDX):
#Fix the misalignment of the dataframe
start_second = int(data_row.external_id.split('_')[SECOND_START_IDX])
if start_second >= THRESHOLD_SECOND_START & start_second < THRESHOLD_SECOND_END - 30:

def _fix_frame(
bbdf_tmp,
data_row_list,
data_row,
input_csv_file,
SECOND_START_IDX,
THRESHOLD_SECOND_START,
THRESHOLD_SECOND_END,
DATA_ROW_IDX,
):
# Fix the misalignment of the dataframe
start_second = int(data_row.external_id.split("_")[SECOND_START_IDX])
if (
start_second
>= THRESHOLD_SECOND_START & start_second
< THRESHOLD_SECOND_END - 30
):
_data_row = list(reversed(data_row_list))[DATA_ROW_IDX + 1]
_bbdf_file_name = Path(input_csv_file) / f"{_data_row.external_id.split('.')[0]}.csv"
_bbdf_file_name = (
Path(input_csv_file) / f"{_data_row.external_id.split('.')[0]}.csv"
)
_bbdf = soccertrack.load_df(_bbdf_file_name)
bbdf = pd.concat([bbdf_tmp[2:], _bbdf[0:2]], axis=0)

elif start_second >= THRESHOLD_SECOND_END - 30:
bbdf = bbdf_tmp[2:]

else:
bbdf = bbdf_tmp
bbdf.index = [i+1 for i in range(len(bbdf))]
bbdf.index = [i + 1 for i in range(len(bbdf))]
return bbdf


def get_segment(bbdf, KEYFRAME_WINDOW):
segment = dict()
for (team_id, player_id), player_df in bbdf.iter_players():
if team_id == '3':
feature_name = 'BALL'
elif team_id == '1' and int(player_id) >= 11:
feature_name = team_id + '_' + str(int(player_id) - 11)
elif team_id == '0' and player_id == '21':
feature_name = '1' + '_' + str(int(player_id) - 11)
elif team_id == '0' and player_id == '11':
feature_name = '0' + '_' + str(int(player_id) - 11)

if team_id == "3":
feature_name = "BALL"
elif team_id == "1" and int(player_id) >= 11:
feature_name = team_id + "_" + str(int(player_id) - 11)
elif team_id == "0" and player_id == "21":
feature_name = "1" + "_" + str(int(player_id) - 11)
elif team_id == "0" and player_id == "11":
feature_name = "0" + "_" + str(int(player_id) - 11)

else:
feature_name = team_id + '_' + str(int(player_id))
feature_name = team_id + "_" + str(int(player_id))

key_frames_dict = dict()
key_frames_dict["keyframes"] = []

for idx, row in player_df.iterrows():
if idx % KEYFRAME_WINDOW == 0:
try:
key_frames_dict["keyframes"].append({
"frame": idx,
"bbox": {
"top": int(row['bb_top']),
"left": int(row['bb_left']),
"height": int(row['bb_height']),
"width": int(row['bb_width'])
key_frames_dict["keyframes"].append(
{
"frame": idx,
"bbox": {
"top": int(row["bb_top"]),
"left": int(row["bb_left"]),
"height": int(row["bb_height"]),
"width": int(row["bb_width"]),
},
}
})
)
except ValueError:
print('ValueError occurred :', feature_name, 'frame_num :', idx)
print("ValueError occurred :", feature_name, "frame_num :", idx)

segment[feature_name] = [key_frames_dict]
return segment


def main():
parser = argparse.ArgumentParser()
parser.add_argument("--API_key", help="Enter your API key here : ")
parser.add_argument("--PROJECT_NAME", help="Enter your project name here : ")
parser.add_argument("--DATASET_NAME", help="Enter your dataset name here : ")
parser.add_argument("--ONTOLOGY_NAME", help="Enter your ontology name here : ")
parser.add_argument("--input_csv_file", help="Enter the name of the csv file that contains the bbdf information : ")
parser.add_argument(
"--input_csv_file",
help="Enter the name of the csv file that contains the bbdf information : ",
)

args = parser.parse_args()


API_KEY=args.API_key # Add your api key
PROJECT_NAME = args.PROJECT_NAME # This is the name of the project you want to upload the data to.
DATASET_NAME = args.DATASET_NAME # This is the name of the dataset.
ONTOLOGY_NAME = args.ONTOLOGY_NAME # This is the name of the ontology.
API_KEY = args.API_key # Add your api key
PROJECT_NAME = (
args.PROJECT_NAME
) # This is the name of the project you want to upload the data to.
DATASET_NAME = args.DATASET_NAME # This is the name of the dataset.
ONTOLOGY_NAME = args.ONTOLOGY_NAME # This is the name of the ontology.

client = Client(api_key=API_KEY)
#get project information
# get project information
project = next(client.get_projects(where=Project.name == PROJECT_NAME), None)
#get dataset information
# get dataset information
dataset = next(client.get_datasets(where=Dataset.name == DATASET_NAME), None)
# We want to try out a few different tools here.
ontology_builder = OntologyBuilder(
tools=[Tool(tool=Tool.Type.BBOX, name=ONTOLOGY_NAME)]) # This is the name of the label tool you want to use.
tools=[Tool(tool=Tool.Type.BBOX, name=ONTOLOGY_NAME)]
) # This is the name of the label tool you want to use.

# When we created a project with the ontology defined above, all of the ids were assigned.
# So let's reconstruct the ontology builder with all of the ids.
ontology = ontology_builder.from_project(project)
# We want all of the feature schemas to be easily accessible by name.
schema_lookup = {tool.name: tool.feature_schema_id for tool in ontology.tools}


KEYFRAME_WINDOW = 1
SECOND_START_IDX = 3
THRESHOLD_SECOND_START = 840
THRESHOLD_SECOND_END = 1800


data_row_list = [data_row for data_row in dataset.data_rows()]
input_csv_file = args.input_csv_file # This is the name of the csv file that contains the video information.
input_csv_file = (
args.input_csv_file
) # This is the name of the csv file that contains the video information.

for DATA_ROW_IDX in tqdm(range(len(data_row_list))):

data_row = list(reversed(data_row_list))[DATA_ROW_IDX]
bbdf_file_name = Path(input_csv_file) / f"{data_row.external_id.split('.')[0]}.csv"
bbdf_file_name = (
Path(input_csv_file) / f"{data_row.external_id.split('.')[0]}.csv"
)
try:
bbdf_tmp = soccertrack.load_df(bbdf_file_name)
except FileNotFoundError: # If the file doesn't exist, we'll skip it.
print('FileNotFoundError', data_row.external_id)
except FileNotFoundError: # If the file doesn't exist, we'll skip it.
print("FileNotFoundError", data_row.external_id)
continue


#Correction of data frame misalignment
bbdf = _fix_frame(bbdf_tmp, data_row_list, data_row, input_csv_file, SECOND_START_IDX, THRESHOLD_SECOND_START, THRESHOLD_SECOND_END, DATA_ROW_IDX)
# Correction of data frame misalignment
bbdf = _fix_frame(
bbdf_tmp,
data_row_list,
data_row,
input_csv_file,
SECOND_START_IDX,
THRESHOLD_SECOND_START,
THRESHOLD_SECOND_END,
DATA_ROW_IDX,
)
segment = get_segment(bbdf, KEYFRAME_WINDOW)



uploads = []
for schema_name, schema_id in schema_lookup.items():
for schema_name, schema_id in schema_lookup.items():
if schema_name in segment:
uploads.append(create_ndjson(data_row.uid, schema_id, segment[schema_name]))
upload_task = project.upload_annotations(name=f"upload-job-{uuid.uuid4()}",
annotations=uploads,
validate=False)
uploads.append(
create_ndjson(data_row.uid, schema_id, segment[schema_name])
)
upload_task = project.upload_annotations(
name=f"upload-job-{uuid.uuid4()}", annotations=uploads, validate=False
)
# Wait for the upload to finish (can take up to five minutes)
upload_task.wait_until_done()
# Review the upload status
print('Done!', ' Video_name : ' ,data_row.external_id, ': Errors', upload_task.errors)
print(
"Done!",
" Video_name : ",
data_row.external_id,
": Errors",
upload_task.errors,
)


if __name__ == "__main__":
main()
main()
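One thing the black pass makes easier to see: the condition in _fix_frame combines comparisons with the bitwise & operator. In Python, & binds tighter than comparisons, so start_second >= THRESHOLD_SECOND_START & start_second < THRESHOLD_SECOND_END - 30 parses as the chained comparison start_second >= (THRESHOLD_SECOND_START & start_second) < THRESHOLD_SECOND_END - 30, which is almost certainly not the intent. A sketch of the presumably intended check, using the threshold values from this script (the helper name is ours, and fixing it is out of scope for this formatting-only PR):

THRESHOLD_SECOND_START = 840
THRESHOLD_SECOND_END = 1800

def needs_frame_fix(start_second: int) -> bool:
    # Logical `and`, not bitwise `&`: true when start_second falls in
    # [THRESHOLD_SECOND_START, THRESHOLD_SECOND_END - 30).
    return THRESHOLD_SECOND_START <= start_second < THRESHOLD_SECOND_END - 30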
9 changes: 5 additions & 4 deletions scripts/yolov5/build_dataset.py
@@ -3,11 +3,11 @@
"""
import argparse
from pathlib import Path
from joblib import Parallel, delayed

import cv2
import numpy as np
import yaml
from joblib import Parallel, delayed

import soccertrack
from soccertrack import Camera
@@ -60,10 +60,11 @@ def move_files_to_folder(files, folder):
def _parallel_imwrite(frame_num, frame):
file_path = f"{save_dir}/{path_to_csv.stem}/{frame_num:06d}.png"
cv2.imwrite(file_path, frame)

res = Parallel(n_jobs=-1)(
delayed(_parallel_imwrite)(frame_num, frame) for frame_num, frame in enumerate(tqdm(cam.iter_frames()))
)
delayed(_parallel_imwrite)(frame_num, frame)
for frame_num, frame in enumerate(tqdm(cam.iter_frames()))
)
break

# Partition the dataset
6 changes: 3 additions & 3 deletions soccertrack/__init__.py
@@ -1,6 +1,6 @@
from contextlib import contextmanager
import logging
import warnings
from contextlib import contextmanager


@contextmanager
@@ -18,7 +18,7 @@ def all_logging_disabled(highest_level=logging.CRITICAL):
warnings.simplefilter("ignore")
from vidgear.gears import WriteGear, CamGear

from soccertrack.dataframe import CoordinatesDataFrame, BBoxDataFrame
from soccertrack.io import load_df
import soccertrack.datasets # noqa
from soccertrack.camera import Camera
from soccertrack.dataframe import BBoxDataFrame, CoordinatesDataFrame
from soccertrack.io import load_df
7 changes: 3 additions & 4 deletions soccertrack/cli.py
@@ -1,14 +1,13 @@
from glob import glob
import os
from glob import glob
from typing import Optional

import numpy as np

from fire import Fire

from soccertrack.utils.camera import find_intrinsic_camera_parameters, Camera
from soccertrack.utils.utils import make_video
from soccertrack.utils import logger, set_log_level
from soccertrack.utils.camera import Camera, find_intrinsic_camera_parameters
from soccertrack.utils.utils import make_video


class CLI:
4 changes: 2 additions & 2 deletions soccertrack/dataframe/bboxdataframe.py
@@ -1,15 +1,15 @@
from __future__ import annotations

from hashlib import md5
from pathlib import Path
from typing import Any, Optional, Type

import cv2
import numpy as np
import pandas as pd
from pathlib import Path

from soccertrack.utils import make_video
from soccertrack.logger import tqdm
from soccertrack.utils import make_video

from ..logger import logger
from ..utils import MovieIterator, get_fps
1 change: 1 addition & 0 deletions soccertrack/dataframe/gpsdataframe.py
@@ -1,4 +1,5 @@
import pandas as pd

from .base import SoccerTrackMixin


5 changes: 3 additions & 2 deletions soccertrack/datasets/__init__.py
@@ -1,8 +1,9 @@
import os
from pathlib import Path
from typing import Optional

from soccertrack.logger import logger
from typing import Optional
from pathlib import Path

from .downloader import KaggleDownloader

__all__ = ["available", "get_path", "KaggleDownloader"]
14 changes: 7 additions & 7 deletions soccertrack/datasets/labelbox_uploader.py
@@ -1,18 +1,18 @@
from __future__ import annotations

import uuid
from typing import Dict, Any
from pathlib import Path
from typing import Any, Dict

from labelbox import Client, LabelingFrontend, Dataset, Project
from labelbox.schema.ontology import OntologyBuilder, Tool
from labelbox import Client, Dataset, LabelingFrontend, Project
from labelbox.schema.media_type import MediaType
from labelbox.schema.ontology import OntologyBuilder, Tool
from tqdm import tqdm

from pathlib import Path
import soccertrack
from soccertrack.logging import (
from soccertrack.logging import ( # This just makes the df viewable in the notebook.
show_df,
) # This just makes the df viewable in the notebook.
from tqdm import tqdm
)


def bbdf2ndjson(