# drone-sim
AirSim/UE4 based drone simulation environment generation and instruction labeling
# All Dependencies
pip install PyUserInput yattag sympy PySimpleGUI
# Minimal Dependencies
pip install msgpack-rpc-python multiprocess
## Demo recording commands:
Full-screen lossless capture at 15 fps:
ffmpeg -f x11grab -video_size 1920x1080 -framerate 15 -i :0 -vcodec libx264 -crf 0 -preset ultrafast -qp 0 -pix_fmt yuv444p video.mkv
The same at 30 fps:
ffmpeg -f x11grab -video_size 1920x1080 -framerate 30 -i :0 -vcodec libx264 -crf 0 -preset ultrafast -qp 0 -pix_fmt yuv444p video.mkv
30 fps with a crop filter applied after capture:
ffmpeg -f x11grab -video_size 1920x1080 -framerate 30 -i :0 -vcodec libx264 -preset ultrafast -qp 0 -pix_fmt yuv444p -filter:v "crop=1349:800:60:60" video.mkv
Capturing only a 1340x780 region at offset (60,60):
ffmpeg -f x11grab -video_size 1340x780 -framerate 30 -i :0.0+60,60 -vcodec libx264 -crf 0 -preset ultrafast -qp 0 -pix_fmt yuv444p video.mkv
## Assembling images into videos:
ffmpeg -start_number 0 -framerate 10 -i fpv_%d.png -vcodec libx264 -qp 0 fpv.mkv
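The same assembly can also be done from Python; a minimal sketch, assuming moviepy 1.x and the fpv_%d.png naming used above (frame_dir is a placeholder):
````
# Sketch: assemble numbered fpv_*.png frames into a lossless video with moviepy 1.x.
import glob
import re
from moviepy.editor import ImageSequenceClip

frame_dir = "."  # placeholder: directory holding fpv_0.png, fpv_1.png, ...
frames = sorted(glob.glob(frame_dir + "/fpv_*.png"),
                key=lambda p: int(re.search(r"fpv_(\d+)\.png", p).group(1)))
clip = ImageSequenceClip(frames, fps=10)
# -qp 0 mirrors the lossless setting in the ffmpeg one-liner above.
clip.write_videofile("fpv.mkv", codec="libx264", ffmpeg_params=["-qp", "0"])
````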
# Generating synthetic datasets:
generate_curriculum_config.py environments/env_name
generate_template_curves.py environments/env_name
generate_template_annotations.py environments/env_name
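These three steps can be chained per environment; a minimal wrapper sketch, assuming each script takes the environment directory as its only argument, as above:
````
# Sketch: run the three dataset-generation steps for one environment, in order.
import subprocess
import sys

def generate_dataset(env_dir):
    for script in ("generate_curriculum_config.py",
                   "generate_template_curves.py",
                   "generate_template_annotations.py"):
        # check=True stops at the first failing step so later stages
        # don't run on incomplete inputs.
        subprocess.run([sys.executable, script, env_dir], check=True)

if __name__ == "__main__":
    generate_dataset("environments/env_name")
````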
# Video capture command:
# TODO: Create a capture utility that is triggered at the start and end of every real-world rollout.
# It should check whether the real_drone flag is True and record only when it is.
ffmpeg -f v4l2 -framerate 30 -video_size 1920x1080 -i /dev/video2 -filter:v "crop=960:960:480:50" out.mkv
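A possible shape for that TODO, sketched under assumptions: wrap the ffmpeg command above in a subprocess that a rollout starts and stops, with the real_drone flag passed in by the caller. The function names here are hypothetical.
````
# Sketch: start/stop webcam capture around a real-world rollout.
# The real_drone flag is supplied by the (hypothetical) rollout code.
import signal
import subprocess

CAPTURE_CMD = [
    "ffmpeg", "-f", "v4l2", "-framerate", "30", "-video_size", "1920x1080",
    "-i", "/dev/video2", "-filter:v", "crop=960:960:480:50",
]

def start_capture(out_path, real_drone):
    """Launch ffmpeg for a real-drone rollout; return the process, or None."""
    if not real_drone:
        return None
    return subprocess.Popen(CAPTURE_CMD + [out_path])

def stop_capture(proc):
    """Finalize the recording cleanly (SIGINT is ffmpeg's graceful stop)."""
    if proc is not None:
        proc.send_signal(signal.SIGINT)
        proc.wait()
````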
# Command for pulling files from AWS:
scp -i "v_aws.pem" -rp ubuntu@ :~/drn/unreal_config_nl_cage_augmented/models/tmp models_tmp_superdronerx
# Command for pushing files to AWS:
scp -i "v_aws.pem" -rp models_stages1 [email protected]:~/drn/unreal_config_nl_cage_augmented/models/stage1
# Code I found for using moviepy to put captions on videos:
````
# Assumes moviepy 1.x; TextClip also needs ImageMagick installed.
from moviepy import editor
import os.path as op

# Placeholders: set these to the actual folder and file names.
movie_fol = "."
movie_name = "video.mkv"
out_movie_name = "video_captioned.mkv"

def annotate(clip, txt, txt_color='red', fontsize=50, font='Xolonium-Bold'):
    """Write a text caption at the bottom of the clip."""
    txtclip = editor.TextClip(txt, fontsize=fontsize, font=font, color=txt_color)
    cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'bottom'))])
    return cvc.set_duration(clip.duration)

video = editor.VideoFileClip(op.join(movie_fol, movie_name))
# (start, end) times in seconds, with the caption text for each span.
subs = [((0, 4), 'subs1'),
        ((4, 9), 'subs2'),
        ((9, 12), 'subs3'),
        ((12, 16), 'subs4')]
annotated_clips = [annotate(video.subclip(from_t, to_t), txt)
                   for (from_t, to_t), txt in subs]
final_clip = editor.concatenate_videoclips(annotated_clips)
final_clip.write_videofile(op.join(movie_fol, out_movie_name))
````