import json
import os
import hashlib
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.compositing.concatenate import concatenate_videoclips
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.VideoClip import ColorClip, ImageClip, TextClip
from textwrap import wrap
from layout import Layout
from text import Cut
from translate import trans
from utils import blur, boundary, flowtext, middletext, opacity
from voice import get_voice
# from moviepy.editor import (concatenate_videoclips, vfx, VideoFileClip,
# TextClip, AudioFileClip, CompositeVideoClip,
# CompositeAudioClip, ImageClip)
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# NOTE: make_start() references `translator_name`, which is not defined
# anywhere in this snippet. The mapping below is an assumed placeholder for
# the display name shown under the title; adjust it to whatever your setup
# actually uses.
translator_name = {'bd': 'Baidu Translate', 'gd': 'Google Translate',
                   'bi': 'Bing Translate'}
def get_path(*args):
    return os.path.normpath(os.path.join(BASE_DIR, *args))
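# Composite the per-letter TextClips from gen_text() onto `clip`: Layout
# assigns every letter a screen position, each fx in `fxs` turns the letter
# into an animated clip, and the result is overlaid for `dur` seconds.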
def addtext(clip, texts, dur=1, space=(-20, 0), align='left', inline='center',
pos=('left', 'bottom'), fxs=[]):
result = []
layout = Layout(clip.size, space, align, inline, pos)
for i, line in enumerate(texts):
for j, letter in enumerate(line):
layout.add_box(letter.size, j, i)
index = 0
for i, line in enumerate(texts):
for j, letter in enumerate(line):
screenpos = layout.get_screenpos(j, i)
for fx in fxs:
item = fx(letter, screenpos=screenpos,
index=index, i=i, j=j, texts=texts, dur=dur)
result.append(item)
index += 1
fin = CompositeVideoClip([clip]+result).set_duration(dur)
# [i.close() for i in result]
return fin
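# Wrap the raw text to 13 characters per line, pull punctuation that lands at
# the start of a line back onto the previous line, and build one TextClip per
# character so every letter can be animated on its own.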
def gen_text(raw_texts, cut=('!', '。', '?', ';', ',', ':', '’', '”', '、'),
*args, **kwargs):
result = []
    def dot(text):
        # Move punctuation that wrap() pushed to the start of a line back to
        # the end of the previous line.
        for i, t in enumerate(text):
            if i == 0:
                continue
            for p in cut:
                if t.startswith(p):
                    text[i-1] = text[i-1]+p
                    # lstrip() returns a new string; it has to be written
                    # back, otherwise the punctuation stays duplicated.
                    t = t.lstrip(p)
                    text[i] = t
        return text
texts = dot(wrap(raw_texts, width=13))
for line in texts:
result.append([TextClip(letter, *args, **kwargs) for letter in line])
return result
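# Build the title card: synthesize the spoken title/author with get_voice(),
# then animate the title and the translator name over the (blurred) background
# with the expand-in/expand-out letter effects.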
def make_start(t_biaoti, t_zuozhe, way):
    print('Generating title: ' + t_biaoti)
# t_biaoti, t_zuozhe = trans([biaoti, zuozhe], way=way)
t = t_biaoti+'<break time="700ms"/>'+t_zuozhe
filename = hashlib.md5(t.encode(encoding='UTF-8')).hexdigest()
filepath = get_voice(t, filename=filename)
a_clip = AudioFileClip(filepath)
voice_len = a_clip.duration
text_config = {
'color': 'black',
'font': get_path('font/方正大标宋_GBK.ttf')
}
text = gen_text('《'+t_biaoti+'》\n'+t_zuozhe, fontsize=80, **text_config)
trans_name = gen_text(translator_name[way], fontsize=60, **text_config)
fxs = [textinexpand, textoutexpand]
clip = bgclip.set_duration(voice_len+1)\
.subfx(blur, tb=0.5)\
.subfx(blur, start=1, end=1, ta=0.5, tb=0.5+voice_len)\
.subfx(blur, start=1, end=0, ta=0.5+voice_len)\
.fx(addtext, texts=text, fxs=fxs, pos=('center', 'center'),
align='center', dur=voice_len+1)\
.fx(addtext, texts=trans_name, fxs=fxs, pos=('center', 'bottom'),
align='center', dur=voice_len+1)
fin = CompositeVideoClip(
[bgclip, clip], size=screensize)\
.set_duration(voice_len+1)\
.set_audio(a_clip)
return fin
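# For every original line and its translation: normalize the punctuation of
# the translation, synthesize its speech, overlay both lines of text on the
# background for the length of the audio, and concatenate the per-line clips.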
def make_content(text, text_trans):
bg = CompositeVideoClip(
[bgclip_blur, bgclip.set_pos(('center', -250))], size=screensize)
text_config = {
'color': '#2C2C2C',
'font': get_path('font/方正大标宋_GBK.ttf')
}
texts = []
for t, t_trans in zip(text, text_trans):
t_trans = t_trans.replace(',', ',')\
.replace('.', '。')\
.replace('?', '?')
        # print('Generating content: ' + t)
        print('Generating speech: ' + t_trans)
filename = hashlib.md5(t_trans.encode(encoding='UTF-8')).hexdigest()
filepath = get_voice(t_trans, filename=filename)
a_clip = AudioFileClip(filepath)
voice_len = a_clip.duration
text_clip = TextClip(
t, fontsize=40, **text_config).set_pos(('left', 850))
trans_text_clip = TextClip(
t_trans, fontsize=80, **text_config).set_pos(('left', 900))
main = CompositeVideoClip([bg, text_clip, trans_text_clip])\
.set_duration(voice_len+1)\
.set_audio(a_clip)
# text_clip = gen_text(t, fontsize=40, **text_config)
# trans_text_clip = gen_text(t_trans, fontsize=80, **text_config)
# main = bg\
# .fx(addtext, texts=text_clip, fxs=[textinflow, textoutflow],
# pos=('left', 850), align='left', dur=voice_len+1)\
# .fx(addtext, texts=trans_text_clip, fxs=[textinflow, textoutflow],
# space=(-40, 0), pos=(-10, 900), align='left', dur=voice_len+1)\
# .set_duration(voice_len+1)\
# .set_audio(a_clip)
texts.append(main)
# texts = [concatenate_videoclips(texts)]
# return texts[0]
return concatenate_videoclips(texts)
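# Closing clip: currently just returns the pre-rendered outro video
# (三连加速BGM.mp4); `zuozhe` is accepted but unused.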
def make_finish(zuozhe):
bgm = VideoFileClip(get_path('三连加速BGM.mp4'))
return bgm
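# Per-letter animation effects passed to addtext() through `fxs`: the *flow
# pair slides letters in/out one after another with staggered start times,
# the *expand pair spreads letters outward from the centre of their line
# while fading in/out.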
def textinflow(letter, screenpos=None,
index=0, i=0, j=0, texts=None, dur=0):
n = sum([len(i) for i in texts])-1
item = letter\
.set_start(index*0.02)\
.set_duration(dur-n*0.02-0.5)\
.fx(flowtext, screenpos, dur=0.5, f_x=100, f_y=-300)\
.fx(opacity, dur=0.5)
return item
def textoutflow(letter, screenpos=None,
index=0, i=0, j=0, texts=None, dur=0):
n = sum([len(i) for i in texts])-1
item = letter\
.set_start(dur+(index-n)*0.02-0.5)\
.set_duration((n-index)*0.02+0.5)\
.fx(flowtext, screenpos, start=0, end=-1, dur=0.5, f_x=200, f_y=-300)\
.fx(opacity, start=1, end=0, dur=0.5)
return item
def textinexpand(letter, screenpos=None,
index=0, i=0, j=0, texts=None, dur=0):
n = middletext(j, len(texts[i]))
item = letter\
.set_start(0)\
.set_duration(dur-abs(n)*0.06-0.5)\
.fx(flowtext, screenpos, dur=dur, f_x=10*n, f_y=0, start=0, end=1)\
.fx(opacity, dur=dur)
return item
def textoutexpand(letter, screenpos=None,
index=0, i=0, j=0, texts=None, dur=0):
n = middletext(j, len(texts[i]))
item = letter\
.set_start(dur-abs(n)*0.06-0.5)\
.set_duration(abs(n)*0.06+0.5)\
.fx(flowtext, screenpos, f_x=10*n, f_y=0, start=1, end=1.5)\
.fx(opacity, start=1, end=0)
return item
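# Return the cached translation (text lines, author, title) for this backend
# and title if the JSON cache file exists, otherwise None.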
def get_text(way, biaoti):
path = get_path('translate_text', '{way}_{biaoti}.json'.format(
way=way, biaoti=biaoti))
    if os.path.exists(path):
        with open(path, 'r', encoding='utf-8') as f:
            file = json.load(f)
        result = [l['text'] for l in file['text']]
        return result, file['author'], file['title']
else:
return None
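# Translate the body, author and title with the given backend, cache the
# result as JSON, and return the same (text lines, author, title) tuple
# as get_text().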
def get_trans(biaoti, zuozhe, text, way):
path = get_path('translate_text', '{way}_{biaoti}.json'.format(
way=way, biaoti=biaoti))
with open(path, 'w') as f:
text_trans = trans(text, way)
author_trans, title_trans = trans([zuozhe, biaoti], way)
json_data = {
'title': title_trans,
'author': author_trans,
'text': [{'text': t} for t in text_trans]
}
f.write(json.dumps(json_data))
result = [l['text'] for l in json_data['text']]
return result, json_data['author'], json_data['title']
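# For each translation backend: load or create the cached translation, build
# the title card, the line-by-line content and the slide transitions, then
# concatenate everything (plus the outro) into one video and render it.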
if __name__ == '__main__':
biaoti, zuozhe, text = '晏子使楚', '刘向', '''晏子使楚。
楚人以晏子短,楚人为小门于大门之侧而延晏子。
晏子不入,曰:
“使狗国者从狗门入,今臣使楚,不当从此门入。”
傧者更道,从大门入。见楚王。
王曰:“齐无人耶?使子为使。”
晏子对曰:“齐之临淄三百闾,
张袂成阴,挥汗成雨,比肩继踵而在,何为无人?”
王曰:“然则何为使予?”
晏子对曰:“齐命使,各有所主:
其贤者使使贤主,不肖者使使不肖主。
婴最不肖,故宜使楚矣!”晏子将使楚。
楚王闻之,谓左右曰:“晏婴,齐之习辞者也。
今方来,吾欲辱之,何以也?”
左右对曰:“为其来也,臣请缚一人,过王而行,
王曰:‘何为者也?’对曰:‘齐人也。’
王曰:‘何坐?’曰:‘坐盗。’
晏子至,楚王赐晏子酒,酒酣,吏二缚一人诣王。
王曰:“缚者曷为者也?”对曰:“齐人也,坐盗。”
王视晏子曰:“齐人固善盗乎?”晏子避席对曰:
“婴闻之,橘生淮南则为橘,生于淮北则为枳,
叶徒相似,其实味不同。所以然者何?
水土异也。今民生长于齐不盗,入楚则盗,
得无楚之水土使民善盗耶?”
王笑曰:“ 圣人非所与熙也,寡人反取病焉。”'''.split('\n')
screensize = (1920, 1080)
bgclip = ImageClip(get_path('background/4.jpg'))
bgclip_blur = ImageClip(get_path('background/5.jpg'))
slideup_clip = CompositeVideoClip(
[bgclip_blur, bgclip.fx(boundary, dur=1)], size=screensize)\
.set_duration(1)
slidedown_clip = CompositeVideoClip(
[bgclip_blur, bgclip.fx(boundary, start=1, end=0, dur=1)],
bg_color=[207, 203, 192], size=screensize)\
.set_duration(1)
finish = make_finish(zuozhe)
ways = ['bd', 'gd', 'bi']
all_clip = []
for way in ways:
p = get_text(way, biaoti)
if p:
text_trans, t_author, t_title = p
else:
text_trans, t_author, t_title = get_trans(
biaoti, zuozhe, text, way)
print(text_trans)
start = make_start(t_title, t_author, way)
content = make_content(text, text_trans)
cap = concatenate_videoclips(
[start, slideup_clip, content, slidedown_clip])
all_clip.append(cap)
all_clip.append(finish)
final = concatenate_videoclips(all_clip)
final.write_videofile(get_path(biaoti+'.mp4'), fps=60, threads=20)
I have an AMD 3700 and 32 GB of RAM in my PC, and I run all of this code from an M.2 SSD, but incredibly the speed is just 2 s/it and I have no idea how that can happen. It also takes over 12 GB of memory on Linux but almost nothing on Windows.
I found the answer: the reason it is so slow is the nested CompositeVideoClip usage. Using a single CompositeVideoClip speeds it up a lot. As for the RAM, maybe I should try exporting clips in smaller pieces instead of only once at the end.
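In case it helps anyone else, here is a rough sketch of what a single-CompositeVideoClip version of make_content from the script above could look like (same MoviePy 1.x style API as the rest of the script). The name make_content_flat is purely illustrative; it reuses the module-level bgclip, bgclip_blur, screensize, get_path and get_voice, keeps the original +1 second of padding per line, and leaves out the punctuation normalization for brevity. Instead of building one CompositeVideoClip per line on top of an already-composited background and then concatenating them, all text layers go into one composite with explicit start times, so each frame is composited only once:

from moviepy.audio.AudioClip import CompositeAudioClip

def make_content_flat(text, text_trans):
    text_config = {'color': '#2C2C2C',
                   'font': get_path('font/方正大标宋_GBK.ttf')}
    overlays, audios, t = [], [], 0
    for line, line_trans in zip(text, text_trans):
        name = hashlib.md5(line_trans.encode('UTF-8')).hexdigest()
        a_clip = AudioFileClip(get_voice(line_trans, filename=name))
        dur = a_clip.duration + 1
        # Only the text layers get per-line start/duration values.
        overlays.append(TextClip(line, fontsize=40, **text_config)
                        .set_pos(('left', 850)).set_start(t).set_duration(dur))
        overlays.append(TextClip(line_trans, fontsize=80, **text_config)
                        .set_pos(('left', 900)).set_start(t).set_duration(dur))
        audios.append(a_clip.set_start(t))
        t += dur
    # Two background layers plus all overlays in one composite, instead of
    # one nested composite per line.
    layers = [bgclip_blur.set_duration(t),
              bgclip.set_pos(('center', -250)).set_duration(t)]
    return CompositeVideoClip(layers + overlays, size=screensize)\
        .set_audio(CompositeAudioClip(audios))

For the RAM side, writing each backend's clip to its own file and then joining the rendered files afterwards (for example with ffmpeg's concat demuxer) is one way to do the "export more clips" idea without holding every source reader open until the final write_videofile call.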