# -*- coding: utf-8 -*-
"""Jianying (CapCut) draft generation automation.

Synthesizes speech from a script with edge-tts, then assembles the audio,
subtitle-text and background-video segments into a Jianying draft via
pycapcut.
"""
import asyncio
import uuid
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union

import edge_tts
import pycapcut as capcut
from mutagen.mp3 import MP3
from pycapcut import trange
from pycapcut.keyframe import KeyframeProperty
from pycapcut.segment import ClipSettings

# A point of a timerange may be microseconds (int) or a pycapcut time
# string such as "1.5s"; a timerange is (start, duration).
# NOTE(review): the original annotations used Optional[int, str], which is
# invalid typing syntax and raises TypeError at import time.
Timepoint = Union[int, str]
Timerange = Tuple[Timepoint, Timepoint]


class EdgeTTS:
    """Online text-to-speech synthesis backed by Microsoft edge-tts."""

    # Supported Chinese voices: display name -> edge-tts voice identifier.
    TIMBRES = {
        "女声-晓晓": "zh-CN-XiaoxiaoNeural",
        "女声-晓辰": "zh-CN-XiaochenNeural",
        "女声-晓倩": "zh-CN-XiaoqianNeural",
    }

    def __init__(self, materials_path: Path):
        """
        Initialize the synthesizer.

        :param materials_path: directory where generated MP3 files are saved
        """
        # Directory that receives the synthesized audio materials.
        self.materials_path = materials_path

    def generate_audio(
        self,
        content: str,
        timbre: str = "女声-晓晓",
        rate: str = "+0%",
        volume: str = "+0%",
    ) -> Tuple[str, int]:
        """Synthesize *content* to an MP3 file and return its name and length.

        :param content: text to synthesize
        :param timbre: voice display name, must be a key of :data:`TIMBRES`
        :param rate: speech-rate offset, e.g. "+25%"
        :param volume: volume offset, e.g. "+0%"
        :return: (material file name, duration in microseconds)
        :raises RuntimeError: if synthesis or saving fails
        """
        # noinspection PyBroadException
        try:
            async def _async_generate_audio() -> Tuple[str, int]:
                communicator = edge_tts.Communicate(
                    text=content,
                    voice=self.TIMBRES[timbre],
                    rate=rate,
                    volume=volume,
                )
                # Random, collision-unlikely material file name.
                name = f"{uuid.uuid4().hex[-16:].upper()}.mp3"
                audio_path = (self.materials_path / name).as_posix()
                await communicator.save(audio_path)
                # Duration in microseconds (mutagen reports seconds as float).
                duration = int(round(MP3(audio_path).info.length * 1_000_000))
                return name, duration

            # Run the coroutine so callers get a purely synchronous API.
            return asyncio.run(_async_generate_audio())
        except Exception as exception:
            raise RuntimeError(
                f"根据文本内容合成语音并保存为音频素材发生异常:{str(exception)}"
            )


class GenerateDraft:
    """Build a Jianying draft: create tracks and add audio/text/video segments."""

    # noinspection PyShadowingNames
    def __init__(
        self,
        name: str,
        video_width: int = 1080,
        video_height: int = 1920,
        video_fps: int = 30,
        drafts_path: str = r"C:\Users\admin\AppData\Local\JianyingPro\User Data\Projects\com.lveditor.draft",
        materials_path: str = r"C:\Users\admin\PycharmProjects\Python\剪映脚本生成自动化\materials",
    ):
        """
        Initialize (and overwrite, if present) a draft.

        :param name: draft name
        :param video_width: video width in pixels
        :param video_height: video height in pixels
        :param video_fps: video frame rate
        :param drafts_path: Jianying drafts directory
        :param materials_path: materials directory (audio/video/image files)
        :raises FileNotFoundError: if the materials directory does not exist
        """
        print("正在生成剪映脚本...")
        # noinspection PyBroadException
        try:
            self.name = name
            self.video_width, self.video_height = video_width, video_height
            self.video_fps = video_fps
            self.drafts_path = drafts_path
            self.materials_path = Path(materials_path)
            # Fail fast when the materials directory is missing.
            if not self.materials_path.exists():
                raise FileNotFoundError(f"素材文件夹不存在: {self.materials_path}")
            # Draft-folder manager and a fresh draft inside it.
            self.draft_folder = capcut.DraftFolder(self.drafts_path)
            self.draft = self.draft_folder.create_draft(
                draft_name=self.name,
                width=self.video_width,
                height=self.video_height,
                fps=self.video_fps,
                allow_replace=True,  # overwrite an existing draft of the same name
            )
            # Background video track; audio/text tracks are added later by
            # create_audio_and_text().
            self.draft.add_track(track_type=capcut.TrackType.video)
            # Total duration (microseconds) of the audio synthesized so far.
            self.audio_duration = 0
            self.synthesizer = EdgeTTS(self.materials_path)
        except Exception as exception:
            print(f"发生异常:{str(exception)}")
            raise

    def _get_material(self, name: str) -> str:
        """Resolve a material file name to a POSIX path.

        :param name: material file name
        :return: POSIX-style absolute/relative path string
        :raises FileNotFoundError: if the material file does not exist
        """
        material_path = self.materials_path / name
        if not material_path.exists():
            raise FileNotFoundError(f"素材文件不存在: {material_path}")
        return material_path.as_posix()

    def _add_audio(
        self,
        name: str,
        target_timerange: Timerange,
        source_timerange: Optional[Timerange] = None,
        speed: Optional[float] = None,
        volume: float = 1.0,
        fade: Optional[Tuple[str, str]] = None,
    ) -> None:
        """Add an audio segment to the draft.

        :param name: audio material file name
        :param target_timerange: (start, duration) of the segment on the track
        :param source_timerange: (start, duration) slice of the source material;
            defaults to a speed-adjusted slice matching the target duration
        :param speed: playback speed
        :param volume: playback volume
        :param fade: (fade-in, fade-out) durations
        """
        print(f"正在添加音频片段 {name}...", end="")
        try:
            audio_segment = capcut.AudioSegment(
                material=self._get_material(name),
                target_timerange=trange(*target_timerange),
                source_timerange=(
                    trange(*source_timerange) if source_timerange else None
                ),
                speed=speed,
                volume=volume,
            )
            if fade:
                audio_segment.add_fade(*fade)
            self.draft.add_segment(audio_segment)
            print("已完成")
        except Exception as exception:
            print(f"发生异常:{str(exception)}")
            raise

    def _add_video(
        self,
        name: str,
        target_timerange: Timerange,
        source_timerange: Optional[Timerange] = None,
        speed: Optional[float] = None,
        volume: float = 1.0,
        clip_settings: Optional[Dict[str, Any]] = None,
        keyframes: Optional[List[Tuple[KeyframeProperty, str, float]]] = None,
        animation: Optional[Dict[str, Any]] = None,
        transition: Optional[Dict[str, Any]] = None,
        background_filling: Optional[Tuple[str, Any]] = None,
    ) -> None:
        """Add a video/image segment to the draft.

        :param name: video/image material file name
        :param target_timerange: (start, duration) of the segment on the track
        :param source_timerange: (start, duration) slice of the source material
        :param speed: playback speed
        :param volume: playback volume
        :param clip_settings: image adjustment settings (ClipSettings kwargs)
        :param keyframes: keyframe triples (property, time, value); video only
        :param animation: animation kwargs; video only
        :param transition: transition kwargs; video only
        :param background_filling: background-fill args; image only
        """
        print(f"正在添加视频/图片片段 {name}...", end="")
        try:
            video_segment = capcut.VideoSegment(
                material=self._get_material(name),
                target_timerange=trange(*target_timerange),
                source_timerange=(
                    trange(*source_timerange) if source_timerange else None
                ),
                speed=speed,
                volume=volume,
                clip_settings=ClipSettings(**clip_settings) if clip_settings else None,
            )
            if keyframes:
                for prop, time, value in keyframes:
                    video_segment.add_keyframe(prop, time, value)
            if animation:
                video_segment.add_animation(**animation)
            if transition:
                video_segment.add_transition(**transition)
            if background_filling:
                video_segment.add_background_filling(*background_filling)
            self.draft.add_segment(video_segment)
            print("已完成")
        except Exception as exception:
            print(f"发生异常:{str(exception)}")
            raise

    def _add_text(
        self,
        content: str,
        timerange: Timerange,
        border: Optional[Dict[str, Any]] = None,
        background: Optional[Dict[str, Any]] = None,
        font: Optional[str] = None,
        style: Optional[Dict[str, Any]] = None,
        clip_settings: Optional[Dict[str, Any]] = None,
        bubble: Optional[Dict[str, Any]] = None,
        effect: Optional[str] = None,
        animation: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Add a text segment to the draft.

        :param content: text content
        :param timerange: (start, duration) of the segment on the track
        :param border: text-border kwargs
        :param background: text-background kwargs
        :param font: font type name
        :param style: text-style kwargs
        :param clip_settings: image adjustment settings (ClipSettings kwargs)
        :param bubble: bubble kwargs
        :param effect: fancy-text resource id. To obtain one, save a preset in
            Jianying and read its resource_id from
            C:/Users/admin/AppData/Local/JianyingPro/User Data/Presets/Text_V2/*.textpreset
        :param animation: animation kwargs
        """
        print(f"正在添加文本片段 {content}...", end="")
        try:
            text_segment = capcut.TextSegment(
                text=content,
                timerange=trange(*timerange),
                border=capcut.TextBorder(**border) if border else None,
                # FIX(review): the original tested `if border` here, so a
                # background passed without a border was silently dropped.
                background=capcut.TextBackground(**background) if background else None,
                font=capcut.FontType(font) if font else None,
                style=capcut.TextStyle(**style) if style else None,
                clip_settings=(
                    ClipSettings(**clip_settings) if clip_settings else None
                ),
            )
            if bubble:
                text_segment.add_bubble(**bubble)
            if effect:
                text_segment.add_effect(effect)
            if animation:
                text_segment.add_animation(**animation)
            self.draft.add_segment(text_segment)
            print("已完成")
        except Exception as exception:
            print(f"发生异常:{str(exception)}")
            raise

    def _save(self) -> None:
        """Persist the draft to the drafts directory."""
        try:
            self.draft.save()
            print("草稿保存成功")
        except Exception as exception:
            print(f"发生异常:{str(exception)}")
            raise

    def create_audio_and_text(
        self,
        script: str,
        timbre: str = "女声-晓晓",
        rate: str = "+25%",
        volume: str = "+0%",
    ) -> None:
        """Synthesize the script sentence by sentence and add audio + subtitles.

        The script is split on the Chinese comma; each part becomes one audio
        segment and one matching subtitle laid out back to back. Updates
        :attr:`audio_duration` with the total length.

        :param script: full narration script
        :param timbre: voice display name
        :param rate: speech rate offset
        :param volume: volume offset
        """
        print("正在根据脚本合成音频和文本素材...")
        # One audio track and one text track for the whole narration.
        self.draft.add_track(track_type=capcut.TrackType.audio)
        self.draft.add_track(track_type=capcut.TrackType.text)
        start = 0
        for content in script.split(","):
            name, duration = self.synthesizer.generate_audio(
                content, timbre, rate, volume
            )
            self._add_audio(name=name, target_timerange=(start, duration))
            self._add_text(
                content=content,
                timerange=(start, duration),
                style={"size": 12.0, "align": 1},  # 12pt, horizontally centered
                clip_settings={"transform_y": -0.6},  # shift toward the bottom
                effect="6896137858998930701",  # fancy-text preset (row 2, col 3)
            )
            start += duration
        self.audio_duration = start

    def create_video(
        self,
        name: str,
        target_timerange: Timerange,
        source_timerange: Optional[Timerange] = None,
        speed: Optional[float] = None,
        volume: float = 1.0,
        clip_settings: Optional[Dict[str, Any]] = None,
        keyframes: Optional[List[Tuple[KeyframeProperty, str, float]]] = None,
        animation: Optional[Dict[str, Any]] = None,
        transition: Optional[Dict[str, Any]] = None,
        background_filling: Optional[Tuple[str, Any]] = None,
    ) -> None:
        """Public entry point that adds a video/image segment to the draft.

        NOTE(review): the original body was an unfinished stub whose docstring
        and progress message were copy-pasted from create_audio_and_text and
        which added no segment at all; it now delegates to :meth:`_add_video`
        with an identical parameter list. See _add_video for parameter details.
        """
        self._add_video(
            name=name,
            target_timerange=target_timerange,
            source_timerange=source_timerange,
            speed=speed,
            volume=volume,
            clip_settings=clip_settings,
            keyframes=keyframes,
            animation=animation,
            transition=transition,
            background_filling=background_filling,
        )


# ======================== usage example ========================
def execute_workflow() -> None:
    """Generate a complete Jianying draft end to end."""
    draft = GenerateDraft(
        name="demo2",
    )
    # Synthesize audio + subtitles from the narration script.
    # FIX(review): the original called draft.generate_audio_and_text, a
    # method that does not exist (AttributeError).
    draft.create_audio_and_text(
        script="所有人今天准备狂点外卖,是真的0.1元起一杯的霸王茶姬,还外卖到家怎么能不来一杯呢,现在淘宝闪购给大家发福利,最高22元无门槛红包,官方链接就在下方,奶茶脑袋快冲"
    )
    # Background video spanning the full narration.
    draft.create_video(name="background.mp4", target_timerange=(0, draft.audio_duration))
    # draft.create_video(name="arrow.gif", target_timerange=(0, draft.audio_duration))
    draft._save()


if __name__ == "__main__":
    execute_workflow()