How to Split a Video into Fixed-Length Segments with Python (e.g. Every 10s)

Author: 这就是算法

This article explains how to use Python to split a video into fixed-length segments (e.g. cutting every 10s). The approach is described in detail through example code and should be a useful reference for your study or work; read on if you need it.


Platform: Ubuntu 16.04
Libraries: ffmpeg, subprocess
Requirement: split every .mp4 video under path into segments of delta_X (10s) each, save them under save_path, and name the resulting clips id00001.mp4, id00002.mp4, id00003.mp4, …

Notes:

1. Each video is assumed to be shorter than one hour, and any trailing part shorter than delta_X is discarded. (Adjust this to your own needs.)

Reference code for related operations is given in the supplementary section at the end of this article.

2. About the path and save_path directories (someone asked me about this today): keep them completely separate, with no containment between them. For example, do not use path='/home/video' together with save_path='/home/video/save', because video_list is built with os.listdir(path), which assumes that everything under path is a video file. That is admittedly not very robust and I did not think it through carefully; a small filtering sketch is shown right below. I hope this helps :)
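If you would rather not rely on that assumption, a minimal sketch that keeps only the .mp4 entries (the directory name is just a placeholder) looks like this:

import os
path = '/home/dataset'
# keep only .mp4 files so that other files or subfolders under path are ignored
video_list = [f for f in os.listdir(path) if f.lower().endswith('.mp4')]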

import subprocess
import os

path = '/home/dataset'   # directory containing the videos to be split
video_list = os.listdir(path)
delta_X = 10              # cut every 10 s
save_path = '/home/save'
mark = 0

# get the duration of a video in seconds
def get_length(filename):
    result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
                             "format=duration", "-of",
                             "default=noprint_wrappers=1:nokey=1", filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    return float(result.stdout)

for file_name in video_list:
    minutes = int(get_length(os.path.join(path, file_name))) // 60   # whole minutes of file_name
    second = int(get_length(os.path.join(path, file_name))) % 60     # remaining seconds of file_name
    for i in range(minutes + 1):
        if second >= delta_X:   # guarantee at least one cut
            start_time = 0
            end_time = start_time + delta_X
            for j in range((second // delta_X) + 1):
                min_temp = str(i)
                start = str(start_time)
                end = str(end_time)
                # crop video
                # pad each field to two digits
                if len(str(min_temp)) == 1:
                    min_temp = '0' + str(min_temp)
                if len(str(start_time)) == 1:
                    start = '0' + str(start_time)
                if len(str(end_time)) == 1:
                    end = '0' + str(end_time)
                # build the name of the saved clip
                if len(str(mark)) < 6:
                    name = '0' * (6 - len(str(mark)) - 1) + str(mark)
                else:
                    name = str(mark)
                command = 'ffmpeg -i {} -ss 00:{}:{} -to 00:{}:{} -strict -2 {}'.format(
                    os.path.join(path, file_name),
                    min_temp, start, min_temp, end,
                    os.path.join(save_path, 'id' + str(name)) + '.mp4')
                mark += 1
                os.system(command)
                if i != minutes or (i == minutes and (end_time + delta_X) < second):
                    start_time += delta_X
                    end_time += delta_X
                elif (end_time + delta_X) <= second:
                    start_time += delta_X
                    end_time += delta_X
                elif (end_time + delta_X) > second:  # the trailing part shorter than delta_X is discarded
                    break
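For comparison, here is a minimal alternative sketch that walks through each video directly in seconds and calls ffmpeg via subprocess instead of the minute/second bookkeeping above. It reuses the get_length() helper defined above, the directory paths are the same placeholders, and, as in the script above, a trailing part shorter than delta_X is dropped.

import os
import subprocess

path = '/home/dataset'
save_path = '/home/save'
delta_X = 10
mark = 0
for file_name in os.listdir(path):
    src = os.path.join(path, file_name)
    duration = int(get_length(src))   # get_length() as defined above
    # step through the video in delta_X-second windows; the last partial window is dropped
    for start in range(0, duration - delta_X + 1, delta_X):
        dst = os.path.join(save_path, 'id{:05d}.mp4'.format(mark))
        subprocess.run(['ffmpeg', '-i', src, '-ss', str(start),
                        '-t', str(delta_X), '-strict', '-2', dst], check=True)
        mark += 1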

Supplementary: several video-processing operations in Python

Combine two image frame sequences into one video (arranged side by side); if you only need a single sequence, adapt the code yourself.

Inputs (frame naming):

        real_1.png, real_2.png, ...

        fake_1.png, fake_2.png, ...

import os
import cv2
import numpy as np

def picvideo(path, size, file, name):
    filelist = os.listdir(path)  # all frame images under path
    fps = 35
    file_path = os.path.join(file, name)
    fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')
    # fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')  # mp4
    video = cv2.VideoWriter(file_path, fourcc, fps, size)
    real = []
    fake = []
    for item in filelist:
        # adapt these checks to your own naming scheme, e.g. item.endswith('aaa') for files ending in 'aaa'
        if item.startswith('real'):
            item = path + '/' + item
            real.append(item)
        if item.startswith('fake'):
            item = path + '/' + item
            fake.append(item)
    # note: sort() is lexicographic; zero-pad the frame numbers (or use a natural sort)
    # if real_2 must come before real_10
    real.sort()
    fake.sort()
    for path1, path2 in zip(real, fake):
        img1 = cv2.imread(path1)
        img2 = cv2.imread(path2)
        assert img1.shape == img2.shape, "shape error"
        # use image = np.vstack([img1, img2]) for a vertical layout
        image = np.hstack([img1, img2])  # horizontal layout
        video.write(image)
    video.release()

number = 2
path = 'source'
# for a vertical layout use size = (1024, 512*number)
size = (1024*number, 512)
file = './results'
name = 'final.avi'
picvideo(path, size, file, name)

Compute the FPS of a video

import cv2

if __name__ == '__main__':
    video = cv2.VideoCapture("video.mp4")
    # find the OpenCV version, since the FPS property constant differs between versions
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    if int(major_ver) < 3:
        fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
    else:
        fps = video.get(cv2.CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.CAP_PROP_FPS): {0}".format(fps))
    video.release()

Extract the audio from a video

from moviepy.editor import *
video = VideoFileClip('test.mp4')
audio = video.audio
audio.write_audiofile('test.wav')

Add the audio of A.mp4 to B.mp4

from moviepy.editor import VideoFileClip
origin_video = "A.mp4"
add_video = "B.mp4"
res_video = "res.mp4"
voice_video = VideoFileClip(origin_video)
audio = voice_video.audio
video = VideoFileClip(add_video)
new = video.set_audio(audio)
new.write_videofile(res_video)

Merge two videos of different formats (mp4, avi) into one avi of size (256, 256*4); one input is (256, 256*3) and the other is (256, 256)

import cv2
import numpy as np
import imageio

# merge statue.avi and voice3.mp4 (two videos of different formats) into final3.avi
path = './results/final3.avi'
video1 = imageio.get_reader('./results/statue.avi')
video2 = imageio.get_reader('./results/voice3.mp4')
video1_L = []
for im in video1:
    video1_L.append(im)
video1.close()
video2_L = []
for im in video2:
    video2_L.append(im)
video2.close()
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out1 = cv2.VideoWriter(path, fourcc, 20, (256*4, 256), True)
for frame1, frame2 in zip(video1_L, video2_L):
    frame1 = cv2.cvtColor(frame1.astype('float32'), cv2.COLOR_BGR2RGB)
    frame2 = cv2.cvtColor(frame2.astype('float32'), cv2.COLOR_BGR2RGB)
    # for videos of different formats, converting back to uint8 here is essential
    frame1 = frame1.astype(np.uint8)
    frame2 = frame2.astype(np.uint8)
    image = np.concatenate((frame1, frame2), axis=1)
    out1.write(image)
out1.release()

Resize every image in a folder to (256, 256) and convert jpg to png

import PIL.Image
import os

path = './data'
path_list = os.listdir(path)
for file in path_list:
    im = PIL.Image.open(os.path.join(path, file))
    im = im.resize((256, 256))
    # file[:-3] assumes a three-character extension such as jpg
    im.save(os.path.join(path, file[:-3] + 'png'))
    os.remove(os.path.join(path, file))

Trim the video test.mp4 to a given range (e.g. 00:00:01-00:05:00) and save it as crop.mp4 (ffmpeg must be installed)

ffmpeg -i test.mp4 -ss 00:00:01 -to 00:05:00 -c:v copy -c:a copy crop.mp4

If the command above produces a black video, use the following command instead

ffmpeg -i test.mp4 -ss 00:00:01 -to 00:05:00 -strict -2 crop.mp4

Save an image from test.mp4 every 5s (fps=1 saves one image per second; fps=1/5=0.2 saves one image every 5 seconds)

ffmpeg -i test.mp4 -vf fps=0.2 out%d.png

Rotate a video (moviepy must be installed)

from moviepy.editor import *

clip = VideoFileClip("result.mp4")
# clip = clip.rotate(-90)  # rotate 90 degrees clockwise
clip = clip.rotate(90)  # rotate 90 degrees counterclockwise
clip.write_videofile("res.mp4")  # save

Get the duration of a video (in seconds)

import subprocess
def get_length(filename):
    result = subprocess.run(["ffprobe", "-v", "error", "-show_entries",
                             "format=duration", "-of",
                             "default=noprint_wrappers=1:nokey=1", filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    return float(result.stdout)
print("minute:"+str(int(get_length("test.mp4")) // 60))
print("second:"+str(int(get_length("test.mp4")) % 60))

Convert a video into a sequence of frames

import cv2
import os

video = cv2.VideoCapture("test.mp4")
result_path = './save_result'  # output folder
os.makedirs(result_path, exist_ok=True)  # imwrite fails silently if the folder does not exist
success, frame = video.read()
i = 0
while success:
    cv2.imwrite(os.path.join(result_path, str(i) + '.png'), frame)
    i = i + 1
    success, frame = video.read()
video.release()

Extract faces from an image

from PIL import Image
import face_recognition
inputImg = "biden.jpg"
image = face_recognition.load_image_file(inputImg)
faces = face_recognition.face_locations(image)
for i in range(len(faces)):
    top, right, bottom, left = faces[i]
    faceImage = image[top:bottom, left:right]
    final = Image.fromarray(faceImage)
    final.save("img%s.png" % (str(i)), "PNG")

使用ffmpeg >1.1将本地的a.mp4与b.mp4合并成output.mp4(无损合并)

First create a text file filelist.txt and add the following lines to it:

file 'a.mp4'

file 'b.mp4'

ffmpeg -f concat -i filelist.txt -c copy output.mp4
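Both steps can also be driven from Python; a small sketch (using the same placeholder file names as above) looks like this:

import subprocess
# write the concat list, then run ffmpeg's concat demuxer with stream copy (no re-encoding)
with open('filelist.txt', 'w') as f:
    f.write("file 'a.mp4'\nfile 'b.mp4'\n")
subprocess.run(['ffmpeg', '-f', 'concat', '-i', 'filelist.txt',
                '-c', 'copy', 'output.mp4'], check=True)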

Turn video frames into a video

ffmpeg -f image2 -i /home/ttwang/images/image%d.jpg tt.mp4

This concludes the article on using Python to split a video into fixed-length segments (e.g. cutting every 10s). For more on splitting videos by time with Python, please search 脚本之家's earlier articles or keep browsing the related articles below. We hope you will continue to support 脚本之家!
