Method 1:
Suitable for long, high-resolution videos: ffmpeg streams the data through, so this approach is fast and uses very little memory.
"""Method 1: concatenate videos with ffmpeg's concat demuxer.

Writes the clip paths to a list file, then shells out to ffmpeg, which
streams the data — memory usage stays low even for long videos.
"""
import subprocess
import glob
import os

from natsort import natsorted

base_dir = r'C:\Users\Administrator\Videos\shuiyin\result'
output_file = r'output_shuiyin.mp4'

# Collect the clips in natural (human) order: clip2 sorts before clip10.
video_paths = glob.glob(base_dir + '/*.mp4')
video_paths = natsorted(video_paths)

# The concat demuxer reads its inputs from a text file, one line per clip.
with open('file_list.txt', 'w') as f:
    for file in video_paths:
        f.write(f"file '{file}'\n")

ffmpeg_command = [
    'ffmpeg',
    '-f', 'concat',            # Specify the stitching mode
    '-safe', '0',              # Allow absolute paths
    '-i', 'file_list.txt',     # Entered file list
    '-c:v', 'libx264',         # Use the libx264 encoder
    '-c:a', 'aac',             # Encode audio using aac
    '-strict', 'experimental', # Use experimental coding
    output_file,               # Output file path
]
# check=True raises CalledProcessError if ffmpeg exits non-zero.
subprocess.run(ffmpeg_command, check=True)
print(f"Video stitching is completed,Output file:{output_file}")
Method 2:
Use imageio, suitable for shorter videos
"""Method 2: concatenate videos with OpenCV + imageio.

Decodes every frame of every clip into memory before writing the output,
so it is only suitable for short videos.
"""
import glob

import cv2
import imageio
from natsort import natsorted

if __name__ == '__main__':
    base_dir = r'C:\Users\Administrator\Videos\shuiyin\result'
    output_path = "pinjie_shuiyin.mp4"

    # Collect the clips in natural (human) order.
    video_paths = glob.glob(base_dir + '/*.mp4')
    video_paths = natsorted(video_paths)

    res = []    # every decoded frame of every clip, in RGB order
    fps = None  # NOTE(review): fps of the LAST opened clip is used for the
                # whole output — confirm all clips share the same frame rate
    for file in video_paths:
        cap = cv2.VideoCapture(file)  # open the clip
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = 0
        print(file)
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # no more frames in this clip
            # OpenCV decodes to BGR; imageio expects RGB.
            res.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            frame_count += 1
        # Release the decoder before moving to the next clip.
        cap.release()

    # Write all collected frames out as a single mp4.
    imageio.mimsave(output_path, res, "mp4", fps=fps, macro_block_size=None)
Method 3:
Use the FFmpeg C API to stitch videos. This example merges two files and copies only their video streams, without transcoding.
vector<string> fileList = { url_origin,url_add };// These are two files//Get the original input video file encoding and other informationconst AVOutputFormat* ofmt = NULL;//Output formatAVFormatContext* ifmt_ctx = NULL, * ofmt_ctx = NULL;//Video data maintenance objectAVPacket* pkt = NULL;//Data Packet int ret;//The function execution return codeint stream_index;//Data flow index pkt = av_packet_alloc();//Initialize the packet structureif (!pkt) { return; } if ((ret = avformat_open_input(&ifmt_ctx, url_origin, 0, 0) < 0)) { goto end;//Failed to open the file} //Get the output file namestring out_file; auto name = ifmt_ctx->iformat->name;//Automatically identify the encapsulation type of the file//Hevc can only be used to convert using MP4 or hevc packages. Other packages report errors, because automatic identification is performed here, you can ignore the specific format.out_file.replace(out_file.find('.')+1, 3, name); const char* out_filename = out_file.c_str(); //Acquiring the encoding and other parameters in the first file, the encoding formats of the two files are required here because the same configuration is used when writing to the file, and no transcoding is performed.if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { goto end; } avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); if (!ofmt_ctx) { goto end; } ofmt = ofmt_ctx->oformat; //Find the video stream and copy the parameters of the video stream to the output streamfor (int i = 0; i < ifmt_ctx->nb_streams; ++i) { AVStream* in_stream = ifmt_ctx->streams[i]; AVCodecParameters* in_codecpar = in_stream->codecpar; if (in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO)//Not video stream skipped { continue; } AVStream* out_stream = avformat_new_stream(ofmt_ctx, NULL);//Create output stream if (!out_stream) { goto end; } ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);//Copy the decoder parameters if (ret < 0) { goto end; } out_stream->time_base = 
in_stream->time_base;//Copy time base stream_index = i; out_stream->codecpar->codec_tag = 0; break; } avformat_close_input(&ifmt_ctx);//Close the file //Open the output fileif (!(ofmt->flags & AVFMT_NOFILE)) { ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE); if (ret < 0) { goto end; } } ret = avformat_write_header(ofmt_ctx, NULL);//Write header information, such as encoding, etc.if (ret < 0) { goto end; } int64_t i = 0;// Used to calculate timestamps, and also frame countsint64_t p_max_dts = 0;//Time stamp for spelling files for (int index = 0; index < (); ++index)//Transfer the file{ if ((ret = avformat_open_input(&ifmt_ctx, fileList[index].c_str(), 0, 0)) < 0) { goto end; } if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)//Find file flow information { goto end; } //Transfer directly to the stream while (1) { AVStream* in_stream, * out_stream; ret = av_read_frame(ifmt_ctx, pkt); if (ret < 0) { break; } pkt->stream_index = stream_index;//Video Streaming Number // Here is a tip, because the above example only has video and no audio, so it will not cross the boundary. If there are multiple streams, here you need to see if you have new several streams and whether it will cross the boundary. in_stream = ifmt_ctx->streams[stream_index]; out_stream = ofmt_ctx->streams[stream_index]; //The timestamp must be processed here, otherwise it will fail when writing. //Single frame duration int64_t frameDuration = av_rescale_q(1, av_inv_q(in_stream->time_base), in_stream->r_frame_rate); //Convert the time of a single frame from the input stream to the output stream time int64_t _t = av_rescale_q(frameDuration, in_stream->time_base, out_stream->time_base); // Calculate the timestamp and accumulate to calculate the subsequent timestamp p_max_dts = _t * (i); pkt->dts = p_max_dts; pkt->pts = pkt->dts; //If audio and video needs to be written, this function may be required: av_interleaved_write_frame, which will perform cross-write //pkt is now empty. 
This function will get ownership of pkt content and reset it, so there is no need for unref, but the write_frame situation is different and needs to be released manually. ret = av_write_frame(ofmt_ctx, pkt);//Write the package directly to the output file without decoding av_packet_unref(pkt); if (ret < 0) { break; } ++i; } //Close the file avformat_close_input(&ifmt_ctx); } av_write_trailer(ofmt_ctx);//Write the end of the file end: av_packet_free(&pkt);// Pass the pointer here because you want to set pkt to null avformat_close_input(&ifmt_ctx);//Same if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) { avio_closep(&ofmt_ctx->pb);//Avio open to release } avformat_free_context(ofmt_ctx); if (ret < 0 && ret != AVERROR_EOF) { return;//Exception ends }
This example can complete the copy and splicing of video streams. It is a relatively simple example. It requires that file encoding and other information must be consistent, and no transcoding is performed, which is relatively fast.
That concludes this article on stitching videos together with Python. For more on Python video stitching, please search my earlier articles or browse the related articles below. Thank you for your support!