A Detailed Guide to Using AVIOContext in FFmpeg
Author: fengbingchun
When encoding or decoding video with FFmpeg, if the input is a local file, or the data comes from a USB camera or a laptop's built-in camera, the source can simply be named in the second parameter of avformat_open_input. But what if the video data to be processed lives in a memory block? That case is handled with the AVIOContext structure, and the second parameter of avformat_open_input is then passed as nullptr.
The main FFmpeg function involved is avio_alloc_context, declared as follows:
AVIOContext *avio_alloc_context(
    unsigned char *buffer,
    int buffer_size,
    int write_flag,
    void *opaque,
    int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
    int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
    int64_t (*seek)(void *opaque, int64_t offset, int whence));
(1). buffer: the memory block through which AVIOContext performs input/output. It must be allocated with av_malloc and is released with av_free. av_read_frame keeps pulling data from here.
(2). buffer_size: size of the memory block in bytes.
(3). write_flag: set to 1 if buffer is used for output, i.e. writing (FFmpeg writes the processed data into buffer); set to 0 if buffer is used for input (FFmpeg reads data from buffer).
(4). opaque: an opaque pointer to user-specific data.
(5). read_packet: callback function; it must be supplied when buffer is used for input, otherwise it may be nullptr. Its parameters correspond, in order, to the opaque pointer passed to avio_alloc_context, the destination buffer to fill, and the number of bytes requested (at most buffer_size).
(6). write_packet: callback function; it must be supplied when buffer is used for output, otherwise it may be nullptr. Its parameters correspond, in order, to the opaque pointer, the source data to write out, and the number of bytes to write.
(7). seek: callback function used to seek to a specified byte position; it may be nullptr.
After calling this function, the returned pointer must be assigned to the pb field (the I/O context) of the AVFormatContext.
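Putting these pieces together, a minimal sketch of the reading case could look like the following (my_read_packet, my_opaque and buf_size are placeholder names introduced here; error handling is omitted):

// Minimal sketch: feed libavformat from memory via a custom read callback.
extern int my_read_packet(void* opaque, uint8_t* buf, int buf_size); // placeholder callback
void* my_opaque = nullptr;                // placeholder for your own state, handed back to the callback

const int buf_size = 4 * 1024 * 1024;
uint8_t* avio_buffer = static_cast<uint8_t*>(av_malloc(buf_size));   // must come from av_malloc

AVIOContext* avio_ctx = avio_alloc_context(avio_buffer, buf_size,
    0 /* read only */, my_opaque, my_read_packet, nullptr, nullptr);

AVFormatContext* fmt_ctx = avformat_alloc_context();
fmt_ctx->pb = avio_ctx;                   // attach the custom I/O context
avformat_open_input(&fmt_ctx, nullptr,    // second parameter is nullptr: data comes from avio_ctx
    nullptr, nullptr);                    // format can be probed, or specified explicitly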
The test code is shown below:
(1). The main thread displays the content of the memory block in real time, while a separate thread produces the data. A queue is used for this: the thread function set_packet keeps pushing data into the queue, and the callback read_packet keeps popping data from it.
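The snippets below also rely on a few headers and globals that are defined elsewhere in the author's test program. The following block is a hedged reconstruction: the names (width, height, block_size, packet_encode_flag, print_error_string) match the code below, but the concrete values and the helper's body are assumptions consistent with the 640x480 BGR24 raw video configured later.

#include <cstdio>
#include <atomic>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#ifdef __cplusplus
}
#endif
#include <opencv2/opencv.hpp>

// assumed values: one 640x480 BGR24 frame per buffer
static const int width = 640;
static const int height = 480;
static const int block_size = width * height * 3;

// run flag polled by the producer thread, cleared by the main thread on exit
static std::atomic<bool> packet_encode_flag{ true };

// minimal stand-in for the author's error helper: print an FFmpeg error code as text
static void print_error_string(int err)
{
    char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
    av_strerror(err, buf, sizeof(buf));
    fprintf(stderr, "ffmpeg error: %d, %s\n", err, buf);
}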
#include <queue>
#include <mutex>
#include <condition_variable>

typedef struct Buffer {
    unsigned char* data;
    unsigned int length;
} Buffer;

class BufferQueue {
public:
    BufferQueue() = default;
    ~BufferQueue() {}

    void push(Buffer& buffer) {
        std::unique_lock<std::mutex> lck(mtx);
        queue.push(buffer);
        cv.notify_all();
    }

    void pop(Buffer& buffer) {
        std::unique_lock<std::mutex> lck(mtx);
        while (queue.empty()) {
            cv.wait(lck);
        }
        buffer = queue.front();
        queue.pop();
    }

    unsigned int size() {
        std::unique_lock<std::mutex> lck(mtx);
        return queue.size();
    }

private:
    std::queue<Buffer> queue;
    std::mutex mtx;
    std::condition_variable cv;
};

class PacketScaleQueue {
public:
    PacketScaleQueue() = default;

    ~PacketScaleQueue() {
        Buffer buffer;
        while (getPacketSize() > 0) {
            popPacket(buffer);
            delete[] buffer.data;
        }
        while (getScaleSize() > 0) {
            popScale(buffer);
            delete[] buffer.data;
        }
    }

    void init(unsigned int buffer_num = 16, unsigned int buffer_size = 1024 * 1024 * 4) {
        for (unsigned int i = 0; i < buffer_num; ++i) {
            Buffer buffer = { new unsigned char[buffer_size], buffer_size };
            pushPacket(buffer);
        }
    }

    void pushPacket(Buffer& buffer) { packet_queue.push(buffer); }
    void popPacket(Buffer& buffer) { packet_queue.pop(buffer); }
    unsigned int getPacketSize() { return packet_queue.size(); }

    void pushScale(Buffer& buffer) { scale_queue.push(buffer); }
    void popScale(Buffer& buffer) { scale_queue.pop(buffer); }
    unsigned int getScaleSize() { return scale_queue.size(); }

private:
    BufferQueue packet_queue, scale_queue; // free buffers / filled buffers
};
(2). The thread function set_packet is shown below. The class PacketScaleQueue holds two BufferQueue members: packet_queue holds buffers that are currently free (not in use), and scale_queue holds buffers that have already been filled with data.
#include <thread>
#include <chrono>
#include <cstring>

void set_packet(PacketScaleQueue& packet_encode)
{
    while (packet_encode_flag) {
        static unsigned char v1 = 0, v2 = 0, v3 = 255;
        static const size_t size = height * width;

        Buffer buffer;
        packet_encode.popPacket(buffer);          // take a free buffer
        // fill one BGR24 frame with a solid, slowly changing color
        memset(buffer.data, v1, size);
        memset(buffer.data + size, v2, size);
        memset(buffer.data + size * 2, v3, size);
        packet_encode.pushScale(buffer);          // hand the filled buffer to the consumer

        ++v1; ++v2; --v3;
        if (v1 == 255) v1 = 0;
        if (v2 == 255) v2 = 0;
        if (v3 == 0) v3 = 255;

        std::this_thread::sleep_for(std::chrono::milliseconds(40)); // ~25 frames per second
    }
}
(3). The callback function read_packet is shown below:
int read_packet(void* opaque, uint8_t* buf, int buf_size)
{
    PacketScaleQueue* packet_encode = static_cast<PacketScaleQueue*>(opaque);

    Buffer buffer;
    packet_encode->popScale(buffer);   // wait for a filled buffer
    memcpy(buf, buffer.data, buf_size); // copy one frame into FFmpeg's buffer
    packet_encode->pushPacket(buffer); // recycle the buffer to the free queue

    return buf_size;
}
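The callback above assumes that FFmpeg requests exactly one frame per call (buf_size equal to block_size) and that a filled buffer always becomes available. A slightly more defensive variant is sketched below; the read_packet_safe name, the stop-flag check and the copy-size clamp are assumptions added here, not part of the original test code. Returning AVERROR_EOF is how a read callback signals end of stream to FFmpeg.

// Hedged sketch of a more defensive read callback (not from the original test code).
int read_packet_safe(void* opaque, uint8_t* buf, int buf_size)
{
    PacketScaleQueue* packet_encode = static_cast<PacketScaleQueue*>(opaque);

    // producer stopped and nothing left to consume: report end of stream
    if (!packet_encode_flag && packet_encode->getScaleSize() == 0)
        return AVERROR_EOF;

    Buffer buffer;
    packet_encode->popScale(buffer);
    // never copy more than FFmpeg asked for, nor more than the buffer holds
    const int copy_size = buf_size < static_cast<int>(buffer.length) ? buf_size : static_cast<int>(buffer.length);
    memcpy(buf, buffer.data, copy_size);
    packet_encode->pushPacket(buffer);

    return copy_size;
}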
(4). The main function test_ffmpeg_avio_show is shown below:
int test_ffmpeg_avio_show()
{
    PacketScaleQueue packet_encode;
    packet_encode.init(30, block_size); // 30 reusable buffers, one raw frame each

    std::thread thread_packet(set_packet, std::ref(packet_encode));

    // buffer handed over to the AVIOContext; it must be allocated with av_malloc
    uint8_t* avio_ctx_buffer = static_cast<uint8_t*>(av_malloc(block_size));
    if (!avio_ctx_buffer) {
        print_error_string(AVERROR(ENOMEM));
        return -1;
    }

    // read-only context: write_flag = 0, read_packet supplies the data
    AVIOContext* avio_ctx = avio_alloc_context(avio_ctx_buffer, block_size, 0,
        &packet_encode, &read_packet, nullptr, nullptr);
    if (!avio_ctx) {
        print_error_string(AVERROR(ENOMEM));
        return -1;
    }

    AVFormatContext* ifmt_ctx = avformat_alloc_context();
    if (!ifmt_ctx) {
        print_error_string(AVERROR(ENOMEM));
        return -1;
    }
    ifmt_ctx->pb = avio_ctx; // attach the custom I/O context

    // rawvideo has no header, so frame geometry and pixel format must be given explicitly
    AVDictionary* dict = nullptr;
    av_dict_set(&dict, "video_size", "640x480", 0);
    av_dict_set(&dict, "pixel_format", "bgr24", 0);

    auto ret = avformat_open_input(&ifmt_ctx, nullptr, av_find_input_format("rawvideo"), &dict);
    if (ret < 0) {
        fprintf(stderr, "Could not open input\n");
        print_error_string(ret);
        return ret;
    }

    ret = avformat_find_stream_info(ifmt_ctx, nullptr);
    if (ret < 0) {
        fprintf(stderr, "Could not find stream information\n");
        print_error_string(ret);
        return ret;
    }

    av_dump_format(ifmt_ctx, 0, "nothing", 0);

    int video_stream_index = -1;
    for (unsigned int i = 0; i < ifmt_ctx->nb_streams; ++i) {
        const AVStream* stream = ifmt_ctx->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            fprintf(stdout, "type of the encoded data: %d, dimensions of the video frame in pixels: width: %d, height: %d, pixel format: %d\n",
                stream->codecpar->codec_id, stream->codecpar->width, stream->codecpar->height, stream->codecpar->format);
        }
    }
    if (video_stream_index == -1) {
        fprintf(stderr, "error: no video stream\n");
        return -1;
    }

    AVCodecParameters* codecpar = ifmt_ctx->streams[video_stream_index]->codecpar;
    if (codecpar->codec_id != AV_CODEC_ID_RAWVIDEO) {
        fprintf(stderr, "error: this test code only supports rawvideo encode: %d\n", codecpar->codec_id);
        return -1;
    }

    AVPacket* packet = av_packet_alloc();
    if (!packet) {
        fprintf(stderr, "fail to av_packet_alloc\n");
        return -1;
    }

    cv::Mat mat(height, width, CV_8UC3);
    const char* winname = "show video";
    cv::namedWindow(winname);

    while (1) {
        ret = av_read_frame(ifmt_ctx, packet);
        if (ret < 0) {
            packet_encode_flag = false; // read failed: stop the producer thread and quit
            break;
        }
        if (packet->stream_index == video_stream_index && packet->size > 0) {
            mat.data = packet->data; // one raw BGR24 frame, displayed without copying
            cv::imshow(winname, mat);
            int key = cv::waitKey(30);
            if (key == 27) { // ESC pressed: stop the producer thread and quit
                packet_encode_flag = false;
                av_packet_unref(packet);
                break;
            }
        }
        av_packet_unref(packet);
    }

    av_packet_free(&packet);
    cv::destroyWindow(winname);

    avformat_close_input(&ifmt_ctx);

    // note: the internal buffer could have changed, and be != avio_ctx_buffer
    if (avio_ctx) {
        av_freep(&avio_ctx->buffer);
        av_freep(&avio_ctx);
    }
    //avio_context_free(&avio_ctx); ==> av_freep(&avio_ctx);

    av_dict_free(&dict);

    thread_packet.join();

    fprintf(stdout, "test finish\n");
    return 0;
}
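As the commented-out line at the end of the function hints, FFmpeg also provides avio_context_free() for releasing the context itself. On FFmpeg versions that have it, a possible cleanup sequence is the sketch below (not taken verbatim from the original test code):

// note: free avio_ctx->buffer, not the original avio_ctx_buffer pointer,
// because FFmpeg may have replaced the internal buffer
if (avio_ctx)
    av_freep(&avio_ctx->buffer);
avio_context_free(&avio_ctx); // frees the AVIOContext and sets the pointer to nullptr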
When the test runs, a "show video" window opens and displays the generated raw frames: a solid color that gradually changes over time (press ESC to quit).
GitHub: https://github.com/fengbingchun/OpenCV_Test