20 #include <com/avpkit/core/BufferSource.h>
21 #include <com/avpkit/core/AudioSamples.h>
22 #include <com/avpkit/core/VideoPicture.h>
23 #include <com/avpkit/ferry/Logger.h>
25 VS_LOG_SETUP(VS_CPP_PACKAGE);
29 #include "libavcodec/avcodec.h"
30 #include "libavfilter/avfilter.h"
31 #include "libavutil/opt.h"
40 if (!mFilterContext || !ready) {
41 VS_LOG_ERROR(
"Try to add samples to an unitialized abuffer");
46 AVFrame* frame = av_frame_alloc();
52 frame->channel_layout = (uint64_t) inSamples->getChannelLayout();
54 frame->pts = mTimeBase->rescale(inSamples->
getPts(), timeBase);
55 VS_REF_RELEASE(timeBase);
56 int data_size = av_samples_get_buffer_size(&frame->linesize[0],
59 (AVSampleFormat) frame->format,
62 retval = avcodec_fill_audio_frame(frame,
64 (AVSampleFormat) frame->format,
65 (
const uint8_t*) inSamples->getRawSamples(0),
68 retval = av_buffersrc_write_frame(mFilterContext, frame);
70 av_frame_free(&frame);
73 av_frame_free(&frame);
75 av_frame_free(&frame);
78 retval = av_buffersrc_add_frame_flags(mFilterContext, NULL, AV_BUFFERSRC_FLAG_PUSH);
87 if (!mFilterContext || !ready) {
88 VS_LOG_ERROR(
"Try to add picture to an unitialized buffer");
92 AVFrame* frame = av_frame_alloc();
96 int64_t pts = mTimeBase->rescale(inPicture->
getPts(), timeBase);
97 VS_REF_RELEASE(timeBase);
100 retval = av_buffersrc_add_frame(mFilterContext, frame);
101 av_frame_free(&frame);
104 retval = av_buffersrc_add_frame_flags(mFilterContext, NULL, AV_BUFFERSRC_FLAG_PUSH);
110 BufferSource::BufferSource() {
112 mParams = av_buffersrc_parameters_alloc();
115 BufferSource::~BufferSource() {
117 if (mParams->hw_frames_ctx) {
118 av_buffer_unref(&mParams->hw_frames_ctx);
126 IRational* time_base,
129 BufferSource* retval = NULL;
132 if (sample_rate > 0 && time_base) {
133 retval = BufferSource::make();
136 retval->mFilter = avfilter_get_by_name(
"abuffer");
137 retval->mFilterContext = avfilter_graph_alloc_filter(graph, retval->mFilter,
"abuffer");
138 if (!retval->mFilterContext) {
139 VS_REF_RELEASE(retval);
142 av_get_channel_layout_string(ch_layout,
145 channel_layout == IAudioSamples::ChannelLayout::CH_NONE ? av_get_default_channel_layout(channels) : (uint64_t) channel_layout);
146 av_opt_set_q(retval->mFilterContext,
"time_base", (AVRational) {
147 time_base->getNumerator(), time_base->getDenominator()
148 }, AV_OPT_SEARCH_CHILDREN);
149 av_opt_set_int(retval->mFilterContext,
"sample_rate", sample_rate, AV_OPT_SEARCH_CHILDREN);
150 av_opt_set(retval->mFilterContext,
"sample_fmt", av_get_sample_fmt_name((AVSampleFormat) format), AV_OPT_SEARCH_CHILDREN);
151 av_opt_set(retval->mFilterContext,
"channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
152 av_opt_set_int(retval->mFilterContext,
"channels", channels, AV_OPT_SEARCH_CHILDREN);
153 if (avfilter_init_str(retval->mFilterContext, NULL) < 0) {
154 VS_REF_RELEASE(retval);
156 retval->mTimeBase.reset(time_base,
true);
157 retval->mFilterGraph = graph;
168 IRational* frame_rate,
169 IRational* time_base) {
171 BufferSource* retval = NULL;
174 if (width > 0 && height > 0 && time_base && frame_rate) {
175 retval = BufferSource::make();
178 retval->mFilter = avfilter_get_by_name(
"buffer");
179 retval->mFilterContext = avfilter_graph_alloc_filter(graph, retval->mFilter,
"buffer");
180 if (!retval->mFilterContext) {
181 VS_REF_RELEASE(retval);
183 av_opt_set_q(retval->mFilterContext,
"frame_rate", (AVRational) {
184 frame_rate->getNumerator(), frame_rate->getDenominator()
185 }, AV_OPT_SEARCH_CHILDREN);
186 av_opt_set_q(retval->mFilterContext,
"time_base", (AVRational) {
187 time_base->getNumerator(), time_base->getDenominator()
188 }, AV_OPT_SEARCH_CHILDREN);
189 av_opt_set_q(retval->mFilterContext,
"pixel_aspect", (AVRational) {
191 }, AV_OPT_SEARCH_CHILDREN);
192 av_opt_set_int(retval->mFilterContext,
"width", width, AV_OPT_SEARCH_CHILDREN);
193 av_opt_set_int(retval->mFilterContext,
"height", height, AV_OPT_SEARCH_CHILDREN);
194 av_opt_set(retval->mFilterContext,
"pix_fmt", av_get_pix_fmt_name((AVPixelFormat) format), AV_OPT_SEARCH_CHILDREN);
195 if (avfilter_init_str(retval->mFilterContext, NULL) < 0) {
196 VS_REF_RELEASE(retval);
198 retval->mTimeBase.reset(time_base,
true);
199 retval->mFilterGraph = graph;
207 BufferSource* BufferSource::make(AVFilterGraph* graph, IVideoPicture* picture, IRational* frame_rate) {
208 VideoPicture* inPicture =
static_cast<VideoPicture*
> (picture);
209 AVFrame* frame = inPicture->getAVFrame();
210 IRational* timeBase = picture->getTimeBase();
211 BufferSource* retval = make(graph, picture->getPixelType(), frame->width, frame->height, frame_rate, timeBase);
212 VS_REF_RELEASE(timeBase);
213 if (av_pix_fmt_desc_get((AVPixelFormat) picture->getPixelType())->flags & AV_PIX_FMT_FLAG_HWACCEL) {
214 AVBufferRef* hwCtx = inPicture->getAVFrame()->hw_frames_ctx;
216 retval->mParams->hw_frames_ctx = inPicture->getAVFrame()->hw_frames_ctx;
217 av_buffersrc_parameters_set(retval->mFilterContext, retval->mParams);
219 VS_LOG_WARN(
"hw_frames_ctx is NULL for hardware accelerated pixel format ");
virtual int32_t getSampleRate()
Find the sample rate of the samples in this audio buffer.
virtual Format getFormat()
Find the Format of the samples in this buffer.
virtual int32_t getChannels()
Return the number of channels of the samples in this buffer.
virtual IRational * getTimeBase()
Get the time base that time stamps of this object are represented in.
virtual int64_t getPts()
What is the Presentation Time Stamp of this set of audio samples.
virtual int32_t getNumSamples()
Get the number of samples in this buffer.
virtual int addVideoPicture(IVideoPicture *picture)
Adds picture to this filter.
virtual int addAudioSamples(IAudioSamples *samples)
Adds audio samples to this filter.
A set of raw (decoded) samples, plus a timestamp for when to play those samples relative to other items in the stream.
Format
The format we use to represent audio.
This class wraps represents a Rational number for the AVPKit.
static IRational * make()
Get a new rational that will be set to 0/0.
Represents one raw (undecoded) picture in a video stream, plus a timestamp for when to display that video picture.
virtual int64_t getPts()
What is the Presentation Time Stamp (in Microseconds) of this picture.
virtual IRational * getTimeBase()
Get the time base that time stamps of this object are represented in.
VS_API_AVPKIT AVFrame * getAVFrame()
Call to get the raw underlying AVFrame we manage; don't pass this to ffmpeg directly as ffmpeg often modifies or frees the frames it is given.
WARNING: Do not use logging in this class, and do not set any static file variables to values other than compile-time constants.