本文介绍了FFMPEG C api h.264编码/ MPEG2 ts流媒体问题的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

限时删除!!

类原型如下:

  #ifndef _FULL_MOTION_VIDEO_STREAM_H_ 
#define _FULL_MOTION_VIDEO_STREAM_H_

#include< memory>
#include< string>

#ifndef INT64_C
#define INT64_C(c)(c ## LL)
#define UINT64_C(c)(c ## ULL)
#endif

externC
{
#includelibavutil / opt.h
#includelibavcodec / avcodec.h
#includelibavutil /channel_layout.h
#includelibavutil / common.h
#includelibavutil / imgutils.h
#includelibavutil / mathematics.h
#include libavutil / samplefmt.h
#includelibavformat / avformat.h

#include< libavutil / timestamp.h>
#include< libswscale / swscale.h>
#include< libswresample / swresample.h>
}

class FMVStream
{
public:
struct OutputStream
{
OutputStream():
st (0),
next_pts(0),
samples_count(0),
框架(0),
tmpFrame(0),
sws_ctx(0)
{
}

AVStream * st;

/ *将生成的下一个框架的pts * /
int64_t next_pts;
int samples_count;

AVFrame * frame;
AVFrame * tmpFrame;

struct SwsContext * sws_ctx;
};

///
///构造函数
///
FMVStream();

///
///析构函数
///
〜FMVStream();

///
///帧编码器帮助函数
///
///将原始RGB帧编码到传输流
/ //
int EncodeFrame(uint8_t * frame);

///
/// Frame width setter
///
void setFrameWidth(int width);

///
/// Frame width getter
///
int getFrameWidth()const;

///
/// Frame height setter
///
void setFrameHeight(int height);

///
/// Frame height getter
///
int getFrameHeight()const;

///
///流地址设置器
///
void setStreamAddress(const std :: string& address);

///
///流地址getter
///
std :: string getStreamAddress()const;

私人:

///
///视频流创建
///
AVStream * initVideoStream(AVFormatContext * oc) ;

///
///原始帧代码转换器
///
///这将将原始RGB帧转换为h所需的原始YUV帧.264编码
///
void CopyFrameData(uint8_t * src_frame);

///
///视频帧分配器
///
AVFrame * AllocPicture(PixelFormat pix_fmt,int width,int height);

///
///调试打印帮助函数
///
void print_sdp(AVFormatContext ** avc,int n);

///
///将帧写入流
///
int write_frame(AVFormatContext * fmt_ctx,const AVRational * time_base,AVStream * st ,AVPacket * pkt);

///
///初始化框架数据
///
void initFrame();

//输出流和输出容器(MPEG 2 TS)所需的格式化数据
AVOutputFormat *格式;
AVFormatContext * format_ctx;

//我们的视频流的结构容器
OutputStream stream;

AVIOContext * io_ctx;

std :: string streamFilename;

int frameWidth;
int frameHeight;
};

#endif

此块启动类声明。

  #includeFullMotionVideoStream.h

#include< stdexcept>
#include< iostream>

FMVStream :: FMVStream()
:格式(0),
格式_ctx(0),
流(),
io_ctx(0)
streamFilename(test.mpeg),
frameWidth(640),
frameHeight(480)
{
//注册所有格式和编解码器
av_register_all();
avcodec_register_all();

// Init networking
avformat_network_init();

//查找格式
this-> format = av_guess_format(mpegts,NULL,NULL);

//分配AVFormatContext
this-> format_ctx = avformat_alloc_context();

如果(!this-> format_ctx)
{
throw std :: runtime_error(avformat_alloc_context failed);
}

this-> format_ctx-> oformat = this->格式;
// sprintf_s(this-> format_ctx-> filename,sizeof(this-> format_ctx-> filename),%s,this-> streamFilename.c_str());

this-> stream.st = initVideoStream(this-> format_ctx);

this-> initFrame();

//分配AVIOContext
int ret = avio_open(& this-> io_ctx,this-> streamFilename.c_str(),AVIO_FLAG_WRITE);

if(ret!= 0)
{
throw std :: runtime_error(avio_open failed);
}

this-> format_ctx-> pb = this-> io_ctx;

//打印有关格式的一些调试信息
av_dump_format(this-> format_ctx,0,NULL,1);

//通过写容器头开始输出
avformat_write_header(this-> format_ctx,NULL);

AVFormatContext * ac [] = {this-> format_ctx};
print_sdp(ac,1);
}

FMVStream ::〜FMVStream()
{
av_write_trailer(this-> format_ctx);
avcodec_close(this-> stream.st-> codec);

avio_close(io_ctx);

avformat_free_context(this-> format_ctx);

av_frame_free(& this-> stream.frame);
av_free(this-> format);
}

AVFrame * FMVStream :: AllocPicture(PixelFormat pix_fmt,int width,int height)
{
//分配一个框架
AVFrame *框架= av_frame_alloc();

if(frame == nullptr)
{
throw std :: runtime_error(avcodec_alloc_frame failed);
}

if(av_image_alloc(frame-> data,frame-> linesize,width,height,pix_fmt,1)< 0)
{
throw std :: runtime_error(av_image_alloc failed);
}

frame-> width = width;
frame-> height = height;
frame-> format = pix_fmt;

return frame;
}

void FMVStream :: print_sdp(AVFormatContext ** avc,int n)
{
char sdp [2048];
av_sdp_create(avc,n,sdp,sizeof(sdp));
printf(SDP:\\\
%s\\\
,sdp);
fflush(stdout);
}

AVStream * FMVStream :: initVideoStream(AVFormatContext * oc)
{
AVStream * st = avformat_new_stream(oc,NULL);

if(st == nullptr)
{
std :: runtime_error(Could not alloc stream);
}

AVCodec * codec = avcodec_find_encoder(AV_CODEC_ID_H264);

if(codec == nullptr)
{
throw std :: runtime_error(找不到mpeg2 encoder);
}

st-> codec = avcodec_alloc_context3(codec);

st-> codec-> codec_id = AV_CODEC_ID_H264;
st-> codec-> codec_type = AVMEDIA_TYPE_VIDEO;
st-> codec-> bit_rate = 400000;

st-> codec-> width = this-> frameWidth;
st-> codec-> height = this-> frameHeight;

st-> time_base.num = 1;
st-> time_base.den = 30;

st-> codec-> framerate.num = 1;
st-> codec-> framerate.den = 30;

st-> codec-> max_b_frames = 2;
st-> codec-> gop_size = 12;
st-> codec-> pix_fmt = PIX_FMT_YUV420P;

st-> id = oc-> nb_streams - 1;

if(oc-> oformat-> flags& AVFMT_GLOBALHEADER)
{
st-> codec-> flags | = CODEC_FLAG_GLOBAL_HEADER;
}

//编解码器的选项设置
av_opt_set(st-> codec-> priv_data,profile,baseline,AV_OPT_SEARCH_CHILDREN);

if(avcodec_open2(st-> codec,codec,NULL)< 0)
{
throw std :: runtime_error(avcodec_open failed);
}

return st;
}

void FMVStream :: initFrame()
{
//分配一个tmp帧,将原始RGB数据转换为YUV编码
this-> stream.tmpFrame = this-> AllocPicture(PIX_FMT_RGB24,this-> frameWidth,this-> frameHeight);

//分配一个主框架
this-> stream.frame = this-> AllocPicture(PIX_FMT_YUV420P,this-> frameWidth,this-> frameHeight);
}

此块正在尝试从原始RGB转换为所需的YUV格式h.264编码

  void FMVStream :: CopyFrameData(uint8_t * data)
{
//用我们的原始RGB数据填充图像
//avpicture_alloc((AVPicture*)this->stream.tmpFrame,PIX_FMT_RGB24,this-> stream.st-> codec-> width,this-> stream .st-> codec->高度);

int numBytes = avpicture_get_size(PIX_FMT_RGB24,this-> stream.st-> codec-> width,this-> stream.st-> codec-> height);

uint8_t * buffer =(uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

avpicture_fill((AVPicture *)this-> stream.tmpFrame,buffer,PIX_FMT_RGB24,this-> stream.st-> codec-> width,this-> stream.st- > codec->高度); (int y = 0; y< this-> stream.st-> codec-> height; y ++)
{
for(int x = 0; x< this-> stream.st-> codec-> width; x ++)
{
int offset = 3 *(x + y * this-> stream.st- > codec->宽度);
this-> stream.tmpFrame-> data [0] [offset + 0] = data [x + y * this-> stream.st-> codec-> width]; // R
this-> stream.tmpFrame-> data [0] [offset + 1] = data [x + y * this-> stream.st-> codec-> width + 1 ]。 // G
this-> stream.tmpFrame-> data [0] [offset + 2] = data [x + y * this-> stream.st-> codec-> width + 2 ]。 // B
}
}

//使用sws将RGB帧转换为YUV帧Context
this-> stream.sws_ctx = sws_getContext(this - > stream.st-> codec-> width,this-> stream.st-> codec-> height,PIX_FMT_RGB32,this-> stream.st-> codec-> width,this - > stream.st-> codec-> height,PIX_FMT_YUV420P,SWS_FAST_BILINEAR,NULL,NULL,NULL);

//使用缩放函数将此原始帧转码为正确的类型
sws_scale(this-> stream.sws_ctx,this-> stream.tmpFrame->数据,此 - > stream.tmpFrame-> linesize,0,this-> stream.st-> codec-> height,this-> stream.frame-> data,this-> stream.frame-> ; LINESIZE);
}

这是将原始数据编码为h.264的块,然后发出Mpeg2 ts。我相信这个问题在这个方面。我可以在我的写框块中插入一个断点,看到框架正在被写入,但是在VLC中打开生成的文件会导致一个空白的视频。文件约2Mb。

  int FMVStream :: EncodeFrame(uint8_t * data)
{
AVCodecContext * c = this-> ; stream.st->编解码器;

AVRational一;
one.den = one.num = 1;

//检查我们是否想保留写入框架,我们可以将其更改为切换开关
if(av_compare_ts(this-> stream.next_pts,this-> stream .st-> codec-> time_base,10,one)> = 0)
{
this-> stream.frame = nullptr;
}
else
{
//将帧数据转换并加载到AVFrame结构体
CopyFrameData(data);
}

//设置时间戳步进
AVPacket pkt = {0};
av_init_packet(& pkt);
this-> stream.frame-> pts =(int64_t)((1.0 / this-> stream.st-> codec-> framerate.den)* 90000.0 * this->流。 next_pts ++);

int gotPacket,out_size,ret;

out_size = avcodec_encode_video2(c,& pkt,this-> stream.frame,& gotPacket);


if(gotPacket == 1)
{
ret = write_frame(this-> format_ctx,& c-> time_base,this-> stream.st,& pkt);
}
else
{
ret = 0;
}

if(ret< 0)
{
std :: cerr<< 写入视频帧错误<<的std :: ENDL;
}

av_free_packet(& pkt);

return((this-> stream.frame!= nullptr)|| gotPacket)? 0:1;
}

int FMVStream :: write_frame(AVFormatContext * fmt_ctx,const AVRational * time_base,AVStream * st,AVPacket * pkt)
{
/ *重新缩放输出包从编解码器到流时基的时间戳值* /
av_packet_rescale_ts(pkt,* time_base,st-> time_base);
pkt-> stream_index = st-> index;

return av_interleaved_write_frame(fmt_ctx,pkt);
}

void FMVStream :: setFrameWidth(const int width)
{
this-> frameWidth = width;
}

int FMVStream :: getFrameWidth()const
{
return this-> frameWidth;
}

void FMVStream :: setFrameHeight(const int height)
{
this-> frameHeight = height;
}

int FMVStream :: getFrameHeight()const
{
return this-> frameHeight;
}

void FMVStream :: setStreamAddress(const std :: string& address)
{
this-> streamFilename = address;
}

std :: string FMVStream :: getStreamAddress()const
{
return this-> streamFilename;
}

这是主要功能。

  #includeFullMotionVideoStream.h

#include< iostream>
#include< thread>
#include< chrono>

int main(int argc,char ** argv)
{
FMVStream * fmv = new FMVStream;

fmv-> setFrameWidth(640);
fmv-> setFrameHeight(480);

std :: cout<<< 串流地址:<< fmv-> getStreamAddress()<<<的std :: ENDL;

//创建我们的交替框架的黑白测试流功能
uint8_t white [640 * 480 * sizeof(uint8_t)* 3];
uint8_t black [640 * 480 * sizeof(uint8_t)* 3];

std :: memset(white,255,640 * 480 * sizeof(uint8_t)* 3);
std :: memset(black,0,640 * 480 * sizeof(uint8_t)* 3); (auto i = 0; i {
auto ret = fmv-> EncodeFrame(white);



if(ret!= 0)
{
std :: cerr<< 编码框架时出现问题:<< i<的std :: ENDL;
}

std :: this_thread :: sleep_for(std :: chrono :: milliseconds(10));
}

for(auto i = 0; i< 100; i ++)
{
auto ret = fmv-> EncodeFrame(black);

if(ret!= 0)
{
std :: cerr<< 编码框架时出现问题:<< i<的std :: ENDL;
}

std :: this_thread :: sleep_for(std :: chrono :: milliseconds(10));
}

删除fmv;
}

这是通过控制台/我的打印SDP功能的结果输出。 p>

  [libx264 @ 000000ac95f58440]使用cpu功能:MMX2 SSE2Fast SSSE3 SSE4.2 
AVX FMA3 AVX2 LZCNT BMI2
[libx264 @ 000000ac95f58440] profile限制基线,级别3.0
输出#0,mpegts,到'(null)':
流#0:0:视频:h264(libx264),yuv420p,640x480,q = 1--1,400 kb / s,30
tbn
SDP:
v = 0
o = - 0 0 IN IP4 127.0.0.1
s =否名称
t = 0 0
a =工具:libavformat 56.23.104
m =视频0 RTP / AVP 96
b = AS:400
a = rtpmap:96 H264 / 90000
a = fmtp:96 packetization-mode = 1
a = control:streamid = 0

流式地址:test.mpeg
[libx264 @ 000000ac95f58440] frame I:45 Avg QP :0.51 size:1315
[libx264 @ 000000ac95f58440] frame P:136 Avg QP:0.29 size:182
[libx264 @ 000000ac95f58440] mb I I16..4:99.7%0.0%0.3%
[libx264 @ 000000ac95f58440] mb P I16..4:0 0.1%0.0%0.1%P16..4:0.1%0.0
%0.0%0.0%0.0%跳过:99.7%
[libx264 @ 000000ac95f58440]最终速率因子:-68.99
[libx264 @ 000000ac95f58440]编码的y,uvDC,uvAC intra:0.5%0.5%0.5%inter:0.0%
0.1%0.1%
[libx264 @ 000000ac95f58440] i16 v,h,dc,p:96%0 %3%0%
[libx264 @ 000000ac95f58440] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu:1%10%85%0%3%
0%1 %0%0%
[libx264 @ 000000ac95f58440] i8c dc,h,v,p:100%0%0%0%
[libx264 @ 000000ac95f58440] ref P L0:46.8%25.2%28.0%
[libx264 @ 000000ac95f58440] kb / s:0.03

我知道可能有很多问题通过这个程序,我非常喜欢FFMPEG和多媒体编程。我通过搜索google / stack overflow找到了很多代码,以达到这一点。该文件具有很好的尺寸,但长度为0.04告诉我,我的时间戳必须在帧/ pkts之间被打破,但我不确定如何解决这个问题。



我尝试使用ffmpeg -i使用ffmpeg.exe检查文件,并输出到常规TS。似乎我的代码的工作原理比我原本打算的更多,我只是想输出一堆所有的白框。

  ffmpeg -i test.mpeg test.ts 
ffmpeg版本N-70125-g6c9537b版权所有(c)2000-2015 FFmpeg开发人员
内置gcc 4.9.2(GCC)
配置: disable-static --enable-shared --enable-gpl --enable-version3
--disable-w32threads --enable-avisynth --enable-bzlib --enable-fontconfig --ena
-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --e
nable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme --enable -lib
gsm --enable-libilbc --enable-libmodplug --enable-libmp3lame --enable-libopencor
e-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable- libopus --enabl
e-librtmp --enable-libschroedinger --enable-libsoxr --enable-libspeex --enable-l
ibtheora --enable-libtwolame --enable-libvidstab --enable- libvo-aacenc --enable-
libvo-amrwben c --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-l
ibwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --ena
ble-lzma --enable-decklink --enable-zlib
libavutil 54. 19.100 / 54. 19.100
libavcodec 56. 26.100 / 56. 26.100
libavformat 56. 23.104 / 56 。23.104
libavdevice 56. 4.100 / 56. 4.100
libavfilter 5. 11.101 / 5. 11.101
libswscale 3. 1.101 / 3. 1.101
libswresample 1. 1.100 / 1. 1.100
libpostproc 53. 3.100 / 53. 3.100
输入#0,mpegts,from'test.mpeg':
持续时间:00:00:00.04,开始:0.000000,比特率:24026 kb / s
程序1
元数据:
service_name:Service01
service_provider:FFmpeg
流#0:0 [0x100]:视频:h264(约束基线) ] [0] [0] [0] / 0x00
1B),yuv420p,640x480,25 fps,25 tbr,90k tbn,50 tbc
文件'test.ts'已经存在。覆盖? [y / N] y
输出#0,mpegts到'test.ts':
元数据:
编码器:Lavf56.23.104
流#0:0:视频: mpeg2video,yuv420p,640x480,q = 2-31,200 kb / s,25 fps,
90k tbn,25 tbc
元数据:
编码器:Lavc56.26.100 mpeg2video
流映射:
流#0:0 - > #0:0(h264(native))> mpeg2video(native))
按[q]停止,[?]帮助
frame = 3 fps = 0.0 q = 2.0 Lsize = 9kB time = 00:00:00.08 bitrate = 883.6kbits /
s dup = 0 drop = 178
video:7kB audio:0kB subtitle:0kB其他流:0kB全局头文件:0kB muxing ove
rhead: 22.450111%


解决方案

avpicture_fill 不会做你认为的事情。它不使用来自ptr的数据填充图片,作为源,它使用ptr作为目的地填充图片。所以基本上,你在编码之前要清除图像。


Class prototype is as follows:

#ifndef _FULL_MOTION_VIDEO_STREAM_H_
#define _FULL_MOTION_VIDEO_STREAM_H_

#include <memory>
#include <string>

#ifndef INT64_C
# define INT64_C(c) (c ## LL)
# define UINT64_C(c) (c ## ULL)
#endif

extern "C"
{
    #include "libavutil/opt.h"
    #include "libavcodec/avcodec.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/common.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mathematics.h"
    #include "libavutil/samplefmt.h"
    #include "libavformat/avformat.h"

    #include <libavutil/timestamp.h>
    #include <libswscale/swscale.h>
    #include <libswresample/swresample.h>
}

class FMVStream
{
    public:
        // Per-stream bookkeeping: the container stream plus the frames and
        // scaler used to feed the encoder.
        struct OutputStream
        {
            // Zero-initialize every member; the real setup is performed by
            // FMVStream's constructor and initFrame().
            OutputStream() :
            st(0),
            next_pts(0),
            samples_count(0),
            frame(0),
            tmpFrame(0),
            sws_ctx(0)
            {
            }

            // Video stream inside the output container (created via
            // avformat_new_stream, so owned by the AVFormatContext).
            AVStream *st;

            /* pts of the next frame that will be generated */
            int64_t next_pts;
            int samples_count; // unused for this video-only stream

            AVFrame *frame;    // encoder-side frame (planar YUV420P)
            AVFrame *tmpFrame; // staging frame for incoming packed RGB data

            struct SwsContext *sws_ctx; // RGB -> YUV conversion context
        };

        ///
        /// Constructor
        ///
        /// Registers codecs, builds the mpegts muxer context, opens the
        /// output target and writes the container header.
        ///
        FMVStream();

        ///
        /// Destructor
        ///
        /// Writes the container trailer and releases FFmpeg resources.
        ///
        ~FMVStream();

        ///
        /// Frame encoder helper function
        ///
        /// Encodes a raw RGB frame into the transport stream
        ///
        /// @param frame packed RGB pixel data, frameWidth*frameHeight*3 bytes
        /// @return 0 while frames/packets are being produced, 1 once done
        ///
        int EncodeFrame(uint8_t* frame);

        ///
        /// Frame width setter
        ///
        /// NOTE(review): only effective before the codec is opened -- the
        /// constructor already fixes the encoder size from frameWidth/Height.
        ///
        void setFrameWidth(int width);

        ///
        /// Frame width getter
        ///
        int getFrameWidth() const;

        ///
        /// Frame height setter
        ///
        /// NOTE(review): same caveat as setFrameWidth().
        ///
        void setFrameHeight(int height);

        ///
        /// Frame height getter
        ///
        int getFrameHeight() const;

        ///
        /// Stream address setter (filename or URL passed to avio_open)
        ///
        void setStreamAddress(const std::string& address);

        ///
        /// Stream address getter
        ///
        std::string getStreamAddress() const;

    private:

        ///
        /// Video Stream creation
        ///
        AVStream* initVideoStream(AVFormatContext* oc);

        ///
        /// Raw frame transcoder
        ///
        /// This will convert the raw RGB frame to a raw YUV frame necessary for h.264 encoding
        ///
        void CopyFrameData(uint8_t* src_frame);

        ///
        /// Video frame allocator
        ///
        /// NOTE(review): PixelFormat / PIX_FMT_* are the pre-2.0 FFmpeg
        /// names; newer releases use AVPixelFormat / AV_PIX_FMT_*.
        ///
        AVFrame* AllocPicture(PixelFormat pix_fmt, int width, int height);

        ///
        /// Debug print helper function
        ///
        void print_sdp(AVFormatContext **avc, int n);

        ///
        /// Write the frame to the stream
        ///
        int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt);

        ///
        /// initialize the frame data
        ///
        void initFrame();

        // formatting data needed for output streaming and the output container (MPEG 2 TS)
        AVOutputFormat* format;   // static muxer description from av_guess_format
        AVFormatContext* format_ctx;

        // structure container for our video stream
        OutputStream stream;

        AVIOContext* io_ctx;      // I/O target opened by avio_open

        std::string streamFilename;

        int frameWidth;
        int frameHeight;
};

#endif

This block starts the class declaration.

#include "FullMotionVideoStream.h"

#include <stdexcept>
#include <iostream>

FMVStream::FMVStream()
    : format(0),
    format_ctx(0),
    stream(),
    io_ctx(0),
    streamFilename("test.mpeg"),
    frameWidth(640),
    frameHeight(480)
{
    // Register all formats and codecs (required before any muxer/codec lookup)
    av_register_all();
    avcodec_register_all();

    // Init networking (harmless for file output, needed for URL targets)
    avformat_network_init();

    // Find the MPEG-2 transport stream muxer
    this->format = av_guess_format("mpegts", NULL, NULL);

    if (!this->format)
    {
        // Previously unchecked: a NULL result would be assigned to oformat below.
        throw std::runtime_error("av_guess_format failed");
    }

    // allocate the AVFormatContext
    this->format_ctx = avformat_alloc_context();

    if (!this->format_ctx)
    {
        throw std::runtime_error("avformat_alloc_context failed");
    }

    this->format_ctx->oformat = this->format;
    //sprintf_s(this->format_ctx->filename, sizeof(this->format_ctx->filename), "%s", this->streamFilename.c_str());

    this->stream.st = initVideoStream(this->format_ctx);

    this->initFrame();

    // Allocate AVIOContext
    int ret = avio_open(&this->io_ctx, this->streamFilename.c_str(), AVIO_FLAG_WRITE);

    if (ret != 0)
    {
        throw std::runtime_error("avio_open failed");
    }

    this->format_ctx->pb = this->io_ctx;

    // Print some debug info about the format; pass the real filename so the
    // dump no longer reports "(null)".
    av_dump_format(this->format_ctx, 0, this->streamFilename.c_str(), 1);

    // Begin the output by writing the container header. The return value was
    // previously ignored, which hid muxer setup failures.
    if (avformat_write_header(this->format_ctx, NULL) < 0)
    {
        throw std::runtime_error("avformat_write_header failed");
    }

    AVFormatContext* ac[] = { this->format_ctx };
    print_sdp(ac, 1);
}

FMVStream::~FMVStream()
{
    // Finish the container first: the trailer must be written while the
    // muxer and I/O context are still alive.
    av_write_trailer(this->format_ctx);
    avcodec_close(this->stream.st->codec);

    avio_close(io_ctx);

    avformat_free_context(this->format_ctx);

    // Frames were built with av_frame_alloc() + av_image_alloc(): the pixel
    // buffers are not refcounted, so they must be freed explicitly before the
    // frame structs. This also fixes the leak of tmpFrame, which was never
    // released at all. (stream.frame may be null if encoding was flushed.)
    if (this->stream.frame)
    {
        av_freep(&this->stream.frame->data[0]);
    }
    av_frame_free(&this->stream.frame);

    if (this->stream.tmpFrame)
    {
        av_freep(&this->stream.tmpFrame->data[0]);
    }
    av_frame_free(&this->stream.tmpFrame);

    // The scaler context created during conversion was previously leaked.
    sws_freeContext(this->stream.sws_ctx);

    // NOTE: this->format is intentionally NOT freed. av_guess_format()
    // returns a pointer to a statically allocated AVOutputFormat owned by
    // libavformat; calling av_free() on it (as the original did) is invalid.
}

AVFrame* FMVStream::AllocPicture(PixelFormat pix_fmt, int width, int height)
{
    // Allocate a frame plus a tightly packed (align = 1) image buffer of the
    // requested format/size. Throws std::runtime_error on failure.
    AVFrame* frame = av_frame_alloc();

    if (frame == nullptr)
    {
        throw std::runtime_error("avcodec_alloc_frame failed");
    }

    if (av_image_alloc(frame->data, frame->linesize, width, height, pix_fmt, 1) < 0)
    {
        // Release the frame struct before throwing -- it leaked here before.
        av_frame_free(&frame);
        throw std::runtime_error("av_image_alloc failed");
    }

    frame->width = width;
    frame->height = height;
    frame->format = pix_fmt;

    return frame;
}

void FMVStream::print_sdp(AVFormatContext **avc, int n)
{
    // Build and dump an SDP description of the session for debugging.
    char sdp[2048] = { 0 };

    // av_sdp_create() returns 0 on success; on failure the buffer contents
    // are unspecified, so skip printing (the result was previously ignored
    // and an uninitialized buffer could have been printed).
    if (av_sdp_create(avc, n, sdp, sizeof(sdp)) == 0)
    {
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
    }
}

AVStream* FMVStream::initVideoStream(AVFormatContext *oc)
{
    // Create the container stream and configure + open the h.264 encoder.
    AVStream* st = avformat_new_stream(oc, NULL);

    if (st == nullptr)
    {
        // The original constructed this exception but never threw it.
        throw std::runtime_error("Could not alloc stream");
    }

    AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);

    if (codec == nullptr)
    {
        // Message corrected: this looks up the h.264 encoder, not mpeg2.
        throw std::runtime_error("couldn't find h.264 encoder");
    }

    st->codec = avcodec_alloc_context3(codec);

    st->codec->codec_id = AV_CODEC_ID_H264;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->bit_rate = 400000;

    st->codec->width = this->frameWidth;
    st->codec->height = this->frameHeight;

    // One tick = 1/30 s (30 fps target).
    st->time_base.num = 1;
    st->time_base.den = 30;

    // The encoder context needs its own time_base; it was previously left
    // unset, so EncodeFrame's av_compare_ts/rescale operated on a bogus
    // (0/1) timebase.
    st->codec->time_base.num = 1;
    st->codec->time_base.den = 30;

    // NOTE(review): framerate as num/den reads as 1/30 fps here; 30 fps
    // would normally be {30, 1}. Left unchanged because EncodeFrame derives
    // its 90 kHz tick step from framerate.den == 30 -- confirm and fix both
    // together.
    st->codec->framerate.num = 1;
    st->codec->framerate.den = 30;

    st->codec->max_b_frames = 2;
    st->codec->gop_size = 12;
    st->codec->pix_fmt = PIX_FMT_YUV420P;

    st->id = oc->nb_streams - 1;

    // Some muxers (mpegts included when streaming) want codec extradata in
    // the container header rather than in-band.
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
    {
        st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    // x264 private option: request the baseline profile.
    av_opt_set(st->codec->priv_data, "profile", "baseline", AV_OPT_SEARCH_CHILDREN);

    if (avcodec_open2(st->codec, codec, NULL) < 0)
    {
        throw std::runtime_error("avcodec_open failed");
    }

    return st;
}

void FMVStream::initFrame()
{
    // Allocate a tmp frame for converting our raw RGB data to YUV for encoding
    this->stream.tmpFrame = this->AllocPicture(PIX_FMT_RGB24, this->frameWidth, this->frameHeight);

    // Allocate a main frame
    this->stream.frame = this->AllocPicture(PIX_FMT_YUV420P, this->frameWidth, this->frameHeight);
}

This block is attempting to convert from the raw RGB to our needed YUV format for h.264 encoding.

void FMVStream::CopyFrameData(uint8_t* data)
{
    // fill image with our raw RGB data
    //avpicture_alloc((AVPicture*)this->stream.tmpFrame, PIX_FMT_RGB24, this->stream.st->codec->width, this->stream.st->codec->height);

    int numBytes = avpicture_get_size(PIX_FMT_RGB24, this->stream.st->codec->width, this->stream.st->codec->height);

    uint8_t* buffer = (uint8_t*) av_malloc(numBytes * sizeof(uint8_t));

    avpicture_fill((AVPicture*)this->stream.tmpFrame, buffer, PIX_FMT_RGB24, this->stream.st->codec->width, this->stream.st->codec->height);

    for (int y = 0; y < this->stream.st->codec->height; y++)
    {
        for (int x = 0; x < this->stream.st->codec->width; x++)
        {
            int offset = 3 * (x + y * this->stream.st->codec->width);
            this->stream.tmpFrame->data[0][offset + 0] = data[x + y * this->stream.st->codec->width]; // R
            this->stream.tmpFrame->data[0][offset + 1] = data[x + y * this->stream.st->codec->width + 1]; // G
            this->stream.tmpFrame->data[0][offset + 2] = data[x + y * this->stream.st->codec->width + 2]; // B
        }
    }

    // convert the RGB frame to a YUV frame using the sws Context
    this->stream.sws_ctx = sws_getContext(this->stream.st->codec->width, this->stream.st->codec->height, PIX_FMT_RGB32, this->stream.st->codec->width, this->stream.st->codec->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

    // use the scale function to transcode this raw frame to the correct type
    sws_scale(this->stream.sws_ctx, this->stream.tmpFrame->data, this->stream.tmpFrame->linesize, 0, this->stream.st->codec->height, this->stream.frame->data, this->stream.frame->linesize);
}

This is the block that encodes the raw data to h.264, and then send it out the Mpeg2 ts. I believe the problem lies within this block. I can put a break point in my write frame block and see that frames are being written, however, opening the resulting file in VLC results in a blank video. The file is approx 2Mb.

int FMVStream::EncodeFrame(uint8_t* data)
{
    AVCodecContext* c = this->stream.st->codec;

    AVRational one;
    one.den = one.num = 1;

    // check to see if we want to keep writing frames we can probably change this to a toggle switch
    if (av_compare_ts(this->stream.next_pts, this->stream.st->codec->time_base, 10, one) >= 0)
    {
        this->stream.frame = nullptr;
    }
    else
    {
        // Convert and load the frame data into the AVFrame struct
        CopyFrameData(data);
    }

    // setup the timestamp stepping
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);
    this->stream.frame->pts = (int64_t)((1.0 / this->stream.st->codec->framerate.den) * 90000.0 * this->stream.next_pts++);

    int gotPacket, out_size, ret;

    out_size = avcodec_encode_video2(c, &pkt, this->stream.frame, &gotPacket);


    if (gotPacket == 1)
    {
        ret = write_frame(this->format_ctx, &c->time_base, this->stream.st, &pkt);
    }
    else
    {
        ret = 0;
    }

    if (ret < 0)
    {
        std::cerr << "Error writing video frame" << std::endl;
    }

    av_free_packet(&pkt);

    return ((this->stream.frame != nullptr) || gotPacket) ? 0 : 1;
}

int FMVStream::write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    // Tag the packet with its stream index, then move its timestamps from
    // the encoder timebase into the stream (muxer) timebase before handing
    // it to the interleaving writer.
    pkt->stream_index = st->index;
    av_packet_rescale_ts(pkt, *time_base, st->time_base);

    return av_interleaved_write_frame(fmt_ctx, pkt);
}

void FMVStream::setFrameWidth(const int width)
{
    this->frameWidth = width;
}

int FMVStream::getFrameWidth() const
{
    return this->frameWidth;
}

void FMVStream::setFrameHeight(const int height)
{
    this->frameHeight = height;
}

int FMVStream::getFrameHeight() const
{
    return this->frameHeight;
}

void FMVStream::setStreamAddress(const std::string& address)
{
    this->streamFilename = address;
}

std::string FMVStream::getStreamAddress() const
{
    return this->streamFilename;
}

Here is the Main function.

#include "FullMotionVideoStream.h"

#include <cstdint>
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>

int main(int argc, char** argv)
{
    // Hold the stream by value: RAII guarantees the destructor (which writes
    // the container trailer) runs even if an exception escapes, unlike the
    // original raw new/delete pair.
    FMVStream fmv;

    fmv.setFrameWidth(640);
    fmv.setFrameHeight(480);

    std::cout << "Streaming Address: " << fmv.getStreamAddress() << std::endl;

    // Two solid RGB24 test frames, alternating white then black. Each buffer
    // is 640*480*3 = 921,600 bytes; the original placed both on the stack
    // (~1.8 MB), enough to overflow the default 1 MB Windows stack, and used
    // std::memset without including <cstring>. Vectors fix both.
    const int frameBytes = 640 * 480 * 3;
    std::vector<uint8_t> white(frameBytes, 255);
    std::vector<uint8_t> black(frameBytes, 0);

    for (int i = 0; i < 100; i++)
    {
        if (fmv.EncodeFrame(white.data()) != 0)
        {
            std::cerr << "There was a problem encoding the frame: " << i << std::endl;
        }

        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }

    for (int i = 0; i < 100; i++)
    {
        if (fmv.EncodeFrame(black.data()) != 0)
        {
            std::cerr << "There was a problem encoding the frame: " << i << std::endl;
        }

        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
}

Here is the resultant output via the console / my print SDP function.

[libx264 @ 000000ac95f58440] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2
AVX FMA3 AVX2 LZCNT BMI2
[libx264 @ 000000ac95f58440] profile Constrained Baseline, level 3.0
Output #0, mpegts, to '(null)':
    Stream #0:0: Video: h264 (libx264), yuv420p, 640x480, q=-1--1, 400 kb/s, 30
tbn
SDP:
v=0
o=- 0 0 IN IP4 127.0.0.1
s=No Name
t=0 0
a=tool:libavformat 56.23.104
m=video 0 RTP/AVP 96
b=AS:400
a=rtpmap:96 H264/90000
a=fmtp:96 packetization-mode=1
a=control:streamid=0

Streaming Address: test.mpeg
[libx264 @ 000000ac95f58440] frame I:45    Avg QP: 0.51  size:  1315
[libx264 @ 000000ac95f58440] frame P:136   Avg QP: 0.29  size:   182
[libx264 @ 000000ac95f58440] mb I  I16..4: 99.7%  0.0%  0.3%
[libx264 @ 000000ac95f58440] mb P  I16..4:  0.1%  0.0%  0.1%  P16..4:  0.1%  0.0
%  0.0%  0.0%  0.0%    skip:99.7%
[libx264 @ 000000ac95f58440] final ratefactor: -68.99
[libx264 @ 000000ac95f58440] coded y,uvDC,uvAC intra: 0.5% 0.5% 0.5% inter: 0.0%
 0.1% 0.1%
[libx264 @ 000000ac95f58440] i16 v,h,dc,p: 96%  0%  3%  0%
[libx264 @ 000000ac95f58440] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu:  1% 10% 85%  0%  3%
 0%  1%  0%  0%
[libx264 @ 000000ac95f58440] i8c dc,h,v,p: 100%  0%  0%  0%
[libx264 @ 000000ac95f58440] ref P L0: 46.8% 25.2% 28.0%
[libx264 @ 000000ac95f58440] kb/s:0.03

I know there are probably many issues with this program, I am very new with FFMPEG and multimedia programming in general. Ive used many pieces of code found through searching google/ stack overflow to get to this point as is. The file has a good size but comes up as length 0.04 tells me that my time stamping must be broken between the frames / pkts, but I am unsure on how to fix this issue.

I tried inspecting the file with ffmpeg.exe using ffmpeg -i and outputting to a regular TS. It seems my code works more then I originally intended however, I am simply trying to output a bunch of all white frames.

ffmpeg -i test.mpeg test.ts
ffmpeg version N-70125-g6c9537b Copyright (c) 2000-2015 the FFmpeg developers
  built with gcc 4.9.2 (GCC)
  configuration: --disable-static --enable-shared --enable-gpl --enable-version3
 --disable-w32threads --enable-avisynth --enable-bzlib --enable-fontconfig --ena
ble-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --e
nable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme --enable-lib
gsm --enable-libilbc --enable-libmodplug --enable-libmp3lame --enable-libopencor
e-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enabl
e-librtmp --enable-libschroedinger --enable-libsoxr --enable-libspeex --enable-l
ibtheora --enable-libtwolame --enable-libvidstab --enable-libvo-aacenc --enable-
libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-l
ibwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --ena
ble-lzma --enable-decklink --enable-zlib
  libavutil      54. 19.100 / 54. 19.100
  libavcodec     56. 26.100 / 56. 26.100
  libavformat    56. 23.104 / 56. 23.104
  libavdevice    56.  4.100 / 56.  4.100
  libavfilter     5. 11.101 /  5. 11.101
  libswscale      3.  1.101 /  3.  1.101
  libswresample   1.  1.100 /  1.  1.100
  libpostproc    53.  3.100 / 53.  3.100
Input #0, mpegts, from 'test.mpeg':
  Duration: 00:00:00.04, start: 0.000000, bitrate: 24026 kb/s
  Program 1
    Metadata:
      service_name    : Service01
      service_provider: FFmpeg
    Stream #0:0[0x100]: Video: h264 (Constrained Baseline) ([27][0][0][0] / 0x00
1B), yuv420p, 640x480, 25 fps, 25 tbr, 90k tbn, 50 tbc
File 'test.ts' already exists. Overwrite ? [y/N] y
Output #0, mpegts, to 'test.ts':
  Metadata:
    encoder         : Lavf56.23.104
    Stream #0:0: Video: mpeg2video, yuv420p, 640x480, q=2-31, 200 kb/s, 25 fps,
90k tbn, 25 tbc
    Metadata:
      encoder         : Lavc56.26.100 mpeg2video
Stream mapping:
  Stream #0:0 -> #0:0 (h264 (native) -> mpeg2video (native))
Press [q] to stop, [?] for help
frame=    3 fps=0.0 q=2.0 Lsize=       9kB time=00:00:00.08 bitrate= 883.6kbits/
s dup=0 drop=178
video:7kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing ove
rhead: 22.450111%
解决方案

avpicture_fill does not do what you think it does. It does not fill the picture using data from ptr, as the source, It fills the picture using ptr as the destination. So basically, you are clearing the image before you encode it.

这篇关于FFMPEG C api h.264编码/ MPEG2 ts流媒体问题的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持!

1403页,肝出来的..

09-07 01:16