// media.cpp : Defines the entry point for the console application.
// References:
// https://wenku.baidu.com/view/e910c474c5da50e2524d7fb4.html
// https://blog.csdn.net/leixiaohua1020/article/details/10528443
// Mainly based on \examples\demuxing_decoding.c from the FFmpeg sources.

#include "stdafx.h"
extern "C"
{
#include <stdio.h> #include "libavformat/avformat.h"
#include "libavutil/dict.h" #include <libavcodec/avcodec.h>
#include <libavformat/avformat.h> #include <libswscale/swscale.h>
}
#pragma comment(lib,"lib/avcodec.lib")
#pragma comment(lib,"lib/avformat.lib")
#pragma comment(lib,"lib/avutil")
//#pragma comment(lib,"lib/SDLmain.lib")
#pragma comment(lib,"lib/avdevice.lib")
#pragma comment(lib,"lib/avfilter.lib")
#pragma comment(lib,"lib/postproc.lib") #pragma comment(lib,"lib/swresample.lib")
#pragma comment(lib,"lib/swscale.lib") #pragma comment(lib, "sdl2.lib")
#include <iostream> #include <SDL2/SDL.h>
#include <SDL2/SDL_thread.h> #ifdef __MINGW32__
#undef main
#endif #include <stdio.h> int
randomInt(int min, int max)
{
return min + rand() % (max - min + );
}
#define MAX_AUDIO_FRAME_SIZE 192000
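// 192000 bytes is roughly 1 second of 48 kHz, 32-bit audio; this is the usual value in FFmpeg/SDL tutorial code.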
SDL_AudioSpec wanted_spec, spec;

int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
static AVFrame frame;

int len1, data_size = 0;

for (;;) {
while (audio_pkt_size > 0) {
int got_frame = 0;
len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
if (len1 < 0) {
// If error, skip frame.
audio_pkt_size = 0;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
if (got_frame) {
data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels, frame.nb_samples, aCodecCtx->sample_fmt, 1); // align = 1: no extra padding
memcpy(audio_buf, frame.data[0], data_size);
}
if (data_size <= 0) {
// No data yet, get more frames.
continue;
}
// We have data, return it and come back for more later.
return data_size;
}
if (pkt.data) {
av_packet_unref(&pkt);
}
//if (packet_queue_get(&audioq, &pkt, 1) < 0)
{
// No packet queue feeds this decoder in this version, so this block always runs
// and the audio callback falls back to silence.
return -1;
}
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}

void audio_callback(void *userdata, Uint8 *stream, int len) {
AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int len1, audio_size;

static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
static unsigned int audio_buf_size = 0;
static unsigned int audio_buf_index = 0;

while (len > 0) {
if (audio_buf_index >= audio_buf_size) {
// We have already sent all our data; get more.
audio_size = audio_decode_frame(aCodecCtx, audio_buf, audio_buf_size);
if (audio_size < 0) {
// If error, output silence.
audio_buf_size = 1024; // arbitrary?
memset(audio_buf, 0, audio_buf_size);
} else {
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
len1 = audio_buf_size - audio_buf_index;
if (len1 > len) {
len1 = len;
}
memcpy(stream, (uint8_t *) audio_buf + audio_buf_index, len1);
len -= len1;
stream += len1;
audio_buf_index += len1;
}
}

int _tmain(int argc, char *argv[]) {
AVFormatContext *pFormatCtx = NULL;
int i, videoStream,audioStream;
AVCodecContext *pCodecCtx = NULL,*pCodecAudioCtx = NULL;
AVCodec *pCodec = NULL,*pCodecAudio = NULL;
AVFrame *pFrame = NULL;
AVPacket packet;
int frameFinished;
//float aspect_ratio;
AVDictionary *optionsDict = NULL, *audioOptionsDict = NULL;
struct SwsContext *sws_ctx = NULL;
//SDL_CreateTexture();
SDL_Texture *bmp = NULL;
SDL_Window *screen = NULL;
SDL_Rect rect;
SDL_Event event;
/*
if(argc < 2) {
fprintf(stderr, "Usage: test <file>\n");
exit(1);
}*/
// Register all formats and codecs
av_register_all();

if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}

// Open video file
if(avformat_open_input(&pFormatCtx, "D://shitu.mkv"/* argv[1] "D://3s.mp4"*/, NULL, NULL)!=0)
return -1; // Couldn't open file

if(avformat_find_stream_info(pFormatCtx, NULL)<0)
return -1; // Couldn't find stream information

// Dump information about the file onto standard error
av_dump_format(pFormatCtx, 0, argv[1], 0);

// Find the first video and audio streams
videoStream=-1;
audioStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++) {
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && videoStream<0) {
// Take the first video stream; keep scanning so the audio stream is also found.
videoStream=i;
}
else if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audioStream<0) {
audioStream=i;
}
}
if(videoStream==-1||audioStream==-1)
return -1; // Didn't find both a video and an audio stream

// Get a pointer to the codec context for each stream
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
pCodecAudioCtx=pFormatCtx->streams[audioStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}

pCodecAudio=avcodec_find_decoder(pCodecAudioCtx->codec_id);
if(pCodecAudio==NULL) {
fprintf(stderr, "Unsupported audio codec!\n");
return -1; // Codec not found
}

// Open codec
if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
return -1; // Could not open codec

//--
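// wanted_spec must be populated before SDL_OpenAudio below; a minimal setup following
// the common FFmpeg/SDL2 tutorial pattern (the 1024-sample buffer and S16 sample format
// here are assumptions, not taken from the referenced sources):
wanted_spec.freq = pCodecAudioCtx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = pCodecAudioCtx->channels;
wanted_spec.silence = 0;
wanted_spec.samples = 1024;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = pCodecAudioCtx;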
if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
return -1;
}

avcodec_open2(pCodecAudioCtx, pCodecAudio, &audioOptionsDict);
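// The audio device stays paused until SDL_PauseAudio(0) is called, so as written only
// video is rendered. Enabling the callback (which currently only yields silence, since
// no packet queue feeds audio_decode_frame) would look like:
//SDL_PauseAudio(0);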
//--

pFrame=av_frame_alloc();

AVFrame* pFrameYUV = av_frame_alloc();
if( pFrameYUV == NULL )
return -1;
//--
AVFrame* pAudioFrame =av_frame_alloc();
//--
screen = SDL_CreateWindow("My Game Window",SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
pCodecCtx->width, pCodecCtx->height,
SDL_WINDOW_FULLSCREEN | SDL_WINDOW_OPENGL);
if(!screen) {
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}

SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, 0);

bmp = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
pCodecCtx->width, pCodecCtx->height);

sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
SWS_BILINEAR, NULL, NULL, NULL);

int numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width,
pCodecCtx->height);
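// buffer provides the raw storage for pFrameYUV; avpicture_fill below points the
// frame's data[] planes and linesize[] values into this single allocation.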
uint8_t* buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
avpicture_fill((AVPicture *)pFrameYUV, buffer, AV_PIX_FMT_YUV420P,
pCodecCtx->width, pCodecCtx->height);

i = 0;
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx->width;
rect.h = pCodecCtx->height;

while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

// Did we get a video frame?
if(frameFinished) {
sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0,
pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

// pitch: the number of bytes one row of YUV data occupies
SDL_UpdateTexture( bmp, &rect, pFrameYUV->data[0], pFrameYUV->linesize[0] );
SDL_RenderClear( renderer );
SDL_RenderCopy( renderer, bmp, &rect, &rect );
SDL_RenderPresent( renderer );
}
SDL_Delay(10); // crude throttle; the 10 ms value here is an assumption
//Sleep(500);
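// A fixed delay only throttles the loop; correct playback speed would need frame
// timing derived from the packet/frame pts values.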
}else if (packet.stream_index == audioStream) {
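// Audio packets are recognized here but never decoded or queued in this version.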
}

// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
SDL_PollEvent(&event);
switch(event.type) {
case SDL_QUIT:
SDL_Quit();
exit(0);
break;
default:
break;
}
}

SDL_DestroyTexture(bmp);

// Free the YUV frame
av_free(pFrame);
av_free(pFrameYUV);
// Close the codec
avcodec_close(pCodecCtx);

// Close the video file
avformat_close_input(&pFormatCtx);

return 0;
}

https://www.cnblogs.com/lgh1992314/p/5834639.html


When rewatching the video, stay focused and avoid stray thoughts; distraction easily slows things down. One thing at a time per session.

05-02 10:14