I am decoding video to get frames for some post-processing. I am using OpenCV CUDA, so I really need OpenCV CUDA's video reader. However, I realized:
In NVIDIA Video Codec SDK 8.2.15 it says:
But to use "createVideoReader" in OpenCV CUDA, we need to link "dynlink_nvcuvid.h", which contains the following:
tcuvidCreateVideoSource *cuvidCreateVideoSource;
tcuvidCreateVideoSourceW *cuvidCreateVideoSourceW;
tcuvidDestroyVideoSource *cuvidDestroyVideoSource;
tcuvidSetVideoSourceState *cuvidSetVideoSourceState;
tcuvidGetVideoSourceState *cuvidGetVideoSourceState;
tcuvidGetSourceVideoFormat *cuvidGetSourceVideoFormat;
tcuvidGetSourceAudioFormat *cuvidGetSourceAudioFormat;
Are there any video decoding updates in OpenCV? How can we replace "dynlink_nvcuvid.h" with FFmpeg in OpenCV? I am trying to use NVIDIA video decoding, but I am still stuck on getting the decoded video frames into a GpuMat.
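For reference, this is roughly the usage I am after with "createVideoReader" (just a sketch, assuming the cudacodec module can actually be built; "in.mp4" is a placeholder file name):

#include <opencv2/core/cuda.hpp>
#include <opencv2/cudacodec.hpp>

cv::cuda::GpuMat d_frame;
cv::Ptr<cv::cudacodec::VideoReader> d_reader = cv::cudacodec::createVideoReader("in.mp4"); // placeholder path
while (d_reader->nextFrame(d_frame))
{
    // d_frame now holds the decoded frame directly on the GPU
}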
I am using:
Ubuntu 18
CUDA 9.2
OpenCV 3.4.2
For the CUDA part, I used the following code for decoding:
FFmpegDemuxer demuxer(szInFilePath);
NvDecoder dec(cuContext, demuxer.GetWidth(), demuxer.GetHeight(), true, FFmpeg2NvCodecId(demuxer.GetVideoCodec()));
FramePresenterGL presenter(cuContext, demuxer.GetWidth(), demuxer.GetHeight());
uint8_t *dpFrame = 0;
int nPitch = 0;
int nVideoBytes = 0, nFrameReturned = 0, nFrame = 0;
uint8_t *pVideo = NULL;
uint8_t **ppFrame;
do {
    demuxer.Demux(&pVideo, &nVideoBytes);
    dec.Decode(pVideo, nVideoBytes, &ppFrame, &nFrameReturned);
    if (!nFrame && nFrameReturned)
        LOG(INFO) << dec.GetVideoInfo();
    for (int i = 0; i < nFrameReturned; i++) {
        presenter.GetDeviceFrameBuffer(&dpFrame, &nPitch);
        if (dec.GetBitDepth() == 8)
            Nv12ToBgra32((uint8_t *)ppFrame[i], dec.GetWidth(), (uint8_t *)dpFrame, nPitch, dec.GetWidth(), dec.GetHeight());
        else
            P016ToBgra32((uint8_t *)ppFrame[i], 2 * dec.GetWidth(), (uint8_t *)dpFrame, nPitch, dec.GetWidth(), dec.GetHeight());
        cv::Size sz(dec.GetWidth(), dec.GetHeight());
        cv::Mat mat1(sz, CV_8UC3, ppFrame);
        cv::imshow("test", mat1);
        cv::waitKey(0);
    }
    nFrame += nFrameReturned;
} while (nVideoBytes);
Does anyone have experience using OpenCV CUDA together with the actual Video Codec API? How do I get the video frames into a GpuMat?
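What I expected to work is something along these lines (just a sketch, assuming Nv12ToBgra32 leaves 4-byte BGRA pixels in the pitched device buffer dpFrame from the loop above):

cv::cuda::GpuMat gpuFrame(dec.GetHeight(), dec.GetWidth(), CV_8UC4, (void *)dpFrame, nPitch); // wraps the device buffer, no copy
cv::Mat host;
gpuFrame.download(host); // copy to host only for display
cv::imshow("test", host);
cv::waitKey(1);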
Update:
I tried converting the uint8_t, but the displayed image comes out wrong. The code I used has been updated above. What should I change to display the image correctly? I tried https://stackoverflow.com/a/51520728/7939409, but with the code below, downloadedLeft is empty.
cv::Mat downloadedLeft;
cv::cuda::GpuMat gpuLeft;
cudaMalloc((void **)&ppFrame, gpuLeft.rows*gpuLeft.step);
cudaMemcpyAsync(ppFrame, gpuLeft.ptr<uint8_t>(), gpuLeft.rows*gpuLeft.step, cudaMemcpyDeviceToDevice);
gpuLeft.download(downloadedLeft);
cv::imshow ("test", downloadedLeft);
cv::waitKey(1);
Best answer
I found a simple C way to read the image without any third-party library.
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

int read_image(FILE *rfile, void *buf, int width, int height, int stride, int elem_size)
{
    char *byte_ptr = buf;
    int i;
    int ret = 1;

    if (width <= 0 || height <= 0 || elem_size <= 0)
    {
        goto fail_or_end;
    }

    for (i = 0; i < height; ++i)
    {
        /* read one row of width elements, then advance by the row stride in bytes */
        if (fread(byte_ptr, elem_size, width, rfile) != (size_t)width)
        {
            goto fail_or_end;
        }
        byte_ptr += stride;
    }

    ret = 0;

fail_or_end:
    return ret;
}
Credit goes to the Netflix VMAF open source project (file_io.c). Thank you, Netflix.
After reading the image, I managed to continue processing it with CUDA.
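For example, here is a minimal sketch of handing a frame read this way over to OpenCV CUDA (the file name, frame size, and 8-bit grayscale layout are all placeholder assumptions; read_image is the function above):

#include <cstdio>
#include <cstdint>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

// Hypothetical helper: read one raw 8-bit grayscale frame from a dump file
// with read_image() and upload it to the GPU for further cv::cuda processing.
cv::cuda::GpuMat load_frame_to_gpu(const char *path, int width, int height)
{
    cv::cuda::GpuMat gpu;
    std::vector<uint8_t> buf(width * height);
    FILE *rfile = fopen(path, "rb");
    if (rfile)
    {
        if (read_image(rfile, buf.data(), width, height, width, 1) == 0)
        {
            cv::Mat host(height, width, CV_8UC1, buf.data()); // wraps buf, no copy
            gpu.upload(host);                                 // device copy for CUDA processing
        }
        fclose(rfile);
    }
    return gpu;
}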