This article describes how to stream JPEG (motion-JPEG) video with live555. The question and recommended answer below should be a useful reference for anyone facing the same problem.

Problem description

I want to stream JPEG images or a motion-JPEG file through live555, but a JPEG source implementation is not available in live555. Can anyone help?

Recommended answer

You can find an implementation that was posted to the devel mailing list: http://lists.live555.com/pipermail/live-devel/2012-February/014672.html. The code and a sample are available, but the modification was rejected by the live555 maintainer.

First, we need to implement an MJPEGVideoSource that can feed a JPEGVideoRTPSink:

#include "JPEGVideoSource.hh"

class MJPEGVideoSource : public JPEGVideoSource
{
        public:
                static MJPEGVideoSource* createNew (UsageEnvironment& env, FramedSource* source)
                {
                        return new MJPEGVideoSource(env,source);
                }
                virtual void doGetNextFrame()
                {
                    if (m_inputSource)
                        m_inputSource->getNextFrame(fTo, fMaxSize, afterGettingFrameSub, this, FramedSource::handleClosure, this);
                }
                virtual void doStopGettingFrames()
                {
                    FramedSource::doStopGettingFrames();
                    if (m_inputSource)
                        m_inputSource->stopGettingFrames();
                }
                // static callback trampoline: forwards to the member function
                static void afterGettingFrameSub(void* clientData, unsigned frameSize,unsigned numTruncatedBytes,struct timeval presentationTime,unsigned durationInMicroseconds)
                {
                                MJPEGVideoSource* source = (MJPEGVideoSource*)clientData;
                                source->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
                }
                // parse the JPEG headers of the incoming frame, then strip
                // them: the RTP/JPEG payload carries only the scan data
                void afterGettingFrame(unsigned frameSize,unsigned numTruncatedBytes,struct timeval presentationTime,unsigned durationInMicroseconds)
                {
                    int headerSize = 0;
                    bool headerOk = false;
                    fFrameSize = 0;

                    for (unsigned int i = 0; i < frameSize ; ++i)
                    {
                        // SOF0: extract the frame dimensions, stored divided
                        // by 8 as RFC 2435's width/height fields expect
                        if ( (i+8) < frameSize  && fTo[i] == 0xFF && fTo[i+1] == 0xC0 )
                        {
                             m_height = (fTo[i+5]<<5)|(fTo[i+6]>>3);
                             m_width = (fTo[i+7]<<5)|(fTo[i+8]>>3);
                        }
                        // DQT: copy each 8-bit quantization table so it can
                        // be advertised through quantizationTables()
                        if ( (i+5+64) < frameSize && fTo[i] == 0xFF && fTo[i+1] == 0xDB)
                        {
                            if (fTo[i+4] ==0)
                            {
                                memcpy(m_qTable, fTo + i + 5, 64);
                                m_qTable0Init = true;
                            }
                            else if (fTo[i+4] ==1)
                            {
                                memcpy(m_qTable + 64, fTo + i + 5, 64);
                                m_qTable1Init = true;
                            }
                        }
                        // End of header: the byte pair 0x3F 0x00 ends a
                        // baseline SOS header (Se = 0x3F, Ah/Al = 0x00)
                        if ( (i+1) < frameSize && fTo[i] == 0x3F && fTo[i+1] == 0x00 )
                        {
                             headerOk = true;
                             headerSize = i+2;
                             break;
                        }
                    }

                    if (headerOk)
                    {
                        // keep only the entropy-coded scan data; the JPEG
                        // headers are reconstructed by the receiver
                        fFrameSize = frameSize - headerSize;
                        memmove( fTo, fTo + headerSize, fFrameSize );
                    }

                    fNumTruncatedBytes = numTruncatedBytes;
                    fPresentationTime = presentationTime;
                    fDurationInMicroseconds = durationInMicroseconds;
                    afterGetting(this);
                }
                virtual u_int8_t type() { return 1; }       // RFC 2435 type 1: YUV 4:2:0
                virtual u_int8_t qFactor() { return 128; }  // Q >= 128: tables are sent in-band
                virtual u_int8_t width() { return m_width; }    // in units of 8 pixels
                virtual u_int8_t height() { return m_height; }  // in units of 8 pixels
                // advertise the parsed quantization tables once both are known:
                u_int8_t const* quantizationTables( u_int8_t& precision, u_int16_t& length )
                {
                    length = 0;
                    precision = 0;
                    if ( m_qTable0Init && m_qTable1Init )
                    {
                        precision = 8;
                        length = sizeof(m_qTable);
                    }
                    return m_qTable;
                }

        protected:
                MJPEGVideoSource(UsageEnvironment& env, FramedSource* source) : JPEGVideoSource(env),
                m_inputSource(source),
                m_width(0),
                m_height(0),
                m_qTable0Init(false),
                m_qTable1Init(false)
                {
                    memset(&m_qTable,0,sizeof(m_qTable));
                }
                virtual ~MJPEGVideoSource()
                {
                    Medium::close(m_inputSource);
                }

        protected:
                FramedSource* m_inputSource;
                u_int8_t      m_width;
                u_int8_t      m_height;
                u_int8_t      m_qTable[128];
                bool          m_qTable0Init;
                bool          m_qTable1Init;
};
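
One caveat: the loop above detects the end of the JPEG headers by scanning for the raw byte pair 0x3F 0x00 (the last two bytes of a baseline SOS header), which could in principle match inside another segment. A more robust approach is to walk the marker segments using their length fields until SOS is reached. A minimal sketch, assuming baseline JPEG input (findScanDataOffset is an illustrative helper, not part of the posted code):

#include <cstddef>
#include <cstdint>

// Illustrative helper: walk the JPEG marker segments and return the offset
// of the first byte of entropy-coded scan data, or 0 on a malformed stream.
static size_t findScanDataOffset(const uint8_t* data, size_t size)
{
    if (size < 2 || data[0] != 0xFF || data[1] != 0xD8) return 0; // expect SOI first
    size_t i = 2;
    while (i + 4 <= size) {
        if (data[i] != 0xFF) return 0;                      // lost marker sync
        uint8_t marker = data[i+1];
        if (marker == 0x01 || (marker >= 0xD0 && marker <= 0xD9)) {
            i += 2;                                         // standalone marker, no length field
            continue;
        }
        size_t len = (size_t(data[i+2]) << 8) | data[i+3];  // length includes its own two bytes
        if (marker == 0xDA) return i + 2 + len;             // SOS: scan data starts right after
        i += 2 + len;                                       // skip to the next marker
    }
    return 0;
}

The same walk could also pick up the SOF0 and DQT segments, replacing the byte-by-byte scan in afterGettingFrame().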

Next, we can use it as a video source in order to build a simple RTSP server:

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"
#include "GroupsockHelper.hh"
#include "MJPEGVideoSource.hh"

char const* inputFileName = "test.mjpeg";

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);

  const unsigned short rtpPortNum = 18888;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
  rtpGroupsock.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
  rtcpGroupsock.multicastSendOnly(); // we're a SSM source

  // Create a 'JPEG Video RTP' sink from the RTP 'groupsock':
  RTPSink* videoSink = JPEGVideoRTPSink::createNew(*env, &rtpGroupsock);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 5000; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  RTCPInstance* rtcp = RTCPInstance::createNew(*env, &rtcpGroupsock,
                estimatedSessionBandwidth, CNAME,
                videoSink, NULL /* we're a server */,
                True /* we're a SSM source */);
  // Note: This starts RTCP running automatically

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms = ServerMediaSession::createNew(*env, "testStream", inputFileName,"Session streamed by \"testMJPEGVideoStreamer\"",
                       True /*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Start the streaming:
  *env << "Beginning streaming...\n";
  // Open the input file as a 'byte-stream file source':
  ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName);
  if (fileSource == NULL) {
    *env << "Unable to open file \"" << inputFileName
     << "\" as a byte-stream file source\n";
    exit(1);
  }

  // Create the MJPEG video source:
  MJPEGVideoSource* videoSource = MJPEGVideoSource::createNew(*env, fileSource);

  // Finally, start playing:
  *env << "Beginning to read from file...\n";
  videoSink->startPlaying(*videoSource, NULL, NULL);

  env->taskScheduler().doEventLoop();

  return 0;
}
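
The sample above streams to a multicast group and publishes it through a PassiveServerMediaSubsession. If per-client unicast is preferred, the usual live555 pattern is to subclass OnDemandServerMediaSubsession instead. A minimal sketch, reusing the MJPEGVideoSource above (the class name and bitrate estimate are illustrative, not part of the original answer):

#include "liveMedia.hh"
#include "MJPEGVideoSource.hh"

// Illustrative unicast subsession: builds one source chain per client.
class MJPEGOnDemandSubsession : public OnDemandServerMediaSubsession
{
    public:
        static MJPEGOnDemandSubsession* createNew(UsageEnvironment& env, char const* fileName)
        {
            return new MJPEGOnDemandSubsession(env, fileName);
        }

    protected:
        MJPEGOnDemandSubsession(UsageEnvironment& env, char const* fileName)
            : OnDemandServerMediaSubsession(env, False /*reuseFirstSource*/), m_fileName(fileName) {}

        // Called once per client session to build the source chain:
        virtual FramedSource* createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate)
        {
            estBitrate = 5000; // kbps; a rough guess
            ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), m_fileName);
            if (fileSource == NULL) return NULL;
            return MJPEGVideoSource::createNew(envir(), fileSource);
        }

        // JPEG uses the static RTP payload type 26, so the dynamic type is unused:
        virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char /*rtpPayloadTypeIfDynamic*/, FramedSource* /*inputSource*/)
        {
            return JPEGVideoRTPSink::createNew(envir(), rtpGroupsock);
        }

    private:
        char const* m_fileName;
};

It would then replace the passive subsession in main(): sms->addSubsession(MJPEGOnDemandSubsession::createNew(*env, inputFileName));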

This concludes the article on streaming JPEG with live555. We hope the answer above is helpful.
