"Added documentation to Connector_RTSP/main.cpp for #8"
parent 53588811a7
commit ff15e6c1a6
1 changed file with 68 additions and 13 deletions
@@ -23,7 +23,9 @@
 #include "rtp.h"

 /// Reads a single NALU from std::cin. Expected is H.264 Bytestream format.
+/// Function was used as a way of debugging data. FLV does not contain all the metadata we need, so we had to try different approaches.
 /// \return The Nalu data.
+/// \todo Throw this function away when everything works, it is not needed.
 std::string ReadNALU( ) {
 static char Separator[3] = { (char)0x00, (char)0x00, (char)0x01 };
 std::string Buffer;
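For context (not part of this commit): the body of ReadNALU( ) is collapsed by the hunk above. A minimal sketch of an Annex-B reader along the same lines, assuming std::cin carries a raw H.264 byte stream and that the caller wants the NAL unit without its 00 00 01 start code (the name ReadNALU_sketch is purely illustrative, not project code):

#include <iostream>
#include <string>

std::string ReadNALU_sketch( ) {
  std::string Buffer;
  char c;
  // Skip leading zero bytes, then expect the 0x01 that ends the start code.
  while ( std::cin.get( c ) && c == 0x00 ) { }
  if ( !std::cin || c != 0x01 ) { return ""; } // no start code found / end of input
  // Collect payload bytes until the next 00 00 01 sequence (or end of input).
  while ( std::cin.get( c ) ) {
    Buffer += c;
    if ( Buffer.size( ) >= 3 && Buffer.compare( Buffer.size( ) - 3, 3, "\x00\x00\x01", 3 ) == 0 ) {
      Buffer.erase( Buffer.size( ) - 3 ); // drop the start code of the following NALU
      break;
    }
  }
  // Note: a 4-byte start code (00 00 00 01) on the next NALU leaves one trailing zero byte here.
  return Buffer;
}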
@@ -38,62 +40,95 @@ std::string ReadNALU( ) {
 return Result;
 }

-/// The main function of the connector
-/// \param conn A connection with the client
+/// The main function of the connector.
+/// Used by server_setup.cpp at the bottom of this file, to start up the Connector.
+/// This function contains the while loop that accepts connections, and sends them data.
+/// \param conn A connection with the client.
 int RTSP_Handler( Socket::Connection conn ) {
-FLV::Tag tag;///< Temporary tag buffer for incoming video data.
+/// \todo Convert this to DTSC::DTMI, with an additional DTSC::Stream.
+FLV::Tag tag;// Temporary tag buffer for incoming video data.
 bool PlayVideo = false;
 bool PlayAudio = true;
+//JRTPlib objects to handle the RTP connection, which runs "parallel" to RTSP.
 jrtplib::RTPSession VideoSession;
 jrtplib::RTPSessionParams VideoParams;
 jrtplib::RTPUDPv6TransmissionParams VideoTransParams;
 std::string PreviousRequest = "";
 Socket::Connection ss(-1);
 HTTP::Parser HTTP_R, HTTP_S;
+//Some clients appear to expect a single request per connection. Don't know which ones.
 bool PerRequest = false;
+//The main loop of the function
 while(conn.connected() && !FLV::Parse_Error) {
 if( HTTP_R.Read(conn ) ) {
+//Send debug info to stderr.
+//Send the appropriate responses to RTSP commands.
 fprintf( stderr, "REQUEST:\n%s\n", HTTP_R.BuildRequest().c_str() );
 HTTP_S.protocol = "RTSP/1.0";
 if( HTTP_R.method == "OPTIONS" ) {
+//Always return the requested CSeq value.
 HTTP_S.SetHeader( "CSeq", HTTP_R.GetHeader( "CSeq" ).c_str() );
+//The minimal set of options required for RTSP; add new options here as well if we want to support these.
 HTTP_S.SetHeader( "Public", "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY" );
+//End the HTTP body, IMPORTANT!! Connection hangs otherwise!!
 HTTP_S.SetBody( "\r\n\r\n" );
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "200", "OK" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "200", "OK" ) );
 } else if ( HTTP_R.method == "DESCRIBE" ) {
+///\todo Implement DESCRIBE option.
+//Don't know if a 501 response is seen as valid. If it is, don't bother changing it.
 if( HTTP_R.GetHeader( "Accept" ).find( "application/sdp" ) == std::string::npos ) {
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "501", "Not Implemented" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "501", "Not Implemented" ) );
 } else {
 HTTP_S.SetHeader( "CSeq", HTTP_R.GetHeader( "CSeq" ).c_str() );
 HTTP_S.SetHeader( "Content-Type", "application/sdp" );
-/// \todo Retrieve presence of video and audio data, and process into response
-/// \todo Retrieve Packetization mode ( is 0 for now ). Where can I retrieve this?
+/// \todo Retrieve presence of video and audio data, and process into response. Can now easily be done through DTSC::DTMI.
+/// \todo Retrieve packetization mode ( is 0 for now ). I suppose this is the H.264 packetization mode. Can maybe be retrieved from the docs on H.264.
+/// \todo Send a valid SDP file.
+/// \todo Add audio to SDP file.
+//This is just a dummy with data that was supposedly right for our test stream.
+//SDP Docs: http://tools.ietf.org/html/rfc4566
+//v=0
+//o=- 0 0 IN IP4 ddvtech.com
+//s=Fifa Test
+//c=IN IP4 127.0.0.1
+//t=0 0
+//a=recvonly
+//m=video 0 RTP/AVP 98
+//a=control:rtsp://localhost/fifa/video
+//a=rtpmap:98 H264/90000
+//a=fmtp:98 packetization-mode=0
 HTTP_S.SetBody( "v=0\r\no=- 0 0 IN IP4 ddvtech.com\r\ns=Fifa Test\r\nc=IN IP4 127.0.0.1\r\nt=0 0\r\na=recvonly\r\nm=video 0 RTP/AVP 98\r\na=control:rtsp://localhost/fifa/video\r\na=rtpmap:98 H264/90000\r\na=fmtp:98 packetization-mode=0\r\n\r\n");//m=audio 0 RTP/AAP 96\r\na=control:rtsp://localhost/fifa/audio\r\na=rtpmap:96 mpeg4-generic/16000/2\r\n\r\n");
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "200", "OK" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "200", "OK" ) );
 }
 } else if ( HTTP_R.method == "SETUP" ) {
 std::string temp = HTTP_R.GetHeader("Transport");
+//Extract the random UDP port pair for video data ( RTP/RTCP ).
 int ClientRTPLoc = temp.find( "client_port=" ) + 12;
 int PortSpacer = temp.find( "-", ClientRTPLoc );
 int RTPClientPort = atoi( temp.substr( ClientRTPLoc, ( PortSpacer - ClientRTPLoc ) ).c_str() );
 if( HTTP_S.GetHeader( "Session" ) != "" ) {
+//Return an error if a second client tries to connect with an already running stream.
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "459", "Aggregate Operation Not Allowed" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "459", "Aggregate Operation Not Allowed" ) );
 } else {
 HTTP_S.SetHeader( "CSeq", HTTP_R.GetHeader( "CSeq" ).c_str() );
 HTTP_S.SetHeader( "Session", time(NULL) );
 /// \todo "Random" generation of server_ports
-if( HTTP_R.url.find( "audio" ) != std::string::npos ) {
-HTTP_S.SetHeader( "Transport", HTTP_R.GetHeader( "Transport" ) + ";server_port=50002-50003" );
-} else {
+/// \todo Add support for audio
+// if( HTTP_R.url.find( "audio" ) != std::string::npos ) {
+// HTTP_S.SetHeader( "Transport", HTTP_R.GetHeader( "Transport" ) + ";server_port=50002-50003" );
+// } else {
+//send video data
 HTTP_S.SetHeader( "Transport", HTTP_R.GetHeader( "Transport" ) + ";server_port=50000-50001" );
+//Stub data for testing purposes. This should now be extracted somehow from DTSC::DTMI.
 VideoParams.SetOwnTimestampUnit( ( 1.0 / 29.917 ) * 90000.0 );
 VideoParams.SetMaximumPacketSize( 10000 );
 //pick the right port here
 VideoTransParams.SetPortbase( 50000 );
+//create a JRTPlib session
 int VideoStatus = VideoSession.Create( VideoParams, &VideoTransParams, jrtplib::RTPTransmitter::IPv6UDPProto );
 if( VideoStatus < 0 ) {
 std::cerr << jrtplib::RTPGetErrorString( VideoStatus ) << std::endl;
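Also for reference (not part of the commit): the client_port parsing in the SETUP branch above assumes the Transport header is well formed. A more defensive version of the same extraction could look like the sketch below; ParseClientPorts and its bool return are illustrative names, not project API.

#include <cstdlib>
#include <string>

// Extracts the client RTP/RTCP port pair from an RTSP Transport header such as
//   RTP/AVP;unicast;client_port=58000-58001
bool ParseClientPorts( const std::string & transport, int & rtpPort, int & rtcpPort ) {
  size_t pos = transport.find( "client_port=" );
  if ( pos == std::string::npos ) { return false; } // header did not specify client ports
  pos += 12; // skip "client_port="
  size_t dash = transport.find( '-', pos );
  if ( dash == std::string::npos ) { return false; } // expected an "rtp-rtcp" pair
  rtpPort = atoi( transport.substr( pos, dash - pos ).c_str( ) );
  rtcpPort = atoi( transport.substr( dash + 1 ).c_str( ) ); // atoi stops at the next ';'
  return rtpPort > 0 && rtcpPort > 0;
}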
@@ -101,13 +136,15 @@ int RTSP_Handler( Socket::Connection conn ) {
 } else {
 std::cerr << "Created video session\n";
 }
-/// \todo retrieve other client than localhost --> Socket::Connection has no support for this yet?

+/// \todo Connect with clients other than localhost
 uint8_t localip[32];
 int status = inet_pton( AF_INET6, conn.getHost().c_str(), localip ) ;
+//Debug info
 std::cerr << "Status: " << status << "\n";
 jrtplib::RTPIPv6Address addr(localip,RTPClientPort);

+//add the destination address to the VideoSession
 VideoStatus = VideoSession.AddDestination(addr);
 if (VideoStatus < 0) {
 std::cerr << jrtplib::RTPGetErrorString(VideoStatus) << std::endl;
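A note on the inet_pton() call above (not part of the commit): inet_pton() returns 1 on success and 0 when the string is not a valid address for the requested family, so a printed "Status: 0" most likely means conn.getHost() returned an IPv4 literal. One possible workaround, assuming the session stays on JRTPlib's IPv6 UDP transmitter and that it accepts IPv4-mapped destinations (worth verifying), is sketched here; HostToIPv6Bytes is an illustrative helper, not existing code:

#include <arpa/inet.h>
#include <stdint.h>
#include <string>

bool HostToIPv6Bytes( const std::string & host, uint8_t ( & out )[16] ) {
  if ( inet_pton( AF_INET6, host.c_str( ), out ) == 1 ) { return true; } // already an IPv6 literal
  uint8_t v4[4];
  if ( inet_pton( AF_INET, host.c_str( ), v4 ) == 1 ) {
    // Build an IPv4-mapped IPv6 address: ::ffff:a.b.c.d
    for ( int i = 0; i < 10; ++i ) { out[i] = 0; }
    out[10] = 0xff; out[11] = 0xff;
    for ( int i = 0; i < 4; ++i ) { out[12 + i] = v4[i]; }
    return true;
  }
  return false; // a hostname rather than a literal; would need a resolver here
}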
@@ -115,19 +152,24 @@ int RTSP_Handler( Socket::Connection conn ) {
 } else {
 std::cerr << "Destination Set\n";
 }
+//Stub data for testing purposes.
+//Payload type should conform to the SDP file. 98 == H264 / AVC
 VideoSession.SetDefaultPayloadType(98);
 VideoSession.SetDefaultMark(false);
+//We have no idea if this timestamp has to correspond with the OwnTimestampUnit() above.
 VideoSession.SetDefaultTimestampIncrement( ( 1.0 / 29.917 ) * 90000 );
-}
+// }
 HTTP_S.SetBody( "\r\n\r\n" );
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "200", "OK" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "200", "OK" ) );
 }
 } else if( HTTP_R.method == "PLAY" ) {
 if( HTTP_R.GetHeader( "Range" ).substr(0,4) != "npt=" ) {
+//We do not support this, whatever it is. Not needed for minimal compliance.
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "501", "Not Implemented" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "501", "Not Implemented" ) );
 } else {
+//Initializes for actual streaming over the SETUP connection.
 HTTP_S.SetHeader( "CSeq", HTTP_R.GetHeader( "CSeq" ).c_str() );
 HTTP_S.SetHeader( "Session", HTTP_R.GetHeader( "Session" ) );
 HTTP_S.SetHeader( "Range", HTTP_R.GetHeader( "Range" ) );
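A note on the timestamp values above (not part of the commit): with a 90 kHz RTP clock at roughly 29.917 frames per second, the per-frame increment is 90000 / 29.917 ≈ 3008 ticks, which is what ( 1.0 / 29.917 ) * 90000 evaluates to, so the SetDefaultTimestampIncrement() call looks right. JRTPlib's documentation describes SetOwnTimestampUnit() as taking the unit in seconds per tick (for example 1.0/8000.0 for 8 kHz audio); if that reading is correct, a 90 kHz video session would be configured more like this sketch:

// Sketch only, reusing the VideoParams/VideoSession objects from the code above.
VideoParams.SetOwnTimestampUnit( 1.0 / 90000.0 ); // 90 kHz RTP clock, unit in seconds per tick
VideoSession.SetDefaultTimestampIncrement( (uint32_t)( 90000.0 / 29.917 + 0.5 ) ); // ≈ 3008 ticks per frame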
@@ -135,15 +177,20 @@ int RTSP_Handler( Socket::Connection conn ) {
 HTTP_S.SetBody( "\r\n\r\n" );
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "200", "OK" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "200", "OK" ) );
+//Used further down, to start streaming video.
+//PlayAudio = true;
 PlayVideo = true;
 }
 } else if( HTTP_R.method == "TEARDOWN" ) {
+//If we were sending any stream data at this point, stop it, but keep the setup.
 HTTP_S.SetHeader( "CSeq", HTTP_R.GetHeader( "CSeq" ).c_str() );
 HTTP_S.SetBody( "\r\n\r\n" );
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "200", "OK" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "200", "OK" ) );
+//PlayAudio = false;
 PlayVideo = false;
 } else {
+//We do not implement other commands ( yet )
 fprintf( stderr, "RESPONSE:\n%s\n", HTTP_S.BuildResponse( "501", "Not Implemented" ).c_str() );
 conn.write( HTTP_S.BuildResponse( "501", "Not Implemented" ) );
 }
@@ -154,14 +201,18 @@ int RTSP_Handler( Socket::Connection conn ) {
 }
 }
 if( PlayVideo ) {
-/// \todo Select correct source
+/// \todo Select correct source. This should become the DTSC::DTMI or the DTSC::Stream, whichever seems more natural.
 std::string VideoBuf = ReadNALU( );
 if( VideoBuf == "" ) {
+//The video buffer is empty, no more data.
 jrtplib::RTPTime delay = jrtplib::RTPTime(10.0);
 VideoSession.BYEDestroy(delay,"Out of data",11);
 conn.close();
 } else {
+//Send a single NALU (H264 block) here.
 VideoSession.SendPacket( VideoBuf.c_str(), VideoBuf.size(), 98, false, ( 1.0 / 29.917 ) * 90000 );
+//We can add delays here as follows:
+//Don't know if these are necessary or not, but good for testing nonetheless.
 // jrtplib::RTPTime delay( ( 1.0 / 29.917 ) * 90000 );
 // jrtplib::RTPTime::Wait( delay );
 }
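One more observation on the commented-out delay above (not part of the commit): as far as the JRTPlib examples show, RTPTime's double constructor takes seconds, so ( 1.0 / 29.917 ) * 90000 would wait roughly 3008 seconds per packet. If the goal is simply to pace output at the source frame rate, the wall-clock delay is 1 / 29.917 ≈ 33.4 ms per frame, i.e. something like:

// Sketch only: pace one NALU per frame interval.
jrtplib::RTPTime delay( 1.0 / 29.917 ); // ~0.0334 seconds between frames
jrtplib::RTPTime::Wait( delay );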
@@ -170,7 +221,11 @@ int RTSP_Handler( Socket::Connection conn ) {
 return 0;
 }

+//Set Default Port
 #define DEFAULT_PORT 554
+//Set the function that should be forked for each client
 #define MAINHANDLER RTSP_Handler
+//Set the section in the Config file, though we will not use this yet
 #define CONFIGSECT RTSP
+//Include the main functionality, as well as fork support and everything.
 #include "../util/server_setup.cpp"
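Finally, a purely hypothetical sketch (not part of this commit, and not the actual contents of ../util/server_setup.cpp) of the pattern the three defines above plug into: listen on DEFAULT_PORT, accept clients in a loop, and fork MAINHANDLER for each connection. The Socket::Server class and its accept() call are assumptions here, chosen only to illustrate the idea.

#include <unistd.h>

int main( int argc, char ** argv ) {
  Socket::Server server( DEFAULT_PORT ); // assumed listening-socket wrapper
  while ( true ) {
    Socket::Connection conn = server.accept( ); // assumed blocking accept
    if ( !conn.connected( ) ) { continue; }
    if ( fork( ) == 0 ) {
      return MAINHANDLER( conn ); // child process: run RTSP_Handler for this client
    }
    // parent process: keep accepting new clients
  }
  return 0;
}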