Added the TS2DTSC converter source file, a working TS to DTSC converter

Erik Zandvliet 2012-07-09 15:33:30 +02:00 committed by Thulinma
parent 01dc2a1a00
commit 3703f6e859
4 changed files with 173 additions and 47 deletions

View file

@@ -9,33 +9,27 @@ NAL_Unit::NAL_Unit( std::string & InputData ) {
}
bool NAL_Unit::ReadData( std::string & InputData ) {
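// Reads one NAL unit from the front of InputData, accepting either Annex B (start-code delimited) or length-prefixed input.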
std::string FullAnnexB;
FullAnnexB += (char)0x00;
FullAnnexB += (char)0x00;
FullAnnexB += (char)0x00;
FullAnnexB += (char)0x01;
std::string ShortAnnexB;
ShortAnnexB += (char)0x00;
ShortAnnexB += (char)0x00;
ShortAnnexB += (char)0x01;
// fprintf( stderr, "NAL_Unit::ReadData --- DataSize: %d\n", InputData.size() );
if( InputData.size() < 3 ) { return false; }
bool AnnexB = false;
if( InputData[0] == 0x00 && InputData[1] == 0x00 ) {
if( InputData[2] == 0x01 ) {
AnnexB = true;
}
if( InputData[2] == 0x00 && InputData[3] == 0x01 ) {
InputData.erase(0,1);
AnnexB = true;
}
}
if( InputData.substr(0,3) == ShortAnnexB ) { AnnexB = true; }
if( InputData.substr(0,4) == FullAnnexB ) { InputData.erase(0,1); AnnexB = true; }
if( AnnexB ) {
MyData = "";
InputData.erase(0,3);//Intro Bytes
bool FinalByteRead = false;
while( !FinalByteRead ) {
MyData += InputData[0];
InputData.erase(0,1);
if( InputData[0] == 0x00 && InputData[1] == 0x00 ) {
if( InputData[2] == 0x01 ) {
FinalByteRead = true;
}
if( InputData[2] == 0x00 && InputData[3] == 0x01 ) {
InputData.erase(0,1);
FinalByteRead = true;
}
}
}
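// Find the next start code (short or long form); everything before it is this NAL unit's payload.
// If no further start code exists, find() returns npos, which is not checked for explicitly here.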
int Location = std::min( InputData.find( ShortAnnexB ), InputData.find( FullAnnexB ) );
MyData = InputData.substr(0,Location);
InputData.erase(0,Location);
} else {
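// Length-prefixed form: the next 4 bytes are a big-endian unit length. Note that std::string holds (possibly signed) char,
// so payload bytes >= 0x80 will sign-extend in the shifts below; casting to unsigned char would be safer.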
if( InputData.size() < 4 ) { return false; }
int UnitLen = (InputData[0] << 24) + (InputData[1] << 16) + (InputData[2] << 8) + InputData[3];

View file

@@ -1,4 +1,5 @@
#include <string>
#include <cstdio>
class NAL_Unit {
public:

View file

@@ -365,54 +365,55 @@ int TS::Packet::PESTimeStamp( ) {
MyTimestamp = (MyTimestamp << 8) + Buffer[PesOffset+12];
MyTimestamp = (MyTimestamp << 7) + ((Buffer[PesOffset+13]) >> 1);
fprintf( stderr, "PES Timestamp: %d\n", MyTimestamp );
return 0;
return MyTimestamp;
}
int TS::Packet::GetDataOffset( ) {
int Offset = 4;
fprintf( stderr,"\tBefore Adapt: %d\n", Offset );
if( AdaptationField( ) >= 2 ) {
Offset += 1 + AdaptationFieldLen( );
}
fprintf( stderr,"\tBefore UnitStart: %d\n", Offset );
if( UnitStart() ) {
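// PES layout: 6-byte packet start (start code prefix, stream id, packet length) plus 2 flag bytes,
// then a header-length byte followed by that many optional header bytes.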
fprintf( stderr, "\t\tPES Header Len: %d\n", Buffer[Offset+8] );
Offset += 8;//Default Header + Flag Bytes
Offset += 1 + Buffer[Offset];//HeaderLengthByte + HeaderLength
}
fprintf( stderr,"\tBefore Return: %d\n", Offset );
return Offset;
}
/*
DTSC::DTMI TS::Packet::toDTSC(DTSC::DTMI & metadata, std::string Type) {
DTSC::DTMI outPack = DTSC::DTMI(Type, DTSC::DTMI_ROOT);
outPack.addContent(DTSC::DTMI("datatype", Type));
if( UnitStart() ){ outPack.addContent(DTSC::DTMI("time", PESTimeStamp( ))); }
if( Type == "video" && (RandomAccess() > 0) ){ outPack.addContent(DTSC::DTMI("keyframe", 1)); }
int DataOffset = GetDataOffset();
fprintf( stderr, "Data Offset: %d\n", DataOffset );
outPack.addContent(DTSC::DTMI("data", std::string((char*)Buffer+DataOffset, (size_t)188-DataOffset)));
return outPack;
}
*/
void TS::Packet::toDTSC( std::string Type, DTSC::DTMI & CurrentDTSC ) {
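// Appends this packet's payload to the DTSC packet being assembled, filling in the datatype, time and per-type fields when a new PES unit starts.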
if( !CurrentDTSC.getContentP( "datatype" ) ) {
CurrentDTSC.addContent( DTSC::DTMI("datatype", Type ) );
}
if( UnitStart() ) {
if( !CurrentDTSC.getContentP( "time" ) ) {
CurrentDTSC.addContent( DTSC::DTMI( "time", PESTimeStamp( ) ) );
if( Type == "video" ) {
if ( (RandomAccess() > 0) ){
if( !CurrentDTSC.getContentP( "keyframe" ) && !CurrentDTSC.getContentP( "interframe" ) ) {
CurrentDTSC.addContent(DTSC::DTMI("keyframe", 1));
}
} else {
if( !CurrentDTSC.getContentP( "keyframe" ) && !CurrentDTSC.getContentP( "interframe" ) ) {
CurrentDTSC.addContent(DTSC::DTMI("interframe", 1));
}
}
}
if( Type == "video" && (RandomAccess() > 0) ){
if( !CurrentDTSC.getContentP( "keyframe" ) ) {
CurrentDTSC.addContent(DTSC::DTMI("keyframe", 1));
if( UnitStart() ) {
if( !CurrentDTSC.getContentP( "time" ) ) {
if( Type == "audio" ) {
CurrentDTSC.addContent( DTSC::DTMI( "time", PESTimeStamp( ) / 81000 ) );
} else {
//CurrentDTSC.addContent( DTSC::DTMI( "time", PESTimeStamp( ) / 27000 ) );
CurrentDTSC.addContent( DTSC::DTMI( "time", (PESTimeStamp( ) - 27000000) / 91 ) );
}
}
}
if( Type == "video" ) {
if( !CurrentDTSC.getContentP( "nalu" ) ) {
CurrentDTSC.addContent( DTSC::DTMI( "nalu", 1 ) );
}
if( !CurrentDTSC.getContentP( "offset" ) ) {
CurrentDTSC.addContent( DTSC::DTMI( "offset", 0 ) );
}
}
int DataOffset = GetDataOffset();
fprintf( stderr, "Data Offset: %d\n", DataOffset );
std::string ToAppend = std::string((char*)Buffer+DataOffset, (size_t)188-DataOffset);
std::string CurrentData;
if( CurrentDTSC.getContentP( "data" ) ) {

src/converters/ts2dtsc.cpp Normal file, 130 additions
View file

@@ -0,0 +1,130 @@
#include <cstdio> //fprintf
#include <iostream> //std::cin, std::cout
#include <string>
#include "../../lib/ts_packet.h" //TS support
#include "../../lib/dtsc.h" //DTSC support
#include "../../lib/nal.h" //NAL Unit operations
//DTSC::DTMI MetaData
//DTSC::DTMI OutData
//X if( PID() == 0x00 ) --> PAT --> Extract first PMT PID()
//X if( PID() == PMTStream ) --> PMT --> Extract first video PMT() && Extract first audio PMT()
// if( PID() == AudioStream ) --> Audio --> Extract Timestamp IF keyframe --> DTSC
// if( PID() == VideoStream ) --> Video --> AnnexB_to_Regular --> Extract Timestamp IF keyframe --> Remove PPS? --> Remove SPS? --> DTSC
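//Intended usage (binary name assumed from this source file's name): ts2dtsc < input.ts > output.dtsc, with debug output on stderr.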
//Copied from FLV_TAG
void Meta_Put(DTSC::DTMI & meta, std::string cat, std::string elem, std::string val){
if (meta.getContentP(cat) == 0){meta.addContent(DTSC::DTMI(cat));}
meta.getContentP(cat)->addContent(DTSC::DTMI(elem, val));
}
void Meta_Put(DTSC::DTMI & meta, std::string cat, std::string elem, int val){
if (meta.getContentP(cat) == 0){meta.addContent(DTSC::DTMI(cat));}
meta.getContentP(cat)->addContent(DTSC::DTMI(elem, val));
}
int main( ) {
char charBuffer[1024*10];
unsigned int charCount;
std::string StrData;
TS::Packet TSData;
int PMT_PID = -1;
int VideoPID = -1;
int AudioPID = -1;
DTSC::DTMI Meta;
DTSC::DTMI VideoOut;
DTSC::DTMI AudioOut;
//Default metadata; far from all options are filled in, since most stream parameters are carried in the video data itself rather than set here.
//Combined with stub data so the output lines up with the original DTSC file of the test case.
Meta_Put(Meta, "video", "codec", "H264");
Meta_Put(Meta, "video", "width", 1280);
Meta_Put(Meta, "video", "height", 720);
Meta_Put(Meta, "video", "fpks", 2997000);
Meta_Put(Meta, "video", "bps", 832794);
Meta_Put(Meta, "audio", "codec", "AAC");
Meta_Put(Meta, "audio", "bps", 24021);
Meta_Put(Meta, "audio", "rate", 48000);
Meta_Put(Meta, "audio", "size", 16);
Meta_Put(Meta, "audio", "channels", 2);
Meta.Pack(true);
Meta.packed.replace(0, 4, DTSC::Magic_Header);
std::cout << Meta.packed;
std::string PrevType = "";
while( std::cin.good() ) {
std::cin.read(charBuffer, 1024*10);
charCount = std::cin.gcount();
StrData.append(charBuffer, charCount);
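// Pull complete TS packets off the front of the buffer for as long as enough data is available.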
while( TSData.FromString( StrData ) ) {
// fprintf( stderr, "PID: %d\n", TSData.PID() );
if( TSData.PID() == 0 ) {
int TmpPMTPid = TSData.ProgramMapPID( );
if( TmpPMTPid != -1 ) { PMT_PID = TmpPMTPid; }
// fprintf( stderr, "\tPMT PID: %d\n", PMT_PID );
}
if( TSData.PID() == PMT_PID ) {
TSData.UpdateStreamPID( VideoPID, AudioPID );
// fprintf( stderr, "\tVideoStream: %d\n\tAudioStream: %d\n", VideoPID, AudioPID );
}
if( TSData.PID() == VideoPID ) {
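// A packet on the video PID after audio data means the buffered audio frame is complete: strip its header and write it out.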
if( PrevType == "Audio" ) {
fprintf( stderr, "\tVideopacket, sending audiobuffer\n" );
std::string AudioData = AudioOut.getContent("data").StrValue();
AudioData.erase(0,7);//remove the 7-byte header (presumably ADTS)
AudioOut.addContent( DTSC::DTMI( "data", AudioData ) );
std::cout << AudioOut.Pack(true);
AudioOut = DTSC::DTMI();
AudioOut.addContent( DTSC::DTMI( "datatype", "audio" ) );
}
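// The start of a new PES unit means the previous video frame is complete: repack its Annex B data into size-prefixed NAL units and write it out.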
if( TSData.UnitStart( ) && PrevType == "Video" ) {
fprintf( stderr, "\tNew VideoPacket, Writing old\n" );
std::string AnnexBData = VideoOut.getContent("data").StrValue();
std::string NewData;
NAL_Unit Transformer;
int i = 0;
while( Transformer.ReadData( AnnexBData ) ) {
if( Transformer.Type() < 6 || Transformer.Type() > 9 ) { //drop SEI (6), SPS (7), PPS (8) and AU delimiter (9) units, keep the rest
NewData += Transformer.SizePrepended( );
}
}
VideoOut.addContent( DTSC::DTMI( "data", NewData ) );
std::cout << VideoOut.Pack(true);
VideoOut = DTSC::DTMI();
}
TSData.toDTSC( "video", VideoOut );
PrevType = "Video";
}
if( TSData.PID() == AudioPID ) {
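// Mirror of the video branch: an audio packet after video data flushes the buffered video frame first.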
if( PrevType == "Video" ) {
fprintf( stderr, "\tAudiopacket, sending videobuffer\n" );
std::string AnnexBData = VideoOut.getContent("data").StrValue();
std::string NewData;
NAL_Unit Transformer;
int i = 0;
while( Transformer.ReadData( AnnexBData ) ) {
if( Transformer.Type() < 6 || Transformer.Type() > 9 ) { //drop SEI (6), SPS (7), PPS (8) and AU delimiter (9) units, keep the rest
NewData += Transformer.SizePrepended( );
}
}
VideoOut.addContent( DTSC::DTMI( "data", NewData ) );
std::cout << VideoOut.Pack(true);
VideoOut = DTSC::DTMI();
}
if( TSData.UnitStart( ) && PrevType == "Audio" ) {
fprintf( stderr, "\tNew AudioPacket, Writing old\n" );
std::string AudioData = AudioOut.getContent("data").StrValue();
AudioData.erase(0,7);//remove the 7-byte header (presumably ADTS)
AudioOut.addContent( DTSC::DTMI( "data", AudioData ) );
std::cout << AudioOut.Pack(true);
AudioOut = DTSC::DTMI();
AudioOut.addContent( DTSC::DTMI( "datatype", "audio" ) );
}
TSData.toDTSC( "audio", AudioOut );
PrevType = "Audio";
}
}
}
return 0;
}