Almost working HTTP connector - mid-rewrite, running into some issues, sleepy, going to bed...

This commit is contained in:
Thulinma 2011-09-14 00:05:54 +02:00
parent 46bfeefb03
commit 78fc9f8ab2
8 changed files with 414 additions and 129 deletions

View file

@@ -1,4 +1,4 @@
-SRC = main.cpp ../util/socket.cpp ../util/http_parser.cpp ../util/flv_tag.cpp ../util/amf.cpp ../util/util.cpp
+SRC = main.cpp ../util/socket.cpp ../util/http_parser.cpp ../util/flv_tag.cpp ../util/amf.cpp ../util/dtsc.cpp ../util/util.cpp
OBJ = $(SRC:.cpp=.o)
OUT = DDV_Conn_HTTP
INCLUDES =

View file

@@ -13,6 +13,7 @@
#include <ctime>
#include "../util/socket.h"
#include "../util/http_parser.h"
#include "../util/dtsc.h"
#include "../util/flv_tag.h"
#include "../util/MP4/interface.cpp"
#include "../util/amf.h"
@@ -149,26 +150,110 @@ namespace Connector_HTTP{
return Result;
}//BuildManifest
/// Handles Progressive download streaming requests
void Progressive(FLV::Tag & tag, HTTP::Parser HTTP_S, Socket::Connection & conn, DTSC::Stream & Strm){
static bool progressive_has_sent_header = false;
if (!progressive_has_sent_header){
HTTP_S.Clean();//clean up any leftover junk that might be present...
HTTP_S.SetHeader("Content-Type", "video/x-flv");//FLV files always have this content-type.
//HTTP_S.SetHeader("Transfer-Encoding", "chunked");
HTTP_S.protocol = "HTTP/1.0";
HTTP_S.SendResponse(conn, "200", "OK");//no SetBody = unknown length! That is what we want here.
//HTTP_S.SendBodyPart(CONN_fd, FLVHeader, 13);//write the FLV header
conn.write(FLV::Header, 13);
FLV::Tag tmp;
tmp.DTSCMetaInit(Strm);
conn.write(tmp.data, tmp.len);
if (Strm.metadata.getContentP("audio") && Strm.metadata.getContentP("audio")->getContentP("init")){
tmp.DTSCAudioInit(Strm);
conn.write(tmp.data, tmp.len);
}
if (Strm.metadata.getContentP("video") && Strm.metadata.getContentP("video")->getContentP("init")){
tmp.DTSCVideoInit(Strm);
conn.write(tmp.data, tmp.len);
}
progressive_has_sent_header = true;
#if DEBUG >= 1
fprintf(stderr, "Sent progressive FLV header\n");
#endif
}
//HTTP_S.SendBodyPart(CONN_fd, tag->data, tag->len);//write this FLV tag out unmodified
conn.write(tag.data, tag.len);
}
/// Handles Flash Dynamic HTTP streaming requests
void FlashDynamic(FLV::Tag & tag, HTTP::Parser HTTP_S, Socket::Connection & conn, DTSC::Stream & Strm){
static std::queue<std::string> Flash_FragBuffer;
static unsigned int Flash_StartTime = 0;
static std::string FlashBuf;
static std::string FlashMeta;
static bool FlashFirstVideo = false;
static bool FlashFirstAudio = false;
static bool Flash_ManifestSent = false;
static int Flash_RequestPending = 0;
if (tag.tagTime() > 0){
if (Flash_StartTime == 0){
Flash_StartTime = tag.tagTime();
}
tag.tagTime(tag.tagTime() - Flash_StartTime);
}
if (tag.data[0] != 0x12 ) {
if (tag.isKeyframe){
if (FlashBuf != "" && !FlashFirstVideo && !FlashFirstAudio){
Flash_FragBuffer.push(FlashBuf);
#if DEBUG >= 4
fprintf(stderr, "Received a fragment. Now %i in buffer.\n", (int)Flash_FragBuffer.size());
#endif
}
FlashBuf.clear();
FlashFirstVideo = true;
FlashFirstAudio = true;
}
/// \todo Check metadata for video/audio, append if needed.
/*
if (FlashFirstVideo && (tag.data[0] == 0x09) && (Video_Init.len > 0)){
Video_Init.tagTime(tag.tagTime());
FlashBuf.append(Video_Init.data, Video_Init.len);
FlashFirstVideo = false;
}
if (FlashFirstAudio && (tag.data[0] == 0x08) && (Audio_Init.len > 0)){
Audio_Init.tagTime(tag.tagTime());
FlashBuf.append(Audio_Init.data, Audio_Init.len);
FlashFirstAudio = false;
}
#if DEBUG >= 5
fprintf(stderr, "Received a tag of type %2hhu and length %i\n", tag.data[0], tag.len);
#endif
if ((Video_Init.len > 0) && (Audio_Init.len > 0)){
FlashBuf.append(tag.data,tag.len);
}
*/
} else {
/*
FlashMeta = "";
FlashMeta.append(tag.data+11,tag.len-15);
if( !Flash_ManifestSent ) {
HTTP_S.Clean();
HTTP_S.SetHeader("Content-Type","text/xml");
HTTP_S.SetHeader("Cache-Control","no-cache");
HTTP_S.SetBody(BuildManifest(FlashMeta, Movie, tag.tagTime()));
HTTP_S.SendResponse(conn, "200", "OK");
}
*/
}
}
/// Main function for Connector_HTTP
int Connector_HTTP(Socket::Connection conn){
int handler = HANDLER_PROGRESSIVE;///< The handler used for processing this request.
bool ready4data = false;///< Set to true when streaming is to begin.
bool inited = false;
-bool progressive_has_sent_header = false;
Socket::Connection ss(-1);
std::string streamname;
-std::string extension;
-std::string FlashBuf;
-std::string FlashMeta;
-bool Flash_ManifestSent = false;
-int Flash_RequestPending = 0;
-unsigned int Flash_StartTime;
-std::queue<std::string> Flash_FragBuffer;
-FLV::Tag tag;///< Temporary tag buffer for incoming video data.
-FLV::Tag Audio_Init;///< Audio initialization data, if available.
-FLV::Tag Video_Init;///< Video initialization data, if available.
-bool FlashFirstVideo = false;
-bool FlashFirstAudio = false;
+FLV::Tag tag;///< Temporary tag buffer.
+std::string recBuffer = "";
+DTSC::Stream Strm;///< Incoming stream buffer.
HTTP::Parser HTTP_R, HTTP_S;//HTTP Receiver and HTTP Sender.
std::string Movie = "";
@@ -179,7 +264,7 @@ namespace Connector_HTTP{
unsigned int lastStats = 0;
//int CurrentFragment = -1; reuse later?
-while (conn.connected() && !FLV::Parse_Error){
+while (conn.connected()){
//only parse input if available or not yet init'ed
if (HTTP_R.Read(conn, ready4data)){
handler = HANDLER_PROGRESSIVE;
@@ -209,8 +294,11 @@
printf( "URL: %s\n", HTTP_R.url.c_str());
printf( "Movie: %s, Quality: %s, Seg %d Frag %d\n", Movie.c_str(), Quality.c_str(), Segment, ReqFragment);
#endif
/// \todo Handle these requests properly...
/*
Flash_ManifestSent = true;//stop manifest from being sent multiple times
Flash_RequestPending++;
*/
}else{
Movie = HTTP_R.url.substr(1);
Movie = Movie.substr(0,Movie.find("/"));
@@ -255,6 +343,8 @@
#endif
inited = true;
}
/// \todo Send pending flash requests...
/*
if ((Flash_RequestPending > 0) && !Flash_FragBuffer.empty()){
HTTP_S.Clean();
HTTP_S.SetHeader("Content-Type","video/mp4");
@@ -284,97 +374,16 @@
break;
case 0: break;//not ready yet
default:
-if (tag.SockLoader(ss)){//able to read a full packet?
-if (handler == HANDLER_FLASH){
-if (tag.tagTime() > 0){
-if (Flash_StartTime == 0){
-Flash_StartTime = tag.tagTime();
-}
-tag.tagTime(tag.tagTime() - Flash_StartTime);
-}
-if (tag.data[0] != 0x12 ) {
-if ( (tag.data[0] == 0x09) && tag.isInitData()){
+if (ss.iread(recBuffer)){
+if (Strm.parsePacket(recBuffer)){
+tag.DTSCLoader(Strm);
+if (handler == HANDLER_FLASH){
+FlashDynamic(tag, HTTP_S, conn, Strm);
+}
+if (handler == HANDLER_PROGRESSIVE){
+Progressive(tag, HTTP_S, conn, Strm);
+}
+}
+}
if (((tag.data[11] & 0x0f) == 7) && (tag.data[12] == 0)){
tag.tagTime(0);//timestamp to zero
Video_Init = tag;
break;
}
}
if ((tag.data[0] == 0x08) && tag.isInitData()){
if (((tag.data[11] & 0xf0) >> 4) == 10){//aac packet
tag.tagTime(0);//timestamp to zero
Audio_Init = tag;
break;
}
}
if (tag.isKeyframe){
if (FlashBuf != "" && !FlashFirstVideo && !FlashFirstAudio){
Flash_FragBuffer.push(FlashBuf);
#if DEBUG >= 4
fprintf(stderr, "Received a fragment. Now %i in buffer.\n", (int)Flash_FragBuffer.size());
#endif
}
FlashBuf.clear();
FlashFirstVideo = true;
FlashFirstAudio = true;
}
if (FlashFirstVideo){
if (Video_Init.len > 0){
Video_Init.tagTime(tag.tagTime());
FlashBuf.append(Video_Init.data, Video_Init.len);
}
FlashFirstVideo = false;
if (Audio_Init.len > 0){
Audio_Init.tagTime(tag.tagTime());
FlashBuf.append(Audio_Init.data, Audio_Init.len);
}
FlashFirstAudio = false;
}
#if DEBUG >= 5
fprintf(stderr, "Received a tag of type %2hhu and length %i\n", tag.data[0], tag.len);
#endif
if (!FlashFirstVideo && !FlashFirstAudio){
FlashBuf.append(tag.data,tag.len);
}
} else {
FlashMeta = "";
FlashMeta.append(tag.data+11,tag.len-15);
if( !Flash_ManifestSent ) {
HTTP_S.Clean();
HTTP_S.SetHeader("Content-Type","text/xml");
HTTP_S.SetHeader("Cache-Control","no-cache");
HTTP_S.SetBody(BuildManifest(FlashMeta, Movie, tag.tagTime()));
HTTP_S.SendResponse(conn, "200", "OK");
}
}
} }
if (handler == HANDLER_PROGRESSIVE){
if (!progressive_has_sent_header){
HTTP_S.Clean();//clean up any leftover junk that might be present...
if (extension == ".mp3"){
HTTP_S.SetHeader("Content-Type", "audio/mpeg3");//MP3 files have this content-type.
HTTP_S.protocol = "HTTP/1.0";
HTTP_S.SendResponse(conn, "200", "OK");//no SetBody = unknown length! That is what we want here.
}else{
HTTP_S.SetHeader("Content-Type", "video/x-flv");//FLV files always have this content-type.
//HTTP_S.SetHeader("Transfer-Encoding", "chunked");
HTTP_S.protocol = "HTTP/1.0";
HTTP_S.SendResponse(conn, "200", "OK");//no SetBody = unknown length! That is what we want here.
//HTTP_S.SendBodyPart(CONN_fd, FLVHeader, 13);//write the FLV header
conn.write(FLV::Header, 13);
}
progressive_has_sent_header = true;
}
if (extension == ".mp3"){
if (tag.data[0] == 0x08){
if (((tag.data[11] & 0xf0) >> 4) == 2){//mp3 packet
conn.write(tag.data+12, tag.len-16);//write only the MP3 data of the tag
}
}
}else{
conn.write(tag.data, tag.len);
}
}//PROGRESSIVE handler
}
break;
} }

View file

@@ -161,10 +161,10 @@ namespace Converters{
}
if (!Meta_Has(meta_out, "audio", "rate")){
switch (audiodata & 0x0C){
-case 0x0: Meta_Put(meta_out, "audio", "rate", 5500); break;
-case 0x4: Meta_Put(meta_out, "audio", "rate", 11000); break;
-case 0x8: Meta_Put(meta_out, "audio", "rate", 22000); break;
-case 0xC: Meta_Put(meta_out, "audio", "rate", 44000); break;
+case 0x0: Meta_Put(meta_out, "audio", "rate", 5512); break;
+case 0x4: Meta_Put(meta_out, "audio", "rate", 11025); break;
+case 0x8: Meta_Put(meta_out, "audio", "rate", 22050); break;
+case 0xC: Meta_Put(meta_out, "audio", "rate", 44100); break;
}
}
if (!Meta_Has(meta_out, "audio", "size")){
@@ -222,11 +222,8 @@
case 1: pack_out.addContent(DTSC::DTMI("nalu", 1)); break;
case 2: pack_out.addContent(DTSC::DTMI("nalu_end", 1)); break;
}
-int offset = 0;
-((char*)(&offset))[0] = FLV_in.data[13];
-((char*)(&offset))[1] = FLV_in.data[14];
-((char*)(&offset))[2] = FLV_in.data[15];
-offset >>= 8;
+int offset = (FLV_in.data[13] << 16) + (FLV_in.data[14] << 8) + FLV_in.data[15];
+offset = (offset << 8) >> 8;
pack_out.addContent(DTSC::DTMI("offset", offset));
}
pack_out.addContent(DTSC::DTMI("time", FLV_in.tagTime()));
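The replacement above treats the three FLV CompositionTime bytes as a signed 24-bit big-endian value and sign-extends it through bit 31. A minimal standalone sketch of the same trick, with a hypothetical signExtend24 helper and made-up test bytes, assuming a 32-bit int with arithmetic right shift, just as the converter line does:

#include <cstdio>

// Hypothetical helper: interpret three big-endian bytes as a signed 24-bit value,
// using the same "(offset << 8) >> 8" sign-extension as the converter above.
static int signExtend24(unsigned char b1, unsigned char b2, unsigned char b3){
  int offset = (b1 << 16) + (b2 << 8) + b3; // 0x000000 .. 0xFFFFFF
  return (offset << 8) >> 8;                // move bit 23 into the sign bit, then back
}

int main(){
  printf("%d\n", signExtend24(0x00, 0x00, 0x02)); // prints 2
  printf("%d\n", signExtend24(0xFF, 0xFF, 0xFE)); // prints -2 (B-frame display offset in ms)
  return 0;
}

The offset is later written back into an FLV tag by DTSCLoader() using the same three-byte big-endian layout.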

View file

@@ -44,7 +44,7 @@ bool DTSC::Stream::parsePacket(std::string & buffer){
buffers.front() = DTSC::parseDTMI((unsigned char*)buffer.c_str() + 8, len);
datapointertype = INVALID;
if (buffers.front().getContentP("data")){
-datapointer = buffers.front().getContentP("data")->StrValue().c_str();
+datapointer = &(buffers.front().getContentP("data")->StrValue());
if (buffers.front().getContentP("datatype")){
std::string tmp = buffers.front().getContentP("datatype")->StrValue();
if (tmp == "video"){datapointertype = VIDEO;}
@@ -68,8 +68,8 @@ bool DTSC::Stream::parsePacket(std::string & buffer){
/// Returns a direct pointer to the data attribute of the last received packet, if available.
/// Returns NULL if no valid pointer or packet is available.
-const char * DTSC::Stream::lastData(){
-return datapointer;
+std::string & DTSC::Stream::lastData(){
+return *datapointer;
}
/// Returns the packet in this buffer number.
@@ -177,11 +177,11 @@ DTSC::DTMItype DTSC::DTMI::GetType(){return myType;};
/// Returns the numeric value of this object, if available.
/// If this object holds no numeric value, 0 is returned.
-uint64_t DTSC::DTMI::NumValue(){return numval;};
+uint64_t & DTSC::DTMI::NumValue(){return numval;};
/// Returns the std::string value of this object, if available.
/// If this object holds no string value, an empty string is returned.
-std::string DTSC::DTMI::StrValue(){return strval;};
+std::string & DTSC::DTMI::StrValue(){return strval;};
/// Returns the C-string value of this object, if available.
/// If this object holds no string value, an empty C-string is returned.
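A plausible reason for moving lastData() and the DTMI getters to references, inferred rather than stated in the commit: with StrValue() returning a copy, the removed line stored a pointer into a temporary std::string that is destroyed at the end of the statement. A small illustration of that hazard, using stand-in getters rather than the real DTSC API:

#include <string>
#include <cstdio>

// Stand-in for the old by-value getter; not the real DTSC::DTMI API.
static std::string StrValueByValue(){ return std::string("packet payload"); }
// Stand-in for the new by-reference getter.
static std::string & StrValueByRef(){ static std::string stored = "packet payload"; return stored; }

int main(){
  const char * dangling = StrValueByValue().c_str(); // the temporary std::string dies right here
  (void)dangling; // dereferencing it now would be undefined behaviour
  std::string * datapointer = &StrValueByRef(); // roughly what the new parsePacket() stores instead
  printf("%s\n", datapointer->c_str()); // valid for as long as the owning object lives
  return 0;
}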

View file

@@ -9,16 +9,39 @@
#include <deque>
#include <set>
// video
// codec (string)
// audio
// codec (string)
// samplingrate (int, Hz)
// samplesize (int, bytesize)
// channels (int, channelcount)
/// Holds all DDVTECH Stream Container classes and parsers.
///Video:
/// - codec (string: H264, H263, VP6)
/// - width (int, pixels)
/// - height (int, pixels)
/// - fpks (int, frames per kilosecond (FPS * 1000))
/// - bps (int, bytes per second)
/// - init (string, init data)
///
///Audio:
/// - codec (string: AAC, MP3)
/// - rate (int, Hz)
/// - size (int, bitsize)
/// - bps (int, bytes per second)
/// - channels (int, channelcount)
/// - init (string, init data)
///
///All packets:
/// - datatype (string: audio, video, meta (unused))
/// - data (string: data)
/// - time (int: ms into video)
///
///Video packets:
/// - keyframe (int, if set, is a seekable keyframe)
/// - interframe (int, if set, is a non-seekable interframe)
/// - disposableframe (int, if set, is a disposable interframe)
///
///H264 video packets:
/// - nalu (int, if set, is a nalu)
/// - nalu_end (int, if set, is an end-of-sequence)
/// - offset (int, unsigned version of signed int! Holds the ms offset between timestamp and proper display time for B-frames)
namespace DTSC{
/// Enumerates all possible DTMI types.
@@ -35,8 +58,8 @@ namespace DTSC{
public:
std::string Indice();
DTMItype GetType();
-uint64_t NumValue();
-std::string StrValue();
+uint64_t & NumValue();
+std::string & StrValue();
const char * Str();
int hasContent();
void addContent(DTMI c);
@@ -99,7 +122,7 @@ namespace DTSC{
DTSC::DTMI metadata;
DTSC::DTMI & getPacket(unsigned int num = 0);
datatype lastType();
-const char * lastData();
+std::string & lastData();
bool hasVideo();
bool hasAudio();
bool parsePacket(std::string & buffer);
@@ -112,7 +135,7 @@ namespace DTSC{
std::set<DTSC::Ring *> rings;
std::deque<DTSC::Ring> keyframes;
void advanceRings();
-const char * datapointer;
+std::string * datapointer;
datatype datapointertype;
unsigned int buffercount;
};
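The field list documented above is what the new FLV::Tag DTSC loaders elsewhere in this commit read back through getContentP()/StrValue()/NumValue(). As a rough illustration only, a hypothetical helper could inspect a stream like this (the include path and the pointer return type of getContentP() are inferred from how the rest of the commit uses it, and every lookup is guarded because fields may be absent):

#include <cstdio>
#include "dtsc.h" // path relative to util/, matching the other includes in this commit

// Hypothetical helper: print a few of the documented metadata fields.
void describeStream(DTSC::Stream & S){
  DTSC::DTMI * vid = S.metadata.getContentP("video");
  if (vid && vid->getContentP("codec")){
    fprintf(stderr, "video codec: %s\n", vid->getContentP("codec")->StrValue().c_str());
  }
  if (vid && vid->getContentP("width") && vid->getContentP("height")){
    fprintf(stderr, "resolution: %llu x %llu\n",
            (unsigned long long)vid->getContentP("width")->NumValue(),
            (unsigned long long)vid->getContentP("height")->NumValue());
  }
  DTSC::DTMI * aud = S.metadata.getContentP("audio");
  if (aud && aud->getContentP("codec") && aud->getContentP("rate")){
    fprintf(stderr, "audio: %s at %llu Hz\n",
            aud->getContentP("codec")->StrValue().c_str(),
            (unsigned long long)aud->getContentP("rate")->NumValue());
  }
}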

View file

@@ -2,6 +2,7 @@
/// Holds all code for the FLV namespace.
#include "flv_tag.h"
#include "amf.h"
#include "rtmpchunks.h"
#include <stdio.h> //for Tag::FileLoader
#include <unistd.h> //for Tag::FileLoader
@@ -245,6 +246,256 @@ FLV::Tag & FLV::Tag::operator= (const FLV::Tag& O){
return *this;
}//assignment operator
/// FLV loader function from DTSC.
/// Takes the DTSC data and makes it into FLV.
bool FLV::Tag::DTSCLoader(DTSC::Stream & S){
switch (S.lastType()){
case DTSC::VIDEO:
len = S.lastData().length() + 16;
if (S.metadata.getContentP("video") && S.metadata.getContentP("video")->getContentP("codec")){
if (S.metadata.getContentP("video")->getContentP("codec")->StrValue() == "H264"){len += 4;}
}
break;
case DTSC::AUDIO:
len = S.lastData().length() + 16;
if (S.metadata.getContentP("audio") && S.metadata.getContentP("audio")->getContentP("codec")){
if (S.metadata.getContentP("audio")->getContentP("codec")->StrValue() == "AAC"){len += 1;}
}
break;
case DTSC::META:
len = S.lastData().length() + 15;
break;
default://ignore all other types (there are currently no other types...)
break;
}
if (len > 0){
if (!data){
data = (char*)malloc(len);
buf = len;
}else{
if (buf < len){
data = (char*)realloc(data, len);
buf = len;
}
}
switch (S.lastType()){
case DTSC::VIDEO:
if ((unsigned int)len == S.lastData().length() + 16){
memcpy(data+12, S.lastData().c_str(), S.lastData().length());
}else{
memcpy(data+16, S.lastData().c_str(), S.lastData().length());
if (S.getPacket().getContentP("nalu")){data[12] = 1;}else{data[12] = 2;}
int offset = S.getPacket().getContentP("offset")->NumValue();
data[13] = (offset >> 16) & 0xFF;
data[14] = (offset >> 8) & 0XFF;
data[15] = offset & 0xFF;
}
data[11] = 0;
if (S.metadata.getContentP("video")->getContentP("codec")->StrValue() == "H264"){data[11] += 7;}
if (S.metadata.getContentP("video")->getContentP("codec")->StrValue() == "H263"){data[11] += 2;}
if (S.getPacket().getContentP("keyframe")){data[11] += 0x10;}
if (S.getPacket().getContentP("interframe")){data[11] += 0x20;}
if (S.getPacket().getContentP("disposableframe")){data[11] += 0x30;}
break;
case DTSC::AUDIO:
if ((unsigned int)len == S.lastData().length() + 16){
memcpy(data+12, S.lastData().c_str(), S.lastData().length());
}else{
memcpy(data+13, S.lastData().c_str(), S.lastData().length());
data[12] = 1;//raw AAC data, not sequence header
}
data[11] = 0;
if (S.metadata.getContentP("audio")->getContentP("codec")->StrValue() == "AAC"){data[11] += 0xA0;}
if (S.metadata.getContentP("audio")->getContentP("codec")->StrValue() == "MP3"){data[11] += 0x20;}
if (S.metadata.getContentP("audio")->getContentP("rate")->NumValue() == 11025){data[11] += 0x04;}
if (S.metadata.getContentP("audio")->getContentP("rate")->NumValue() == 22050){data[11] += 0x08;}
if (S.metadata.getContentP("audio")->getContentP("rate")->NumValue() == 44100){data[11] += 0x0C;}
if (S.metadata.getContentP("audio")->getContentP("size")->NumValue() == 16){data[11] += 0x02;}
if (S.metadata.getContentP("audio")->getContentP("channels")->NumValue() > 1){data[11] += 0x01;}
break;
case DTSC::META:
memcpy(data+11, S.lastData().c_str(), S.lastData().length());
break;
default: break;
}
}
((unsigned int *)(data+len-4))[0] = len-15;
switch (S.lastType()){
case DTSC::VIDEO: data[0] = 0x09; break;
case DTSC::AUDIO: data[0] = 0x08; break;
case DTSC::META: data[0] = 0x12; break;
default: break;
}
data[1] = ((len-15) >> 16) & 0xFF;
data[2] = ((len-15) >> 8) & 0xFF;
data[3] = (len-15) & 0xFF;
tagTime(S.getPacket().getContentP("time")->NumValue());
return true;
}
/// FLV Video init data loader function from DTSC.
/// Takes the DTSC Video init data and makes it into FLV.
/// Assumes init data is available - so check before calling!
bool FLV::Tag::DTSCVideoInit(DTSC::Stream & S){
if (S.metadata.getContentP("video")->getContentP("codec")->StrValue() == "H264"){
len = S.metadata.getContentP("video")->getContentP("init")->StrValue().length() + 20;
}
if (len > 0){
if (!data){
data = (char*)malloc(len);
buf = len;
}else{
if (buf < len){
data = (char*)realloc(data, len);
buf = len;
}
}
memcpy(data+16, S.metadata.getContentP("video")->getContentP("init")->StrValue().c_str(), len-20);
data[12] = 0;//H264 sequence header
data[13] = 0;
data[14] = 0;
data[15] = 0;
data[11] = 0x57;//H264 init data (0x07 & 0x50)
}
((unsigned int *)(data+len-4))[0] = len-15;
switch (S.lastType()){
case DTSC::VIDEO: data[0] = 0x09; break;
case DTSC::AUDIO: data[0] = 0x08; break;
case DTSC::META: data[0] = 0x12; break;
default: break;
}
data[1] = ((len-15) >> 16) & 0xFF;
data[2] = ((len-15) >> 8) & 0xFF;
data[3] = (len-15) & 0xFF;
tagTime(0);
return true;
}
/// FLV Audio init data loader function from DTSC.
/// Takes the DTSC Audio init data and makes it into FLV.
/// Assumes init data is available - so check before calling!
bool FLV::Tag::DTSCAudioInit(DTSC::Stream & S){
len = 0;
if (S.metadata.getContentP("audio")->getContentP("codec")->StrValue() == "AAC"){
len = S.metadata.getContentP("audio")->getContentP("init")->StrValue().length() + 17;
}
if (len > 0){
if (!data){
data = (char*)malloc(len);
buf = len;
}else{
if (buf < len){
data = (char*)realloc(data, len);
buf = len;
}
}
memcpy(data+13, S.metadata.getContentP("audio")->getContentP("init")->StrValue().c_str(), len-17);
data[12] = 0;//AAC sequence header
data[11] = 0;
if (S.metadata.getContentP("audio")->getContentP("codec")->StrValue() == "AAC"){data[11] += 0xA0;}
if (S.metadata.getContentP("audio")->getContentP("codec")->StrValue() == "MP3"){data[11] += 0x20;}
if (S.metadata.getContentP("audio")->getContentP("rate")->NumValue() == 11000){data[11] += 0x04;}
if (S.metadata.getContentP("audio")->getContentP("rate")->NumValue() == 22000){data[11] += 0x08;}
if (S.metadata.getContentP("audio")->getContentP("rate")->NumValue() == 44000){data[11] += 0x0C;}
if (S.metadata.getContentP("audio")->getContentP("size")->NumValue() == 16){data[11] += 0x02;}
if (S.metadata.getContentP("audio")->getContentP("channels")->NumValue() > 1){data[11] += 0x01;}
}
((unsigned int *)(data+len-4))[0] = len-15;
switch (S.lastType()){
case DTSC::VIDEO: data[0] = 0x09; break;
case DTSC::AUDIO: data[0] = 0x08; break;
case DTSC::META: data[0] = 0x12; break;
default: break;
}
data[1] = ((len-15) >> 16) & 0xFF;
data[2] = ((len-15) >> 8) & 0xFF;
data[3] = (len-15) & 0xFF;
tagTime(0);
return true;
}
/// FLV metadata loader function from DTSC.
/// Takes the DTSC metadata and makes it into FLV.
/// Assumes metadata is available - so check before calling!
bool FLV::Tag::DTSCMetaInit(DTSC::Stream & S){
AMF::Object amfdata("root", AMF::AMF0_DDV_CONTAINER);
amfdata.addContent(AMF::Object("", "onMetaData"));
amfdata.addContent(AMF::Object("", AMF::AMF0_ECMA_ARRAY));
if (S.metadata.getContentP("video")){
amfdata.getContentP(1)->addContent(AMF::Object("hasVideo", 1, AMF::AMF0_BOOL));
if (S.metadata.getContentP("video")->getContentP("codec")->StrValue() == "H264"){
amfdata.getContentP(1)->addContent(AMF::Object("videocodecid", 7, AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("video")->getContentP("codec")->StrValue() == "VP6"){
amfdata.getContentP(1)->addContent(AMF::Object("videocodecid", 4, AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("video")->getContentP("codec")->StrValue() == "H263"){
amfdata.getContentP(1)->addContent(AMF::Object("videocodecid", 2, AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("video")->getContentP("width")){
amfdata.getContentP(1)->addContent(AMF::Object("width", S.metadata.getContentP("video")->getContentP("width")->NumValue(), AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("video")->getContentP("height")){
amfdata.getContentP(1)->addContent(AMF::Object("height", S.metadata.getContentP("video")->getContentP("height")->NumValue(), AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("video")->getContentP("fpks")){
amfdata.getContentP(1)->addContent(AMF::Object("framerate", (double)S.metadata.getContentP("video")->getContentP("fpks")->NumValue() / 1000.0, AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("video")->getContentP("bps")){
amfdata.getContentP(1)->addContent(AMF::Object("videodatarate", ((double)S.metadata.getContentP("video")->getContentP("bps")->NumValue() * 8.0) / 1024.0, AMF::AMF0_NUMBER));
}
}
if (S.metadata.getContentP("audio")){
amfdata.getContentP(1)->addContent(AMF::Object("hasAudio", 1, AMF::AMF0_BOOL));
amfdata.getContentP(1)->addContent(AMF::Object("audiodelay", 0, AMF::AMF0_NUMBER));
if (S.metadata.getContentP("audio")->getContentP("codec")->StrValue() == "AAC"){
amfdata.getContentP(1)->addContent(AMF::Object("audiocodecid", 10, AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("audio")->getContentP("codec")->StrValue() == "MP3"){
amfdata.getContentP(1)->addContent(AMF::Object("audiocodecid", 2, AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("audio")->getContentP("channels")){
if (S.metadata.getContentP("audio")->getContentP("channels")->NumValue() > 1){
amfdata.getContentP(1)->addContent(AMF::Object("stereo", 1, AMF::AMF0_BOOL));
}else{
amfdata.getContentP(1)->addContent(AMF::Object("stereo", 0, AMF::AMF0_BOOL));
}
}
if (S.metadata.getContentP("audio")->getContentP("rate")){
amfdata.getContentP(1)->addContent(AMF::Object("audiosamplerate", S.metadata.getContentP("audio")->getContentP("rate")->NumValue(), AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("audio")->getContentP("size")){
amfdata.getContentP(1)->addContent(AMF::Object("audiosamplesize", S.metadata.getContentP("audio")->getContentP("size")->NumValue(), AMF::AMF0_NUMBER));
}
if (S.metadata.getContentP("audio")->getContentP("bps")){
amfdata.getContentP(1)->addContent(AMF::Object("audiodatarate", ((double)S.metadata.getContentP("audio")->getContentP("bps")->NumValue() * 8.0) / 1024.0, AMF::AMF0_NUMBER));
}
}
std::string tmp = amfdata.Pack();
len = tmp.length() + 15;
if (len > 0){
if (!data){
data = (char*)malloc(len);
buf = len;
}else{
if (buf < len){
data = (char*)realloc(data, len);
buf = len;
}
}
memcpy(data+11, tmp.c_str(), len-15);
}
((unsigned int *)(data+len-4))[0] = len-15;
data[0] = 0x12;
data[1] = ((len-15) >> 16) & 0xFF;
data[2] = ((len-15) >> 8) & 0xFF;
data[3] = (len-15) & 0xFF;
tagTime(0);
return true;
}
/// FLV loader function from chunk.
/// Copies the contents and wraps it in a FLV header.
bool FLV::Tag::ChunkLoader(const RTMPStream::Chunk& O){

View file

@@ -3,6 +3,7 @@
#pragma once
#include "socket.h"
#include "dtsc.h"
#include <string>
//forward declaration of RTMPStream::Chunk to avoid circular dependencies.
@@ -38,6 +39,10 @@ namespace FLV {
Tag(const RTMPStream::Chunk& O); ///<Copy constructor from a RTMP chunk.
//loader functions
bool ChunkLoader(const RTMPStream::Chunk& O);
bool DTSCLoader(DTSC::Stream & S);
bool DTSCVideoInit(DTSC::Stream & S);
bool DTSCAudioInit(DTSC::Stream & S);
bool DTSCMetaInit(DTSC::Stream & S);
bool MemLoader(char * D, unsigned int S, unsigned int & P);
bool SockLoader(int sock);
bool SockLoader(Socket::Connection sock);
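The new loader declarations above rely on the caller checking that the relevant init data actually exists before calling, as the matching doc comments in flv_tag.cpp say. A sketch of that check-then-call order, closely mirroring the Progressive() handler added in this commit (the helper name is hypothetical and connection setup is elided):

#include "flv_tag.h" // pulls in dtsc.h and socket.h as of this commit

// Hypothetical helper: send the FLV file header, metadata, and any available init data, in that order.
void sendFLVHeaders(Socket::Connection & conn, DTSC::Stream & Strm){
  FLV::Tag tmp;
  conn.write(FLV::Header, 13); // 13-byte FLV file header
  tmp.DTSCMetaInit(Strm);      // onMetaData tag built from Strm.metadata
  conn.write(tmp.data, tmp.len);
  if (Strm.metadata.getContentP("audio") && Strm.metadata.getContentP("audio")->getContentP("init")){
    tmp.DTSCAudioInit(Strm);   // only called when audio init data is actually present
    conn.write(tmp.data, tmp.len);
  }
  if (Strm.metadata.getContentP("video") && Strm.metadata.getContentP("video")->getContentP("init")){
    tmp.DTSCVideoInit(Strm);   // only called when video init data is actually present
    conn.write(tmp.data, tmp.len);
  }
}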

View file

@@ -45,7 +45,7 @@ namespace Socket{
int iwrite(const void * buffer, int len); ///< Incremental write call.
int iread(void * buffer, int len); ///< Incremental read call.
bool read(std::string & buffer); ///< Read call that is compatible with std::string.
-bool swrite(std::string & buffer); ///< Read call that is compatible with std::string.
+bool swrite(std::string & buffer); ///< Write call that is compatible with std::string.
bool iread(std::string & buffer); ///< Incremental write call that is compatible with std::string.
bool iwrite(std::string & buffer); ///< Write call that is compatible with std::string.
void spool(); ///< Updates the downbuffer and upbuffer internal variables.