Compatibility improvements and simplification of RTMP push input.

Thulinma 2015-05-21 19:10:05 +02:00
parent 7e2c4a8318
commit 17aa6bbbb6
5 changed files with 90 additions and 147 deletions


@@ -1,7 +1,6 @@
 /// \file flv_tag.cpp
 /// Holds all code for the FLV namespace.
-#include "amf.h"
 #include "rtmpchunks.h"
 #include "flv_tag.h"
 #include "timing.h"
@@ -1033,9 +1032,18 @@ bool FLV::Tag::FileLoader(FILE * f) {
   return false;
 } //FLV_GetPacket
-JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata) {
+JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata, AMF::Object & amf_storage, unsigned int reTrack) {
   JSON::Value pack_out; // Storage for outgoing metadata.
+  if (!reTrack){
+    switch (data[0]){
+      case 0x09: reTrack = 1; break;//video
+      case 0x08: reTrack = 2; break;//audio
+      case 0x12: reTrack = 3; break;//meta
+    }
+  }
+  pack_out["trackid"] = reTrack;
   if (data[0] == 0x12) {
     AMF::Object meta_in = AMF::parse((unsigned char *)data + 11, len - 15);
     AMF::Object * tmp = 0;
@@ -1047,126 +1055,97 @@ JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata) {
       }
     }
     if (tmp) {
-      if (tmp->getContentP("width")) {
-        metadata.tracks[1].width = (long long int)tmp->getContentP("width")->NumValue();
-      } else {
-        metadata.tracks[1].width = 0;
-      }
-      if (tmp->getContentP("height")) {
-        metadata.tracks[1].height = (long long int)tmp->getContentP("height")->NumValue();
-      } else {
-        metadata.tracks[1].height = 0;
-      }
-      if (tmp->getContentP("videoframerate")) {
-        if (tmp->getContentP("videoframerate")->NumValue()){
-          metadata.tracks[1].fpks = (long long int)(tmp->getContentP("videoframerate")->NumValue() * 1000.0);
-        }else{
-          metadata.tracks[1].fpks = JSON::Value(tmp->getContentP("videoframerate")->StrValue()).asInt() * 1000.0;
-        }
-      } else {
-        metadata.tracks[1].fpks = 0;
-      }
-      if (tmp->getContentP("videodatarate")) {
-        metadata.tracks[1].bps = (long long int)(tmp->getContentP("videodatarate")->NumValue() * 1024) / 8;
-      } else {
-        metadata.tracks[1].bps = 0;
-      }
-      if (tmp->getContentP("audiodatarate")) {
-        metadata.tracks[2].bps = (long long int)(tmp->getContentP("audiodatarate")->NumValue() * 1024) / 8;
-      } else {
-        metadata.tracks[2].bps = 0;
-      }
-      if (tmp->getContentP("audiosamplerate")) {
-        metadata.tracks[2].rate = (long long int)tmp->getContentP("audiosamplerate")->NumValue();
-      } else {
-        metadata.tracks[2].rate = 0;
-      }
-      if (tmp->getContentP("audiosamplesize")) {
-        metadata.tracks[2].size = (long long int)tmp->getContentP("audiosamplesize")->NumValue();
-      } else {
-        metadata.tracks[2].size = 0;
-      }
-      if (tmp->getContentP("stereo")) {
-        if (tmp->getContentP("stereo")->NumValue() == 1) {
-          metadata.tracks[2].channels = 2;
-        } else {
-          metadata.tracks[2].channels = 1;
-        }
-      } else {
-        metadata.tracks[2].channels = 1;
-      }
+      amf_storage = *tmp;
+      bool empty = true;
       for (int i = 0; i < tmp->hasContent(); ++i) {
         if (tmp->getContentP(i)->Indice() == "videocodecid" || tmp->getContentP(i)->Indice() == "audiocodecid" || tmp->getContentP(i)->Indice() == "width" || tmp->getContentP(i)->Indice() == "height" || tmp->getContentP(i)->Indice() == "videodatarate" || tmp->getContentP(i)->Indice() == "videoframerate" || tmp->getContentP(i)->Indice() == "audiodatarate" || tmp->getContentP(i)->Indice() == "audiosamplerate" || tmp->getContentP(i)->Indice() == "audiosamplesize" || tmp->getContentP(i)->Indice() == "audiochannels") {
           continue;
         }
         if (tmp->getContentP(i)->NumValue()) {
           pack_out["data"][tmp->getContentP(i)->Indice()] = (long long)tmp->getContentP(i)->NumValue();
+          empty = false;
         } else {
           if (tmp->getContentP(i)->StrValue() != "") {
             pack_out["data"][tmp->getContentP(i)->Indice()] = tmp->getContentP(i)->StrValue();
+            empty = false;
           }
         }
       }
-      if (pack_out) {
+      if (!empty) {
         pack_out["datatype"] = "meta";
         pack_out["time"] = tagTime();
+      }else{
+        pack_out.null();
       }
     }
     return pack_out; //empty
   }
   if (data[0] == 0x08) {
     char audiodata = data[11];
-    metadata.tracks[2].trackID = 2;
-    metadata.tracks[2].type = "audio";
-    if (metadata.tracks[2].codec == "") {
-      metadata.tracks[2].codec = getAudioCodec();
+    metadata.tracks[reTrack].trackID = reTrack;
+    metadata.tracks[reTrack].type = "audio";
+    if (metadata.tracks[reTrack].codec == "") {
+      metadata.tracks[reTrack].codec = getAudioCodec();
     }
-    if (!metadata.tracks[2].rate) {
+    if (!metadata.tracks[reTrack].rate) {
       switch (audiodata & 0x0C) {
         case 0x0:
-          metadata.tracks[2].rate = 5512;
+          metadata.tracks[reTrack].rate = 5512;
          break;
        case 0x4:
-          metadata.tracks[2].rate = 11025;
+          metadata.tracks[reTrack].rate = 11025;
          break;
        case 0x8:
-          metadata.tracks[2].rate = 22050;
+          metadata.tracks[reTrack].rate = 22050;
          break;
        case 0xC:
-          metadata.tracks[2].rate = 44100;
+          metadata.tracks[reTrack].rate = 44100;
          break;
      }
+      if (amf_storage.getContentP("audiosamplerate")) {
+        metadata.tracks[reTrack].rate = (long long int)amf_storage.getContentP("audiosamplerate")->NumValue();
+      }
    }
-    if (!metadata.tracks[2].size) {
+    if (!metadata.tracks[reTrack].size) {
      switch (audiodata & 0x02) {
        case 0x0:
-          metadata.tracks[2].size = 8;
+          metadata.tracks[reTrack].size = 8;
          break;
        case 0x2:
-          metadata.tracks[2].size = 16;
+          metadata.tracks[reTrack].size = 16;
          break;
      }
+      if (amf_storage.getContentP("audiosamplesize")) {
+        metadata.tracks[reTrack].size = (long long int)amf_storage.getContentP("audiosamplesize")->NumValue();
+      }
    }
-    if (!metadata.tracks[2].channels) {
+    if (!metadata.tracks[reTrack].channels) {
      switch (audiodata & 0x01) {
        case 0x0:
-          metadata.tracks[2].channels = 1;
+          metadata.tracks[reTrack].channels = 1;
          break;
        case 0x1:
-          metadata.tracks[2].channels = 2;
+          metadata.tracks[reTrack].channels = 2;
          break;
      }
+      if (amf_storage.getContentP("stereo")) {
+        if (amf_storage.getContentP("stereo")->NumValue() == 1) {
+          metadata.tracks[reTrack].channels = 2;
+        } else {
+          metadata.tracks[reTrack].channels = 1;
+        }
+      }
    }
     if (needsInitData() && isInitData()) {
       if ((audiodata & 0xF0) == 0xA0) {
-        metadata.tracks[2].init = std::string((char *)data + 13, (size_t)len - 17);
+        metadata.tracks[reTrack].init = std::string((char *)data + 13, (size_t)len - 17);
       } else {
-        metadata.tracks[2].init = std::string((char *)data + 12, (size_t)len - 16);
+        metadata.tracks[reTrack].init = std::string((char *)data + 12, (size_t)len - 16);
       }
+      pack_out.null();
       return pack_out; //skip rest of parsing, get next tag.
     }
     pack_out["time"] = tagTime();
-    pack_out["trackid"] = 2;
     if ((audiodata & 0xF0) == 0xA0) {
       if (len < 18) {
         return JSON::Value();
@@ -1182,36 +1161,43 @@ JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata) {
   }
   if (data[0] == 0x09) {
     char videodata = data[11];
-    if (metadata.tracks[1].codec == "") {
-      metadata.tracks[1].codec = getVideoCodec();
+    if (metadata.tracks[reTrack].codec == "") {
+      metadata.tracks[reTrack].codec = getVideoCodec();
     }
+    metadata.tracks[reTrack].type = "video";
+    metadata.tracks[reTrack].trackID = reTrack;
+    if (!metadata.tracks[reTrack].width || !metadata.tracks[reTrack].height){
+      if (amf_storage.getContentP("width")) {
+        metadata.tracks[reTrack].width = (long long int)amf_storage.getContentP("width")->NumValue();
+      }
+      if (amf_storage.getContentP("height")) {
+        metadata.tracks[reTrack].height = (long long int)amf_storage.getContentP("height")->NumValue();
+      }
+    }
+    if (!metadata.tracks[reTrack].fpks && amf_storage.getContentP("videoframerate")) {
+      if (amf_storage.getContentP("videoframerate")->NumValue()){
+        metadata.tracks[reTrack].fpks = (long long int)(amf_storage.getContentP("videoframerate")->NumValue() * 1000.0);
+      }else{
+        metadata.tracks[reTrack].fpks = JSON::Value(amf_storage.getContentP("videoframerate")->StrValue()).asInt() * 1000.0;
+      }
+    }
-    metadata.tracks[1].type = "video";
-    metadata.tracks[1].trackID = 1;
     if (needsInitData() && isInitData()) {
       if ((videodata & 0x0F) == 7) {
         if (len < 21) {
           return JSON::Value();
         }
-        metadata.tracks[1].init = std::string((char *)data + 16, (size_t)len - 20);
+        metadata.tracks[reTrack].init = std::string((char *)data + 16, (size_t)len - 20);
       } else {
         if (len < 17) {
           return JSON::Value();
         }
-        metadata.tracks[1].init = std::string((char *)data + 12, (size_t)len - 16);
+        metadata.tracks[reTrack].init = std::string((char *)data + 12, (size_t)len - 16);
       }
+      pack_out.null();
       return pack_out; //skip rest of parsing, get next tag.
     }
-    pack_out["trackid"] = 1;
     switch (videodata & 0xF0) {
       case 0x10:
-        pack_out["keyframe"] = 1;
-        break;
-      case 0x20:
-        pack_out["interframe"] = 1;
-        break;
-      case 0x30:
-        pack_out["disposableframe"] = 1;
-        break;
       case 0x40:
         pack_out["keyframe"] = 1;
         break;
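For reference, the reworked FLV::Tag::toJSON() is driven much like the callers further down in this commit. A minimal sketch (illustrative only, not part of the commit; the surrounding objects are assumed to be set up as in those callers):

    FLV::Tag tag;               // filled by FileLoader() or ChunkLoader()
    DTSC::Meta metadata;
    AMF::Object amf_storage;    // carries the last onMetaData fields between tags
    JSON::Value pack = tag.toJSON(metadata, amf_storage); // reTrack defaults to 0
    // With reTrack == 0, the track ID is derived from the FLV tag type byte:
    //   data[0] == 0x09 -> track 1 (video)
    //   data[0] == 0x08 -> track 2 (audio)
    //   data[0] == 0x12 -> track 3 (metadata)
    // A caller may pass a non-zero reTrack to place the tag on a specific track,
    // as the RTMP output below does.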


@@ -5,6 +5,7 @@
 #include "socket.h"
 #include "dtsc.h"
 #include "json.h"
+#include "amf.h"
 #include <string>
@@ -52,7 +53,7 @@ namespace FLV {
       bool DTSCAudioInit(DTSC::Track & audio);
       bool DTSCMetaInit(DTSC::Meta & M, std::set<long unsigned int> & selTracks);
       bool DTSCMetaInit(DTSC::Stream & S, DTSC::Track & videoRef, DTSC::Track & audioRef);
-      JSON::Value toJSON(DTSC::Meta & metadata);
+      JSON::Value toJSON(DTSC::Meta & metadata, AMF::Object & amf_storage, unsigned int reTrack = 0);
       bool MemLoader(char * D, unsigned int S, unsigned int & P);
       bool FileLoader(FILE * f);
     protected:


@@ -67,11 +67,12 @@ namespace Mist {
     //Create header file from FLV data
     fseek(inFile, 13, SEEK_SET);
     FLV::Tag tmpTag;
+    AMF::Object amf_storage;
     long long int lastBytePos = 13;
     while (!feof(inFile) && !FLV::Parse_Error){
       if (tmpTag.FileLoader(inFile)){
         lastPack.null();
-        lastPack = tmpTag.toJSON(myMeta);
+        lastPack = tmpTag.toJSON(myMeta, amf_storage);
         lastPack["bpos"] = lastBytePos;
         myMeta.update(lastPack);
         lastBytePos = ftell(inFile);
@@ -89,12 +90,13 @@ namespace Mist {
   void inputFLV::getNext(bool smart) {
     static JSON::Value thisPack;
+    static AMF::Object amf_storage;
     thisPack.null();
     long long int lastBytePos = ftell(inFile);
     FLV::Tag tmpTag;
     while (!feof(inFile) && !FLV::Parse_Error){
       if (tmpTag.FileLoader(inFile)){
-        thisPack = tmpTag.toJSON(myMeta);
+        thisPack = tmpTag.toJSON(myMeta, amf_storage);
         thisPack["bpos"] = lastBytePos;
         if ( !selectedTracks.count(thisPack["trackid"].asInt())){
           getNext();


@@ -792,60 +792,28 @@ namespace Mist {
      case 8: //audio data
      case 9: //video data
      case 18: {//meta data
-        pushData & p = pushes[next.cs_id];
+        static std::map<unsigned int, AMF::Object> pushMeta;
        if (!isInitialized) {
          DEBUG_MSG(DLVL_MEDIUM, "Received useless media data\n");
          myConn.close();
          break;
        }
        F.ChunkLoader(next);
-        JSON::Value pack_out = F.toJSON(p.meta);
+        AMF::Object * amf_storage = 0;
+        if (F.data[0] == 0x12 || pushMeta.count(next.cs_id) || !pushMeta.size()){
+          amf_storage = &(pushMeta[next.cs_id]);
+        }else{
+          amf_storage = &(pushMeta.begin()->second);
+        }
+        JSON::Value pack_out = F.toJSON(myMeta, *amf_storage, next.cs_id*3 + (F.data[0] == 0x09 ? 0 : (F.data[0] == 0x08 ? 1 : 2) ));
        if ( !pack_out.isNull()){
-          //Check for backwards timestamps
-          if (pack_out["time"].asInt() < p.meta.tracks[pack_out["trackid"].asInt()].lastms){
-            ///Reset all internals
-            p.sending = false;
-            p.counter = 0;
-            p.preBuf.clear();
-            p.meta = DTSC::Meta();
-            pack_out = F.toJSON(p.meta);//Reinitialize the metadata with this packet.
-            ///Reset negotiation with buffer
-            userClient.finish();
-            userClient = IPC::sharedClient(streamName + "_users", PLAY_EX_SIZE, true);
-          }
-          pack_out["trackid"] = pack_out["trackid"].asInt() + next.cs_id * 3;
-          if ( !p.sending){
-            p.counter++;
-            if (p.counter > 8){
-              p.sending = true;
-              if (myMeta.tracks.count(1)){
-                myMeta = DTSC::Meta();
-              }
-              for (unsigned int i = 1; i < 4; ++i){
-                if (p.meta.tracks.count(i)){
-                  myMeta.tracks[next.cs_id*3+i] = p.meta.tracks[i];
-                }
-              }
-              if (!userClient.getData()){
-                char userPageName[NAME_BUFFER_SIZE];
-                snprintf(userPageName, NAME_BUFFER_SIZE, SHM_USERS, streamName.c_str());
-                userClient = IPC::sharedClient(userPageName, 30, true);
-              }
-              for (std::map<unsigned int,DTSC::Track>::iterator it = myMeta.tracks.begin(); it != myMeta.tracks.end(); it++){
-                DEBUG_MSG(DLVL_MEDIUM, "Starting negotiation for track %d", it->first);
-                continueNegotiate(it->first);
-              }
-              for (std::deque<JSON::Value>::iterator it = p.preBuf.begin(); it != p.preBuf.end(); it++){
-                bufferLivePacket((*it));
-              }
-              p.preBuf.clear(); //clear buffer
-              bufferLivePacket(pack_out);
-            }else{
-              p.preBuf.push_back(pack_out);
-            }
-          }else{
-            bufferLivePacket(pack_out);
+          if (!userClient.getData()){
+            char userPageName[NAME_BUFFER_SIZE];
+            snprintf(userPageName, NAME_BUFFER_SIZE, SHM_USERS, streamName.c_str());
+            userClient = IPC::sharedClient(userPageName, 30, true);
          }
+          continueNegotiate(pack_out["trackid"].asInt());
+          bufferLivePacket(pack_out);
        }
        break;
      }
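The track IDs passed into toJSON() here follow a fixed scheme: three DTSC track slots per RTMP chunk stream, one per FLV tag type. A small sketch of that mapping (illustrative only; the helper name is made up, the constants come from the call above):

    // Each RTMP chunk stream gets three DTSC track slots.
    unsigned int pushTrackId(unsigned int cs_id, char tagType) {
      unsigned int offset = (tagType == 0x09) ? 0   // video
                          : (tagType == 0x08) ? 1   // audio
                                              : 2;  // 0x12, metadata
      return cs_id * 3 + offset;
    }
    // e.g. chunk stream 4 pushes video on track 12, audio on 13 and metadata on 14,
    // keeping tags from different chunk streams on separate tracks.
    // onMetaData contents are stored per chunk stream in pushMeta; an audio/video
    // chunk stream that never sent its own metadata falls back to the first entry.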


@@ -6,19 +6,6 @@
 namespace Mist {
-  class pushData {
-    public:
-      DTSC::Meta meta;
-      bool sending;
-      int counter;
-      std::deque<JSON::Value> preBuf;
-      pushData(){
-        sending = false;
-        counter = 0;
-      }
-  };
   class OutRTMP : public Output {
     public:
       OutRTMP(Socket::Connection & conn);
@@ -33,7 +20,6 @@ namespace Mist {
       void parseChunk(Socket::Buffer & inputBuffer);
       void parseAMFCommand(AMF::Object & amfData, int messageType, int streamId);
       void sendCommand(AMF::Object & amfReply, int messageType, int streamId);
-      std::map<unsigned int, pushData> pushes;
   };
 }