Merge branch 'development' into LTS_development

Thulinma 2016-10-11 15:21:58 +02:00
commit 7f6b919e4f
11 changed files with 211 additions and 228 deletions

View file

@@ -295,6 +295,7 @@ namespace DTSC {
std::deque<Part> parts;
std::deque<Ivec> ivecs; /*LTS*/
Key & getKey(unsigned int keyNum);
Fragment & getFrag(unsigned int fragNum);
unsigned int timeToKeynum(unsigned int timestamp);
unsigned int timeToFragnum(unsigned int timestamp);
void reset();
@@ -321,8 +322,11 @@ namespace DTSC {
int width;
int height;
int fpks;
void removeFirstKey();
uint32_t secsSinceFirstFragmentInsert();
private:
std::string cachedIdent;
std::deque<uint32_t> fragInsertTime;
};
///\brief Class for storage of meta data

View file

@@ -1233,6 +1233,8 @@ namespace DTSC {
newFrag.setDuration(0);
newFrag.setSize(0);
fragments.push_back(newFrag);
//We set the insert time (lastms - firstms) ms in the future, to prevent unstable playback
fragInsertTime.push_back(Util::bootSecs() + ((lastms - firstms)/1000));
} else {
Fragment & lastFrag = fragments[fragments.size() - 1];
lastFrag.setLength(lastFrag.getLength() + 1);
@@ -1242,6 +1244,41 @@
(*keySizes.rbegin()) += packSendSize;
fragments.rbegin()->setSize(fragments.rbegin()->getSize() + packDataSize);
}
/// Removes the first buffered key, including any fragments it was part of
void Track::removeFirstKey(){
HIGH_MSG("Erasing key %d:%lu", trackID, keys[0].getNumber());
//remove all parts of this key
for (int i = 0; i < keys[0].getParts(); i++) {
parts.pop_front();
}
//remove the key itself
keys.pop_front();
keySizes.pop_front();
//update firstms
firstms = keys[0].getTime();
//delete any fragments no longer fully buffered
while (fragments[0].getNumber() < keys[0].getNumber()) {
fragments.pop_front();
fragInsertTime.pop_front();
//and update the missed fragment counter
++missedFrags;
}
}
/// Returns the number of whole seconds since the first fragment was inserted into the buffer.
/// This assumes playback from the start of the buffer at time of insert, meaning that
/// the time is offset by that difference. E.g.: if a buffer is 50s long, the newest fragment
/// will have a value of 0 until 50s have passed, after which it will increase at a rate of
/// 1 per second.
uint32_t Track::secsSinceFirstFragmentInsert(){
uint32_t bs = Util::bootSecs();
if (bs > fragInsertTime.front()){
return bs - fragInsertTime.front();
}else{
return 0;
}
}
void Track::finalize(){
keys.rbegin()->setLength(lastms - keys.rbegin()->getTime() + parts.rbegin()->getDuration());
@@ -1270,6 +1307,7 @@
return keys[keyNum - keys[0].getNumber()];
}
/// Returns the number of the key containing timestamp, or the last key if no key contains it.
unsigned int Track::timeToKeynum(unsigned int timestamp){
unsigned int result = 0;
for (std::deque<Key>::iterator it = keys.begin(); it != keys.end(); it++){
@@ -1281,13 +1319,12 @@
return result;
}
/// Gets the index of the fragment containing timestamp, or the last fragment if no fragment contains it.
unsigned int Track::timeToFragnum(unsigned int timestamp){
unsigned long long int totalTime = firstms;
for (unsigned int i = 0; i<fragments.size(); i++){
if (timestamp <= totalTime){
if (timestamp <= getKey(fragments[i].getNumber()).getTime() + fragments[i].getDuration()){
return i;
}
totalTime += fragments[i].getDuration();
}
return fragments.size()-1;
}
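
The rewritten lookup no longer accumulates durations starting from firstms; it compares the timestamp against each fragment's end time (the time of the fragment's first key plus the fragment's duration) and falls back to the last fragment. A small worked example with invented values:

// Illustration with invented values: three fragments whose first keys start at
// 0, 4000 and 8000 ms, each fragment 4000 ms long.
//   timeToFragnum(3500)  -> 0   (3500  <= 0    + 4000)
//   timeToFragnum(4000)  -> 0   (4000  <= 0    + 4000)
//   timeToFragnum(7999)  -> 1   (7999  <= 4000 + 4000)
//   timeToFragnum(99999) -> 2   (no match; the last fragment is returned)
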
@@ -1295,6 +1332,7 @@
///\brief Resets a track, clears all meta values
void Track::reset() {
fragments.clear();
fragInsertTime.clear();
parts.clear();
keySizes.clear();
keys.clear();
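
Taken together, the fragInsertTime bookkeeping gives a buffer process a way to decide when the oldest data may be dropped: each fragment's insert time is recorded (lastms - firstms)/1000 seconds in the future, so secsSinceFirstFragmentInsert() only starts counting once a viewer that began playback at the head of the buffer could have reached that fragment. A minimal sketch of how a caller might combine this with removeFirstKey(); trimTrack and bufferWindowSecs are invented names, and the sketch assumes the keys and fragments deques remain publicly accessible:

//Sketch only: hypothetical trimming loop, not part of this commit.
#include <stdint.h>
#include "dtsc.h"
void trimTrack(DTSC::Track & trk, uint32_t bufferWindowSecs){
  //Drop the oldest key once the first fragment has been playable longer than the window.
  while (trk.keys.size() > 2 && trk.fragments.size() > 1 &&
         trk.secsSinceFirstFragmentInsert() > bufferWindowSecs){
    trk.removeFirstKey(); //also drops fragments that are no longer fully buffered
  }
}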

View file

@@ -518,7 +518,7 @@ void FLV::Tag::setLen() {
data[ --i] = (len4) & 0xFF;
}
/// FLV Video init data loader function from JSON.
/// FLV Video init data loader function from metadata.
bool FLV::Tag::DTSCVideoInit(DTSC::Track & video) {
//Unknown? Assume H264.
len = 0;
@@ -549,7 +549,7 @@ bool FLV::Tag::DTSCVideoInit(DTSC::Track & video) {
return true;
}
/// FLV Audio init data loader function from JSON.
/// FLV Audio init data loader function from metadata.
bool FLV::Tag::DTSCAudioInit(DTSC::Track & audio) {
len = 0;
//Unknown? Assume AAC.
@@ -709,6 +709,7 @@ bool FLV::Tag::ChunkLoader(const RTMPStream::Chunk & O) {
data[2] = (O.len >> 8) & 0xFF;
data[1] = (O.len >> 16) & 0xFF;
tagTime(O.timestamp);
isKeyframe = ((data[0] == 0x09) && (((data[11] & 0xf0) >> 4) == 1));
return true;
}
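
The keyframe flag is now computed in a single expression: data[0] is the FLV tag type (0x09 means video), and the high nibble of data[11], the first byte of the video payload, holds the frame type, where 1 denotes a keyframe. The same test, restated as a stand-alone sketch (the helper name is invented, not part of this commit):

//Sketch only: isVideoKeyframe is a hypothetical helper.
static bool isVideoKeyframe(const char * data){
  bool videoTag  = (data[0] == 0x09);      //FLV tag type byte: 0x09 = video
  int  frameType = (data[11] & 0xf0) >> 4; //high nibble of first payload byte
  return videoTag && (frameType == 1);     //frame type 1 = keyframe
}
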
@@ -794,11 +795,7 @@ bool FLV::Tag::MemLoader(char * D, unsigned int S, unsigned int & P) {
//read tag body
if (MemReadUntil(data, len, sofar, D, S, P)) {
//calculate keyframeness, next time read header again, return true
if ((data[0] == 0x09) && (((data[11] & 0xf0) >> 4) == 1)) {
isKeyframe = true;
} else {
isKeyframe = false;
}
isKeyframe = ((data[0] == 0x09) && (((data[11] & 0xf0) >> 4) == 1));
done = true;
sofar = 0;
return true;
@@ -892,11 +889,7 @@ bool FLV::Tag::FileLoader(FILE * f) {
//read tag body
if (FileReadUntil(data, len, sofar, f)) {
//calculate keyframeness, next time read header again, return true
if ((data[0] == 0x09) && (((data[11] & 0xf0) >> 4) == 1)) {
isKeyframe = true;
} else {
isKeyframe = false;
}
isKeyframe = ((data[0] == 0x09) && (((data[11] & 0xf0) >> 4) == 1));
done = true;
sofar = 0;
fcntl(fileno(f), F_SETFL, preflags);
@@ -944,9 +937,7 @@ unsigned int FLV::Tag::getDataLen(){
return len - 16;
}
JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata, AMF::Object & amf_storage, unsigned int reTrack) {
JSON::Value pack_out; // Storage for outgoing metadata.
void FLV::Tag::toMeta(DTSC::Meta & metadata, AMF::Object & amf_storage, unsigned int reTrack){
if (!reTrack){
switch (data[0]){
case 0x09: reTrack = 1; break;//video
@@ -954,7 +945,6 @@ JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata, AMF::Object & amf_storage, u
case 0x12: reTrack = 3; break;//meta
}
}
pack_out["trackid"] = reTrack;
if (data[0] == 0x12) {
AMF::Object meta_in = AMF::parse((unsigned char *)data + 11, len - 15);
@@ -968,78 +958,56 @@ JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata, AMF::Object & amf_storage, u
}
if (tmp) {
amf_storage = *tmp;
bool empty = true;
for (int i = 0; i < tmp->hasContent(); ++i) {
if (tmp->getContentP(i)->Indice() == "videocodecid" || tmp->getContentP(i)->Indice() == "audiocodecid" || tmp->getContentP(i)->Indice() == "width" || tmp->getContentP(i)->Indice() == "height" || tmp->getContentP(i)->Indice() == "videodatarate" || tmp->getContentP(i)->Indice() == "videoframerate" || tmp->getContentP(i)->Indice() == "audiodatarate" || tmp->getContentP(i)->Indice() == "audiosamplerate" || tmp->getContentP(i)->Indice() == "audiosamplesize" || tmp->getContentP(i)->Indice() == "audiochannels") {
continue;
}
if (tmp->getContentP(i)->NumValue()) {
pack_out["data"][tmp->getContentP(i)->Indice()] = (long long)tmp->getContentP(i)->NumValue();
empty = false;
} else {
if (tmp->getContentP(i)->StrValue() != "") {
pack_out["data"][tmp->getContentP(i)->Indice()] = tmp->getContentP(i)->StrValue();
empty = false;
}
}
}
if (!empty) {
pack_out["datatype"] = "meta";
pack_out["time"] = tagTime();
}else{
pack_out.null();
}
}
return pack_out; //empty
return;
}
if (data[0] == 0x08) {
if (data[0] == 0x08 && (metadata.tracks[reTrack].codec == "" || metadata.tracks[reTrack].codec != getAudioCodec() || (needsInitData() && isInitData()))) {
char audiodata = data[11];
metadata.tracks[reTrack].trackID = reTrack;
metadata.tracks[reTrack].type = "audio";
if (metadata.tracks[reTrack].codec == "" || metadata.tracks[reTrack].codec != getAudioCodec()) {
metadata.tracks[reTrack].codec = getAudioCodec();
switch (audiodata & 0x0C) {
case 0x0:
metadata.tracks[reTrack].rate = 5512;
break;
case 0x4:
metadata.tracks[reTrack].rate = 11025;
break;
case 0x8:
metadata.tracks[reTrack].rate = 22050;
break;
case 0xC:
metadata.tracks[reTrack].rate = 44100;
break;
}
if (amf_storage.getContentP("audiosamplerate")) {
metadata.tracks[reTrack].rate = (long long int)amf_storage.getContentP("audiosamplerate")->NumValue();
}
switch (audiodata & 0x02) {
case 0x0:
metadata.tracks[reTrack].size = 8;
break;
case 0x2:
metadata.tracks[reTrack].size = 16;
break;
}
if (amf_storage.getContentP("audiosamplesize")) {
metadata.tracks[reTrack].size = (long long int)amf_storage.getContentP("audiosamplesize")->NumValue();
}
switch (audiodata & 0x01) {
case 0x0:
metadata.tracks[reTrack].channels = 1;
break;
case 0x1:
metadata.tracks[reTrack].channels = 2;
break;
}
if (amf_storage.getContentP("stereo")) {
if (amf_storage.getContentP("stereo")->NumValue() == 1) {
metadata.tracks[reTrack].channels = 2;
} else {
metadata.tracks[reTrack].channels = 1;
}
metadata.tracks[reTrack].codec = getAudioCodec();
switch (audiodata & 0x0C) {
case 0x0:
metadata.tracks[reTrack].rate = 5512;
break;
case 0x4:
metadata.tracks[reTrack].rate = 11025;
break;
case 0x8:
metadata.tracks[reTrack].rate = 22050;
break;
case 0xC:
metadata.tracks[reTrack].rate = 44100;
break;
}
if (amf_storage.getContentP("audiosamplerate")) {
metadata.tracks[reTrack].rate = (long long int)amf_storage.getContentP("audiosamplerate")->NumValue();
}
switch (audiodata & 0x02) {
case 0x0:
metadata.tracks[reTrack].size = 8;
break;
case 0x2:
metadata.tracks[reTrack].size = 16;
break;
}
if (amf_storage.getContentP("audiosamplesize")) {
metadata.tracks[reTrack].size = (long long int)amf_storage.getContentP("audiosamplesize")->NumValue();
}
switch (audiodata & 0x01) {
case 0x0:
metadata.tracks[reTrack].channels = 1;
break;
case 0x1:
metadata.tracks[reTrack].channels = 2;
break;
}
if (amf_storage.getContentP("stereo")) {
if (amf_storage.getContentP("stereo")->NumValue() == 1) {
metadata.tracks[reTrack].channels = 2;
} else {
metadata.tracks[reTrack].channels = 1;
}
}
if (needsInitData() && isInitData()) {
@@ -1048,54 +1016,36 @@ JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata, AMF::Object & amf_storage, u
} else {
metadata.tracks[reTrack].init = std::string((char *)data + 12, (size_t)len - 16);
}
pack_out.null();
return pack_out; //skip rest of parsing, get next tag.
}
pack_out["time"] = tagTime();
if ((audiodata & 0xF0) == 0xA0) {
if (len < 18) {
return JSON::Value();
}
pack_out["data"] = std::string((char *)data + 13, (size_t)len - 17);
} else {
if (len < 17) {
return JSON::Value();
}
pack_out["data"] = std::string((char *)data + 12, (size_t)len - 16);
}
return pack_out;
}
if (data[0] == 0x09) {
if (data[0] == 0x09 && ((needsInitData() && isInitData()) || !metadata.tracks[reTrack].codec.size())){
char videodata = data[11];
if (metadata.tracks[reTrack].codec == "") {
metadata.tracks[reTrack].codec = getVideoCodec();
}
metadata.tracks[reTrack].codec = getVideoCodec();
metadata.tracks[reTrack].type = "video";
metadata.tracks[reTrack].trackID = reTrack;
if (!metadata.tracks[reTrack].width || !metadata.tracks[reTrack].height){
if (amf_storage.getContentP("width")) {
metadata.tracks[reTrack].width = (long long int)amf_storage.getContentP("width")->NumValue();
}
if (amf_storage.getContentP("height")) {
metadata.tracks[reTrack].height = (long long int)amf_storage.getContentP("height")->NumValue();
}
if (amf_storage.getContentP("width")) {
metadata.tracks[reTrack].width = (long long int)amf_storage.getContentP("width")->NumValue();
}
if (amf_storage.getContentP("height")) {
metadata.tracks[reTrack].height = (long long int)amf_storage.getContentP("height")->NumValue();
}
if (!metadata.tracks[reTrack].fpks && amf_storage.getContentP("videoframerate")) {
if (amf_storage.getContentP("videoframerate")->NumValue()){
metadata.tracks[reTrack].fpks = (long long int)(amf_storage.getContentP("videoframerate")->NumValue() * 1000.0);
}else{
metadata.tracks[reTrack].fpks = JSON::Value(amf_storage.getContentP("videoframerate")->StrValue()).asInt() * 1000.0;
metadata.tracks[reTrack].fpks = atoi(amf_storage.getContentP("videoframerate")->StrValue().c_str()) * 1000.0;
}
}
if (needsInitData() && isInitData()) {
if ((videodata & 0x0F) == 7) {
if (len < 21) {
return JSON::Value();
return;
}
metadata.tracks[reTrack].init = std::string((char *)data + 16, (size_t)len - 20);
} else {
if (len < 17) {
return JSON::Value();
return;
}
metadata.tracks[reTrack].init = std::string((char *)data + 12, (size_t)len - 16);
}
@@ -1108,48 +1058,9 @@ JSON::Value FLV::Tag::toJSON(DTSC::Meta & metadata, AMF::Object & amf_storage, u
metadata.tracks[reTrack].height = spsChar.height;
metadata.tracks[reTrack].fpks = spsChar.fps * 1000;
}
pack_out.null();
return pack_out; //skip rest of parsing, get next tag.
}
switch (videodata & 0xF0) {
case 0x10:
case 0x40:
pack_out["keyframe"] = 1;
break;
case 0x50:
return JSON::Value();
break; //the video info byte we just throw away - useless to us...
}
pack_out["time"] = tagTime();
if (!getDataLen()){
//empty packet
pack_out["data"] = "";
return pack_out;
}
if ((videodata & 0x0F) == 7) {
switch (data[12]) {
case 1:
pack_out["nalu"] = 1;
break;
case 2:
pack_out["nalu_end"] = 1;
break;
}
pack_out["offset"] = offset();
if (len < 21) {
return JSON::Value();
}
pack_out["data"] = std::string((char *)data + 16, (size_t)len - 20);
} else {
if (len < 17) {
return JSON::Value();
}
pack_out["data"] = std::string((char *)data + 12, (size_t)len - 16);
}
return pack_out;
}
return pack_out; //should never get here
} //FLV::Tag::toJSON
}
/// Checks if buf is large enough to contain len.
Attempts to resize the data buffer if not.
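
With the JSON intermediate gone, an FLV tag now writes codec, rate, size, channels, init data, width/height and fpks directly into the DTSC::Meta structure, and media payloads no longer yield a return value. A hypothetical call site showing the migration (ingestTag and the argument names are placeholders; the header name is an assumption):

//Sketch only: the new call updates the metadata in place.
#include "flv_tag.h" //assumed header location
void ingestTag(FLV::Tag & tag, DTSC::Meta & meta, AMF::Object & amfStorage, unsigned int trackId){
  //Previously: JSON::Value pack = tag.toJSON(meta, amfStorage, trackId);
  tag.toMeta(meta, amfStorage, trackId);
}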

View file

@@ -4,7 +4,6 @@
#pragma once
#include "socket.h"
#include "dtsc.h"
#include "json.h"
#include "amf.h"
#include <string>
@@ -51,7 +50,7 @@ namespace FLV {
bool DTSCVideoInit(DTSC::Track & video);
bool DTSCAudioInit(DTSC::Track & audio);
bool DTSCMetaInit(DTSC::Meta & M, std::set<long unsigned int> & selTracks);
JSON::Value toJSON(DTSC::Meta & metadata, AMF::Object & amf_storage, unsigned int reTrack = 0);
void toMeta(DTSC::Meta & metadata, AMF::Object & amf_storage, unsigned int reTrack = 0);
bool MemLoader(char * D, unsigned int S, unsigned int & P);
bool FileLoader(FILE * f);
unsigned int getTrackID();
@@ -66,10 +65,6 @@ namespace FLV {
//loader helper functions
bool MemReadUntil(char * buffer, unsigned int count, unsigned int & sofar, char * D, unsigned int S, unsigned int & P);
bool FileReadUntil(char * buffer, unsigned int count, unsigned int & sofar, FILE * f);
//JSON writer helpers
void Meta_Put(JSON::Value & meta, std::string cat, std::string elem, std::string val);
void Meta_Put(JSON::Value & meta, std::string cat, std::string elem, uint64_t val);
bool Meta_Has(JSON::Value & meta, std::string cat, std::string elem);
};
//Tag

View file

@@ -1,4 +1,8 @@
//This define makes ftello/fseeko work with 64-bit numbers
#define _FILE_OFFSET_BITS 64
#include "util.h"
#include <stdio.h>
#include <iostream>
namespace Util {
@@ -36,5 +40,19 @@ namespace Util {
}
return result.size() == positions.size();
}
/// 64-bit version of ftell
uint64_t ftell(FILE * stream){
/// \TODO Windows implementation (e.g. _ftelli64 ?)
return ftello(stream);
}
/// 64-bit version of fseek
uint64_t fseek(FILE * stream, uint64_t offset, int whence){
/// \TODO Windows implementation (e.g. _fseeki64 ?)
return fseeko(stream, offset, whence);
}
}
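
These wrappers let callers seek and report positions past the 2 GiB limit of a 32-bit long; the _FILE_OFFSET_BITS define above makes the underlying ftello/fseeko use 64-bit offsets. A minimal usage sketch (fileSize and its path argument are invented for illustration):

//Sketch only: fileSize is a hypothetical helper using the new wrappers.
#include <stdio.h>
#include <stdint.h>
#include "util.h"
uint64_t fileSize(const char * path){
  FILE * f = fopen(path, "rb");
  if (!f){return 0;}
  Util::fseek(f, 0, SEEK_END);    //64-bit seek to the end of the file
  uint64_t size = Util::ftell(f); //64-bit position
  fclose(f);
  return size;
}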

View file

@@ -3,4 +3,6 @@
namespace Util {
bool stringScan(const std::string & src, const std::string & pattern, std::deque<std::string> & result);
uint64_t ftell(FILE * stream);
uint64_t fseek(FILE * stream, uint64_t offset, int whence);
}