New Meta commit

This commit is contained in:
Phencys 2021-04-21 18:10:03 +02:00 committed by Thulinma
parent fccf66fba2
commit 2b99f2f5ea
183 changed files with 13333 additions and 14421 deletions

View file

@ -12,6 +12,7 @@ int spawnForked(Socket::Connection &S){
/// SIGUSR1 handler: flags a rolling restart and deactivates the process so
/// the main loop can wind down cleanly and be respawned.
void handleUSR1(int signum, siginfo_t *sigInfo, void *ignore){
  HIGH_MSG("USR1 received - triggering rolling restart");
  Util::Config::is_restarting = true;
  // Record why we are shutting down before flipping the active flag.
  Util::Config::logExitReason("setting is_active to false because of received USR1");
  Util::Config::is_active = false;
}

BIN
src/output/noffmpeg.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

BIN
src/output/noh264.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

File diff suppressed because it is too large Load diff

View file

@ -2,6 +2,7 @@
#include "../io.h"
#include <cstdlib>
#include <map>
#include <mist/comms.h>
#include <mist/config.h>
#include <mist/dtsc.h>
#include <mist/flv_tag.h>
@ -20,9 +21,10 @@ namespace Mist{
if (time < rhs.time){return true;}
return (time == rhs.time && tid < rhs.tid);
}
uint64_t tid;
size_t tid;
uint64_t time;
uint32_t offset;
uint64_t offset;
size_t partIndex;
};
/// The output class is intended to be inherited by MistOut process classes.
@ -44,18 +46,17 @@ namespace Mist{
// non-virtual generic functions
virtual int run();
virtual void stats(bool force = false);
void seek(unsigned long long pos, bool toKey = false);
bool seek(unsigned int tid, unsigned long long pos, bool getNextKey = false);
void seek(uint64_t pos, bool toKey = false);
bool seek(size_t tid, uint64_t pos, bool getNextKey);
void seekKeyframesIn(unsigned long long pos, unsigned long long maxDelta);
void stop();
uint64_t currentTime();
uint64_t startTime();
uint64_t endTime();
uint64_t liveTime();
void setBlocking(bool blocking);
void updateMeta();
void selectTrack(const std::string &trackType, const std::string &trackVal); /*LTS*/
bool selectDefaultTracks();
bool connectToFile(std::string file);
bool connectToFile(std::string file, bool append = false);
static bool listenMode(){return true;}
uint32_t currTrackCount() const;
virtual bool isReadyForPlay();
@ -64,11 +65,13 @@ namespace Mist{
/// This function is called whenever a packet is ready for sending.
/// Inside it, thisPacket is guaranteed to contain a valid packet.
virtual void sendNext(){}// REQUIRED! Others are optional.
bool getKeyFrame();
bool prepareNext();
virtual void dropTrack(uint32_t trackId, std::string reason, bool probablyBad = true);
virtual void dropTrack(size_t trackId, const std::string &reason, bool probablyBad = true);
virtual void onRequest();
static void listener(Util::Config &conf, int (*callback)(Socket::Connection &S));
virtual void initialSeek();
uint64_t getMinKeepAway();
virtual bool liveSeek();
virtual bool onFinish(){return false;}
void reconnect();
@ -80,6 +83,8 @@ namespace Mist{
static Util::Config *config;
void playbackSleep(uint64_t millis);
void selectAllTracks();
private: // these *should* not be messed with in child classes.
/*LTS-START*/
void Log(std::string type, std::string message);
@ -90,15 +95,16 @@ namespace Mist{
std::string getCountry(std::string ip);
void doSync(bool force = false);
/*LTS-END*/
std::map<unsigned long, unsigned int> currKeyOpen;
void loadPageForKey(long unsigned int trackId, long long int keyNum);
int pageNumForKey(long unsigned int trackId, long long int keyNum);
int pageNumMax(long unsigned int trackId);
std::map<size_t, size_t> currentPage;
void loadPageForKey(size_t trackId, size_t keyNum);
uint64_t pageNumForKey(size_t trackId, size_t keyNum);
uint64_t pageNumMax(size_t trackId);
bool isRecordingToFile;
unsigned int lastStats; ///< Time of last sending of stats.
std::map<unsigned long, unsigned long> nxtKeyNum; ///< Contains the number of the next key, for page seeking purposes.
uint64_t lastStats; ///< Time of last sending of stats.
std::set<sortedPageInfo> buffer; ///< A sorted list of next-to-be-loaded packets.
bool sought; ///< If a seek has been done, this is set to true. Used for seeking on prepareNext().
bool sought; ///< If a seek has been done, this is set to true. Used for seeking on
///< prepareNext().
protected: // these are to be messed with by child classes
virtual bool inlineRestartCapable() const{
return false;
@ -106,25 +112,28 @@ namespace Mist{
bool pushing;
std::map<std::string, std::string> targetParams; /*LTS*/
std::string UA; ///< User Agent string, if known.
uint16_t uaDelay; ///< Seconds to wait before setting the UA.
uint64_t uaDelay; ///< Seconds to wait before setting the UA.
uint64_t lastRecv;
uint64_t extraKeepAway;
long long unsigned int firstTime; ///< Time of first packet after last seek. Used for real-time sending.
uint64_t firstTime; ///< Time of first packet after last seek. Used for real-time sending.
virtual std::string getConnectedHost();
virtual std::string getConnectedBinHost();
virtual std::string getStatsName();
virtual bool hasSessionIDs(){return false;}
IPC::sharedClient statsPage; ///< Shared memory used for statistics reporting.
bool isBlocking; ///< If true, indicates that myConn is blocking.
uint32_t crc; ///< Checksum, if any, for usage in the stats.
unsigned int getKeyForTime(long unsigned int trackId, long long timeStamp);
uint64_t nextKeyTime();
std::set<size_t> getSupportedTracks(const std::string &type = "") const;
inline bool keepGoing(){return config->is_active && myConn;}
Comms::Statistics statComm;
bool isBlocking; ///< If true, indicates that myConn is blocking.
uint32_t crc; ///< Checksum, if any, for usage in the stats.
// stream delaying variables
unsigned int maxSkipAhead; ///< Maximum ms that we will go ahead of the intended timestamps.
unsigned int realTime; ///< Playback speed in ms of data per second. eg: 0 is infinite, 1000 real-time, 5000 is 0.2X speed, 500 = 2X speed.
uint32_t needsLookAhead; ///< Amount of millis we need to be able to look ahead in the metadata
uint64_t maxSkipAhead; ///< Maximum ms that we will go ahead of the intended timestamps.
uint64_t realTime; ///< Playback speed in ms of wallclock time per data-second. eg: 0 is
///< infinite, 1000 real-time, 5000 is 0.2X speed, 500 = 2X speed.
uint64_t needsLookAhead; ///< Amount of millis we need to be able to look ahead in the metadata
// Read/write status variables
Socket::Connection &myConn; ///< Connection to the client.
@ -134,17 +143,17 @@ namespace Mist{
bool isInitialized; ///< If false, triggers initialization if parseData is true.
bool sentHeader; ///< If false, triggers sendHeader if parseData is true.
std::map<int, DTSCPageData> bookKeeping;
virtual bool isRecording();
virtual bool isFileTarget();
virtual bool isPushing(){return pushing;};
bool allowPush(const std::string &passwd);
void waitForStreamPushReady();
bool pushIsOngoing;
void bufferLivePacket(const DTSC::Packet &packet);
uint64_t firstPacketTime;
uint64_t lastPacketTime;
inline bool keepGoing(){return config->is_active && myConn;}
size_t thisIdx;
};
}// namespace Mist

678
src/output/output_cmaf.cpp Normal file
View file

@ -0,0 +1,678 @@
#include "output_cmaf.h"
#include <iomanip>
#include <mist/bitfields.h>
#include <mist/checksum.h>
#include <mist/cmaf.h>
#include <mist/defines.h>
#include <mist/encode.h>
#include <mist/langcodes.h> /*LTS*/
#include <mist/mp4.h>
#include <mist/mp4_dash.h>
#include <mist/mp4_encryption.h>
#include <mist/mp4_generic.h>
#include <mist/stream.h>
#include <mist/timing.h>
namespace Mist{
// Constructor: CMAF serves segments on request, so there is no User-Agent
// delay (uaDelay = 0) and no rate limiting (realTime = 0 means unthrottled,
// per the realTime documentation in the Output base class).
OutCMAF::OutCMAF(Socket::Connection &conn) : HTTPOutput(conn){
  uaDelay = 0;
  realTime = 0;
}

OutCMAF::~OutCMAF(){}
/// Registers this output's capabilities: name, URL patterns, supported
/// codecs, the three access methods (DASH, HLS v7, Smooth Streaming) and
/// the optional "nonchunked" command-line flag.
void OutCMAF::init(Util::Config *cfg){
  HTTPOutput::init(cfg);
  capa["name"] = "CMAF";
  capa["friendly"] = "CMAF (fMP4) over HTTP (DASH, HLS7, HSS)";
  capa["desc"] = "Segmented streaming in CMAF (fMP4) format over HTTP";
  capa["url_rel"] = "/cmaf/$/";
  capa["url_prefix"] = "/cmaf/$/";
  capa["socket"] = "http_dash_mp4";
  capa["codecs"][0u][0u].append("+H264");
  capa["codecs"][0u][1u].append("+HEVC");
  capa["codecs"][0u][2u].append("+AAC");
  capa["codecs"][0u][3u].append("+AC3");
  capa["codecs"][0u][4u].append("+MP3");
  capa["codecs"][0u][5u].append("+subtitle");
  capa["encryption"].append("CTR128");
  // One access method per manifest flavor, all at equal priority.
  capa["methods"][0u]["handler"] = "http";
  capa["methods"][0u]["type"] = "dash/video/mp4";
  capa["methods"][0u]["url_rel"] = "/cmaf/$/index.mpd";
  capa["methods"][0u]["priority"] = 8;
  capa["methods"][1u]["handler"] = "http";
  capa["methods"][1u]["type"] = "html5/application/vnd.apple.mpegurl;version=7";
  capa["methods"][1u]["url_rel"] = "/cmaf/$/index.m3u8";
  capa["methods"][1u]["priority"] = 8;
  capa["methods"][2u]["handler"] = "http";
  capa["methods"][2u]["type"] = "html5/application/vnd.ms-sstr+xml";
  capa["methods"][2u]["url_rel"] = "/cmaf/$/Manifest";
  capa["methods"][2u]["priority"] = 8;
  // MP3 does not work in browsers
  capa["exceptions"]["codec:MP3"] = JSON::fromString("[[\"blacklist\",[\"Mozilla/\"]]]");
  cfg->addOption("nonchunked",
                 JSON::fromString("{\"short\":\"C\",\"long\":\"nonchunked\",\"help\":\"Do not "
                                  "send chunked, but buffer whole segments.\"}"));
  capa["optional"]["nonchunked"]["name"] = "Send whole segments";
  capa["optional"]["nonchunked"]["help"] =
      "Disables chunked transfer encoding, forcing per-segment buffering. Reduces performance "
      "significantly, but increases compatibility somewhat.";
  capa["optional"]["nonchunked"]["option"] = "--nonchunked";
}
/// Handles one HTTP request: dispatches to the appropriate manifest
/// generator, or starts playback of a single CMAF (fMP4) segment.
void OutCMAF::onHTTP(){
  initialize();
  // Requests look like /cmaf/<streamname>/...; anything shorter than the
  // stream name plus the surrounding path parts cannot be valid.
  if (H.url.size() < streamName.length() + 7){
    H.Clean();
    H.SendResponse("404", "Stream not found", myConn);
    H.Clean();
    return;
  }
  std::string method = H.method;
  std::string url = H.url.substr(streamName.length() + 7); // Strip /cmaf/<streamname>/ from url
  // Send a dash manifest for any URL with .mpd in the path
  if (url.find(".mpd") != std::string::npos){
    sendDashManifest();
    return;
  }
  // Send a hls manifest for any URL with index.m3u8 in the path
  if (url.find("index.m3u8") != std::string::npos){
    size_t loc = url.find("index.m3u8");
    if (loc == 0){
      // Master playlist: index.m3u8 at the root of the stream path.
      sendHlsManifest();
      return;
    }
    // Media playlist: the path starts with a numeric track index.
    size_t idx = atoll(url.c_str());
    if (url.find("?") == std::string::npos){
      sendHlsManifest(idx);
      return;
    }
    // NOTE(review): index.m3u8 requests WITH a query string fall through
    // and receive no response body here -- confirm this is intentional.
    return;
  }
  // Send a smooth streaming manifest for any URL with Manifest in the path
  if (url.find("Manifest") != std::string::npos){
    sendSmoothManifest();
    return;
  }
  // Anything else is a media segment request.
  H.Clean();
  H.SetHeader("Content-Type", "video/mp4");
  H.SetHeader("Cache-Control", "no-cache");
  H.setCORSHeaders();
  if (method == "OPTIONS" || method == "HEAD"){
    H.SendResponse("200", "OK", myConn);
    H.Clean();
    return;
  }
  // The path starts with the track index; Smooth-style requests instead
  // encode it as Q(<bitrate>) where the last two decimal digits are the
  // track ID (see bpsAndIdToBitrate).
  size_t idx = atoll(url.c_str());
  if (url.find("Q(") != std::string::npos){
    idx = atoll(url.c_str() + url.find("Q(") + 2) % 100;
  }
  if (!M.getValidTracks().count(idx)){
    H.Clean();
    H.SendResponse("404", "Track not found", myConn);
    H.Clean();
    return;
  }
  if (url.find(".m4s") == std::string::npos){
    H.Clean();
    H.SendResponse("404", "File not found", myConn);
    H.Clean();
    return;
  }
  // Select the right track
  userSelect.clear();
  userSelect[idx].reload(streamName, idx);
  H.StartResponse(H, myConn, config->getBool("nonchunked"));
  if (url.find("init.m4s") != std::string::npos){
    // Init segment: track header only, no media data.
    std::string headerData = CMAF::trackHeader(M, idx);
    H.Chunkify(headerData.c_str(), headerData.size(), myConn);
    H.Chunkify("", 0, myConn);
    H.Clean();
    return;
  }
  // Media segment: chunk_<startTime>.m4s.
  // NOTE(review): if a ".m4s" URL contains no "/chunk_", find() returns
  // npos and this pointer arithmetic reads out of bounds -- verify the
  // path shape is guaranteed upstream.
  uint64_t startTime = atoll(url.c_str() + url.find("/chunk_") + 7);
  if (M.getVod()){startTime += M.getFirstms(idx);}
  uint64_t fragmentIndex = M.getFragmentIndexForTime(idx, startTime);
  // Playback stops when the next fragment begins (checked in sendNext()).
  targetTime = M.getTimeForFragmentIndex(idx, fragmentIndex + 1);
  std::string headerData = CMAF::fragmentHeader(M, idx, fragmentIndex);
  H.Chunkify(headerData.c_str(), headerData.size(), myConn);
  // Emit the mdat box header with the full payload size up front.
  uint64_t mdatSize = 8 + CMAF::payloadSize(M, idx, fragmentIndex);
  char mdatHeader[] ={0x00, 0x00, 0x00, 0x00, 'm', 'd', 'a', 't'};
  Bit::htobl(mdatHeader, mdatSize);
  H.Chunkify(mdatHeader, 8, myConn);
  seek(startTime);
  // Switch the main loop into data-pushing mode; sendNext() streams packets.
  wantRequest = false;
  parseData = true;
}
/// Called for each packet while streaming a segment. Sends raw packet data
/// until the packet timestamp reaches targetTime (start of next fragment),
/// then terminates the chunked response and returns to request mode.
void OutCMAF::sendNext(){
  if (thisPacket.getTime() >= targetTime){
    HIGH_MSG("Finished playback to %" PRIu64, targetTime);
    wantRequest = true;
    parseData = false;
    // Zero-length chunk ends the chunked transfer.
    H.Chunkify("", 0, myConn);
    H.Clean();
    return;
  }
  char *data;
  size_t dataLen;
  thisPacket.getString("data", data, dataLen);
  H.Chunkify(data, dataLen, myConn);
}
/***************************************************************************************************/
/* Utility */
/***************************************************************************************************/
/// Returns true when every track in trackList is fragment-aligned with the
/// first track in the set (trivially true for zero or one track).
bool OutCMAF::tracksAligned(const std::set<size_t> &trackList){
  if (trackList.size() <= 1){return true;}
  std::set<size_t>::iterator it = trackList.begin();
  size_t reference = *it;
  // Compare every remaining track against the reference track.
  for (++it; it != trackList.end(); ++it){
    if (!M.tracksAlign(*it, reference)){return false;}
  }
  return true;
}
/// Writes the segment list for track idx into s, calling callBack once per
/// fragment with (startTime, duration, s, isFirst). All three manifest
/// generators reuse this with their own formatting callback.
void OutCMAF::generateSegmentlist(size_t idx, std::stringstream &s,
                                  void callBack(uint64_t, uint64_t, std::stringstream &, bool)){
  DTSC::Fragments fragments(M.fragments(idx));
  uint32_t firstFragment = fragments.getFirstValid();
  uint32_t endFragment = fragments.getEndValid();
  bool first = true;
  // skip the first two fragments if live
  if (M.getLive() && (endFragment - firstFragment) > 6){firstFragment += 2;}
  if (M.getType(idx) == "audio"){
    // Align audio lists to the first fragment time of the main (video) track.
    uint32_t mainTrack = M.mainTrack();
    if (mainTrack == INVALID_TRACK_ID){return;}
    DTSC::Fragments f(M.fragments(mainTrack));
    uint64_t firstVidTime = M.getTimeForFragmentIndex(mainTrack, f.getFirstValid());
    firstFragment = M.getFragmentIndexForTime(idx, firstVidTime);
  }
  DTSC::Keys keys(M.keys(idx));
  for (; firstFragment < endFragment; ++firstFragment){
    uint32_t duration = fragments.getDuration(firstFragment);
    uint64_t starttime = keys.getTime(fragments.getFirstKey(firstFragment));
    if (!duration){
      if (M.getLive()){continue;}// skip last fragment when live
      // For VoD, the last fragment's duration runs to the end of the track.
      duration = M.getLastms(idx) - starttime;
    }
    // Manifest timestamps are zero-based for VoD.
    if (M.getVod()){starttime -= M.getFirstms(idx);}
    callBack(starttime, duration, s, first);
    first = false;
  }
  /*LTS-START
  // remove lines to reduce size towards listlimit setting - but keep at least 4X target
  // duration available
  uint64_t listlimit = config->getInteger("listlimit");
  if (listlimit){
    while (lines.size() > listlimit &&
           (totalDuration - durations.front()) > (targetDuration * 4000)){
      lines.pop_front();
      totalDuration -= durations.front();
      durations.pop_front();
      ++skippedLines;
    }
  }
  LTS-END*/
}
/// Builds a length-prefixed (AVCC-style) NAL unit: a 4-byte big-endian
/// size followed by the payload bytes.
/// \param len length of the payload in bytes.
/// \param data pointer to the payload bytes.
/// \return the size-prefixed NAL unit as a binary string.
std::string OutCMAF::buildNalUnit(size_t len, const char *data){
  // Build directly into a std::string: the previous malloc'd buffer was
  // never freed, leaking len+4 bytes on every call.
  std::string res(len + 4, '\0');
  Bit::htobl(&res[0], len);
  if (len){memcpy(&res[4], data, len);}
  return res;
}
/// Returns the 6-hex-digit H.264 codec string (profile, compatibility,
/// level) taken from bytes 1-3 of the avcC init data.
/// NOTE: assumes initData holds at least 4 bytes of avcC data -- callers
/// must only pass valid H264 init payloads.
std::string OutCMAF::h264init(const std::string &initData){
  char res[7];
  // Cast through unsigned char: passing a plain (signed) char to %.2X
  // sign-extends bytes >= 0x80 and corrupts the codec string.
  snprintf(res, 7, "%.2X%.2X%.2X", (unsigned char)initData[1], (unsigned char)initData[2],
           (unsigned char)initData[3]);
  return res;
}
/// Returns the 16-hex-digit HEVC codec string built from selected bytes of
/// the hvcC init data.
/// NOTE: assumes initData holds at least 13 bytes of hvcC data -- callers
/// must only pass valid HEVC init payloads.
std::string OutCMAF::h265init(const std::string &initData){
  char res[17];
  // Cast through unsigned char: passing a plain (signed) char to %.2X
  // sign-extends bytes >= 0x80 and corrupts the codec string.
  snprintf(res, 17, "%.2X%.2X%.2X%.2X%.2X%.2X%.2X%.2X", (unsigned char)initData[1],
           (unsigned char)initData[6], (unsigned char)initData[7], (unsigned char)initData[8],
           (unsigned char)initData[9], (unsigned char)initData[10], (unsigned char)initData[11],
           (unsigned char)initData[12]);
  return res;
}
/*********************************/
/* MPEG-DASH Manifest Generation */
/*********************************/
/// Sends the MPEG-DASH MPD manifest as the HTTP response body.
void OutCMAF::sendDashManifest(){
  std::string method = H.method;
  H.Clean();
  H.SetHeader("Content-Type", "application/dash+xml");
  H.SetHeader("Cache-Control", "no-cache");
  H.setCORSHeaders();
  // Preflight/HEAD requests get headers only, no manifest body.
  if (method == "OPTIONS" || method == "HEAD"){
    H.SendResponse("200", "OK", myConn);
    H.Clean();
    return;
  }
  H.SetBody(dashManifest());
  H.SendResponse("200", "OK", myConn);
  H.Clean();
}
/// Segment-list callback for DASH: one <S> element per fragment, with the
/// absolute start time only on the first entry of the timeline.
void dashSegment(uint64_t start, uint64_t duration, std::stringstream &s, bool first){
  if (first){
    s << "<S t=\"" << start << "\" d=\"" << duration << "\" />" << std::endl;
  }else{
    s << "<S d=\"" << duration << "\" />" << std::endl;
  }
}
/// Formats a millisecond timestamp as an ISO-8601 duration, e.g.
/// "PT1H2M3.004S"; hour/minute parts are omitted when zero.
std::string OutCMAF::dashTime(uint64_t time){
  std::stringstream out;
  out << "PT";
  if (time >= 3600000){out << (time / 3600000) << "H";}
  if (time >= 60000){out << ((time / 60000) % 60) << "M";}
  uint64_t seconds = (time / 1000) % 60;
  uint64_t millis = time % 1000;
  // Milliseconds are always zero-padded to three digits.
  out << seconds << "." << std::setfill('0') << std::setw(3) << millis << "S";
  return out.str();
}
/// Opens an <AdaptationSet> element for track idx in group id; video sets
/// additionally carry width/height/frameRate attributes.
/// The element is left open -- the caller writes </AdaptationSet>.
void OutCMAF::dashAdaptationSet(size_t id, size_t idx, std::stringstream &r){
  std::string type = M.getType(idx);
  r << "<AdaptationSet group=\"" << id << "\" mimeType=\"" << type << "/mp4\" ";
  if (type == "video"){
    r << "width=\"" << M.getWidth(idx) << "\" height=\"" << M.getHeight(idx) << "\" frameRate=\""
      << M.getFpks(idx) / 1000 << "\" ";
  }
  r << "segmentAlignment=\"true\" id=\"" << idx
    << "\" startWithSAP=\"1\" subsegmentAlignment=\"true\" subsegmentStartsWithSAP=\"1\">" << std::endl;
}
/// Appends one <Representation> element for track idx to r.
/// The id parameter (adaptation set group) is currently unused but kept
/// for signature symmetry with the other dash* helpers.
void OutCMAF::dashRepresentation(size_t id, size_t idx, std::stringstream &r){
  std::string type = M.getType(idx);
  r << "<Representation id=\"" << idx << "\" bandwidth=\"" << M.getBps(idx) * 8 << "\" codecs=\"";
  r << Util::codecString(M.getCodec(idx), M.getInit(idx));
  r << "\" ";
  if (type == "audio"){
    // Audio representations also carry sample rate and channel layout.
    r << "audioSamplingRate=\"" << M.getRate(idx)
      << "\"> <AudioChannelConfiguration "
         "schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\""
      << M.getChannels(idx) << "\" /></Representation>" << std::endl;
  }else{
    r << "/>";
  }
}
/// Opens a <SegmentTemplate> plus <SegmentTimeline> pair using the
/// standard CMAF segment naming scheme; the caller closes both elements.
void OutCMAF::dashSegmentTemplate(std::stringstream &r){
  r << "<SegmentTemplate timescale=\"1000\" "
    << "media=\"$RepresentationID$/chunk_$Time$.m4s\" "
    << "initialization=\"$RepresentationID$/init.m4s\">"
    << "<SegmentTimeline>" << std::endl;
}
/// Writes the adaptation set(s) for one group of tracks. Aligned tracks
/// share a single adaptation set and timeline; unaligned tracks each get
/// their own adaptation set with a private timeline.
void OutCMAF::dashAdaptation(size_t id, std::set<size_t> tracks, bool aligned, std::stringstream &r){
  if (!tracks.size()){return;}
  if (aligned){
    // One shared timeline, one <Representation> per track.
    size_t firstTrack = *tracks.begin();
    dashAdaptationSet(id, firstTrack, r);
    dashSegmentTemplate(r);
    generateSegmentlist(firstTrack, r, dashSegment);
    r << "</SegmentTimeline></SegmentTemplate>" << std::endl;
    for (std::set<size_t>::iterator it = tracks.begin(); it != tracks.end(); it++){
      dashRepresentation(id, *it, r);
    }
    r << "</AdaptationSet>" << std::endl;
    return;
  }
  // Unaligned: a full adaptation set per track.
  for (std::set<size_t>::iterator it = tracks.begin(); it != tracks.end(); it++){
    // NOTE(review): this always describes the FIRST track's properties for
    // every adaptation set (*tracks.begin() rather than *it) -- confirm
    // whether *it was intended here.
    dashAdaptationSet(id, *tracks.begin(), r);
    dashSegmentTemplate(r);
    generateSegmentlist(*it, r, dashSegment);
    r << "</SegmentTimeline></SegmentTemplate>" << std::endl;
    dashRepresentation(id, *it, r);
    r << "</AdaptationSet>" << std::endl;
  }
}
/// Returns a string with the full XML DASH manifest MPD file.
/// Returns a string with the full XML DASH manifest MPD file.
/// When checkAlignment is true, aligned tracks are allowed to share a
/// single segment timeline per adaptation set.
std::string OutCMAF::dashManifest(bool checkAlignment){
  initialize();
  selectDefaultTracks();
  // Bucket the selected tracks by type.
  std::set<size_t> vTracks;
  std::set<size_t> aTracks;
  std::set<size_t> sTracks;
  for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
    if (M.getType(it->first) == "video"){vTracks.insert(it->first);}
    if (M.getType(it->first) == "audio"){aTracks.insert(it->first);}
    if (M.getType(it->first) == "subtitle"){sTracks.insert(it->first);}
  }
  // Without at least one audio or video track there is nothing to serve.
  if (!vTracks.size() && !aTracks.size()){return "";}
  bool videoAligned = checkAlignment && tracksAligned(vTracks);
  bool audioAligned = checkAlignment && tracksAligned(aTracks);
  std::stringstream r;
  r << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" << std::endl;
  r << "<MPD ";
  size_t mainTrack = getMainSelectedTrack();
  size_t mainDuration = M.getDuration(mainTrack);
  if (M.getVod()){
    r << "type=\"static\" mediaPresentationDuration=\"" << dashTime(mainDuration) << "\" minBufferTime=\"PT1.5S\" ";
  }else{
    // Live: dynamic MPD with a DVR window equal to the track duration.
    r << "type=\"dynamic\" minimumUpdatePeriod=\"PT2.0S\" availabilityStartTime=\""
      << Util::getUTCString(Util::epoch() - M.getLastms(mainTrack) / 1000)
      << "\" timeShiftBufferDepth=\"" << dashTime(mainDuration)
      << "\" suggestedPresentationDelay=\"PT5.0S\" minBufferTime=\"PT2.0S\" publishTime=\""
      << Util::getUTCString(Util::epoch()) << "\" ";
  }
  r << "profiles=\"urn:mpeg:dash:profile:isoff-live:2011\" "
       "xmlns=\"urn:mpeg:dash:schema:mpd:2011\" >"
    << std::endl;
  r << "<ProgramInformation><Title>" << streamName << "</Title></ProgramInformation>" << std::endl;
  r << "<Period " << (M.getLive() ? "start=\"0\"" : "") << ">" << std::endl;
  dashAdaptation(1, vTracks, videoAligned, r);
  dashAdaptation(2, aTracks, audioAligned, r);
  // Subtitles are referenced as side-loaded WebVTT files, not CMAF segments.
  if (sTracks.size()){
    for (std::set<size_t>::iterator it = sTracks.begin(); it != sTracks.end(); it++){
      std::string lang = (M.getLang(*it) == "" ? "unknown" : M.getLang(*it));
      r << "<AdaptationSet id=\"" << *it << "\" group=\"3\" mimeType=\"text/vtt\" lang=\"" << lang
        << "\"><Representation id=\"" << *it << "\" bandwidth=\"256\"><BaseURL>../../" << streamName
        << ".vtt?track=" << *it << "</BaseURL></Representation></AdaptationSet>" << std::endl;
    }
  }
  r << "</Period></MPD>" << std::endl;
  return r.str();
}
/******************************/
/* HLS v7 Manifest Generation */
/******************************/
/// Sends an HLS v7 playlist: the master playlist when idx is
/// INVALID_TRACK_ID, otherwise the media playlist for track idx.
void OutCMAF::sendHlsManifest(size_t idx, const std::string &sessId){
  std::string method = H.method;
  H.Clean();
  // H.SetHeader("Content-Type", "application/vnd.apple.mpegurl");
  H.SetHeader("Content-Type", "audio/mpegurl");
  H.SetHeader("Cache-Control", "no-cache");
  H.setCORSHeaders();
  // Preflight/HEAD requests get headers only, no playlist body.
  if (method == "OPTIONS" || method == "HEAD"){
    H.SendResponse("200", "OK", myConn);
    H.Clean();
    return;
  }
  if (idx == INVALID_TRACK_ID){
    H.SetBody(hlsManifest());
  }else{
    H.SetBody(hlsManifest(idx, sessId));
  }
  H.SendResponse("200", "OK", myConn);
  H.Clean();
}
/// Segment-list callback for HLS: one #EXTINF line (duration in seconds)
/// followed by the chunk URI. The `first` flag is unused for HLS output.
void hlsSegment(uint64_t start, uint64_t duration, std::stringstream &s, bool first){
  double seconds = static_cast<double>(duration) / 1000;
  s << "#EXTINF:" << seconds << ",\r\n";
  s << "chunk_" << start << ".m4s" << std::endl;
}
///\brief Builds an index file for HTTP Live streaming.
///\return The index file for HTTP Live Streaming.
///\brief Builds an index file for HTTP Live streaming.
///\return The index file for HTTP Live Streaming.
/// Produces the master playlist: one #EXT-X-STREAM-INF per video track,
/// plus audio/subtitle renditions in groups "aud1"/"sub1".
std::string OutCMAF::hlsManifest(){
  std::stringstream result;
  result << "#EXTM3U\r\n#EXT-X-VERSION:7\r\n#EXT-X-INDEPENDENT-SEGMENTS\r\n";
  selectDefaultTracks();
  // Bucket the selected tracks by type.
  std::set<size_t> vTracks;
  std::set<size_t> aTracks;
  std::set<size_t> sTracks;
  for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
    if (M.getType(it->first) == "video"){vTracks.insert(it->first);}
    if (M.getType(it->first) == "audio"){aTracks.insert(it->first);}
    if (M.getType(it->first) == "subtitle"){sTracks.insert(it->first);}
  }
  for (std::set<size_t>::iterator it = vTracks.begin(); it != vTracks.end(); it++){
    std::string codec = M.getCodec(*it);
    if (codec == "H264" || codec == "HEVC" || codec == "MPEG2"){
      int bWidth = M.getBps(*it);
      if (bWidth < 5){bWidth = 5;} // enforce a minimal nonzero bandwidth
      // Advertised bandwidth includes the first audio track, if any.
      if (aTracks.size()){bWidth += M.getBps(*aTracks.begin());}
      result << "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=" << (bWidth * 8)
             << ",RESOLUTION=" << M.getWidth(*it) << "x" << M.getHeight(*it);
      if (M.getFpks(*it)){result << ",FRAME-RATE=" << (float)M.getFpks(*it) / 1000;}
      if (aTracks.size()){result << ",AUDIO=\"aud1\"";}
      if (sTracks.size()){result << ",SUBTITLES=\"sub1\"";}
      if (codec == "H264" || codec == "HEVC"){
        result << ",CODECS=\"";
        result << Util::codecString(M.getCodec(*it), M.getInit(*it));
        result << "\"";
      }
      result << "\r\n" << *it;
      if (hasSessionIDs()){
        result << "/index.m3u8?sessId=" << getpid() << "\r\n";
      }else{
        result << "/index.m3u8\r\n";
      }
    }else if (codec == "subtitle"){
      // NOTE(review): vTracks only contains tracks of type "video", so a
      // "subtitle" codec here looks unreachable -- confirm intent.
      if (M.getLang(*it).empty()){meta.setLang(*it, "und");}
      result << "#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID=\"sub1\",LANGUAGE=\"" << M.getLang(*it)
             << "\",NAME=\"" << Encodings::ISO639::decode(M.getLang(*it))
             << "\",AUTOSELECT=NO,DEFAULT=NO,FORCED=NO,URI=\"" << *it << "/index.m3u8\""
             << "\r\n";
    }
  }
  // Audio renditions (group "aud1").
  for (std::set<size_t>::iterator it = aTracks.begin(); it != aTracks.end(); it++){
    if (M.getLang(*it).empty()){meta.setLang(*it, "und");}
    result << "#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID=\"aud1\",LANGUAGE=\"" << M.getLang(*it)
           << "\",NAME=\"" << Encodings::ISO639::decode(M.getLang(*it))
           << "\",AUTOSELECT=YES,DEFAULT=YES,URI=\"" << *it << "/index.m3u8\""
           << "\r\n";
  }
  // Subtitle renditions (group "sub1").
  for (std::set<size_t>::iterator it = sTracks.begin(); it != sTracks.end(); it++){
    if (M.getLang(*it).empty()){meta.setLang(*it, "und");}
    result << "#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID=\"sub1\",LANGUAGE=\"" << M.getLang(*it)
           << "\",NAME=\"" << Encodings::ISO639::decode(M.getLang(*it))
           << "\",AUTOSELECT=NO,DEFAULT=NO,FORCED=NO,URI=\"" << *it << "/index.m3u8\""
           << "\r\n";
  }
  // Audio-only streams: emit a stream entry for the first audio track.
  if (aTracks.size() && !vTracks.size()){
    std::string codec = M.getCodec(*aTracks.begin());
    result << "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=" << M.getBps(*aTracks.begin()) * 8;
    result << ",CODECS=\""
           << Util::codecString(M.getCodec(*aTracks.begin()), M.getInit(*aTracks.begin())) << "\"\r\n";
    result << *aTracks.begin() << "/index.m3u8\r\n";
  }
  HIGH_MSG("Sending this index: %s", result.str().c_str());
  return result.str();
}
/// Builds the HLS v7 media playlist for a single track.
/// sessId is accepted for interface symmetry but not used in the body.
std::string OutCMAF::hlsManifest(size_t idx, const std::string &sessId){
  std::stringstream result;
  // parse single track
  uint32_t targetDuration = (M.biggestFragment(idx) / 1000) + 1;
  DTSC::Fragments fragments(M.fragments(idx));
  uint32_t firstFragment = fragments.getFirstValid();
  uint32_t endFragment = fragments.getEndValid();
  // skip the first two fragments if live
  if (M.getLive() && (endFragment - firstFragment) > 6){firstFragment += 2;}
  if (M.getType(idx) == "audio"){
    // Start audio playlists at the main (video) track's first fragment time.
    uint32_t mainTrack = M.mainTrack();
    if (mainTrack == INVALID_TRACK_ID){return "";}
    DTSC::Fragments f(M.fragments(mainTrack));
    uint64_t firstVidTime = M.getTimeForFragmentIndex(mainTrack, f.getFirstValid());
    firstFragment = M.getFragmentIndexForTime(idx, firstVidTime);
  }
  result << "#EXTM3U\r\n"
            "#EXT-X-VERSION:7\r\n"
            "#EXT-X-DISCONTINUITY\r\n"
            "#EXT-X-TARGETDURATION:"
         << targetDuration << "\r\n";
  if (M.getLive()){result << "#EXT-X-MEDIA-SEQUENCE:" << firstFragment << "\r\n";}
  result << "#EXT-X-MAP:URI=\"init.m4s"
         << "\"\r\n";
  generateSegmentlist(idx, result, hlsSegment);
  // VoD playlists are complete and must be terminated.
  if (M.getVod()){result << "#EXT-X-ENDLIST\r\n";}
  return result.str();
}
/****************************************/
/* Smooth Streaming Manifest Generation */
/****************************************/
/// Converts an ASCII/8-bit string to little-endian UTF-16: a 0xFF 0xFE BOM
/// followed by each input byte with a trailing NUL byte.
std::string toUTF16(const std::string &original){
  std::string converted;
  converted.reserve(2 + original.size() * 2);
  converted.push_back('\377');
  converted.push_back('\376');
  for (size_t i = 0; i < original.size(); ++i){
    converted.push_back(original[i]);
    converted.push_back('\0');
  }
  return converted;
}
/// Converts bytes per second and track ID into a single bits per second value, where the last two
/// digits are the track ID. Breaks for track IDs > 99. But really, this is MS-SS, so who cares..?
/// Converts bytes per second and track ID into a single bits per second value, where the last two
/// digits are the track ID. Breaks for track IDs > 99. But really, this is MS-SS, so who cares..?
uint64_t bpsAndIdToBitrate(uint32_t bps, uint64_t tid){
  // Truncate the bitrate to a multiple of 100, as in the original: the
  // division is done in 32-bit arithmetic before widening.
  uint64_t truncated = (bps * 8) / 100;
  return truncated * 100 + tid;
}
/// Segment-list callback for Smooth Streaming: one <c> element per chunk,
/// with the absolute start time only on the first entry.
void smoothSegment(uint64_t start, uint64_t duration, std::stringstream &s, bool first){
  if (first){
    s << "<c t=\"" << start << "\" d=\"" << duration << "\" />" << std::endl;
  }else{
    s << "<c d=\"" << duration << "\" />" << std::endl;
  }
}
/// Sends the Smooth Streaming manifest as the HTTP response body.
void OutCMAF::sendSmoothManifest(){
  std::string method = H.method;
  H.Clean();
  // NOTE(review): "application/dash+xml" for a Smooth Streaming manifest
  // looks copy-pasted from the DASH path -- confirm the intended type.
  H.SetHeader("Content-Type", "application/dash+xml");
  H.SetHeader("Cache-Control", "no-cache");
  H.setCORSHeaders();
  // Preflight/HEAD requests get headers only, no manifest body.
  if (method == "OPTIONS" || method == "HEAD"){
    H.SendResponse("200", "OK", myConn);
    H.Clean();
    return;
  }
  H.SetBody(smoothManifest());
  H.SendResponse("200", "OK", myConn);
  H.Clean();
}
/// Writes one Smooth Streaming <StreamIndex> (with its QualityLevels and
/// shared chunk list) for the given track type into r.
void OutCMAF::smoothAdaptation(const std::string &type, std::set<size_t> tracks, std::stringstream &r){
  if (!tracks.size()){return;}
  DTSC::Keys keys(M.keys(*tracks.begin()));
  r << "<StreamIndex Type=\"" << type << "\" QualityLevels=\"" << tracks.size() << "\" Name=\""
    << type << "\" Chunks=\"" << keys.getValidCount() << "\" Url=\"Q({bitrate})/"
    << "chunk_{start_time}.m4s\" ";
  if (type == "video"){
    // Report the largest dimensions across all quality levels.
    size_t maxWidth = 0;
    size_t maxHeight = 0;
    for (std::set<size_t>::iterator it = tracks.begin(); it != tracks.end(); it++){
      size_t width = M.getWidth(*it);
      size_t height = M.getHeight(*it);
      if (width > maxWidth){maxWidth = width;}
      if (height > maxHeight){maxHeight = height;}
    }
    r << "MaxWidth=\"" << maxWidth << "\" MaxHeight=\"" << maxHeight << "\" DisplayWidth=\""
      << maxWidth << "\" DisplayHeight=\"" << maxHeight << "\"";
  }
  r << ">\n";
  size_t index = 0;
  for (std::set<size_t>::iterator it = tracks.begin(); it != tracks.end(); it++){
    // The advertised bitrate doubles as the track selector: its last two
    // digits encode the track ID (see bpsAndIdToBitrate).
    r << "<QualityLevel Index=\"" << index++ << "\" Bitrate=\""
      << bpsAndIdToBitrate(M.getBps(*it) * 8, *it) << "\" CodecPrivateData=\"" << std::hex;
    if (type == "audio"){
      // Hex-dump the codec init data as CodecPrivateData.
      // NOTE(review): (int)init[i] sign-extends bytes >= 0x80, printing
      // "ffffffxx" instead of "xx" -- likely needs an unsigned char cast.
      std::string init = M.getInit(*it);
      for (unsigned int i = 0; i < init.size(); i++){
        r << std::setfill('0') << std::setw(2) << std::right << (int)init[i];
      }
      r << std::dec << "\" SamplingRate=\"" << M.getRate(*it)
        << "\" Channels=\"2\" BitsPerSample=\"16\" PacketSize=\"4\" AudioTag=\"255\" "
           "FourCC=\"AACL\" />\n";
    }
    if (type == "video"){
      // Video CodecPrivateData is a hex dump of the Annex B SPS/PPS data.
      MP4::AVCC avccbox;
      avccbox.setPayload(M.getInit(*it));
      std::string tmpString = avccbox.asAnnexB();
      for (size_t i = 0; i < tmpString.size(); i++){
        r << std::setfill('0') << std::setw(2) << std::right << (int)tmpString[i];
      }
      r << std::dec << "\" MaxWidth=\"" << M.getWidth(*it) << "\" MaxHeight=\""
        << M.getHeight(*it) << "\" FourCC=\"AVC1\" />\n";
    }
  }
  // All quality levels share the first track's chunk timeline.
  generateSegmentlist(*tracks.begin(), r, smoothSegment);
  r << "</StreamIndex>\n";
}
/// Returns a string with the full XML DASH manifest MPD file.
/// Returns a string with the full XML Smooth Streaming manifest, encoded
/// as little-endian UTF-16 via toUTF16().
/// checkAlignment is accepted for symmetry with dashManifest() but unused.
std::string OutCMAF::smoothManifest(bool checkAlignment){
  initialize();
  std::stringstream r;
  r << "<?xml version=\"1.0\" encoding=\"utf-16\"?>\n"
       "<SmoothStreamingMedia MajorVersion=\"2\" MinorVersion=\"0\" TimeScale=\"1000\" ";
  selectDefaultTracks();
  // Bucket the selected tracks by type.
  std::set<size_t> vTracks;
  std::set<size_t> aTracks;
  for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
    if (M.getType(it->first) == "video"){vTracks.insert(it->first);}
    if (M.getType(it->first) == "audio"){aTracks.insert(it->first);}
  }
  if (!aTracks.size() && !vTracks.size()){
    FAIL_MSG("No valid tracks found");
    return "";
  }
  if (M.getVod()){
    r << "Duration=\"" << M.getLastms(vTracks.size() ? *vTracks.begin() : *aTracks.begin()) << "\">\n";
  }else{
    // Live: duration 0 plus a DVR window sized from the buffer window.
    r << "Duration=\"0\" IsLive=\"TRUE\" LookAheadFragmentCount=\"2\" DVRWindowLength=\""
      << M.getBufferWindow() << "\" CanSeek=\"TRUE\" CanPause=\"TRUE\">\n";
  }
  smoothAdaptation("audio", aTracks, r);
  smoothAdaptation("video", vTracks, r);
  r << "</SmoothStreamingMedia>\n";
  return toUTF16(r.str());
}
}// namespace Mist

43
src/output/output_cmaf.h Normal file
View file

@ -0,0 +1,43 @@
#include "output_http.h"
#include <mist/http_parser.h>
#include <mist/mp4_generic.h>

namespace Mist{
  /// CMAF (fMP4) HTTP output. Serves the same fMP4 segments under three
  /// manifest flavors: MPEG-DASH (.mpd), HLS version 7 (index.m3u8) and
  /// Microsoft Smooth Streaming (Manifest).
  class OutCMAF : public HTTPOutput{
  public:
    OutCMAF(Socket::Connection &conn);
    ~OutCMAF();
    static void init(Util::Config *cfg);
    void onHTTP();       ///< Dispatches manifest and segment requests.
    void sendNext();     ///< Streams packet data until targetTime is reached.
    void sendHeader(){}; ///< No-op: headers are sent per request in onHTTP().

  protected:
    // MPEG-DASH manifest generation.
    void sendDashManifest();
    void dashAdaptationSet(size_t id, size_t idx, std::stringstream &r);
    void dashRepresentation(size_t id, size_t idx, std::stringstream &r);
    void dashSegmentTemplate(std::stringstream &r);
    void dashAdaptation(size_t id, std::set<size_t> tracks, bool aligned, std::stringstream &r);
    std::string dashTime(uint64_t time);
    std::string dashManifest(bool checkAlignment = true);

    // HLS v7 manifest generation (master and per-track media playlists).
    void sendHlsManifest(size_t idx = INVALID_TRACK_ID, const std::string &sessId = "");
    std::string hlsManifest();
    std::string hlsManifest(size_t idx, const std::string &sessId);

    // Smooth Streaming manifest generation.
    void sendSmoothManifest();
    std::string smoothManifest(bool checkAlignment = true);
    void smoothAdaptation(const std::string &type, std::set<size_t> tracks, std::stringstream &r);

    // Shared helpers.
    void generateSegmentlist(size_t idx, std::stringstream &s,
                             void callBack(uint64_t, uint64_t, std::stringstream &, bool));
    bool tracksAligned(const std::set<size_t> &trackList);
    std::string buildNalUnit(size_t len, const char *data);
    uint64_t targetTime; ///< Timestamp (ms) at which the current segment ends.
    std::string h264init(const std::string &initData);
    std::string h265init(const std::string &initData);
  };
}// namespace Mist

typedef Mist::OutCMAF mistOut;

View file

@ -1,613 +0,0 @@
#include "output_dash_mp4.h"
#include <iomanip>
#include <mist/checksum.h>
#include <mist/defines.h>
#include <mist/mp4.h>
#include <mist/mp4_dash.h>
#include <mist/mp4_generic.h>
#include <mist/stream.h>
#include <mist/timing.h>
namespace Mist{
// Constructor: DASH serves segments on request, so there is no User-Agent
// delay (uaDelay = 0) and no rate limiting (realTime = 0).
OutDashMP4::OutDashMP4(Socket::Connection &conn) : HTTPOutput(conn){
  uaDelay = 0;
  realTime = 0;
}

OutDashMP4::~OutDashMP4(){}
/// Formats a millisecond timestamp as an ISO-8601 duration, e.g.
/// "PT1H2M3.004S"; hour/minute parts are omitted when zero.
std::string OutDashMP4::makeTime(uint64_t time){
  std::stringstream out;
  out << "PT";
  if (time >= 3600000){out << (time / 3600000) << "H";}
  if (time >= 60000){out << ((time / 60000) % 60) << "M";}
  uint64_t seconds = (time / 1000) % 60;
  uint64_t millis = time % 1000;
  // Milliseconds are always zero-padded to three digits.
  out << seconds << "." << std::setfill('0') << std::setw(3) << millis << "S";
  return out.str();
}
/// Sends an empty moov box for the given track to the connected client, for following up with moof box(es).
/// The moov carries only codec/track metadata (stsd etc.); all sample tables are
/// left empty since actual samples are delivered in fragmented (moof/mdat) form.
/// Note: the track is always announced as track ID 1 inside the init segment,
/// regardless of the internal tid, because each DASH representation is a
/// single-track file from the client's point of view.
void OutDashMP4::sendMoov(uint32_t tid){
DTSC::Track &Trk = myMeta.tracks[tid];
// movie header: duration unknown/unbounded (0xFFFFFFFF) for fragmented use
MP4::MOOV moovBox;
MP4::MVHD mvhdBox(0);
mvhdBox.setTrackID(1);
mvhdBox.setDuration(0xFFFFFFFF);
moovBox.setContent(mvhdBox, 0);
// object descriptor box: marks the stream as video or audio
MP4::IODS iodsBox;
if (Trk.type == "video"){
iodsBox.setODVideoLevel(0xFE);
}else{
iodsBox.setODAudioLevel(0xFE);
}
moovBox.setContent(iodsBox, 1);
// movie extends box: announces that fragments (moof) will follow
MP4::MVEX mvexBox;
MP4::MEHD mehdBox;
mehdBox.setFragmentDuration(0xFFFFFFFF);
mvexBox.setContent(mehdBox, 0);
MP4::TREX trexBox;
trexBox.setTrackID(1);
mvexBox.setContent(trexBox, 1);
moovBox.setContent(mvexBox, 2);
// track box with header; audio tracks get volume but zero dimensions
MP4::TRAK trakBox;
MP4::TKHD tkhdBox(1, 0, Trk.width, Trk.height);
tkhdBox.setFlags(3);
if (Trk.type == "audio"){
tkhdBox.setVolume(256);
tkhdBox.setWidth(0);
tkhdBox.setHeight(0);
}
tkhdBox.setDuration(0xFFFFFFFF);
trakBox.setContent(tkhdBox, 0);
// media box: header with language + duration, handler, media information
MP4::MDIA mdiaBox;
MP4::MDHD mdhdBox(0);
mdhdBox.setLanguage(0x44);
mdhdBox.setDuration(Trk.lastms);
mdiaBox.setContent(mdhdBox, 0);
if (Trk.type == "video"){
MP4::HDLR hdlrBox(Trk.type, "VideoHandler");
mdiaBox.setContent(hdlrBox, 1);
}else{
MP4::HDLR hdlrBox(Trk.type, "SoundHandler");
mdiaBox.setContent(hdlrBox, 1);
}
MP4::MINF minfBox;
MP4::DINF dinfBox;
MP4::DREF drefBox;
dinfBox.setContent(drefBox, 0);
minfBox.setContent(dinfBox, 0);
// sample description box: one entry describing the codec + init data
MP4::STBL stblBox;
MP4::STSD stsdBox;
stsdBox.setVersion(0);
if (Trk.codec == "H264"){
MP4::AVC1 avc1Box;
avc1Box.setWidth(Trk.width);
avc1Box.setHeight(Trk.height);
MP4::AVCC avccBox;
avccBox.setPayload(Trk.init);
avc1Box.setCLAP(avccBox);
stsdBox.setEntry(avc1Box, 0);
}
if (Trk.codec == "HEVC"){
MP4::HEV1 hev1Box;
hev1Box.setWidth(Trk.width);
hev1Box.setHeight(Trk.height);
MP4::HVCC hvccBox;
hvccBox.setPayload(Trk.init);
hev1Box.setCLAP(hvccBox);
stsdBox.setEntry(hev1Box, 0);
}
if (Trk.codec == "AAC" || Trk.codec == "MP3"){
// both AAC and MP3 are carried in an mp4a sample entry with an esds box
MP4::AudioSampleEntry ase;
ase.setCodec("mp4a");
ase.setDataReferenceIndex(1);
ase.setSampleRate(Trk.rate);
ase.setChannelCount(Trk.channels);
ase.setSampleSize(Trk.size);
MP4::ESDS esdsBox(Trk.init);
ase.setCodecBox(esdsBox);
stsdBox.setEntry(ase, 0);
}
if (Trk.codec == "AC3"){
///\todo Note: this code is copied, note for muxing seperation
MP4::AudioSampleEntry ase;
ase.setCodec("ac-3");
ase.setDataReferenceIndex(1);
ase.setSampleRate(Trk.rate);
ase.setChannelCount(Trk.channels);
ase.setSampleSize(Trk.size);
MP4::DAC3 dac3Box(Trk.rate, Trk.channels);
ase.setCodecBox(dac3Box);
stsdBox.setEntry(ase, 0);
}
stblBox.setContent(stsdBox, 0);
// empty sample tables (stts/stsc/stco/stsz): required by the spec even
// though all samples live in fragments
MP4::STTS sttsBox;
sttsBox.setVersion(0);
stblBox.setContent(sttsBox, 1);
MP4::STSC stscBox;
stscBox.setVersion(0);
stblBox.setContent(stscBox, 2);
MP4::STCO stcoBox;
stcoBox.setVersion(0);
stblBox.setContent(stcoBox, 3);
MP4::STSZ stszBox;
stszBox.setVersion(0);
stblBox.setContent(stszBox, 4);
minfBox.setContent(stblBox, 1);
// media header: vmhd for video, smhd for audio
if (Trk.type == "video"){
MP4::VMHD vmhdBox;
vmhdBox.setFlags(1);
minfBox.setContent(vmhdBox, 2);
}else{
MP4::SMHD smhdBox;
minfBox.setContent(smhdBox, 2);
}
mdiaBox.setContent(minfBox, 2);
trakBox.setContent(mdiaBox, 1);
moovBox.setContent(trakBox, 3);
// serialize the finished box tree straight onto the chunked HTTP response
H.Chunkify(moovBox.asBox(), moovBox.boxedSize(), myConn);
}
/// Builds and sends the moof (movie fragment) box for one fragment of one
/// track: fragment header, decode-time anchor, and a trun sample table with
/// per-sample sizes/durations (and offsets/flags for video).
/// For video, the first sample's size is inflated by the size of the
/// parameter-set NAL units that sendMdat() prepends to the payload, so the
/// trun stays consistent with the actual mdat contents.
void OutDashMP4::sendMoof(uint32_t tid, uint32_t fragIndice){
DTSC::Track &Trk = myMeta.tracks[tid];
MP4::MOOF moofBox;
MP4::MFHD mfhdBox;
// sequence numbers keep counting across fragments dropped from the buffer
mfhdBox.setSequenceNumber(fragIndice + Trk.missedFrags);
moofBox.setContent(mfhdBox, 0);
MP4::TRAF trafBox;
MP4::TFHD tfhdBox;
tfhdBox.setTrackID(1);
if (Trk.type == "audio"){
// every audio sample is a sync sample
tfhdBox.setFlags(MP4::tfhdSampleFlag);
tfhdBox.setDefaultSampleFlags(MP4::isKeySample);
}
trafBox.setContent(tfhdBox, 0);
// anchor this fragment on the timeline at its first keyframe's timestamp
MP4::TFDT tfdtBox;
tfdtBox.setBaseMediaDecodeTime(Trk.getKey(Trk.fragments[fragIndice].getNumber()).getTime());
trafBox.setContent(tfdtBox, 1);
MP4::TRUN trunBox;
if (Trk.type == "video"){
// compute the byte size of the parameter sets sendMdat() will prepend
uint32_t headSize = 0;
if (Trk.codec == "H264"){
MP4::AVCC avccBox;
avccBox.setPayload(Trk.init);
// 14 = AUD NAL (4+2 bytes) + two 4-byte length prefixes — presumably;
// TODO confirm against sendMdat()'s exact prefix layout
headSize = 14 + avccBox.getSPSLen() + avccBox.getPPSLen();
}
if (Trk.codec == "HEVC"){
MP4::HVCC hvccBox;
hvccBox.setPayload(myMeta.tracks[tid].init);
std::deque<MP4::HVCCArrayEntry> content = hvccBox.getArrays();
for (std::deque<MP4::HVCCArrayEntry>::iterator it = content.begin(); it != content.end(); it++){
for (std::deque<std::string>::iterator it2 = it->nalUnits.begin(); it2 != it->nalUnits.end(); it2++){
headSize += 4 + (*it2).size();
}
}
}
trunBox.setFlags(MP4::trundataOffset | MP4::trunsampleSize | MP4::trunsampleDuration |
MP4::trunfirstSampleFlags | MP4::trunsampleOffsets);
trunBox.setFirstSampleFlags(MP4::isKeySample);
trunBox.setDataOffset(0);
uint32_t j = 0;
for (DTSC::PartIter parts(Trk, Trk.fragments[fragIndice]); parts; ++parts){
MP4::trunSampleInformation trunEntry;
trunEntry.sampleSize = parts->getSize();
// first sample also covers the prepended parameter-set NAL units
if (!j){trunEntry.sampleSize += headSize;}
trunEntry.sampleDuration = parts->getDuration();
trunEntry.sampleOffset = parts->getOffset();
trunBox.setSampleInformation(trunEntry, j);
++j;
}
// data offset: fixed moof header size + 12 bytes per video trun entry +
// 8-byte mdat header — assumes a fixed box layout, TODO confirm
trunBox.setDataOffset(92 + (12 * j) + 8);
}
if (Trk.type == "audio"){
// audio trun entries carry only size + duration (8 bytes each)
trunBox.setFlags(MP4::trundataOffset | MP4::trunsampleSize | MP4::trunsampleDuration);
trunBox.setDataOffset(0);
uint32_t j = 0;
for (DTSC::PartIter parts(Trk, Trk.fragments[fragIndice]); parts; ++parts){
MP4::trunSampleInformation trunEntry;
trunEntry.sampleSize = parts->getSize();
trunEntry.sampleDuration = parts->getDuration();
trunBox.setSampleInformation(trunEntry, j);
++j;
}
trunBox.setDataOffset(92 + (8 * j) + 8);
}
trafBox.setContent(trunBox, 2);
moofBox.setContent(trafBox, 1);
H.Chunkify(moofBox.asBox(), moofBox.boxedSize(), myConn);
}
/// Wraps raw NAL unit data in the MP4 length-prefixed format: a 4-byte
/// big-endian length followed by the payload bytes.
std::string OutDashMP4::buildNalUnit(unsigned int len, const char *data){
std::string nal;
nal.reserve(4 + len);
// big-endian 32-bit length prefix, most significant byte first
nal += (char)((len >> 24) & 0xFF);
nal += (char)((len >> 16) & 0xFF);
nal += (char)((len >> 8) & 0xFF);
nal += (char)(len & 0xFF);
nal.append(data, len);
return nal;
}
/// Sends the mdat header and parameter-set NAL units for one fragment, then
/// arms the packet loop (parseData/seek) so sendNext() streams the actual
/// sample payloads until the fragment's end time is reached.
/// The size pre-computation here must stay in sync with the headSize logic
/// in sendMoof(), since the trun sample sizes include these prepended bytes.
void OutDashMP4::sendMdat(uint32_t tid, uint32_t fragIndice){
DTSC::Track &Trk = myMeta.tracks[tid];
DTSC::Fragment &Frag = Trk.fragments[fragIndice];
// total mdat size = 8-byte box header + payload + any prepended init NALs
uint32_t size = 8 + Frag.getSize();
if (Trk.codec == "H264"){
MP4::AVCC avccBox;
avccBox.setPayload(Trk.init);
size += 14 + avccBox.getSPSLen() + avccBox.getPPSLen();
}
if (Trk.codec == "HEVC"){
MP4::HVCC hvccBox;
hvccBox.setPayload(Trk.init);
std::deque<MP4::HVCCArrayEntry> content = hvccBox.getArrays();
for (std::deque<MP4::HVCCArrayEntry>::iterator it = content.begin(); it != content.end(); it++){
for (std::deque<std::string>::iterator it2 = it->nalUnits.begin(); it2 != it->nalUnits.end(); it2++){
size += 4 + (*it2).size();
}
}
}
// write the mdat box header with the computed big-endian size
char mdatstr[8] ={0, 0, 0, 0, 'm', 'd', 'a', 't'};
mdatstr[0] = (char)((size >> 24) & 0xFF);
mdatstr[1] = (char)((size >> 16) & 0xFF);
mdatstr[2] = (char)((size >> 8) & 0xFF);
mdatstr[3] = (char)((size)&0xFF);
H.Chunkify(mdatstr, 8, myConn);
// prepend the codec initialization NAL units before the sample data
std::string init;
if (Trk.codec == "H264"){
MP4::AVCC avccBox;
avccBox.setPayload(Trk.init);
init = buildNalUnit(2, "\011\340");
H.Chunkify(init, myConn); // 09E0
init = buildNalUnit(avccBox.getSPSLen(), avccBox.getSPS());
H.Chunkify(init, myConn);
init = buildNalUnit(avccBox.getPPSLen(), avccBox.getPPS());
H.Chunkify(init, myConn);
}
if (Trk.codec == "HEVC"){
MP4::HVCC hvccBox;
hvccBox.setPayload(Trk.init);
std::deque<MP4::HVCCArrayEntry> content = hvccBox.getArrays();
for (std::deque<MP4::HVCCArrayEntry>::iterator it = content.begin(); it != content.end(); it++){
for (std::deque<std::string>::iterator it2 = it->nalUnits.begin(); it2 != it->nalUnits.end(); it2++){
init = buildNalUnit((*it2).size(), (*it2).c_str());
H.Chunkify(init, myConn);
}
}
}
// we pull these values first, because seek() destroys our Trk reference
uint64_t startTime = Trk.getKey(Frag.getNumber()).getTime();
targetTime = startTime + Frag.getDuration();
HIGH_MSG("Starting playback from %llu to %llu", startTime, targetTime);
// switch from request-handling mode to packet-streaming mode
wantRequest = false;
parseData = true;
// select only the tid track, and seek to the start time
selectedTracks.clear();
selectedTracks.insert(tid);
seek(startTime);
}
/// Streams one packet's raw payload into the current media segment.
/// Once the packet timestamp reaches targetTime (the fragment's end, set by
/// sendMdat()), the chunked response is terminated and the output switches
/// back to waiting for the next HTTP request.
void OutDashMP4::sendNext(){
if (thisPacket.getTime() < targetTime){
// still inside the segment: forward the raw media payload as-is
char *payload;
size_t payloadLen;
thisPacket.getString("data", payload, payloadLen);
H.Chunkify(payload, payloadLen, myConn);
return;
}
// segment boundary reached: stop parsing packets, close the response
HIGH_MSG("Finished playback to %llu", targetTime);
wantRequest = true;
parseData = false;
H.Chunkify("", 0, myConn);
H.Clean();
}
/// Examines Trk and adds playable fragments from it to r.
void OutDashMP4::addSegmentTimeline(std::stringstream &r, DTSC::Track &Trk, bool live){
std::deque<DTSC::Fragment>::iterator it = Trk.fragments.begin();
bool first = true;
// skip the first two fragments if live
if (live && Trk.fragments.size() > 6){++(++it);}
for (; it != Trk.fragments.end(); it++){
uint64_t starttime = Trk.getKey(it->getNumber()).getTime();
uint32_t duration = it->getDuration();
if (!duration){
if (live){continue;}// skip last fragment when live
duration = Trk.lastms - starttime;
}
if (first){
r << " <S t=\"" << starttime << "\" d=\"" << duration << "\" />" << std::endl;
first = false;
}else{
r << " <S d=\"" << duration << "\" />" << std::endl;
}
}
}
/// Returns a string with the full XML DASH manifest MPD file.
/// Picks one representative video/audio track (the one with the highest end
/// time) to provide the shared SegmentTimeline, then lists every selected
/// track of a compatible codec as a Representation inside the matching
/// AdaptationSet. Subtitle tracks are exposed as side-loaded WebVTT.
std::string OutDashMP4::buildManifest(){
initialize();
selectDefaultTracks();
uint64_t lastVidTime = 0;
uint64_t vidInitTrack = 0;
uint64_t lastAudTime = 0;
uint64_t audInitTrack = 0;
uint64_t subInitTrack = 0;
/// \TODO DASH pretends there is only one audio/video track, and then prints them all using the same timing information. This is obviously wrong if the tracks are not in sync.
// find, per type, the selected track that extends furthest in time
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); ++it){
if (myMeta.tracks[*it].type == "video" && myMeta.tracks[*it].lastms > lastVidTime){
lastVidTime = myMeta.tracks[*it].lastms;
vidInitTrack = *it;
}
if (myMeta.tracks[*it].type == "audio" && myMeta.tracks[*it].lastms > lastAudTime){
lastAudTime = myMeta.tracks[*it].lastms;
audInitTrack = *it;
}
if (myMeta.tracks[*it].codec == "subtitle"){subInitTrack = *it;}
}
std::stringstream r;
r << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" << std::endl;
r << "<MPD xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
"xmlns=\"urn:mpeg:dash:schema:mpd:2011\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" "
"xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011 "
"http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/"
"DASH-MPD.xsd\" profiles=\"urn:mpeg:dash:profile:isoff-live:2011\" ";
// static MPD with a fixed duration for VoD; dynamic MPD with refresh and
// time-shift window for live
if (myMeta.vod){
r << "type=\"static\" mediaPresentationDuration=\""
<< makeTime(myMeta.tracks[getMainSelectedTrack()].lastms -
myMeta.tracks[getMainSelectedTrack()].firstms)
<< "\" minBufferTime=\"PT1.5S\" >" << std::endl;
}else{
r << "type=\"dynamic\" minimumUpdatePeriod=\"PT2.0S\" availabilityStartTime=\""
<< Util::getUTCString(Util::epoch() - myMeta.tracks[getMainSelectedTrack()].lastms / 1000) << "\" "
<< "timeShiftBufferDepth=\""
<< makeTime(myMeta.tracks[getMainSelectedTrack()].lastms -
myMeta.tracks[getMainSelectedTrack()].firstms)
<< "\" suggestedPresentationDelay=\"PT5.0S\" minBufferTime=\"PT2.0S\" publishTime=\""
<< Util::getUTCString(Util::epoch()) << "\" >" << std::endl;
}
r << " <ProgramInformation><Title>" << streamName << "</Title></ProgramInformation>" << std::endl;
r << " <Period ";
if (myMeta.live){r << "start=\"0\" ";}
r << ">" << std::endl;
// video adaptation set: timeline from the representative track, one
// Representation per selected H264/HEVC track
if (vidInitTrack){
DTSC::Track &trackRef = myMeta.tracks[vidInitTrack];
r << " <AdaptationSet group=\"1\" id=\"9998\" mimeType=\"video/mp4\" width=\""
<< trackRef.width << "\" height=\"" << trackRef.height << "\" frameRate=\""
<< trackRef.fpks / 1000 << "\" segmentAlignment=\"true\" startWithSAP=\"1\" subsegmentAlignment=\"true\" subsegmentStartsWithSAP=\"1\">"
<< std::endl;
r << " <SegmentTemplate timescale=\"1000\" "
"media=\"chunk_$RepresentationID$_$Time$.m4s\" "
"initialization=\"chunk_$RepresentationID$_init.m4s\">"
<< std::endl;
r << " <SegmentTimeline>" << std::endl;
addSegmentTimeline(r, trackRef, myMeta.live);
r << " </SegmentTimeline>" << std::endl;
r << " </SegmentTemplate>" << std::endl;
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); ++it){
if (myMeta.tracks[*it].codec == "H264"){
r << " <Representation id=\"" << *it << "\" ";
r << "codecs=\"" << Util::codecString(myMeta.tracks[*it].codec, myMeta.tracks[*it].init) << "\" ";
// bandwidth is in bits per seconds, we have bytes, so times 8
r << "bandwidth=\"" << (myMeta.tracks[*it].bps * 8) << "\" ";
r << "/>" << std::endl;
}
if (myMeta.tracks[*it].codec == "HEVC"){
r << " <Representation ";
r << "id=\"" << *it << "\" ";
r << "codecs=\"" << Util::codecString(myMeta.tracks[*it].codec, myMeta.tracks[*it].init) << "\" ";
// bandwidth is in bits per seconds, we have bytes, so times 8
r << "bandwidth=\"" << (myMeta.tracks[*it].bps * 8) << "\" ";
r << "/>" << std::endl;
}
}
r << " </AdaptationSet>" << std::endl;
}
// audio adaptation set, same structure as video
if (audInitTrack){
DTSC::Track &trackRef = myMeta.tracks[audInitTrack];
r << " <AdaptationSet group=\"2\" id=\"9999\" mimeType=\"audio/mp4\" "
"segmentAlignment=\"true\" startWithSAP=\"1\" subsegmentAlignment=\"true\" "
"subsegmentStartsWithSAP=\"1\" >"
<< std::endl;
r << " <Role schemeIdUri=\"urn:mpeg:dash:role:2011\" value=\"main\"/>" << std::endl;
r << " <SegmentTemplate timescale=\"1000\" "
"media=\"chunk_$RepresentationID$_$Time$.m4s\" "
"initialization=\"chunk_$RepresentationID$_init.m4s\">"
<< std::endl;
r << " <SegmentTimeline>" << std::endl;
addSegmentTimeline(r, trackRef, myMeta.live);
r << " </SegmentTimeline>" << std::endl;
r << " </SegmentTemplate>" << std::endl;
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); ++it){
if (myMeta.tracks[*it].codec == "AAC" || myMeta.tracks[*it].codec == "MP3" ||
myMeta.tracks[*it].codec == "AC3"){
r << " <Representation id=\"" << *it << "\" ";
// (see RFC6381): sample description entry , ObjectTypeIndication [MP4RA, RFC], ObjectTypeIndication [MP4A ISO/IEC 14496-3:2009]
r << "codecs=\"" << Util::codecString(myMeta.tracks[*it].codec, myMeta.tracks[*it].init) << "\" ";
r << "audioSamplingRate=\"" << myMeta.tracks[*it].rate << "\" ";
// bandwidth is in bits per seconds, we have bytes, so times 8
r << "bandwidth=\"" << (myMeta.tracks[*it].bps * 8) << "\">" << std::endl;
r << " <AudioChannelConfiguration "
"schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\""
<< myMeta.tracks[*it].channels << "\" />" << std::endl;
r << " </Representation>" << std::endl;
}
}
r << " </AdaptationSet>" << std::endl;
}
// subtitles: one adaptation set per subtitle track, served as plain WebVTT
// through the separate .vtt endpoint rather than as MP4 segments
if (subInitTrack){
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); ++it){
if (myMeta.tracks[*it].codec == "subtitle"){
subInitTrack = *it;
std::string lang = (myMeta.tracks[*it].lang == "" ? "unknown" : myMeta.tracks[*it].lang);
r << "<AdaptationSet id=\"" << *it << "\" group=\"3\" mimeType=\"text/vtt\" lang=\"" << lang << "\">";
r << " <Representation id=\"" << *it << "\" bandwidth=\"256\">";
r << " <BaseURL>../../" << streamName << ".vtt?track=" << *it << "</BaseURL>";
r << " </Representation></AdaptationSet>" << std::endl;
}
}
}
r << " </Period>" << std::endl;
r << "</MPD>" << std::endl;
return r.str();
}
/// Registers this output's capabilities with the controller: name, URL
/// patterns, supported codecs, browser exceptions and the optional
/// non-chunked transfer mode. Called once at startup, before any connection.
void OutDashMP4::init(Util::Config *cfg){
HTTPOutput::init(cfg);
capa["name"] = "DASHMP4";
capa["friendly"] = "DASH (fMP4) over HTTP";
capa["desc"] = "Segmented streaming in DASH (fMP4) format over HTTP";
capa["url_rel"] = "/dash/$/index.mpd";
capa["url_prefix"] = "/dash/$/";
capa["socket"] = "http_dash_mp4";
// the "+" prefix presumably marks codecs usable in any combination —
// TODO confirm against capability-matching code
capa["codecs"][0u][0u].append("+H264");
capa["codecs"][0u][1u].append("+HEVC");
capa["codecs"][0u][2u].append("+AAC");
capa["codecs"][0u][3u].append("+AC3");
capa["codecs"][0u][4u].append("+MP3");
capa["codecs"][0u][5u].append("+subtitle");
capa["methods"][0u]["handler"] = "http";
capa["methods"][0u]["type"] = "dash/video/mp4";
// MP3 does not work in browsers
capa["exceptions"]["codec:MP3"] = JSON::fromString("[[\"blacklist\",[\"Mozilla/\"]]]");
// HEVC does not work in browsers
capa["exceptions"]["codec:HEVC"] = JSON::fromString("[[\"blacklist\",[\"Mozilla/\"]]]");
capa["methods"][0u]["priority"] = 8;
// optional flag: buffer whole segments instead of chunked transfer
cfg->addOption("nonchunked",
JSON::fromString("{\"short\":\"C\",\"long\":\"nonchunked\",\"help\":\"Do not "
"send chunked, but buffer whole segments.\"}"));
capa["optional"]["nonchunked"]["name"] = "Send whole segments";
capa["optional"]["nonchunked"]["help"] =
"Disables chunked transfer encoding, forcing per-segment buffering. Reduces performance "
"significantly, but increases compatibility somewhat.";
capa["optional"]["nonchunked"]["option"] = "--nonchunked";
}
/// Handles one HTTP request: routes between the MPD manifest (any URL
/// containing ".mpd"), a per-track init segment ("chunk_<tid>_init.m4s") and
/// a media segment ("chunk_<tid>_<time>.m4s"). Media segments are answered
/// with styp+sidx+moof headers here; the mdat payload is then streamed by
/// sendMdat()/sendNext().
void OutDashMP4::onHTTP(){
std::string method = H.method;
initialize();
if (myMeta.live){updateMeta();}
std::string url = H.url;
// Send a manifest for any URL with .mpd in the path
if (url.find(".mpd") != std::string::npos){
H.Clean();
H.SetHeader("Content-Type", "application/dash+xml");
H.SetHeader("Cache-Control", "no-cache");
H.setCORSHeaders();
// OPTIONS/HEAD get headers only, no body
if (method == "OPTIONS" || method == "HEAD"){
H.SendResponse("200", "OK", myConn);
H.Clean();
return;
}
H.SetBody(buildManifest());
H.SendResponse("200", "OK", myConn);
H.Clean();
return;
}
// Not a manifest - either an init segment or data segment
size_t pos = url.find("chunk_") + 6; // find the track ID position
uint32_t tid = atoi(url.substr(pos).c_str());
if (!myMeta.tracks.count(tid)){
H.Clean();
H.SendResponse("404", "Track not found", myConn);
H.Clean();
return;
}
H.Clean();
H.SetHeader("Content-Type", "video/mp4");
H.SetHeader("Cache-Control", "no-cache");
H.setCORSHeaders();
if (method == "OPTIONS" || method == "HEAD"){
H.SendResponse("200", "OK", myConn);
H.Clean();
return;
}
H.StartResponse(H, myConn, config->getBool("nonchunked"));
if (url.find("init.m4s") != std::string::npos){
// init segment
// ftyp box (32 bytes) followed by the empty moov for this track
if (myMeta.tracks[tid].type == "video"){
H.Chunkify("\000\000\000\040ftypisom\000\000\000\000isomavc1mp42dash", 32, myConn);
}else{
H.Chunkify("\000\000\000\040ftypisom\000\000\000\000isomM4A mp42dash", 32, myConn);
}
sendMoov(tid);
H.Chunkify("", 0, myConn);
H.Clean();
return;
}
// data segment
// the requested start time follows the second underscore in the URL
pos = url.find("_", pos + 1) + 1;
uint64_t timeStamp = atoll(url.substr(pos).c_str());
uint32_t fragIndice = myMeta.tracks[tid].timeToFragnum(timeStamp);
uint32_t fragNum = myMeta.tracks[tid].fragments[fragIndice].getNumber();
HIGH_MSG("Getting T%llu for track %lu, indice %lu, number %lu", timeStamp, tid, fragIndice, fragNum);
// Live: a zero-duration fragment is still growing. Poll the metadata for
// up to ~30 seconds (120 * 250ms) waiting for it to complete.
if (myMeta.live && !myMeta.tracks[tid].fragments[fragIndice].getDuration()){
size_t ctr = 0;
do{
if (ctr){Util::sleep(250);}
updateMeta();
stats();
}while (!myMeta.tracks[tid].fragments[fragIndice].getDuration() && ++ctr < 120);
if (!myMeta.tracks[tid].fragments[fragIndice].getDuration()){
WARN_MSG("Sending zero-length segment. This should never happen.");
H.SendResponse("404", "Segment download error", myConn);
H.Clean();
return;
}
}
DTSC::Track &Trk = myMeta.tracks[tid];
// styp box, then a sidx index describing this single fragment
H.Chunkify("\000\000\000\030stypmsdh\000\000\000\000msdhmsix", 24, myConn);
MP4::SIDX sidxBox;
sidxBox.setReferenceID(1);
sidxBox.setTimescale(1000);
sidxBox.setEarliestPresentationTime(Trk.getKey(fragNum).getTime());
sidxBox.setFirstOffset(0);
MP4::sidxReference refItem;
refItem.referenceType = false;
if (Trk.fragments[fragIndice].getDuration()){
refItem.subSegmentDuration = Trk.fragments[fragIndice].getDuration();
}else{
// VoD-only fallback: last fragment runs to the end of the track
refItem.subSegmentDuration = Trk.lastms - Trk.getKey(fragNum).getTime();
}
refItem.sapStart = false;
refItem.sapType = 0;
refItem.sapDeltaTime = 0;
sidxBox.setReference(refItem, 0);
H.Chunkify(sidxBox.asBox(), sidxBox.boxedSize(), myConn);
// moof header, then mdat payload (streamed asynchronously via sendNext)
sendMoof(tid, fragIndice);
sendMdat(tid, fragIndice);
}
}// namespace Mist

View file

@ -1,30 +0,0 @@
#include "output_http.h"
#include <mist/http_parser.h>
#include <mist/mp4_generic.h>
namespace Mist{
/// HTTP output serving MPEG-DASH with fragmented MP4 segments: an MPD
/// manifest plus per-track init and media segments.
class OutDashMP4 : public HTTPOutput{
public:
OutDashMP4(Socket::Connection &conn);
~OutDashMP4();
/// Registers capabilities (codecs, URLs, options) with the controller.
static void init(Util::Config *cfg);
/// Routes manifest, init-segment and media-segment requests.
void onHTTP();
/// Streams packet payloads for the media segment currently being served.
void sendNext();
/// No separate header needed; init data is sent as its own segment.
void sendHeader(){};
protected:
/// Appends one <S> element per playable fragment to the SegmentTimeline.
void addSegmentTimeline(std::stringstream &r, DTSC::Track &Trk, bool live);
/// Formats milliseconds as an ISO 8601 duration (e.g. "PT1M2.345S").
std::string makeTime(uint64_t time);
/// Builds the full MPD XML document.
std::string buildManifest();
/// Sends the (sample-table-free) moov box for a track's init segment.
void sendMoov(uint32_t trackid);
/// Sends the moof box for one fragment of one track.
void sendMoof(uint32_t trackid, uint32_t fragIndice);
/// Sends the mdat header + codec init NALs, then arms packet streaming.
void sendMdat(uint32_t trackid, uint32_t fragIndice);
/// Wraps NAL data in a 4-byte big-endian length prefix.
std::string buildNalUnit(unsigned int len, const char *data);
// end timestamp (ms) of the segment currently being streamed by sendNext()
uint64_t targetTime;
// declared but not defined in this file — TODO confirm they are used
std::string h264init(const std::string &initData);
std::string h265init(const std::string &initData);
};
}// namespace Mist
typedef Mist::OutDashMP4 mistOut;

View file

@ -37,7 +37,6 @@ namespace Mist{
void OutDTSC::sendCmd(const JSON::Value &data){
MEDIUM_MSG("Sending DTCM: %s", data.toString().c_str());
unsigned long sendSize = data.packedSize();
myConn.SendNow("DTCM");
char sSize[4] ={0, 0, 0, 0};
Bit::htobl(sSize, data.packedSize());
@ -64,61 +63,58 @@ namespace Mist{
config = cfg;
}
std::string OutDTSC::getStatsName(){
if (pushing){
return "INPUT";
}else{
return "OUTPUT";
}
}
std::string OutDTSC::getStatsName(){return (pushing ? "INPUT" : "OUTPUT");}
/// Seeks to the first sync'ed keyframe of the main track.
/// Aborts if there is no main track or it has no keyframes.
void OutDTSC::initialSeek(){
unsigned long long seekPos = 0;
if (myMeta.live){
long unsigned int mainTrack = getMainSelectedTrack();
uint64_t seekPos = 0;
if (M.getLive()){
size_t mainTrack = getMainSelectedTrack();
// cancel if there are no keys in the main track
if (!myMeta.tracks.count(mainTrack) || !myMeta.tracks[mainTrack].keys.size()){return;}
if (mainTrack == INVALID_TRACK_ID){return;}
DTSC::Keys keys(M.keys(mainTrack));
if (!keys.getValidCount()){return;}
// seek to the oldest keyframe
for (std::deque<DTSC::Key>::iterator it = myMeta.tracks[mainTrack].keys.begin();
it != myMeta.tracks[mainTrack].keys.end(); ++it){
seekPos = it->getTime();
std::set<size_t> validTracks = M.getValidTracks();
for (size_t i = keys.getFirstValid(); i < keys.getEndValid(); ++i){
seekPos = keys.getTime(i);
bool good = true;
// check if all tracks have data for this point in time
for (std::set<unsigned long>::iterator ti = selectedTracks.begin(); ti != selectedTracks.end(); ++ti){
if (mainTrack == *ti){continue;}// skip self
if (!myMeta.tracks.count(*ti)){
HIGH_MSG("Skipping track %lu, not in tracks", *ti);
for (std::map<size_t, Comms::Users>::iterator ti = userSelect.begin(); ti != userSelect.end(); ++ti){
if (mainTrack == ti->first){continue;}// skip self
if (!validTracks.count(ti->first)){
HIGH_MSG("Skipping track %zu, not in tracks", ti->first);
continue;
}// ignore missing tracks
if (myMeta.tracks[*ti].lastms == myMeta.tracks[*ti].firstms){
HIGH_MSG("Skipping track %lu, last equals first", *ti);
if (M.getLastms(ti->first) == M.getFirstms(ti->first)){
HIGH_MSG("Skipping track %zu, last equals first", ti->first);
continue;
}// ignore point-tracks
if (myMeta.tracks[*ti].firstms > seekPos){
if (M.getFirstms(ti->first) > seekPos){
good = false;
break;
}
HIGH_MSG("Track %lu is good", *ti);
HIGH_MSG("Track %zu is good", ti->first);
}
// if yes, seek here
if (good){break;}
}
}
MEDIUM_MSG("Initial seek to %llums", seekPos);
MEDIUM_MSG("Initial seek to %" PRIu64 "ms", seekPos);
seek(seekPos);
}
void OutDTSC::sendNext(){
// If there are now more selectable tracks, select the new track and do a seek to the current
// timestamp Set sentHeader to false to force it to send init data
if (selectedTracks.size() < 2){
static unsigned long long lastMeta = 0;
if (userSelect.size() < 2){
static uint64_t lastMeta = 0;
if (Util::epoch() > lastMeta + 5){
lastMeta = Util::epoch();
updateMeta();
if (myMeta.tracks.size() > 1){
std::set<size_t> validTracks = getSupportedTracks();
if (validTracks.size() > 1){
if (selectDefaultTracks()){
INFO_MSG("Track selection changed - resending headers and continuing");
sentHeader = false;
@ -127,21 +123,24 @@ namespace Mist{
}
}
}
myConn.SendNow(thisPacket.getData(), thisPacket.getDataLen());
DTSC::Packet p(thisPacket, thisIdx + 1);
myConn.SendNow(p.getData(), p.getDataLen());
lastActive = Util::epoch();
}
void OutDTSC::sendHeader(){
sentHeader = true;
selectedTracks.clear();
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
if (it->second.type == "video" || it->second.type == "audio"){
selectedTracks.insert(it->first);
userSelect.clear();
std::set<size_t> validTracks = M.getValidTracks();
std::set<size_t> selectedTracks;
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); it++){
if (M.getType(*it) == "video" || M.getType(*it) == "audio"){
userSelect[*it].reload(streamName, *it);
selectedTracks.insert(*it);
}
}
myMeta.send(myConn, true, selectedTracks);
if (myMeta.live){realTime = 0;}
M.send(myConn, true, selectedTracks, true);
if (M.getLive()){realTime = 0;}
}
void OutDTSC::onFail(const std::string &msg, bool critical){
@ -184,7 +183,7 @@ namespace Mist{
continue;
}
if (dScan.getMember("cmd").asString() == "reset"){
myMeta.reset();
meta.reInit(streamName);
sendOk("Internal state reset");
continue;
}
@ -200,9 +199,9 @@ namespace Mist{
if (!myConn.Received().available(8 + rSize)){return;}// abort - not enough data yet
std::string dataPacket = myConn.Received().remove(8 + rSize);
DTSC::Packet metaPack(dataPacket.data(), dataPacket.size());
myMeta.reinit(metaPack);
meta.reInit(streamName, metaPack.getScan());
std::stringstream rep;
rep << "DTSC_HEAD received with " << myMeta.tracks.size() << " tracks. Bring on those data packets!";
rep << "DTSC_HEAD received with " << M.getValidTracks().size() << " tracks. Bring on those data packets!";
sendOk(rep.str());
}else if (myConn.Received().copy(4) == "DTP2"){
if (!isPushing()){
@ -215,7 +214,7 @@ namespace Mist{
if (!myConn.Received().available(8 + rSize)){return;}// abort - not enough data yet
std::string dataPacket = myConn.Received().remove(8 + rSize);
DTSC::Packet inPack(dataPacket.data(), dataPacket.size(), true);
if (!myMeta.tracks.count(inPack.getTrackId())){
if (M.trackIDToIndex(inPack.getTrackId(), getpid()) == INVALID_TRACK_ID){
onFail("DTSC_V2 received for a track that was not announced in the DTSC_HEAD!", true);
return;
}

View file

@ -22,7 +22,6 @@ namespace Mist{
std::string salt;
void handlePush(DTSC::Scan &dScan);
void handlePlay(DTSC::Scan &dScan);
unsigned long long fastAsPossibleTime;
};
}// namespace Mist

View file

@ -16,7 +16,7 @@ namespace Mist{
if (config->getString("target").size()){
if (config->getString("target").find(".webm") != std::string::npos){doctype = "webm";}
initialize();
if (myMeta.vod){calcVodSizes();}
if (M.getVod()){calcVodSizes();}
if (!streamName.size()){
WARN_MSG("Recording unconnected EBML output to file! Cancelled.");
conn.close();
@ -28,7 +28,7 @@ namespace Mist{
INFO_MSG("Outputting %s to stdout in EBML format", streamName.c_str());
return;
}
if (!myMeta.tracks.size()){
if (!M.getValidTracks().size()){
INFO_MSG("Stream not available - aborting");
conn.close();
return;
@ -106,28 +106,31 @@ namespace Mist{
bool OutEBML::isRecording(){return config->getString("target").size();}
/// Calculates the size of a Cluster (contents only) and returns it.
/// Bases the calculation on the currently selected tracks and the given start/end time for the cluster.
uint32_t OutEBML::clusterSize(uint64_t start, uint64_t end){
uint32_t sendLen = EBML::sizeElemUInt(EBML::EID_TIMECODE, start);
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
DTSC::Track &thisTrack = myMeta.tracks[*it];
uint32_t firstPart = 0;
/// Bases the calculation on the currently selected tracks and the given start/end time for the
/// cluster.
size_t OutEBML::clusterSize(uint64_t start, uint64_t end){
size_t sendLen = EBML::sizeElemUInt(EBML::EID_TIMECODE, start);
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
DTSC::Keys keys(M.keys(it->first));
DTSC::Parts parts(M.parts(it->first));
uint32_t firstPart = parts.getFirstValid();
unsigned long long int prevParts = 0;
uint64_t curMS = 0;
for (std::deque<DTSC::Key>::iterator it2 = thisTrack.keys.begin(); it2 != thisTrack.keys.end(); it2++){
if (it2->getTime() > start && it2 != thisTrack.keys.begin()){break;}
for (size_t i = keys.getFirstValid(); i < keys.getEndValid(); ++i){
if (keys.getTime(i) > start && i != keys.getFirstValid()){break;}
firstPart += prevParts;
prevParts = it2->getParts();
curMS = it2->getTime();
prevParts = keys.getParts(i);
curMS = keys.getTime(i);
}
size_t maxParts = thisTrack.parts.size();
for (size_t i = firstPart; i < maxParts; i++){
for (size_t i = firstPart; i < parts.getEndValid(); ++i){
if (curMS >= end){break;}
if (curMS >= start){
uint32_t blkLen = EBML::sizeSimpleBlock(thisTrack.trackID, thisTrack.parts[i].getSize());
uint32_t blkLen = EBML::sizeSimpleBlock(it->first + 1, parts.getSize(i));
sendLen += blkLen;
}
curMS += thisTrack.parts[i].getDuration();
curMS += parts.getDuration(i);
}
}
return sendLen;
@ -137,19 +140,21 @@ namespace Mist{
if (thisPacket.getTime() >= newClusterTime){
if (liveSeek()){return;}
currentClusterTime = thisPacket.getTime();
if (myMeta.vod){
if (M.getVod()){
// In case of VoD, clusters are aligned with the main track fragments
// EXCEPT when they are more than 30 seconds long, because clusters are limited to -32 to 32 seconds.
DTSC::Track &Trk = myMeta.tracks[getMainSelectedTrack()];
uint32_t fragIndice = Trk.timeToFragnum(currentClusterTime);
newClusterTime = Trk.getKey(Trk.fragments[fragIndice].getNumber()).getTime() +
Trk.fragments[fragIndice].getDuration();
// EXCEPT when they are more than 30 seconds long, because clusters are limited to -32 to 32
// seconds.
size_t idx = getMainSelectedTrack();
DTSC::Fragments fragments(M.fragments(idx));
uint32_t fragIndice = M.getFragmentIndexForTime(idx, currentClusterTime);
newClusterTime = M.getTimeForFragmentIndex(idx, fragIndice) + fragments.getDuration(fragIndice);
// Limit clusters to 30s, and the last fragment should always be 30s, just in case.
if ((newClusterTime - currentClusterTime > 30000) || (fragIndice == Trk.fragments.size() - 1)){
if ((newClusterTime - currentClusterTime > 30000) || (fragIndice == fragments.getEndValid() - 1)){
newClusterTime = currentClusterTime + 30000;
}
EXTREME_MSG("Cluster: %llu - %llu (%lu/%lu) = %llu", currentClusterTime, newClusterTime,
fragIndice, Trk.fragments.size(), clusterSize(currentClusterTime, newClusterTime));
EXTREME_MSG("Cluster: %" PRIu64 " - %" PRIu64 " (%" PRIu32 "/%zu) = %zu",
currentClusterTime, newClusterTime, fragIndice, fragments.getEndValid(),
clusterSize(currentClusterTime, newClusterTime));
}else{
// In live, clusters are aligned with the lookAhead time
newClusterTime = currentClusterTime + (needsLookAhead ? needsLookAhead : 1);
@ -162,152 +167,167 @@ namespace Mist{
EBML::sendElemUInt(myConn, EBML::EID_TIMECODE, currentClusterTime);
}
EBML::sendSimpleBlock(myConn, thisPacket, currentClusterTime,
myMeta.tracks[thisPacket.getTrackId()].type != "video");
DTSC::Packet p(thisPacket, thisIdx + 1);
EBML::sendSimpleBlock(myConn, p, currentClusterTime, M.getType(thisIdx) != "video");
}
std::string OutEBML::trackCodecID(const DTSC::Track &Trk){
if (Trk.codec == "opus"){return "A_OPUS";}
if (Trk.codec == "H264"){return "V_MPEG4/ISO/AVC";}
if (Trk.codec == "HEVC"){return "V_MPEGH/ISO/HEVC";}
if (Trk.codec == "VP8"){return "V_VP8";}
if (Trk.codec == "VP9"){return "V_VP9";}
if (Trk.codec == "AV1"){return "V_AV1";}
if (Trk.codec == "AAC"){return "A_AAC";}
if (Trk.codec == "vorbis"){return "A_VORBIS";}
if (Trk.codec == "theora"){return "V_THEORA";}
if (Trk.codec == "MPEG2"){return "V_MPEG2";}
if (Trk.codec == "PCM"){return "A_PCM/INT/BIG";}
if (Trk.codec == "MP2"){return "A_MPEG/L2";}
if (Trk.codec == "MP3"){return "A_MPEG/L3";}
if (Trk.codec == "AC3"){return "A_AC3";}
if (Trk.codec == "ALAW"){return "A_MS/ACM";}
if (Trk.codec == "ULAW"){return "A_MS/ACM";}
if (Trk.codec == "FLOAT"){return "A_PCM/FLOAT/IEEE";}
if (Trk.codec == "DTS"){return "A_DTS";}
if (Trk.codec == "JSON"){return "M_JSON";}
std::string OutEBML::trackCodecID(size_t idx){
std::string codec = M.getCodec(idx);
if (codec == "opus"){return "A_OPUS";}
if (codec == "H264"){return "V_MPEG4/ISO/AVC";}
if (codec == "HEVC"){return "V_MPEGH/ISO/HEVC";}
if (codec == "VP8"){return "V_VP8";}
if (codec == "VP9"){return "V_VP9";}
if (codec == "AV1"){return "V_AV1";}
if (codec == "AAC"){return "A_AAC";}
if (codec == "vorbis"){return "A_VORBIS";}
if (codec == "theora"){return "V_THEORA";}
if (codec == "MPEG2"){return "V_MPEG2";}
if (codec == "PCM"){return "A_PCM/INT/BIG";}
if (codec == "MP2"){return "A_MPEG/L2";}
if (codec == "MP3"){return "A_MPEG/L3";}
if (codec == "AC3"){return "A_AC3";}
if (codec == "ALAW"){return "A_MS/ACM";}
if (codec == "ULAW"){return "A_MS/ACM";}
if (codec == "FLOAT"){return "A_PCM/FLOAT/IEEE";}
if (codec == "DTS"){return "A_DTS";}
if (codec == "JSON"){return "M_JSON";}
return "E_UNKNOWN";
}
void OutEBML::sendElemTrackEntry(const DTSC::Track &Trk){
void OutEBML::sendElemTrackEntry(size_t idx){
// First calculate the sizes of the TrackEntry and Audio/Video elements.
uint32_t sendLen = 0;
uint32_t subLen = 0;
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKNUMBER, Trk.trackID);
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKUID, Trk.trackID);
sendLen += EBML::sizeElemStr(EBML::EID_CODECID, trackCodecID(Trk));
sendLen += EBML::sizeElemStr(EBML::EID_LANGUAGE, Trk.lang.size() ? Trk.lang : "und");
size_t sendLen = 0;
size_t subLen = 0;
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKNUMBER, idx + 1);
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKUID, idx + 1);
sendLen += EBML::sizeElemStr(EBML::EID_CODECID, trackCodecID(idx));
sendLen += EBML::sizeElemStr(EBML::EID_LANGUAGE, M.getLang(idx).size() ? M.getLang(idx) : "und");
sendLen += EBML::sizeElemUInt(EBML::EID_FLAGLACING, 0);
if (Trk.codec == "ALAW" || Trk.codec == "ULAW"){
std::string codec = M.getCodec(idx);
if (codec == "ALAW" || codec == "ULAW"){
sendLen += EBML::sizeElemStr(EBML::EID_CODECPRIVATE, std::string((size_t)18, '\000'));
}else{
if (Trk.init.size()){sendLen += EBML::sizeElemStr(EBML::EID_CODECPRIVATE, Trk.init);}
if (M.getInit(idx).size()){
sendLen += EBML::sizeElemStr(EBML::EID_CODECPRIVATE, M.getInit(idx));
}
}
if (Trk.codec == "opus" && Trk.init.size() > 11){
sendLen += EBML::sizeElemUInt(EBML::EID_CODECDELAY, Opus::getPreSkip(Trk.init.data()) * 1000000 / 48);
if (codec == "opus" && M.getInit(idx).size() > 11){
sendLen += EBML::sizeElemUInt(EBML::EID_CODECDELAY, Opus::getPreSkip(M.getInit(idx).data()) * 1000000 / 48);
sendLen += EBML::sizeElemUInt(EBML::EID_SEEKPREROLL, 80000000);
}
if (Trk.type == "video"){
std::string type = M.getType(idx);
if (type == "video"){
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKTYPE, 1);
subLen += EBML::sizeElemUInt(EBML::EID_PIXELWIDTH, Trk.width);
subLen += EBML::sizeElemUInt(EBML::EID_PIXELHEIGHT, Trk.height);
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYWIDTH, Trk.width);
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYHEIGHT, Trk.height);
subLen += EBML::sizeElemUInt(EBML::EID_PIXELWIDTH, M.getWidth(idx));
subLen += EBML::sizeElemUInt(EBML::EID_PIXELHEIGHT, M.getHeight(idx));
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYWIDTH, M.getWidth(idx));
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYHEIGHT, M.getHeight(idx));
sendLen += EBML::sizeElemHead(EBML::EID_VIDEO, subLen);
}
if (Trk.type == "audio"){
if (type == "audio"){
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKTYPE, 2);
subLen += EBML::sizeElemUInt(EBML::EID_CHANNELS, Trk.channels);
subLen += EBML::sizeElemDbl(EBML::EID_SAMPLINGFREQUENCY, Trk.rate);
subLen += EBML::sizeElemUInt(EBML::EID_BITDEPTH, Trk.size);
subLen += EBML::sizeElemUInt(EBML::EID_CHANNELS, M.getChannels(idx));
subLen += EBML::sizeElemDbl(EBML::EID_SAMPLINGFREQUENCY, M.getRate(idx));
subLen += EBML::sizeElemUInt(EBML::EID_BITDEPTH, M.getSize(idx));
sendLen += EBML::sizeElemHead(EBML::EID_AUDIO, subLen);
}
if (Trk.type == "meta"){sendLen += EBML::sizeElemUInt(EBML::EID_TRACKTYPE, 3);}
if (type == "meta"){sendLen += EBML::sizeElemUInt(EBML::EID_TRACKTYPE, 3);}
sendLen += subLen;
// Now actually send.
EBML::sendElemHead(myConn, EBML::EID_TRACKENTRY, sendLen);
EBML::sendElemUInt(myConn, EBML::EID_TRACKNUMBER, Trk.trackID);
EBML::sendElemUInt(myConn, EBML::EID_TRACKUID, Trk.trackID);
EBML::sendElemStr(myConn, EBML::EID_CODECID, trackCodecID(Trk));
EBML::sendElemStr(myConn, EBML::EID_LANGUAGE, Trk.lang.size() ? Trk.lang : "und");
EBML::sendElemUInt(myConn, EBML::EID_TRACKNUMBER, idx + 1);
EBML::sendElemUInt(myConn, EBML::EID_TRACKUID, idx + 1);
EBML::sendElemStr(myConn, EBML::EID_CODECID, trackCodecID(idx));
EBML::sendElemStr(myConn, EBML::EID_LANGUAGE, M.getLang(idx).size() ? M.getLang(idx) : "und");
EBML::sendElemUInt(myConn, EBML::EID_FLAGLACING, 0);
if (Trk.codec == "ALAW" || Trk.codec == "ULAW"){
std::string init = RIFF::fmt::generate(((Trk.codec == "ALAW") ? 6 : 7), Trk.channels, Trk.rate,
Trk.bps, Trk.channels * (Trk.size << 3), Trk.size);
if (codec == "ALAW" || codec == "ULAW"){
std::string init =
RIFF::fmt::generate(((codec == "ALAW") ? 6 : 7), M.getChannels(idx), M.getRate(idx),
M.getBps(idx), M.getChannels(idx) * (M.getSize(idx) << 3), M.getSize(idx));
EBML::sendElemStr(myConn, EBML::EID_CODECPRIVATE, init.substr(8));
}else{
if (Trk.init.size()){EBML::sendElemStr(myConn, EBML::EID_CODECPRIVATE, Trk.init);}
if (M.getInit(idx).size()){
EBML::sendElemStr(myConn, EBML::EID_CODECPRIVATE, M.getInit(idx));
}
}
if (Trk.codec == "opus"){
EBML::sendElemUInt(myConn, EBML::EID_CODECDELAY, Opus::getPreSkip(Trk.init.data()) * 1000000 / 48);
if (codec == "opus" && M.getInit(idx).size() > 11){
EBML::sendElemUInt(myConn, EBML::EID_CODECDELAY, Opus::getPreSkip(M.getInit(idx).data()) * 1000000 / 48);
EBML::sendElemUInt(myConn, EBML::EID_SEEKPREROLL, 80000000);
}
if (Trk.type == "video"){
if (type == "video"){
EBML::sendElemUInt(myConn, EBML::EID_TRACKTYPE, 1);
EBML::sendElemHead(myConn, EBML::EID_VIDEO, subLen);
EBML::sendElemUInt(myConn, EBML::EID_PIXELWIDTH, Trk.width);
EBML::sendElemUInt(myConn, EBML::EID_PIXELHEIGHT, Trk.height);
EBML::sendElemUInt(myConn, EBML::EID_DISPLAYWIDTH, Trk.width);
EBML::sendElemUInt(myConn, EBML::EID_DISPLAYHEIGHT, Trk.height);
EBML::sendElemUInt(myConn, EBML::EID_PIXELWIDTH, M.getWidth(idx));
EBML::sendElemUInt(myConn, EBML::EID_PIXELHEIGHT, M.getHeight(idx));
EBML::sendElemUInt(myConn, EBML::EID_DISPLAYWIDTH, M.getWidth(idx));
EBML::sendElemUInt(myConn, EBML::EID_DISPLAYHEIGHT, M.getHeight(idx));
}
if (Trk.type == "audio"){
if (type == "audio"){
EBML::sendElemUInt(myConn, EBML::EID_TRACKTYPE, 2);
EBML::sendElemHead(myConn, EBML::EID_AUDIO, subLen);
EBML::sendElemUInt(myConn, EBML::EID_CHANNELS, Trk.channels);
EBML::sendElemDbl(myConn, EBML::EID_SAMPLINGFREQUENCY, Trk.rate);
EBML::sendElemUInt(myConn, EBML::EID_BITDEPTH, Trk.size);
EBML::sendElemUInt(myConn, EBML::EID_CHANNELS, M.getChannels(idx));
EBML::sendElemDbl(myConn, EBML::EID_SAMPLINGFREQUENCY, M.getRate(idx));
EBML::sendElemUInt(myConn, EBML::EID_BITDEPTH, M.getSize(idx));
}
if (Trk.type == "meta"){EBML::sendElemUInt(myConn, EBML::EID_TRACKTYPE, 3);}
if (type == "meta"){EBML::sendElemUInt(myConn, EBML::EID_TRACKTYPE, 3);}
}
uint32_t OutEBML::sizeElemTrackEntry(const DTSC::Track &Trk){
size_t OutEBML::sizeElemTrackEntry(size_t idx){
// Calculate the sizes of the TrackEntry and Audio/Video elements.
uint32_t sendLen = 0;
uint32_t subLen = 0;
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKNUMBER, Trk.trackID);
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKUID, Trk.trackID);
sendLen += EBML::sizeElemStr(EBML::EID_CODECID, trackCodecID(Trk));
sendLen += EBML::sizeElemStr(EBML::EID_LANGUAGE, Trk.lang.size() ? Trk.lang : "und");
size_t sendLen = 0;
size_t subLen = 0;
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKNUMBER, idx + 1);
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKUID, idx + 1);
sendLen += EBML::sizeElemStr(EBML::EID_CODECID, trackCodecID(idx));
sendLen += EBML::sizeElemStr(EBML::EID_LANGUAGE, M.getLang(idx).size() ? M.getLang(idx) : "und");
sendLen += EBML::sizeElemUInt(EBML::EID_FLAGLACING, 0);
if (Trk.codec == "ALAW" || Trk.codec == "ULAW"){
std::string codec = M.getCodec(idx);
if (codec == "ALAW" || codec == "ULAW"){
sendLen += EBML::sizeElemStr(EBML::EID_CODECPRIVATE, std::string((size_t)18, '\000'));
}else{
if (Trk.init.size()){sendLen += EBML::sizeElemStr(EBML::EID_CODECPRIVATE, Trk.init);}
if (M.getInit(idx).size()){
sendLen += EBML::sizeElemStr(EBML::EID_CODECPRIVATE, M.getInit(idx));
}
}
if (Trk.codec == "opus"){
sendLen += EBML::sizeElemUInt(EBML::EID_CODECDELAY, Opus::getPreSkip(Trk.init.data()) * 1000000 / 48);
std::string type = M.getType(idx);
if (codec == "opus" && M.getInit(idx).size() > 11){
sendLen += EBML::sizeElemUInt(EBML::EID_CODECDELAY, Opus::getPreSkip(M.getInit(idx).data()) * 1000000 / 48);
sendLen += EBML::sizeElemUInt(EBML::EID_SEEKPREROLL, 80000000);
}
if (Trk.type == "video"){
if (type == "video"){
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKTYPE, 1);
subLen += EBML::sizeElemUInt(EBML::EID_PIXELWIDTH, Trk.width);
subLen += EBML::sizeElemUInt(EBML::EID_PIXELHEIGHT, Trk.height);
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYWIDTH, Trk.width);
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYHEIGHT, Trk.height);
subLen += EBML::sizeElemUInt(EBML::EID_PIXELWIDTH, M.getWidth(idx));
subLen += EBML::sizeElemUInt(EBML::EID_PIXELHEIGHT, M.getHeight(idx));
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYWIDTH, M.getWidth(idx));
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYHEIGHT, M.getHeight(idx));
sendLen += EBML::sizeElemHead(EBML::EID_VIDEO, subLen);
}
if (Trk.type == "audio"){
if (type == "audio"){
sendLen += EBML::sizeElemUInt(EBML::EID_TRACKTYPE, 2);
subLen += EBML::sizeElemUInt(EBML::EID_CHANNELS, Trk.channels);
subLen += EBML::sizeElemDbl(EBML::EID_SAMPLINGFREQUENCY, Trk.rate);
subLen += EBML::sizeElemUInt(EBML::EID_BITDEPTH, Trk.size);
subLen += EBML::sizeElemUInt(EBML::EID_CHANNELS, M.getChannels(idx));
subLen += EBML::sizeElemDbl(EBML::EID_SAMPLINGFREQUENCY, M.getRate(idx));
subLen += EBML::sizeElemUInt(EBML::EID_BITDEPTH, M.getSize(idx));
sendLen += EBML::sizeElemHead(EBML::EID_AUDIO, subLen);
}
if (Trk.type == "meta"){sendLen += EBML::sizeElemUInt(EBML::EID_TRACKTYPE, 3);}
if (type == "meta"){sendLen += EBML::sizeElemUInt(EBML::EID_TRACKTYPE, 3);}
sendLen += subLen;
return EBML::sizeElemHead(EBML::EID_TRACKENTRY, sendLen) + sendLen;
}
void OutEBML::sendHeader(){
double duration = 0;
DTSC::Track &Trk = myMeta.tracks[getMainSelectedTrack()];
if (myMeta.vod){duration = Trk.lastms - Trk.firstms;}
if (myMeta.live){needsLookAhead = 420;}
size_t idx = getMainSelectedTrack();
if (M.getVod()){
duration = M.getLastms(idx) - M.getFirstms(idx);
}else{
needsLookAhead = 420;
}
// EBML header and Segment
EBML::sendElemEBML(myConn, doctype);
EBML::sendElemHead(myConn, EBML::EID_SEGMENT, segmentSize); // Default = Unknown size
if (myMeta.vod){
if (M.getVod()){
// SeekHead
EBML::sendElemHead(myConn, EBML::EID_SEEKHEAD, seekSize);
EBML::sendElemSeek(myConn, EBML::EID_INFO, seekheadSize);
@ -317,38 +337,39 @@ namespace Mist{
// Info
EBML::sendElemInfo(myConn, "MistServer " PACKAGE_VERSION, duration);
// Tracks
uint32_t trackSizes = 0;
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
trackSizes += sizeElemTrackEntry(myMeta.tracks[*it]);
size_t trackSizes = 0;
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
trackSizes += sizeElemTrackEntry(it->first);
}
EBML::sendElemHead(myConn, EBML::EID_TRACKS, trackSizes);
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
sendElemTrackEntry(myMeta.tracks[*it]);
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
sendElemTrackEntry(it->first);
}
if (myMeta.vod){
if (M.getVod()){
EBML::sendElemHead(myConn, EBML::EID_CUES, cuesSize);
uint64_t tmpsegSize = infoSize + tracksSize + seekheadSize + cuesSize +
EBML::sizeElemHead(EBML::EID_CUES, cuesSize);
for (std::map<uint64_t, uint64_t>::iterator it = clusterSizes.begin(); it != clusterSizes.end(); ++it){
EBML::sendElemCuePoint(myConn, it->first, Trk.trackID, tmpsegSize, 0);
EBML::sendElemCuePoint(myConn, it->first, idx + 1, tmpsegSize, 0);
tmpsegSize += it->second;
}
}
sentHeader = true;
}
/// Seeks to the given byte position by doing a regular seek and remembering the byte offset from that point
/// Seeks to the given byte position by doing a regular seek and remembering the byte offset from
/// that point
void OutEBML::byteSeek(uint64_t startPos){
INFO_MSG("Seeking to %llu bytes", startPos);
INFO_MSG("Seeking to %" PRIu64 " bytes", startPos);
sentHeader = false;
newClusterTime = 0;
if (startPos == 0){
seek(0);
return;
}
uint64_t headerSize = EBML::sizeElemEBML(doctype) +
EBML::sizeElemHead(EBML::EID_SEGMENT, segmentSize) + seekheadSize + infoSize +
tracksSize + EBML::sizeElemHead(EBML::EID_CUES, cuesSize) + cuesSize;
size_t headerSize = EBML::sizeElemEBML(doctype) +
EBML::sizeElemHead(EBML::EID_SEGMENT, segmentSize) + seekheadSize + infoSize +
tracksSize + EBML::sizeElemHead(EBML::EID_CUES, cuesSize) + cuesSize;
if (startPos < headerSize){
HIGH_MSG("Seek went into or before header");
seek(0);
@ -357,11 +378,10 @@ namespace Mist{
}
startPos -= headerSize;
sentHeader = true; // skip the header
DTSC::Track &Trk = myMeta.tracks[getMainSelectedTrack()];
for (std::map<uint64_t, uint64_t>::iterator it = clusterSizes.begin(); it != clusterSizes.end(); ++it){
VERYHIGH_MSG("Cluster %llu (%llu bytes) -> %llu to go", it->first, it->second, startPos);
VERYHIGH_MSG("Cluster %" PRIu64 " (%" PRIu64 " bytes) -> %" PRIu64 " to go", it->first, it->second, startPos);
if (startPos < it->second){
HIGH_MSG("Seek to fragment at %llu ms", it->first);
HIGH_MSG("Seek to fragment at %" PRIu64 " ms", it->first);
myConn.skipBytes(startPos);
seek(it->first);
newClusterTime = it->first;
@ -389,15 +409,15 @@ namespace Mist{
}
// Calculate the sizes of various parts, if we're VoD.
uint64_t totalSize = 0;
if (myMeta.vod){
size_t totalSize = 0;
if (M.getVod()){
calcVodSizes();
// We now know the full size of the segment, thus can calculate the total size
totalSize = EBML::sizeElemEBML(doctype) + EBML::sizeElemHead(EBML::EID_SEGMENT, segmentSize) + segmentSize;
}
uint64_t byteEnd = totalSize - 1;
uint64_t byteStart = 0;
size_t byteEnd = totalSize - 1;
size_t byteStart = 0;
/*LTS-START*/
// allow setting of max lead time through buffer variable.
@ -424,12 +444,12 @@ namespace Mist{
/*LTS-END*/
char rangeType = ' ';
if (!myMeta.live){
if (M.getVod()){
if (H.GetHeader("Range") != ""){
if (parseRange(byteStart, byteEnd)){
if (H.GetVar("buffer") == ""){
DTSC::Track &Trk = myMeta.tracks[getMainSelectedTrack()];
maxSkipAhead = (Trk.lastms - Trk.firstms) / 20 + 7500;
size_t idx = getMainSelectedTrack();
maxSkipAhead = (M.getLastms(idx) - M.getFirstms(idx)) / 20 + 7500;
}
}
rangeType = H.GetHeader("Range")[0];
@ -438,33 +458,29 @@ namespace Mist{
H.Clean(); // make sure no parts of old requests are left in any buffers
H.setCORSHeaders();
H.SetHeader("Content-Type", "video/webm");
if (myMeta.vod){H.SetHeader("Accept-Ranges", "bytes, parsec");}
if (M.getVod()){H.SetHeader("Accept-Ranges", "bytes, parsec");}
if (rangeType != ' '){
if (!byteEnd){
if (rangeType == 'p'){
H.SetBody("Starsystem not in communications range");
H.SendResponse("416", "Starsystem not in communications range", myConn);
return;
}else{
H.SetBody("Requested Range Not Satisfiable");
H.SendResponse("416", "Requested Range Not Satisfiable", myConn);
return;
}
}else{
std::stringstream rangeReply;
rangeReply << "bytes " << byteStart << "-" << byteEnd << "/" << totalSize;
H.SetHeader("Content-Length", byteEnd - byteStart + 1);
H.SetHeader("Content-Range", rangeReply.str());
/// \todo Switch to chunked?
H.SendResponse("206", "Partial content", myConn);
// H.StartResponse("206", "Partial content", HTTP_R, conn);
byteSeek(byteStart);
H.SetBody("Requested Range Not Satisfiable");
H.SendResponse("416", "Requested Range Not Satisfiable", myConn);
return;
}
std::stringstream rangeReply;
rangeReply << "bytes " << byteStart << "-" << byteEnd << "/" << totalSize;
H.SetHeader("Content-Length", byteEnd - byteStart + 1);
H.SetHeader("Content-Range", rangeReply.str());
/// \todo Switch to chunked?
H.SendResponse("206", "Partial content", myConn);
byteSeek(byteStart);
}else{
if (myMeta.vod){H.SetHeader("Content-Length", byteEnd - byteStart + 1);}
if (M.getVod()){H.SetHeader("Content-Length", byteEnd - byteStart + 1);}
/// \todo Switch to chunked?
H.SendResponse("200", "OK", myConn);
// HTTP_S.StartResponse(HTTP_R, conn);
}
parseData = true;
wantRequest = false;
@ -475,8 +491,8 @@ namespace Mist{
// Already calculated
return;
}
DTSC::Track &Trk = myMeta.tracks[getMainSelectedTrack()];
double duration = Trk.lastms - Trk.firstms;
size_t idx = getMainSelectedTrack();
double duration = M.getLastms(idx) - M.getFirstms(idx);
// Calculate the segment size
// Segment contains SeekHead, Info, Tracks, Cues (in that order)
// Howeveer, SeekHead is dependent on Info/Tracks sizes, so we calculate those first.
@ -484,8 +500,8 @@ namespace Mist{
infoSize = EBML::sizeElemInfo("MistServer " PACKAGE_VERSION, duration);
// Calculating Tracks size
tracksSize = 0;
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
tracksSize += sizeElemTrackEntry(myMeta.tracks[*it]);
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
tracksSize += sizeElemTrackEntry(it->first);
}
tracksSize += EBML::sizeElemHead(EBML::EID_TRACKS, tracksSize);
// Calculating SeekHead size
@ -504,16 +520,17 @@ namespace Mist{
// Which, in turn, is dependent on the Cluster offsets.
// We make this a bit easier by pre-calculating the sizes of all clusters first
uint64_t fragNo = 0;
for (std::deque<DTSC::Fragment>::iterator it = Trk.fragments.begin(); it != Trk.fragments.end(); ++it){
uint64_t clusterStart = Trk.getKey(it->getNumber()).getTime();
uint64_t clusterEnd = clusterStart + it->getDuration();
DTSC::Fragments fragments(M.fragments(idx));
for (size_t i = fragments.getFirstValid(); i < fragments.getEndValid(); i++){
uint64_t clusterStart = M.getTimeForFragmentIndex(idx, i);
uint64_t clusterEnd = clusterStart + fragments.getDuration(i);
// The first fragment always starts at time 0, even if the main track does not.
if (!fragNo){clusterStart = 0;}
uint64_t clusterTmpEnd = clusterEnd;
do{
clusterTmpEnd = clusterEnd;
// The last fragment always ends at the end, even if the main track does not.
if (fragNo == Trk.fragments.size() - 1){clusterTmpEnd = clusterStart + 30000;}
if (fragNo == fragments.getEndValid() - 1){clusterTmpEnd = clusterStart + 30000;}
// Limit clusters to 30 seconds.
if (clusterTmpEnd - clusterStart > 30000){clusterTmpEnd = clusterStart + 30000;}
uint64_t cSize = clusterSize(clusterStart, clusterTmpEnd);
@ -534,7 +551,7 @@ namespace Mist{
EBML::sizeElemHead(EBML::EID_CUES, cuesSize);
uint32_t cuesInside = 0;
for (std::map<uint64_t, uint64_t>::iterator it = clusterSizes.begin(); it != clusterSizes.end(); ++it){
cuesInside += EBML::sizeElemCuePoint(it->first, Trk.trackID, segmentSize, 0);
cuesInside += EBML::sizeElemCuePoint(it->first, idx + 1, segmentSize, 0);
segmentSize += it->second;
}
cuesSize = cuesInside;

View file

@ -8,8 +8,8 @@ namespace Mist{
static void init(Util::Config *cfg);
void onHTTP();
void sendNext();
virtual void sendHeader();
uint32_t clusterSize(uint64_t start, uint64_t end);
void sendHeader();
size_t clusterSize(uint64_t start, uint64_t end);
protected:
virtual bool inlineRestartCapable() const{return true;}
@ -17,21 +17,21 @@ namespace Mist{
private:
bool isRecording();
std::string doctype;
void sendElemTrackEntry(const DTSC::Track &Trk);
uint32_t sizeElemTrackEntry(const DTSC::Track &Trk);
std::string trackCodecID(const DTSC::Track &Trk);
void sendElemTrackEntry(size_t idx);
size_t sizeElemTrackEntry(size_t idx);
std::string trackCodecID(size_t idx);
uint64_t currentClusterTime;
uint64_t newClusterTime;
// VoD-only
void calcVodSizes();
uint64_t segmentSize; // size of complete segment contents (excl. header)
uint32_t tracksSize; // size of Tracks (incl. header)
uint32_t infoSize; // size of Info (incl. header)
uint32_t cuesSize; // size of Cues (excl. header)
uint32_t seekheadSize; // size of SeekHead (incl. header)
uint32_t seekSize; // size of contents of SeekHead (excl. header)
std::map<uint64_t, uint64_t> clusterSizes; // sizes of Clusters by start time (incl. header)
void byteSeek(uint64_t startPos);
size_t segmentSize; // size of complete segment contents (excl. header)
size_t tracksSize; // size of Tracks (incl. header)
size_t infoSize; // size of Info (incl. header)
size_t cuesSize; // size of Cues (excl. header)
size_t seekheadSize; // size of SeekHead (incl. header)
size_t seekSize; // size of contents of SeekHead (excl. header)
std::map<size_t, size_t> clusterSizes; // sizes of Clusters (incl. header)
void byteSeek(size_t startPos);
};
}// namespace Mist

View file

@ -1,9 +1,10 @@
#include "output_progressive_flv.h"
#include "output_flv.h"
#include <mist/h264.h>
namespace Mist{
OutProgressiveFLV::OutProgressiveFLV(Socket::Connection &conn) : HTTPOutput(conn){}
OutFLV::OutFLV(Socket::Connection &conn) : HTTPOutput(conn){}
void OutProgressiveFLV::init(Util::Config *cfg){
void OutFLV::init(Util::Config *cfg){
HTTPOutput::init(cfg);
capa["name"] = "FLV";
capa["friendly"] = "Flash progressive over HTTP (FLV)";
@ -45,24 +46,25 @@ namespace Mist{
cfg->addOption("keyframeonly", opt);
}
bool OutProgressiveFLV::isRecording(){return config->getString("target").size();}
bool OutFLV::isRecording(){return config->getString("target").size();}
void OutProgressiveFLV::sendNext(){
// If there are now more selectable tracks, select the new track and do a seek to the current timestamp
if (myMeta.live && selectedTracks.size() < 2){
static unsigned long long lastMeta = 0;
void OutFLV::sendNext(){
// If there are now more selectable tracks, select the new track and do a seek to the current
// timestamp
if (M.getLive() && userSelect.size() < 2){
static uint64_t lastMeta = 0;
if (Util::epoch() > lastMeta + 5){
lastMeta = Util::epoch();
updateMeta();
if (myMeta.tracks.size() > 1){
std::set<size_t> validTracks = getSupportedTracks();
if (validTracks.size() > 1){
if (selectDefaultTracks()){
INFO_MSG("Track selection changed - resending headers and continuing");
for (std::set<long unsigned int>::iterator it = selectedTracks.begin();
it != selectedTracks.end(); it++){
if (myMeta.tracks[*it].type == "video" && tag.DTSCVideoInit(myMeta.tracks[*it])){
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin();
it != userSelect.end(); it++){
if (M.getType(it->first) == "video" && tag.DTSCVideoInit(meta, it->first)){
myConn.SendNow(tag.data, tag.len);
}
if (myMeta.tracks[*it].type == "audio" && tag.DTSCAudioInit(myMeta.tracks[*it])){
if (M.getType(it->first) == "audio" && tag.DTSCAudioInit(meta, it->first)){
myConn.SendNow(tag.data, tag.len);
}
}
@ -71,10 +73,8 @@ namespace Mist{
}
}
}
DTSC::Track &trk = myMeta.tracks[thisPacket.getTrackId()];
tag.DTSCLoader(thisPacket, trk);
if (trk.codec == "PCM" && trk.size == 16){
tag.DTSCLoader(thisPacket, M, thisIdx);
if (M.getCodec(thisIdx) == "PCM" && M.getSize(thisIdx) == 16){
char *ptr = tag.getData();
uint32_t ptrSize = tag.getDataLen();
for (uint32_t i = 0; i < ptrSize; i += 2){
@ -87,7 +87,7 @@ namespace Mist{
if (config->getBool("keyframeonly")){config->is_active = false;}
}
void OutProgressiveFLV::sendHeader(){
void OutFLV::sendHeader(){
if (!isRecording()){
H.Clean();
H.SetHeader("Content-Type", "video/x-flv");
@ -96,38 +96,43 @@ namespace Mist{
H.SendResponse("200", "OK", myConn);
}
if (config->getBool("keyframeonly")){
selectedTracks.clear();
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
if (it->second.type == "video"){
selectedTracks.insert(it->first);
userSelect.clear();
std::set<size_t> validTracks = M.getValidTracks();
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); it++){
if (M.getType(*it) == "video"){
userSelect[*it].reload(streamName, *it);
break;
}
}
}
myConn.SendNow(FLV::Header, 13);
tag.DTSCMetaInit(myMeta, selectedTracks);
std::set<size_t> selectedTracks;
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
selectedTracks.insert(it->first);
}
tag.DTSCMetaInit(M, selectedTracks);
myConn.SendNow(tag.data, tag.len);
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
if (myMeta.tracks[*it].type == "video" && tag.DTSCVideoInit(myMeta.tracks[*it])){
for (std::set<size_t>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
if (M.getType(*it) == "video" && tag.DTSCVideoInit(meta, *it)){
myConn.SendNow(tag.data, tag.len);
}
if (myMeta.tracks[*it].type == "audio" && tag.DTSCAudioInit(myMeta.tracks[*it])){
if (M.getType(*it) == "audio" && tag.DTSCAudioInit(meta, *it)){
myConn.SendNow(tag.data, tag.len);
}
}
if (config->getBool("keyframeonly")){
unsigned int tid = *selectedTracks.begin();
int keyNum = myMeta.tracks[tid].keys.rbegin()->getNumber();
int keyTime = myMeta.tracks[tid].getKey(keyNum).getTime();
INFO_MSG("Seeking for time %d on track %d key %d", keyTime, tid, keyNum);
size_t tid = userSelect.begin()->first;
DTSC::Keys keys(M.keys(tid));
uint32_t endKey = keys.getEndValid();
uint64_t keyTime = keys.getTime(endKey - 1);
INFO_MSG("Seeking for time %" PRIu64 " on track %zu key %" PRIu32, keyTime, tid, endKey - 1);
seek(keyTime);
}
sentHeader = true;
}
void OutProgressiveFLV::onHTTP(){
void OutFLV::onHTTP(){
std::string method = H.method;
H.Clean();

View file

@ -1,9 +1,9 @@
#include "output_http.h"
namespace Mist{
class OutProgressiveFLV : public HTTPOutput{
class OutFLV : public HTTPOutput{
public:
OutProgressiveFLV(Socket::Connection &conn);
OutFLV(Socket::Connection &conn);
static void init(Util::Config *cfg);
void onHTTP();
void sendNext();
@ -17,4 +17,4 @@ namespace Mist{
};
}// namespace Mist
typedef Mist::OutProgressiveFLV mistOut;
typedef Mist::OutFLV mistOut;

View file

@ -64,9 +64,9 @@ namespace Mist{
void OutH264::sendHeader(){
MP4::AVCC avccbox;
unsigned int mainTrack = getMainSelectedTrack();
if (mainTrack && myMeta.tracks.count(mainTrack)){
avccbox.setPayload(myMeta.tracks[mainTrack].init);
size_t mainTrack = getMainSelectedTrack();
if (mainTrack != INVALID_TRACK_ID){
avccbox.setPayload(M.getInit(mainTrack));
myConn.SendNow(avccbox.asAnnexB());
}
sentHeader = true;

View file

@ -7,23 +7,22 @@
namespace Mist{
void OutHDS::getTracks(){
/// \todo Why do we have only one audio track option?
videoTracks.clear();
audioTrack = 0;
audioTrack = INVALID_TRACK_ID;
JSON::Value &vidCapa = capa["codecs"][0u][0u];
JSON::Value &audCapa = capa["codecs"][0u][1u];
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
std::set<size_t> validTracks = M.getValidTracks();
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); ++it){
jsonForEach(vidCapa, itb){
if (it->second.codec == (*itb).asStringRef()){
videoTracks.insert(it->first);
if (M.getCodec(*it) == itb->asStringRef()){
videoTracks.insert(*it);
break;
}
}
if (!audioTrack){
if (audioTrack == INVALID_TRACK_ID){
jsonForEach(audCapa, itb){
if (it->second.codec == (*itb).asStringRef()){
audioTrack = it->first;
if (M.getCodec(*it) == itb->asStringRef()){
audioTrack = *it;
break;
}
}
@ -34,18 +33,19 @@ namespace Mist{
///\brief Builds a bootstrap for use in HTTP Dynamic streaming.
///\param tid The track this bootstrap is generated for.
///\return The generated bootstrap.
std::string OutHDS::dynamicBootstrap(int tid){
updateMeta();
std::string OutHDS::dynamicBootstrap(size_t idx){
DTSC::Fragments fragments(M.fragments(idx));
DTSC::Keys keys(M.keys(idx));
std::string empty;
MP4::ASRT asrt;
asrt.setUpdate(false);
asrt.setVersion(1);
// asrt.setQualityEntry(empty, 0);
if (myMeta.live){
if (M.getLive()){
asrt.setSegmentRun(1, 4294967295ul, 0);
}else{
asrt.setSegmentRun(1, myMeta.tracks[tid].fragments.size(), 0);
asrt.setSegmentRun(1, fragments.getValidCount(), 0);
}
MP4::AFRT afrt;
@ -54,26 +54,21 @@ namespace Mist{
afrt.setTimeScale(1000);
// afrt.setQualityEntry(empty, 0);
MP4::afrt_runtable afrtrun;
int i = 0;
int j = 0;
if (myMeta.tracks[tid].fragments.size()){
std::deque<DTSC::Fragment>::iterator fragIt = myMeta.tracks[tid].fragments.begin();
unsigned int firstTime = myMeta.tracks[tid].getKey(fragIt->getNumber()).getTime();
while (fragIt != myMeta.tracks[tid].fragments.end()){
if (myMeta.vod || fragIt->getDuration() > 0){
afrtrun.firstFragment = myMeta.tracks[tid].missedFrags + j + 1;
afrtrun.firstTimestamp = myMeta.tracks[tid].getKey(fragIt->getNumber()).getTime() - firstTime;
if (fragIt->getDuration() > 0){
afrtrun.duration = fragIt->getDuration();
}else{
afrtrun.duration = myMeta.tracks[tid].lastms - afrtrun.firstTimestamp;
}
afrt.setFragmentRun(afrtrun, i);
++i;
size_t i = 0;
size_t j = 0;
uint64_t firstTime = keys.getTime(fragments.getFirstKey(fragments.getFirstValid()));
for (size_t fragIdx = fragments.getFirstValid() + 1; fragIdx < fragments.getEndValid(); ++fragIdx){
if (M.getVod() || fragments.getDuration(fragIdx) > 0){
afrtrun.firstFragment = M.getMissedFragments(idx) + j + 1;
afrtrun.firstTimestamp = keys.getTime(fragments.getFirstKey(fragIdx)) - firstTime;
if (fragments.getDuration(fragIdx) > 0){
afrtrun.duration = fragments.getDuration(fragIdx);
}else{
afrtrun.duration = M.getLastms(idx) - afrtrun.firstTimestamp;
}
++j;
++fragIt;
afrt.setFragmentRun(afrtrun, i++);
}
++j;
}
MP4::ABST abst;
@ -82,15 +77,15 @@ namespace Mist{
abst.setProfile(0);
abst.setUpdate(false);
abst.setTimeScale(1000);
abst.setLive(myMeta.live);
abst.setCurrentMediaTime(myMeta.tracks[tid].lastms);
abst.setLive(M.getLive());
abst.setCurrentMediaTime(M.getLastms(idx));
abst.setSmpteTimeCodeOffset(0);
abst.setMovieIdentifier(streamName);
abst.setSegmentRunTable(asrt, 0);
abst.setFragmentRunTable(afrt, 0);
DEBUG_MSG(DLVL_VERYHIGH, "Sending bootstrap: %s", abst.toPrettyString(0).c_str());
return std::string((char *)abst.asBox(), (int)abst.boxedSize());
VERYHIGH_MSG("Sending bootstrap: %s", abst.toPrettyString(0).c_str());
return std::string(abst.asBox(), abst.boxedSize());
}
///\brief Builds an index file for HTTP Dynamic streaming.
@ -103,53 +98,53 @@ namespace Mist{
Result << " <id>" << streamName << "</id>" << std::endl;
Result << " <mimeType>video/mp4</mimeType>" << std::endl;
Result << " <deliveryType>streaming</deliveryType>" << std::endl;
if (myMeta.vod){
Result << " <duration>" << myMeta.tracks[*videoTracks.begin()].lastms / 1000
if (M.getVod()){
Result << " <duration>" << M.getLastms(videoTracks.size() ? *videoTracks.begin() : audioTrack) / 1000
<< ".000</duration>" << std::endl;
Result << " <streamType>recorded</streamType>" << std::endl;
}else{
Result << " <duration>0.00</duration>" << std::endl;
Result << " <streamType>live</streamType>" << std::endl;
}
for (std::set<int>::iterator it = videoTracks.begin(); it != videoTracks.end(); it++){
for (std::set<size_t>::iterator it = videoTracks.begin(); it != videoTracks.end(); it++){
Result << " <bootstrapInfo "
"profile=\"named\" "
"id=\"boot"
<< (*it)
<< *it
<< "\" "
"url=\""
<< (*it)
<< *it
<< ".abst\">"
"</bootstrapInfo>"
<< std::endl;
Result << " <media "
"url=\""
<< (*it)
<< *it
<< "-\" "
// bitrate in kbit/s, we have bps so divide by 128
"bitrate=\""
<< (myMeta.tracks[(*it)].bps / 128)
<< M.getBps(*it) / 128
<< "\" "
"bootstrapInfoId=\"boot"
<< (*it)
<< *it
<< "\" "
"width=\""
<< myMeta.tracks[(*it)].width
<< M.getWidth(*it)
<< "\" "
"height=\""
<< myMeta.tracks[(*it)].height << "\">" << std::endl;
<< M.getHeight(*it) << "\">" << std::endl;
Result << " <metadata>AgAKb25NZXRhRGF0YQMAAAk=</metadata>" << std::endl;
Result << " </media>" << std::endl;
}
Result << "</manifest>" << std::endl;
DEBUG_MSG(DLVL_HIGH, "Sending manifest: %s", Result.str().c_str());
HIGH_MSG("Sending manifest: %s", Result.str().c_str());
return Result.str();
}// BuildManifest
OutHDS::OutHDS(Socket::Connection &conn) : HTTPOutput(conn){
uaDelay = 0;
realTime = 0;
audioTrack = 0;
audioTrack = INVALID_TRACK_ID;
playUntil = 0;
}
@ -186,15 +181,14 @@ namespace Mist{
void OutHDS::sendNext(){
if (thisPacket.getTime() >= playUntil){
VERYHIGH_MSG("Done sending fragment (%llu >= %llu)", thisPacket.getTime(), playUntil);
VERYHIGH_MSG("Done sending fragment (%" PRIu64 " >= %" PRIu64 ")", thisPacket.getTime(), playUntil);
stop();
wantRequest = true;
H.Chunkify("", 0, myConn);
return;
}
DTSC::Track &trk = myMeta.tracks[thisPacket.getTrackId()];
tag.DTSCLoader(thisPacket, trk);
if (trk.codec == "PCM" && trk.size == 16){
tag.DTSCLoader(thisPacket, M, thisIdx);
if (M.getCodec(thisIdx) == "PCM" && M.getSize(thisIdx) == 16){
char *ptr = tag.getData();
uint32_t ptrSize = tag.getDataLen();
for (uint32_t i = 0; i < ptrSize; i += 2){
@ -230,47 +224,45 @@ namespace Mist{
if (H.url.find("f4m") == std::string::npos){
initialize();
std::string tmp_qual = H.url.substr(H.url.find("/", 10) + 1);
unsigned int tid;
unsigned int fragNum;
tid = atoi(tmp_qual.substr(0, tmp_qual.find("Seg") - 1).c_str());
size_t idx = atoi(tmp_qual.substr(0, tmp_qual.find("Seg") - 1).c_str());
if (idx == INVALID_TRACK_ID){FAIL_MSG("Requested fragment for invalid track id");}
int temp;
temp = H.url.find("Seg") + 3;
temp = H.url.find("Frag") + 4;
fragNum = atoi(H.url.substr(temp).c_str()) - 1;
DEBUG_MSG(DLVL_MEDIUM, "Video track %d, fragment %d", tid, fragNum);
if (!audioTrack){getTracks();}
unsigned int mstime = 0;
unsigned int mslen = 0;
if (fragNum < (unsigned int)myMeta.tracks[tid].missedFrags){
size_t fragIdx = atoi(H.url.substr(temp).c_str()) - 1;
MEDIUM_MSG("Video track %zu, fragment %zu", idx, fragIdx);
if (audioTrack == INVALID_TRACK_ID){getTracks();}
uint64_t mstime = 0;
uint64_t mslen = 0;
if (fragIdx < M.getMissedFragments(idx)){
H.Clean();
H.setCORSHeaders();
H.SetBody("The requested fragment is no longer kept in memory on the server and cannot be "
"served.\n");
H.SendResponse("412", "Fragment out of range", myConn);
H.Clean(); // clean for any possible next requests
std::cout << "Fragment " << fragNum << " too old" << std::endl;
FAIL_MSG("Fragment %zu too old", fragIdx);
return;
}
// delay if we don't have the next fragment available yet
unsigned int timeout = 0;
while (myConn && fragNum >= myMeta.tracks[tid].missedFrags + myMeta.tracks[tid].fragments.size() - 1){
DTSC::Fragments fragments(M.fragments(idx));
DTSC::Keys keys(M.keys(idx));
while (myConn && fragIdx >= fragments.getEndValid() - 1){
// time out after 21 seconds
if (++timeout > 42){
myConn.close();
onFail("Timeout triggered", true);
break;
}
Util::wait(500);
updateMeta();
}
mstime = myMeta.tracks[tid]
.getKey(myMeta.tracks[tid].fragments[fragNum - myMeta.tracks[tid].missedFrags].getNumber())
.getTime();
mslen = myMeta.tracks[tid].fragments[fragNum - myMeta.tracks[tid].missedFrags].getDuration();
VERYHIGH_MSG("Playing from %llu for %llu ms", mstime, mslen);
mstime = keys.getTime(fragments.getFirstKey(fragIdx));
mslen = fragments.getDuration(fragIdx);
VERYHIGH_MSG("Playing from %" PRIu64 " for %" PRIu64 " ms", mstime, mslen);
selectedTracks.clear();
selectedTracks.insert(tid);
if (audioTrack){selectedTracks.insert(audioTrack);}
userSelect.clear();
userSelect[idx].reload(streamName, idx);
if (audioTrack != INVALID_TRACK_ID){userSelect[audioTrack].reload(streamName, audioTrack);}
seek(mstime);
playUntil = mstime + mslen;
@ -284,19 +276,18 @@ namespace Mist{
}
H.StartResponse(H, myConn);
// send the bootstrap
std::string bootstrap = dynamicBootstrap(tid);
H.Chunkify(bootstrap, myConn);
H.Chunkify(dynamicBootstrap(idx), myConn);
// send a zero-size mdat, meaning it stretches until end of file.
H.Chunkify("\000\000\000\000mdat", 8, myConn);
// send init data, if needed.
if (audioTrack > 0 && myMeta.tracks[audioTrack].init != ""){
if (tag.DTSCAudioInit(myMeta.tracks[audioTrack])){
if (audioTrack != INVALID_TRACK_ID && M.getInit(audioTrack) != ""){
if (tag.DTSCAudioInit(meta, audioTrack)){
tag.tagTime(mstime);
H.Chunkify(tag.data, tag.len, myConn);
}
}
if (tid > 0){
if (tag.DTSCVideoInit(myMeta.tracks[tid])){
if (idx != INVALID_TRACK_ID){
if (tag.DTSCVideoInit(meta, idx)){
tag.tagTime(mstime);
H.Chunkify(tag.data, tag.len, myConn);
}
@ -305,8 +296,6 @@ namespace Mist{
wantRequest = false;
}else{
initialize();
std::stringstream tmpstr;
myMeta.toPrettyString(tmpstr);
H.Clean();
H.SetHeader("Content-Type", "text/xml");
H.SetHeader("Cache-Control", "no-cache");

View file

@ -14,11 +14,11 @@ namespace Mist{
protected:
void getTracks();
std::string dynamicBootstrap(int tid);
std::string dynamicBootstrap(size_t idx);
std::string dynamicIndex();
std::set<int> videoTracks; ///<< Holds valid video tracks for playback
long long int audioTrack; ///<< Holds audio track ID for playback
long long unsigned int playUntil;
std::set<size_t> videoTracks; ///<< Holds valid video tracks for playback
size_t audioTrack; ///<< Holds audio track ID for playback
uint64_t playUntil;
FLV::Tag tag;
};
}// namespace Mist

View file

@ -6,10 +6,11 @@
namespace Mist{
bool OutHLS::isReadyForPlay(){
if (myMeta.tracks.size()){
if (myMeta.mainTrack().fragments.size() > 4){return true;}
}
return false;
if (!M.getValidTracks().size()){return false;}
uint32_t mainTrack = M.mainTrack();
if (mainTrack == INVALID_TRACK_ID){return false;}
DTSC::Fragments fragments(M.fragments(mainTrack));
return fragments.getValidCount() > 4;
}
///\brief Builds an index file for HTTP Live streaming.
@ -18,256 +19,139 @@ namespace Mist{
std::stringstream result;
selectDefaultTracks();
result << "#EXTM3U\r\n";
int audioId = -1;
unsigned int vidTracks = 0;
size_t audioId = INVALID_TRACK_ID;
size_t vidTracks = 0;
bool hasSubs = false;
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); ++it){
if (audioId == -1 && myMeta.tracks[*it].type == "audio"){audioId = *it;}
if (!hasSubs && myMeta.tracks[*it].codec == "subtitle"){hasSubs = true;}
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); ++it){
if (audioId == INVALID_TRACK_ID && M.getType(it->first) == "audio"){audioId = it->first;}
if (!hasSubs && M.getCodec(it->first) == "subtitle"){hasSubs = true;}
}
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); ++it){
if (myMeta.tracks[*it].type == "video"){
vidTracks++;
int bWidth = myMeta.tracks[*it].bps;
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); ++it){
if (M.getType(it->first) == "video"){
++vidTracks;
int bWidth = M.getBps(it->first);
if (bWidth < 5){bWidth = 5;}
if (audioId != -1){bWidth += myMeta.tracks[audioId].bps;}
if (audioId != INVALID_TRACK_ID){bWidth += M.getBps(audioId);}
result << "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=" << (bWidth * 8);
result << ",RESOLUTION=" << myMeta.tracks[*it].width << "x" << myMeta.tracks[*it].height;
if (myMeta.tracks[*it].fpks){
result << ",FRAME-RATE=" << (float)myMeta.tracks[*it].fpks / 1000;
result << ",RESOLUTION=" << M.getWidth(it->first) << "x" << M.getHeight(it->first);
if (M.getFpks(it->first)){
result << ",FRAME-RATE=" << (float)M.getFpks(it->first) / 1000;
}
if (hasSubs){result << ",SUBTITLES=\"sub1\"";}
result << ",CODECS=\"";
result << Util::codecString(myMeta.tracks[*it].codec, myMeta.tracks[*it].init);
if (audioId != -1){
result << "," << Util::codecString(myMeta.tracks[audioId].codec, myMeta.tracks[audioId].init);
result << Util::codecString(M.getCodec(it->first), M.getInit(it->first));
if (audioId != INVALID_TRACK_ID){
result << "," << Util::codecString(M.getCodec(audioId), M.getInit(audioId));
}
result << "\"";
result << "\r\n";
result << *it;
if (audioId != -1){result << "_" << audioId;}
result << "\"\r\n" << it->first;
if (audioId != INVALID_TRACK_ID){result << "_" << audioId;}
if (hasSessionIDs()){
result << "/index.m3u8?sessId=" << getpid() << "\r\n";
}else{
result << "/index.m3u8\r\n";
}
}else if (myMeta.tracks[*it].codec == "subtitle"){
}else if (M.getCodec(it->first) == "subtitle"){
if (myMeta.tracks[*it].lang.empty()){myMeta.tracks[*it].lang = "und";}
if (M.getLang(it->first).empty()){meta.setLang(it->first, "und");}
result << "#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID=\"sub1\",LANGUAGE=\"" << myMeta.tracks[*it].lang
<< "\",NAME=\"" << Encodings::ISO639::decode(myMeta.tracks[*it].lang)
<< "\",AUTOSELECT=NO,DEFAULT=NO,FORCED=NO,URI=\"" << *it << "/index.m3u8\""
result << "#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID=\"sub1\",LANGUAGE=\"" << M.getLang(it->first)
<< "\",NAME=\"" << Encodings::ISO639::decode(M.getLang(it->first))
<< "\",AUTOSELECT=NO,DEFAULT=NO,FORCED=NO,URI=\"" << it->first << "/index.m3u8\""
<< "\r\n";
}
}
if (!vidTracks && audioId){
result << "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=" << (myMeta.tracks[audioId].bps * 8);
result << ",CODECS=\""
<< Util::codecString(myMeta.tracks[audioId].codec, myMeta.tracks[audioId].init) << "\"";
if (!vidTracks && audioId != INVALID_TRACK_ID){
result << "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=" << (M.getBps(audioId) * 8);
result << ",CODECS=\"" << Util::codecString(M.getCodec(audioId), M.getInit(audioId)) << "\"";
result << "\r\n";
result << audioId << "/index.m3u8\r\n";
}
DEBUG_MSG(DLVL_HIGH, "Sending this index: %s", result.str().c_str());
return result.str();
}
std::string OutHLS::pushLiveIndex(){
std::stringstream result;
result << "#EXTM3U\r\n";
std::set<unsigned int> audioTracks;
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
if (it->second.codec == "AAC" || it->second.codec == "MP3" || it->second.codec == "AC3" ||
it->second.codec == "MP2"){
audioTracks.insert(it->first);
}
}
if (!audioTracks.size()){audioTracks.insert(-1);}
unsigned int vidTracks = 0;
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
if (it->second.codec == "H264" || it->second.codec == "HEVC" || it->second.codec == "MPEG2"){
for (std::set<unsigned int>::iterator audIt = audioTracks.begin(); audIt != audioTracks.end(); audIt++){
vidTracks++;
int bWidth = it->second.bps;
if (bWidth < 5){bWidth = 5;}
if (*audIt != -1){bWidth += myMeta.tracks[*audIt].bps;}
result << "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=" << (bWidth * 8) << "\r\n";
result << it->first;
if (*audIt != -1){result << "_" << *audIt;}
result << "/index.m3u8\r\n";
}
}
}
if (!vidTracks && audioTracks.size()){
result << "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=" << (myMeta.tracks[*audioTracks.begin()].bps * 8)
<< "\r\n";
result << *audioTracks.begin() << "/index.m3u8\r\n";
}
return result.str();
}
std::string OutHLS::pushLiveIndex(int tid, unsigned long bTime, unsigned long eTime){
updateMeta();
std::string OutHLS::liveIndex(size_t tid, const std::string &sessId){
std::stringstream result;
// parse single track
result << "#EXTM3U\r\n#EXT-X-TARGETDURATION:" << (myMeta.tracks[tid].biggestFragment() / 1000) + 1 << "\r\n";
uint32_t targetDuration = (M.biggestFragment(tid) / 1000) + 1;
result << "#EXTM3U\r\n#EXT-X-VERSION:";
std::deque<std::string> lines;
unsigned int skippedLines = 0;
for (std::deque<DTSC::Fragment>::iterator it = myMeta.tracks[tid].fragments.begin();
it != myMeta.tracks[tid].fragments.end(); it++){
long long int starttime = myMeta.tracks[tid].getKey(it->getNumber()).getTime();
long long duration = it->getDuration();
if (duration <= 0){duration = myMeta.tracks[tid].lastms - starttime;}
if (starttime < bTime){skippedLines++;}
if (starttime >= bTime && (starttime + duration) <= eTime){
char lineBuf[400];
snprintf(lineBuf, 400, "#EXTINF:%lld, no desc\r\n%lld_%lld.ts\r\n",
((duration + 500) / 1000), starttime, starttime + duration);
lines.push_back(lineBuf);
}
result << (M.getEncryption(tid) == "" ? "3" : "5");
result << "\r\n#EXT-X-TARGETDURATION:" << targetDuration << "\r\n";
if (M.getEncryption(tid) != ""){
result << "#EXT-X-KEY:METHOD=SAMPLE-AES,URI=\"";
result << "urlHere";
result << "\",KEYFORMAT=\"com.apple.streamingkeydelivery" << std::endl;
}
result << "#EXT-X-MEDIA-SEQUENCE:" << myMeta.tracks[tid].missedFrags + skippedLines << "\r\n";
while (lines.size()){
result << lines.front();
lines.pop_front();
}
if (!myMeta.live && eTime >= myMeta.tracks[tid].lastms){result << "#EXT-X-ENDLIST\r\n";}
return result.str();
}
std::string OutHLS::liveIndex(int tid, std::string &sessId){
updateMeta();
std::stringstream result;
// parse single track
uint32_t target_dur = (myMeta.tracks[tid].biggestFragment() / 1000) + 1;
result << "#EXTM3U\r\n#EXT-X-VERSION:3\r\n#EXT-X-TARGETDURATION:" << target_dur << "\r\n";
std::deque<std::string> lines;
std::deque<uint16_t> durs;
uint32_t total_dur = 0;
for (std::deque<DTSC::Fragment>::iterator it = myMeta.tracks[tid].fragments.begin();
it != myMeta.tracks[tid].fragments.end(); it++){
long long int starttime = myMeta.tracks[tid].getKey(it->getNumber()).getTime();
long long duration = it->getDuration();
if (duration <= 0){duration = myMeta.tracks[tid].lastms - starttime;}
std::deque<uint16_t> durations;
uint32_t totalDuration = 0;
DTSC::Keys keys(M.keys(tid));
DTSC::Fragments fragments(M.fragments(tid));
uint32_t firstFragment = fragments.getFirstValid();
uint32_t endFragment = fragments.getEndValid();
for (int i = firstFragment; i < endFragment; i++){
uint64_t duration = fragments.getDuration(i);
size_t keyNumber = fragments.getFirstKey(i);
uint64_t startTime = keys.getTime(keyNumber);
if (!duration){duration = M.getLastms(tid) - startTime;}
double floatDur = (double)duration / 1000;
char lineBuf[400];
if (myMeta.tracks[tid].codec == "subtitle"){
snprintf(lineBuf, 400, "#EXTINF:%f,\r\n../../../%s.vtt?track=%d&from=%lld&to=%lld\r\n",
(double)duration / 1000, streamName.c_str(), tid, starttime, starttime + duration);
if (M.getCodec(tid) == "subtitle"){
snprintf(lineBuf, 400, "#EXTINF:%f,\r\n../../../%s.vtt?track=%zu&from=%" PRIu64 "&to=%" PRIu64 "\r\n",
(double)duration / 1000, streamName.c_str(), tid, startTime, startTime + duration);
}else{
if (sessId.size()){
snprintf(lineBuf, 400, "#EXTINF:%f,\r\n%lld_%lld.ts?sessId=%s\r\n",
(double)duration / 1000, starttime, starttime + duration, sessId.c_str());
snprintf(lineBuf, 400, "#EXTINF:%f,\r\n%" PRIu64 "_%" PRIu64 ".ts?sessId=%s\r\n",
floatDur, startTime, startTime + duration, sessId.c_str());
}else{
snprintf(lineBuf, 400, "#EXTINF:%f,\r\n%lld_%lld.ts\r\n", (double)duration / 1000,
starttime, starttime + duration);
snprintf(lineBuf, 400, "#EXTINF:%f,\r\n%" PRIu64 "_%" PRIu64 ".ts\r\n", floatDur,
startTime, startTime + duration);
}
}
durs.push_back(duration);
total_dur += duration;
totalDuration += duration;
durations.push_back(duration);
lines.push_back(lineBuf);
}
unsigned int skippedLines = 0;
if (myMeta.live && lines.size()){
size_t skippedLines = 0;
if (M.getLive() && lines.size()){
// only print the last segment when VoD
lines.pop_back();
total_dur -= durs.back();
durs.pop_back();
totalDuration -= durations.back();
durations.pop_back();
// skip the first two segments when live, unless that brings us under 4 target durations
while ((total_dur - durs.front()) > (target_dur * 4000) && skippedLines < 2){
while ((totalDuration - durations.front()) > (targetDuration * 4000) && skippedLines < 2){
lines.pop_front();
total_dur -= durs.front();
durs.pop_front();
totalDuration -= durations.front();
durations.pop_front();
++skippedLines;
}
/*LTS-START*/
// remove lines to reduce size towards listlimit setting - but keep at least 4X target duration available
if (config->getInteger("listlimit")){
unsigned long listlimit = config->getInteger("listlimit");
while (lines.size() > listlimit && (total_dur - durs.front()) > (target_dur * 4000)){
// remove lines to reduce size towards listlimit setting - but keep at least 4X target
// duration available
uint64_t listlimit = config->getInteger("listlimit");
if (listlimit){
while (lines.size() > listlimit && (totalDuration - durations.front()) > (targetDuration * 4000)){
lines.pop_front();
total_dur -= durs.front();
durs.pop_front();
totalDuration -= durations.front();
durations.pop_front();
++skippedLines;
}
}
/*LTS-END*/
}
result << "#EXT-X-MEDIA-SEQUENCE:" << myMeta.tracks[tid].missedFrags + skippedLines << "\r\n";
result << "#EXT-X-MEDIA-SEQUENCE:" << M.getMissedFragments(tid) + skippedLines << "\r\n";
while (lines.size()){
result << lines.front();
lines.pop_front();
for (std::deque<std::string>::iterator it = lines.begin(); it != lines.end(); it++){
result << *it;
}
if (!myMeta.live || total_dur == 0){result << "#EXT-X-ENDLIST\r\n";}
DEBUG_MSG(DLVL_HIGH, "Sending this index: %s", result.str().c_str());
if (!M.getLive() || !totalDuration){result << "#EXT-X-ENDLIST\r\n";}
HIGH_MSG("Sending this index: %s", result.str().c_str());
return result.str();
}// liveIndex
std::string OutHLS::generatePushList(){
updateMeta();
std::set<unsigned int> videoTracks;
std::set<unsigned int> audioTracks;
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
if (it->second.codec == "AAC" || it->second.codec == "MP3" || it->second.codec == "AC3"){
audioTracks.insert(it->first);
}
if (it->second.codec == "H264" || it->second.codec == "HEVC"){
videoTracks.insert(it->first);
}
}
JSON::Value result;
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
std::stringstream tid;
tid << it->second.trackID;
result["tracks"][tid.str()] = it->second.toJSON(true);
}
for (std::set<unsigned int>::iterator it = videoTracks.begin(); it != videoTracks.end(); it++){
for (std::set<unsigned int>::iterator it2 = audioTracks.begin(); it2 != audioTracks.end(); it2++){
JSON::Value quality;
std::stringstream identifier;
identifier << "/" << *it << "_" << *it2;
quality["index"] = "/push" + identifier.str() + "/index_\%llu_\%llu.m3u8";
quality["segment"] = identifier.str() + "/\%llu_\%llu.ts";
quality["video"] = *it;
quality["audio"] = *it2;
quality["id"] = identifier.str();
std::deque<DTSC::Fragment>::iterator it3 = myMeta.tracks[*it].fragments.begin();
for (int i = 0; i < 2; i++){
if (it3 != myMeta.tracks[*it].fragments.end()){++it3;}
}
for (; it3 != myMeta.tracks[*it].fragments.end(); it3++){
if (myMeta.live && it3 == (myMeta.tracks[*it].fragments.end() - 1)){
// Skip the current last fragment if we are live
continue;
}
uint64_t starttime = myMeta.tracks[*it].getKey(it3->getNumber()).getTime();
std::stringstream line;
uint64_t duration = it3->getDuration();
if (duration <= 0){duration = myMeta.tracks[*it].lastms - starttime;}
std::stringstream segmenturl;
segmenturl << identifier.str() << "/" << starttime << "_" << duration + starttime << ".ts";
JSON::Value segment;
// segment["url"] = segmenturl.str();
segment["time"] = starttime;
segment["duration"] = duration;
segment["number"] = (uint64_t)it3->getNumber();
quality["segments"].append(segment);
}
result["qualities"].append(quality);
}
}
return result.toString();
;
}
OutHLS::OutHLS(Socket::Connection &conn) : TSOutput(conn){
@ -286,7 +170,6 @@ namespace Mist{
"Segmented streaming in Apple (TS-based) format over HTTP ( = HTTP Live Streaming)";
capa["url_rel"] = "/hls/$/index.m3u8";
capa["url_prefix"] = "/hls/$/";
capa["url_pushlist"] = "/hls/$/push/list";
capa["codecs"][0u][0u].append("+HEVC");
capa["codecs"][0u][1u].append("+H264");
capa["codecs"][0u][2u].append("+MPEG2");
@ -380,11 +263,12 @@ namespace Mist{
return;
}
std::string userAgent = H.GetHeader("User-Agent");
bool VLCworkaround = false;
if (H.GetHeader("User-Agent").substr(0, 3) == "VLC"){
std::string vlcver = H.GetHeader("User-Agent").substr(4);
if (userAgent.substr(0, 3) == "VLC"){
std::string vlcver = userAgent.substr(4);
if (vlcver[0] == '0' || vlcver[0] == '1' || (vlcver[0] == '2' && vlcver[2] < '2')){
DEBUG_MSG(DLVL_INFO, "Enabling VLC version < 2.2.0 bug workaround.");
INFO_MSG("Enabling VLC version < 2.2.0 bug workaround.");
VLCworkaround = true;
}
}
@ -392,82 +276,40 @@ namespace Mist{
initialize();
if (!keepGoing()){return;}
if (H.url.substr(5 + streamName.size(), 5) == "/push"){
std::string relPushUrl = H.url.substr(10 + streamName.size());
H.Clean();
if (relPushUrl == "/list"){
H.SetBody(generatePushList());
H.SendResponse("200", "OK", myConn);
H.Clean();
return;
}
H.SetHeader("Content-Type", "application/vnd.apple.mpegurl");
if (relPushUrl == "/index.m3u8"){
H.setCORSHeaders();
H.SetBody(pushLiveIndex());
H.SendResponse("200", "OK", myConn);
H.Clean(); // clean for any possible next requests
return;
}else{
unsigned int vTrack;
unsigned int aTrack;
unsigned long long bTime;
unsigned long long eTime;
if (sscanf(relPushUrl.c_str(), "/%u_%u/index_%llu_%llu.m3u", &vTrack, &aTrack, &bTime, &eTime) == 4){
if (eTime < bTime){eTime = bTime;}
H.setCORSHeaders();
H.SetBody(pushLiveIndex(vTrack, bTime, eTime));
H.SendResponse("200", "OK", myConn);
H.Clean(); // clean for any possible next requests
return;
}
}
H.SetBody("The HLS URL wasn't understood - what did you want, exactly?\n");
myConn.SendNow(H.BuildResponse("404", "URL mismatch"));
H.Clean(); // clean for any possible next requests
return;
}else if (HTTP::URL(H.url).getExt().substr(0, 3) != "m3u"){
size_t slashPos = H.getUrl().find('/', 5);
std::string tmpStr = H.getUrl().substr(slashPos);
long long unsigned int from;
if (sscanf(tmpStr.c_str(), "/%u_%u/%llu_%llu.ts", &vidTrack, &audTrack, &from, &until) != 4){
if (sscanf(tmpStr.c_str(), "/%u/%llu_%llu.ts", &vidTrack, &from, &until) != 3){
DEBUG_MSG(DLVL_MEDIUM, "Could not parse URL: %s", H.getUrl().c_str());
if (H.url.find(".m3u") == std::string::npos){
std::string tmpStr = H.getUrl().substr(5 + streamName.size());
uint64_t from;
if (sscanf(tmpStr.c_str(), "/%zu_%zu/%" PRIu64 "_%" PRIu64 ".ts", &vidTrack, &audTrack, &from, &until) != 4){
if (sscanf(tmpStr.c_str(), "/%zu/%" PRIu64 "_%" PRIu64 ".ts", &vidTrack, &from, &until) != 3){
MEDIUM_MSG("Could not parse URL: %s", H.getUrl().c_str());
H.Clean();
H.setCORSHeaders();
H.SetBody("The HLS URL wasn't understood - what did you want, exactly?\n");
myConn.SendNow(H.BuildResponse("404", "URL mismatch"));
H.Clean(); // clean for any possible next requests
return;
}else{
selectedTracks.clear();
selectedTracks.insert(vidTrack);
}
userSelect.clear();
userSelect[vidTrack].reload(streamName, vidTrack);
}else{
selectedTracks.clear();
selectedTracks.insert(vidTrack);
selectedTracks.insert(audTrack);
userSelect.clear();
userSelect[vidTrack].reload(streamName, vidTrack);
userSelect[audTrack].reload(streamName, audTrack);
}
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
if (it->second.codec == "ID3"){selectedTracks.insert(it->first);}
std::set<size_t> validTracks = getSupportedTracks();
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); ++it){
if (M.getCodec(*it) == "ID3"){userSelect[*it].reload(streamName, *it);}
}
// Keep a reference to the main track
// This is called vidTrack, even for audio-only streams
DTSC::Track &Trk = myMeta.tracks[vidTrack];
if (myMeta.live){
if (from < Trk.firstms){
H.Clean();
H.setCORSHeaders();
H.SetBody("The requested fragment is no longer kept in memory on the server and cannot "
"be served.\n");
myConn.SendNow(H.BuildResponse("404", "Fragment out of range"));
H.Clean(); // clean for any possible next requests
WARN_MSG("Fragment @ %llu too old", from);
return;
}
if (M.getLive() && from < M.getFirstms(vidTrack)){
H.Clean();
H.setCORSHeaders();
H.SetBody("The requested fragment is no longer kept in memory on the server and cannot be "
"served.\n");
myConn.SendNow(H.BuildResponse("404", "Fragment out of range"));
H.Clean(); // clean for any possible next requests
WARN_MSG("Fragment @ %" PRIu64 " too old", from);
return;
}
H.SetHeader("Content-Type", "video/mp2t");
@ -487,10 +329,10 @@ namespace Mist{
H.StartResponse(H, myConn, VLCworkaround || config->getBool("nonchunked"));
// we assume whole fragments - but timestamps may be altered at will
uint32_t fragIndice = Trk.timeToFragnum(from);
contPAT = Trk.missedFrags + fragIndice; // PAT continuity counter
contPMT = Trk.missedFrags + fragIndice; // PMT continuity counter
contSDT = Trk.missedFrags + fragIndice; // SDT continuity counter
uint32_t fragIndice = M.getFragmentIndexForTime(vidTrack, from);
contPAT = M.getMissedFragments(vidTrack) + fragIndice; // PAT continuity counter
contPMT = M.getMissedFragments(vidTrack) + fragIndice; // PMT continuity counter
contSDT = M.getMissedFragments(vidTrack) + fragIndice; // SDT continuity counter
packCounter = 0;
parseData = true;
wantRequest = false;
@ -501,8 +343,7 @@ namespace Mist{
std::string request = H.url.substr(H.url.find("/", 5) + 1);
H.Clean();
H.setCORSHeaders();
H.SetHeader("Content-Type", "application/vnd.apple.mpegurl");
if (!myMeta.tracks.size()){
if (!M.getValidTracks().size()){
H.SendResponse("404", "Not online or found", myConn);
H.Clean();
return;
@ -516,8 +357,14 @@ namespace Mist{
if (request.find("/") == std::string::npos){
manifest = liveIndex();
}else{
int selectId = atoi(request.substr(0, request.find("/")).c_str());
manifest = liveIndex(selectId, sessId);
size_t idx = atoi(request.substr(0, request.find("/")).c_str());
if (!M.getValidTracks().count(idx)){
H.SendResponse("404", "No corresponding track found", myConn);
H.Clean();
return;
}
manifest = liveIndex(idx, sessId);
}
H.SetBody(manifest);
H.SendResponse("200", "OK", myConn);
@ -532,10 +379,9 @@ namespace Mist{
parseData = false;
// Ensure alignment of contCounters for selected tracks, to prevent discontinuities.
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); ++it){
DTSC::Track &Trk = myMeta.tracks[*it];
uint32_t pkgPid = 255 + *it;
int &contPkg = contCounters[pkgPid];
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
uint32_t pkgPid = 255 + it->first;
uint16_t &contPkg = contCounters[pkgPid];
if (contPkg % 16 != 0){
packData.clear();
packData.setPID(pkgPid);
@ -556,7 +402,7 @@ namespace Mist{
TSOutput::sendNext();
}
void OutHLS::sendTS(const char *tsData, unsigned int len){H.Chunkify(tsData, len, myConn);}
void OutHLS::sendTS(const char *tsData, size_t len){H.Chunkify(tsData, len, myConn);}
void OutHLS::onFail(const std::string &msg, bool critical){
if (H.url.find(".m3u") == std::string::npos){

View file

@ -7,7 +7,7 @@ namespace Mist{
OutHLS(Socket::Connection &conn);
~OutHLS();
static void init(Util::Config *cfg);
void sendTS(const char *tsData, unsigned int len = 188);
void sendTS(const char *tsData, size_t len = 188);
void sendNext();
void onHTTP();
bool isReadyForPlay();
@ -19,17 +19,11 @@ namespace Mist{
bool hasSessionIDs(){return !config->getBool("mergesessions");}
std::string liveIndex();
std::string liveIndex(int tid, std::string &sessId);
std::string liveIndex(size_t tid, const std::string &sessId);
std::string pushLiveIndex();
std::string pushLiveIndex(int tid, unsigned long bTime, unsigned long eTime);
std::string generatePushList();
int canSeekms(unsigned int ms);
int keysToSend;
unsigned int vidTrack;
unsigned int audTrack;
long long unsigned int until;
size_t vidTrack;
size_t audTrack;
uint64_t until;
};
}// namespace Mist

View file

@ -1,597 +0,0 @@
#include "output_hss.h"
#include <mist/bitfields.h>
#include <mist/checksum.h>
#include <mist/defines.h>
#include <mist/encode.h>
#include <mist/http_parser.h>
#include <mist/mp4.h>
#include <mist/mp4_encryption.h> /*LTS*/
#include <mist/mp4_generic.h>
#include <mist/mp4_ms.h>
#include <mist/nal.h>/*LTS*/
#include <mist/stream.h>
#include <unistd.h>
///\todo Maybe move to util?
long long unsigned int binToInt(std::string &binary){
long long int result = 0;
for (int i = 0; i < 8; i++){
result <<= 8;
result += binary[i];
}
return result;
}
std::string intToBin(long long unsigned int number){
std::string result;
result.resize(8);
for (int i = 7; i >= 0; i--){
result[i] = number & 0xFF;
number >>= 8;
}
return result;
}
std::string toUTF16(std::string original){
std::string result;
result += (char)0xFF;
result += (char)0xFE;
for (std::string::iterator it = original.begin(); it != original.end(); it++){
result += (*it);
result += (char)0x00;
}
return result;
}
/// Converts bytes per second and track ID into a single bits per second value, where the last two
/// digits are the track ID. Breaks for track IDs > 99. But really, this is MS-SS, so who cares..?
uint64_t bpsAndIdToBitrate(uint32_t bps, uint64_t tid){
return ((uint64_t)((bps * 8) / 100)) * 100 + tid;
}
namespace Mist{
OutHSS::OutHSS(Socket::Connection &conn) : HTTPOutput(conn){
uaDelay = 0;
realTime = 0;
}
OutHSS::~OutHSS(){}
void OutHSS::init(Util::Config *cfg){
HTTPOutput::init(cfg);
capa["name"] = "HSS";
capa["friendly"] = "Microsoft segmented over HTTP (HSS)";
capa["desc"] = "Segmented streaming in Microsoft Silverlight (fMP4-based) format over HTTP ( = "
"HTTP Smooth Streaming)";
capa["url_rel"] = "/smooth/$.ism/Manifest";
capa["url_prefix"] = "/smooth/$.ism/";
capa["codecs"][0u][0u].append("H264");
capa["codecs"][0u][1u].append("AAC");
capa["methods"][0u]["handler"] = "http";
capa["methods"][0u]["type"] = "silverlight";
capa["methods"][0u]["priority"] = 1;
}
void OutHSS::sendNext(){
if (thisPacket.getTime() >= playUntil){
stop();
wantRequest = true;
H.Chunkify("", 0, myConn);
H.Clean();
return;
}
char *dataPointer = 0;
size_t len = 0;
thisPacket.getString("data", dataPointer, len);
H.Chunkify(dataPointer, len, myConn);
}
int OutHSS::canSeekms(unsigned int ms){
// no tracks? Frame too new by definition.
if (!myMeta.tracks.size()){
DEBUG_MSG(DLVL_DONTEVEN, "HSS Canseek to %d returns 1 because no tracks", ms);
return 1;
}
// loop trough all selected tracks
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
// return "too late" if one track is past this point
if (ms < myMeta.tracks[*it].firstms){
DEBUG_MSG(DLVL_DONTEVEN, "HSS Canseek to %d returns -1 because track %lu firstms == %llu",
ms, *it, myMeta.tracks[*it].firstms);
return -1;
}
// return "too early" if one track is not yet at this point
if (ms > myMeta.tracks[*it].lastms){
DEBUG_MSG(DLVL_DONTEVEN, "HSS Canseek to %d returns 1 because track %lu lastms == %llu", ms,
*it, myMeta.tracks[*it].lastms);
return 1;
}
}
return 0;
}
void OutHSS::sendHeader(){
// We have a non-manifest request, parse it.
std::string Quality = H.url.substr(H.url.find("Q(", 2) + 2);
Quality = Quality.substr(0, Quality.find(")"));
std::string parseString = H.url.substr(H.url.find(")/") + 2);
parseString = parseString.substr(parseString.find("(") + 1);
long long int seekTime = atoll(parseString.substr(0, parseString.find(")")).c_str()) / 10000;
unsigned int tid = atoll(Quality.c_str()) % 100;
selectedTracks.clear();
selectedTracks.insert(tid);
if (myMeta.live){
updateMeta();
unsigned int timeout = 0;
int seekable;
do{
seekable = canSeekms(seekTime);
if (seekable == 0){
// iff the fragment in question is available, check if the next is available too
for (std::deque<DTSC::Key>::iterator it = myMeta.tracks[tid].keys.begin();
it != myMeta.tracks[tid].keys.end(); it++){
if (it->getTime() >= seekTime){
if ((it + 1) == myMeta.tracks[tid].keys.end()){seekable = 1;}
break;
}
}
}
if (seekable > 0){
// time out after 21 seconds
if (++timeout > 42){
myConn.close();
break;
}
Util::wait(500);
updateMeta();
}
}while (myConn && seekable > 0);
if (seekable < 0){
H.Clean();
H.SetBody("The requested fragment is no longer kept in memory on the server and cannot be "
"served.\n");
myConn.SendNow(H.BuildResponse("412", "Fragment out of range"));
H.Clean(); // clean for any possible next requests
std::cout << "Fragment @ " << seekTime << "ms too old (" << myMeta.tracks[tid].firstms
<< " - " << myMeta.tracks[tid].lastms << " ms)" << std::endl;
stop();
wantRequest = true;
return;
}
}
seek(seekTime);
///\todo Rewrite to fragments
for (std::deque<DTSC::Key>::iterator it2 = myMeta.tracks[tid].keys.begin();
it2 != myMeta.tracks[tid].keys.end(); it2++){
if (it2->getTime() > seekTime){
playUntil = it2->getTime();
break;
}
}
myTrackStor = tid;
myKeyStor = seekTime;
keysToSend = 1;
// Seek to the right place and send a play-once for a single fragment.
std::stringstream sstream;
int partOffset = 0;
DTSC::Key keyObj;
for (std::deque<DTSC::Key>::iterator it = myMeta.tracks[tid].keys.begin();
it != myMeta.tracks[tid].keys.end(); it++){
if (it->getTime() >= seekTime){
keyObj = (*it);
std::deque<DTSC::Key>::iterator nextIt = it;
nextIt++;
if (nextIt == myMeta.tracks[tid].keys.end()){
if (myMeta.live){
H.Clean();
H.SetBody("Proxy, re-request this in a second or two.\n");
myConn.SendNow(H.BuildResponse("208", "Ask again later"));
H.Clean(); // clean for any possible next requests
std::cout << "Fragment after fragment @ " << seekTime << " not available yet" << std::endl;
}
}
break;
}
partOffset += it->getParts();
}
if (H.url == "/"){
return; // Don't continue, but continue instead.
}
/*
if (myMeta.live){
if (mstime == 0 && seekTime > 1){
H.Clean();
H.SetBody("The requested fragment is no longer kept in memory on the server and cannot be
served.\n"); myConn.SendNow(H.BuildResponse("412", "Fragment out of range")); H.Clean(); //clean
for any possible next requests std::cout << "Fragment @ " << seekTime << " too old" <<
std::endl; continue;
}
}
*/
///\todo Select correct track (tid);
// Wrap everything in mp4 boxes
MP4::MFHD mfhd_box;
mfhd_box.setSequenceNumber(((keyObj.getNumber() - 1) * 2) + (myMeta.tracks[tid].type == "video" ? 1 : 2));
MP4::TFHD tfhd_box;
tfhd_box.setFlags(MP4::tfhdSampleFlag);
tfhd_box.setTrackID((myMeta.tracks[tid].type == "video" ? 1 : 2));
if (myMeta.tracks[tid].type == "video"){
tfhd_box.setDefaultSampleFlags(0x00004001);
}else{
tfhd_box.setDefaultSampleFlags(0x00008002);
}
MP4::TRUN trun_box;
trun_box.setDataOffset(42); ///\todo Check if this is a placeholder, or an actually correct number
unsigned int keySize = 0;
if (myMeta.tracks[tid].type == "video"){
trun_box.setFlags(MP4::trundataOffset | MP4::trunfirstSampleFlags | MP4::trunsampleDuration |
MP4::trunsampleSize | MP4::trunsampleOffsets);
}else{
trun_box.setFlags(MP4::trundataOffset | MP4::trunsampleDuration | MP4::trunsampleSize);
}
trun_box.setFirstSampleFlags(0x00004002);
for (int i = 0; i < keyObj.getParts(); i++){
MP4::trunSampleInformation trunSample;
trunSample.sampleSize = myMeta.tracks[tid].parts[i + partOffset].getSize();
keySize += myMeta.tracks[tid].parts[i + partOffset].getSize();
trunSample.sampleDuration = myMeta.tracks[tid].parts[i + partOffset].getDuration() * 10000;
if (myMeta.tracks[tid].type == "video"){
trunSample.sampleOffset = myMeta.tracks[tid].parts[i + partOffset].getOffset() * 10000;
}
trun_box.setSampleInformation(trunSample, i);
}
MP4::SDTP sdtp_box;
sdtp_box.setVersion(0);
if (myMeta.tracks[tid].type == "video"){
sdtp_box.setValue(36, 4);
for (int i = 1; i < keyObj.getParts(); i++){sdtp_box.setValue(20, 4 + i);}
}else{
sdtp_box.setValue(40, 4);
for (int i = 1; i < keyObj.getParts(); i++){sdtp_box.setValue(40, 4 + i);}
}
MP4::TRAF traf_box;
traf_box.setContent(tfhd_box, 0);
traf_box.setContent(trun_box, 1);
traf_box.setContent(sdtp_box, 2);
// If the stream is live, we want to have a fragref box if possible
//////HEREHEREHERE
if (myMeta.live){
MP4::UUID_TFXD tfxd_box;
tfxd_box.setTime(keyObj.getTime());
tfxd_box.setDuration(keyObj.getLength());
traf_box.setContent(tfxd_box, 3);
MP4::UUID_TrackFragmentReference fragref_box;
fragref_box.setVersion(1);
fragref_box.setFragmentCount(0);
int fragCount = 0;
for (unsigned int i = 0; fragCount < 2 && i < myMeta.tracks[tid].keys.size() - 1; i++){
if (myMeta.tracks[tid].keys[i].getTime() > seekTime){
DEBUG_MSG(DLVL_HIGH, "Key %d added to fragRef box, time %llu > %lld", i,
myMeta.tracks[tid].keys[i].getTime(), seekTime);
fragref_box.setTime(fragCount, myMeta.tracks[tid].keys[i].getTime() * 10000);
fragref_box.setDuration(fragCount, myMeta.tracks[tid].keys[i].getLength() * 10000);
fragref_box.setFragmentCount(++fragCount);
}
}
traf_box.setContent(fragref_box, 4);
}
MP4::MOOF moof_box;
moof_box.setContent(mfhd_box, 0);
moof_box.setContent(traf_box, 1);
/*LTS-START*/
///\TODO This encryption-handling section does not handle thisPacket correctly!
if (nProxy.encrypt){
MP4::UUID_SampleEncryption sEnc;
sEnc.setVersion(0);
if (myMeta.tracks[tid].type == "audio"){
sEnc.setFlags(0);
for (int i = 0; i < keyObj.getParts(); i++){
MP4::UUID_SampleEncryption_Sample newSample;
prepareNext();
thisPacket.getString("ivec", newSample.InitializationVector);
sEnc.setSample(newSample, i);
}
}else{
sEnc.setFlags(2);
std::deque<long long int> tmpParts;
for (int i = 0; i < keyObj.getParts(); i++){
// Get the correct packet
prepareNext();
MP4::UUID_SampleEncryption_Sample newSample;
thisPacket.getString("ivec", newSample.InitializationVector);
std::deque<int> nalSizes = nalu::parseNalSizes(thisPacket);
for (std::deque<int>::iterator it = nalSizes.begin(); it != nalSizes.end(); it++){
int encrypted = (*it - 5) & ~0xF; // Bitmask to a multiple of 16
MP4::UUID_SampleEncryption_Sample_Entry newEntry;
newEntry.BytesClear = *it - encrypted; // Size + nal_unit_type
newEntry.BytesEncrypted = encrypted; // Entire NAL except nal_unit_type;
newSample.Entries.push_back(newEntry);
}
sEnc.setSample(newSample, i);
}
}
traf_box.setContent(sEnc, 3);
}
seek(seekTime);
/*LTS-END*/
// Setting the correct offsets.
moof_box.setContent(traf_box, 1);
trun_box.setDataOffset(moof_box.boxedSize() + 8);
traf_box.setContent(trun_box, 1);
moof_box.setContent(traf_box, 1);
H.Clean();
H.SetHeader("Content-Type", "video/mp4");
H.setCORSHeaders();
H.StartResponse(H, myConn);
H.Chunkify(moof_box.asBox(), moof_box.boxedSize(), myConn);
int size = htonl(keySize + 8);
H.Chunkify((char *)&size, 4, myConn);
H.Chunkify("mdat", 4, myConn);
sentHeader = true;
H.Clean();
}
/*LTS-START*/
/// Maps the stream's encryption shared-memory page, at most once per process.
/// If the page exists, reads the Verimatrix data from it and enables encryption.
void OutHSS::loadEncryption(){
  static bool encryptionLoaded = false;
  if (encryptionLoaded){return;}
  encryptionLoaded = true;
  // Open (but never create) the per-stream encryption data page
  char pageName[NAME_BUFFER_SIZE];
  snprintf(pageName, NAME_BUFFER_SIZE, SHM_STREAM_ENCRYPT, streamName.c_str());
  nProxy.encryptionPage.init(pageName, 8 * 1024 * 1024, false, false);
  if (!nProxy.encryptionPage.mapped){return;}
  nProxy.vmData.read(nProxy.encryptionPage.mapped);
  nProxy.encrypt = true;
}
/// Builds the PlayReady protection header for the manifest.
/// Wraps the WRMHEADER XML (UTF-16, byte order mark stripped) in a PlayReady
/// Header Object — little-endian length fields, one record of type 1 — and
/// returns the whole object base64-encoded.
std::string OutHSS::protectionHeader(){
  loadEncryption();
  std::string xmlGen =
      "<WRMHEADER xmlns=\"http://schemas.microsoft.com/DRM/2007/03/PlayReadyHeader\" "
      "version=\"4.0.0.0\"><DATA><PROTECTINFO><KEYLEN>16</KEYLEN><ALGID>AESCTR</ALGID></"
      "PROTECTINFO><KID>";
  xmlGen += nProxy.vmData.keyid;
  xmlGen += "</KID><LA_URL>";
  xmlGen += nProxy.vmData.laurl;
  xmlGen += "</LA_URL></DATA></WRMHEADER>";
  // Convert to UTF-16 and drop the 2-byte byte order mark
  std::string wrm = toUTF16(xmlGen);
  wrm = wrm.substr(2);
  // Total size = 4 (length) + 2 (record count) + 2 (record type) + 2 (record length) + data
  size_t totalSize = wrm.size() + 10;
  std::stringstream resGen;
  resGen << (char)(totalSize & 0xFF) << (char)((totalSize >> 8) & 0xFF)
         << (char)((totalSize >> 16) & 0xFF) << (char)((totalSize >> 24) & 0xFF);
  resGen << (char)0x01 << (char)0x00; // record count = 1, little-endian
  resGen << (char)0x01 << (char)0x00; // record type = 1 (rights management header)
  resGen << (char)(wrm.size() & 0xFF) << (char)((wrm.size() >> 8) & 0xFF);
  resGen << wrm;
  return Encodings::Base64::encode(resGen.str());
}
/*LTS-END*/
///\brief Builds an index file for HTTP Smooth streaming.
///\param encParams The encryption parameters. /*LTS*/
///\return The index file for HTTP Smooth Streaming.
std::string OutHSS::smoothIndex(){
loadEncryption(); // LTS
updateMeta();
std::stringstream Result;
Result << "<?xml version=\"1.0\" encoding=\"utf-16\"?>\n";
Result << "<SmoothStreamingMedia "
"MajorVersion=\"2\" "
"MinorVersion=\"0\" "
"TimeScale=\"10000000\" ";
std::deque<std::map<unsigned int, DTSC::Track>::iterator> audioIters;
std::deque<std::map<unsigned int, DTSC::Track>::iterator> videoIters;
long long int maxWidth = 0;
long long int maxHeight = 0;
long long int minWidth = 99999999;
long long int minHeight = 99999999;
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
if (it->second.codec == "AAC"){audioIters.push_back(it);}
if (it->second.codec == "H264"){
videoIters.push_back(it);
if (it->second.width > maxWidth){maxWidth = it->second.width;}
if (it->second.width < minWidth){minWidth = it->second.width;}
if (it->second.height > maxHeight){maxHeight = it->second.height;}
if (it->second.height < minHeight){minHeight = it->second.height;}
}
}
DEBUG_MSG(DLVL_DONTEVEN, "Buffer window here %lld", myMeta.bufferWindow);
if (myMeta.vod){
Result << "Duration=\""
<< ((*videoIters.begin())->second.lastms - (*videoIters.begin())->second.firstms) << "0000\"";
}else{
Result << "Duration=\"0\" "
"IsLive=\"TRUE\" "
"LookAheadFragmentCount=\"2\" "
"DVRWindowLength=\""
<< myMeta.bufferWindow
<< "0000\" "
"CanSeek=\"TRUE\" "
"CanPause=\"TRUE\" ";
}
Result << ">\n";
// Add audio entries
if (audioIters.size()){
Result << "<StreamIndex "
"Type=\"audio\" "
"QualityLevels=\""
<< audioIters.size()
<< "\" "
"Name=\"audio\" "
"Chunks=\""
<< (*audioIters.begin())->second.keys.size()
<< "\" "
"Url=\"Q({bitrate})/A({start time})\">\n";
int index = 0;
for (std::deque<std::map<unsigned int, DTSC::Track>::iterator>::iterator it = audioIters.begin();
it != audioIters.end(); it++){
Result << "<QualityLevel "
"Index=\""
<< index
<< "\" "
"Bitrate=\""
<< bpsAndIdToBitrate((*it)->second.bps, (*it)->first)
<< "\" "
"CodecPrivateData=\""
<< std::hex;
for (unsigned int i = 0; i < (*it)->second.init.size(); i++){
Result << std::setfill('0') << std::setw(2) << std::right << (int)(*it)->second.init[i];
}
Result << std::dec
<< "\" "
"SamplingRate=\""
<< (*it)->second.rate
<< "\" "
"Channels=\"2\" "
"BitsPerSample=\"16\" "
"PacketSize=\"4\" "
"AudioTag=\"255\" "
"FourCC=\"AACL\" >\n";
Result << "</QualityLevel>\n";
index++;
}
if ((*audioIters.begin())->second.keys.size()){
for (std::deque<DTSC::Key>::iterator it = (*audioIters.begin())->second.keys.begin();
it != (((*audioIters.begin())->second.keys.end()) - 1); it++){
Result << "<c ";
if (it == (*audioIters.begin())->second.keys.begin()){
Result << "t=\"" << it->getTime() * 10000 << "\" ";
}
Result << "d=\"" << it->getLength() * 10000 << "\" />\n";
}
}
Result << "</StreamIndex>\n";
}
// Add video entries
if (videoIters.size()){
Result << "<StreamIndex "
"Type=\"video\" "
"QualityLevels=\""
<< videoIters.size()
<< "\" "
"Name=\"video\" "
"Chunks=\""
<< (*videoIters.begin())->second.keys.size()
<< "\" "
"Url=\"Q({bitrate})/V({start time})\" "
"MaxWidth=\""
<< maxWidth
<< "\" "
"MaxHeight=\""
<< maxHeight
<< "\" "
"DisplayWidth=\""
<< maxWidth
<< "\" "
"DisplayHeight=\""
<< maxHeight << "\">\n";
int index = 0;
for (std::deque<std::map<unsigned int, DTSC::Track>::iterator>::iterator it = videoIters.begin();
it != videoIters.end(); it++){
// Add video qualities
Result << "<QualityLevel "
"Index=\""
<< index
<< "\" "
"Bitrate=\""
<< bpsAndIdToBitrate((*it)->second.bps, (*it)->first)
<< "\" "
"CodecPrivateData=\""
<< std::hex;
MP4::AVCC avccbox;
avccbox.setPayload((*it)->second.init);
std::string tmpString = avccbox.asAnnexB();
for (unsigned int i = 0; i < tmpString.size(); i++){
Result << std::setfill('0') << std::setw(2) << std::right << (int)tmpString[i];
}
Result << std::dec
<< "\" "
"MaxWidth=\""
<< (*it)->second.width
<< "\" "
"MaxHeight=\""
<< (*it)->second.height
<< "\" "
"FourCC=\"AVC1\" >\n";
Result << "</QualityLevel>\n";
index++;
}
if ((*videoIters.begin())->second.keys.size()){
for (std::deque<DTSC::Key>::iterator it = (*videoIters.begin())->second.keys.begin();
it != (((*videoIters.begin())->second.keys.end()) - 1); it++){
Result << "<c ";
if (it == (*videoIters.begin())->second.keys.begin()){
Result << "t=\"" << it->getTime() * 10000 << "\" ";
}
Result << "d=\"" << it->getLength() * 10000 << "\" />\n";
}
}
Result << "</StreamIndex>\n";
}
/*LTS-START*/
if (nProxy.encrypt){
Result << "<Protection><ProtectionHeader SystemID=\"9a04f079-9840-4286-ab92-e65be0885f95\">";
Result << protectionHeader();
Result << "</ProtectionHeader></Protection>";
}
/*LTS-END*/
Result << "</SmoothStreamingMedia>\n";
#if DEBUG >= 8
std::cerr << "Sending this manifest:" << std::endl << Result << std::endl;
#endif
return toUTF16(Result.str());
}// smoothIndex
/// Handles an incoming HTTP request: answers manifest requests directly with
/// the generated Smooth Streaming index, and switches into data-sending mode
/// for fragment requests.
void OutHSS::onHTTP(){
  bool isManifest = (H.url.find("Manifest") != std::string::npos);
  bool headerOnly = (H.method == "OPTIONS" || H.method == "HEAD");
  if (headerOnly && !isManifest){
    // Preflight/head request for a fragment: generic headers only, no body
    H.Clean();
    H.SetHeader("Content-Type", "application/octet-stream");
    H.SetHeader("Cache-Control", "no-cache");
    H.setCORSHeaders();
    H.SendResponse("200", "OK", myConn);
    H.Clean();
    return;
  }
  initialize();
  loadEncryption(); // LTS
  if (!isManifest){
    // Fragment request: start pushing media data
    parseData = true;
    wantRequest = false;
    sendHeader();
    return;
  }
  // Manifest request: direct reply
  H.Clean();
  H.SetHeader("Content-Type", "text/xml");
  H.SetHeader("Cache-Control", "no-cache");
  H.setCORSHeaders();
  if (headerOnly){
    H.SendResponse("200", "OK", myConn);
    return;
  }
  std::string manifest = smoothIndex();
  H.SetBody(manifest);
  H.SendResponse("200", "OK", myConn);
  H.Clean();
}
}// namespace Mist

View file

@ -1,26 +0,0 @@
#include "output_http.h"
#include <mist/http_parser.h>
namespace Mist{
/// HTTP Smooth Streaming (MSS) output: serves the UTF-16 XML manifest and
/// fMP4 media fragments, with optional PlayReady encryption support (LTS).
class OutHSS : public HTTPOutput{
public:
OutHSS(Socket::Connection &conn);
~OutHSS();
static void init(Util::Config *cfg);
void onHTTP();
void sendNext();
void sendHeader();
protected:
std::string protectionHeader(); /*LTS*/ // PlayReady header object, base64-encoded
std::string smoothIndex(); // builds the Smooth Streaming manifest
void loadEncryption(); /*LTS*/ // maps the encryption SHM page once
int canSeekms(unsigned int ms);
int keysToSend; // number of key frames left to send for the current request
int myTrackStor; // track ID of the currently requested fragment
int myKeyStor; // seek time (ms) of the currently requested fragment
unsigned long long playUntil; // presumably the cut-off timestamp for sending; set from the next key's time — TODO confirm
};
}// namespace Mist
typedef Mist::OutHSS mistOut;

View file

@ -223,8 +223,8 @@ namespace Mist{
MEDIUM_MSG("Switching from %s (%s) to %s (%s)", capa["name"].asStringRef().c_str(),
streamName.c_str(), handler.c_str(), H.GetVar("stream").c_str());
streamName = H.GetVar("stream");
nProxy.userClient.finish();
statsPage.finish();
userSelect.clear();
if (statComm){statComm.setStatus(COMM_STATUS_DISCONNECT);}
reConnector(handler);
onFail("Server error - could not start connector", true);
return;
@ -285,7 +285,6 @@ namespace Mist{
webSock = 0;
return;
}
crc = getpid();
onWebsocketConnect();
H.Clean();
return;
@ -388,7 +387,6 @@ namespace Mist{
DTSC::Scan capa = rCapa.getMember("connectors");
pipedCapa = capa.getMember(connector).asJSON();
}
// build arguments for starting output process
std::string tmparg = Util::getMyPath() + std::string("MistOut") + connector;
std::string tmpPrequest;

View file

@ -110,7 +110,6 @@ namespace Mist{
capa["desc"] = "HTTP connection handler, provides all enabled HTTP-based outputs";
capa["provides"] = "HTTP";
capa["protocol"] = "http://";
capa["codecs"][0u][0u].append("*");
capa["url_rel"] = "/$.html";
capa["url_match"].append("/crossdomain.xml");
capa["url_match"].append("/clientaccesspolicy.xml");
@ -237,6 +236,15 @@ namespace Mist{
}
}
}
bool allowBFrames = true;
if (conncapa.isMember("methods")){
jsonForEach(conncapa["methods"], mthd){
if (mthd->isMember("nobframes") && (*mthd)["nobframes"]){
allowBFrames = false;
break;
}
}
}
const std::string &rel = conncapa["url_rel"].asStringRef();
unsigned int most_simul = 0;
unsigned int total_matches = 0;
@ -250,31 +258,31 @@ namespace Mist{
jsonForEach((*itb), itc){
const std::string &strRef = (*itc).asStringRef();
bool byType = false;
bool multiSel = false;
uint8_t shift = 0;
if (strRef[shift] == '@'){
byType = true;
++shift;
}
if (strRef[shift] == '+'){
multiSel = true;
++shift;
}
jsonForEach(strmMeta["tracks"], trit){
if ((!byType && (*trit)["codec"].asStringRef() == strRef.substr(shift)) ||
(byType && (*trit)["type"].asStringRef() == strRef.substr(shift)) ||
strRef.substr(shift) == "*"){
matches++;
total_matches++;
if (conncapa.isMember("exceptions") && conncapa["exceptions"].isObject() &&
conncapa["exceptions"].size()){
jsonForEach(conncapa["exceptions"], ex){
if (ex.key() == "codec:" + strRef.substr(shift)){
if (!Util::checkException(*ex, useragent)){
matches--;
total_matches--;
if (allowBFrames || !(trit->isMember("bframes") && (*trit)["bframes"])){
matches++;
total_matches++;
if (conncapa.isMember("exceptions") && conncapa["exceptions"].isObject() &&
conncapa["exceptions"].size()){
jsonForEach(conncapa["exceptions"], ex){
if (ex.key() == "codec:" + strRef.substr(shift)){
if (!Util::checkException(*ex, useragent)){
matches--;
total_matches--;
}
break;
}
break;
}
}
}
@ -392,9 +400,13 @@ namespace Mist{
"'),MistVideoObject:mv" + forceType + devSkin + "});" + seekTo + "</script></div></body></html>");
if ((uAgent.find("iPad") != std::string::npos) || (uAgent.find("iPod") != std::string::npos) ||
(uAgent.find("iPhone") != std::string::npos)){
H.SetHeader("Location", hlsUrl);
H.SendResponse("307", "HLS redirect", myConn);
return;
if (uAgent.find("OS 11") == std::string::npos && uAgent.find("OS 12") == std::string::npos &&
uAgent.find("OS 13") == std::string::npos && uAgent.find("OS 14") == std::string::npos &&
uAgent.find("OS 15") == std::string::npos && uAgent.find("OS 16") == std::string::npos){
H.SetHeader("Location", hlsUrl);
H.SendResponse("307", "HLS redirect", myConn);
return;
}
}
H.SendResponse("200", "OK", myConn);
}
@ -450,39 +462,32 @@ namespace Mist{
if (!myConn){return json_resp;}
bool hasVideo = false;
for (std::map<unsigned int, DTSC::Track>::iterator trit = myMeta.tracks.begin();
trit != myMeta.tracks.end(); trit++){
if (trit->second.type == "video"){
std::set<size_t> validTracks = M.getValidTracks();
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); it++){
if (M.getType(*it) == "video"){
hasVideo = true;
if (trit->second.width > json_resp["width"].asInt()){
json_resp["width"] = trit->second.width;
}
if (trit->second.height > json_resp["height"].asInt()){
json_resp["height"] = trit->second.height;
if (M.getWidth(*it) > json_resp["width"].asInt()){json_resp["width"] = M.getWidth(*it);}
if (M.getHeight(*it) > json_resp["height"].asInt()){
json_resp["height"] = M.getHeight(*it);
}
}
}
if (json_resp["width"].asInt() < 1 || json_resp["height"].asInt() < 1){
json_resp["width"] = 640;
json_resp["height"] = 480;
if (!hasVideo){json_resp["height"] = 20;}
json_resp["height"] = (hasVideo ? 480 : 20);
}
if (myMeta.vod){json_resp["type"] = "vod";}
if (myMeta.live){json_resp["type"] = "live";}
json_resp["type"] = (M.getVod() ? "vod" : "live");
// show ALL the meta datas!
json_resp["meta"] = myMeta.toJSON();
M.toJSON(json_resp["meta"], true);
jsonForEach(json_resp["meta"]["tracks"], it){
if (it->isMember("lang")){
(*it)["language"] = Encodings::ISO639::decode((*it)["lang"].asStringRef());
}
it->removeMember("fragments");
it->removeMember("keys");
it->removeMember("keysizes");
it->removeMember("parts");
it->removeMember("ivecs"); /*LTS*/
if (M.hasBFrames((*it)["idx"].asInt())){(*it)["bframes"] = 1;}
}
json_resp["meta"].removeMember("source");
json_resp["meta"]["bframes"] = (M.hasBFrames() ? 1 : 0);
// Get sources/protocols information
Util::DTSCShmReader rCapa(SHM_CAPA);
@ -704,14 +709,13 @@ namespace Mist{
initialize();
if (!myConn){return;}
for (std::map<unsigned int, DTSC::Track>::iterator trit = myMeta.tracks.begin();
trit != myMeta.tracks.end(); trit++){
if (trit->second.type == "video"){
trackSources += " <video src='" + streamName +
"?track=" + JSON::Value(trit->first).asString() + "' height='" +
JSON::Value(trit->second.height).asString() + "' system-bitrate='" +
JSON::Value(trit->second.bps).asString() + "' width='" +
JSON::Value(trit->second.width).asString() + "' />\n";
std::set<size_t> validTracks = M.getValidTracks();
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); it++){
if (M.getType(*it) == "video"){
trackSources += " <video src='" + streamName + "?track=" + JSON::Value(*it).asString() +
"' height='" + JSON::Value(M.getHeight(*it)).asString() +
"' system-bitrate='" + JSON::Value(M.getBps(*it)).asString() +
"' width='" + JSON::Value(M.getWidth(*it)).asString() + "' />\n";
}
}
}
@ -1023,7 +1027,7 @@ namespace Mist{
currStreamName = streamName;
snprintf(pageName, NAME_BUFFER_SIZE, SHM_STREAM_STATE, streamName.c_str());
IPC::sharedPage streamStatus(pageName, 1, false, false);
uint8_t prevState, newState, metaCounter;
uint8_t prevState, newState, pingCounter = 0;
uint64_t prevTracks;
prevState = newState = STRMSTAT_INVALID;
while (keepGoing()){
@ -1034,11 +1038,10 @@ namespace Mist{
newState = streamStatus.mapped[0];
}
if (newState != prevState || (newState == STRMSTAT_READY && myMeta.tracks.size() != prevTracks)){
if (newState != prevState || (newState == STRMSTAT_READY && M.getValidTracks().size() != prevTracks)){
if (newState == STRMSTAT_READY){
reconnect();
updateMeta();
prevTracks = myMeta.tracks.size();
prevTracks = M.getValidTracks().size();
}else{
disconnect();
}
@ -1057,8 +1060,7 @@ namespace Mist{
}else{
Util::sleep(250);
}
if (newState == STRMSTAT_READY && (++metaCounter % 4) == 0){updateMeta();}
if ((metaCounter % 40) == 0){ws.sendFrame("", 0, 0x9);}
if ((++pingCounter % 40) == 0){ws.sendFrame("", 0, 0x9);}
}
}
return true;

View file

@ -95,7 +95,7 @@ namespace Mist{
C.close();
return;
}else{
Util::sleep(100);
Util::sleep(20);
}
}
HIGH_MSG("Started SSL connection handler");
@ -162,10 +162,10 @@ namespace Mist{
// We have data - pass it on
activity = true;
while (http_buf.size() && http){
int todo = http_buf.get().size();
int toSend = http_buf.get().size();
int done = 0;
while (done < todo){
ret = mbedtls_ssl_write(&ssl, (const unsigned char *)http_buf.get().data() + done, todo - done);
while (done < toSend){
ret = mbedtls_ssl_write(&ssl, (const unsigned char *)http_buf.get().data() + done, toSend - done);
if (ret == MBEDTLS_ERR_NET_CONN_RESET || ret == MBEDTLS_ERR_SSL_CLIENT_RECONNECT){
HIGH_MSG("SSL disconnect!");
http.close();
@ -174,19 +174,18 @@ namespace Mist{
if (ret != MBEDTLS_ERR_SSL_WANT_READ && ret != MBEDTLS_ERR_SSL_WANT_WRITE){
done += ret;
}else{
Util::sleep(50);
Util::sleep(20);
}
}
http_buf.get().clear();
}
}
if (!activity){Util::sleep(50);}
if (!activity){Util::sleep(20);}
}
// close the HTTP process (close stdio, kill its PID)
http.close();
Util::Procs::Stop(http_proc);
uint16_t waiting = 0;
while (++waiting < 100){
while (++waiting < 50){
if (!Util::Procs::isRunning(http_proc)){break;}
Util::sleep(100);
}

View file

@ -51,13 +51,7 @@ namespace Mist{
void OutHTTPTS::initialSeek(){
// Adds passthrough support to the regular initialSeek function
if (targetParams.count("passthrough")){
selectedTracks.clear();
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
selectedTracks.insert(it->first);
}
}
if (targetParams.count("passthrough")){selectAllTracks();}
Output::initialSeek();
}
@ -69,13 +63,13 @@ namespace Mist{
capa["url_rel"] = "/$.ts";
capa["url_match"] = "/$.ts";
capa["socket"] = "http_ts";
capa["codecs"][0u][0u].append("H264");
capa["codecs"][0u][0u].append("HEVC");
capa["codecs"][0u][0u].append("MPEG2");
capa["codecs"][0u][1u].append("AAC");
capa["codecs"][0u][1u].append("MP3");
capa["codecs"][0u][1u].append("AC3");
capa["codecs"][0u][1u].append("MP2");
capa["codecs"][0u][0u].append("+H264");
capa["codecs"][0u][0u].append("+HEVC");
capa["codecs"][0u][0u].append("+MPEG2");
capa["codecs"][0u][1u].append("+AAC");
capa["codecs"][0u][1u].append("+MP3");
capa["codecs"][0u][1u].append("+AC3");
capa["codecs"][0u][1u].append("+MP2");
capa["methods"][0u]["handler"] = "http";
capa["methods"][0u]["type"] = "html5/video/mpeg";
capa["methods"][0u]["priority"] = 1;
@ -83,7 +77,6 @@ namespace Mist{
capa["push_urls"].append("ts-exec:*");
{
int fin = 0, fout = 0, ferr = 0;
pid_t srt_tx = -1;
const char *args[] ={"srt-live-transmit", 0};
srt_tx = Util::Procs::StartPiped(args, 0, 0, 0);
@ -130,11 +123,12 @@ namespace Mist{
wantRequest = false;
}
void OutHTTPTS::sendTS(const char *tsData, unsigned int len){
if (!isRecording()){
H.Chunkify(tsData, len, myConn);
}else{
void OutHTTPTS::sendTS(const char *tsData, size_t len){
if (isRecording()){
myConn.SendNow(tsData, len);
return;
}
H.Chunkify(tsData, len, myConn);
if (targetParams.count("passthrough")){selectAllTracks();}
}
}// namespace Mist

View file

@ -8,7 +8,7 @@ namespace Mist{
~OutHTTPTS();
static void init(Util::Config *cfg);
void onHTTP();
void sendTS(const char *tsData, unsigned int len = 188);
void sendTS(const char *tsData, size_t len = 188);
void initialSeek();
private:

297
src/output/output_jpg.cpp Normal file
View file

@ -0,0 +1,297 @@
#include "output_jpg.h"
#include <fstream>
#include <mist/bitfields.h>
#include <mist/mp4_generic.h>
#include <mist/procs.h>
#include <sys/stat.h> //for stat
#include <sys/types.h> //for stat
#include <unistd.h> //for stat
namespace Mist{
/// Sets up caching configuration and, when a recording target is given on the
/// command line, immediately generates the thumbnail and writes it to the
/// target file (or stdout for "-"), then closes the connection.
OutJPG::OutJPG(Socket::Connection &conn) : HTTPOutput(conn){
// Not in HTTP mode until onHTTP() starts a chunked response
HTTP = false;
cachedir = config->getString("cachedir");
if (cachedir.size()){
// Cache file path is per-stream; cachetime comes from configuration
cachedir += "/MstJPEG" + streamName;
cachetime = config->getInteger("cachetime");
}else{
cachetime = 0;
}
// Recording mode: a target file (or "-" for stdout) was given
if (config->getString("target").size()){
initialize();
if (!streamName.size()){
WARN_MSG("Recording unconnected JPG output to file! Cancelled.");
conn.close();
return;
}
if (!M){
INFO_MSG("Stream not available - aborting");
conn.close();
return;
}
if (!userSelect.size()){
INFO_MSG("Stream codec not supported - aborting");
conn.close();
return;
}
// We generate a thumbnail first, then output it if successful
generate();
if (!jpg_buffer.str().size()){
// On failure, report, but do not open the file or write anything
FAIL_MSG("Could not generate thumbnail for %s", streamName.c_str());
myConn.close();
return;
}
if (config->getString("target") == "-"){
INFO_MSG("Outputting %s to stdout in JPG format", streamName.c_str());
}else{
if (!connectToFile(config->getString("target"))){
myConn.close();
return;
}
INFO_MSG("Recording %s to %s in JPG format", streamName.c_str(), config->getString("target").c_str());
}
myConn.SendNow(jpg_buffer.str().c_str(), jpg_buffer.str().size());
myConn.close();
return;
}
}
/// JPG output never needs to wait for buffering: a single key frame suffices,
/// so the stream is always reported as ready to play.
bool OutJPG::isReadyForPlay(){
  return true;
}
/// Seeks to the key frame that will be rendered as the thumbnail:
/// the current live point for live streams, or the middle of the stream
/// (snapped to the nearest key frame) for VoD.
void OutJPG::initialSeek(){
  size_t mainTrack = getMainSelectedTrack();
  if (mainTrack == INVALID_TRACK_ID){return;}
  INFO_MSG("Doing initial seek");
  if (M.getLive()){
    // Live: seek to the key frame covering the current playback point
    liveSeek();
    seek(M.getTimeForKeyIndex(mainTrack, M.getKeyIndexForTime(mainTrack, currentTime())));
    return;
  }
  // cancel if there are no keys in the main track
  if (!M.getValidTracks().count(mainTrack) || !M.getLastms(mainTrack)){
    WARN_MSG("Aborted vodSeek because no tracks selected");
    return;
  }
  // VoD: aim for the midpoint of the stream, snapped to a key frame
  uint64_t firstMs = M.getFirstms(mainTrack);
  uint64_t midPoint = firstMs + (M.getLastms(mainTrack) - firstMs) / 2;
  MEDIUM_MSG("VoD seek to %" PRIu64 "ms", midPoint);
  seek(M.getTimeForKeyIndex(mainTrack, M.getKeyIndexForTime(mainTrack, midPoint)));
}
/// Registers the JPG output's capabilities (URL pattern, supported codec,
/// cache/ffmpeg options) and its command-line arguments with the config.
void OutJPG::init(Util::Config *cfg){
HTTPOutput::init(cfg);
capa["name"] = "JPG";
capa["desc"] = "Allows getting a representative key frame as JPG image. Requires ffmpeg (with "
"h264 decoding and jpeg encoding) to be "
"installed in the PATH.";
capa["url_rel"] = "/$.jpg";
capa["url_match"] = "/$.jpg";
// Only H264 input is supported (frames are piped to ffmpeg as raw h264)
capa["codecs"][0u][0u].append("H264");
capa["methods"][0u]["handler"] = "http";
capa["methods"][0u]["type"] = "html5/image/jpeg";
capa["methods"][0u]["priority"] = 0;
capa["push_urls"].append("/*.jpg");
// Optional image cache: directory, expiry time, and extra ffmpeg arguments
capa["optional"]["cachedir"]["name"] = "Cache directory";
capa["optional"]["cachedir"]["help"] =
"Location to store cached images, preferably in RAM somewhere";
capa["optional"]["cachedir"]["option"] = "--cachedir";
capa["optional"]["cachedir"]["short"] = "D";
capa["optional"]["cachedir"]["default"] = "/tmp";
capa["optional"]["cachedir"]["type"] = "string";
capa["optional"]["cachetime"]["name"] = "Cache time";
capa["optional"]["cachetime"]["help"] =
"Duration in seconds to wait before refreshing cached images. Does not apply to VoD "
"streams (VoD is cached infinitely)";
capa["optional"]["cachetime"]["option"] = "--cachetime";
capa["optional"]["cachetime"]["short"] = "T";
capa["optional"]["cachetime"]["default"] = 30;
capa["optional"]["cachetime"]["type"] = "uint";
capa["optional"]["ffopts"]["name"] = "Ffmpeg arguments";
capa["optional"]["ffopts"]["help"] =
"Extra arguments to use when generating the jpg file through ffmpeg";
capa["optional"]["ffopts"]["option"] = "--ffopts";
capa["optional"]["ffopts"]["short"] = "F";
capa["optional"]["ffopts"]["default"] = "-qscale:v 4";
capa["optional"]["ffopts"]["type"] = "string";
cfg->addOptionsFromCapabilities(capa);
// Positional argument: output file for recording mode ("-" = stdout)
JSON::Value opt;
opt["arg"] = "string";
opt["default"] = "";
opt["arg_num"] = 1;
opt["help"] = "Target filename to store JPG file as, or - for stdout.";
cfg->addOption("target", opt);
}
/// Handles an HTTP request for a JPG thumbnail.
/// OPTIONS/HEAD receive headers only; otherwise a thumbnail is generated
/// (or served from cache inside generate()) and chunked to the client.
/// Freshly generated images are written to the cache file when configured.
void OutJPG::onHTTP(){
  std::string method = H.method;
  // These headers would otherwise vary the response per client; drop them
  H.clearHeader("Range");
  H.clearHeader("Icy-MetaData");
  H.clearHeader("User-Agent");
  H.setCORSHeaders();
  if (method == "OPTIONS" || method == "HEAD"){
    H.SetHeader("Content-Type", "image/jpeg");
    H.protocol = "HTTP/1.1";
    H.SendResponse("200", "OK", myConn);
    H.Clean();
    return;
  }
  initialize();
  if (!userSelect.size()){
    // No compatible (H264) track: send a static placeholder image and hang up
    H.protocol = "HTTP/1.0";
    H.setCORSHeaders();
    H.body.clear();
    H.SendResponse("200", "Unprocessable: not H264", myConn);
#include "noh264.h"
    myConn.SendNow(noh264, noh264_len);
    myConn.close();
    return;
  }
  H.SetHeader("Content-Type", "image/jpeg");
  H.protocol = "HTTP/1.0";
  H.setCORSHeaders();
  H.StartResponse(H, myConn);
  HTTP = true;
  generate();
  if (!jpg_buffer.str().size()){
    // NOTE(review): generate() also leaves jpg_buffer empty on a cache hit
    // (cached data is chunked directly to the client), in which case this
    // appends the error image after valid data — confirm against generate().
    NoFFMPEG();
  }else{
    H.Chunkify(jpg_buffer.str().c_str(), jpg_buffer.str().size(), myConn);
    if (cachedir.size()){
      // Fix: open in binary mode — JPG is binary data and text-mode
      // translation would corrupt it on platforms that perform it.
      std::ofstream cachefile;
      cachefile.open(cachedir.c_str(), std::ofstream::binary);
      cachefile << jpg_buffer.str();
      cachefile.close();
    }
  }
  H.Chunkify("", 0, myConn);
  H.Clean();
  HTTP = false;
}
/// Reports that ffmpeg could not be started and sends a static error image
/// to the client: chunked when in HTTP mode, raw bytes otherwise.
void OutJPG::NoFFMPEG(){
FAIL_MSG("Could not start ffmpeg! Is it installed on the system?");
// Pulls in the embedded fallback image data (noffmpeg / noffmpeg_len)
#include "noffmpeg.h"
if (HTTP){
H.Chunkify(noffmpeg, noffmpeg_len, myConn);
}else{
myConn.SendNow(noffmpeg, noffmpeg_len);
}
}
/// Generates a JPG thumbnail into jpg_buffer by piping the current key frame
/// (as Annex B H264) through an externally spawned ffmpeg process.
/// If a still-valid cached image exists, it is streamed directly to the
/// client instead and jpg_buffer is left empty.
void OutJPG::generate(){
  // If we're caching, check if the cache hasn't expired yet...
  if (cachedir.size() && cachetime){
    struct stat statData;
    if (stat(cachedir.c_str(), &statData) != -1){
      // VoD thumbnails never change, so a VoD cache is valid forever
      if (Util::epoch() - statData.st_mtime <= cachetime || M.getVod()){
        // NOTE(review): this path streams the cached image directly and
        // returns with jpg_buffer empty; callers that test jpg_buffer for
        // success will mistake a cache hit for a failure — confirm intent.
        std::ifstream cachefile;
        // Fix: binary mode — JPG data must not be subject to text translation
        cachefile.open(cachedir.c_str(), std::ifstream::binary);
        char buffer[8 * 1024];
        while (cachefile.good() && myConn){
          cachefile.read(buffer, 8 * 1024);
          uint32_t s = cachefile.gcount();
          if (HTTP){
            H.Chunkify(buffer, s, myConn);
          }else{
            myConn.SendNow(buffer, s);
          }
        }
        cachefile.close();
        return;
      }
    }
  }
  initialSeek();
  int fin = -1, fout = -1, ferr = 2;
  pid_t ffmpeg = -1;
  // Start ffmpeg quietly if we're < MEDIUM debug level
  char ffcmd[256];
  ffcmd[255] = 0; // ensure there is an ending null byte
  snprintf(ffcmd, 255, "ffmpeg %s -f h264 -i - %s -vframes 1 -f mjpeg -",
           (Util::Config::printDebugLevel >= DLVL_MEDIUM ? "" : "-v quiet"),
           config->getString("ffopts").c_str());
  HIGH_MSG("Starting JPG command: %s", ffcmd);
  // Split the command string in-place into an argv-style array
  char *args[128];
  uint8_t argCnt = 0;
  char *startCh = 0;
  for (char *i = ffcmd; i - ffcmd < 256; ++i){
    if (!*i){
      if (startCh){args[argCnt++] = startCh;}
      break;
    }
    if (*i == ' '){
      if (startCh){
        args[argCnt++] = startCh;
        startCh = 0;
        *i = 0;
      }
    }else{
      if (!startCh){startCh = i;}
    }
  }
  args[argCnt] = 0;
  ffmpeg = Util::Procs::StartPiped(args, &fin, &fout, &ferr);
  if (ffmpeg < 2){
    Socket::Connection failure(fin, fout);
    failure.close();
    NoFFMPEG();
    return;
  }
  VERYHIGH_MSG("Started ffmpeg, PID %" PRIu64 ", pipe %" PRIu32 "/%" PRIu32, (uint64_t)ffmpeg,
               (uint32_t)fin, (uint32_t)fout);
  Socket::Connection ffconn(fin, -1);
  // Send H264 init data in Annex B format
  MP4::AVCC avccbox;
  avccbox.setPayload(M.getInit(getMainSelectedTrack()));
  ffconn.SendNow(avccbox.asAnnexB());
  INSANE_MSG("Sent init data to ffmpeg...");
  if (ffconn && prepareNext() && thisPacket){
    uint64_t keytime = thisPacket.getTime();
    do{
      char *p = 0;
      size_t l = 0;
      uint32_t o = 0;
      thisPacket.getString("data", p, l);
      // Send all NAL units in the key frame, in Annex B format
      while (o + 4 < l){
        // get NAL unit size
        uint32_t s = Bit::btohl(p + o);
        // make sure we don't go out of bounds of packet
        if (o + s + 4 > l){break;}
        // Send H264 Annex B start code
        ffconn.SendNow("\000\000\000\001", 4);
        // Send NAL unit
        ffconn.SendNow(p + o + 4, s);
        INSANE_MSG("Sent h264 %" PRIu32 "b NAL unit to ffmpeg (time: %" PRIu64 ")...", s,
                   thisPacket.getTime());
        // Skip to next NAL unit
        o += s + 4;
      }
      INSANE_MSG("Sent whole packet, checking next...");
    }while (ffconn && prepareNext() && thisPacket && thisPacket.getTime() == keytime);
  }
  ffconn.close();
  // Output ffmpeg result data to socket.
  // Fix: stringstream::clear() only resets error flags; str("") is required
  // to actually discard leftover data from a previous call, otherwise a
  // repeated call appends the new image after stale bytes.
  jpg_buffer.str("");
  jpg_buffer.clear();
  Socket::Connection ffout(-1, fout);
  while (myConn && ffout && (ffout.spool() || ffout.Received().size())){
    while (myConn && ffout.Received().size()){
      jpg_buffer << ffout.Received().get();
      ffout.Received().get().clear();
    }
  }
  ffout.close();
}
}// namespace Mist

22
src/output/output_jpg.h Normal file
View file

@ -0,0 +1,22 @@
#include "output_http.h"
namespace Mist{
/// HTTP output that serves a representative key frame of a stream as a JPG
/// image, generated by piping H264 data through an external ffmpeg process.
class OutJPG : public HTTPOutput{
public:
OutJPG(Socket::Connection &conn);
static void init(Util::Config *cfg);
void onHTTP();
bool isReadyForPlay();
private:
void generate(); // builds (or serves cached) thumbnail data
void initialSeek(); // picks the key frame to render
void NoFFMPEG(); // sends fallback image when ffmpeg cannot start
std::string cachedir; // per-stream cache file path; empty disables caching
uint64_t cachetime; // cache validity in seconds (0 = no expiry-based caching)
bool HTTP; // true while answering an HTTP request (chunked output)
std::stringstream jpg_buffer; // holds the generated JPG data
};
}// namespace Mist
typedef Mist::OutJPG mistOut;

View file

@ -9,6 +9,7 @@ namespace Mist{
keepReselecting = false;
dupcheck = false;
noReceive = false;
pushTrack = INVALID_TRACK_ID;
}
void OutJSON::init(Util::Config *cfg){
@ -36,13 +37,13 @@ namespace Mist{
}
}
JSON::Value jPack;
if (myMeta.tracks[thisPacket.getTrackId()].codec == "JSON"){
if (M.getCodec(thisIdx) == "JSON"){
char *dPtr;
size_t dLen;
thisPacket.getString("data", dPtr, dLen);
jPack["data"] = JSON::fromString(dPtr, dLen);
jPack["time"] = thisPacket.getTime();
jPack["track"] = (uint64_t)thisPacket.getTrackId();
jPack["track"] = thisIdx;
}else{
jPack = thisPacket.toJSON();
}
@ -93,7 +94,7 @@ namespace Mist{
static bool recursive = false;
if (recursive){return true;}
recursive = true;
if (keepReselecting && !isPushing() && !myMeta.vod){
if (keepReselecting && !isPushing() && !M.getVod()){
uint64_t maxTimer = 7200;
while (--maxTimer && keepGoing()){
if (!isBlocking){myConn.spool();}
@ -154,56 +155,39 @@ namespace Mist{
return;
}
}
if (!bootMsOffset){
if (myMeta.bootMsOffset){
bootMsOffset = myMeta.bootMsOffset;
}else{
bootMsOffset = Util::bootMS();
}
}
if (!M.getBootMsOffset()){meta.setBootMsOffset(Util::bootMS());}
// We now know we're allowed to push. Read a JSON object.
JSON::Value inJSON = JSON::fromString(webSock->data, webSock->data.size());
if (!inJSON || !inJSON.isObject()){
// Ignore empty and/or non-parsable JSON packets
MEDIUM_MSG("Ignoring non-JSON object: %s", webSock->data);
MEDIUM_MSG("Ignoring non-JSON object: %s", (char *)webSock->data);
return;
}
// Let's create a new track for pushing purposes, if needed
if (!pushTrack){
pushTrack = 1;
while (myMeta.tracks.count(pushTrack)){++pushTrack;}
}
myMeta.tracks[pushTrack].type = "meta";
myMeta.tracks[pushTrack].codec = "JSON";
if (pushTrack == INVALID_TRACK_ID){pushTrack = meta.addTrack();}
meta.setType(pushTrack, "meta");
meta.setCodec(pushTrack, "JSON");
meta.setID(pushTrack, pushTrack);
// We have a track set correctly. Let's attempt to buffer a frame.
lastSendTime = Util::bootMS();
if (!inJSON.isMember("unix")){
// Base timestamp on arrival time
lastOutTime = (lastSendTime - bootMsOffset);
lastOutTime = (lastSendTime - M.getBootMsOffset());
}else{
// Base timestamp on unix time
lastOutTime = (lastSendTime - bootMsOffset) + (inJSON["unix"].asInt() - Util::epoch()) * 1000;
lastOutTime = (lastSendTime - M.getBootMsOffset()) + (inJSON["unix"].asInt() - Util::epoch()) * 1000;
}
lastOutData = inJSON.toString();
static DTSC::Packet newPack;
newPack.genericFill(lastOutTime, 0, pushTrack, lastOutData.data(), lastOutData.size(), 0, true, bootMsOffset);
bufferLivePacket(newPack);
if (!idleInterval){idleInterval = 100;}
bufferLivePacket(lastOutTime, 0, pushTrack, lastOutData.data(), lastOutData.size(), 0, true);
if (!idleInterval){idleInterval = 5000;}
if (isBlocking){setBlocking(false);}
}
/// Repeats last JSON packet every 5 seconds to keep stream alive.
void OutJSON::onIdle(){
if (nProxy.trackState[pushTrack] != FILL_ACC){
continueNegotiate(pushTrack);
if (nProxy.trackState[pushTrack] == FILL_ACC){idleInterval = 5000;}
return;
}
lastOutTime += (Util::bootMS() - lastSendTime);
lastSendTime = Util::bootMS();
static DTSC::Packet newPack;
newPack.genericFill(lastOutTime, 0, pushTrack, lastOutData.data(), lastOutData.size(), 0, true, bootMsOffset);
bufferLivePacket(newPack);
bufferLivePacket(lastOutTime, 0, pushTrack, lastOutData.data(), lastOutData.size(), 0, true);
}
void OutJSON::onHTTP(){

View file

@ -1,9 +1,9 @@
#include "output_progressive_mp3.h"
#include "output_mp3.h"
namespace Mist{
OutProgressiveMP3::OutProgressiveMP3(Socket::Connection &conn) : HTTPOutput(conn){}
OutMP3::OutMP3(Socket::Connection &conn) : HTTPOutput(conn){}
void OutProgressiveMP3::init(Util::Config *cfg){
void OutMP3::init(Util::Config *cfg){
HTTPOutput::init(cfg);
capa["name"] = "MP3";
capa["friendly"] = "MP3 over HTTP";
@ -23,16 +23,16 @@ namespace Mist{
cfg->addOption("target", opt);
}
bool OutProgressiveMP3::isRecording(){return config->getString("target").size();}
bool OutMP3::isRecording(){return config->getString("target").size();}
void OutProgressiveMP3::sendNext(){
void OutMP3::sendNext(){
char *dataPointer = 0;
size_t len = 0;
thisPacket.getString("data", dataPointer, len);
myConn.SendNow(dataPointer, len);
}
void OutProgressiveMP3::sendHeader(){
void OutMP3::sendHeader(){
if (!isRecording()){
std::string method = H.method;
H.Clean();
@ -48,7 +48,7 @@ namespace Mist{
sentHeader = true;
}
void OutProgressiveMP3::onHTTP(){
void OutMP3::onHTTP(){
std::string method = H.method;
H.Clean();

View file

@ -1,9 +1,9 @@
#include "output_http.h"
namespace Mist{
class OutProgressiveMP3 : public HTTPOutput{
class OutMP3 : public HTTPOutput{
public:
OutProgressiveMP3(Socket::Connection &conn);
OutMP3(Socket::Connection &conn);
static void init(Util::Config *cfg);
void onHTTP();
void sendNext();
@ -15,4 +15,4 @@ namespace Mist{
};
}// namespace Mist
typedef Mist::OutProgressiveMP3 mistOut;
typedef Mist::OutMP3 mistOut;

1144
src/output/output_mp4.cpp Normal file

File diff suppressed because it is too large Load diff

View file

@ -3,7 +3,7 @@
#include <mist/http_parser.h>
namespace Mist{
struct keyPart{
class keyPart{
public:
bool operator<(const keyPart &rhs) const{
if (time < rhs.time){return true;}
@ -15,7 +15,6 @@ namespace Mist{
uint64_t time;
uint64_t byteOffset; // Stores relative bpos for fragmented MP4
uint64_t index;
uint32_t size;
};
struct fragSet{
@ -24,28 +23,27 @@ namespace Mist{
uint64_t firstTime;
uint64_t lastTime;
};
class OutProgressiveMP4 : public HTTPOutput{
class OutMP4 : public HTTPOutput{
public:
OutProgressiveMP4(Socket::Connection &conn);
~OutProgressiveMP4();
OutMP4(Socket::Connection &conn);
~OutMP4();
static void init(Util::Config *cfg);
uint64_t mp4HeaderSize(uint64_t &fileSize, int fragmented = 0);
std::string DTSCMeta2MP4Header(uint64_t &size, int fragmented = 0);
// int fragmented values: 0 = non fragmented stream, 1 = frag stream main header
void buildFragment(); // this builds the structure of the fragment header and stores it in a member variable
void sendFragmentHeader(); // this builds the moof box for fragmented MP4
uint64_t mp4HeaderSize(uint64_t &fileSize, int fragmented = 0) const;
std::string mp4Header(uint64_t &size, int fragmented = 0);
uint64_t mp4moofSize(uint64_t startFragmentTime, uint64_t endFragmentTime, uint64_t &mdatSize) const;
virtual void sendFragmentHeaderTime(uint64_t startFragmentTime,
uint64_t endFragmentTime); // this builds the moof box for fragmented MP4
void findSeekPoint(uint64_t byteStart, uint64_t &seekPoint, uint64_t headerSize);
void onHTTP();
void sendNext();
void sendHeader();
bool doesWebsockets(){return true;}
void onIdle();
bool onFinish();
virtual void onWebsocketFrame();
virtual void onWebsocketConnect();
protected:
Util::ResizeablePointer webBuf;
uint64_t fileSize;
uint64_t byteStart;
uint64_t byteEnd;
@ -53,23 +51,33 @@ namespace Mist{
uint64_t currPos;
uint64_t seekPoint;
uint64_t nextHeaderTime;
uint64_t headerSize;
// variables for standard MP4
std::set<keyPart> sortSet; // needed for unfragmented MP4, remembers the order of keyparts
// variables for fragmented
size_t fragSeqNum; // the sequence number of the next keyframe/fragment when producing fragmented MP4's
size_t fragSeqNum; // the sequence number of the next keyframe/fragment when producing
// fragmented MP4's
size_t vidTrack; // the video track we use as fragmenting base
uint64_t realBaseOffset; // base offset for every moof packet
// from sendnext
bool sending3GP;
uint64_t startTime;
uint64_t endTime;
bool chromeWorkaround;
int keysOnly;
uint64_t estimateFileSize();
uint64_t estimateFileSize() const;
// This is a dirty solution... but it prevents copying and copying and copying again
std::map<size_t, fragSet> currentPartSet;
std::string protectionHeader(size_t idx);
};
}// namespace Mist
typedef Mist::OutProgressiveMP4 mistOut;
typedef Mist::OutMP4 mistOut;

203
src/output/output_ogg.cpp Normal file
View file

@ -0,0 +1,203 @@
#include "output_ogg.h"
#include <algorithm>
#include <mist/bitfields.h>
#include <mist/bitstream.h>
#include <mist/defines.h>
namespace Mist{
// realTime = 0 disables output pacing: data is sent as fast as the socket allows.
OutOGG::OutOGG(Socket::Connection &conn) : HTTPOutput(conn){realTime = 0;}
OutOGG::~OutOGG(){}
/// Registers the OGG-over-HTTP output with the system: identification,
/// URL patterns, supported codecs and the advertised playback method.
void OutOGG::init(Util::Config *cfg){
  HTTPOutput::init(cfg);
  // Identification of this output.
  capa["name"] = "OGG";
  capa["friendly"] = "OGG over HTTP";
  capa["desc"] = "Pseudostreaming in OGG format over HTTP";
  capa["deps"] = "HTTP";
  // Both URL patterns point at *.ogg requests.
  capa["url_match"] = "/$.ogg";
  capa["url_rel"] = "/$.ogg";
  // Codec support: slot 0 holds video (theora), slot 1 audio (vorbis/opus).
  capa["codecs"][0u][0u].append("theora");
  capa["codecs"][0u][1u].append("vorbis");
  capa["codecs"][0u][1u].append("opus");
  // Playback method description; nolive marks this output as VoD-only.
  capa["methods"][0u]["handler"] = "http";
  capa["methods"][0u]["type"] = "html5/video/ogg";
  capa["methods"][0u]["priority"] = 8u;
  capa["methods"][0u]["nolive"] = 1;
}
// Buffers one media packet as an OGG segment on its track's page buffer and
// flushes any pages that are ready to be sent.
void OutOGG::sendNext(){
  OGG::oggSegment newSegment;
  thisPacket.getString("data", newSegment.dataString);
  // Convert the packet timestamp (ms) to an absolute frame number using the
  // track's frames-per-kilosecond rate (1000000.0/fpks = ms per frame).
  pageBuffer[thisIdx].totalFrames = ((double)thisPacket.getTime() / (1000000.0f / M.getFpks(thisIdx))) +
                                    1.5; // should start at 1. added .5 for rounding.
  if (pageBuffer[thisIdx].codec == OGG::THEORA){
    newSegment.isKeyframe = thisPacket.getFlag("keyframe");
    if (newSegment.isKeyframe == true){
      pageBuffer[thisIdx].sendTo(myConn); // send data remaining in buffer (expected to fit on a
                                          // page), keyframe will allways start on new page
      pageBuffer[thisIdx].lastKeyFrame = pageBuffer[thisIdx].totalFrames;
    }
    // Theora granule positions encode frames since the last keyframe.
    newSegment.framesSinceKeyFrame = pageBuffer[thisIdx].totalFrames - pageBuffer[thisIdx].lastKeyFrame;
    newSegment.lastKeyFrameSeen = pageBuffer[thisIdx].lastKeyFrame;
  }
  newSegment.frameNumber = pageBuffer[thisIdx].totalFrames;
  newSegment.timeStamp = thisPacket.getTime();
  pageBuffer[thisIdx].oggSegments.push_back(newSegment);
  if (pageBuffer[thisIdx].codec == OGG::VORBIS){
    pageBuffer[thisIdx].vorbisStuff(); // this updates lastKeyFrame
  }
  // Ship every page that is full enough to go out.
  while (pageBuffer[thisIdx].shouldSend()){pageBuffer[thisIdx].sendTo(myConn);}
}
bool OutOGG::onFinish(){
for (std::map<size_t, OGG::Page>::iterator it = pageBuffer.begin(); it != pageBuffer.end(); it++){
it->second.setHeaderType(OGG::EndOfStream);
it->second.sendTo(myConn);
}
return false;
}
/// Splits concatenated codec init data into its three separate header
/// packets (identification, comment, setup), appending them to output.
///
/// Two layouts are supported:
/// - Xiph lacing: first byte 0x02, then the lengths of the first two packets
///   as runs of 255-valued bytes plus a terminating byte, followed by the
///   packet payloads (the third packet is the remainder of the buffer).
/// - Length-prefixed: three packets, each preceded by a 16-bit big-endian length.
///
/// Returns false (output possibly partially filled) when initData is too
/// short or internally inconsistent.
bool OutOGG::parseInit(const std::string &initData, std::deque<std::string> &output){
  // Both layouts need at least 7 bytes; this also guards initData[0] below.
  if (initData.size() < 7){
    FAIL_MSG("initData size too tiny (size: %zu)", initData.size());
    return false;
  }
  size_t index = 0;
  if (initData[0] == 0x02){//"special" case, requires interpretation similar to table
    size_t len1 = 0;
    size_t len2 = 0;
    index = 1;
    // Xiph lacing: each length is the sum of bytes up to and including the
    // first byte that is not 255. Bytes must be read as unsigned: with a
    // plain (signed) char, a 0xFF byte compares as -1 and never matches 255.
    while (index < initData.size() && (uint8_t)initData[index] == 255){// get len 1
      len1 += (uint8_t)initData[index++];
    }
    if (index >= initData.size()){
      FAIL_MSG("initData truncated while reading first lacing length");
      return false;
    }
    len1 += (uint8_t)initData[index++];
    while (index < initData.size() && (uint8_t)initData[index] == 255){// get len 2
      len2 += (uint8_t)initData[index++];
    }
    if (index >= initData.size()){
      FAIL_MSG("initData truncated while reading second lacing length");
      return false;
    }
    len2 += (uint8_t)initData[index++];
    if (initData.size() < (len1 + len2 + 4)){
      FAIL_MSG("initData size too tiny (size: %zu)", initData.size());
      return false;
    }
    output.push_back(initData.substr(index, len1));
    index += len1;
    output.push_back(initData.substr(index, len2));
    index += len2;
    output.push_back(initData.substr(index));
  }else{
    // Length-prefixed layout: three [16-bit BE length][payload] records.
    unsigned int len = 0;
    for (unsigned int i = 0; i < 3; i++){
      std::string temp = initData.substr(index, 2);
      len = Bit::btohs(temp.data());
      index += 2; // start of data
      if (index + len > initData.size()){
        FAIL_MSG("index+len > initData size");
        return false;
      }
      output.push_back(initData.substr(index, len)); // add data to output deque
      index += len;
      INFO_MSG("init data len[%d]: %d ", i, len);
    }
  }
  return true;
}
// Sends the HTTP response headers followed by the OGG begin-of-stream pages:
// one BOS page per selected track, then the remaining codec header packets.
void OutOGG::sendHeader(){
  HTTP_S.Clean(); // make sure no parts of old requests are left in any buffers
  HTTP_S.SetHeader("Content-Type", "video/ogg");
  HTTP_S.protocol = "HTTP/1.0";
  myConn.SendNow(HTTP_S.BuildResponse("200",
                                      "OK")); // no SetBody = unknown length - this is intentional, we will stream the entire file
  // Per-track queue of codec init packets, filled below per codec.
  std::map<size_t, std::deque<std::string> > initData;
  OGG::oggSegment newSegment;
  // First pass: set up each track's page buffer and send its BOS page.
  for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
    if (M.getCodec(it->first) == "theora"){// get size and position of init data for this page.
      parseInit(M.getInit(it->first), initData[it->first]);
      pageBuffer[it->first].codec = OGG::THEORA;
      pageBuffer[it->first].totalFrames =
          1; // starts at frame number 1, according to weird offDetectMeta function.
      std::string tempStr = initData[it->first][0];
      theora::header tempHead((char *)tempStr.c_str(), 42);
      // KFGShift determines how theora granule positions are split.
      pageBuffer[it->first].split = tempHead.getKFGShift();
      INFO_MSG("got theora KFG shift: %d", pageBuffer[it->first].split); // looks OK.
    }else if (M.getCodec(it->first) == "vorbis"){
      parseInit(M.getInit(it->first), initData[it->first]);
      pageBuffer[it->first].codec = OGG::VORBIS;
      pageBuffer[it->first].totalFrames = 0;
      pageBuffer[it->first].sampleRate = M.getRate(it->first);
      pageBuffer[it->first].prevBlockFlag = -1;
      // Identification header (packet 0) carries the two block sizes.
      vorbis::header tempHead((char *)initData[it->first][0].data(), initData[it->first][0].size());
      pageBuffer[it->first].blockSize[0] = std::min(tempHead.getBlockSize0(), tempHead.getBlockSize1());
      pageBuffer[it->first].blockSize[1] = std::max(tempHead.getBlockSize0(), tempHead.getBlockSize1());
      char audioChannels = tempHead.getAudioChannels(); //?
      // Setup header (packet 2) carries the mode table.
      vorbis::header tempHead2((char *)initData[it->first][2].data(), initData[it->first][2].size());
      pageBuffer[it->first].vorbisModes = tempHead2.readModeDeque(audioChannels); // getting modes
    }else if (M.getCodec(it->first) == "opus"){
      pageBuffer[it->first].totalFrames = 0; //?
      pageBuffer[it->first].codec = OGG::OPUS;
      // Opus only has two header packets: OpusHead (from init) and OpusTags.
      initData[it->first].push_back(M.getInit(it->first));
      initData[it->first].push_back(
          std::string("OpusTags\000\000\000\012MistServer\000\000\000\000", 26));
    }
    pageBuffer[it->first].clear(OGG::BeginOfStream, 0, it->first,
                                0); // CREATES a (map)pageBuffer object, *it = id, pagetype=BOS
    // The first init packet goes out on its own BOS page.
    newSegment.dataString = initData[it->first].front();
    initData[it->first].pop_front();
    pageBuffer[it->first].oggSegments.push_back(newSegment);
    pageBuffer[it->first].sendTo(myConn, 0); // granule position of 0
  }
  // Second pass: flush the remaining header packets for every track.
  for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
    while (initData[it->first].size()){
      newSegment.dataString = initData[it->first].front();
      initData[it->first].pop_front();
      pageBuffer[it->first].oggSegments.push_back(newSegment);
    }
    while (pageBuffer[it->first].oggSegments.size()){
      pageBuffer[it->first].sendTo(myConn, 0); // granule position of 0
    }
  }
  sentHeader = true;
}
/// Reads a pending HTTP request, answers OPTIONS/HEAD probes immediately,
/// applies any explicit track selection, and switches to data-sending mode.
void OutOGG::onRequest(){
  if (!HTTP_R.Read(myConn)){return;}
  DEVEL_MSG("Received request %s", HTTP_R.getUrl().c_str());
  // OPTIONS/HEAD probes get an empty 200 reply and nothing more.
  if (HTTP_R.method == "OPTIONS" || HTTP_R.method == "HEAD"){
    HTTP_S.Clean();
    HTTP_S.SetHeader("Content-Type", "video/ogg");
    HTTP_S.protocol = "HTTP/1.0";
    HTTP_S.SendResponse("200", "OK", myConn);
    HTTP_S.Clean();
    return;
  }
  // Explicit track selection through the audio/video query variables.
  if (HTTP_R.GetVar("audio") != ""){
    size_t audioTrack = atoll(HTTP_R.GetVar("audio").c_str());
    userSelect[audioTrack].reload(streamName, audioTrack);
  }
  if (HTTP_R.GetVar("video") != ""){
    size_t videoTrack = atoll(HTTP_R.GetVar("video").c_str());
    userSelect[videoTrack].reload(streamName, videoTrack);
  }
  // Stop waiting for requests and start pushing out stream data.
  parseData = true;
  wantRequest = false;
  HTTP_R.Clean();
}
}// namespace Mist

24
src/output/output_ogg.h Normal file
View file

@ -0,0 +1,24 @@
#include "output_http.h"
#include <mist/http_parser.h>
#include <mist/ogg.h>
namespace Mist{
// HTTP output that remuxes theora/vorbis/opus tracks into an OGG stream
// ("pseudostreaming" — the whole file is streamed over a plain HTTP response).
class OutOGG : public HTTPOutput{
public:
  OutOGG(Socket::Connection &conn);
  ~OutOGG();
  // Registers capabilities and config options for this output.
  static void init(Util::Config *cfg);
  // Parses an incoming HTTP request and switches to streaming mode.
  void onRequest();
  // Buffers one packet as an OGG segment and flushes ready pages.
  void sendNext();
  // Sends HTTP headers plus all OGG begin-of-stream/header pages.
  void sendHeader();
  // Flushes end-of-stream pages for all tracks.
  bool onFinish();
  // Splits concatenated codec init data into separate header packets.
  bool parseInit(const std::string &initData, std::deque<std::string> &output);

protected:
  HTTP::Parser HTTP_R;                   // Received HTTP
  HTTP::Parser HTTP_S;                   // Sent HTTP
  std::map<size_t, OGG::Page> pageBuffer; // OGG specific variables
};
}// namespace Mist
typedef Mist::OutOGG mistOut;

File diff suppressed because it is too large Load diff

View file

@ -1,205 +0,0 @@
#include "output_progressive_ogg.h"
#include <algorithm>
#include <mist/bitstream.h>
#include <mist/defines.h>
namespace Mist{
OutProgressiveOGG::OutProgressiveOGG(Socket::Connection &conn) : HTTPOutput(conn){
realTime = 0;
}
OutProgressiveOGG::~OutProgressiveOGG(){}
void OutProgressiveOGG::init(Util::Config *cfg){
HTTPOutput::init(cfg);
capa["name"] = "OGG";
capa["friendly"] = "OGG over HTTP";
capa["desc"] = "Pseudostreaming in OGG format over HTTP";
capa["deps"] = "HTTP";
capa["url_rel"] = "/$.ogg";
capa["url_match"] = "/$.ogg";
capa["codecs"][0u][0u].append("theora");
capa["codecs"][0u][1u].append("vorbis");
capa["codecs"][0u][1u].append("opus");
capa["methods"][0u]["handler"] = "http";
capa["methods"][0u]["type"] = "html5/video/ogg";
capa["methods"][0u]["priority"] = 8;
capa["methods"][0u]["nolive"] = 1;
}
void OutProgressiveOGG::sendNext(){
unsigned int track = thisPacket.getTrackId();
OGG::oggSegment newSegment;
thisPacket.getString("data", newSegment.dataString);
pageBuffer[track].totalFrames =
((double)thisPacket.getTime() / (1000000.0f / myMeta.tracks[track].fpks)) +
1.5; // should start at 1. added .5 for rounding.
if (pageBuffer[track].codec == OGG::THEORA){
newSegment.isKeyframe = thisPacket.getFlag("keyframe");
if (newSegment.isKeyframe == true){
pageBuffer[track].sendTo(myConn); // send data remaining in buffer (expected to fit on a page), keyframe will allways start on new page
pageBuffer[track].lastKeyFrame = pageBuffer[track].totalFrames;
}
newSegment.framesSinceKeyFrame = pageBuffer[track].totalFrames - pageBuffer[track].lastKeyFrame;
newSegment.lastKeyFrameSeen = pageBuffer[track].lastKeyFrame;
}
newSegment.frameNumber = pageBuffer[track].totalFrames;
newSegment.timeStamp = thisPacket.getTime();
pageBuffer[track].oggSegments.push_back(newSegment);
if (pageBuffer[track].codec == OGG::VORBIS){
pageBuffer[track].vorbisStuff(); // this updates lastKeyFrame
}
while (pageBuffer[track].shouldSend()){pageBuffer[track].sendTo(myConn);}
}
bool OutProgressiveOGG::onFinish(){
for (std::map<long long unsigned int, OGG::Page>::iterator it = pageBuffer.begin();
it != pageBuffer.end(); it++){
it->second.setHeaderType(OGG::EndOfStream);
it->second.sendTo(myConn);
}
return false;
}
bool OutProgressiveOGG::parseInit(std::string &initData, std::deque<std::string> &output){
std::string temp;
unsigned int index = 0;
if (initData[0] == 0x02){//"special" case, requires interpretation similar to table
if (initData.size() < 7){
FAIL_MSG("initData size too tiny (size: %lu)", initData.size());
return false;
}
unsigned int len1 = 0;
unsigned int len2 = 0;
index = 1;
while (initData[index] == 255){// get len 1
len1 += initData[index++];
}
len1 += initData[index++];
while (initData[index] == 255){// get len 1
len2 += initData[index++];
}
len2 += initData[index++];
if (initData.size() < (len1 + len2 + 4)){
FAIL_MSG("initData size too tiny (size: %lu)", initData.size());
return false;
}
temp = initData.substr(index, len1);
output.push_back(temp);
index += len1;
temp = initData.substr(index, len2);
output.push_back(temp);
index += len2;
temp = initData.substr(index); // remainder of string:
output.push_back(temp); // add data to output deque
}else{
if (initData.size() < 7){
FAIL_MSG("initData size too tiny (size: %lu)", initData.size());
return false;
}
unsigned int len = 0;
for (unsigned int i = 0; i < 3; i++){
temp = initData.substr(index, 2);
len = (((unsigned int)temp[0]) << 8) | (temp[1]); // 2 bytes len
index += 2; // start of data
if (index + len > initData.size()){
FAIL_MSG("index+len > initData size");
return false;
}
temp = initData.substr(index, len);
output.push_back(temp); // add data to output deque
index += len;
INFO_MSG("init data len[%d]: %d ", i, len);
}
}
return true;
}
void OutProgressiveOGG::sendHeader(){
HTTP_S.Clean(); // make sure no parts of old requests are left in any buffers
HTTP_S.SetHeader("Content-Type", "video/ogg");
HTTP_S.protocol = "HTTP/1.0";
myConn.SendNow(HTTP_S.BuildResponse("200", "OK")); // no SetBody = unknown length - this is intentional, we will stream the entire file
std::map<int, std::deque<std::string> > initData;
OGG::oggSegment newSegment;
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
if (myMeta.tracks[*it].codec == "theora"){// get size and position of init data for this page.
parseInit(myMeta.tracks[*it].init, initData[*it]);
pageBuffer[*it].codec = OGG::THEORA;
pageBuffer[*it].totalFrames = 1; // starts at frame number 1, according to weird offDetectMeta function.
std::string tempStr = initData[*it][0];
theora::header tempHead((char *)tempStr.c_str(), 42);
pageBuffer[*it].split = tempHead.getKFGShift();
INFO_MSG("got theora KFG shift: %d", pageBuffer[*it].split); // looks OK.
}else if (myMeta.tracks[*it].codec == "vorbis"){
parseInit(myMeta.tracks[*it].init, initData[*it]);
pageBuffer[*it].codec = OGG::VORBIS;
pageBuffer[*it].totalFrames = 0;
pageBuffer[*it].sampleRate = myMeta.tracks[*it].rate;
pageBuffer[*it].prevBlockFlag = -1;
vorbis::header tempHead((char *)initData[*it][0].data(), initData[*it][0].size());
pageBuffer[*it].blockSize[0] = std::min(tempHead.getBlockSize0(), tempHead.getBlockSize1());
pageBuffer[*it].blockSize[1] = std::max(tempHead.getBlockSize0(), tempHead.getBlockSize1());
char audioChannels = tempHead.getAudioChannels(); //?
vorbis::header tempHead2((char *)initData[*it][2].data(), initData[*it][2].size());
pageBuffer[*it].vorbisModes = tempHead2.readModeDeque(audioChannels); // getting modes
}else if (myMeta.tracks[*it].codec == "opus"){
pageBuffer[*it].totalFrames = 0; //?
pageBuffer[*it].codec = OGG::OPUS;
initData[*it].push_back(myMeta.tracks[*it].init);
initData[*it].push_back(std::string("OpusTags\000\000\000\012MistServer\000\000\000\000", 26));
}
pageBuffer[*it].clear(OGG::BeginOfStream, 0, *it, 0); // CREATES a (map)pageBuffer object, *it = id, pagetype=BOS
newSegment.dataString = initData[*it].front();
initData[*it].pop_front();
pageBuffer[*it].oggSegments.push_back(newSegment);
pageBuffer[*it].sendTo(myConn, 0); // granule position of 0
}
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
while (initData[*it].size()){
newSegment.dataString = initData[*it].front();
initData[*it].pop_front();
pageBuffer[*it].oggSegments.push_back(newSegment);
}
while (pageBuffer[*it].oggSegments.size()){
pageBuffer[*it].sendTo(myConn, 0); // granule position of 0
}
}
sentHeader = true;
}
void OutProgressiveOGG::onRequest(){
if (HTTP_R.Read(myConn)){
DEBUG_MSG(DLVL_DEVEL, "Received request %s", HTTP_R.getUrl().c_str());
if (HTTP_R.method == "OPTIONS" || HTTP_R.method == "HEAD"){
HTTP_S.Clean();
HTTP_S.SetHeader("Content-Type", "video/ogg");
HTTP_S.protocol = "HTTP/1.0";
HTTP_S.SendResponse("200", "OK", myConn);
HTTP_S.Clean();
return;
}
if (HTTP_R.GetVar("audio") != ""){
selectedTracks.insert(JSON::Value(HTTP_R.GetVar("audio")).asInt());
}
if (HTTP_R.GetVar("video") != ""){
selectedTracks.insert(JSON::Value(HTTP_R.GetVar("video")).asInt());
}
parseData = true;
wantRequest = false;
HTTP_R.Clean();
}
}
}// namespace Mist

View file

@ -1,24 +0,0 @@
#include "output_http.h"
#include <mist/http_parser.h>
#include <mist/ogg.h>
namespace Mist{
class OutProgressiveOGG : public HTTPOutput{
public:
OutProgressiveOGG(Socket::Connection &conn);
~OutProgressiveOGG();
static void init(Util::Config *cfg);
void onRequest();
void sendNext();
void sendHeader();
bool onFinish();
bool parseInit(std::string &initData, std::deque<std::string> &output);
protected:
HTTP::Parser HTTP_R; // Received HTTP
HTTP::Parser HTTP_S; // Sent HTTP
std::map<long long unsigned int, OGG::Page> pageBuffer; // OGG specific variables
};
}// namespace Mist
typedef Mist::OutProgressiveOGG mistOut;

View file

@ -1,327 +0,0 @@
#include "output_push.h"
#include <mist/http_parser.h>
#include <mist/shared_memory.h>
#include <mist/tinythread.h>
#include <sys/stat.h>
#define PUSH_INDEX_SIZE 5 // Build index based on most recent X segments
Util::Config *pConf;
std::string sName;
std::string baseURL;
long long srcPort;
std::string srcHost;
std::string dstHost;
long long dstPort;
std::string dstUrl;
// Used to keep track of all segments that can be pushed
std::map<std::string, std::map<int, std::string> > pushableSegments;
// Used to keep track of the timestamp of each pushableSegment
std::map<std::string, std::map<int, int> > pushableTimes;
// Used to keep track of the duration of each pushableSegment
std::map<std::string, std::map<int, int> > pushableDurations;
// For each quality, store the latest number found in the push list
std::map<std::string, int> latestNumber;
// For each quality, store whether it is currently being pushed.
std::map<std::string, bool> parsing;
// For each quality, store an fprint-style string of the relative url to the index_<beginTime>_<endTime>.m3u8
std::map<std::string, std::string> qualityIndex;
// For each quality, store an fprint-style string of the relative url to the segment.
std::map<std::string, std::string> qualitySegment;
// For each quality, store the last PUSH_INDEX_SIZE - 1 timestamps. Used to generate a time-constrained index.m3u8.
std::map<std::string, std::deque<int> > qualityBeginTimes;
// Parses a uri of the form 'http://<host>[:<port>]/<url>, and split it into variables
void parseURI(const std::string &uri, std::string &host, long long &port, std::string &url){
int loc = 0;
if (uri.find("http://") == 0){loc += 7;}
host = uri.substr(loc, uri.find_first_of(":/", 7) - 7);
loc += host.size();
if (uri[loc] == ':'){
port = atoll(uri.c_str() + loc + 1);
loc = uri.find("/", loc);
}
url = uri.substr(loc);
}
// Do an HTTP request, and route it into a post request on a different socket.
void proxyToPost(Socket::Connection &src, const std::string &srcUrl, Socket::Connection &dst,
const std::string &dstUrl){
INFO_MSG("Routing %s to %s", srcUrl.c_str(), dstUrl.c_str());
// Send the initial request
HTTP::Parser H;
H.url = srcUrl;
H.SendRequest(src);
H.Clean();
// Read only the headers of the reply
H.headerOnly = true;
while (src.connected()){
if (src.Received().size() || src.spool()){
if (H.Read(src)){break;}
}
}
H.headerOnly = false;
INFO_MSG("Reply from %s: %s %s", src.getHost().c_str(), H.url.c_str(), H.method.c_str());
// Change the headers of the reply to form a post request
H.method = "POST";
H.url = dstUrl;
H.protocol = "HTTP/1.1";
H.SetHeader("Host", dstHost);
// Start the post request
H.SendRequest(dst);
// Route the original payload.
H.Proxy(src, dst);
H.Clean();
while (dst.connected()){
if (dst.Received().size() || dst.spool()){
if (H.Read(dst)){break;}
}
}
INFO_MSG("Reply from %s: %s %s", dst.getHost().c_str(), H.url.c_str(), H.method.c_str());
}
/// Push the first registered segment for this quality
void pushFirstElement(std::string qId){
std::string semName = "MstPushLock" + sName;
IPC::semaphore pushLock(semName.c_str(), O_CREAT | O_RDWR, ACCESSPERMS, 1);
std::string url;
int time;
int beginTime;
int duration;
// Wait for exclusive access to all globals
pushLock.wait();
// Retrieve all globals for the segment to be pushed
if (pushableSegments[qId].size()){
url = pushableSegments[qId].begin()->second;
time = pushableTimes[qId].begin()->second;
duration = pushableDurations[qId].begin()->second;
if (qualityBeginTimes[qId].size()){
beginTime = qualityBeginTimes[qId].front();
}else{
beginTime = time;
}
}
// Give up exclusive access to all globals
pushLock.post();
// Return if we do not have a segment to push
if (url == ""){return;}
// Create both source and destination connections
Socket::Connection srcConn(srcHost, srcPort, true);
Socket::Connection dstConn(dstHost, dstPort, true);
// Set the locations to push to for this segment
std::string srcLocation = baseURL + url;
std::string dstLocation = dstUrl.substr(0, dstUrl.rfind("/")) + url;
// Push the segment
proxyToPost(srcConn, srcLocation, dstConn, dstLocation);
srcConn.open(srcHost, srcPort, true);
// Set the location to push to for the index containing this segment.
// The index will contain (at most) the last PUSH_INDEX_SIZE segments.
char srcIndex[200];
snprintf(srcIndex, 200, qualityIndex[qId].c_str(), beginTime, time + duration);
srcLocation = baseURL + srcIndex;
dstLocation = dstLocation.substr(0, dstLocation.rfind("/")) + "/index.m3u8";
// Push the index
proxyToPost(srcConn, srcLocation, dstConn, dstLocation);
srcConn.open(srcHost, srcPort, true);
// Set the location to push to for the global index containing all qualities.
srcLocation = baseURL + "/push/index.m3u8";
dstLocation = dstLocation.substr(0, dstLocation.rfind("/"));
dstLocation = dstLocation.substr(0, dstLocation.rfind("/")) + "/index.m3u8";
// Push the global index
proxyToPost(srcConn, srcLocation, dstConn, dstLocation);
// Close both connections
///\todo Make the dstConn "persistent" for each thread?
srcConn.close();
dstConn.close();
// Wait for exclusive access to all globals
pushLock.wait();
// Update all globals to indicate the segment has been pushed correctly
pushableSegments[qId].erase(pushableSegments[qId].begin());
pushableTimes[qId].erase(pushableTimes[qId].begin());
pushableDurations[qId].erase(pushableDurations[qId].begin());
qualityBeginTimes[qId].push_back(time);
// Remove the first elements fromt he beginTimes map to make sure we have PUSH_INDEX_SIZE elements in our index.
// We use -1 here, because we use the segment to currently push as well as everything stored in the map
while (qualityBeginTimes[qId].size() > PUSH_INDEX_SIZE - 1){
qualityBeginTimes[qId].pop_front();
}
// Give up exclusive access to all globals
pushLock.post();
}
/// Thread used to push data.
void pushThread(void *nullPointer){
std::string myThread;
// Attempt to claim a non-claimed quality.
std::string semName = "MstPushClaim" + sName;
IPC::semaphore pushThreadLock(semName.c_str(), O_CREAT | O_RDWR, ACCESSPERMS, 1);
pushThreadLock.wait();
for (std::map<std::string, std::map<int, std::string> >::iterator it = pushableSegments.begin();
it != pushableSegments.end(); it++){
if (it->second.size()){// Make sure we dont try to "claim" pushing an empty track
if (!parsing.count(it->first) || !parsing[it->first]){
INFO_MSG("Claiming thread %s", it->first.c_str());
myThread = it->first;
parsing[it->first] = true;
break;
}
}
}
pushThreadLock.post();
// Return if we were unable to claim a quality
if (myThread == ""){
INFO_MSG("No thread claimed");
return;
}
// While this output is active, push the first element in the list
while (pConf->is_active){
pushFirstElement(myThread);
if (!pushableSegments[myThread].size()){Util::wait(1000);}
}
parsing[myThread] = false;
}
namespace Mist{
  /// Push output: repeatedly polls a remote push list over HTTP and spawns a
  /// pusher thread for every quality that is not currently being pushed.
  OutPush::OutPush(Socket::Connection &conn) : Output(conn){config->activate();}
  OutPush::~OutPush(){}

  /// One iteration of the main loop: lazily initializes source/destination
  /// URLs on first call, (re)connects to the push list server, parses the
  /// list of pushable segments per quality, and starts a detached pusher
  /// thread for each quality that is not yet being handled.
  void OutPush::requestHandler(){
    // Set all basic data only the first time (streamName is empty until then).
    if (streamName == ""){
      srcPort = 80;
      parseURI(config->getString("pushlist"), srcHost, srcPort, pushURL);
      dstPort = 80;
      parseURI(config->getString("destination"), dstHost, dstPort, dstUrl);
      // Strip "/push/list" from the URL
      baseURL = pushURL.substr(0, pushURL.rfind("/"));
      baseURL = baseURL.substr(0, baseURL.rfind("/"));
      // Locate the streamname from the pushURL
      size_t loc = baseURL.find("/", 1) + 1;
      streamName = pushURL.substr(loc, pushURL.rfind("/") - loc);
      sName = streamName;
      // Cast the port explicitly so the %lld specifier is valid regardless of
      // the declared integer type of srcPort.
      INFO_MSG("host: %s, port %lld, url %s, baseURL %s, streamName %s", srcHost.c_str(),
               (long long)srcPort, pushURL.c_str(), baseURL.c_str(), streamName.c_str());
    }
    // Reconnect when disconnected
    if (!listConn.connected()){listConn.open(srcHost, srcPort, true);}
    // Request the push list
    if (listConn.connected()){
      HTTP::Parser hReq;
      hReq.url = baseURL + "/push/list";
      hReq.SendRequest(listConn);
      hReq.Clean();
      // Read the entire response, not just the headers!
      while (!hReq.Read(listConn) && listConn.connected()){
        Util::sleep(100);
        listConn.spool();
      }
      // Construct and parse the json list
      JSON::Value reply = JSON::fromString(hReq.body);
      uint32_t numQualities = reply["qualities"].size();
      for (uint32_t i = 0; i < numQualities; i++){
        JSON::Value &qRef = reply["qualities"][i];
        std::string qId = qRef["id"].asString();
        // Set both the index and segment urls when not yet set.
        if (!qualityIndex.count(qId)){
          qualityIndex[qId] = qRef["index"].asString();
          qualitySegment[qId] = qRef["segment"].asString();
        }
        // Save latest segment number before parsing
        int curLatestNumber = latestNumber[qId];
        // Loop over all segments (unsigned index to match JSON size type)
        for (uint32_t j = 0; j < qRef["segments"].size(); j++){
          JSON::Value &segRef = qRef["segments"][j];
          int thisNumber = segRef["number"].asInt();
          // Check if this segment is newer than the newest segment before parsing
          if (thisNumber > curLatestNumber){
            // If it is the highest so far, store its number
            if (thisNumber > latestNumber[qId]){latestNumber[qId] = thisNumber;}
            // If it is not yet added, add it.
            if (!pushableSegments[qId].count(thisNumber)){
              char segmentUrl[200];
              // The qualitySegment map contains a printf-style string; it is
              // filled in with the segment's start and end times.
              snprintf(segmentUrl, 200, qualitySegment[qId].c_str(), segRef["time"].asInt(),
                       segRef["time"].asInt() + segRef["duration"].asInt());
              // Reuse the already-parsed segment number instead of re-parsing
              // the JSON value for every map insert.
              pushableSegments[qId][thisNumber] = segmentUrl;
              pushableTimes[qId][thisNumber] = segRef["time"].asInt();
              pushableDurations[qId][thisNumber] = segRef["duration"].asInt();
            }
          }
        }
      }
    }
    // Calculate how many qualities are not yet being pushed
    int threadsToSpawn = pushableSegments.size();
    for (std::map<std::string, std::map<int, std::string> >::iterator it = pushableSegments.begin();
         it != pushableSegments.end(); it++){
      if (parsing.count(it->first) && parsing[it->first]){threadsToSpawn--;}
    }
    // And start a thread for each unpushed quality.
    // Threads determine which quality to push for themselves.
    for (int i = 0; i < threadsToSpawn; i++){
      tthread::thread thisThread(pushThread, 0);
      thisThread.detach();
    }
    Util::sleep(100);
  }

  /// Registers this output's capabilities and its two required command line
  /// options: "pushlist" (source list URL) and "destination" (push target).
  void OutPush::init(Util::Config *cfg){
    Output::init(cfg);
    capa["name"] = "Push";
    capa["desc"] = "Enables HTTP Pushing.";
    capa["required"]["pushlist"]["name"] = "URL location of the pushing list";
    capa["required"]["pushlist"]["help"] =
        "This is the location that will be checked for pushable data.";
    capa["required"]["pushlist"]["option"] = "--pushlist";
    capa["required"]["pushlist"]["short"] = "p";
    capa["required"]["pushlist"]["type"] = "str";
    capa["required"]["destination"]["name"] = "URL location of the destination";
    capa["required"]["destination"]["help"] =
        "This is the location that the data will be pushed to.";
    capa["required"]["destination"]["option"] = "--destination";
    capa["required"]["destination"]["short"] = "D";
    capa["required"]["destination"]["type"] = "str";
    cfg->addBasicConnectorOptions(capa);
    pConf = cfg;
    config = cfg;
  }
}// namespace Mist

View file

@ -1,20 +0,0 @@
#include <mist/socket.h>
#include "output.h"
namespace Mist{
// HTTP push output: polls a remote "/push/list" endpoint for pushable data
// and pushes it onward to a configured destination URL.
class OutPush : public Output{
public:
// Constructs the output around an existing connection.
OutPush(Socket::Connection &conn);
~OutPush();
// This output only makes outgoing connections; it never listens for clients.
static bool listenMode(){return false;}
// Main loop body: refreshes the push list and dispatches work as needed.
virtual void requestHandler();
// Registers capabilities and the required command line options.
static void init(Util::Config *cfg);
protected:
Socket::Connection listConn; // connection used to request the push list
std::string pushURL;         // URL of the push list endpoint
};
}// namespace Mist
// Generic alias used by the shared output main() scaffolding.
typedef Mist::OutPush mistOut;

View file

@ -54,8 +54,8 @@ namespace Mist{
"\"t\",\"long\":\"tracks\",\"help\":\"The track IDs of the stream "
"that this connector will transmit separated by spaces.\"}"));
cfg->addOption("seek", JSON::fromString("{\"arg\":\"integer\",\"value\":[0],\"short\": "
"\"S\",\"long\":\"seek\",\"help\":\"The time in "
"milliseconds to seek to, 0 by default.\"}"));
"\"S\",\"long\":\"seek\",\"help\":\"The "
"time in milliseconds to seek to, 0 by default.\"}"));
cfg->addConnectorOptions(666, capa);
config = cfg;
}

View file

@ -2,6 +2,7 @@
#include <cstdlib>
#include <cstring>
#include <mist/auth.h>
#include <mist/bitfields.h>
#include <mist/defines.h>
#include <mist/encode.h>
#include <mist/http_parser.h>
@ -13,8 +14,8 @@
namespace Mist{
OutRTMP::OutRTMP(Socket::Connection &conn) : Output(conn){
lastOutTime = 0;
setRtmpOffset = false;
rtmpOffset = 0;
bootMsOffset = 0;
authAttempts = 0;
maxbps = config->getInteger("maxkbps") * 128;
if (config->getString("target").size()){
@ -163,26 +164,26 @@ namespace Mist{
}
myConn.SendNow(RTMPStream::SendUSR(1, 1)); // send UCM StreamEOF (1), stream 1
AMF::Object amfreply("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", (double)0)); // transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", 0.0)); // transaction ID
amfreply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Stop"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Stream stopped"));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfreply, 20, 1);
amfreply = AMF::Object("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", (double)0)); // transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", 0.0)); // transaction ID
amfreply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.UnpublishNotify"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Stream stopped"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfreply, 20, 1);
myConn.close();
@ -257,12 +258,12 @@ namespace Mist{
void OutRTMP::sendNext(){
// If there are now more selectable tracks, select the new track and do a seek to the current
// timestamp Set sentHeader to false to force it to send init data
if (myMeta.live && selectedTracks.size() < 2){
static unsigned long long lastMeta = 0;
if (M.getLive() && userSelect.size() < 2){
static uint64_t lastMeta = 0;
if (Util::epoch() > lastMeta + 5){
lastMeta = Util::epoch();
updateMeta();
if (myMeta.tracks.size() > 1){
std::set<size_t> validTracks = getSupportedTracks();
if (validTracks.size() > 1){
if (selectDefaultTracks()){
INFO_MSG("Track selection changed - resending headers and continuing");
sentHeader = false;
@ -276,7 +277,7 @@ namespace Mist{
if (thisPacket.getTime() - rtmpOffset < lastOutTime){
int64_t OLD = rtmpOffset;
rtmpOffset -= (1 + lastOutTime - (thisPacket.getTime() - rtmpOffset));
INFO_MSG("Changing rtmpOffset from %lld to %lld", OLD, rtmpOffset);
INFO_MSG("Changing rtmpOffset from %" PRId64 " to %" PRId64, OLD, rtmpOffset);
realTime = 800;
}
lastOutTime = thisPacket.getTime() - rtmpOffset;
@ -294,49 +295,44 @@ namespace Mist{
char *tmpData = 0; // pointer to raw media data
size_t data_len = 0; // length of processed media data
thisPacket.getString("data", tmpData, data_len);
DTSC::Track &track = myMeta.tracks[thisPacket.getTrackId()];
std::string type = M.getType(thisIdx);
std::string codec = M.getCodec(thisIdx);
// set msg_type_id
if (track.type == "video"){
if (type == "video"){
rtmpheader[7] = 0x09;
if (track.codec == "H264"){
if (codec == "H264"){
dheader_len += 4;
dataheader[0] = 7;
dataheader[1] = 1;
if (thisPacket.getInt("offset") > 0){
long long offset = thisPacket.getInt("offset");
int64_t offset = thisPacket.getInt("offset");
if (offset){
dataheader[2] = (offset >> 16) & 0xFF;
dataheader[3] = (offset >> 8) & 0xFF;
dataheader[4] = offset & 0xFF;
}
}
if (track.codec == "H263"){dataheader[0] = 2;}
if (thisPacket.getFlag("keyframe")){
dataheader[0] |= 0x10;
}else{
dataheader[0] |= 0x20;
}
if (codec == "H263"){dataheader[0] = 2;}
dataheader[0] |= (thisPacket.getFlag("keyframe") ? 0x10 : 0x20);
if (thisPacket.getFlag("disposableframe")){dataheader[0] |= 0x30;}
}
if (track.type == "audio"){
if (type == "audio"){
uint32_t rate = M.getRate(thisIdx);
rtmpheader[7] = 0x08;
if (track.codec == "AAC"){
if (codec == "AAC"){
dataheader[0] += 0xA0;
dheader_len += 1;
dataheader[1] = 1; // raw AAC data, not sequence header
}
if (track.codec == "MP3"){
if (codec == "MP3"){
dataheader[0] += 0x20;
if (track.rate == 8000){
dataheader[0] |= 0xE0;
}else{
dataheader[0] |= 0x20;
}
dataheader[0] |= (rate == 8000 ? 0xE0 : 0x20);
}
if (track.codec == "ADPCM"){dataheader[0] |= 0x10;}
if (track.codec == "PCM"){
if (track.size == 16 && swappy.allocate(data_len)){
if (codec == "ADPCM"){dataheader[0] |= 0x10;}
if (codec == "PCM"){
if (M.getSize(thisIdx) == 16 && swappy.allocate(data_len)){
for (uint32_t i = 0; i < data_len; i += 2){
swappy[i] = tmpData[i + 1];
swappy[i + 1] = tmpData[i];
@ -345,31 +341,26 @@ namespace Mist{
}
dataheader[0] |= 0x30;
}
if (track.codec == "Nellymoser"){
if (track.rate == 8000){
dataheader[0] |= 0x50;
}else if (track.rate == 16000){
dataheader[0] |= 0x40;
}else{
dataheader[0] |= 0x60;
}
if (codec == "Nellymoser"){
dataheader[0] |= (rate == 8000 ? 0x50 : (rate == 16000 ? 0x40 : 0x60));
}
if (track.codec == "ALAW"){dataheader[0] |= 0x70;}
if (track.codec == "ULAW"){dataheader[0] |= 0x80;}
if (track.codec == "Speex"){dataheader[0] |= 0xB0;}
if (track.rate >= 44100){
if (codec == "ALAW"){dataheader[0] |= 0x70;}
if (codec == "ULAW"){dataheader[0] |= 0x80;}
if (codec == "Speex"){dataheader[0] |= 0xB0;}
if (rate >= 44100){
dataheader[0] |= 0x0C;
}else if (track.rate >= 22050){
}else if (rate >= 22050){
dataheader[0] |= 0x08;
}else if (track.rate >= 11025){
}else if (rate >= 11025){
dataheader[0] |= 0x04;
}
if (track.size != 8){dataheader[0] |= 0x02;}
if (track.channels > 1){dataheader[0] |= 0x01;}
if (M.getSize(thisIdx) != 8){dataheader[0] |= 0x02;}
if (M.getChannels(thisIdx) > 1){dataheader[0] |= 0x01;}
}
data_len += dheader_len;
unsigned int timestamp = thisPacket.getTime() - rtmpOffset;
uint64_t timestamp = thisPacket.getTime() - rtmpOffset;
// make sure we don't go negative
if (rtmpOffset > (int64_t)thisPacket.getTime()){
timestamp = 0;
@ -378,8 +369,8 @@ namespace Mist{
bool allow_short = RTMPStream::lastsend.count(4);
RTMPStream::Chunk &prev = RTMPStream::lastsend[4];
unsigned char chtype = 0x00;
unsigned int header_len = 12;
uint8_t chtype = 0x00;
size_t header_len = 12;
bool time_is_diff = false;
if (allow_short && (prev.cs_id == 4)){
if (prev.msg_stream_id == 1){
@ -454,9 +445,9 @@ namespace Mist{
// sent actual data - never send more than chunk_snd_max at a time
// interleave blocks of max chunk_snd_max bytes with 0xC4 bytes to indicate continue
unsigned int len_sent = 0;
size_t len_sent = 0;
while (len_sent < data_len){
unsigned int to_send = std::min(data_len - len_sent, RTMPStream::chunk_snd_max);
size_t to_send = std::min(data_len - len_sent, RTMPStream::chunk_snd_max);
if (!len_sent){
myConn.SendNow(dataheader, dheader_len);
RTMPStream::snd_cnt += dheader_len; // update the sent data counter
@ -480,15 +471,20 @@ namespace Mist{
void OutRTMP::sendHeader(){
FLV::Tag tag;
tag.DTSCMetaInit(myMeta, selectedTracks);
std::set<size_t> selectedTracks;
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
selectedTracks.insert(it->first);
}
tag.DTSCMetaInit(meta, selectedTracks);
if (tag.len){myConn.SendNow(RTMPStream::SendMedia(tag));}
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
if (myMeta.tracks[*it].type == "video"){
if (tag.DTSCVideoInit(myMeta.tracks[*it])){myConn.SendNow(RTMPStream::SendMedia(tag));}
for (std::set<size_t>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
std::string type = M.getType(*it);
if (type == "video"){
if (tag.DTSCVideoInit(meta, *it)){myConn.SendNow(RTMPStream::SendMedia(tag));}
}
if (myMeta.tracks[*it].type == "audio"){
if (tag.DTSCAudioInit(myMeta.tracks[*it])){myConn.SendNow(RTMPStream::SendMedia(tag));}
if (type == "audio"){
if (tag.DTSCAudioInit(meta, *it)){myConn.SendNow(RTMPStream::SendMedia(tag));}
}
}
sentHeader = true;
@ -500,12 +496,13 @@ namespace Mist{
if (maxbps && (Util::bootSecs() - myConn.connTime()) &&
myConn.dataDown() / (Util::bootSecs() - myConn.connTime()) > maxbps){
if (!slowWarned){
WARN_MSG("Slowing down connection from %s because rate of %llukbps > %llukbps",
WARN_MSG("Slowing down connection from %s because rate of %" PRIu64 "kbps > %" PRIu32
"kbps",
getConnectedHost().c_str(),
(myConn.dataDown() / (Util::bootSecs() - myConn.connTime())) / 128, maxbps / 128);
slowWarned = true;
}
Util::sleep(250);
Util::sleep(50);
}
Output::requestHandler();
}
@ -585,13 +582,13 @@ namespace Mist{
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("")); // server properties
amfReply.getContentP(2)->addContent(AMF::Object("fmsVer", "FMS/3,5,5,2004"));
amfReply.getContentP(2)->addContent(AMF::Object("capabilities", (double)31));
amfReply.getContentP(2)->addContent(AMF::Object("mode", (double)1));
amfReply.getContentP(2)->addContent(AMF::Object("capabilities", 31.0));
amfReply.getContentP(2)->addContent(AMF::Object("mode", 1.0));
amfReply.addContent(AMF::Object("")); // info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetConnection.Connect.Success"));
amfReply.getContentP(3)->addContent(AMF::Object("description", "Connection succeeded."));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", 1337));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
amfReply.getContentP(3)->addContent(AMF::Object("objectEncoding", objencoding));
// amfReply.getContentP(3)->addContent(AMF::Object("data", AMF::AMF0_ECMA_ARRAY));
// amfReply.getContentP(3)->getContentP(4)->addContent(AMF::Object("version", "3,5,4,1004"));
@ -613,10 +610,10 @@ namespace Mist{
if (amfData.getContentP(0)->StrValue() == "createStream"){
// send a _result reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", (double)1)); // stream ID - we use 1
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", 1.0)); // stream ID - we use 1
sendCommand(amfReply, messageType, streamId);
// myConn.SendNow(RTMPStream::SendUSR(0, 1)); //send UCM StreamBegin (0), stream 1
return;
@ -624,25 +621,25 @@ namespace Mist{
if (amfData.getContentP(0)->StrValue() == "ping"){
// send a _result reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", "Pong!")); // stream ID - we use 1
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", "Pong!")); // stream ID - we use 1
sendCommand(amfReply, messageType, streamId);
return;
}// createStream
if (amfData.getContentP(0)->StrValue() == "closeStream"){
myConn.SendNow(RTMPStream::SendUSR(1, 1)); // send UCM StreamEOF (1), stream 1
AMF::Object amfreply("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", (double)0)); // transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", 0.0)); // transaction ID
amfreply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Stop"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Stream stopped"));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfreply, 20, 1);
stop();
return;
@ -659,10 +656,10 @@ namespace Mist{
if ((amfData.getContentP(0)->StrValue() == "FCSubscribe")){
// send a FCPublish reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onFCSubscribe")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.addContent(AMF::Object("", "onFCSubscribe")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Start"));
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(
@ -674,10 +671,10 @@ namespace Mist{
if ((amfData.getContentP(0)->StrValue() == "FCPublish")){
// send a FCPublish reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onFCPublish")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.addContent(AMF::Object("", "onFCPublish")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Publish.Start"));
amfReply.getContentP(3)->addContent(AMF::Object(
"description", "Please follow up with publish command, as we ignore this command."));
@ -687,10 +684,10 @@ namespace Mist{
if (amfData.getContentP(0)->StrValue() == "releaseStream"){
// send a _result reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", AMF::AMF0_UNDEFINED)); // stream ID?
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", AMF::AMF0_UNDEFINED)); // stream ID?
sendCommand(amfReply, messageType, streamId);
return;
}// releaseStream
@ -698,10 +695,10 @@ namespace Mist{
(amfData.getContentP(0)->StrValue() == "getMovLen")){
// send a _result reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", (double)0)); // zero length
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", 0.0)); // zero length
sendCommand(amfReply, messageType, streamId);
return;
}// getStreamLength
@ -723,6 +720,7 @@ namespace Mist{
}
if (amfData.getContentP(3)){
streamName = Encodings::URL::decode(amfData.getContentP(3)->StrValue());
Util::Config::streamName = streamName;
reqUrl += "/" + streamName; // LTS
/*LTS-START*/
@ -740,13 +738,18 @@ namespace Mist{
size_t lSlash = newUrl.rfind('/');
if (lSlash != std::string::npos){
streamName = newUrl.substr(lSlash + 1);
Util::Config::streamName = streamName;
}else{
streamName = newUrl;
Util::Config::streamName = streamName;
}
}
/*LTS-END*/
if (streamName.find('/')){streamName = streamName.substr(0, streamName.find('/'));}
if (streamName.find('/')){
streamName = streamName.substr(0, streamName.find('/'));
Util::Config::streamName = streamName;
}
size_t colonPos = streamName.find(':');
if (colonPos != std::string::npos && colonPos < 6){
@ -756,6 +759,7 @@ namespace Mist{
}else{
streamName = oldName.substr(colonPos + 1) + std::string(".") + oldName.substr(0, colonPos);
}
Util::Config::streamName = streamName;
}
Util::sanitizeName(streamName);
@ -767,14 +771,14 @@ namespace Mist{
}
// send a status reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onStatus")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.addContent(AMF::Object("", "onStatus")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Publish.Start"));
amfReply.getContentP(3)->addContent(AMF::Object("description", "Stream is now published!"));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfReply, messageType, streamId);
/*
//send a _result reply
@ -791,10 +795,10 @@ namespace Mist{
if (amfData.getContentP(0)->StrValue() == "checkBandwidth"){
// send a _result reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", "_result")); // result success
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
sendCommand(amfReply, messageType, streamId);
return;
}// checkBandwidth
@ -802,16 +806,18 @@ namespace Mist{
if ((amfData.getContentP(0)->StrValue() == "play") ||
(amfData.getContentP(0)->StrValue() == "play2")){
// set reply number and stream name, actual reply is sent up in the ss.spool() handler
int playTransaction = amfData.getContentP(1)->NumValue();
int playMessageType = messageType;
int playStreamId = streamId;
double playTransaction = amfData.getContentP(1)->NumValue();
int8_t playMessageType = messageType;
int32_t playStreamId = streamId;
streamName = Encodings::URL::decode(amfData.getContentP(3)->StrValue());
Util::Config::streamName = streamName;
reqUrl += "/" + streamName; // LTS
// handle variables
if (streamName.find('?') != std::string::npos){
std::string tmpVars = streamName.substr(streamName.find('?') + 1);
streamName = streamName.substr(0, streamName.find('?'));
Util::Config::streamName = streamName;
HTTP::parseVars(tmpVars, targetParams);
}
@ -823,6 +829,7 @@ namespace Mist{
}else{
streamName = oldName.substr(colonPos + 1) + std::string(".") + oldName.substr(0, colonPos);
}
Util::Config::streamName = streamName;
}
Util::sanitizeName(streamName);
@ -846,33 +853,33 @@ namespace Mist{
// send a status reply
AMF::Object amfreply("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", (double)playTransaction)); // same transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", playTransaction)); // same transaction ID
amfreply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Reset"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Playing and resetting..."));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfreply, playMessageType, playStreamId);
// send streamisrecorded if stream, well, is recorded.
if (myMeta.vod){// isMember("length") && Strm.metadata["length"].asInt() > 0){
if (M.getVod()){// isMember("length") && Strm.metadata["length"].asInt() > 0){
myConn.SendNow(RTMPStream::SendUSR(4, 1)); // send UCM StreamIsRecorded (4), stream 1
}
// send streambegin
myConn.SendNow(RTMPStream::SendUSR(0, 1)); // send UCM StreamBegin (0), stream 1
// and more reply
amfreply = AMF::Object("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", (double)playTransaction)); // same transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", playTransaction)); // same transaction ID
amfreply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Start"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Playing!"));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
initialSeek();
rtmpOffset = currentTime();
amfreply.getContentP(3)->addContent(AMF::Object("timecodeOffset", (double)rtmpOffset));
@ -887,54 +894,54 @@ namespace Mist{
}// play
if ((amfData.getContentP(0)->StrValue() == "seek")){
// set reply number and stream name, actual reply is sent up in the ss.spool() handler
int playTransaction = amfData.getContentP(1)->NumValue();
int playMessageType = messageType;
int playStreamId = streamId;
double playTransaction = amfData.getContentP(1)->NumValue();
int8_t playMessageType = messageType;
int32_t playStreamId = streamId;
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onStatus")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.addContent(AMF::Object("", "onStatus")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Seek.Notify"));
amfReply.getContentP(3)->addContent(
AMF::Object("description", "Seeking to the specified time"));
amfReply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfReply, playMessageType, playStreamId);
seek((long long int)amfData.getContentP(3)->NumValue(), true);
seek((long long int)amfData.getContentP(3)->NumValue());
// send a status reply
AMF::Object amfreply("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", (double)playTransaction)); // same transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", playTransaction)); // same transaction ID
amfreply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Reset"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Playing and resetting..."));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfreply, playMessageType, playStreamId);
// send streamisrecorded if stream, well, is recorded.
if (myMeta.vod){// isMember("length") && Strm.metadata["length"].asInt() > 0){
if (M.getVod()){// isMember("length") && Strm.metadata["length"].asInt() > 0){
myConn.SendNow(RTMPStream::SendUSR(4, 1)); // send UCM StreamIsRecorded (4), stream 1
}
// send streambegin
myConn.SendNow(RTMPStream::SendUSR(0, 1)); // send UCM StreamBegin (0), stream 1
// and more reply
amfreply = AMF::Object("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", (double)playTransaction)); // same transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.addContent(AMF::Object("", "onStatus")); // status reply
amfreply.addContent(AMF::Object("", playTransaction)); // same transaction ID
amfreply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfreply.addContent(AMF::Object("")); // info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Start"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Playing!"));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
if (myMeta.live){
amfreply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
if (M.getLive()){
rtmpOffset = currentTime();
amfreply.getContentP(3)->addContent(AMF::Object("timecodeOffset", (double)rtmpOffset));
}
@ -947,35 +954,35 @@ namespace Mist{
return;
}// seek
if ((amfData.getContentP(0)->StrValue() == "pauseRaw") || (amfData.getContentP(0)->StrValue() == "pause")){
int playMessageType = messageType;
int playStreamId = streamId;
int8_t playMessageType = messageType;
int32_t playStreamId = streamId;
if (amfData.getContentP(3)->NumValue()){
parseData = false;
// send a status reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onStatus")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.addContent(AMF::Object("", "onStatus")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Pause.Notify"));
amfReply.getContentP(3)->addContent(AMF::Object("description", "Pausing playback"));
amfReply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfReply, playMessageType, playStreamId);
}else{
parseData = true;
// send a status reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onStatus")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.addContent(AMF::Object("", "onStatus")); // status reply
amfReply.addContent(amfData.getContent(1)); // same transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // null - command info
amfReply.addContent(AMF::Object("")); // info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Unpause.Notify"));
amfReply.getContentP(3)->addContent(AMF::Object("description", "Resuming playback"));
amfReply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", 1337.0));
sendCommand(amfReply, playMessageType, playStreamId);
}
return;
@ -1071,34 +1078,34 @@ namespace Mist{
amfData.getContentP(1)->NumValue() == 1){
{
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "releaseStream")); // command
amfReply.addContent(AMF::Object("", (double)2)); // transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // options
amfReply.addContent(AMF::Object("", streamOut)); // stream name
amfReply.addContent(AMF::Object("", "releaseStream")); // command
amfReply.addContent(AMF::Object("", 2.0)); // transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // options
amfReply.addContent(AMF::Object("", streamOut)); // stream name
sendCommand(amfReply, 20, 0);
}
{
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "FCPublish")); // command
amfReply.addContent(AMF::Object("", (double)3)); // transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // options
amfReply.addContent(AMF::Object("", streamOut)); // stream name
amfReply.addContent(AMF::Object("", "FCPublish")); // command
amfReply.addContent(AMF::Object("", 3.0)); // transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // options
amfReply.addContent(AMF::Object("", streamOut)); // stream name
sendCommand(amfReply, 20, 0);
}
{
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "createStream")); // command
amfReply.addContent(AMF::Object("", (double)4)); // transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // options
amfReply.addContent(AMF::Object("", "createStream")); // command
amfReply.addContent(AMF::Object("", 4.0)); // transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // options
sendCommand(amfReply, 20, 0);
}
{
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "publish")); // command
amfReply.addContent(AMF::Object("", (double)5)); // transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); // options
amfReply.addContent(AMF::Object("", streamOut)); // stream name
amfReply.addContent(AMF::Object("", "live")); // stream name
amfReply.addContent(AMF::Object("", "publish")); // command
amfReply.addContent(AMF::Object("", 5.0)); // transaction ID
amfReply.addContent(AMF::Object("", 0.0, AMF::AMF0_NULL)); // options
amfReply.addContent(AMF::Object("", streamOut)); // stream name
amfReply.addContent(AMF::Object("", "live")); // stream name
sendCommand(amfReply, 20, 1);
}
HIGH_MSG("Publish starting");
@ -1157,8 +1164,8 @@ namespace Mist{
onFinish();
break; // happens when connection breaks unexpectedly
case 1: // set chunk size
RTMPStream::chunk_rec_max = ntohl(*(int *)next.data.c_str());
MEDIUM_MSG("CTRL: Set chunk size: %i", RTMPStream::chunk_rec_max);
RTMPStream::chunk_rec_max = Bit::btohl(next.data.data());
MEDIUM_MSG("CTRL: Set chunk size: %" PRIu64, RTMPStream::chunk_rec_max);
break;
case 2: // abort message - we ignore this one
MEDIUM_MSG("CTRL: Abort message");
@ -1166,7 +1173,7 @@ namespace Mist{
break;
case 3: // ack
VERYHIGH_MSG("CTRL: Acknowledgement");
RTMPStream::snd_window_at = ntohl(*(int *)next.data.c_str());
RTMPStream::snd_window_at = Bit::btohl(next.data.data());
RTMPStream::snd_window_at = RTMPStream::snd_cnt;
break;
case 4:{
@ -1180,51 +1187,48 @@ namespace Mist{
// 6 = pingrequest, 4 bytes data
// 7 = pingresponse, 4 bytes data
// we don't need to process this
short int ucmtype = ntohs(*(short int *)next.data.c_str());
int16_t ucmtype = Bit::btohs(next.data.data());
switch (ucmtype){
case 0:
MEDIUM_MSG("CTRL: UCM StreamBegin %i", ntohl(*((int *)(next.data.c_str() + 2))));
break;
case 1:
MEDIUM_MSG("CTRL: UCM StreamEOF %i", ntohl(*((int *)(next.data.c_str() + 2))));
break;
case 2:
MEDIUM_MSG("CTRL: UCM StreamDry %i", ntohl(*((int *)(next.data.c_str() + 2))));
MEDIUM_MSG("CTRL: UCM StreamBegin %" PRIu32, Bit::btohl(next.data.data() + 2));
break;
case 1: MEDIUM_MSG("CTRL: UCM StreamEOF %" PRIu32, Bit::btohl(next.data.data() + 2)); break;
case 2: MEDIUM_MSG("CTRL: UCM StreamDry %" PRIu32, Bit::btohl(next.data.data() + 2)); break;
case 3:
MEDIUM_MSG("CTRL: UCM SetBufferLength %i %i", ntohl(*((int *)(next.data.c_str() + 2))),
ntohl(*((int *)(next.data.c_str() + 6))));
MEDIUM_MSG("CTRL: UCM SetBufferLength %" PRIu32 " %" PRIu32,
Bit::btohl(next.data.data() + 2), Bit::btohl(next.data.data() + 6));
break;
case 4:
MEDIUM_MSG("CTRL: UCM StreamIsRecorded %i", ntohl(*((int *)(next.data.c_str() + 2))));
MEDIUM_MSG("CTRL: UCM StreamIsRecorded %" PRIu32, Bit::btohl(next.data.data() + 2));
break;
case 6:
MEDIUM_MSG("CTRL: UCM PingRequest %i", ntohl(*((int *)(next.data.c_str() + 2))));
MEDIUM_MSG("CTRL: UCM PingRequest %" PRIu32, Bit::btohl(next.data.data() + 2));
myConn.SendNow(RTMPStream::SendUSR(7, 1)); // send UCM PingResponse (7)
break;
case 7:
MEDIUM_MSG("CTRL: UCM PingResponse %i", ntohl(*((int *)(next.data.c_str() + 2))));
MEDIUM_MSG("CTRL: UCM PingResponse %" PRIu32, Bit::btohl(next.data.data() + 2));
break;
default: MEDIUM_MSG("CTRL: UCM Unknown (%hi)", ucmtype); break;
default: MEDIUM_MSG("CTRL: UCM Unknown (%" PRId16 ")", ucmtype); break;
}
}break;
case 5: // window size of other end
MEDIUM_MSG("CTRL: Window size");
RTMPStream::rec_window_size = ntohl(*(int *)next.data.c_str());
RTMPStream::rec_window_size = Bit::btohl(next.data.data());
RTMPStream::rec_window_at = RTMPStream::rec_cnt;
myConn.SendNow(RTMPStream::SendCTL(3, RTMPStream::rec_cnt)); // send ack (msg 3)
break;
case 6:
MEDIUM_MSG("CTRL: Set peer bandwidth");
// 4 bytes window size, 1 byte limit type (ignored)
RTMPStream::snd_window_size = ntohl(*(int *)next.data.c_str());
RTMPStream::snd_window_size = Bit::btohl(next.data.data());
myConn.SendNow(RTMPStream::SendCTL(5, RTMPStream::snd_window_size)); // send window acknowledgement size (msg 5)
break;
case 8: // audio data
case 9: // video data
case 18:{// meta data
static std::map<unsigned int, AMF::Object> pushMeta;
static std::map<uint64_t, uint64_t> lastTagTime;
static std::map<size_t, AMF::Object> pushMeta;
static std::map<size_t, uint64_t> lastTagTime;
static std::map<size_t, size_t> reTrackToID;
if (!isInitialized){
MEDIUM_MSG("Received useless media data");
onFinish();
@ -1239,18 +1243,18 @@ namespace Mist{
amf_storage = &(pushMeta.begin()->second);
}
unsigned int reTrack = next.cs_id * 3 + (F.data[0] == 0x09 ? 1 : (F.data[0] == 0x08 ? 2 : 3));
F.toMeta(myMeta, *amf_storage, reTrack);
size_t reTrack = next.cs_id * 3 + (F.data[0] == 0x09 ? 1 : (F.data[0] == 0x08 ? 2 : 3));
if (!reTrackToID.count(reTrack)){reTrackToID[reTrack] = INVALID_TRACK_ID;}
F.toMeta(meta, *amf_storage, reTrackToID[reTrack]);
if (F.getDataLen() && !(F.needsInitData() && F.isInitData())){
uint64_t tagTime = next.timestamp;
if (!bootMsOffset){
if (myMeta.bootMsOffset){
bootMsOffset = myMeta.bootMsOffset;
rtmpOffset = (Util::bootMS() - tagTime) - bootMsOffset;
}else{
bootMsOffset = Util::bootMS() - tagTime;
rtmpOffset = 0;
}
if (!M.getBootMsOffset()){
meta.setBootMsOffset(Util::bootMS() - tagTime);
rtmpOffset = 0;
setRtmpOffset = true;
}else if (!setRtmpOffset){
rtmpOffset = (Util::bootMS() - tagTime) - M.getBootMsOffset();
setRtmpOffset = true;
}
tagTime += rtmpOffset;
uint64_t &ltt = lastTagTime[reTrack];
@ -1258,17 +1262,23 @@ namespace Mist{
// We allow wrapping around the 32 bits maximum value if the most significant 8 bits are set.
/// \TODO Provide time continuity for wrap-around.
if (ltt && tagTime < ltt && ltt < 0xFF000000ull){
FAIL_MSG("Timestamps went from %llu to %llu (decreased): disconnecting!", ltt, tagTime);
FAIL_MSG("Timestamps went from %" PRIu64 " to %" PRIu64 " (decreased): disconnecting!", ltt, tagTime);
onFinish();
break;
}
// Check if we went more than 10 minutes into the future
if (ltt && tagTime > ltt + 600000){
FAIL_MSG("Timestamps went from %llu to %llu (> 10m in future): disconnecting!", ltt, tagTime);
FAIL_MSG("Timestamps went from %" PRIu64 " to %" PRIu64
" (> 10m in future): disconnecting!",
ltt, tagTime);
onFinish();
break;
}
if (myMeta.tracks[reTrack].codec == "PCM" && myMeta.tracks[reTrack].size == 16){
uint64_t idx = reTrackToID[reTrack];
if (idx != INVALID_TRACK_ID && !userSelect.count(idx)){
userSelect[idx].reload(streamName, idx, COMM_STATUS_SOURCE);
}
if (M.getCodec(idx) == "PCM" && M.getSize(idx) == 16){
char *ptr = F.getData();
uint32_t ptrSize = F.getDataLen();
for (uint32_t i = 0; i < ptrSize; i += 2){
@ -1277,15 +1287,10 @@ namespace Mist{
ptr[i + 1] = tmpchar;
}
}
thisPacket.genericFill(tagTime, F.offset(), reTrack, F.getData(), F.getDataLen(), 0,
F.isKeyframe, F.isKeyframe ? bootMsOffset : 0);
ltt = tagTime;
if (!nProxy.userClient.getData()){
char userPageName[NAME_BUFFER_SIZE];
snprintf(userPageName, NAME_BUFFER_SIZE, SHM_USERS, streamName.c_str());
nProxy.userClient = IPC::sharedClient(userPageName, PLAY_EX_SIZE, true);
}
bufferLivePacket(thisPacket);
// bufferLivePacket(thisPacket);
bufferLivePacket(tagTime, F.offset(), idx, F.getData(), F.getDataLen(), 0, F.isKeyframe);
if (!meta){config->is_active = false;}
}
break;
}

View file

@ -20,10 +20,10 @@ namespace Mist{
protected:
std::string streamOut; ///< When pushing out, the output stream name
bool setRtmpOffset;
int64_t rtmpOffset;
uint64_t lastOutTime;
unsigned int maxbps;
int64_t bootMsOffset;
uint32_t maxbps;
std::string app_name;
void parseChunk(Socket::Buffer &inputBuffer);
void parseAMFCommand(AMF::Object &amfData, int messageType, int streamId);

View file

@ -22,23 +22,19 @@ namespace Mist{
/// Takes incoming packets and buffers them.
void OutRTSP::incomingPacket(const DTSC::Packet &pkt){
if (!bootMsOffset){
if (myMeta.bootMsOffset){
bootMsOffset = myMeta.bootMsOffset;
packetOffset = (Util::bootMS() - pkt.getTime()) - bootMsOffset;
}else{
bootMsOffset = Util::bootMS() - pkt.getTime();
packetOffset = 0;
}
if (!M.getBootMsOffset()){
meta.setBootMsOffset(Util::bootMS() - pkt.getTime());
packetOffset = 0;
setPacketOffset = true;
}else if (!setPacketOffset){
packetOffset = (Util::bootMS() - pkt.getTime()) - M.getBootMsOffset();
setPacketOffset = true;
}
/// \TODO Make this less inefficient. Seriously. Maybe use DTSC::RetimedPacket by extending with bmo functionality...?
static DTSC::Packet newPkt;
char *pktData;
size_t pktDataLen;
pkt.getString("data", pktData, pktDataLen);
newPkt.genericFill(pkt.getTime() + packetOffset, pkt.getInt("offset"), pkt.getTrackId(),
pktData, pktDataLen, 0, pkt.getFlag("keyframe"), bootMsOffset);
bufferLivePacket(newPkt);
bufferLivePacket(pkt.getTime() + packetOffset, pkt.getInt("offset"), pkt.getTrackId(), pktData,
pktDataLen, 0, pkt.getFlag("keyframe"));
// bufferLivePacket(DTSC::RetimedPacket(pkt.getTime() + packetOffset, pkt));
}
void OutRTSP::incomingRTP(const uint64_t track, const RTP::Packet &p){
@ -46,9 +42,8 @@ namespace Mist{
}
OutRTSP::OutRTSP(Socket::Connection &myConn) : Output(myConn){
connectedAt = Util::epoch() + 2208988800ll;
pausepoint = 0;
bootMsOffset = 0;
setPacketOffset = false;
packetOffset = 0;
setBlocking(false);
maxSkipAhead = 0;
@ -58,7 +53,7 @@ namespace Mist{
mainConn = &myConn;
classPointer = this;
sdpState.incomingPacketCallback = insertPacket;
sdpState.myMeta = &myMeta;
sdpState.myMeta = &meta;
}
/// Function used to send RTP packets over UDP
@ -66,7 +61,7 @@ namespace Mist{
///\param data The RTP Packet that needs to be sent
///\param len The size of data
///\param channel Not used here, but is kept for compatibility with sendTCP
void sendUDP(void *socket, char *data, unsigned int len, unsigned int channel){
void sendUDP(void *socket, const char *data, size_t len, uint8_t){
((Socket::UDPConnection *)socket)->SendNow(data, len);
if (mainConn){mainConn->addUp(len);}
}
@ -76,7 +71,7 @@ namespace Mist{
///\param data The RTP Packet that needs to be sent
///\param len The size of data
///\param channel Used to distinguish different data streams when sending RTP over TCP
void sendTCP(void *socket, char *data, unsigned int len, unsigned int channel){
void sendTCP(void *socket, const char *data, size_t len, uint8_t channel){
// 1 byte '$', 1 byte channel, 2 bytes length
char buf[] = "$$$$";
buf[1] = channel;
@ -130,7 +125,6 @@ namespace Mist{
char *dataPointer = 0;
size_t dataLen = 0;
thisPacket.getString("data", dataPointer, dataLen);
uint32_t tid = thisPacket.getTrackId();
uint64_t timestamp = thisPacket.getTime();
// if we're past the pausing point, seek to it, and pause immediately
@ -140,31 +134,78 @@ namespace Mist{
return;
}
if (myMeta.live && lastTimeSync + 666 < timestamp){
if (M.getLive() && lastTimeSync + 200 < timestamp){
lastTimeSync = timestamp;
updateMeta();
if (liveSeek()){return;}
}
void *socket = 0;
void (*callBack)(void *, char *, unsigned int, unsigned int) = 0;
void (*callBack)(void *, const char *, size_t, uint8_t) = 0;
if (sdpState.tracks[tid].channel == -1){// UDP connection
socket = &sdpState.tracks[tid].data;
if (sdpState.tracks[thisIdx].channel == -1){// UDP connection
socket = &sdpState.tracks[thisIdx].data;
callBack = sendUDP;
if (Util::epoch() / 5 != sdpState.tracks[tid].rtcpSent){
sdpState.tracks[tid].rtcpSent = Util::epoch() / 5;
sdpState.tracks[tid].pack.sendRTCP_SR(connectedAt, &sdpState.tracks[tid].rtcp, tid, myMeta, sendUDP);
if (Util::bootSecs() != sdpState.tracks[thisIdx].rtcpSent){
sdpState.tracks[thisIdx].pack.setTimestamp(timestamp * SDP::getMultiplier(&M, thisIdx));
sdpState.tracks[thisIdx].rtcpSent = Util::bootSecs();
sdpState.tracks[thisIdx].pack.sendRTCP_SR(&sdpState.tracks[thisIdx].rtcp, sendUDP);
}
}else{
socket = &myConn;
callBack = sendTCP;
if (Util::bootSecs() != sdpState.tracks[thisIdx].rtcpSent){
sdpState.tracks[thisIdx].pack.setTimestamp(timestamp * SDP::getMultiplier(&M, thisIdx));
sdpState.tracks[thisIdx].rtcpSent = Util::bootSecs();
sdpState.tracks[thisIdx].pack.sendRTCP_SR(socket, sendTCP);
}
}
uint64_t offset = thisPacket.getInt("offset");
sdpState.tracks[tid].pack.setTimestamp((timestamp + offset) * SDP::getMultiplier(myMeta.tracks[tid]));
sdpState.tracks[tid].pack.sendData(socket, callBack, dataPointer, dataLen,
sdpState.tracks[tid].channel, myMeta.tracks[tid].codec);
sdpState.tracks[thisIdx].pack.setTimestamp((timestamp + offset) * SDP::getMultiplier(&M, thisIdx));
sdpState.tracks[thisIdx].pack.sendData(socket, callBack, dataPointer, dataLen,
sdpState.tracks[thisIdx].channel, meta.getCodec(thisIdx));
static uint64_t lastAnnounce = Util::bootSecs();
if (reqUrl.size() && lastAnnounce + 5 < Util::bootSecs()){
INFO_MSG("Sending announce");
lastAnnounce = Util::bootSecs();
std::stringstream transportString;
transportString.precision(3);
transportString << std::fixed
<< "v=0\r\n"
"o=- "
<< Util::getMS()
<< " 1 IN IP4 127.0.0.1\r\n"
"s="
<< streamName
<< "\r\n"
"c=IN IP4 0.0.0.0\r\n"
"i="
<< streamName
<< "\r\n"
"u="
<< reqUrl
<< "\r\n"
"t=0 0\r\n"
"a=tool:MistServer\r\n"
"a=type:broadcast\r\n"
"a=control:*\r\n"
<< "a=range:npt=" << ((double)startTime()) / 1000.0 << "-"
<< ((double)endTime()) / 1000.0 << "\r\n";
std::set<size_t> validTracks = M.getValidTracks();
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); ++it){
transportString << SDP::mediaDescription(&M, *it);
}
HTTP_S.Clean();
HTTP_S.SetHeader("Content-Type", "application/sdp");
HTTP_S.SetHeader("Content-Base", reqUrl);
HTTP_S.method = "ANNOUNCE";
HTTP_S.url = reqUrl;
HTTP_S.protocol = "RTSP/1.0";
HTTP_S.SendRequest(myConn, transportString.str());
HTTP_R.Clean();
}
}
/// This request handler also checks for UDP packets
@ -177,10 +218,20 @@ namespace Mist{
RTP::MAX_SEND = config->getInteger("maxsend");
// if needed, parse TCP packets, and cancel if it is not safe (yet) to read HTTP/RTSP packets
while ((!expectTCP || handleTCP()) && HTTP_R.Read(myConn)){
// check for response codes. Assume a 3-letter URL is a response code.
if (HTTP_R.url.size() == 3){
INFO_MSG("Received response: %s %s", HTTP_R.url.c_str(), HTTP_R.protocol.c_str());
if (HTTP_R.url == "501"){
// Not implemented = probably a response to our ANNOUNCE. Turn them off.
reqUrl.clear();
}
HTTP_R.Clean();
continue;
}
// cancel broken URLs
if (HTTP_R.url.size() < 8){
WARN_MSG("Invalid data found in RTSP input around ~%llub - disconnecting!", myConn.dataDown());
myConn.close();
WARN_MSG("Invalid data found in RTSP input around ~%" PRIu64 "b - disconnecting!", myConn.dataDown());
onFail("Invalid RTSP Data", true);
break;
}
HTTP_S.Clean();
@ -189,8 +240,7 @@ namespace Mist{
// set the streamname and session
if (!source.size()){
std::string source = HTTP_R.url.substr(7);
unsigned int loc = std::min(source.find(':'), source.find('/'));
source = source.substr(0, loc);
source = source.substr(0, std::min(source.find(':'), source.find('/')));
}
size_t found = HTTP_R.url.find('/', 7);
if (found != std::string::npos && !streamName.size()){
@ -254,10 +304,13 @@ namespace Mist{
continue;
}
if (HTTP_R.method == "DESCRIBE"){
reqUrl = HTTP::URL(HTTP_R.url).link(streamName).getProxyUrl();
initialize();
selectedTracks.clear();
userSelect.clear();
std::stringstream transportString;
transportString << "v=0\r\n"
transportString.precision(3);
transportString << std::fixed
<< "v=0\r\n"
"o=- "
<< Util::getMS()
<< " 1 IN IP4 127.0.0.1\r\n"
@ -269,26 +322,21 @@ namespace Mist{
<< streamName
<< "\r\n"
"u="
<< HTTP_R.url.substr(0, HTTP_R.url.rfind('/')) << "/" << streamName
<< reqUrl
<< "\r\n"
"t=0 0\r\n"
"a=tool:MistServer\r\n"
"a=type:broadcast\r\n"
"a=control:*\r\n";
if (myMeta.live){
transportString << "a=range:npt=" << ((double)startTime()) / 1000.0 << "-\r\n";
}else{
transportString << "a=range:npt=" << ((double)startTime()) / 1000.0 << "-"
<< ((double)endTime()) / 1000.0 << "\r\n";
}
"a=control:*\r\n"
<< "a=range:npt=" << ((double)startTime()) / 1000.0 << "-"
<< ((double)endTime()) / 1000.0 << "\r\n";
for (std::map<unsigned int, DTSC::Track>::iterator objIt = myMeta.tracks.begin();
objIt != myMeta.tracks.end(); ++objIt){
transportString << SDP::mediaDescription(objIt->second);
std::set<size_t> validTracks = M.getValidTracks();
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); ++it){
transportString << SDP::mediaDescription(&M, *it);
}
transportString << "\r\n";
HIGH_MSG("Reply: %s", transportString.str().c_str());
HTTP_S.SetHeader("Content-Base", HTTP_R.url.substr(0, HTTP_R.url.rfind('/')) + "/" + streamName);
HTTP_S.SetHeader("Content-Base", reqUrl);
HTTP_S.SetHeader("Content-Type", "application/sdp");
HTTP_S.SetBody(transportString.str());
HTTP_S.SendResponse("200", "OK", myConn);
@ -296,17 +344,18 @@ namespace Mist{
continue;
}
if (HTTP_R.method == "SETUP"){
uint32_t trackNo = sdpState.parseSetup(HTTP_R, getConnectedHost(), source);
size_t trackNo = sdpState.parseSetup(HTTP_R, getConnectedHost(), source);
HTTP_S.SetHeader("Expires", HTTP_S.GetHeader("Date"));
HTTP_S.SetHeader("Cache-Control", "no-cache");
if (trackNo){
selectedTracks.insert(trackNo);
if (trackNo != INVALID_TRACK_ID){
userSelect[trackNo].reload(streamName, trackNo);
if (isPushing()){userSelect[trackNo].setStatus(COMM_STATUS_SOURCE);}
SDP::Track &sdpTrack = sdpState.tracks[trackNo];
if (sdpTrack.channel != -1){expectTCP = true;}
HTTP_S.SetHeader("Transport", sdpTrack.transportString);
HTTP_S.SendResponse("200", "OK", myConn);
INFO_MSG("Setup completed for track %lu (%s): %s", trackNo,
myMeta.tracks[trackNo].codec.c_str(), sdpTrack.transportString.c_str());
INFO_MSG("Setup completed for track %zu (%s): %s", trackNo, M.getCodec(trackNo).c_str(),
sdpTrack.transportString.c_str());
}else{
HTTP_S.SendResponse("404", "Track not known or allowed", myConn);
FAIL_MSG("Could not handle setup for %s", HTTP_R.url.c_str());
@ -321,12 +370,12 @@ namespace Mist{
range = range.substr(range.find("npt=") + 4);
if (!range.empty()){
range = range.substr(0, range.find('-'));
uint64_t targetPos = 1000 * atof(range.c_str());
if (targetPos || myMeta.vod){seek(targetPos, true);}
uint64_t targetPos = 1000 * atoll(range.c_str());
if (targetPos || meta.getVod()){seek(targetPos);}
}
}
std::stringstream rangeStr;
if (myMeta.live){
if (meta.getLive()){
rangeStr << "npt=" << currentTime() / 1000 << "." << std::setw(3) << std::setfill('0')
<< currentTime() % 1000 << "-";
}else{
@ -336,13 +385,10 @@ namespace Mist{
}
HTTP_S.SetHeader("Range", rangeStr.str());
std::stringstream infoString;
if (selectedTracks.size()){
for (std::set<unsigned long>::iterator it = selectedTracks.begin();
it != selectedTracks.end(); ++it){
if (!infoString.str().empty()){infoString << ",";}
infoString << sdpState.tracks[*it].rtpInfo(myMeta.tracks[*it],
source + "/" + streamName, currentTime());
}
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
if (!infoString.str().empty()){infoString << ", ";}
infoString << sdpState.tracks[it->first].rtpInfo(M, it->first, source + "/" + streamName,
currentTime());
}
HTTP_S.SetHeader("RTP-Info", infoString.str());
HTTP_S.SendResponse("200", "OK", myConn);
@ -357,7 +403,7 @@ namespace Mist{
if (range.empty()){
stop();
}else{
pausepoint = 1000 * (int)atof(range.c_str());
pausepoint = 1000 * atoll(range.c_str());
if (pausepoint > currentTime()){
pausepoint = 0;
stop();
@ -408,14 +454,14 @@ namespace Mist{
// We have a TCP packet! Read it...
// Format: 1 byte '$', 1 byte channel, 2 bytes len, len bytes binary data
std::string tcpHead = myConn.Received().copy(4);
uint16_t len = ntohs(*(short *)(tcpHead.data() + 2));
uint16_t len = Bit::btohs(tcpHead.data() + 2);
if (!myConn.Received().available(len + 4)){
return false;
}// a TCP RTP packet, but not complete yet
// remove whole packet from buffer, including 4 byte header
std::string tcpPacket = myConn.Received().remove(len + 4);
uint32_t trackNo = sdpState.getTrackNoForChannel(tcpHead.data()[1]);
if (trackNo && isPushing()){
size_t trackNo = sdpState.getTrackNoForChannel(tcpHead.data()[1]);
if ((trackNo != INVALID_TRACK_ID) && isPushing()){
RTP::Packet pkt(tcpPacket.data() + 4, len);
sdpState.tracks[trackNo].sorter.rtpSeq = pkt.getSequence();
incomingRTP(trackNo, pkt);
@ -427,7 +473,7 @@ namespace Mist{
/// Reads and handles RTP packets over UDP, if needed
void OutRTSP::handleUDP(){
if (!isPushing()){return;}
for (std::map<uint32_t, SDP::Track>::iterator it = sdpState.tracks.begin();
for (std::map<size_t, SDP::Track>::iterator it = sdpState.tracks.begin();
it != sdpState.tracks.end(); ++it){
Socket::UDPConnection &s = it->second.data;
it->second.sorter.setCallback(it->first, insertRTP);
@ -436,15 +482,15 @@ namespace Mist{
// wrong sending port, ignore packet
continue;
}
lastRecv = Util::epoch(); // prevent disconnect of idle TCP connection when using UDP
lastRecv = Util::bootSecs(); // prevent disconnect of idle TCP connection when using UDP
myConn.addDown(s.data_len);
RTP::Packet pack(s.data, s.data_len);
if (!it->second.theirSSRC){it->second.theirSSRC = pack.getSSRC();}
it->second.sorter.addPacket(pack);
}
if (selectedTracks.count(it->first) && Util::epoch() / 5 != it->second.rtcpSent){
it->second.rtcpSent = Util::epoch() / 5;
it->second.pack.sendRTCP_RR(connectedAt, it->second, it->first, myMeta, sendUDP);
if (userSelect.count(it->first) && Util::bootSecs() / 5 != it->second.rtcpSent){
it->second.rtcpSent = Util::bootSecs() / 5;
it->second.pack.sendRTCP_RR(it->second, sendUDP);
}
}
}

View file

@ -21,14 +21,12 @@ namespace Mist{
void incomingRTP(const uint64_t track, const RTP::Packet &p);
private:
long long connectedAt; ///< The timestamp the connection was made, as reference point for RTCP
/// packets.
unsigned int pausepoint; ///< Position to pause at, when reached
uint64_t pausepoint; ///< Position to pause at, when reached
SDP::State sdpState;
HTTP::Parser HTTP_R, HTTP_S;
std::string source;
uint64_t lastTimeSync;
int64_t bootMsOffset;
bool setPacketOffset;
int64_t packetOffset;
bool expectTCP;
bool checkPort;

View file

@ -1,4 +1,5 @@
#include "output_sanitycheck.h"
#include <iomanip>
#include <mist/bitfields.h>
#include <mist/checksum.h>
#include <mist/defines.h>
@ -6,41 +7,61 @@
namespace Mist{
OutSanityCheck::OutSanityCheck(Socket::Connection &conn) : Output(conn){
streamName = config->getString("streamname");
if (config->getOption("fakepush", true).size()){
pushMultiplier = config->getInteger("fakepush");
if (!allowPush("testing")){onFinish();}
return;
}
parseData = true;
wantRequest = false;
initialize();
initialSeek();
sortSet.clear();
for (std::set<long unsigned int>::iterator subIt = selectedTracks.begin();
subIt != selectedTracks.end(); subIt++){
keyPart temp;
temp.trackID = *subIt;
temp.time = myMeta.tracks[*subIt].firstms; // timeplace of frame
temp.endTime = myMeta.tracks[*subIt].firstms + myMeta.tracks[*subIt].parts[0].getDuration();
temp.size = myMeta.tracks[*subIt].parts[0].getSize(); // bytesize of frame (alle parts all together)
temp.index = 0;
sortSet.insert(temp);
}
realTime = 0;
if (config->getInteger("seek")){
uint64_t seekPoint = config->getInteger("seek");
while (!sortSet.empty() && sortSet.begin()->time < seekPoint){
if (!M.getLive()){
realTime = 0;
for (std::map<size_t, Comms::Users>::const_iterator it = userSelect.begin();
it != userSelect.end(); it++){
keyPart temp;
temp.index = sortSet.begin()->index + 1;
temp.trackID = sortSet.begin()->trackID;
if (temp.index < myMeta.tracks[temp.trackID].parts.size()){// only insert when there are parts left
temp.time = sortSet.begin()->endTime; // timeplace of frame
temp.endTime =
sortSet.begin()->endTime + myMeta.tracks[temp.trackID].parts[temp.index].getDuration();
temp.size = myMeta.tracks[temp.trackID].parts[temp.index].getSize(); // bytesize of frame
sortSet.insert(temp);
}
// remove highest keyPart
sortSet.erase(sortSet.begin());
temp.trackID = it->first;
temp.time = M.getFirstms(it->first); // timeplace of frame
DTSC::Parts parts(M.parts(it->first));
temp.endTime = M.getFirstms(it->first) + parts.getDuration(parts.getFirstValid());
temp.size = parts.getSize(parts.getFirstValid()); // bytesize of frame (alle parts all together)
temp.index = 0;
sortSet.insert(temp);
}
seek(seekPoint);
if (config->getInteger("seek")){
uint64_t seekPoint = config->getInteger("seek");
while (!sortSet.empty() && sortSet.begin()->time < seekPoint){
keyPart temp = *sortSet.begin();
temp.index++;
DTSC::Parts parts(M.parts(temp.trackID));
if (temp.index < parts.getEndValid()){// only insert when there are parts left
temp.time = temp.endTime; // timeplace of frame
temp.endTime = temp.time + parts.getDuration(temp.index);
temp.size = parts.getSize(temp.index);
; // bytesize of frame
sortSet.insert(temp);
}
// remove highest keyPart
sortSet.erase(sortSet.begin());
}
seek(seekPoint);
}
}
}
/// Perform the first seek when output starts.
/// For live streams, jumps to the live point instead of using the default
/// (VoD-style) initial seek provided by the Output base class.
void OutSanityCheck::initialSeek(){
if (M.getLive()){
// Live stream: seek to the current live playback position.
liveSeek();
// If a keyframe is available and a packet was loaded, send it immediately
// so output starts without waiting for the next loop iteration.
if (getKeyFrame() && thisPacket){
sendNext();
INFO_MSG("Initial sent!");
}
// Anchor the wall-clock reference so pacing lines up with the stream's
// current position (presumably used by the base class for timing — confirm).
firstTime = Util::getMS() - currentTime();
}else{
// VoD: the default base-class initial seek behavior is sufficient.
Output::initialSeek();
}
}
@ -59,17 +80,55 @@ namespace Mist{
config = cfg;
}
/// Handles incoming requests; a no-op while this connection is pushing data in,
/// otherwise defers to the default Output request handling.
void OutSanityCheck::requestHandler(){
// While pushing, incoming data is consumed elsewhere — nothing to do here.
if (pushing){return;}
Output::requestHandler();
}
void OutSanityCheck::sendNext(){
if ((unsigned long)thisPacket.getTrackId() != sortSet.begin()->trackID ||
thisPacket.getTime() != sortSet.begin()->time){
if (M.getLive()){
static uint64_t prevTime = 0;
static size_t prevTrack = 0;
uint64_t t = thisPacket.getTime();
if (t < prevTime){
std::cout << "Time error: ";
std::cout << std::setfill('0') << std::setw(2) << (t / 3600000) << ":" << std::setw(2)
<< ((t % 3600000) / 60000) << ":" << std::setw(2) << ((t % 60000) / 1000) << "."
<< std::setw(3) << (t % 1000);
std::cout << " (" << thisIdx << ")";
std::cout << " < ";
std::cout << std::setfill('0') << std::setw(2) << (prevTime / 3600000) << ":"
<< std::setw(2) << ((prevTime % 3600000) / 60000) << ":" << std::setw(2)
<< ((prevTime % 60000) / 1000) << "." << std::setw(3) << (prevTime % 1000);
std::cout << " (" << prevTrack << ")";
std::cout << std::endl << std::endl;
}else{
prevTime = t;
prevTrack = thisIdx;
}
std::cout << "\033[A" << std::setfill('0') << std::setw(2) << (t / 3600000) << ":"
<< std::setw(2) << ((t % 3600000) / 60000) << ":" << std::setw(2)
<< ((t % 60000) / 1000) << "." << std::setw(3) << (t % 1000) << " ";
uint32_t mainTrack = M.mainTrack();
if (mainTrack == INVALID_TRACK_ID){return;}
t = M.getLastms(mainTrack);
std::cout << std::setfill('0') << std::setw(2) << (t / 3600000) << ":" << std::setw(2)
<< ((t % 3600000) / 60000) << ":" << std::setw(2) << ((t % 60000) / 1000) << "."
<< std::setw(3) << (t % 1000) << " " << std::endl;
return;
}
if (thisIdx != sortSet.begin()->trackID || thisPacket.getTime() != sortSet.begin()->time){
while (packets.size()){
std::cout << packets.front() << std::endl;
packets.pop_front();
}
std::cout << "Input is inconsistent! Expected " << sortSet.begin()->trackID << ":"
<< sortSet.begin()->time << " but got " << thisPacket.getTrackId() << ":"
<< thisPacket.getTime() << " (part " << sortSet.begin()->index << " in "
<< myMeta.tracks[sortSet.begin()->trackID].codec << " track)" << std::endl;
<< sortSet.begin()->time << " but got " << thisIdx << ":" << thisPacket.getTime()
<< " (expected part " << sortSet.begin()->index << " in "
<< M.getCodec(sortSet.begin()->trackID) << " track)" << std::endl;
myConn.close();
return;
}
@ -80,13 +139,13 @@ namespace Mist{
// keep track of where we are
if (!sortSet.empty()){
keyPart temp;
temp.index = sortSet.begin()->index + 1;
temp.trackID = sortSet.begin()->trackID;
if (temp.index < myMeta.tracks[temp.trackID].parts.size()){// only insert when there are parts left
temp.time = sortSet.begin()->endTime; // timeplace of frame
temp.endTime = sortSet.begin()->endTime + myMeta.tracks[temp.trackID].parts[temp.index].getDuration();
temp.size = myMeta.tracks[temp.trackID].parts[temp.index].getSize(); // bytesize of frame
keyPart temp = *sortSet.begin();
temp.index++;
DTSC::Parts parts(M.parts(temp.trackID));
if (temp.index < parts.getEndValid()){// only insert when there are parts left
temp.time = temp.endTime; // timeplace of frame
temp.endTime = temp.time + parts.getDuration(temp.index);
temp.size = parts.getSize(temp.index); // bytesize of frame
sortSet.insert(temp);
}
// remove highest keyPart

View file

@ -27,6 +27,7 @@ namespace Mist{
OutSanityCheck(Socket::Connection &conn);
static void init(Util::Config *cfg);
void sendNext();
void initialSeek();
static bool listenMode(){return false;}
protected:

View file

@ -5,12 +5,10 @@
#include <mist/http_parser.h>
namespace Mist{
OutProgressiveSRT::OutProgressiveSRT(Socket::Connection &conn) : HTTPOutput(conn){
realTime = 0;
}
OutProgressiveSRT::~OutProgressiveSRT(){}
OutSRT::OutSRT(Socket::Connection &conn) : HTTPOutput(conn){realTime = 0;}
OutSRT::~OutSRT(){}
void OutProgressiveSRT::init(Util::Config *cfg){
void OutSRT::init(Util::Config *cfg){
HTTPOutput::init(cfg);
capa["name"] = "SRT";
capa["friendly"] = "SubRip/WebVTT over HTTP";
@ -28,16 +26,15 @@ namespace Mist{
capa["methods"][1u]["url_rel"] = "/$.vtt";
}
void OutProgressiveSRT::sendNext(){
void OutSRT::sendNext(){
char *dataPointer = 0;
size_t len = 0;
thisPacket.getString("data", dataPointer, len);
// INFO_MSG("getting sub: %s", dataPointer);
// ignore empty subs
if (len == 0 || (len == 1 && dataPointer[0] == ' ')){return;}
std::stringstream tmp;
if (!webVTT){tmp << lastNum++ << std::endl;}
long long unsigned int time = thisPacket.getTime();
uint64_t time = thisPacket.getTime();
// filter subtitle in specific timespan
if (filter_from > 0 && time < filter_from){
@ -52,13 +49,14 @@ namespace Mist{
}
char tmpBuf[50];
int tmpLen = sprintf(tmpBuf, "%.2llu:%.2llu:%.2llu.%.3llu", (time / 3600000),
((time % 3600000) / 60000), (((time % 3600000) % 60000) / 1000), time % 1000);
size_t tmpLen =
sprintf(tmpBuf, "%.2" PRIu64 ":%.2" PRIu64 ":%.2" PRIu64 ".%.3" PRIu64, (time / 3600000),
((time % 3600000) / 60000), (((time % 3600000) % 60000) / 1000), time % 1000);
tmp.write(tmpBuf, tmpLen);
tmp << " --> ";
time += thisPacket.getInt("duration");
if (time == thisPacket.getTime()){time += len * 75 + 800;}
tmpLen = sprintf(tmpBuf, "%.2llu:%.2llu:%.2llu.%.3llu", (time / 3600000),
tmpLen = sprintf(tmpBuf, "%.2" PRIu64 ":%.2" PRIu64 ":%.2" PRIu64 ".%.3" PRIu64, (time / 3600000),
((time % 3600000) / 60000), (((time % 3600000) % 60000) / 1000), time % 1000);
tmp.write(tmpBuf, tmpLen);
tmp << std::endl;
@ -69,25 +67,24 @@ namespace Mist{
myConn.SendNow("\n\n");
}
void OutProgressiveSRT::sendHeader(){
void OutSRT::sendHeader(){
H.setCORSHeaders();
if (webVTT){
H.SetHeader("Content-Type", "text/vtt; charset=utf-8");
}else{
H.SetHeader("Content-Type", "text/plain; charset=utf-8");
}
H.SetHeader("Content-Type", (webVTT ? "text/vtt; charset=utf-8" : "text/plain; charset=utf-8"));
H.protocol = "HTTP/1.0";
H.SendResponse("200", "OK", myConn);
if (webVTT){myConn.SendNow("WEBVTT\n\n");}
sentHeader = true;
}
void OutProgressiveSRT::onHTTP(){
void OutSRT::onHTTP(){
std::string method = H.method;
webVTT = (H.url.find(".vtt") != std::string::npos);
if (H.GetVar("track") != ""){
selectedTracks.clear();
selectedTracks.insert(JSON::Value(H.GetVar("track")).asInt());
size_t tid = atoll(H.GetVar("track").c_str());
if (M.getValidTracks().count(tid)){
userSelect.clear();
userSelect[tid].reload(streamName, tid);
}
}
filter_from = 0;
@ -100,11 +97,7 @@ namespace Mist{
H.Clean();
H.setCORSHeaders();
if (method == "OPTIONS" || method == "HEAD"){
if (webVTT){
H.SetHeader("Content-Type", "text/vtt; charset=utf-8");
}else{
H.SetHeader("Content-Type", "text/plain; charset=utf-8");
}
H.SetHeader("Content-Type", (webVTT ? "text/vtt; charset=utf-8" : "text/plain; charset=utf-8"));
H.protocol = "HTTP/1.0";
H.SendResponse("200", "OK", myConn);
H.Clean();

View file

@ -1,10 +1,10 @@
#include "output_http.h"
namespace Mist{
class OutProgressiveSRT : public HTTPOutput{
class OutSRT : public HTTPOutput{
public:
OutProgressiveSRT(Socket::Connection &conn);
~OutProgressiveSRT();
OutSRT(Socket::Connection &conn);
~OutSRT();
static void init(Util::Config *cfg);
void onHTTP();
void sendNext();
@ -12,11 +12,11 @@ namespace Mist{
protected:
bool webVTT;
int lastNum;
size_t lastNum;
uint32_t filter_from;
uint32_t filter_to;
uint32_t index;
};
}// namespace Mist
typedef Mist::OutProgressiveSRT mistOut;
typedef Mist::OutSRT mistOut;

View file

@ -7,23 +7,18 @@ namespace Mist{
OutTS::OutTS(Socket::Connection &conn) : TSOutput(conn){
sendRepeatingHeaders = 500; // PAT/PMT every 500ms (DVB spec)
streamName = config->getString("streamname");
parseData = true;
wantRequest = false;
pushOut = false;
initialize();
std::string tracks = config->getString("tracks");
if (config->getString("target").size()){
HTTP::URL target(config->getString("target"));
if (target.protocol != "tsudp"){
FAIL_MSG("Target %s must begin with tsudp://, aborting", target.getUrl().c_str());
parseData = false;
myConn.close();
onFail("Invalid ts udp target: doesn't start with tsudp://", true);
return;
}
if (!target.getPort()){
FAIL_MSG("Target %s must contain a port, aborting", target.getUrl().c_str());
parseData = false;
myConn.close();
onFail("Invalid ts udp target: missing port", true);
return;
}
pushOut = true;
@ -35,26 +30,55 @@ namespace Mist{
if (!pushSock.bind(0, target.path)){
disconnect();
streamName = "";
selectedTracks.clear();
userSelect.clear();
config->is_active = false;
return;
}
}
pushSock.SetDestination(target.host, target.getPort());
}
unsigned int currTrack = 0;
// loop over tracks, add any found track IDs to selectedTracks
if (tracks != ""){
selectedTracks.clear();
for (unsigned int i = 0; i < tracks.size(); ++i){
if (tracks[i] >= '0' && tracks[i] <= '9'){
currTrack = currTrack * 10 + (tracks[i] - '0');
}else{
if (currTrack > 0){selectedTracks.insert(currTrack);}
currTrack = 0;
}
setBlocking(false);
size_t ctr = 0;
while (++ctr <= 50 && !pushing){
Util::wait(100);
pushing = conn.spool() || conn.Received().size();
}
setBlocking(true);
wantRequest = pushing;
parseData = !pushing;
if (pushing){
if (!allowPush("")){
FAIL_MSG("Pushing not allowed");
config->is_active = false;
return;
}
}
initialize();
size_t currTrack = 0;
bool hasTrack = false;
// loop over tracks, add any found track IDs to selectedTracks
if (!pushing && tracks != ""){
userSelect.clear();
if (tracks == "passthrough"){
std::set<size_t> validTracks = getSupportedTracks();
for (std::set<size_t>::iterator it = validTracks.begin(); it != validTracks.end(); ++it){
userSelect[*it].reload(streamName, *it);
}
}else{
for (unsigned int i = 0; i < tracks.size(); ++i){
if (tracks[i] >= '0' && tracks[i] <= '9'){
currTrack = currTrack * 10 + (tracks[i] - '0');
hasTrack = true;
}else{
if (hasTrack){userSelect[currTrack].reload(streamName, currTrack);}
currTrack = 0;
hasTrack = false;
}
}
if (hasTrack){userSelect[currTrack].reload(streamName, currTrack);}
}
if (currTrack > 0){selectedTracks.insert(currTrack);}
}
}
@ -66,7 +90,7 @@ namespace Mist{
capa["friendly"] = "TS over TCP";
capa["desc"] = "Real time streaming in MPEG2/TS format over raw TCP";
capa["deps"] = "";
capa["required"]["streamname"]["name"] = "Source stream";
capa["required"]["streamname"]["name"] = "Stream";
capa["required"]["streamname"]["help"] = "What streamname to serve. For multiple streams, add "
"this protocol multiple times using different ports.";
capa["required"]["streamname"]["type"] = "str";
@ -100,19 +124,13 @@ namespace Mist{
void OutTS::initialSeek(){
// Adds passthrough support to the regular initialSeek function
if (targetParams.count("passthrough")){
selectedTracks.clear();
for (std::map<unsigned int, DTSC::Track>::iterator it = myMeta.tracks.begin();
it != myMeta.tracks.end(); it++){
selectedTracks.insert(it->first);
}
}
if (targetParams.count("passthrough")){selectAllTracks();}
Output::initialSeek();
}
void OutTS::sendTS(const char *tsData, unsigned int len){
void OutTS::sendTS(const char *tsData, size_t len){
if (pushOut){
static int curFilled = 0;
static size_t curFilled = 0;
if (curFilled == udpSize){
pushSock.SendNow(packetBuffer);
myConn.addUp(packetBuffer.size());
@ -129,4 +147,54 @@ namespace Mist{
bool OutTS::listenMode(){return !(config->getString("target").size());}
void OutTS::onRequest(){
while (myConn.Received().available(188)){
std::string spy = myConn.Received().copy(5);
// check for sync byte, skip to next one if needed
if (spy[0] != 0x47){
if (spy[4] == 0x47){
INSANE_MSG("Stripping time from 192-byte packet");
myConn.Received().remove(4);
}else{
myConn.Received().remove(1);
MEDIUM_MSG("Lost sync - resyncing...");
continue;
}
}
if (parseData){
parseData = false;
if (!allowPush("")){
onFinish();
return;
}
}
// we now know we probably have a packet ready at the next 188 bytes
// remove from buffer and insert into TS input
spy = myConn.Received().remove(188);
tsIn.parse((char *)spy.data(), 0);
while (tsIn.hasPacketOnEachTrack()){
tsIn.getEarliestPacket(thisPacket);
if (!thisPacket){
FAIL_MSG("Could not getNext TS packet!");
return;
}
size_t idx = M.trackIDToIndex(thisPacket.getTrackId(), getpid());
if (M.trackIDToIndex(idx == INVALID_TRACK_ID) || !M.getCodec(idx).size()){
tsIn.initializeMetadata(meta, thisPacket.getTrackId());
}
bufferLivePacket(thisPacket);
}
}
}
std::string OutTS::getStatsName(){
if (!parseData){
return "INPUT";
}else{
return Output::getStatsName();
}
}
bool OutTS::isReadyForPlay(){return true;}
}// namespace Mist

View file

@ -1,4 +1,5 @@
#include "output_ts_base.h"
#include <mist/ts_stream.h>
namespace Mist{
class OutTS : public TSOutput{
@ -6,15 +7,19 @@ namespace Mist{
OutTS(Socket::Connection &conn);
~OutTS();
static void init(Util::Config *cfg);
void sendTS(const char *tsData, unsigned int len = 188);
void sendTS(const char *tsData, size_t len = 188);
static bool listenMode();
void initialSeek();
bool isReadyForPlay();
void onRequest();
private:
unsigned int udpSize;
size_t udpSize;
bool pushOut;
std::string packetBuffer;
Socket::UDPConnection pushSock;
TS::Stream tsIn;
std::string getStatsName();
};
}// namespace Mist

View file

@ -11,16 +11,22 @@ namespace Mist{
}
void TSOutput::fillPacket(char const *data, size_t dataLen, bool &firstPack, bool video,
bool keyframe, uint32_t pkgPid, int &contPkg){
bool keyframe, size_t pkgPid, uint16_t &contPkg){
do{
if (!packData.getBytesFree()){
if ((sendRepeatingHeaders && thisPacket.getTime() - lastHeaderTime > sendRepeatingHeaders) || !packCounter){
std::set<size_t> selectedTracks;
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
selectedTracks.insert(it->first);
}
lastHeaderTime = thisPacket.getTime();
TS::Packet tmpPack;
tmpPack.FromPointer(TS::PAT);
tmpPack.setContinuityCounter(++contPAT);
sendTS(tmpPack.checkAndGetBuffer());
sendTS(TS::createPMT(selectedTracks, myMeta, ++contPMT));
sendTS(TS::createPMT(selectedTracks, M, ++contPMT));
sendTS(TS::createSDT(streamName, ++contSDT));
packCounter += 3;
}
@ -56,39 +62,44 @@ namespace Mist{
void TSOutput::sendNext(){
// Get ready some data to speed up accesses
uint32_t trackId = thisPacket.getTrackId();
DTSC::Track &Trk = myMeta.tracks[trackId];
bool &firstPack = first[trackId];
uint32_t pkgPid = 255 + trackId;
int &contPkg = contCounters[pkgPid];
std::string type = M.getType(thisIdx);
std::string codec = M.getCodec(thisIdx);
bool video = (type == "video");
size_t pkgPid = M.getID(thisIdx);
if (pkgPid < 255){pkgPid += 255;}
bool &firstPack = first[thisIdx];
uint16_t &contPkg = contCounters[pkgPid];
uint64_t packTime = thisPacket.getTime();
bool video = (Trk.type == "video");
bool keyframe = thisPacket.getInt("keyframe");
firstPack = true;
char *dataPointer = 0;
size_t tmpDataLen = 0;
thisPacket.getString("data", dataPointer, tmpDataLen); // data
uint64_t dataLen = tmpDataLen;
size_t dataLen = 0;
thisPacket.getString("data", dataPointer, dataLen); // data
packTime *= 90;
std::string bs;
// prepare bufferstring
if (video){
if (Trk.codec == "H264" || Trk.codec == "HEVC"){
if (codec == "H264" || codec == "HEVC"){
unsigned int extraSize = 0;
// dataPointer[4] & 0x1f is used to check if this should be done later: fillPacket("\000\000\000\001\011\360", 6);
if (Trk.codec == "H264" && (dataPointer[4] & 0x1f) != 0x09){extraSize += 6;}
// dataPointer[4] & 0x1f is used to check if this should be done later:
// fillPacket("\000\000\000\001\011\360", 6);
if (codec == "H264" && (dataPointer[4] & 0x1f) != 0x09){extraSize += 6;}
if (keyframe){
if (Trk.codec == "H264"){
if (codec == "H264"){
MP4::AVCC avccbox;
avccbox.setPayload(Trk.init);
avccbox.setPayload(M.getInit(thisIdx));
bs = avccbox.asAnnexB();
extraSize += bs.size();
}
/*LTS-START*/
if (Trk.codec == "HEVC"){
if (codec == "HEVC"){
MP4::HVCC hvccbox;
hvccbox.setPayload(Trk.init);
hvccbox.setPayload(M.getInit(thisIdx));
bs = hvccbox.asAnnexB();
extraSize += bs.size();
}
@ -107,26 +118,26 @@ namespace Mist{
unsigned int alreadySent = 0;
bs = TS::Packet::getPESVideoLeadIn(
(currPack != splitCount ? watKunnenWeIn1Ding : dataLen + extraSize - currPack * watKunnenWeIn1Ding),
packTime, offset, !currPack, Trk.bps);
packTime, offset, !currPack, M.getBps(thisIdx));
fillPacket(bs.data(), bs.size(), firstPack, video, keyframe, pkgPid, contPkg);
if (!currPack){
if (Trk.codec == "H264" && (dataPointer[4] & 0x1f) != 0x09){
if (codec == "H264" && (dataPointer[4] & 0x1f) != 0x09){
// End of previous nal unit, if not already present
fillPacket("\000\000\000\001\011\360", 6, firstPack, video, keyframe, pkgPid, contPkg);
alreadySent += 6;
}
if (keyframe){
if (Trk.codec == "H264"){
if (codec == "H264"){
MP4::AVCC avccbox;
avccbox.setPayload(Trk.init);
avccbox.setPayload(M.getInit(thisIdx));
bs = avccbox.asAnnexB();
fillPacket(bs.data(), bs.size(), firstPack, video, keyframe, pkgPid, contPkg);
alreadySent += bs.size();
}
/*LTS-START*/
if (Trk.codec == "HEVC"){
if (codec == "HEVC"){
MP4::HVCC hvccbox;
hvccbox.setPayload(Trk.init);
hvccbox.setPayload(M.getInit(thisIdx));
bs = hvccbox.asAnnexB();
fillPacket(bs.data(), bs.size(), firstPack, video, keyframe, pkgPid, contPkg);
alreadySent += bs.size();
@ -136,7 +147,7 @@ namespace Mist{
}
while (i + 4 < (unsigned int)dataLen){
if (nalLead){
fillPacket("\000\000\000\001" + 4 - nalLead, nalLead, firstPack, video, keyframe, pkgPid, contPkg);
fillPacket(&"\000\000\000\001"[4 - nalLead], nalLead, firstPack, video, keyframe, pkgPid, contPkg);
i += nalLead;
alreadySent += nalLead;
nalLead = 0;
@ -183,17 +194,17 @@ namespace Mist{
}
}else{
uint64_t offset = thisPacket.getInt("offset") * 90;
bs = TS::Packet::getPESVideoLeadIn(0, packTime, offset, true, Trk.bps);
bs = TS::Packet::getPESVideoLeadIn(0, packTime, offset, true, M.getBps(thisIdx));
fillPacket(bs.data(), bs.size(), firstPack, video, keyframe, pkgPid, contPkg);
fillPacket(dataPointer, dataLen, firstPack, video, keyframe, pkgPid, contPkg);
}
}else if (Trk.type == "audio"){
long unsigned int tempLen = dataLen;
if (Trk.codec == "AAC"){
}else if (type == "audio"){
size_t tempLen = dataLen;
if (codec == "AAC"){
tempLen += 7;
// Make sure TS timestamp is sample-aligned, if possible
uint32_t freq = Trk.rate;
uint32_t freq = M.getRate(thisIdx);
if (freq){
uint64_t aacSamples = (packTime / 90) * freq / 1000;
// round to nearest packet, assuming all 1024 samples (probably wrong, but meh)
@ -204,13 +215,18 @@ namespace Mist{
packTime = aacSamples * 90000 / freq;
}
}
bs = TS::Packet::getPESAudioLeadIn(tempLen, packTime, Trk.bps);
bs = TS::Packet::getPESAudioLeadIn(tempLen, packTime, M.getBps(thisIdx));
fillPacket(bs.data(), bs.size(), firstPack, video, keyframe, pkgPid, contPkg);
if (Trk.codec == "AAC"){
bs = TS::getAudioHeader(dataLen, Trk.init);
if (codec == "AAC"){
bs = TS::getAudioHeader(dataLen, M.getInit(thisIdx));
fillPacket(bs.data(), bs.size(), firstPack, video, keyframe, pkgPid, contPkg);
}
fillPacket(dataPointer, dataLen, firstPack, video, keyframe, pkgPid, contPkg);
}else if (type == "meta"){
long unsigned int tempLen = dataLen;
bs = TS::Packet::getPESMetaLeadIn(tempLen, packTime, M.getBps(thisIdx));
fillPacket(bs.data(), bs.size(), firstPack, video, keyframe, pkgPid, contPkg);
fillPacket(dataPointer, dataLen, firstPack, video, keyframe, pkgPid, contPkg);
}
if (packData.getBytesFree() < 184){
packData.addStuffing();

View file

@ -15,9 +15,9 @@ namespace Mist{
TSOutput(Socket::Connection &conn);
virtual ~TSOutput(){};
virtual void sendNext();
virtual void sendTS(const char *tsData, unsigned int len = 188){};
virtual void sendTS(const char *tsData, size_t len = 188){};
void fillPacket(char const *data, size_t dataLen, bool &firstPack, bool video, bool keyframe,
uint32_t pkgPid, int &contPkg);
size_t pkgPid, uint16_t &contPkg);
virtual void sendHeader(){
sentHeader = true;
packCounter = 0;
@ -25,12 +25,12 @@ namespace Mist{
protected:
virtual bool inlineRestartCapable() const{return true;}
std::map<unsigned int, bool> first;
std::map<unsigned int, int> contCounters;
int contPAT;
int contPMT;
int contSDT;
unsigned int packCounter;
std::map<size_t, bool> first;
std::map<size_t, uint16_t> contCounters;
uint16_t contPAT;
uint16_t contPMT;
uint16_t contSDT;
size_t packCounter; ///\todo update constructors?
TS::Packet packData;
uint64_t sendRepeatingHeaders; ///< Amount of ms between PAT/PMT. Zero means do not repeat.
uint64_t lastHeaderTime; ///< Timestamp last PAT/PMT were sent.

View file

@ -39,16 +39,15 @@ namespace Mist{
// PCM must be converted to little-endian if > 8 bits per sample
static Util::ResizeablePointer swappy;
DTSC::Track &trk = myMeta.tracks[thisPacket.getTrackId()];
if (trk.codec == "PCM"){
if (trk.size > 8 && swappy.allocate(len)){
if (trk.size == 16){
if (M.getCodec(thisIdx) == "PCM"){
if (M.getSize(thisIdx) > 8 && swappy.allocate(len)){
if (M.getSize(thisIdx) == 16){
for (uint32_t i = 0; i < len; i += 2){
swappy[i] = dataPointer[i + 1];
swappy[i + 1] = dataPointer[i];
}
}
if (trk.size == 24){
if (M.getSize(thisIdx) == 24){
for (uint32_t i = 0; i < len; i += 3){
swappy[i] = dataPointer[i + 2];
swappy[i + 1] = dataPointer[i + 1];
@ -70,33 +69,36 @@ namespace Mist{
H.setCORSHeaders();
H.SendResponse("200", "OK", myConn);
}
DTSC::Track &Trk = myMeta.tracks[getMainSelectedTrack()];
size_t mainTrack = getMainSelectedTrack();
// Send WAV header
char riffHeader[] = "RIFF\377\377\377\377WAVE";
// For live we send max allowed size
// VoD size of the whole thing is RIFF(4)+fmt(26)+fact(12)+LIST(30)+data(8)+data itself
uint32_t total_data = 0xFFFFFFFFul - 80;
if (!myMeta.live){
if (!M.getLive()){
DTSC::Keys keys(M.keys(mainTrack));
total_data = 0;
for (std::deque<unsigned long>::iterator it = Trk.keySizes.begin(); it != Trk.keySizes.end(); ++it){
total_data += *it;
}
size_t keyCount = keys.getEndValid();
for (size_t i = 0; i < keyCount; ++i){total_data += keys.getSize(i);}
}
Bit::htobl_le(riffHeader + 4, 80 + total_data);
myConn.SendNow(riffHeader, 12);
// Send format details
uint16_t fmt = 0;
if (Trk.codec == "ALAW"){fmt = 6;}
if (Trk.codec == "ULAW"){fmt = 7;}
if (Trk.codec == "PCM"){fmt = 1;}
if (Trk.codec == "FLOAT"){fmt = 3;}
if (Trk.codec == "MP3"){fmt = 85;}
myConn.SendNow(RIFF::fmt::generate(fmt, Trk.channels, Trk.rate, Trk.bps,
Trk.channels * (Trk.size << 3), Trk.size));
std::string codec = M.getCodec(mainTrack);
if (codec == "ALAW"){fmt = 6;}
if (codec == "ULAW"){fmt = 7;}
if (codec == "PCM"){fmt = 1;}
if (codec == "FLOAT"){fmt = 3;}
if (codec == "MP3"){fmt = 85;}
myConn.SendNow(RIFF::fmt::generate(
fmt, M.getChannels(mainTrack), M.getRate(mainTrack), M.getBps(mainTrack),
M.getChannels(mainTrack) * (M.getSize(mainTrack) << 3), M.getSize(mainTrack)));
// Send sample count per channel
if (fmt != 1){// Not required for PCM
if (!myMeta.live){
myConn.SendNow(RIFF::fact::generate(((Trk.lastms - Trk.firstms) * Trk.rate) / 1000));
if (!M.getLive()){
myConn.SendNow(RIFF::fact::generate(
((M.getLastms(mainTrack) - M.getFirstms(mainTrack)) * M.getRate(mainTrack)) / 1000));
}else{
myConn.SendNow(RIFF::fact::generate(0xFFFFFFFFul));
}

View file

@ -1,6 +1,7 @@
#include "output_webrtc.h"
#include <ifaddrs.h> // ifaddr, listing ip addresses.
#include <mist/procs.h>
#include <mist/sdp.h>
#include <mist/timing.h>
#include <netdb.h> // ifaddr, listing ip addresses.
@ -15,24 +16,32 @@ namespace Mist{
static int onDTLSHandshakeWantsToWriteCallback(const uint8_t *data, int *nbytes);
static void onDTSCConverterHasPacketCallback(const DTSC::Packet &pkt);
static void onDTSCConverterHasInitDataCallback(const uint64_t track, const std::string &initData);
static void
onRTPSorterHasPacketCallback(const uint64_t track,
const RTP::Packet &p); // when we receive RTP packets we store them in a sorter. Whenever there is a valid, sorted RTP packet that can be used this function is called.
static void onRTPPacketizerHasDataCallback(void *socket, char *data, unsigned int len, unsigned int channel);
static void onRTPPacketizerHasRTCPDataCallback(void *socket, const char *data, uint32_t nbytes);
static void onRTPSorterHasPacketCallback(const uint64_t track,
const RTP::Packet &p); // when we receive RTP packets we store them in a sorter. Whenever there is a valid,
// sorted RTP packet that can be used this function is called.
static void onRTPPacketizerHasDataCallback(void *socket, const char *data, size_t len, uint8_t channel);
static void onRTPPacketizerHasRTCPDataCallback(void *socket, const char *data, size_t nbytes, uint8_t channel);
/* ------------------------------------------------ */
WebRTCTrack::WebRTCTrack()
: payloadType(0), SSRC(0), timestampMultiplier(0), ULPFECPayloadType(0), REDPayloadType(0),
RTXPayloadType(0), prevReceivedSequenceNumber(0){}
: payloadType(0), SSRC(0), ULPFECPayloadType(0), REDPayloadType(0), RTXPayloadType(0),
prevReceivedSequenceNumber(0){}
/* ------------------------------------------------ */
OutWebRTC::OutWebRTC(Socket::Connection &myConn) : HTTPOutput(myConn){
vidTrack = INVALID_TRACK_ID;
prevVidTrack = INVALID_TRACK_ID;
audTrack = INVALID_TRACK_ID;
stayLive = true;
firstKey = true;
repeatInit = true;
vidTrack = 0;
audTrack = 0;
lastTimeSync = 0;
packetOffset = 0;
maxSkipAhead = 0;
needsLookAhead = 0;
webRTCInputOutputThread = NULL;
udpPort = 0;
SSRC = generateSSRC();
@ -40,7 +49,7 @@ namespace Mist{
rtcpKeyFrameDelayInMillis = 2000;
rtcpKeyFrameTimeoutInMillis = 0;
videoBitrate = 6 * 1000 * 1000;
RTP::MAX_SEND = 1200 - 28;
RTP::MAX_SEND = 1350 - 28;
didReceiveKeyFrame = false;
if (cert.init("NL", "webrtc", "webrtc") != 0){
@ -92,6 +101,7 @@ namespace Mist{
capa["methods"][0u]["handler"] = "webrtc";
capa["methods"][0u]["type"] = "webrtc";
capa["methods"][0u]["priority"] = 2;
capa["methods"][0u]["nobframes"] = 1;
capa["optional"]["preferredvideocodec"]["name"] = "Preferred video codecs";
capa["optional"]["preferredvideocodec"]["help"] =
@ -119,6 +129,14 @@ namespace Mist{
capa["optional"]["bindhost"]["option"] = "--bindhost";
capa["optional"]["bindhost"]["short"] = "B";
capa["optional"]["mergesessions"]["name"] = "Merge sessions";
capa["optional"]["mergesessions"]["help"] =
"If enabled, merges together all views from a single user into a single combined session. "
"If disabled, each view (reconnection of the signalling websocket) is a separate session.";
capa["optional"]["mergesessions"]["option"] = "--mergesessions";
capa["optional"]["mergesessions"]["short"] = "m";
capa["optional"]["mergesessions"]["default"] = 0;
config->addOptionsFromCapabilities(capa);
}
@ -165,10 +183,10 @@ namespace Mist{
}
bool ret = false;
if (sdpParser.hasReceiveOnlyMedia()){
ret = handleSignalingCommandRemoteOfferForOutput(sdpParser);
}else{
if (sdpParser.hasSendOnlyMedia()){
ret = handleSignalingCommandRemoteOfferForInput(sdpParser);
}else{
ret = handleSignalingCommandRemoteOfferForOutput(sdpParser);
}
// create result message.
JSON::Value commandResult;
@ -200,18 +218,18 @@ namespace Mist{
return;
}
std::set<size_t> validTracks = M.getValidTracks();
if (command["type"] == "tracks"){
if (command.isMember("audio")){
if (!command["audio"].isNull()){
targetParams["audio"] = command["audio"].asString();
if (audTrack && command["audio"].asInt()){
uint64_t tId = command["audio"].asInt();
if (myMeta.tracks.count(tId) && myMeta.tracks[tId].codec != myMeta.tracks[audTrack].codec){
if (validTracks.count(tId) && M.getCodec(tId) != M.getCodec(audTrack)){
targetParams["audio"] = "none";
sendSignalingError("tracks", "Cannot select track because it is encoded as " +
myMeta.tracks[tId].codec + " but the already negotiated track is " +
myMeta.tracks[audTrack].codec +
". Please re-negotiate to play this track.");
M.getCodec(tId) + " but the already negotiated track is " +
M.getCodec(audTrack) + ". Please re-negotiate to play this track.");
}
}
}else{
@ -223,32 +241,30 @@ namespace Mist{
targetParams["video"] = command["video"].asString();
if (vidTrack && command["video"].asInt()){
uint64_t tId = command["video"].asInt();
if (myMeta.tracks.count(tId) && myMeta.tracks[tId].codec != myMeta.tracks[vidTrack].codec){
if (validTracks.count(tId) && M.getCodec(tId) != M.getCodec(vidTrack)){
targetParams["video"] = "none";
sendSignalingError("tracks", "Cannot select track because it is encoded as " +
myMeta.tracks[tId].codec + " but the already negotiated track is " +
myMeta.tracks[vidTrack].codec +
". Please re-negotiate to play this track.");
M.getCodec(tId) + " but the already negotiated track is " +
M.getCodec(vidTrack) + ". Please re-negotiate to play this track.");
}
}
}else{
targetParams.erase("video");
}
}
// Remember the previous video track, if any.
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
if (M.getType(it->first) == "video"){
prevVidTrack = it->first;
break;
}
}
selectDefaultTracks();
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
DTSC::Track &dtscTrack = myMeta.tracks[*it];
WebRTCTrack *trackPointer = 0;
if (dtscTrack.type == "video" && webrtcTracks.count(vidTrack)){
trackPointer = &webrtcTracks[vidTrack];
}
if (dtscTrack.type == "audio" && webrtcTracks.count(audTrack)){
trackPointer = &webrtcTracks[audTrack];
}
if (webrtcTracks.count(*it)){trackPointer = &webrtcTracks[*it];}
if (!trackPointer){continue;}
WebRTCTrack &rtcTrack = *trackPointer;
sendSPSPPS(dtscTrack, rtcTrack);
// Add the previous video track back, if we had one.
if (prevVidTrack != INVALID_TRACK_ID && !userSelect.count(prevVidTrack)){
uint64_t seekTarget = currentTime();
userSelect[prevVidTrack].reload(streamName, prevVidTrack);
seek(seekTarget);
}
onIdle();
return;
@ -264,10 +280,13 @@ namespace Mist{
parseData = true;
selectDefaultTracks();
}
stayLive = (endTime() < seek_time + 5000);
if (stayLive){seek_time = endTime();}
seek(seek_time, true);
JSON::Value commandResult;
commandResult["type"] = "on_seek";
commandResult["result"] = true;
if (M.getLive()){commandResult["live_point"] = stayLive;}
webSock->sendFrame(commandResult.toString());
onIdle();
return;
@ -281,8 +300,8 @@ namespace Mist{
commandResult["current"] = currentTime();
commandResult["begin"] = startTime();
commandResult["end"] = endTime();
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
commandResult["tracks"].append(*it);
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
commandResult["tracks"].append(it->first);
}
webSock->sendFrame(commandResult.toString());
return;
@ -306,7 +325,7 @@ namespace Mist{
rtcpKeyFrameDelayInMillis = 500;
}
rtcpKeyFrameTimeoutInMillis = Util::getMS() + rtcpKeyFrameDelayInMillis;
rtcpKeyFrameTimeoutInMillis = Util::bootMS() + rtcpKeyFrameDelayInMillis;
JSON::Value commandResult;
commandResult["type"] = "on_keyframe_interval";
commandResult["result"] = rtcpKeyFrameDelayInMillis;
@ -340,39 +359,36 @@ namespace Mist{
if (0 == udpPort){bindUDPSocketOnLocalCandidateAddress(0);}
// get codecs from selected stream which are used to create our SDP answer.
std::string videoCodec;
std::string audioCodec;
capa["codecs"][0u][0u].null();
capa["codecs"][0u][1u].null();
std::set<unsigned long>::iterator it = selectedTracks.begin();
while (it != selectedTracks.end()){
DTSC::Track &Trk = myMeta.tracks[*it];
if (Trk.type == "video"){
videoCodec = Trk.codec;
vidTrack = Trk.trackID;
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
if (M.getType(it->first) == "video"){
vidTrack = it->first;
videoCodec = M.getCodec(it->first);
capa["codecs"][0u][0u].append(videoCodec);
}else if (Trk.type == "audio"){
audioCodec = Trk.codec;
audTrack = Trk.trackID;
}
if (M.getType(it->first) == "audio"){
audTrack = it->first;
audioCodec = M.getCodec(it->first);
capa["codecs"][0u][1u].append(audioCodec);
}
++it;
}
sdpAnswer.setDirection("sendonly");
// setup video WebRTC Track.
if (!videoCodec.empty()){
if (sdpAnswer.enableVideo(videoCodec)){
WebRTCTrack videoTrack;
if (vidTrack != INVALID_TRACK_ID){
if (sdpAnswer.enableVideo(M.getCodec(vidTrack))){
WebRTCTrack &videoTrack = webrtcTracks[vidTrack];
if (!createWebRTCTrackFromAnswer(sdpAnswer.answerVideoMedia, sdpAnswer.answerVideoFormat, videoTrack)){
FAIL_MSG("Failed to create the WebRTCTrack for the selected video.");
webrtcTracks.erase(vidTrack);
return false;
}
videoTrack.rtpPacketizer = RTP::Packet(videoTrack.payloadType, rand(), 0, videoTrack.SSRC, 0);
videoTrack.timestampMultiplier = 90;
webrtcTracks[vidTrack] = videoTrack;
// Enabled NACKs
sdpAnswer.videoLossPrevention = SDP_LOSS_PREVENTION_NACK;
videoTrack.sorter.tmpVideoLossPrevention = sdpAnswer.videoLossPrevention;
@ -380,16 +396,15 @@ namespace Mist{
}
// setup audio WebRTC Track
if (!audioCodec.empty()){
if (sdpAnswer.enableAudio(audioCodec)){
WebRTCTrack audioTrack;
if (audTrack != INVALID_TRACK_ID){
if (sdpAnswer.enableAudio(M.getCodec(audTrack))){
WebRTCTrack &audioTrack = webrtcTracks[audTrack];
if (!createWebRTCTrackFromAnswer(sdpAnswer.answerAudioMedia, sdpAnswer.answerAudioFormat, audioTrack)){
FAIL_MSG("Failed to create the WebRTCTrack for the selected audio.");
webrtcTracks.erase(audTrack);
return false;
}
audioTrack.rtpPacketizer = RTP::Packet(audioTrack.payloadType, rand(), 0, audioTrack.SSRC, 0);
audioTrack.timestampMultiplier = 48;
webrtcTracks[audTrack] = audioTrack;
}
}
@ -411,8 +426,8 @@ namespace Mist{
commandResult["current"] = currentTime();
commandResult["begin"] = startTime();
commandResult["end"] = endTime();
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
commandResult["tracks"].append(*it);
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
commandResult["tracks"].append(it->first);
}
webSock->sendFrame(commandResult.toString());
}
@ -462,7 +477,7 @@ namespace Mist{
result.localIcePwd = formatAnswer.icePwd;
result.localIceUFrag = formatAnswer.iceUFrag;
if (mediaAnswer.SSRC != 0){
if (mediaAnswer.SSRC){
result.SSRC = mediaAnswer.SSRC;
}else{
result.SSRC = rand();
@ -538,10 +553,25 @@ namespace Mist{
}
}
// allow peer to push video/audio
if (!allowPush("")){
FAIL_MSG("Failed to allow push");
return false;
}
INFO_MSG("Push accepted");
meta.reInit(streamName, false);
// video
if (sdpAnswer.enableVideo(prefVideoCodec)){
WebRTCTrack videoTrack;
size_t vIdx = meta.addDelayedTrack();
if (!sdpAnswer.setupVideoDTSCTrack(meta, vIdx)){
FAIL_MSG("Failed to setup video DTSC track.");
return false;
}
WebRTCTrack &videoTrack = webrtcTracks[vIdx];
videoTrack.payloadType = sdpAnswer.answerVideoFormat.getPayloadType();
videoTrack.localIcePwd = sdpAnswer.answerVideoFormat.icePwd;
videoTrack.localIceUFrag = sdpAnswer.answerVideoFormat.iceUFrag;
@ -557,55 +587,47 @@ namespace Mist{
sdpAnswer.videoLossPrevention = SDP_LOSS_PREVENTION_NACK;
videoTrack.sorter.tmpVideoLossPrevention = sdpAnswer.videoLossPrevention;
DTSC::Track dtscVideo;
if (!sdpAnswer.setupVideoDTSCTrack(dtscVideo)){
if (!sdpAnswer.setupVideoDTSCTrack(meta, vIdx)){
FAIL_MSG("Failed to setup video DTSC track.");
return false;
}
videoTrack.rtpToDTSC.setProperties(dtscVideo);
videoTrack.rtpToDTSC.setProperties(meta, vIdx);
videoTrack.rtpToDTSC.setCallbacks(onDTSCConverterHasPacketCallback, onDTSCConverterHasInitDataCallback);
videoTrack.sorter.setCallback(videoTrack.payloadType, onRTPSorterHasPacketCallback);
videoTrack.sorter.setCallback(vIdx, onRTPSorterHasPacketCallback);
webrtcTracks[videoTrack.payloadType] = videoTrack;
myMeta.tracks[dtscVideo.trackID] = dtscVideo;
userSelect[vIdx].reload(streamName, vIdx, COMM_STATUS_SOURCE);
INFO_MSG("Video push received on track %zu", vIdx);
}
// audio setup
if (sdpAnswer.enableAudio(prefAudioCodec)){
WebRTCTrack audioTrack;
size_t aIdx = meta.addDelayedTrack();
if (!sdpAnswer.setupAudioDTSCTrack(meta, aIdx)){
FAIL_MSG("Failed to setup audio DTSC track.");
}
WebRTCTrack &audioTrack = webrtcTracks[aIdx];
audioTrack.payloadType = sdpAnswer.answerAudioFormat.getPayloadType();
audioTrack.localIcePwd = sdpAnswer.answerAudioFormat.icePwd;
audioTrack.localIceUFrag = sdpAnswer.answerAudioFormat.iceUFrag;
audioTrack.SSRC = sdpAnswer.answerAudioMedia.SSRC;
DTSC::Track dtscAudio;
if (!sdpAnswer.setupAudioDTSCTrack(dtscAudio)){
FAIL_MSG("Failed to setup audio DTSC track.");
}
audioTrack.rtpToDTSC.setProperties(dtscAudio);
audioTrack.rtpToDTSC.setProperties(meta, aIdx);
audioTrack.rtpToDTSC.setCallbacks(onDTSCConverterHasPacketCallback, onDTSCConverterHasInitDataCallback);
audioTrack.sorter.setCallback(audioTrack.payloadType, onRTPSorterHasPacketCallback);
audioTrack.sorter.setCallback(aIdx, onRTPSorterHasPacketCallback);
webrtcTracks[audioTrack.payloadType] = audioTrack;
myMeta.tracks[dtscAudio.trackID] = dtscAudio;
userSelect[aIdx].reload(streamName, aIdx, COMM_STATUS_SOURCE);
INFO_MSG("Audio push received on track %zu", aIdx);
}
sdpAnswer.setDirection("recvonly");
// allow peer to push video/audio
if (!allowPush("")){
FAIL_MSG("Failed to allow push for stream %s.", streamName.c_str());
/* \todo when I try to send a error message back to the browser it fails; probably because socket gets closed (?). */
return false;
}
// start our receive thread (handles STUN, DTLS, RTP input)
webRTCInputOutputThread = new tthread::thread(webRTCInputOutputThreadFunc, NULL);
rtcpTimeoutInMillis = Util::getMS() + 2000;
rtcpKeyFrameTimeoutInMillis = Util::getMS() + 2000;
rtcpTimeoutInMillis = Util::bootMS() + 2000;
rtcpKeyFrameTimeoutInMillis = Util::bootMS() + 2000;
return true;
}
@ -649,7 +671,6 @@ namespace Mist{
bool hadPack = false;
while (udp.Receive()){
hadPack = true;
myConn.addDown(udp.data_len);
uint8_t fb = (uint8_t)udp.data[0];
@ -664,6 +685,7 @@ namespace Mist{
FAIL_MSG("Unhandled WebRTC data. Type: %02X", fb);
}
}
if (udp.getSock() == -1){onFail("UDP socket closed", true);}
return hadPack;
}
@ -760,17 +782,17 @@ namespace Mist{
void OutWebRTC::ackNACK(uint32_t pSSRC, uint16_t seq){
if (!outBuffers.count(pSSRC)){
WARN_MSG("Could not answer NACK for %lu: we don't know this track", pSSRC);
WARN_MSG("Could not answer NACK for %" PRIu32 ": we don't know this track", pSSRC);
return;
}
nackBuffer &nb = outBuffers[pSSRC];
if (!nb.isBuffered(seq)){
WARN_MSG("Could not answer NACK for %lu #%u: packet not buffered", pSSRC, seq);
HIGH_MSG("Could not answer NACK for %" PRIu32 " #%" PRIu16 ": packet not buffered", pSSRC, seq);
return;
}
udp.SendNow(nb.getData(seq), nb.getSize(seq));
myConn.addUp(nb.getSize(seq));
INFO_MSG("Answered NACK for %lu #%u", pSSRC, seq);
HIGH_MSG("Answered NACK for %" PRIu32 " #%" PRIu16, pSSRC, seq);
}
void OutWebRTC::handleReceivedRTPOrRTCPPacket(){
@ -780,23 +802,24 @@ namespace Mist{
if ((pt < 64) || (pt >= 96)){
RTP::Packet rtp_pkt((const char *)udp.data, (unsigned int)udp.data_len);
uint8_t payloadType = rtp_pkt.getPayloadType();
uint64_t rtcTrackID = payloadType;
uint16_t currSeqNum = rtp_pkt.getSequence();
size_t idx = M.trackIDToIndex(rtp_pkt.getPayloadType(), getpid());
// Do we need to map the payload type to a WebRTC Track? (e.g. RED)
if (payloadTypeToWebRTCTrack.count(payloadType) != 0){
rtcTrackID = payloadTypeToWebRTCTrack[payloadType];
if (payloadTypeToWebRTCTrack.count(rtp_pkt.getPayloadType()) != 0){
idx = M.trackIDToIndex(payloadTypeToWebRTCTrack[rtp_pkt.getPayloadType()], getpid());
}
if (webrtcTracks.count(rtcTrackID) == 0){
FAIL_MSG(
"Received an RTP packet for a track that we didn't prepare for. PayloadType is %llu", payloadType);
if (idx == INVALID_TRACK_ID || !webrtcTracks.count(idx)){
FAIL_MSG("Received an RTP packet for a track that we didn't prepare for. PayloadType is "
"%" PRIu32,
rtp_pkt.getPayloadType());
return;
}
// Find the WebRTCTrack corresponding to the packet we received
WebRTCTrack &rtcTrack = webrtcTracks[rtcTrackID];
WebRTCTrack &rtcTrack = webrtcTracks[idx];
// Do not parse packets we don't care about
if (!rtcTrack.sorter.wantSeq(currSeqNum)){return;}
@ -811,7 +834,6 @@ namespace Mist{
// Here follows a very rudimentary algo for requesting lost
// packets; I guess after some experimentation a better
// algorithm should be used; this is used to trigger NACKs.
uint16_t expectedSeqNum = rtcTrack.prevReceivedSequenceNumber + 1;
if (rtcTrack.prevReceivedSequenceNumber != 0 && (rtcTrack.prevReceivedSequenceNumber + 1) != currSeqNum){
while (rtcTrack.prevReceivedSequenceNumber < currSeqNum){
sendRTCPFeedbackNACK(rtcTrack, rtcTrack.prevReceivedSequenceNumber);
@ -821,7 +843,7 @@ namespace Mist{
rtcTrack.prevReceivedSequenceNumber = currSeqNum;
if (payloadType == rtcTrack.REDPayloadType){
if (rtp_pkt.getPayloadType() == rtcTrack.REDPayloadType){
rtcTrack.sorter.addREDPacket(udp.data, len, rtcTrack.payloadType, rtcTrack.REDPayloadType,
rtcTrack.ULPFECPayloadType);
}else{
@ -887,42 +909,47 @@ namespace Mist{
void OutWebRTC::onDTSCConverterHasPacket(const DTSC::Packet &pkt){
// extract meta data (init data, width/height, etc);
uint64_t trackID = pkt.getTrackId();
DTSC::Track &DTSCTrack = myMeta.tracks[trackID];
if (DTSCTrack.codec == "H264"){
if (DTSCTrack.init.empty()){
FAIL_MSG("No init data found for trackID %llu (note: we use payloadType as trackID)", trackID);
size_t idx = pkt.getTrackId();
std::string codec = M.getCodec(idx);
if (codec == "H264"){
if (M.getInit(idx).empty()){
FAIL_MSG("No init data found for track on index %zu, payloadType %zu", idx, M.getID(idx));
return;
}
}else if (DTSCTrack.codec == "VP8"){
if (pkt.getFlag("keyframe")){extractFrameSizeFromVP8KeyFrame(pkt);}
}
if (codec == "VP8" && pkt.getFlag("keyframe")){extractFrameSizeFromVP8KeyFrame(pkt);}
// create rtcp packet (set bitrate and request keyframe).
if (DTSCTrack.codec == "H264" || DTSCTrack.codec == "VP8"){
uint64_t now = Util::getMS();
if (codec == "H264" || codec == "VP8"){
uint64_t now = Util::bootMS();
if (now >= rtcpTimeoutInMillis){
WebRTCTrack &rtcTrack = webrtcTracks[trackID];
WebRTCTrack &rtcTrack = webrtcTracks[idx];
sendRTCPFeedbackREMB(rtcTrack);
sendRTCPFeedbackRR(rtcTrack);
rtcpTimeoutInMillis = now + 1000; /* was 5000, lowered for FEC */
}
if (now >= rtcpKeyFrameTimeoutInMillis){
WebRTCTrack &rtcTrack = webrtcTracks[trackID];
WebRTCTrack &rtcTrack = webrtcTracks[idx];
sendRTCPFeedbackPLI(rtcTrack);
rtcpKeyFrameTimeoutInMillis = now + rtcpKeyFrameDelayInMillis;
}
}
if (!M.trackValid(idx)){
INFO_MSG("Validated track %zu in meta", idx);
meta.validateTrack(idx);
}
bufferLivePacket(pkt);
}
void OutWebRTC::onDTSCConverterHasInitData(const uint64_t trackID, const std::string &initData){
if (webrtcTracks.count(trackID) == 0){
ERROR_MSG("Recieved init data for a track that we don't manager. TrackID/PayloadType: %llu", trackID);
void OutWebRTC::onDTSCConverterHasInitData(size_t idx, const std::string &initData){
if (idx == INVALID_TRACK_ID || !webrtcTracks.count(idx)){
ERROR_MSG(
"Recieved init data for a track that we don't manager. TrackID %zu /PayloadType: %zu",
idx, M.getID(idx));
return;
}
@ -936,28 +963,26 @@ namespace Mist{
h264::sequenceParameterSet sps(avccbox.getSPS(), avccbox.getSPSLen());
h264::SPSMeta hMeta = sps.getCharacteristics();
DTSC::Track &Trk = myMeta.tracks[trackID];
Trk.width = hMeta.width;
Trk.height = hMeta.height;
Trk.fpks = hMeta.fps * 1000;
meta.setWidth(idx, hMeta.width);
meta.setHeight(idx, hMeta.height);
meta.setFpks(idx, hMeta.fps * 1000);
avccbox.multiplyPPS(57); // Inject all possible PPS packets into init
myMeta.tracks[trackID].init = std::string(avccbox.payload(), avccbox.payloadSize());
meta.setInit(idx, avccbox.payload(), avccbox.payloadSize());
}
void OutWebRTC::onRTPSorterHasPacket(const uint64_t trackID, const RTP::Packet &pkt){
if (webrtcTracks.count(trackID) == 0){
ERROR_MSG("Received a sorted RTP packet for track %llu but we don't manage this track.", trackID);
void OutWebRTC::onRTPSorterHasPacket(size_t idx, const RTP::Packet &pkt){
if (idx == INVALID_TRACK_ID || !webrtcTracks.count(idx)){
ERROR_MSG("Received a sorted RTP packet for track %zu but we don't manage this track.", idx);
return;
}
webrtcTracks[trackID].rtpToDTSC.addRTP(pkt);
webrtcTracks[idx].rtpToDTSC.addRTP(pkt);
}
// This function will be called when we're sending data
// to the browser (other peer).
void OutWebRTC::onRTPPacketizerHasRTPPacket(char *data, uint32_t nbytes){
void OutWebRTC::onRTPPacketizerHasRTPPacket(const char *data, size_t nbytes){
rtpOutBuffer.allocate(nbytes + 256);
rtpOutBuffer.assign(data, nbytes);
@ -974,11 +999,10 @@ namespace Mist{
uint32_t pSSRC = tmpPkt.getSSRC();
uint16_t seq = tmpPkt.getSequence();
outBuffers[pSSRC].assign(seq, rtpOutBuffer, protectedSize);
myConn.addUp(protectedSize);
}
void OutWebRTC::onRTPPacketizerHasRTCPPacket(char *data, uint32_t nbytes){
void OutWebRTC::onRTPPacketizerHasRTCPPacket(const char *data, uint32_t nbytes){
if (nbytes > 2048){
FAIL_MSG("The received RTCP packet is too big to handle.");
@ -1010,7 +1034,7 @@ namespace Mist{
void OutWebRTC::sendHeader(){
// first make sure that we complete the DTLS handshake.
while (!dtlsHandshake.hasKeyingMaterial()){
while (keepGoing() && !dtlsHandshake.hasKeyingMaterial()){
if (!handleWebRTCInputOutput()){Util::sleep(10);}
}
@ -1018,6 +1042,23 @@ namespace Mist{
}
void OutWebRTC::sendNext(){
// Handle nice move-over to new track ID
if (prevVidTrack != INVALID_TRACK_ID && thisIdx != prevVidTrack && M.getType(thisIdx) == "video"){
if (!thisPacket.getFlag("keyframe")){
// Ignore the packet if not a keyframe
return;
}
dropTrack(prevVidTrack, "Smoothly switching to new video track", false);
prevVidTrack = INVALID_TRACK_ID;
repeatInit = true;
firstKey = true;
onIdle();
}
if (M.getLive() && stayLive && lastTimeSync + 666 < thisPacket.getTime()){
lastTimeSync = thisPacket.getTime();
if (liveSeek()){return;}
}
// once the DTLS handshake has been done, we still have to
// deal with STUN consent messages and RTCP.
@ -1028,16 +1069,15 @@ namespace Mist{
thisPacket.getString("data", dataPointer, dataLen);
// make sure the webrtcTracks were setup correctly for output.
uint32_t tid = thisPacket.getTrackId();
DTSC::Track &dtscTrack = myMeta.tracks[tid];
uint32_t tid = thisIdx;
WebRTCTrack *trackPointer = 0;
// If we see this is audio or video, use the webrtc track we negotiated
if (dtscTrack.type == "video" && webrtcTracks.count(vidTrack)){
if (M.getType(tid) == "video" && webrtcTracks.count(vidTrack)){
trackPointer = &webrtcTracks[vidTrack];
}
if (dtscTrack.type == "audio" && webrtcTracks.count(audTrack)){
if (M.getType(tid) == "audio" && webrtcTracks.count(audTrack)){
trackPointer = &webrtcTracks[audTrack];
}
@ -1052,20 +1092,40 @@ namespace Mist{
WebRTCTrack &rtcTrack = *trackPointer;
if (rtcTrack.timestampMultiplier == 0){
FAIL_MSG("The WebRTCTrack::timestampMultiplier is 0; invalid.");
return;
}
uint64_t timestamp = thisPacket.getTime();
rtcTrack.rtpPacketizer.setTimestamp(timestamp * rtcTrack.timestampMultiplier);
rtcTrack.rtpPacketizer.setTimestamp(timestamp * SDP::getMultiplier(&M, thisIdx));
bool isKeyFrame = thisPacket.getFlag("keyframe");
didReceiveKeyFrame = isKeyFrame;
if (isKeyFrame && dtscTrack.codec == "H264"){sendSPSPPS(dtscTrack, rtcTrack);}
if (M.getCodec(thisIdx) == "H264"){
if (isKeyFrame && firstKey){
char *data;
size_t dataLen;
thisPacket.getString("data", data, dataLen);
size_t offset = 0;
while (offset + 4 < dataLen){
size_t nalLen = Bit::btohl(data + offset);
uint8_t nalType = data[offset + 4] & 0x1F;
if (nalType == 7 || nalType == 8){// Init data already provided in-band, skip repeating
// it.
repeatInit = false;
break;
}
offset += 4 + nalLen;
INFO_MSG("Offset now %zu", offset);
}
firstKey = false;
}
if (repeatInit && isKeyFrame){sendSPSPPS(thisIdx, rtcTrack);}
}
rtcTrack.rtpPacketizer.sendData(&udp, onRTPPacketizerHasDataCallback, dataPointer, dataLen,
rtcTrack.payloadType, dtscTrack.codec);
rtcTrack.payloadType, M.getCodec(thisIdx));
if (!lastSR.count(thisIdx) || lastSR[thisIdx] != Util::bootSecs()){
lastSR[thisIdx] = Util::bootSecs();
rtcTrack.rtpPacketizer.sendRTCP_SR((void *)&udp, onRTPPacketizerHasRTCPDataCallback);
}
}
// When the RTP::toDTSC converter collected a complete VP8
@ -1105,15 +1165,14 @@ namespace Mist{
return;
}
uint64_t trackID = pkt.getTrackId();
if (myMeta.tracks.count(trackID) == 0){
FAIL_MSG("No track found with ID %llu.", trackID);
size_t idx = M.trackIDToIndex(pkt.getTrackId(), getpid());
if (idx == INVALID_TRACK_ID){
FAIL_MSG("No track found with ID %zu.", pkt.getTrackId());
return;
}
DTSC::Track &Trk = myMeta.tracks[trackID];
Trk.width = width;
Trk.height = height;
meta.setWidth(idx, width);
meta.setHeight(idx, height);
}
void OutWebRTC::sendRTCPFeedbackREMB(const WebRTCTrack &rtcTrack){
@ -1253,16 +1312,16 @@ namespace Mist{
((RTP::FECPacket *)&(rtcTrack.rtpPacketizer))->sendRTCP_RR(rtcTrack.sorter, SSRC, rtcTrack.SSRC, (void *)&udp, onRTPPacketizerHasRTCPDataCallback);
}
void OutWebRTC::sendSPSPPS(DTSC::Track &dtscTrack, WebRTCTrack &rtcTrack){
void OutWebRTC::sendSPSPPS(size_t dtscIdx, WebRTCTrack &rtcTrack){
if (dtscTrack.init.empty()){
if (M.getInit(dtscIdx).empty()){
WARN_MSG("No init data found in the DTSC::Track. Not sending SPS and PPS");
return;
}
std::vector<char> buf;
MP4::AVCC avcc;
avcc.setPayload(dtscTrack.init);
avcc.setPayload(M.getInit(dtscIdx));
/* SPS */
for (uint32_t i = 0; i < avcc.getSPSCount(); ++i){
@ -1279,7 +1338,7 @@ namespace Mist{
std::copy(avcc.getSPS(i), avcc.getSPS(i) + avcc.getSPSLen(i), std::back_inserter(buf));
rtcTrack.rtpPacketizer.sendData(&udp, onRTPPacketizerHasDataCallback, &buf[0], buf.size(),
rtcTrack.payloadType, dtscTrack.codec);
rtcTrack.payloadType, M.getCodec(dtscIdx));
}
/* PPS */
@ -1297,7 +1356,7 @@ namespace Mist{
std::copy(avcc.getPPS(i), avcc.getPPS(i) + avcc.getPPSLen(i), std::back_inserter(buf));
rtcTrack.rtpPacketizer.sendData(&udp, onRTPPacketizerHasDataCallback, &buf[0], buf.size(),
rtcTrack.payloadType, dtscTrack.codec);
rtcTrack.payloadType, M.getCodec(dtscIdx));
}
}
@ -1347,7 +1406,7 @@ namespace Mist{
classPointer->onDTSCConverterHasPacket(pkt);
}
static void onRTPPacketizerHasDataCallback(void *socket, char *data, unsigned int len, unsigned int channel){
static void onRTPPacketizerHasDataCallback(void *socket, const char *data, size_t len, uint8_t channel){
if (!classPointer){
FAIL_MSG("Received a RTP packet but our `classPointer` is invalid.");
return;
@ -1355,12 +1414,12 @@ namespace Mist{
classPointer->onRTPPacketizerHasRTPPacket(data, len);
}
static void onRTPPacketizerHasRTCPDataCallback(void *socket, const char *data, uint32_t len){
static void onRTPPacketizerHasRTCPDataCallback(void *socket, const char *data, size_t len, uint8_t){
if (!classPointer){
FAIL_MSG("Received a RTCP packet, but out `classPointer` is invalid.");
return;
}
classPointer->onRTPPacketizerHasRTCPPacket((char *)data, len);
classPointer->onRTPPacketizerHasRTCPPacket(data, len);
}
static uint32_t generateSSRC(){

View file

@ -99,7 +99,6 @@ namespace Mist{
public:
WebRTCTrack(); ///< Initializes to some defaults.
public:
RTP::toDTSC rtpToDTSC; ///< Converts RTP packets into DTSC packets.
RTP::FECSorter sorter; ///< Takes care of sorting the received RTP packet and keeps track of some
///< statistics. Will call a callback whenever a packet can be used. (e.g. not lost, in correct order).
@ -107,12 +106,16 @@ namespace Mist{
uint64_t payloadType; ///< The payload type that was extracted from the `m=` media line in the SDP.
std::string localIcePwd;
std::string localIceUFrag;
uint32_t SSRC; ///< The SSRC of the RTP packets.
uint32_t timestampMultiplier; ///< Used for outgoing streams to convert the DTSC timestamps into RTP timestamps.
uint8_t ULPFECPayloadType; ///< When we've enabled FEC for a video stream this holds the payload type that is used to distinguish between ordinary video RTP packets and FEC packets.
uint8_t REDPayloadType; ///< When using RED and ULPFEC this holds the payload type of the RED stream.
uint8_t RTXPayloadType; ///< The retransmission payload type when we use RTX (retransmission with separate SSRC/payload type)
uint16_t prevReceivedSequenceNumber; ///< The previously received sequence number. This is used to NACK packets when we loose one.
uint32_t SSRC; ///< The SSRC of the RTP packets.
uint8_t ULPFECPayloadType; ///< When we've enabled FEC for a video stream this holds the payload
///< type that is used to distinguish between ordinary video RTP
///< packets and FEC packets.
uint8_t REDPayloadType; ///< When using RED and ULPFEC this holds the payload type of the RED
///< stream.
uint8_t RTXPayloadType; ///< The retransmission payload type when we use RTX (retransmission
///< with separate SSRC/payload type)
uint16_t prevReceivedSequenceNumber; ///< The previously received sequence number. This is used
///< to NACK packets when we loose one.
};
/* ------------------------------------------------ */
@ -121,6 +124,7 @@ namespace Mist{
public:
OutWebRTC(Socket::Connection &myConn);
~OutWebRTC();
bool hasSessionIDs(){return !config->getBool("mergesessions");}
static void init(Util::Config *cfg);
virtual void sendHeader();
virtual void sendNext();
@ -131,16 +135,17 @@ namespace Mist{
bool doesWebsockets(){return true;}
void handleWebRTCInputOutputFromThread();
int onDTLSHandshakeWantsToWrite(const uint8_t *data, int *nbytes);
void onRTPSorterHasPacket(const uint64_t trackID, const RTP::Packet &pkt);
void onRTPSorterHasPacket(size_t tid, const RTP::Packet &pkt);
void onDTSCConverterHasPacket(const DTSC::Packet &pkt);
void onDTSCConverterHasInitData(const uint64_t trackID, const std::string &initData);
void onRTPPacketizerHasRTPPacket(char *data, uint32_t nbytes);
void onRTPPacketizerHasRTCPPacket(char *data, uint32_t nbytes);
void onRTPPacketizerHasRTPPacket(const char *data, size_t nbytes);
void onRTPPacketizerHasRTCPPacket(const char *data, uint32_t nbytes);
private:
std::string externalAddr;
void ackNACK(uint32_t SSRC, uint16_t seq);
bool handleWebRTCInputOutput(); ///< Reads data from the UDP socket. Returns true when we read some data, othewise false.
bool handleWebRTCInputOutput(); ///< Reads data from the UDP socket. Returns true when we read
///< some data, othewise false.
void handleReceivedSTUNPacket();
void handleReceivedDTLSPacket();
void handleReceivedRTPOrRTCPPacket();
@ -155,44 +160,64 @@ namespace Mist{
void sendRTCPFeedbackRR(WebRTCTrack &rtcTrack);
void sendRTCPFeedbackNACK(const WebRTCTrack &rtcTrack,
uint16_t missingSequenceNumber); ///< Notify sender that we're missing a sequence number.
void sendSPSPPS(DTSC::Track &dtscTrack, WebRTCTrack &rtcTrack); ///< When we're streaming H264 to e.g. the browser we inject the PPS and SPS nals.
void sendSPSPPS(size_t dtscIdx,
WebRTCTrack &rtcTrack); ///< When we're streaming H264 to e.g. the browser we
///< inject the PPS and SPS nals.
void extractFrameSizeFromVP8KeyFrame(const DTSC::Packet &pkt);
void updateCapabilitiesWithSDPOffer(SDP::Session &sdpSession);
bool bindUDPSocketOnLocalCandidateAddress(
uint16_t port); ///< Binds our UDP socket onto the IP address that we shared via our SDP answer.
///< We *have to* bind on a specific IP, see https://gist.github.com/roxlu/6c5ab696840256dac71b6247bab59ce9
bool bindUDPSocketOnLocalCandidateAddress(uint16_t port); ///< Binds our UDP socket onto the IP address that we shared via our SDP
///< answer. We *have to* bind on a specific IP, see
///< https://gist.github.com/roxlu/6c5ab696840256dac71b6247bab59ce9
std::string getLocalCandidateAddress();
private:
SDP::Session sdp; ///< SDP parser.
SDP::Answer sdpAnswer; ///< WIP: Replacing our `sdp` member ..
Certificate cert; ///< The TLS certificate. Used to generate a fingerprint in SDP answers.
DTLSSRTPHandshake dtlsHandshake; ///< Implements the DTLS handshake using the mbedtls library (fork).
SRTPReader srtpReader; ///< Used to unprotect incoming RTP and RTCP data. Uses the keys that were exchanged with DTLS.
SRTPWriter srtpWriter; ///< Used to protect our RTP and RTCP data when sending data to another peer. Uses the keys that were exchanged with DTLS.
SRTPReader srtpReader; ///< Used to unprotect incoming RTP and RTCP data. Uses the keys that
///< were exchanged with DTLS.
SRTPWriter srtpWriter; ///< Used to protect our RTP and RTCP data when sending data to another
///< peer. Uses the keys that were exchanged with DTLS.
Socket::UDPConnection udp; ///< Our UDP socket over which WebRTC data is received and sent.
StunReader stunReader; ///< Decodes STUN messages; during a session we keep receiving STUN messages to which we need to reply.
std::map<uint64_t, WebRTCTrack> webrtcTracks; ///< WebRTCTracks indexed by payload type for incoming data and indexed by myMeta.tracks[].trackID for outgoing data.
tthread::thread *webRTCInputOutputThread; ///< The thread in which we read WebRTC data when we're receive media from another peer.
uint16_t udpPort; ///< The port on which our webrtc socket is bound. This is where we receive RTP, STUN, DTLS, etc. */
StunReader stunReader; ///< Decodes STUN messages; during a session we keep receiving STUN
///< messages to which we need to reply.
std::map<uint64_t, WebRTCTrack> webrtcTracks; ///< WebRTCTracks indexed by payload type for incoming data and indexed by
///< myMeta.tracks[].trackID for outgoing data.
tthread::thread *webRTCInputOutputThread; ///< The thread in which we read WebRTC data when
///< we're receive media from another peer.
uint16_t udpPort; ///< The port on which our webrtc socket is bound. This is where we receive
///< RTP, STUN, DTLS, etc. */
uint32_t SSRC; ///< The SSRC for this local instance. Is used when generating RTCP reports. */
uint64_t rtcpTimeoutInMillis; ///< When current time in millis exceeds this timeout we have to send a new RTCP packet.
uint64_t rtcpTimeoutInMillis; ///< When current time in millis exceeds this timeout we have to
///< send a new RTCP packet.
uint64_t rtcpKeyFrameTimeoutInMillis;
uint64_t rtcpKeyFrameDelayInMillis;
Util::ResizeablePointer rtpOutBuffer; ///< Buffer into which we copy (unprotected) RTP data that we need to deliver to the other peer. This gets protected.
uint32_t videoBitrate; ///< The bitrate to use for incoming video streams. Can be configured via the signaling channel. Defaults to 6mbit.
Util::ResizeablePointer rtpOutBuffer; ///< Buffer into which we copy (unprotected) RTP data that we need to deliver
///< to the other peer. This gets protected.
uint32_t videoBitrate; ///< The bitrate to use for incoming video streams. Can be configured via
///< the signaling channel. Defaults to 6mbit.
uint32_t audTrack, vidTrack;
size_t audTrack, vidTrack, prevVidTrack;
bool didReceiveKeyFrame; /* TODO burst delay */
int64_t packetOffset; ///< For timestamp rewrite with BMO
uint64_t lastTimeSync;
bool firstKey;
bool repeatInit;
bool stayLive;
#if defined(WEBRTC_PCAP)
PCAPWriter pcapOut; ///< Used during development to write unprotected packets that can be inspected in e.g. wireshark.
PCAPWriter pcapIn; ///< Used during development to write unprotected packets that can be inspected in e.g. wireshark.
PCAPWriter pcapOut; ///< Used during development to write unprotected packets that can be
///< inspected in e.g. wireshark.
PCAPWriter pcapIn; ///< Used during development to write unprotected packets that can be
///< inspected in e.g. wireshark.
#endif
std::map<uint8_t, uint64_t> payloadTypeToWebRTCTrack; ///< Maps e.g. RED to the corresponding track. Used when input supports RED/ULPFEC; can also be used to map RTX in the future.
std::map<uint8_t, uint64_t> payloadTypeToWebRTCTrack; ///< Maps e.g. RED to the corresponding track. Used when input
///< supports RED/ULPFEC; can also be used to map RTX in the
///< future.
std::map<uint32_t, nackBuffer> outBuffers;
std::map<size_t, uint64_t> lastSR;
};
}// namespace Mist