Shared memory rewrite
This commit is contained in:
parent
afcddbfca6
commit
cd2fe225c5
81 changed files with 7775 additions and 5411 deletions
21
src/output/mist_out.cpp
Normal file
21
src/output/mist_out.cpp
Normal file
|
@ -0,0 +1,21 @@
|
|||
#include OUTPUTTYPE
|
||||
#include <mist/config.h>
|
||||
#include <mist/socket.h>
|
||||
|
||||
int spawnForked(Socket::Connection & S){
|
||||
mistOut tmp(S);
|
||||
return tmp.run();
|
||||
}
|
||||
|
||||
int main(int argc, char * argv[]) {
|
||||
Util::Config conf(argv[0], PACKAGE_VERSION);
|
||||
mistOut::init(&conf);
|
||||
if (conf.parseArgs(argc, argv)) {
|
||||
if (conf.getBool("json")) {
|
||||
std::cout << mistOut::capa.toString() << std::endl;
|
||||
return -1;
|
||||
}
|
||||
conf.serveForkedSocket(spawnForked);
|
||||
}
|
||||
return 0;
|
||||
}
|
493
src/output/output.cpp
Normal file
493
src/output/output.cpp
Normal file
|
@ -0,0 +1,493 @@
|
|||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
#include <sys/wait.h>
|
||||
#include <unistd.h>
|
||||
#include <semaphore.h>
|
||||
#include <iterator> //std::distance
|
||||
|
||||
#include <mist/stream.h>
|
||||
#include <mist/defines.h>
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/timing.h>
|
||||
#include "output.h"
|
||||
|
||||
namespace Mist {
|
||||
Util::Config * Output::config = NULL;
|
||||
JSON::Value Output::capa = JSON::Value();
|
||||
|
||||
int getDTSCLen(char * mapped, long long int offset){
|
||||
return ntohl(((int*)(mapped + offset))[1]);
|
||||
}
|
||||
|
||||
long long int getDTSCTime(char * mapped, long long int offset){
|
||||
char * timePoint = mapped + offset + 12;
|
||||
return ((long long int)timePoint[0] << 56) | ((long long int)timePoint[1] << 48) | ((long long int)timePoint[2] << 40) | ((long long int)timePoint[3] << 32) | ((long long int)timePoint[4] << 24) | ((long long int)timePoint[5] << 16) | ((long long int)timePoint[6] << 8) | timePoint[7];
|
||||
}
|
||||
|
||||
Output::Output(Socket::Connection & conn) : myConn(conn) {
|
||||
firstTime = 0;
|
||||
parseData = false;
|
||||
wantRequest = true;
|
||||
isInitialized = false;
|
||||
isBlocking = false;
|
||||
lastStats = 0;
|
||||
maxSkipAhead = 7500;
|
||||
minSkipAhead = 5000;
|
||||
realTime = 1000;
|
||||
if (myConn){
|
||||
setBlocking(true);
|
||||
}else{
|
||||
DEBUG_MSG(DLVL_WARN, "Warning: MistOut created with closed socket!");
|
||||
}
|
||||
sentHeader = false;
|
||||
}
|
||||
|
||||
void Output::setBlocking(bool blocking){
|
||||
isBlocking = blocking;
|
||||
myConn.setBlocking(isBlocking);
|
||||
}
|
||||
|
||||
Output::~Output(){
|
||||
statsPage.finish();
|
||||
playerConn.finish();
|
||||
}
|
||||
|
||||
void Output::updateMeta(){
|
||||
unsigned int i = 0;
|
||||
//read metadata from page to myMeta variable
|
||||
JSON::Value jsonMeta;
|
||||
JSON::fromDTMI((const unsigned char*)streamIndex.mapped + 8, streamIndex.len - 8, i, jsonMeta);
|
||||
myMeta = DTSC::Meta(jsonMeta);
|
||||
}
|
||||
|
||||
/// Called when stream initialization has failed.
|
||||
/// The standard implementation will set isInitialized to false and close the client connection,
|
||||
/// thus causing the process to exit cleanly.
|
||||
void Output::onFail(){
|
||||
isInitialized = false;
|
||||
myConn.close();
|
||||
}
|
||||
|
||||
void Output::initialize(){
|
||||
if (isInitialized){
|
||||
return;
|
||||
}
|
||||
if (streamIndex.mapped){
|
||||
return;
|
||||
}
|
||||
isInitialized = true;
|
||||
streamIndex.init(streamName,0,false,false);
|
||||
if (!streamIndex.mapped){
|
||||
sem_t * waiting = sem_open(std::string("/wait_" + streamName).c_str(), O_CREAT | O_RDWR, ACCESSPERMS, 0);
|
||||
Util::Stream::getStream(streamName);
|
||||
if (waiting == SEM_FAILED){
|
||||
DEBUG_MSG(DLVL_FAIL, "Failed to open semaphore - cancelling");
|
||||
onFail();
|
||||
return;
|
||||
}
|
||||
#ifdef __APPLE__
|
||||
unsigned int timeout = 0;
|
||||
while (++timeout < 300 && sem_trywait(waiting) == -1 && (errno == EINTR || errno == EAGAIN) ){
|
||||
Util::sleep(100);
|
||||
}
|
||||
#else
|
||||
struct timespec ts;
|
||||
ts.tv_sec = Util::epoch() + 30;
|
||||
ts.tv_nsec = 0;
|
||||
while (sem_timedwait(waiting, &ts) == -1 && errno == EINTR) continue;
|
||||
#endif
|
||||
sem_post(waiting);
|
||||
sem_close(waiting);
|
||||
streamIndex.init(streamName,0);
|
||||
}
|
||||
if (!streamIndex.mapped){
|
||||
DEBUG_MSG(DLVL_FAIL, "Could not connect to server for %s\n", streamName.c_str());
|
||||
onFail();
|
||||
return;
|
||||
}
|
||||
statsPage = IPC::sharedClient("statistics", 88, true);
|
||||
playerConn = IPC::sharedClient(streamName + "_users", 30, true);
|
||||
|
||||
updateMeta();
|
||||
|
||||
//check which tracks don't actually exist
|
||||
std::set<long unsigned int> toRemove;
|
||||
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
|
||||
if (!myMeta.tracks.count(*it)){
|
||||
toRemove.insert(*it);
|
||||
}
|
||||
}
|
||||
//remove those from selectedtracks
|
||||
for (std::set<long unsigned int>::iterator it = toRemove.begin(); it != toRemove.end(); it++){
|
||||
selectedTracks.erase(*it);
|
||||
}
|
||||
|
||||
//loop through all codec combinations, count max simultaneous active
|
||||
unsigned int bestSoFar = 0;
|
||||
unsigned int bestSoFarCount = 0;
|
||||
unsigned int index = 0;
|
||||
for (JSON::ArrIter it = capa["codecs"].ArrBegin(); it != capa["codecs"].ArrEnd(); it++){
|
||||
unsigned int genCounter = 0;
|
||||
unsigned int selCounter = 0;
|
||||
if ((*it).size() > 0){
|
||||
for (JSON::ArrIter itb = (*it).ArrBegin(); itb != (*it).ArrEnd(); itb++){
|
||||
if ((*itb).size() > 0){
|
||||
bool found = false;
|
||||
for (JSON::ArrIter itc = (*itb).ArrBegin(); itc != (*itb).ArrEnd() && !found; itc++){
|
||||
for (std::set<long unsigned int>::iterator itd = selectedTracks.begin(); itd != selectedTracks.end(); itd++){
|
||||
if (myMeta.tracks[*itd].codec == (*itc).asStringRef()){
|
||||
selCounter++;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found){
|
||||
for (std::map<int,DTSC::Track>::iterator trit = myMeta.tracks.begin(); trit != myMeta.tracks.end(); trit++){
|
||||
if (trit->second.codec == (*itc).asStringRef()){
|
||||
genCounter++;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (selCounter == selectedTracks.size()){
|
||||
if (selCounter + genCounter > bestSoFarCount){
|
||||
bestSoFarCount = selCounter + genCounter;
|
||||
bestSoFar = index;
|
||||
DEBUG_MSG(DLVL_HIGH, "Match (%u/%u): %s", selCounter, selCounter+genCounter, (*it).toString().c_str());
|
||||
}
|
||||
}else{
|
||||
DEBUG_MSG(DLVL_VERYHIGH, "Not a match for currently selected tracks: %s", (*it).toString().c_str());
|
||||
}
|
||||
}
|
||||
index++;
|
||||
}
|
||||
|
||||
DEBUG_MSG(DLVL_MEDIUM, "Trying to fill: %s", capa["codecs"][bestSoFar].toString().c_str());
|
||||
//try to fill as many codecs simultaneously as possible
|
||||
if (capa["codecs"][bestSoFar].size() > 0){
|
||||
for (JSON::ArrIter itb = capa["codecs"][bestSoFar].ArrBegin(); itb != capa["codecs"][bestSoFar].ArrEnd(); itb++){
|
||||
if ((*itb).size() > 0){
|
||||
bool found = false;
|
||||
for (JSON::ArrIter itc = (*itb).ArrBegin(); itc != (*itb).ArrEnd() && !found; itc++){
|
||||
for (std::set<long unsigned int>::iterator itd = selectedTracks.begin(); itd != selectedTracks.end(); itd++){
|
||||
if (myMeta.tracks[*itd].codec == (*itc).asStringRef()){
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found){
|
||||
for (std::map<int,DTSC::Track>::iterator trit = myMeta.tracks.begin(); trit != myMeta.tracks.end(); trit++){
|
||||
if (trit->second.codec == (*itc).asStringRef()){
|
||||
selectedTracks.insert(trit->first);
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if DEBUG >= DLVL_MEDIUM
|
||||
//print the selected tracks
|
||||
std::stringstream selected;
|
||||
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
|
||||
if (it != selectedTracks.begin()){
|
||||
selected << ", ";
|
||||
}
|
||||
selected << (*it);
|
||||
}
|
||||
DEBUG_MSG(DLVL_MEDIUM, "Selected tracks: %s", selected.str().c_str());
|
||||
#endif
|
||||
|
||||
unsigned int firstms = 0x0;
|
||||
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
|
||||
lastKeyTime[*it] = 0xFFFFFFFF;
|
||||
if (myMeta.tracks[*it].firstms > firstms){
|
||||
firstms = myMeta.tracks[*it].firstms;
|
||||
}
|
||||
}
|
||||
if (myMeta.live){
|
||||
if (firstms < 5000){
|
||||
firstms = 0;
|
||||
}
|
||||
seek(firstms);
|
||||
}else{
|
||||
seek(0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Clears the buffer, sets parseData to false, and generally makes not very much happen at all.
|
||||
void Output::stop(){
|
||||
buffer.clear();
|
||||
parseData = false;
|
||||
}
|
||||
|
||||
unsigned int Output::getKeyForTime(long unsigned int trackId, long long timeStamp){
|
||||
unsigned int keyNo = 0;
|
||||
for (std::deque<DTSC::Key>::iterator it = myMeta.tracks[trackId].keys.begin(); it != myMeta.tracks[trackId].keys.end(); it++){
|
||||
if (it->getTime() <= timeStamp){
|
||||
keyNo = it->getNumber();
|
||||
}else{
|
||||
break;
|
||||
}
|
||||
}
|
||||
return keyNo;
|
||||
}
|
||||
|
||||
void Output::loadPageForKey(long unsigned int trackId, long long int keyNum){
|
||||
if (keyNum >= myMeta.tracks[trackId].keys.rbegin()->getNumber()){
|
||||
//curPages.erase(trackId);
|
||||
return;
|
||||
}
|
||||
DEBUG_MSG(DLVL_MEDIUM, "Loading track %lu, containing key %lld", trackId, keyNum);
|
||||
int pageNum = -1;
|
||||
int keyAmount = -1;
|
||||
unsigned int timeout = 0;
|
||||
if (!indexPages.count(trackId)){
|
||||
char id[100];
|
||||
sprintf(id, "%s%lu", streamName.c_str(), trackId);
|
||||
indexPages[trackId].init(id, 8192);
|
||||
}
|
||||
while (pageNum == -1 || keyAmount == -1){
|
||||
for (int i = 0; i < indexPages[trackId].len / 8; i++){
|
||||
long tmpKey = ntohl(((((long long int*)indexPages[trackId].mapped)[i]) >> 32) & 0xFFFFFFFF);
|
||||
long amountKey = ntohl((((long long int*)indexPages[trackId].mapped)[i]) & 0xFFFFFFFF);
|
||||
if (tmpKey <= keyNum && (tmpKey + amountKey) > keyNum){
|
||||
pageNum = tmpKey;
|
||||
keyAmount = amountKey;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (pageNum == -1 || keyAmount == -1){
|
||||
if (!timeout){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Requesting/waiting for page that has key %lu:%lld...", trackId, keyNum);
|
||||
}
|
||||
if (timeout++ > 100){
|
||||
DEBUG_MSG(DLVL_FAIL, "Timeout while waiting for requested page. Aborting.");
|
||||
curPages.erase(trackId);
|
||||
return;
|
||||
}
|
||||
nxtKeyNum[trackId] = keyNum-1;
|
||||
stats();
|
||||
Util::sleep(100);
|
||||
}
|
||||
}
|
||||
|
||||
nxtKeyNum[trackId] = pageNum;
|
||||
|
||||
if (currKeyOpen.count(trackId) && currKeyOpen[trackId] == pageNum){
|
||||
return;
|
||||
}
|
||||
char id[100];
|
||||
sprintf(id, "%s%lu_%d", streamName.c_str(), trackId, pageNum);
|
||||
curPages[trackId].init(std::string(id),0);
|
||||
if (!(curPages[trackId].mapped)){
|
||||
DEBUG_MSG(DLVL_FAIL, "(%d) Initializing page %s failed", getpid(), curPages[trackId].name.c_str());
|
||||
return;
|
||||
}
|
||||
currKeyOpen[trackId] = pageNum;
|
||||
}
|
||||
|
||||
/// Prepares all tracks from selectedTracks for seeking to the specified ms position.
|
||||
/// \todo Make this actually seek, instead of always loading position zero.
|
||||
void Output::seek(long long pos){
|
||||
firstTime = Util::getMS() - pos;
|
||||
if (!isInitialized){
|
||||
initialize();
|
||||
}
|
||||
buffer.clear();
|
||||
currentPacket.null();
|
||||
updateMeta();
|
||||
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
|
||||
seek(*it, pos);
|
||||
}
|
||||
}
|
||||
|
||||
bool Output::seek(int tid, long long pos, bool getNextKey){
|
||||
loadPageForKey(tid, getKeyForTime(tid, pos) + (getNextKey?1:0));
|
||||
if (!curPages.count(tid) || !curPages[tid].mapped){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Aborting seek to %llims in track %d, not available.", pos, tid);
|
||||
return false;
|
||||
}
|
||||
sortedPageInfo tmp;
|
||||
tmp.tid = tid;
|
||||
tmp.offset = 0;
|
||||
DTSC::Packet tmpPack;
|
||||
tmpPack.reInit(curPages[tid].mapped + tmp.offset, 0, true);
|
||||
tmp.time = tmpPack.getTime();
|
||||
while ((long long)tmp.time < pos && tmpPack){
|
||||
tmp.offset += tmpPack.getDataLen();
|
||||
tmpPack.reInit(curPages[tid].mapped + tmp.offset, 0, true);
|
||||
tmp.time = tmpPack.getTime();
|
||||
}
|
||||
if (tmpPack){
|
||||
buffer.insert(tmp);
|
||||
return true;
|
||||
}else{
|
||||
//don't print anything for empty packets - not sign of corruption, just unfinished stream.
|
||||
if (curPages[tid].mapped[tmp.offset] != 0){
|
||||
DEBUG_MSG(DLVL_FAIL, "Noes! Couldn't find packet on track %d because of some kind of corruption error or somesuch.", tid);
|
||||
}else{
|
||||
DEBUG_MSG(DLVL_FAIL, "Track %d no data (key %u) - waiting...", tid, getKeyForTime(tid, pos) + (getNextKey?1:0));
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
int Output::run() {
|
||||
bool firstData = true;//only the first time, we call OnRequest if there's data buffered already.
|
||||
DEBUG_MSG(DLVL_MEDIUM, "MistOut client handler started");
|
||||
while (myConn.connected() && (wantRequest || parseData)){
|
||||
stats();
|
||||
if (wantRequest){
|
||||
if ((firstData && myConn.Received().size()) || myConn.spool()){
|
||||
firstData = false;
|
||||
DEBUG_MSG(DLVL_VERYHIGH, "(%d) OnRequest", getpid());
|
||||
onRequest();
|
||||
}else{
|
||||
if (!isBlocking && !parseData){
|
||||
Util::sleep(500);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (parseData){
|
||||
if (!isInitialized){
|
||||
initialize();
|
||||
}
|
||||
if ( !sentHeader){
|
||||
DEBUG_MSG(DLVL_VERYHIGH, "(%d) SendHeader", getpid());
|
||||
sendHeader();
|
||||
}
|
||||
prepareNext();
|
||||
if (currentPacket){
|
||||
sendNext();
|
||||
}else{
|
||||
if (!onFinish()){
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
DEBUG_MSG(DLVL_MEDIUM, "MistOut client handler shutting down: %s, %s, %s", myConn.connected() ? "conn_active" : "conn_closed", wantRequest ? "want_request" : "no_want_request", parseData ? "parsing_data" : "not_parsing_data");
|
||||
myConn.close();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void Output::prepareNext(){
|
||||
static unsigned int emptyCount = 0;
|
||||
if (!buffer.size()){
|
||||
currentPacket.null();
|
||||
DEBUG_MSG(DLVL_DEVEL, "Buffer completely played out");
|
||||
return;
|
||||
}
|
||||
sortedPageInfo nxt = *(buffer.begin());
|
||||
buffer.erase(buffer.begin());
|
||||
|
||||
DEBUG_MSG(DLVL_VERYHIGH, "Loading track %u (next=%lu), part @ %u/%lld", nxt.tid, nxtKeyNum[nxt.tid], nxt.offset, curPages[nxt.tid].len);
|
||||
|
||||
if (nxt.offset >= curPages[nxt.tid].len){
|
||||
loadPageForKey(nxt.tid, ++nxtKeyNum[nxt.tid]);
|
||||
nxt.offset = 0;
|
||||
}
|
||||
|
||||
if (!curPages.count(nxt.tid) || !curPages[nxt.tid].mapped){
|
||||
//mapping failure? Drop this track and go to next.
|
||||
//not an error - usually means end of stream.
|
||||
DEBUG_MSG(DLVL_DEVEL, "Track %u no page - dropping track.", nxt.tid);
|
||||
prepareNext();
|
||||
return;
|
||||
}
|
||||
|
||||
if (!memcmp(curPages[nxt.tid].mapped + nxt.offset, "\000\000\000\000", 4)){
|
||||
if (!currentPacket.getTime()){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Timeless empty packet on track %u - dropping track.", nxt.tid);
|
||||
prepareNext();
|
||||
return;
|
||||
}
|
||||
Util::sleep(500);
|
||||
updateMeta();
|
||||
if (myMeta && ++emptyCount < 20){
|
||||
if (!seek(nxt.tid, currentPacket.getTime(), true)){
|
||||
buffer.insert(nxt);
|
||||
}
|
||||
}else{
|
||||
DEBUG_MSG(DLVL_DEVEL, "Empty packet on track %u - could not reload, dropping track.", nxt.tid);
|
||||
}
|
||||
prepareNext();
|
||||
return;
|
||||
}
|
||||
currentPacket.reInit(curPages[nxt.tid].mapped + nxt.offset, 0, true);
|
||||
if (currentPacket){
|
||||
nxtKeyNum[nxt.tid] = getKeyForTime(nxt.tid, currentPacket.getTime());
|
||||
emptyCount = 0;
|
||||
}
|
||||
nxt.offset += currentPacket.getDataLen();
|
||||
if (realTime && !myMeta.live){
|
||||
while (nxt.time > (Util::getMS() - firstTime + maxSkipAhead)*1000/realTime) {
|
||||
Util::sleep(nxt.time - (Util::getMS() - firstTime + minSkipAhead)*1000/realTime);
|
||||
}
|
||||
}
|
||||
if (curPages[nxt.tid]){
|
||||
if (nxt.offset < curPages[nxt.tid].len){
|
||||
nxt.time = getDTSCTime(curPages[nxt.tid].mapped, nxt.offset);
|
||||
}
|
||||
buffer.insert(nxt);
|
||||
}
|
||||
playerConn.keepAlive();
|
||||
}
|
||||
|
||||
void Output::stats(){
|
||||
if (!statsPage.getData()){
|
||||
return;
|
||||
}
|
||||
unsigned long long int now = Util::epoch();
|
||||
if (now != lastStats){
|
||||
lastStats = now;
|
||||
IPC::statExchange tmpEx(statsPage.getData());
|
||||
tmpEx.now(now);
|
||||
tmpEx.host(myConn.getHost());
|
||||
tmpEx.streamName(streamName);
|
||||
tmpEx.connector(capa["name"].asString());
|
||||
tmpEx.up(myConn.dataUp());
|
||||
tmpEx.down(myConn.dataDown());
|
||||
tmpEx.time(now - myConn.connTime());
|
||||
statsPage.keepAlive();
|
||||
}
|
||||
int tNum = 0;
|
||||
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end() && tNum < 5; it++){
|
||||
char thisData[6];
|
||||
thisData[0] = ((*it >> 24) & 0xFF);
|
||||
thisData[1] = ((*it >> 16) & 0xFF);
|
||||
thisData[2] = ((*it >> 8) & 0xFF);
|
||||
thisData[3] = ((*it) & 0xFF);
|
||||
thisData[4] = ((nxtKeyNum[*it] >> 8) & 0xFF);
|
||||
thisData[5] = ((nxtKeyNum[*it]) & 0xFF);
|
||||
memcpy(playerConn.getData() + (6 * tNum), thisData, 6);
|
||||
tNum ++;
|
||||
playerConn.keepAlive();
|
||||
}
|
||||
if (tNum >= 5){
|
||||
DEBUG_MSG(DLVL_WARN, "Too many tracks selected, using only first 5");
|
||||
}
|
||||
}
|
||||
|
||||
void Output::onRequest(){
|
||||
//simply clear the buffer, we don't support any kind of input by default
|
||||
myConn.Received().clear();
|
||||
wantRequest = false;
|
||||
}
|
||||
|
||||
void Output::sendHeader(){
|
||||
//just set the sentHeader bool to true, by default
|
||||
sentHeader = true;
|
||||
}
|
||||
|
||||
}
|
||||
|
98
src/output/output.h
Normal file
98
src/output/output.h
Normal file
|
@ -0,0 +1,98 @@
|
|||
#include <set>
|
||||
#include <cstdlib>
|
||||
#include <map>
|
||||
#include <mist/config.h>
|
||||
#include <mist/json.h>
|
||||
#include <mist/flv_tag.h>
|
||||
#include <mist/timing.h>
|
||||
#include <mist/dtsc.h>
|
||||
#include <mist/socket.h>
|
||||
#include <mist/shared_memory.h>
|
||||
|
||||
namespace Mist {
|
||||
|
||||
/// This struct keeps packet information sorted in playback order, so the
|
||||
/// Mist::Output class knows when to buffer which packet.
|
||||
struct sortedPageInfo{
|
||||
bool operator < (const sortedPageInfo & rhs) const {
|
||||
if (time < rhs.time){
|
||||
return true;
|
||||
}
|
||||
return (time == rhs.time && tid < rhs.tid);
|
||||
}
|
||||
int tid;
|
||||
long long unsigned int time;
|
||||
unsigned int offset;
|
||||
};
|
||||
|
||||
/// The output class is intended to be inherited by MistOut process classes.
|
||||
/// It contains all generic code and logic, while the child classes implement
|
||||
/// anything specific to particular protocols or containers.
|
||||
/// It contains several virtual functions, that may be overridden to "hook" into
|
||||
/// the streaming process at those particular points, simplifying child class
|
||||
/// logic and implementation details.
|
||||
class Output {
|
||||
public:
|
||||
//constructor and destructor
|
||||
Output(Socket::Connection & conn);
|
||||
virtual ~Output();
|
||||
//static members for initialization and capabilities
|
||||
static void init(Util::Config * cfg) {}
|
||||
static JSON::Value capa;
|
||||
//non-virtual generic functions
|
||||
int run();
|
||||
void stats();
|
||||
void seek(long long pos);
|
||||
bool seek(int tid, long long pos, bool getNextKey = false);
|
||||
void stop();
|
||||
void setBlocking(bool blocking);
|
||||
void updateMeta();
|
||||
//virtuals. The optional virtuals have default implementations that do as little as possible.
|
||||
virtual void sendNext() {}//REQUIRED! Others are optional.
|
||||
virtual void prepareNext();
|
||||
virtual void onRequest();
|
||||
virtual bool onFinish(){return false;}
|
||||
virtual void initialize();
|
||||
virtual void sendHeader();
|
||||
virtual void onFail();
|
||||
private://these *should* not be messed with in child classes.
|
||||
std::map<unsigned long, unsigned int> currKeyOpen;
|
||||
void loadPageForKey(long unsigned int trackId, long long int keyNum);
|
||||
bool isBlocking;///< If true, indicates that myConn is blocking.
|
||||
unsigned int lastStats;///<Time of last sending of stats.
|
||||
IPC::sharedClient statsPage;///< Shared memory used for statistics reporting.
|
||||
long long unsigned int firstTime;///< Time of first packet after last seek. Used for real-time sending.
|
||||
std::map<unsigned long, unsigned long> nxtKeyNum;///< Contains the number of the next key, for page seeking purposes.
|
||||
std::set<sortedPageInfo> buffer;///< A sorted list of next-to-be-loaded packets.
|
||||
std::map<unsigned long, unsigned long> lastKeyTime;///< Stores the time of the last keyframe, for preventing duplicates
|
||||
protected://these are to be messed with by child classes
|
||||
unsigned int getKeyForTime(long unsigned int trackId, long long timeStamp);
|
||||
IPC::sharedPage streamIndex;///< Shared memory used for metadata
|
||||
std::map<int,IPC::sharedPage> indexPages;///< Maintains index pages of each track, holding information about available pages with DTSC packets.
|
||||
std::map<int,IPC::sharedPage> curPages;///< Holds the currently used pages with DTSC packets for each track.
|
||||
/// \todo Privitize keyTimes
|
||||
IPC::sharedClient playerConn;///< Shared memory used for connection to MistIn process.
|
||||
std::map<int,std::set<int> > keyTimes;///< Per-track list of keyframe times, for keyframe detection.
|
||||
//static member for initialization
|
||||
static Util::Config * config;///< Static, global configuration for the MistOut process
|
||||
|
||||
//stream delaying variables
|
||||
unsigned int maxSkipAhead;///< Maximum ms that we will go ahead of the intended timestamps.
|
||||
unsigned int minSkipAhead;///< Minimum ms that we will go ahead of the intended timestamps.
|
||||
unsigned int realTime;///< Playback speed times 1000 (1000 == 1.0X). Zero is infinite.
|
||||
|
||||
//Read/write status variables
|
||||
Socket::Connection & myConn;///< Connection to the client.
|
||||
std::string streamName;///< Name of the stream that will be opened by initialize()
|
||||
std::set<unsigned long> selectedTracks; ///< Tracks that are selected for playback
|
||||
bool wantRequest;///< If true, waits for a request.
|
||||
bool parseData;///< If true, triggers initalization if not already done, sending of header, sending of packets.
|
||||
bool isInitialized;///< If false, triggers initialization if parseData is true.
|
||||
bool sentHeader;///< If false, triggers sendHeader if parseData is true.
|
||||
|
||||
//Read-only stream data variables
|
||||
DTSC::Packet currentPacket;///< The packet that is ready for sending now.
|
||||
DTSC::Meta myMeta;///< Up to date stream metadata
|
||||
};
|
||||
|
||||
}
|
266
src/output/output_hds.cpp
Normal file
266
src/output/output_hds.cpp
Normal file
|
@ -0,0 +1,266 @@
|
|||
#include "output_hds.h"
|
||||
#include <mist/defines.h>
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/stream.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <mist/amf.h>
|
||||
#include <mist/mp4_adobe.h>
|
||||
|
||||
namespace Mist {
|
||||
|
||||
void OutHDS::getTracks(){
|
||||
/// \todo Why do we have only one audio track option?
|
||||
videoTracks.clear();
|
||||
audioTrack = 0;
|
||||
for (std::map<int,DTSC::Track>::iterator it = myMeta.tracks.begin(); it != myMeta.tracks.end(); it++){
|
||||
if (it->second.codec == "H264" || it->second.codec == "H263" || it->second.codec == "VP6"){
|
||||
videoTracks.insert(it->first);
|
||||
}
|
||||
if (it->second.codec == "AAC" || it->second.codec == "MP3"){
|
||||
audioTrack = it->first;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///\brief Builds a bootstrap for use in HTTP Dynamic streaming.
|
||||
///\param tid The track this bootstrap is generated for.
|
||||
///\return The generated bootstrap.
|
||||
std::string OutHDS::dynamicBootstrap(int tid){
|
||||
updateMeta();
|
||||
std::string empty;
|
||||
|
||||
MP4::ASRT asrt;
|
||||
asrt.setUpdate(false);
|
||||
asrt.setVersion(1);
|
||||
//asrt.setQualityEntry(empty, 0);
|
||||
if (myMeta.live){
|
||||
asrt.setSegmentRun(1, 4294967295ul, 0);
|
||||
}else{
|
||||
asrt.setSegmentRun(1, myMeta.tracks[tid].keys.size(), 0);
|
||||
}
|
||||
|
||||
MP4::AFRT afrt;
|
||||
afrt.setUpdate(false);
|
||||
afrt.setVersion(1);
|
||||
afrt.setTimeScale(1000);
|
||||
//afrt.setQualityEntry(empty, 0);
|
||||
MP4::afrt_runtable afrtrun;
|
||||
int i = 0;
|
||||
for (std::deque<DTSC::Key>::iterator it = myMeta.tracks[tid].keys.begin(); it != myMeta.tracks[tid].keys.end(); it++){
|
||||
if (it->getLength()){
|
||||
afrtrun.firstFragment = it->getNumber();
|
||||
afrtrun.firstTimestamp = it->getTime();
|
||||
afrtrun.duration = it->getLength();
|
||||
afrt.setFragmentRun(afrtrun, i);
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
MP4::ABST abst;
|
||||
abst.setVersion(1);
|
||||
abst.setBootstrapinfoVersion(1);
|
||||
abst.setProfile(0);
|
||||
abst.setUpdate(false);
|
||||
abst.setTimeScale(1000);
|
||||
abst.setLive(myMeta.live);
|
||||
abst.setCurrentMediaTime(myMeta.tracks[tid].lastms);
|
||||
abst.setSmpteTimeCodeOffset(0);
|
||||
abst.setMovieIdentifier(streamName);
|
||||
abst.setSegmentRunTable(asrt, 0);
|
||||
abst.setFragmentRunTable(afrt, 0);
|
||||
|
||||
DEBUG_MSG(DLVL_VERYHIGH, "Sending bootstrap: %s", abst.toPrettyString(0).c_str());
|
||||
return std::string((char*)abst.asBox(), (int)abst.boxedSize());
|
||||
}
|
||||
|
||||
///\brief Builds an index file for HTTP Dynamic streaming.
|
||||
///\return The index file for HTTP Dynamic Streaming.
|
||||
std::string OutHDS::dynamicIndex(){
|
||||
getTracks();
|
||||
std::stringstream Result;
|
||||
Result << "<?xml version=\"1.0\" encoding=\"utf-8\"?>" << std::endl;
|
||||
Result << " <manifest xmlns=\"http://ns.adobe.com/f4m/1.0\">" << std::endl;
|
||||
Result << " <id>" << streamName << "</id>" << std::endl;
|
||||
Result << " <mimeType>video/mp4</mimeType>" << std::endl;
|
||||
Result << " <deliveryType>streaming</deliveryType>" << std::endl;
|
||||
if (myMeta.vod){
|
||||
Result << " <duration>" << myMeta.tracks[*videoTracks.begin()].lastms / 1000 << ".000</duration>" << std::endl;
|
||||
Result << " <streamType>recorded</streamType>" << std::endl;
|
||||
}else{
|
||||
Result << " <duration>0.00</duration>" << std::endl;
|
||||
Result << " <streamType>live</streamType>" << std::endl;
|
||||
}
|
||||
for (std::set<int>::iterator it = videoTracks.begin(); it != videoTracks.end(); it++){
|
||||
Result << " <bootstrapInfo "
|
||||
"profile=\"named\" "
|
||||
"id=\"boot" << (*it) << "\" "
|
||||
"url=\"" << (*it) << ".abst\">"
|
||||
"</bootstrapInfo>" << std::endl;
|
||||
Result << " <media "
|
||||
"url=\"" << (*it) << "-\" "
|
||||
"bitrate=\"" << myMeta.tracks[(*it)].bps * 8 << "\" "
|
||||
"bootstrapInfoId=\"boot" << (*it) << "\" "
|
||||
"width=\"" << myMeta.tracks[(*it)].width << "\" "
|
||||
"height=\"" << myMeta.tracks[(*it)].height << "\">" << std::endl;
|
||||
Result << " <metadata>AgAKb25NZXRhRGF0YQMAAAk=</metadata>" << std::endl;
|
||||
Result << " </media>" << std::endl;
|
||||
}
|
||||
Result << "</manifest>" << std::endl;
|
||||
DEBUG_MSG(DLVL_HIGH, "Sending manifest: %s", Result.str().c_str());
|
||||
return Result.str();
|
||||
} //BuildManifest
|
||||
|
||||
OutHDS::OutHDS(Socket::Connection & conn) : Output(conn) {
|
||||
audioTrack = 0;
|
||||
playUntil = 0;
|
||||
}
|
||||
|
||||
void OutHDS::onFail(){
|
||||
HTTP_S.Clean(); //make sure no parts of old requests are left in any buffers
|
||||
HTTP_S.SetBody("Stream not found. Sorry, we tried.");
|
||||
HTTP_S.SendResponse("404", "Stream not found", myConn);
|
||||
Output::onFail();
|
||||
}
|
||||
|
||||
OutHDS::~OutHDS() {}
|
||||
|
||||
void OutHDS::init(Util::Config * cfg){
|
||||
capa["desc"] = "Enables HTTP protocol Adobe-specific dynamic streaming (also known as HDS).";
|
||||
capa["deps"] = "HTTP";
|
||||
capa["url_rel"] = "/dynamic/$/manifest.f4m";
|
||||
capa["url_prefix"] = "/dynamic/$/";
|
||||
capa["socket"] = "http_hds";
|
||||
capa["codecs"][0u][0u].append("H264");
|
||||
capa["codecs"][0u][0u].append("H263");
|
||||
capa["codecs"][0u][0u].append("VP6");
|
||||
capa["codecs"][0u][1u].append("AAC");
|
||||
capa["codecs"][0u][1u].append("MP3");
|
||||
capa["methods"][0u]["handler"] = "http";
|
||||
capa["methods"][0u]["type"] = "flash/11";
|
||||
capa["methods"][0u]["priority"] = 7ll;
|
||||
cfg->addBasicConnectorOptions(capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
void OutHDS::sendNext(){
|
||||
if (currentPacket.getTime() >= playUntil){
|
||||
DEBUG_MSG(DLVL_DEVEL, "(%d) Done sending fragment", getpid() );
|
||||
stop();
|
||||
wantRequest = true;
|
||||
HTTP_S.Chunkify("", 0, myConn);
|
||||
return;
|
||||
}
|
||||
tag.DTSCLoader(currentPacket, myMeta.tracks[currentPacket.getTrackId()]);
|
||||
HTTP_S.Chunkify(tag.data, tag.len, myConn);
|
||||
}
|
||||
|
||||
void OutHDS::onRequest(){
|
||||
HTTP_R.Clean();
|
||||
while (HTTP_R.Read(myConn)){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Received request: %s", HTTP_R.getUrl().c_str());
|
||||
if (HTTP_R.url.find(".abst") != std::string::npos){
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
std::string streamID = HTTP_R.url.substr(streamName.size() + 10);
|
||||
streamID = streamID.substr(0, streamID.find(".abst"));
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody(dynamicBootstrap(atoll(streamID.c_str())));
|
||||
HTTP_S.SetHeader("Content-Type", "binary/octet");
|
||||
HTTP_S.SetHeader("Cache-Control", "no-cache");
|
||||
HTTP_S.SendResponse("200", "OK", myConn);
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
continue;
|
||||
}
|
||||
if (HTTP_R.url.find("f4m") == std::string::npos){
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
initialize();
|
||||
std::string tmp_qual = HTTP_R.url.substr(HTTP_R.url.find("/", 10) + 1);
|
||||
unsigned int tid;
|
||||
unsigned int fragNum;
|
||||
tid = atoi(tmp_qual.substr(0, tmp_qual.find("Seg") - 1).c_str());
|
||||
int temp;
|
||||
temp = HTTP_R.url.find("Seg") + 3;
|
||||
temp = HTTP_R.url.find("Frag") + 4;
|
||||
fragNum = atoi(HTTP_R.url.substr(temp).c_str());
|
||||
DEBUG_MSG(DLVL_MEDIUM, "Video track %d, fragment %d\n", tid, fragNum);
|
||||
if (!audioTrack){getTracks();}
|
||||
unsigned int mstime = 0;
|
||||
unsigned int mslen = 0;
|
||||
for (std::deque<DTSC::Key>::iterator it = myMeta.tracks[tid].keys.begin(); it != myMeta.tracks[tid].keys.end(); it++){
|
||||
if (it->getNumber() >= fragNum){
|
||||
mstime = it->getTime();
|
||||
mslen = it->getLength();
|
||||
if (myMeta.live){
|
||||
if (it == myMeta.tracks[tid].keys.end() - 2){
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody("Proxy, re-request this in a second or two.\n");
|
||||
HTTP_S.SendResponse("208", "Ask again later", myConn);
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
std::cout << "Fragment after fragment " << fragNum << " not available yet" << std::endl;
|
||||
/*
|
||||
///\todo patch this back in?
|
||||
if (ss.spool()){
|
||||
while (Strm.parsePacket(ss.Received())){}
|
||||
}
|
||||
*/
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (HTTP_R.url == "/"){continue;}//Don't continue, but continue instead.
|
||||
if (myMeta.live){
|
||||
if (mstime == 0 && fragNum > 1){
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody("The requested fragment is no longer kept in memory on the server and cannot be served.\n");
|
||||
HTTP_S.SendResponse("412", "Fragment out of range", myConn);
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
std::cout << "Fragment " << fragNum << " too old" << std::endl;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
selectedTracks.clear();
|
||||
selectedTracks.insert(tid);
|
||||
selectedTracks.insert(audioTrack);
|
||||
seek(mstime);
|
||||
playUntil = mstime + mslen;
|
||||
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetHeader("Content-Type", "video/mp4");
|
||||
HTTP_S.StartResponse(HTTP_R, myConn);
|
||||
//send the bootstrap
|
||||
std::string bootstrap = dynamicBootstrap(tid);
|
||||
HTTP_S.Chunkify(bootstrap, myConn);
|
||||
//send a zero-size mdat, meaning it stretches until end of file.
|
||||
HTTP_S.Chunkify("\000\000\000\000mdat", 8, myConn);
|
||||
//send init data, if needed.
|
||||
if (audioTrack > 0){
|
||||
tag.DTSCAudioInit(myMeta.tracks[audioTrack]);
|
||||
tag.tagTime(mstime);
|
||||
HTTP_S.Chunkify(tag.data, tag.len, myConn);
|
||||
}
|
||||
if (tid > 0){
|
||||
tag.DTSCVideoInit(myMeta.tracks[tid]);
|
||||
tag.tagTime(mstime);
|
||||
HTTP_S.Chunkify(tag.data, tag.len, myConn);
|
||||
}
|
||||
parseData = true;
|
||||
wantRequest = false;
|
||||
}else{
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
initialize();
|
||||
std::stringstream tmpstr;
|
||||
myMeta.toPrettyString(tmpstr);
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetHeader("Content-Type", "text/xml");
|
||||
HTTP_S.SetHeader("Cache-Control", "no-cache");
|
||||
HTTP_S.SetBody(dynamicIndex());
|
||||
HTTP_S.SendResponse("200", "OK", myConn);
|
||||
}
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
}
|
||||
}
|
||||
}
|
30
src/output/output_hds.h
Normal file
30
src/output/output_hds.h
Normal file
|
@ -0,0 +1,30 @@
|
|||
#include "output.h"
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/ts_packet.h>
|
||||
#include <mist/mp4.h>
|
||||
#include <mist/mp4_generic.h>
|
||||
|
||||
namespace Mist {
|
||||
class OutHDS : public Output {
|
||||
public:
|
||||
OutHDS(Socket::Connection & conn);
|
||||
~OutHDS();
|
||||
static void init(Util::Config * cfg);
|
||||
|
||||
void onRequest();
|
||||
void onFail();
|
||||
void sendNext();
|
||||
protected:
|
||||
void getTracks();
|
||||
std::string dynamicBootstrap(int tid);
|
||||
std::string dynamicIndex();
|
||||
HTTP::Parser HTTP_S;
|
||||
HTTP::Parser HTTP_R;
|
||||
std::set<int> videoTracks;///<< Holds valid video tracks for playback
|
||||
long long int audioTrack;///<< Holds audio track ID for playback
|
||||
long long unsigned int playUntil;
|
||||
FLV::Tag tag;
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutHDS mistOut;
|
282
src/output/output_hls.cpp
Normal file
282
src/output/output_hls.cpp
Normal file
|
@ -0,0 +1,282 @@
|
|||
#include "output_hls.h"
|
||||
#include <mist/defines.h>
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/stream.h>
|
||||
#include <unistd.h>
|
||||
|
||||
namespace Mist {
|
||||
///\brief Builds an index file for HTTP Live streaming.
|
||||
///\return The index file for HTTP Live Streaming.
|
||||
std::string OutHLS::liveIndex(){
|
||||
std::stringstream result;
|
||||
result << "#EXTM3U\r\n";
|
||||
int audioId = -1;
|
||||
std::string audioName;
|
||||
for (std::map<int,DTSC::Track>::iterator it = myMeta.tracks.begin(); it != myMeta.tracks.end(); it++){
|
||||
if (it->second.codec == "AAC"){
|
||||
audioId = it->first;
|
||||
audioName = it->second.getIdentifier();
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (std::map<int,DTSC::Track>::iterator it = myMeta.tracks.begin(); it != myMeta.tracks.end(); it++){
|
||||
if (it->second.codec == "H264"){
|
||||
int bWidth = it->second.bps * 2;
|
||||
if (audioId != -1){
|
||||
bWidth += myMeta.tracks[audioId].bps * 2;
|
||||
}
|
||||
result << "#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=" << bWidth * 10 << "\r\n";
|
||||
result << it->first;
|
||||
if (audioId != -1){
|
||||
result << "_" << audioId;
|
||||
}
|
||||
result << "/index.m3u8\r\n";
|
||||
}
|
||||
}
|
||||
#if DEBUG >= 8
|
||||
std::cerr << "Sending this index:" << std::endl << result.str() << std::endl;
|
||||
#endif
|
||||
return result.str();
|
||||
}
|
||||
|
||||
std::string OutHLS::liveIndex(int tid){
|
||||
updateMeta();
|
||||
std::stringstream result;
|
||||
//parse single track
|
||||
int longestFragment = 0;
|
||||
if (!myMeta.tracks[tid].fragments.size()){
|
||||
DEBUG_MSG(DLVL_FAIL, "liveIndex called with track %d, which has no fragments!", tid);
|
||||
return "";
|
||||
}
|
||||
for (std::deque<DTSC::Fragment>::iterator it = myMeta.tracks[tid].fragments.begin(); (it + 1) != myMeta.tracks[tid].fragments.end(); it++){
|
||||
if (it->getDuration() > longestFragment){
|
||||
longestFragment = it->getDuration();
|
||||
}
|
||||
}
|
||||
result << "#EXTM3U\r\n"
|
||||
"#EXT-X-TARGETDURATION:" << (longestFragment / 1000) + 1 << "\r\n"
|
||||
"#EXT-X-MEDIA-SEQUENCE:" << myMeta.tracks[tid].missedFrags << "\r\n";
|
||||
for (std::deque<DTSC::Fragment>::iterator it = myMeta.tracks[tid].fragments.begin(); it != myMeta.tracks[tid].fragments.end(); it++){
|
||||
long long int starttime = myMeta.tracks[tid].getKey(it->getNumber()).getTime();
|
||||
|
||||
if (it != (myMeta.tracks[tid].fragments.end() - 1)){
|
||||
result << "#EXTINF:" << ((it->getDuration() + 500) / 1000) << ", no desc\r\n" << starttime << "_" << it->getDuration() + starttime << ".ts\r\n";
|
||||
}
|
||||
}
|
||||
if ( !myMeta.live){
|
||||
result << "#EXT-X-ENDLIST\r\n";
|
||||
}
|
||||
#if DEBUG >= 8
|
||||
std::cerr << "Sending this index:" << std::endl << result.str() << std::endl;
|
||||
#endif
|
||||
return result.str();
|
||||
} //liveIndex
|
||||
|
||||
|
||||
OutHLS::OutHLS(Socket::Connection & conn) : Output(conn) {
|
||||
haveAvcc = false;
|
||||
}
|
||||
|
||||
OutHLS::~OutHLS() {}
|
||||
|
||||
void OutHLS::onFail(){
|
||||
HTTP_S.Clean(); //make sure no parts of old requests are left in any buffers
|
||||
HTTP_S.SetBody("Stream not found. Sorry, we tried.");
|
||||
HTTP_S.SendResponse("404", "Stream not found", myConn);
|
||||
Output::onFail();
|
||||
}
|
||||
|
||||
void OutHLS::init(Util::Config * cfg){
|
||||
capa["name"] = "HTTP_Live";
|
||||
capa["desc"] = "Enables HTTP protocol Apple-specific streaming (also known as HLS).";
|
||||
capa["deps"] = "HTTP";
|
||||
capa["url_rel"] = "/hls/$/index.m3u8";
|
||||
capa["url_prefix"] = "/hls/$/";
|
||||
capa["socket"] = "http_hls";
|
||||
capa["codecs"][0u][0u].append("H264");
|
||||
capa["codecs"][0u][1u].append("AAC");
|
||||
capa["methods"][0u]["handler"] = "http";
|
||||
capa["methods"][0u]["type"] = "html5/application/vnd.apple.mpegurl";
|
||||
capa["methods"][0u]["priority"] = 9ll;
|
||||
cfg->addBasicConnectorOptions(capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
void OutHLS::sendNext(){
|
||||
Socket::Buffer ToPack;
|
||||
char * ContCounter = 0;
|
||||
bool IsKeyFrame = false;
|
||||
|
||||
char * dataPointer = 0;
|
||||
int dataLen = 0;
|
||||
currentPacket.getString("data", dataPointer, dataLen);
|
||||
|
||||
if (currentPacket.getTime() >= until){
|
||||
DEBUG_MSG(DLVL_DEVEL, "(%d) Done sending fragment", getpid() );
|
||||
stop();
|
||||
wantRequest = true;
|
||||
HTTP_S.Chunkify("", 0, myConn);
|
||||
HTTP_S.Clean();
|
||||
return;
|
||||
}
|
||||
|
||||
//detect packet type, and put converted data into ToPack.
|
||||
if (myMeta.tracks[currentPacket.getTrackId()].type == "video"){
|
||||
ToPack.append(TS::Packet::getPESVideoLeadIn(0ul, currentPacket.getTime() * 90));
|
||||
|
||||
IsKeyFrame = currentPacket.getInt("keyframe");
|
||||
if (IsKeyFrame){
|
||||
if (!haveAvcc){
|
||||
avccbox.setPayload(myMeta.tracks[currentPacket.getTrackId()].init);
|
||||
haveAvcc = true;
|
||||
}
|
||||
ToPack.append(avccbox.asAnnexB());
|
||||
}
|
||||
unsigned int i = 0;
|
||||
while (i + 4 < (unsigned int)dataLen){
|
||||
unsigned int ThisNaluSize = (dataPointer[i] << 24) + (dataPointer[i+1] << 16) + (dataPointer[i+2] << 8) + dataPointer[i+3];
|
||||
if (ThisNaluSize + i + 4 > (unsigned int)dataLen){
|
||||
DEBUG_MSG(DLVL_WARN, "Too big NALU detected (%u > %d) - skipping!", ThisNaluSize + i + 4, dataLen);
|
||||
break;
|
||||
}
|
||||
ToPack.append("\000\000\000\001", 4);
|
||||
i += 4;
|
||||
ToPack.append(dataPointer + i, ThisNaluSize);
|
||||
i += ThisNaluSize;
|
||||
}
|
||||
ContCounter = &VideoCounter;
|
||||
}else if (myMeta.tracks[currentPacket.getTrackId()].type == "audio"){
|
||||
if (AppleCompat){
|
||||
ToPack.append(TS::Packet::getPESAudioLeadIn(7+dataLen, lastVid));
|
||||
}else{
|
||||
ToPack.append(TS::Packet::getPESAudioLeadIn(7+dataLen, currentPacket.getTime() * 90));
|
||||
}
|
||||
ToPack.append(TS::GetAudioHeader(dataLen, myMeta.tracks[currentPacket.getTrackId()].init));
|
||||
ToPack.append(dataPointer, dataLen);
|
||||
ContCounter = &AudioCounter;
|
||||
}
|
||||
|
||||
bool first = true;
|
||||
//send TS packets
|
||||
while (ToPack.size()){
|
||||
if (PacketNumber % 42 == 0){
|
||||
HTTP_S.Chunkify(TS::PAT, 188, myConn);
|
||||
HTTP_S.Chunkify(TS::PMT, 188, myConn);
|
||||
PacketNumber += 2;
|
||||
}
|
||||
PackData.Clear();
|
||||
/// \todo Update according to sendHeader()'s generated data.
|
||||
//0x100 - 1 + currentPacket.getTrackId()
|
||||
if (myMeta.tracks[currentPacket.getTrackId()].type == "video"){
|
||||
PackData.PID(0x100);
|
||||
}else{
|
||||
PackData.PID(0x101);
|
||||
}
|
||||
PackData.ContinuityCounter((*ContCounter)++);
|
||||
if (first){
|
||||
PackData.UnitStart(1);
|
||||
if (IsKeyFrame){
|
||||
PackData.RandomAccess(1);
|
||||
PackData.PCR(currentPacket.getTime() * 27000);
|
||||
}
|
||||
first = false;
|
||||
}
|
||||
unsigned int toSend = PackData.AddStuffing(ToPack.bytes(184));
|
||||
std::string gonnaSend = ToPack.remove(toSend);
|
||||
PackData.FillFree(gonnaSend);
|
||||
HTTP_S.Chunkify(PackData.ToString(), 188, myConn);
|
||||
PacketNumber ++;
|
||||
}
|
||||
}
|
||||
|
||||
int OutHLS::canSeekms(unsigned int ms){
|
||||
//no tracks? Frame too new by definition.
|
||||
if ( !myMeta.tracks.size()){
|
||||
return 1;
|
||||
}
|
||||
//loop trough all the tracks
|
||||
for (std::map<int,DTSC::Track>::iterator it = myMeta.tracks.begin(); it != myMeta.tracks.end(); it++){
|
||||
//return "too late" if one track is past this point
|
||||
if (ms < it->second.firstms){
|
||||
return -1;
|
||||
}
|
||||
//return "too early" if one track is not yet at this point
|
||||
if (ms > it->second.lastms){
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void OutHLS::onRequest(){
|
||||
while (HTTP_R.Read(myConn)){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Received request: %s", HTTP_R.getUrl().c_str());
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
AppleCompat = (HTTP_R.GetHeader("User-Agent").find("Apple") != std::string::npos);
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
initialize();
|
||||
if (HTTP_R.url.find(".m3u") == std::string::npos){
|
||||
std::string tmpStr = HTTP_R.getUrl();
|
||||
std::string fmtStr = "/hls/" + streamName + "/%u_%u/%llu_%llu.ts";
|
||||
long long unsigned int from;
|
||||
sscanf(tmpStr.c_str(), fmtStr.c_str(), &vidTrack, &audTrack, &from, &until);
|
||||
DEBUG_MSG(DLVL_DEVEL, "Vid %u, Aud %u, From %llu, Until %llu", vidTrack, audTrack, from, until);
|
||||
selectedTracks.clear();
|
||||
selectedTracks.insert(vidTrack);
|
||||
selectedTracks.insert(audTrack);
|
||||
|
||||
if (myMeta.live){
|
||||
/// \todo Detection of out-of-range parts.
|
||||
int seekable = canSeekms(from);
|
||||
if (seekable < 0){
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody("The requested fragment is no longer kept in memory on the server and cannot be served.\n");
|
||||
myConn.SendNow(HTTP_S.BuildResponse("412", "Fragment out of range"));
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
DEBUG_MSG(DLVL_WARN, "Fragment @ %llu too old", from);
|
||||
continue;
|
||||
}
|
||||
if (seekable > 0){
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody("Proxy, re-request this in a second or two.\n");
|
||||
myConn.SendNow(HTTP_S.BuildResponse("208", "Ask again later"));
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
DEBUG_MSG(DLVL_WARN, "Fragment @ %llu not available yet", from);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
seek(from);
|
||||
lastVid = from * 90;
|
||||
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetHeader("Content-Type", "video/mp2t");
|
||||
HTTP_S.StartResponse(HTTP_R, myConn);
|
||||
PacketNumber = 0;
|
||||
parseData = true;
|
||||
wantRequest = false;
|
||||
}else{
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
initialize();
|
||||
std::string request = HTTP_R.url.substr(HTTP_R.url.find("/", 5) + 1);
|
||||
HTTP_S.Clean();
|
||||
if (HTTP_R.url.find(".m3u8") != std::string::npos){
|
||||
HTTP_S.SetHeader("Content-Type", "audio/x-mpegurl");
|
||||
}else{
|
||||
HTTP_S.SetHeader("Content-Type", "audio/mpegurl");
|
||||
}
|
||||
HTTP_S.SetHeader("Cache-Control", "no-cache");
|
||||
std::string manifest;
|
||||
if (request.find("/") == std::string::npos){
|
||||
manifest = liveIndex();
|
||||
}else{
|
||||
int selectId = atoi(request.substr(0,request.find("/")).c_str());
|
||||
manifest = liveIndex(selectId);
|
||||
}
|
||||
HTTP_S.SetBody(manifest);
|
||||
HTTP_S.SendResponse("200", "OK", myConn);
|
||||
}
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
}
|
||||
}
|
||||
}
|
39
src/output/output_hls.h
Normal file
39
src/output/output_hls.h
Normal file
|
@ -0,0 +1,39 @@
|
|||
#include "output.h"
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/ts_packet.h>
|
||||
#include <mist/mp4.h>
|
||||
#include <mist/mp4_generic.h>
|
||||
|
||||
namespace Mist {
|
||||
class OutHLS : public Output {
|
||||
public:
|
||||
OutHLS(Socket::Connection & conn);
|
||||
~OutHLS();
|
||||
static void init(Util::Config * cfg);
|
||||
|
||||
void onRequest();
|
||||
void onFail();
|
||||
void sendNext();
|
||||
protected:
|
||||
HTTP::Parser HTTP_S;
|
||||
HTTP::Parser HTTP_R;
|
||||
std::string liveIndex();
|
||||
std::string liveIndex(int tid);
|
||||
int canSeekms(unsigned int ms);
|
||||
int keysToSend;
|
||||
long long int playUntil;
|
||||
TS::Packet PackData;
|
||||
unsigned int PacketNumber;
|
||||
bool haveAvcc;
|
||||
char VideoCounter;
|
||||
char AudioCounter;
|
||||
MP4::AVCC avccbox;
|
||||
bool AppleCompat;
|
||||
long long unsigned int lastVid;
|
||||
long long unsigned int until;
|
||||
unsigned int vidTrack;
|
||||
unsigned int audTrack;
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutHLS mistOut;
|
484
src/output/output_hss.cpp
Normal file
484
src/output/output_hss.cpp
Normal file
|
@ -0,0 +1,484 @@
|
|||
#include "output_hss.h"
|
||||
#include <mist/defines.h>
|
||||
#include <mist/mp4.h>
|
||||
#include <mist/mp4_ms.h>
|
||||
#include <mist/mp4_generic.h>
|
||||
#include <mist/mp4_encryption.h>
|
||||
#include <mist/base64.h>
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/stream.h>
|
||||
#include <unistd.h>
|
||||
|
||||
|
||||
|
||||
///\todo Maybe move to util?
|
||||
long long unsigned int binToInt(std::string & binary) {
|
||||
long long int result = 0;
|
||||
for (int i = 0; i < 8; i++) {
|
||||
result <<= 8;
|
||||
result += binary[i];
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string intToBin(long long unsigned int number) {
|
||||
std::string result;
|
||||
result.resize(8);
|
||||
for (int i = 7; i >= 0; i--) {
|
||||
result[i] = number & 0xFF;
|
||||
number >>= 8;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string toUTF16(std::string original) {
|
||||
std::string result;
|
||||
result += (char)0xFF;
|
||||
result += (char)0xFE;
|
||||
for (std::string::iterator it = original.begin(); it != original.end(); it++) {
|
||||
result += (*it);
|
||||
result += (char)0x00;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
|
||||
namespace Mist {
|
||||
OutHSS::OutHSS(Socket::Connection & conn) : Output(conn) { }
|
||||
|
||||
OutHSS::~OutHSS() {}
|
||||
|
||||
void OutHSS::init(Util::Config * cfg) {
|
||||
capa["name"] = "HTTP_Smooth";
|
||||
capa["desc"] = "Enables HTTP protocol Microsoft-specific smooth streaming through silverlight (also known as HSS).";
|
||||
capa["deps"] = "HTTP";
|
||||
capa["url_rel"] = "/smooth/$.ism/Manifest";
|
||||
capa["url_prefix"] = "/smooth/$.ism/";
|
||||
capa["socket"] = "http_hss";
|
||||
capa["codecs"][0u][0u].append("H264");
|
||||
capa["codecs"][0u][1u].append("AAC");
|
||||
capa["methods"][0u]["handler"] = "http";
|
||||
capa["methods"][0u]["type"] = "html5/application/vnd.ms-ss";
|
||||
capa["methods"][0u]["priority"] = 9ll;
|
||||
capa["methods"][0u]["nolive"] = 1;
|
||||
capa["methods"][1u]["handler"] = "http";
|
||||
capa["methods"][1u]["type"] = "silverlight";
|
||||
capa["methods"][1u]["priority"] = 1ll;
|
||||
capa["methods"][1u]["nolive"] = 1;
|
||||
cfg->addBasicConnectorOptions(capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
void OutHSS::sendNext() {
|
||||
if (currentPacket.getTime() >= playUntil) {
|
||||
DEBUG_MSG(DLVL_DEVEL, "(%d) Done sending fragment %d:%d", getpid(), myTrackStor, myKeyStor);
|
||||
stop();
|
||||
wantRequest = true;
|
||||
HTTP_S.Chunkify("", 0, myConn);
|
||||
HTTP_R.Clean();
|
||||
return;
|
||||
}
|
||||
char * dataPointer = 0;
|
||||
int len = 0;
|
||||
currentPacket.getString("data", dataPointer, len);
|
||||
HTTP_S.Chunkify(dataPointer, len, myConn);
|
||||
}
|
||||
|
||||
void OutHSS::onFail(){
|
||||
HTTP_S.Clean(); //make sure no parts of old requests are left in any buffers
|
||||
HTTP_S.SetBody("Stream not found. Sorry, we tried.");
|
||||
HTTP_S.SendResponse("404", "Stream not found", myConn);
|
||||
Output::onFail();
|
||||
}
|
||||
|
||||
int OutHSS::canSeekms(unsigned int ms) {
|
||||
//no tracks? Frame too new by definition.
|
||||
if (!myMeta.tracks.size()) {
|
||||
DEBUG_MSG(DLVL_DEVEL, "HSS Canseek to %d returns 1 because no tracks", ms);
|
||||
return 1;
|
||||
}
|
||||
//loop trough all selected tracks
|
||||
for (std::set<unsigned long>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++) {
|
||||
//return "too late" if one track is past this point
|
||||
if (ms < myMeta.tracks[*it].firstms) {
|
||||
DEBUG_MSG(DLVL_DEVEL, "HSS Canseek to %d returns -1 because track %lu firstms == %d", ms, *it, myMeta.tracks[*it].firstms);
|
||||
return -1;
|
||||
}
|
||||
//return "too early" if one track is not yet at this point
|
||||
if (ms > myMeta.tracks[*it].lastms) {
|
||||
DEBUG_MSG(DLVL_DEVEL, "HSS Canseek to %d returns 1 because track %lu lastms == %d", ms, *it, myMeta.tracks[*it].lastms);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void OutHSS::sendHeader() {
|
||||
//We have a non-manifest request, parse it.
|
||||
std::string Quality = HTTP_R.url.substr(HTTP_R.url.find("TrackID=", 8) + 8);
|
||||
Quality = Quality.substr(0, Quality.find(")"));
|
||||
std::string parseString = HTTP_R.url.substr(HTTP_R.url.find(")/") + 2);
|
||||
parseString = parseString.substr(parseString.find("(") + 1);
|
||||
long long int seekTime = atoll(parseString.substr(0, parseString.find(")")).c_str()) / 10000;
|
||||
unsigned int tid = atoll(Quality.c_str());
|
||||
selectedTracks.clear();
|
||||
selectedTracks.insert(tid);
|
||||
if (myMeta.live) {
|
||||
updateMeta();
|
||||
int seekable = canSeekms(seekTime / 10000);
|
||||
if (seekable == 0){
|
||||
// iff the fragment in question is available, check if the next is available too
|
||||
for (std::deque<DTSC::Key>::iterator it = myMeta.tracks[tid].keys.begin(); it != myMeta.tracks[tid].keys.end(); it++){
|
||||
if (it->getTime() >= (seekTime / 10000)){
|
||||
if ((it + 1) == myMeta.tracks[tid].keys.end()){
|
||||
seekable = 1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (seekable < 0){
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody("The requested fragment is no longer kept in memory on the server and cannot be served.\n");
|
||||
myConn.SendNow(HTTP_S.BuildResponse("412", "Fragment out of range"));
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
std::cout << "Fragment @ " << seekTime / 10000 << "ms too old (" << myMeta.tracks[tid].firstms << " - " << myMeta.tracks[tid].lastms << " ms)" << std::endl;
|
||||
stop();
|
||||
wantRequest = true;
|
||||
return;
|
||||
}
|
||||
if (seekable > 0){
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody("Proxy, re-request this in a second or two.\n");
|
||||
myConn.SendNow(HTTP_S.BuildResponse("208", "Ask again later"));
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
std::cout << "Fragment @ " << seekTime / 10000 << "ms not available yet (" << myMeta.tracks[tid].firstms << " - " << myMeta.tracks[tid].lastms << " ms)" << std::endl;
|
||||
stop();
|
||||
wantRequest = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
DEBUG_MSG(DLVL_DEVEL, "(%d) Seeking to time %lld on track %d", getpid(), seekTime, tid);
|
||||
seek(seekTime);
|
||||
playUntil = (*(keyTimes[tid].upper_bound(seekTime)));
|
||||
DEBUG_MSG(DLVL_DEVEL, "Set playUntil to %lld", playUntil);
|
||||
myTrackStor = tid;
|
||||
myKeyStor = seekTime;
|
||||
keysToSend = 1;
|
||||
//Seek to the right place and send a play-once for a single fragment.
|
||||
std::stringstream sstream;
|
||||
|
||||
int partOffset = 0;
|
||||
int keyDur = 0;
|
||||
DTSC::Key keyObj;
|
||||
for (std::deque<DTSC::Key>::iterator it = myMeta.tracks[tid].keys.begin(); it != myMeta.tracks[tid].keys.end(); it++) {
|
||||
if (it->getTime() >= seekTime) {
|
||||
keyObj = (*it);
|
||||
keyDur = it->getLength();
|
||||
std::deque<DTSC::Key>::iterator nextIt = it;
|
||||
nextIt++;
|
||||
if (nextIt == myMeta.tracks[tid].keys.end()) {
|
||||
if (myMeta.live) {
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody("Proxy, re-request this in a second or two.\n");
|
||||
myConn.SendNow(HTTP_S.BuildResponse("208", "Ask again later"));
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
std::cout << "Fragment after fragment @ " << (seekTime / 10000) << " not available yet" << std::endl;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
partOffset += it->getParts();
|
||||
}
|
||||
if (HTTP_R.url == "/") {
|
||||
return; //Don't continue, but continue instead.
|
||||
}
|
||||
/*
|
||||
if (myMeta.live) {
|
||||
if (mstime == 0 && (seekTime / 10000) > 1){
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetBody("The requested fragment is no longer kept in memory on the server and cannot be served.\n");
|
||||
myConn.SendNow(HTTP_S.BuildResponse("412", "Fragment out of range"));
|
||||
HTTP_R.Clean(); //clean for any possible next requests
|
||||
std::cout << "Fragment @ " << (seekTime / 10000) << " too old" << std::endl;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
///\todo Select correct track (tid);
|
||||
|
||||
//Wrap everything in mp4 boxes
|
||||
MP4::MFHD mfhd_box;
|
||||
mfhd_box.setSequenceNumber(((keyObj.getNumber() - 1) * 2) + tid);///\todo Urgent: Check this for multitrack... :P wtf... :P
|
||||
|
||||
MP4::TFHD tfhd_box;
|
||||
tfhd_box.setFlags(MP4::tfhdSampleFlag);
|
||||
tfhd_box.setTrackID(tid);
|
||||
if (myMeta.tracks[tid].type == "video") {
|
||||
tfhd_box.setDefaultSampleFlags(0x00004001);
|
||||
} else {
|
||||
tfhd_box.setDefaultSampleFlags(0x00008002);
|
||||
}
|
||||
|
||||
MP4::TRUN trun_box;
|
||||
trun_box.setDataOffset(42);///\todo Check if this is a placeholder, or an actually correct number
|
||||
unsigned int keySize = 0;
|
||||
if (myMeta.tracks[tid].type == "video") {
|
||||
trun_box.setFlags(MP4::trundataOffset | MP4::trunfirstSampleFlags | MP4::trunsampleDuration | MP4::trunsampleSize | MP4::trunsampleOffsets);
|
||||
} else {
|
||||
trun_box.setFlags(MP4::trundataOffset | MP4::trunsampleDuration | MP4::trunsampleSize);
|
||||
}
|
||||
trun_box.setFirstSampleFlags(0x00004002);
|
||||
for (int i = 0; i < keyObj.getParts(); i++) {
|
||||
MP4::trunSampleInformation trunSample;
|
||||
trunSample.sampleSize = myMeta.tracks[tid].parts[i + partOffset].getSize();
|
||||
keySize += myMeta.tracks[tid].parts[i + partOffset].getSize();
|
||||
trunSample.sampleDuration = myMeta.tracks[tid].parts[i + partOffset].getDuration() * 10000;
|
||||
if (myMeta.tracks[tid].type == "video") {
|
||||
trunSample.sampleOffset = myMeta.tracks[tid].parts[i + partOffset].getOffset() * 10000;
|
||||
}
|
||||
trun_box.setSampleInformation(trunSample, i);
|
||||
}
|
||||
|
||||
MP4::SDTP sdtp_box;
|
||||
sdtp_box.setVersion(0);
|
||||
if (myMeta.tracks[tid].type == "video") {
|
||||
sdtp_box.setValue(36, 4);
|
||||
for (int i = 1; i < keyObj.getParts(); i++) {
|
||||
sdtp_box.setValue(20, 4 + i);
|
||||
}
|
||||
} else {
|
||||
sdtp_box.setValue(40, 4);
|
||||
for (int i = 1; i < keyObj.getParts(); i++) {
|
||||
sdtp_box.setValue(40, 4 + i);
|
||||
}
|
||||
}
|
||||
|
||||
MP4::TRAF traf_box;
|
||||
traf_box.setContent(tfhd_box, 0);
|
||||
traf_box.setContent(trun_box, 1);
|
||||
traf_box.setContent(sdtp_box, 2);
|
||||
|
||||
//If the stream is live, we want to have a fragref box if possible
|
||||
|
||||
if (myMeta.live) {
|
||||
MP4::UUID_TrackFragmentReference fragref_box;
|
||||
fragref_box.setVersion(1);
|
||||
fragref_box.setFragmentCount(0);
|
||||
int fragCount = 0;
|
||||
for (unsigned int i = 0; fragCount < 2 && i < myMeta.tracks[tid].keys.size() - 1; i++) {
|
||||
if (myMeta.tracks[tid].keys[i].getTime() > seekTime) {
|
||||
DEBUG_MSG(DLVL_DEVEL, "Key %d added to fragRef box, time %ld > %lld", i, myMeta.tracks[tid].keys[i].getTime(), seekTime);
|
||||
fragref_box.setTime(fragCount, myMeta.tracks[tid].keys[i].getTime() * 10000);
|
||||
fragref_box.setDuration(fragCount, myMeta.tracks[tid].keys[i].getLength() * 10000);
|
||||
fragref_box.setFragmentCount(++fragCount);
|
||||
}
|
||||
}
|
||||
traf_box.setContent(fragref_box, 3);
|
||||
}
|
||||
|
||||
MP4::MOOF moof_box;
|
||||
moof_box.setContent(mfhd_box, 0);
|
||||
moof_box.setContent(traf_box, 1);
|
||||
//Setting the correct offsets.
|
||||
moof_box.setContent(traf_box, 1);
|
||||
trun_box.setDataOffset(moof_box.boxedSize() + 8);
|
||||
traf_box.setContent(trun_box, 1);
|
||||
moof_box.setContent(traf_box, 1);
|
||||
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetHeader("Content-Type", "video/mp4");
|
||||
HTTP_S.StartResponse(HTTP_R, myConn);
|
||||
HTTP_S.Chunkify(moof_box.asBox(), moof_box.boxedSize(), myConn);
|
||||
int size = htonl(keySize + 8);
|
||||
HTTP_S.Chunkify((char *)&size, 4, myConn);
|
||||
HTTP_S.Chunkify("mdat", 4, myConn);
|
||||
sentHeader = true;
|
||||
HTTP_R.Clean();
|
||||
DEBUG_MSG(DLVL_DEVEL, "(%d) Sent full header", getpid());
|
||||
}
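//Note on the wire layout produced above (descriptive only): each fragment is sent as
//  [moof: mfhd + traf{tfhd, trun, sdtp[, uuid fragment-reference]}][4-byte size][4-byte "mdat"][sample data],
//which is why trun_box gets its data offset re-applied as moof_box.boxedSize() + 8 once the final moof size
//is known: the first sample starts 8 bytes (the mdat size and type fields) past the end of the moof box.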
|
||||
|
||||
|
||||
///\brief Builds an index file for HTTP Smooth streaming.
|
||||
///\return The index file for HTTP Smooth Streaming.
|
||||
std::string OutHSS::smoothIndex(){
|
||||
updateMeta();
|
||||
std::stringstream Result;
|
||||
Result << "<?xml version=\"1.0\" encoding=\"utf-16\"?>\n";
|
||||
Result << "<SmoothStreamingMedia "
|
||||
"MajorVersion=\"2\" "
|
||||
"MinorVersion=\"0\" "
|
||||
"TimeScale=\"10000000\" ";
|
||||
std::deque<std::map<int, DTSC::Track>::iterator> audioIters;
|
||||
std::deque<std::map<int, DTSC::Track>::iterator> videoIters;
|
||||
long long int maxWidth = 0;
|
||||
long long int maxHeight = 0;
|
||||
long long int minWidth = 99999999;
|
||||
long long int minHeight = 99999999;
|
||||
for (std::map<int, DTSC::Track>::iterator it = myMeta.tracks.begin(); it != myMeta.tracks.end(); it++) {
|
||||
if (it->second.codec == "AAC") {
|
||||
audioIters.push_back(it);
|
||||
}
|
||||
if (it->second.codec == "H264") {
|
||||
videoIters.push_back(it);
|
||||
if (it->second.width > maxWidth) {
|
||||
maxWidth = it->second.width;
|
||||
}
|
||||
if (it->second.width < minWidth) {
|
||||
minWidth = it->second.width;
|
||||
}
|
||||
if (it->second.height > maxHeight) {
|
||||
maxHeight = it->second.height;
|
||||
}
|
||||
if (it->second.height < minHeight) {
|
||||
minHeight = it->second.height;
|
||||
}
|
||||
}
|
||||
}
|
||||
DEBUG_MSG(DLVL_DEVEL, "Buffer window here %lld", myMeta.bufferWindow);
|
||||
if (myMeta.vod) {
|
||||
Result << "Duration=\"" << (*videoIters.begin())->second.lastms << "0000\"";
|
||||
} else {
|
||||
Result << "Duration=\"0\" "
|
||||
"IsLive=\"TRUE\" "
|
||||
"LookAheadFragmentCount=\"2\" "
|
||||
"DVRWindowLength=\"" << myMeta.bufferWindow << "0000\" "
|
||||
"CanSeek=\"TRUE\" "
|
||||
"CanPause=\"TRUE\" ";
|
||||
}
|
||||
Result << ">\n";
|
||||
|
||||
//Add audio entries
|
||||
if (audioIters.size()) {
|
||||
Result << "<StreamIndex "
|
||||
"Type=\"audio\" "
|
||||
"QualityLevels=\"" << audioIters.size() << "\" "
|
||||
"Name=\"audio\" "
|
||||
"Chunks=\"" << (*audioIters.begin())->second.keys.size() << "\" "
|
||||
"Url=\"Q({bitrate},{CustomAttributes})/A({start time})\">\n";
|
||||
int index = 0;
|
||||
for (std::deque<std::map<int, DTSC::Track>::iterator>::iterator it = audioIters.begin(); it != audioIters.end(); it++) {
|
||||
Result << "<QualityLevel "
|
||||
"Index=\"" << index << "\" "
|
||||
"Bitrate=\"" << (*it)->second.bps * 8 << "\" "
|
||||
"CodecPrivateData=\"" << std::hex;
|
||||
for (unsigned int i = 0; i < (*it)->second.init.size(); i++) {
|
||||
Result << std::setfill('0') << std::setw(2) << std::right << (int)(*it)->second.init[i];
|
||||
}
|
||||
Result << std::dec << "\" "
|
||||
"SamplingRate=\"" << (*it)->second.rate << "\" "
|
||||
"Channels=\"2\" "
|
||||
"BitsPerSample=\"16\" "
|
||||
"PacketSize=\"4\" "
|
||||
"AudioTag=\"255\" "
|
||||
"FourCC=\"AACL\" >\n";
|
||||
Result << "<CustomAttributes>\n"
|
||||
"<Attribute Name = \"TrackID\" Value = \"" << (*it)->first << "\" />"
|
||||
"</CustomAttributes>";
|
||||
Result << "</QualityLevel>\n";
|
||||
index++;
|
||||
}
|
||||
if ((*audioIters.begin())->second.keys.size()) {
|
||||
for (std::deque<DTSC::Key>::iterator it = (*audioIters.begin())->second.keys.begin(); it != (((*audioIters.begin())->second.keys.end()) - 1); it++) {
|
||||
Result << "<c ";
|
||||
if (it == (*audioIters.begin())->second.keys.begin()) {
|
||||
Result << "t=\"" << it->getTime() * 10000 << "\" ";
|
||||
}
|
||||
Result << "d=\"" << it->getLength() * 10000 << "\" />\n";
|
||||
}
|
||||
}
|
||||
Result << "</StreamIndex>\n";
|
||||
}
|
||||
//Add video entries
|
||||
if (videoIters.size()) {
|
||||
Result << "<StreamIndex "
|
||||
"Type=\"video\" "
|
||||
"QualityLevels=\"" << videoIters.size() << "\" "
|
||||
"Name=\"video\" "
|
||||
"Chunks=\"" << (*videoIters.begin())->second.keys.size() << "\" "
|
||||
"Url=\"Q({bitrate},{CustomAttributes})/V({start time})\" "
|
||||
"MaxWidth=\"" << maxWidth << "\" "
|
||||
"MaxHeight=\"" << maxHeight << "\" "
|
||||
"DisplayWidth=\"" << maxWidth << "\" "
|
||||
"DisplayHeight=\"" << maxHeight << "\">\n";
|
||||
int index = 0;
|
||||
for (std::deque<std::map<int, DTSC::Track>::iterator>::iterator it = videoIters.begin(); it != videoIters.end(); it++) {
|
||||
//Add video qualities
|
||||
Result << "<QualityLevel "
|
||||
"Index=\"" << index << "\" "
|
||||
"Bitrate=\"" << (*it)->second.bps * 8 << "\" "
|
||||
"CodecPrivateData=\"" << std::hex;
|
||||
MP4::AVCC avccbox;
|
||||
avccbox.setPayload((*it)->second.init);
|
||||
std::string tmpString = avccbox.asAnnexB();
|
||||
for (unsigned int i = 0; i < tmpString.size(); i++) {
|
||||
Result << std::setfill('0') << std::setw(2) << std::right << (int)tmpString[i];
|
||||
}
|
||||
Result << std::dec << "\" "
|
||||
"MaxWidth=\"" << (*it)->second.width << "\" "
|
||||
"MaxHeight=\"" << (*it)->second.height << "\" "
|
||||
"FourCC=\"AVC1\" >\n";
|
||||
Result << "<CustomAttributes>\n"
|
||||
"<Attribute Name = \"TrackID\" Value = \"" << (*it)->first << "\" />"
|
||||
"</CustomAttributes>";
|
||||
Result << "</QualityLevel>\n";
|
||||
index++;
|
||||
}
|
||||
if ((*videoIters.begin())->second.keys.size()) {
|
||||
for (std::deque<DTSC::Key>::iterator it = (*videoIters.begin())->second.keys.begin(); it != (((*videoIters.begin())->second.keys.end()) - 1); it++) {
|
||||
Result << "<c ";
|
||||
if (it == (*videoIters.begin())->second.keys.begin()) {
|
||||
Result << "t=\"" << it->getTime() * 10000 << "\" ";
|
||||
}
|
||||
Result << "d=\"" << it->getLength() * 10000 << "\" />\n";
|
||||
}
|
||||
}
|
||||
Result << "</StreamIndex>\n";
|
||||
}
|
||||
Result << "</SmoothStreamingMedia>\n";
|
||||
|
||||
#if DEBUG >= 8
|
||||
std::cerr << "Sending this manifest:" << std::endl << Result << std::endl;
|
||||
#endif
|
||||
return toUTF16(Result.str());
|
||||
} //smoothIndex
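//Illustrative client interaction (hypothetical bitrate and timestamp): a player that picked the audio
//QualityLevel with Bitrate="128000" and CustomAttributes TrackID="2" requests the fragment starting at
//9 seconds (TimeScale is 10000000, so 90000000 ticks) by filling in the Url template above:
//  Q(128000,TrackID=2)/A(90000000)        (video requests use the V({start time}) form instead)
//
//The manifest is assembled as UTF-8 and handed to toUTF16(), which is defined elsewhere in this commit.
//Purely as a sketch of the idea (the real helper may differ), a minimal ASCII-only conversion would
//prepend a UTF-16LE byte order mark and widen each byte:
//  std::string sketchToUTF16(const std::string & in){
//    std::string out("\xFF\xFE", 2);      //byte order mark
//    for (size_t i = 0; i < in.size(); ++i){
//      out += in[i];                      //low byte
//      out += '\0';                       //high byte, assuming ASCII input
//    }
//    return out;
//  }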
|
||||
|
||||
|
||||
void OutHSS::onRequest() {
|
||||
sentHeader = false;
|
||||
while (HTTP_R.Read(myConn)) {
|
||||
DEBUG_MSG(DLVL_DEVEL, "(%d) Received request %s", getpid(), HTTP_R.getUrl().c_str());
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
initialize();
|
||||
if (HTTP_R.url.find("Manifest") != std::string::npos) {
|
||||
//Manifest, direct reply
|
||||
HTTP_S.Clean();
|
||||
HTTP_S.SetHeader("Content-Type", "text/xml");
|
||||
HTTP_S.SetHeader("Cache-Control", "no-cache");
|
||||
std::string manifest = smoothIndex();
|
||||
HTTP_S.SetBody(manifest);
|
||||
HTTP_S.SendResponse("200", "OK", myConn);
|
||||
HTTP_R.Clean();
|
||||
} else {
|
||||
parseData = true;
|
||||
wantRequest = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void OutHSS::initialize() {
|
||||
Output::initialize();
|
||||
for (std::map<int, DTSC::Track>::iterator it = myMeta.tracks.begin(); it != myMeta.tracks.end(); it++) {
|
||||
for (std::deque<DTSC::Key>::iterator it2 = it->second.keys.begin(); it2 != it->second.keys.end(); it2++) {
|
||||
keyTimes[it->first].insert(it2->getTime());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
29
src/output/output_hss.h
Normal file
@@ -0,0 +1,29 @@
#include "output.h"
|
||||
#include <mist/http_parser.h>
|
||||
|
||||
namespace Mist {
|
||||
class OutHSS : public Output {
|
||||
public:
|
||||
OutHSS(Socket::Connection & conn);
|
||||
~OutHSS();
|
||||
static void init(Util::Config * cfg);
|
||||
|
||||
void onRequest();
|
||||
void sendNext();
|
||||
void initialize();
|
||||
void onFail();
|
||||
void sendHeader();
|
||||
protected:
|
||||
HTTP::Parser HTTP_S;
|
||||
HTTP::Parser HTTP_R;
|
||||
JSON::Value encryption;
|
||||
std::string smoothIndex();
|
||||
int canSeekms(unsigned int ms);
|
||||
int keysToSend;
|
||||
int myTrackStor;
|
||||
int myKeyStor;
|
||||
long long int playUntil;
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutHSS mistOut;
|
84
src/output/output_json.cpp
Normal file
@@ -0,0 +1,84 @@
#include "output_json.h"
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/defines.h>
|
||||
#include <iomanip>
|
||||
|
||||
namespace Mist {
|
||||
OutJSON::OutJSON(Socket::Connection & conn) : Output(conn){
|
||||
realTime = 0;
|
||||
}
|
||||
|
||||
OutJSON::~OutJSON() {}
|
||||
|
||||
void OutJSON::init(Util::Config * cfg){
|
||||
capa["desc"] = "Enables HTTP protocol JSON streaming.";
|
||||
capa["deps"] = "HTTP";
|
||||
capa["url_rel"] = "/$.json";
|
||||
capa["url_match"] = "/$.json";
|
||||
capa["url_handler"] = "http";
|
||||
capa["url_type"] = "json";
|
||||
capa["socket"] = "http_json";
|
||||
cfg->addBasicConnectorOptions(capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
void OutJSON::sendNext(){
|
||||
if(!first) {
|
||||
myConn.SendNow(", ", 2);
|
||||
}else{
|
||||
if (jsonp == ""){
|
||||
myConn.SendNow("[", 1);
|
||||
}else{
|
||||
myConn.SendNow(jsonp + "([");
|
||||
}
|
||||
first = false;
|
||||
}
|
||||
myConn.SendNow(currentPacket.toJSON().toString());
|
||||
}
|
||||
|
||||
void OutJSON::sendHeader(){
|
||||
HTTP::Parser HTTP_S;
|
||||
FLV::Tag tag;
|
||||
HTTP_S.SetHeader("Content-Type", "text/javascript");
|
||||
HTTP_S.protocol = "HTTP/1.0";
|
||||
myConn.SendNow(HTTP_S.BuildResponse("200", "OK"));
|
||||
sentHeader = true;
|
||||
}
|
||||
|
||||
bool OutJSON::onFinish(){
|
||||
if (jsonp == ""){
|
||||
myConn.SendNow("]\n\n", 3);
|
||||
}else{
|
||||
myConn.SendNow("]);\n\n", 5);
|
||||
}
|
||||
return false;
|
||||
}
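//Illustrative output shape: without a callback parameter the stream is emitted as one long JSON array,
//  [{...first packet...}, {...second packet...}, ...]\n\n
//and with ?callback=cb (or ?jsonp=cb) it is wrapped as
//  cb([{...first packet...}, {...second packet...}, ...]);\n\n
//The exact fields inside each object come from currentPacket.toJSON() and are not spelled out here.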
|
||||
|
||||
void OutJSON::onRequest(){
|
||||
HTTP::Parser HTTP_R;
|
||||
while (HTTP_R.Read(myConn)){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Received request %s", HTTP_R.getUrl().c_str());
|
||||
first = true;
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
jsonp = "";
|
||||
if (HTTP_R.GetVar("callback") != ""){
|
||||
jsonp = HTTP_R.GetVar("callback");
|
||||
}
|
||||
if (HTTP_R.GetVar("jsonp") != ""){
|
||||
jsonp = HTTP_R.GetVar("jsonp");
|
||||
}
|
||||
initialize();
|
||||
for (std::map<int,DTSC::Track>::iterator it = myMeta.tracks.begin(); it != myMeta.tracks.end(); it++){
|
||||
if (it->second.type == "meta" ){
|
||||
selectedTracks.insert(it->first);
|
||||
}
|
||||
}
|
||||
seek(0);
|
||||
parseData = true;
|
||||
wantRequest = false;
|
||||
HTTP_R.Clean();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
20
src/output/output_json.h
Normal file
@@ -0,0 +1,20 @@
#include "output.h"
|
||||
|
||||
|
||||
namespace Mist {
|
||||
class OutJSON : public Output {
|
||||
public:
|
||||
OutJSON(Socket::Connection & conn);
|
||||
~OutJSON();
|
||||
static void init(Util::Config * cfg);
|
||||
void onRequest();
|
||||
bool onFinish();
|
||||
void sendNext();
|
||||
void sendHeader();
|
||||
protected:
|
||||
std::string jsonp;
|
||||
bool first;
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutJSON mistOut;
|
88
src/output/output_progressive_flv.cpp
Normal file
@@ -0,0 +1,88 @@
#include "output_progressive_flv.h"
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/defines.h>
|
||||
|
||||
namespace Mist {
|
||||
OutProgressiveFLV::OutProgressiveFLV(Socket::Connection & conn) : Output(conn) { }
|
||||
|
||||
OutProgressiveFLV::~OutProgressiveFLV() {}
|
||||
|
||||
void OutProgressiveFLV::init(Util::Config * cfg){
|
||||
capa["name"] = "HTTP_Progressive_FLV";
|
||||
capa["desc"] = "Enables HTTP protocol progressive streaming.";
|
||||
capa["deps"] = "HTTP";
|
||||
capa["url_rel"] = "/$.flv";
|
||||
capa["url_match"] = "/$.flv";
|
||||
capa["socket"] = "http_progressive_flv";
|
||||
capa["codecs"][0u][0u].append("H264");
|
||||
capa["codecs"][0u][0u].append("H263");
|
||||
capa["codecs"][0u][0u].append("VP6");
|
||||
capa["codecs"][0u][1u].append("AAC");
|
||||
capa["codecs"][0u][1u].append("MP3");
|
||||
capa["methods"][0u]["handler"] = "http";
|
||||
capa["methods"][0u]["type"] = "flash/7";
|
||||
capa["methods"][0u]["priority"] = 5ll;
|
||||
|
||||
cfg->addBasicConnectorOptions(capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
void OutProgressiveFLV::sendNext(){
|
||||
FLV::Tag tag;
|
||||
bool tmp = tag.DTSCLoader(currentPacket, myMeta.tracks[currentPacket.getTrackId()]);
|
||||
if (!tmp){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Invalid JSON");
|
||||
}
|
||||
myConn.SendNow(tag.data, tag.len);
|
||||
}
|
||||
|
||||
void OutProgressiveFLV::sendHeader(){
|
||||
HTTP::Parser HTTP_S;
|
||||
FLV::Tag tag;
|
||||
HTTP_S.SetHeader("Content-Type", "video/x-flv");
|
||||
HTTP_S.protocol = "HTTP/1.0";
|
||||
myConn.SendNow(HTTP_S.BuildResponse("200", "OK"));
|
||||
myConn.SendNow(FLV::Header, 13);
|
||||
tag.DTSCMetaInit(myMeta, selectedTracks);
|
||||
myConn.SendNow(tag.data, tag.len);
|
||||
|
||||
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++){
|
||||
if (myMeta.tracks[*it].type == "video"){
|
||||
tag.DTSCVideoInit(myMeta.tracks[*it]);
|
||||
myConn.SendNow(tag.data, tag.len);
|
||||
}
|
||||
if (myMeta.tracks[*it].type == "audio"){
|
||||
tag.DTSCAudioInit(myMeta.tracks[*it]);
|
||||
myConn.SendNow(tag.data, tag.len);
|
||||
}
|
||||
}
|
||||
sentHeader = true;
|
||||
}
|
||||
|
||||
void OutProgressiveFLV::onFail(){
|
||||
HTTP::Parser HTTP_S;
|
||||
HTTP_S.Clean(); //make sure no parts of old requests are left in any buffers
|
||||
HTTP_S.SetBody("Stream not found. Sorry, we tried.");
|
||||
HTTP_S.SendResponse("404", "Stream not found", myConn);
|
||||
Output::onFail();
|
||||
}
|
||||
|
||||
void OutProgressiveFLV::onRequest(){
|
||||
HTTP::Parser HTTP_R;
|
||||
while (HTTP_R.Read(myConn)){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Received request %s", HTTP_R.getUrl().c_str());
|
||||
if (HTTP_R.GetVar("audio") != ""){
|
||||
selectedTracks.insert(JSON::Value(HTTP_R.GetVar("audio")).asInt());
|
||||
}
|
||||
if (HTTP_R.GetVar("video") != ""){
|
||||
selectedTracks.insert(JSON::Value(HTTP_R.GetVar("video")).asInt());
|
||||
}
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
parseData = true;
|
||||
wantRequest = false;
|
||||
HTTP_R.Clean();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
18
src/output/output_progressive_flv.h
Normal file
@@ -0,0 +1,18 @@
#include "output.h"
|
||||
|
||||
|
||||
namespace Mist {
|
||||
class OutProgressiveFLV : public Output {
|
||||
public:
|
||||
OutProgressiveFLV(Socket::Connection & conn);
|
||||
~OutProgressiveFLV();
|
||||
static void init(Util::Config * cfg);
|
||||
void onRequest();
|
||||
void sendNext();
|
||||
void onFail();
|
||||
void sendHeader();
|
||||
protected:
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutProgressiveFLV mistOut;
|
65
src/output/output_progressive_mp3.cpp
Normal file
@@ -0,0 +1,65 @@
#include "output_progressive_mp3.h"
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/defines.h>
|
||||
|
||||
namespace Mist {
|
||||
OutProgressiveMP3::OutProgressiveMP3(Socket::Connection & conn) : Output(conn) { }
|
||||
|
||||
OutProgressiveMP3::~OutProgressiveMP3() {}
|
||||
|
||||
void OutProgressiveMP3::init(Util::Config * cfg){
|
||||
capa["name"] = "HTTP_Progressive_MP3";
|
||||
capa["desc"] = "Enables HTTP protocol progressive streaming.";
|
||||
capa["deps"] = "HTTP";
|
||||
capa["url_rel"] = "/$.mp3";
|
||||
capa["url_match"] = "/$.mp3";
|
||||
capa["socket"] = "http_progressive_mp3";
|
||||
capa["codecs"][0u][0u].append("MP3");
|
||||
capa["methods"][0u]["handler"] = "http";
|
||||
capa["methods"][0u]["type"] = "mp3";
|
||||
capa["methods"][0u]["priority"] = 8ll;
|
||||
|
||||
cfg->addBasicConnectorOptions(capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
void OutProgressiveMP3::sendNext(){
|
||||
char * dataPointer = 0;
|
||||
int len = 0;
|
||||
currentPacket.getString("data", dataPointer, len);
|
||||
myConn.SendNow(dataPointer, len);
|
||||
}
|
||||
|
||||
void OutProgressiveMP3::sendHeader(){
|
||||
HTTP::Parser HTTP_S;
|
||||
FLV::Tag tag;
|
||||
HTTP_S.SetHeader("Content-Type", "audio/mpeg");
|
||||
HTTP_S.protocol = "HTTP/1.0";
|
||||
myConn.SendNow(HTTP_S.BuildResponse("200", "OK"));
|
||||
sentHeader = true;
|
||||
}
|
||||
|
||||
void OutProgressiveMP3::onFail(){
|
||||
HTTP::Parser HTTP_S;
|
||||
HTTP_S.Clean(); //make sure no parts of old requests are left in any buffers
|
||||
HTTP_S.SetBody("Stream not found. Sorry, we tried.");
|
||||
HTTP_S.SendResponse("404", "Stream not found", myConn);
|
||||
Output::onFail();
|
||||
}
|
||||
|
||||
void OutProgressiveMP3::onRequest(){
|
||||
HTTP::Parser HTTP_R;
|
||||
while (HTTP_R.Read(myConn)){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Received request %s", HTTP_R.getUrl().c_str());
|
||||
if (HTTP_R.GetVar("audio") != ""){
|
||||
selectedTracks.insert(JSON::Value(HTTP_R.GetVar("audio")).asInt());
|
||||
}
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
parseData = true;
|
||||
wantRequest = false;
|
||||
HTTP_R.Clean();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
18
src/output/output_progressive_mp3.h
Normal file
@@ -0,0 +1,18 @@
#include "output.h"
|
||||
|
||||
|
||||
namespace Mist {
|
||||
class OutProgressiveMP3 : public Output {
|
||||
public:
|
||||
OutProgressiveMP3(Socket::Connection & conn);
|
||||
~OutProgressiveMP3();
|
||||
static void init(Util::Config * cfg);
|
||||
void onRequest();
|
||||
void sendNext();
|
||||
void onFail();
|
||||
void sendHeader();
|
||||
protected:
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutProgressiveMP3 mistOut;
|
558
src/output/output_progressive_mp4.cpp
Normal file
@@ -0,0 +1,558 @@
#include "output_progressive_mp4.h"
|
||||
#include <mist/defines.h>
|
||||
#include <mist/mp4.h>
|
||||
#include <mist/mp4_generic.h>
|
||||
|
||||
namespace Mist {
|
||||
OutProgressiveMP4::OutProgressiveMP4(Socket::Connection & conn) : Output(conn) { }
|
||||
|
||||
OutProgressiveMP4::~OutProgressiveMP4() {}
|
||||
|
||||
void OutProgressiveMP4::init(Util::Config * cfg){
|
||||
capa["name"] = "HTTP_Progressive_MP4";
|
||||
capa["desc"] = "Enables HTTP protocol progressive streaming.";
|
||||
capa["deps"] = "HTTP";
|
||||
capa["url_rel"] = "/$.mp4";
|
||||
capa["url_match"] = "/$.mp4";
|
||||
capa["socket"] = "http_progressive_mp4";
|
||||
capa["codecs"][0u][0u].append("H264");
|
||||
capa["codecs"][0u][1u].append("AAC");
|
||||
capa["methods"][0u]["handler"] = "http";
|
||||
capa["methods"][0u]["type"] = "html5/video/mp4";
|
||||
capa["methods"][0u]["priority"] = 8ll;
|
||||
capa["methods"][0u]["nolive"] = 1;
|
||||
|
||||
|
||||
cfg->addBasicConnectorOptions(capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
std::string OutProgressiveMP4::DTSCMeta2MP4Header(long long & size){
|
||||
std::stringstream header;
|
||||
//ftyp box
|
||||
MP4::FTYP ftypBox;
|
||||
header << std::string(ftypBox.asBox(),ftypBox.boxedSize());
|
||||
|
||||
uint64_t mdatSize = 0;
|
||||
//moov box
|
||||
MP4::MOOV moovBox;
|
||||
unsigned int moovOffset = 0;
|
||||
{
|
||||
//calculating longest duration
|
||||
long long int firstms = -1;
|
||||
long long int lastms = -1;
|
||||
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++) {
|
||||
if (lastms == -1 || lastms < myMeta.tracks[*it].lastms){
|
||||
lastms = myMeta.tracks[*it].lastms;
|
||||
}
|
||||
if (firstms == -1 || firstms > myMeta.tracks[*it].firstms){
|
||||
firstms = myMeta.tracks[*it].firstms;
|
||||
}
|
||||
}
|
||||
MP4::MVHD mvhdBox(lastms - firstms);
|
||||
moovBox.setContent(mvhdBox, moovOffset++);
|
||||
}
|
||||
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++) {
|
||||
MP4::TRAK trakBox;
|
||||
{
|
||||
{
|
||||
MP4::TKHD tkhdBox(*it, myMeta.tracks[*it].lastms - myMeta.tracks[*it].firstms, myMeta.tracks[*it].width, myMeta.tracks[*it].height);
|
||||
trakBox.setContent(tkhdBox, 0);
|
||||
}{
|
||||
MP4::MDIA mdiaBox;
|
||||
unsigned int mdiaOffset = 0;
|
||||
{
|
||||
MP4::MDHD mdhdBox(myMeta.tracks[*it].lastms - myMeta.tracks[*it].firstms);
|
||||
mdiaBox.setContent(mdhdBox, mdiaOffset++);
|
||||
}//MDHD box
|
||||
{
|
||||
MP4::HDLR hdlrBox(myMeta.tracks[*it].type, myMeta.tracks[*it].getIdentifier());
|
||||
mdiaBox.setContent(hdlrBox, mdiaOffset++);
|
||||
}//hdlr box
|
||||
{
|
||||
MP4::MINF minfBox;
|
||||
unsigned int minfOffset = 0;
|
||||
if (myMeta.tracks[*it].type== "video"){
|
||||
MP4::VMHD vmhdBox;
|
||||
vmhdBox.setFlags(1);
|
||||
minfBox.setContent(vmhdBox,minfOffset++);
|
||||
}else if (myMeta.tracks[*it].type == "audio"){
|
||||
MP4::SMHD smhdBox;
|
||||
minfBox.setContent(smhdBox,minfOffset++);
|
||||
}//type box
|
||||
{
|
||||
MP4::DINF dinfBox;
|
||||
MP4::DREF drefBox;
|
||||
dinfBox.setContent(drefBox,0);
|
||||
minfBox.setContent(dinfBox,minfOffset++);
|
||||
}//dinf box
|
||||
{
|
||||
MP4::STBL stblBox;
|
||||
unsigned int offset = 0;
|
||||
{
|
||||
MP4::STSD stsdBox;
|
||||
stsdBox.setVersion(0);
|
||||
if (myMeta.tracks[*it].type == "video"){//boxname = codec
|
||||
MP4::VisualSampleEntry vse;
|
||||
if (myMeta.tracks[*it].codec == "H264"){
|
||||
vse.setCodec("avc1");
|
||||
}
|
||||
vse.setDataReferenceIndex(1);
|
||||
vse.setWidth(myMeta.tracks[*it].width);
|
||||
vse.setHeight(myMeta.tracks[*it].height);
|
||||
MP4::AVCC avccBox;
|
||||
avccBox.setPayload(myMeta.tracks[*it].init);
|
||||
vse.setCLAP(avccBox);
|
||||
stsdBox.setEntry(vse,0);
|
||||
}else if(myMeta.tracks[*it].type == "audio"){//boxname = codec
|
||||
MP4::AudioSampleEntry ase;
|
||||
if (myMeta.tracks[*it].codec == "AAC"){
|
||||
ase.setCodec("mp4a");
|
||||
ase.setDataReferenceIndex(1);
|
||||
}
|
||||
ase.setSampleRate(myMeta.tracks[*it].rate);
|
||||
ase.setChannelCount(myMeta.tracks[*it].channels);
|
||||
ase.setSampleSize(myMeta.tracks[*it].size);
|
||||
//MP4::ESDS esdsBox(myMeta.tracks[*it].init, myMeta.tracks[*it].bps);
|
||||
MP4::ESDS esdsBox;
|
||||
|
||||
//outputting these values first, so malloc isn't called as often.
|
||||
esdsBox.setESHeaderStartCodes(myMeta.tracks[*it].init);
|
||||
esdsBox.setSLValue(2);
|
||||
|
||||
esdsBox.setESDescriptorTypeLength(32+myMeta.tracks[*it].init.size());
|
||||
esdsBox.setESID(2);
|
||||
esdsBox.setStreamPriority(0);
|
||||
esdsBox.setDecoderConfigDescriptorTypeLength(18 + myMeta.tracks[*it].init.size());
|
||||
esdsBox.setByteObjectTypeID(0x40);
|
||||
esdsBox.setStreamType(5);
|
||||
esdsBox.setReservedFlag(1);
|
||||
esdsBox.setBufferSize(1250000);
|
||||
esdsBox.setMaximumBitRate(10000000);
|
||||
esdsBox.setAverageBitRate(myMeta.tracks[*it].bps * 8);
|
||||
esdsBox.setConfigDescriptorTypeLength(5);
|
||||
esdsBox.setSLConfigDescriptorTypeTag(0x6);
|
||||
esdsBox.setSLConfigExtendedDescriptorTypeTag(0x808080);
|
||||
esdsBox.setSLDescriptorTypeLength(1);
|
||||
ase.setCodecBox(esdsBox);
|
||||
stsdBox.setEntry(ase,0);
|
||||
}
|
||||
stblBox.setContent(stsdBox,offset++);
|
||||
}//stsd box
|
||||
{
|
||||
MP4::STTS sttsBox;
|
||||
sttsBox.setVersion(0);
|
||||
if (myMeta.tracks[*it].parts.size()){
|
||||
for (unsigned int part = 0; part < myMeta.tracks[*it].parts.size(); part++){
|
||||
MP4::STTSEntry newEntry;
|
||||
newEntry.sampleCount = 1;
|
||||
newEntry.sampleDelta = myMeta.tracks[*it].parts[part].getDuration();
|
||||
sttsBox.setSTTSEntry(newEntry, part);
|
||||
}
|
||||
}
|
||||
stblBox.setContent(sttsBox,offset++);
|
||||
}//stts box
|
||||
if (myMeta.tracks[*it].type == "video"){
|
||||
//STSS Box here
|
||||
MP4::STSS stssBox;
|
||||
stssBox.setVersion(0);
|
||||
int tmpCount = 0;
|
||||
int tmpItCount = 0;
|
||||
for ( std::deque< DTSC::Key>::iterator tmpIt = myMeta.tracks[*it].keys.begin(); tmpIt != myMeta.tracks[*it].keys.end(); tmpIt ++) {
|
||||
stssBox.setSampleNumber(tmpCount,tmpItCount);
|
||||
tmpCount += tmpIt->getParts();
|
||||
tmpItCount ++;
|
||||
}
|
||||
stblBox.setContent(stssBox,offset++);
|
||||
}//stss box
|
||||
{
|
||||
MP4::STSC stscBox;
|
||||
stscBox.setVersion(0);
|
||||
MP4::STSCEntry stscEntry;
|
||||
stscEntry.firstChunk = 1;
|
||||
stscEntry.samplesPerChunk = 1;
|
||||
stscEntry.sampleDescriptionIndex = 1;
|
||||
stscBox.setSTSCEntry(stscEntry, 0);
|
||||
stblBox.setContent(stscBox,offset++);
|
||||
}//stsc box
|
||||
{
|
||||
uint32_t total = 0;
|
||||
MP4::STSZ stszBox;
|
||||
stszBox.setVersion(0);
|
||||
total = 0;
|
||||
for (std::deque< DTSC::Part>::iterator partIt = myMeta.tracks[*it].parts.begin(); partIt != myMeta.tracks[*it].parts.end(); partIt ++) {
|
||||
stszBox.setEntrySize(partIt->getSize(), total);//in bytes in file
|
||||
size += partIt->getSize();
|
||||
total++;
|
||||
}
|
||||
stblBox.setContent(stszBox,offset++);
|
||||
}//stsz box
|
||||
//add STCO boxes here
|
||||
{
|
||||
MP4::STCO stcoBox;
|
||||
stcoBox.setVersion(1);
|
||||
//Inserting empty values on purpose here, will be fixed later.
|
||||
if (myMeta.tracks[*it].parts.size() != 0){
|
||||
stcoBox.setChunkOffset(0, myMeta.tracks[*it].parts.size() - 1);//this inserts all empty entries at once
|
||||
}
|
||||
stblBox.setContent(stcoBox,offset++);
|
||||
}//stco box
|
||||
minfBox.setContent(stblBox,minfOffset++);
|
||||
}//stbl box
|
||||
mdiaBox.setContent(minfBox, mdiaOffset++);
|
||||
}//minf box
|
||||
trakBox.setContent(mdiaBox, 1);
|
||||
}
|
||||
}//trak Box
|
||||
moovBox.setContent(trakBox, moovOffset++);
|
||||
}
|
||||
//initial offset length ftyp, length moov + 8
|
||||
unsigned long long int byteOffset = ftypBox.boxedSize() + moovBox.boxedSize() + 8;
|
||||
//update all STCO boxes from the following map
|
||||
std::map <int, MP4::STCO> checkStcoBoxes;
|
||||
//for all tracks
|
||||
for (unsigned int i = 1; i < moovBox.getContentCount(); i++){
|
||||
//10 lines to get the STCO box.
|
||||
MP4::TRAK checkTrakBox;
|
||||
MP4::Box checkMdiaBox;
|
||||
MP4::Box checkTkhdBox;
|
||||
MP4::MINF checkMinfBox;
|
||||
MP4::STBL checkStblBox;
|
||||
//MP4::STCO checkStcoBox;
|
||||
checkTrakBox = ((MP4::TRAK&)moovBox.getContent(i));
|
||||
for (unsigned int j = 0; j < checkTrakBox.getContentCount(); j++){
|
||||
if (checkTrakBox.getContent(j).isType("mdia")){
|
||||
checkMdiaBox = checkTrakBox.getContent(j);
|
||||
break;
|
||||
}
|
||||
if (checkTrakBox.getContent(j).isType("tkhd")){
|
||||
checkTkhdBox = checkTrakBox.getContent(j);
|
||||
}
|
||||
}
|
||||
for (unsigned int j = 0; j < ((MP4::MDIA&)checkMdiaBox).getContentCount(); j++){
|
||||
if (((MP4::MDIA&)checkMdiaBox).getContent(j).isType("minf")){
|
||||
checkMinfBox = ((MP4::MINF&)((MP4::MDIA&)checkMdiaBox).getContent(j));
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (unsigned int j = 0; j < checkMinfBox.getContentCount(); j++){
|
||||
if (checkMinfBox.getContent(j).isType("stbl")){
|
||||
checkStblBox = ((MP4::STBL&)checkMinfBox.getContent(j));
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (unsigned int j = 0; j < checkStblBox.getContentCount(); j++){
|
||||
if (checkStblBox.getContent(j).isType("stco")){
|
||||
checkStcoBoxes.insert( std::pair<int, MP4::STCO>(((MP4::TKHD&)checkTkhdBox).getTrackID(), ((MP4::STCO&)checkStblBox.getContent(j)) ));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
//inserting right values in the STCO box header
|
||||
//total = 0;
|
||||
long long unsigned int totalByteOffset = 0;
|
||||
//Current values are actual byte offset without header-sized offset
|
||||
std::set <keyPart> sortSet;//filling sortset for interleaving parts
|
||||
for (std::set<long unsigned int>::iterator subIt = selectedTracks.begin(); subIt != selectedTracks.end(); subIt++) {
|
||||
keyPart temp;
|
||||
temp.trackID = *subIt;
|
||||
temp.time = myMeta.tracks[*subIt].firstms;//timestamp of this frame
|
||||
temp.endTime = myMeta.tracks[*subIt].firstms + myMeta.tracks[*subIt].parts[0].getDuration();
|
||||
temp.size = myMeta.tracks[*subIt].parts[0].getSize();//byte size of this frame (all parts together)
|
||||
temp.index = 0;
|
||||
sortSet.insert(temp);
|
||||
}
|
||||
while (!sortSet.empty()){
|
||||
//setting the right STCO size in the STCO box
|
||||
checkStcoBoxes[sortSet.begin()->trackID].setChunkOffset(totalByteOffset + byteOffset, sortSet.begin()->index);
|
||||
totalByteOffset += sortSet.begin()->size;
|
||||
//add keyPart to sortSet
|
||||
keyPart temp;
|
||||
temp.index = sortSet.begin()->index + 1;
|
||||
temp.trackID = sortSet.begin()->trackID;
|
||||
if(temp.index < myMeta.tracks[temp.trackID].parts.size() ){//only insert when there are parts left
|
||||
temp.time = sortSet.begin()->endTime;//timestamp of this frame
|
||||
temp.endTime = sortSet.begin()->endTime + myMeta.tracks[temp.trackID].parts[temp.index].getDuration();
|
||||
temp.size = myMeta.tracks[temp.trackID].parts[temp.index].getSize();//byte size of this frame
|
||||
sortSet.insert(temp);
|
||||
}
|
||||
//remove highest keyPart
|
||||
sortSet.erase(sortSet.begin());
|
||||
}
|
||||
|
||||
mdatSize = totalByteOffset+8;
|
||||
|
||||
header << std::string(moovBox.asBox(),moovBox.boxedSize());
|
||||
|
||||
header << (char)((mdatSize>>24) & 0xFF) << (char)((mdatSize>>16) & 0xFF) << (char)((mdatSize>>8) & 0xFF) << (char)(mdatSize & 0xFF) << "mdat";
|
||||
//end of header
|
||||
|
||||
size += header.str().size();
|
||||
return header.str();
|
||||
}
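//Worked example of the interleaving above (hypothetical sizes): video track 1 has parts of 4000 and 3600
//bytes starting at 0ms and 40ms, audio track 2 has one 400-byte part at 0ms. With byteOffset equal to
//ftyp + moov + 8, the sortSet walk assigns
//  track 1, chunk 0 -> byteOffset + 0       (0ms; ties go to the lower track ID)
//  track 2, chunk 0 -> byteOffset + 4000    (0ms)
//  track 1, chunk 1 -> byteOffset + 4400    (40ms)
//Each part is its own chunk (stsc: 1 sample per chunk), so the mdat payload is interleaved across tracks
//in timestamp order.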
|
||||
|
||||
/// Calculate a seekPoint, based on byteStart, metadata, tracks and headerSize.
|
||||
/// The seekPoint will be set to the timestamp of the first packet to send.
|
||||
void OutProgressiveMP4::findSeekPoint(long long byteStart, long long & seekPoint, unsigned int headerSize){
|
||||
seekPoint = 0;
|
||||
//if we're starting in the header, seekPoint is always zero.
|
||||
if (byteStart <= headerSize){return;}
|
||||
//okay, we're past the header. Subtract the header size from the starting position.
|
||||
byteStart -= headerSize;
|
||||
//initialize a list of sorted parts that this file contains
|
||||
std::set <keyPart> sortSet;
|
||||
for (std::set<long unsigned int>::iterator subIt = selectedTracks.begin(); subIt != selectedTracks.end(); subIt++) {
|
||||
keyPart temp;
|
||||
temp.trackID = *subIt;
|
||||
temp.time = myMeta.tracks[*subIt].firstms;//timestamp of this frame
|
||||
temp.endTime = myMeta.tracks[*subIt].firstms + myMeta.tracks[*subIt].parts[0].getDuration();
|
||||
temp.size = myMeta.tracks[*subIt].parts[0].getSize();//byte size of this frame (all parts together)
|
||||
temp.index = 0;
|
||||
sortSet.insert(temp);
|
||||
}
|
||||
//forward through the file by headers, until we reach the point where we need to be
|
||||
while (!sortSet.empty()){
|
||||
//subtract the size of this fragment from byteStart
|
||||
byteStart -= sortSet.begin()->size;
|
||||
//if that put us past the point where we wanted to be, return right now
|
||||
if (byteStart < 0){return;}
|
||||
//otherwise, set seekPoint to where we are now
|
||||
seekPoint = sortSet.begin()->time;
|
||||
//then find the next part
|
||||
keyPart temp;
|
||||
temp.index = sortSet.begin()->index + 1;
|
||||
temp.trackID = sortSet.begin()->trackID;
|
||||
if(temp.index < myMeta.tracks[temp.trackID].parts.size() ){//only insert when there are parts left
|
||||
temp.time = sortSet.begin()->endTime;//timestamp of this frame
|
||||
temp.endTime = sortSet.begin()->endTime + myMeta.tracks[temp.trackID].parts[temp.index].getDuration();
|
||||
temp.size = myMeta.tracks[temp.trackID].parts[temp.index].getSize();//byte size of this frame
|
||||
sortSet.insert(temp);
|
||||
}
|
||||
//remove highest keyPart
|
||||
sortSet.erase(sortSet.begin());
|
||||
}
|
||||
//If we're here, we're in the last fragment.
|
||||
//That's technically legal, of course.
|
||||
}
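//In short: seekPoint ends up at the timestamp of the last part that ends at or before byteStart, and
//sendNext() later fast-forwards currPos through the same sorted parts, trimming the first packet it sends,
//so a ranged request resumes mid-stream without replaying everything from the header.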
|
||||
|
||||
/// Parses a "Range: " header, setting byteStart, byteEnd and seekPoint using data from metadata and tracks to do
|
||||
/// the calculations.
|
||||
/// On error, byteEnd is set to zero.
|
||||
void OutProgressiveMP4::parseRange(std::string header, long long & byteStart, long long & byteEnd, long long & seekPoint, unsigned int headerSize){
|
||||
if (header.size() < 6 || header.substr(0, 6) != "bytes="){
|
||||
byteEnd = 0;
|
||||
DEBUG_MSG(DLVL_WARN, "Invalid range header: %s", header.c_str());
|
||||
return;
|
||||
}
|
||||
header.erase(0, 6);
|
||||
if (header.size() && header[0] == '-'){
|
||||
//negative range = count from end
|
||||
byteStart = 0;
|
||||
for (unsigned int i = 1; i < header.size(); ++i){
|
||||
if (header[i] >= '0' && header[i] <= '9'){
|
||||
byteStart *= 10;
|
||||
byteStart += header[i] - '0';
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (byteStart > byteEnd){
|
||||
//entire file if starting before byte zero
|
||||
byteStart = 0;
|
||||
DEBUG_MSG(DLVL_DEVEL, "Full negative range: %lli-%lli", byteStart, byteEnd);
|
||||
findSeekPoint(byteStart, seekPoint, headerSize);
|
||||
return;
|
||||
}else{
|
||||
//start byteStart bytes before byteEnd
|
||||
byteStart = byteEnd - byteStart;
|
||||
DEBUG_MSG(DLVL_DEVEL, "Partial negative range: %lli-%lli", byteStart, byteEnd);
|
||||
findSeekPoint(byteStart, seekPoint, headerSize);
|
||||
return;
|
||||
}
|
||||
}else{
|
||||
long long size = byteEnd;
|
||||
byteEnd = 0;
|
||||
byteStart = 0;
|
||||
unsigned int i = 0;
|
||||
for ( ; i < header.size(); ++i){
|
||||
if (header[i] >= '0' && header[i] <= '9'){
|
||||
byteStart *= 10;
|
||||
byteStart += header[i] - '0';
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (header[i] != '-'){
|
||||
DEBUG_MSG(DLVL_WARN, "Invalid range header: %s", header.c_str());
|
||||
byteEnd = 0;
|
||||
return;
|
||||
}
|
||||
++i;
|
||||
if (i < header.size()){
|
||||
for ( ; i < header.size(); ++i){
|
||||
if (header[i] >= '0' && header[i] <= '9'){
|
||||
byteEnd *= 10;
|
||||
byteEnd += header[i] - '0';
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (byteEnd > size-1){byteEnd = size;}
|
||||
}else{
|
||||
byteEnd = size;
|
||||
}
|
||||
DEBUG_MSG(DLVL_DEVEL, "Range request: %lli-%lli (%s)", byteStart, byteEnd, header.c_str());
|
||||
findSeekPoint(byteStart, seekPoint, headerSize);
|
||||
return;
|
||||
}
|
||||
}
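//Worked examples (byteEnd is passed in as fileSize - 1, here 9999 for a 10000-byte file):
//  "bytes=0-499"  -> byteStart = 0,    byteEnd = 499    (first 500 bytes)
//  "bytes=9500-"  -> byteStart = 9500, byteEnd = 9999   (open-ended range, served to the end)
//  "bytes=-500"   -> byteStart = 9499, byteEnd = 9999   (suffix range, counted from the end)
//Anything not starting with "bytes=" zeroes byteEnd, which sendHeader() turns into a 416 response.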
|
||||
|
||||
void OutProgressiveMP4::onRequest(){
|
||||
while (HTTP_R.Read(myConn)){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Received request: %s", HTTP_R.getUrl().c_str());
|
||||
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
|
||||
streamName = HTTP_R.GetHeader("X-Stream");
|
||||
if (HTTP_R.GetVar("audio") != ""){
|
||||
DEBUG_MSG(DLVL_DEVEL, "GetVar Aud = %s", HTTP_R.GetVar("audio").c_str());
|
||||
selectedTracks.insert(JSON::Value(HTTP_R.GetVar("audio")).asInt());
|
||||
}else{
|
||||
DEBUG_MSG(DLVL_DEVEL, "No audio param given");
|
||||
}
|
||||
if (HTTP_R.GetVar("video") != ""){
|
||||
DEBUG_MSG(DLVL_DEVEL, "GetVar Vid = %s", HTTP_R.GetVar("video").c_str());
|
||||
selectedTracks.insert(JSON::Value(HTTP_R.GetVar("video")).asInt());
|
||||
}else{
|
||||
DEBUG_MSG(DLVL_DEVEL, "No video param given");
|
||||
}
|
||||
|
||||
parseData = true;
|
||||
wantRequest = false;
|
||||
}
|
||||
}
|
||||
|
||||
bool OutProgressiveMP4::onFinish(){
|
||||
HTTP_R.Clean();
|
||||
parseData = false;
|
||||
wantRequest = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
void OutProgressiveMP4::onFail(){
|
||||
HTTP_S.Clean(); //make sure no parts of old requests are left in any buffers
|
||||
HTTP_S.SetBody("Stream not found. Sorry, we tried.");
|
||||
HTTP_S.SendResponse("404", "Stream not found", myConn);
|
||||
Output::onFail();
|
||||
}
|
||||
|
||||
void OutProgressiveMP4::sendNext(){
|
||||
char * dataPointer = 0;
|
||||
int len = 0;
|
||||
currentPacket.getString("data", dataPointer, len);
|
||||
|
||||
//keep track of where we are - fast-forward until where we are now
|
||||
while (!sortSet.empty() && ((long long)sortSet.begin()->trackID != currentPacket.getTrackId() || (long long)sortSet.begin()->time != currentPacket.getTime())){
|
||||
keyPart temp;
|
||||
temp.index = sortSet.begin()->index + 1;
|
||||
temp.trackID = sortSet.begin()->trackID;
|
||||
if(temp.index < myMeta.tracks[temp.trackID].parts.size() ){//only insert when there are parts left
|
||||
temp.time = sortSet.begin()->endTime;//timestamp of this frame
|
||||
temp.endTime = sortSet.begin()->endTime + myMeta.tracks[temp.trackID].parts[temp.index].getDuration();
|
||||
temp.size = myMeta.tracks[temp.trackID].parts[temp.index].getSize();//byte size of this frame
|
||||
sortSet.insert(temp);
|
||||
}
|
||||
currPos += sortSet.begin()->size;
|
||||
//remove highest keyPart
|
||||
sortSet.erase(sortSet.begin());
|
||||
}
|
||||
if (currPos >= byteStart){
|
||||
sortSet.clear();//we don't need you anymore!
|
||||
myConn.SendNow(dataPointer, std::min(leftOver, (long long)len));
|
||||
//HTTP_S.Chunkify(Strm.lastData().data(), Strm.lastData().size(), conn);
|
||||
leftOver -= len;
|
||||
}else{
|
||||
if (currPos + (long long)len > byteStart){
|
||||
myConn.SendNow(dataPointer+(byteStart-currPos), len-(byteStart-currPos));
|
||||
leftOver -= len-(byteStart-currPos);
|
||||
currPos = byteStart;
|
||||
sortSet.clear();//we don't need you anymore!
|
||||
}
|
||||
}
|
||||
if (leftOver < 1){
|
||||
//stop playback, wait for new request
|
||||
stop();
|
||||
wantRequest = true;
|
||||
}
|
||||
}
|
||||
|
||||
void OutProgressiveMP4::sendHeader(){
|
||||
fileSize = 0;
|
||||
std::string headerData = DTSCMeta2MP4Header(fileSize);
|
||||
byteStart = 0;
|
||||
byteEnd = fileSize - 1;
|
||||
long long seekPoint = 0;
|
||||
char rangeType = ' ';
|
||||
if (HTTP_R.GetHeader("Range") != ""){
|
||||
parseRange(HTTP_R.GetHeader("Range"), byteStart, byteEnd, seekPoint, headerData.size());
|
||||
rangeType = HTTP_R.GetHeader("Range")[0];
|
||||
}
|
||||
sortSet.clear();
|
||||
for (std::set<long unsigned int>::iterator subIt = selectedTracks.begin(); subIt != selectedTracks.end(); subIt++) {
|
||||
keyPart temp;
|
||||
temp.trackID = *subIt;
|
||||
temp.time = myMeta.tracks[*subIt].firstms;//timestamp of this frame
|
||||
temp.endTime = myMeta.tracks[*subIt].firstms + myMeta.tracks[*subIt].parts[0].getDuration();
|
||||
temp.size = myMeta.tracks[*subIt].parts[0].getSize();//byte size of this frame (all parts together)
|
||||
temp.index = 0;
|
||||
sortSet.insert(temp);
|
||||
}
|
||||
HTTP_S.Clean(); //make sure no parts of old requests are left in any buffers
|
||||
HTTP_S.SetHeader("Content-Type", "video/MP4"); //Send the correct content-type for MP4 files
|
||||
HTTP_S.SetHeader("Accept-Ranges", "bytes, parsec");
|
||||
if (rangeType != ' '){
|
||||
DEBUG_MSG(DLVL_DEVEL, "Ranged request");
|
||||
if (!byteEnd){
|
||||
if (rangeType == 'p'){
|
||||
HTTP_S.SetBody("Starsystem not in communications range");
|
||||
HTTP_S.SendResponse("416", "Starsystem not in communications range", myConn);
|
||||
return;
|
||||
}else{
|
||||
HTTP_S.SetBody("Requested Range Not Satisfiable");
|
||||
HTTP_S.SendResponse("416", "Requested Range Not Satisfiable", myConn);
|
||||
return;
|
||||
}
|
||||
}else{
|
||||
std::stringstream rangeReply;
|
||||
rangeReply << "bytes " << byteStart << "-" << byteEnd << "/" << fileSize;
|
||||
HTTP_S.SetHeader("Content-Length", byteEnd - byteStart + 1);
|
||||
//do not multiplex requests that are > 1MiB
|
||||
if (byteEnd - byteStart + 1 > 1024*1024){
|
||||
HTTP_S.SetHeader("MistMultiplex", "No");
|
||||
}
|
||||
HTTP_S.SetHeader("Content-Range", rangeReply.str());
|
||||
/// \todo Switch to chunked?
|
||||
HTTP_S.SendResponse("206", "Partial content", myConn);
|
||||
//HTTP_S.StartResponse("206", "Partial content", HTTP_R, conn);
|
||||
}
|
||||
}else{
|
||||
DEBUG_MSG(DLVL_DEVEL, "Non-Ranged request");
|
||||
HTTP_S.SetHeader("Content-Length", byteEnd - byteStart + 1);
|
||||
//do not multiplex requests that aren't ranged
|
||||
HTTP_S.SetHeader("MistMultiplex", "No");
|
||||
/// \todo Switch to chunked?
|
||||
HTTP_S.SendResponse("200", "OK", myConn);
|
||||
//HTTP_S.StartResponse(HTTP_R, conn);
|
||||
}
|
||||
leftOver = byteEnd - byteStart + 1;//add one byte, because range "0-0" = 1 byte of data
|
||||
currPos = 0;
|
||||
if (byteStart < (long long)headerData.size()){
|
||||
/// \todo Switch to chunked?
|
||||
//HTTP_S.Chunkify(headerData.data()+byteStart, std::min((long long)headerData.size(), byteEnd) - byteStart, conn);//send MP4 header
|
||||
myConn.SendNow(headerData.data()+byteStart, std::min((long long)headerData.size(), byteEnd) - byteStart);//send MP4 header
|
||||
leftOver -= std::min((long long)headerData.size(), byteEnd) - byteStart;
|
||||
}
|
||||
currPos = headerData.size();//we're now guaranteed to be past the header point, no matter what
|
||||
seek(seekPoint);
|
||||
sentHeader = true;
|
||||
}
|
||||
|
||||
}
|
50
src/output/output_progressive_mp4.h
Normal file
@@ -0,0 +1,50 @@
#include "output.h"
|
||||
#include <mist/http_parser.h>
|
||||
|
||||
namespace Mist {
|
||||
struct keyPart{
|
||||
public:
|
||||
bool operator < (const keyPart& rhs) const {
|
||||
if (time < rhs.time){
|
||||
return true;
|
||||
}
|
||||
if (time == rhs.time){
|
||||
if (trackID < rhs.trackID){
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
long unsigned int trackID;
|
||||
long unsigned int size;
|
||||
long long unsigned int time;
|
||||
long long unsigned int endTime;
|
||||
long unsigned int index;
|
||||
};
|
||||
|
||||
class OutProgressiveMP4 : public Output {
|
||||
public:
|
||||
OutProgressiveMP4(Socket::Connection & conn);
|
||||
~OutProgressiveMP4();
|
||||
static void init(Util::Config * cfg);
|
||||
void parseRange(std::string header, long long & byteStart, long long & byteEnd, long long & seekPoint, unsigned int headerSize);
|
||||
std::string DTSCMeta2MP4Header(long long & size);
|
||||
void findSeekPoint(long long byteStart, long long & seekPoint, unsigned int headerSize);
|
||||
|
||||
void onRequest();
|
||||
void sendNext();
|
||||
bool onFinish();
|
||||
void sendHeader();
|
||||
void onFail();
|
||||
protected:
|
||||
long long fileSize;
|
||||
long long byteStart;
|
||||
long long byteEnd;
|
||||
long long leftOver;
|
||||
long long currPos;
|
||||
std::set <keyPart> sortSet;//filling sortset for interleaving parts
|
||||
HTTP::Parser HTTP_R, HTTP_S;
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutProgressiveMP4 mistOut;
|
69
src/output/output_raw.cpp
Normal file
@@ -0,0 +1,69 @@
#include "output_raw.h"
|
||||
|
||||
namespace Mist {
|
||||
OutRaw::OutRaw(Socket::Connection & conn) : Output(conn) {
|
||||
streamName = config->getString("streamname");
|
||||
initialize();
|
||||
selectedTracks.clear();
|
||||
std::string tracks = config->getString("tracks");
|
||||
unsigned int currTrack = 0;
|
||||
//loop over tracks, add any found track IDs to selectedTracks
|
||||
if (tracks != ""){
|
||||
for (unsigned int i = 0; i < tracks.size(); ++i){
|
||||
if (tracks[i] >= '0' && tracks[i] <= '9'){
|
||||
currTrack = currTrack*10 + (tracks[i] - '0');
|
||||
}else{
|
||||
if (currTrack > 0){
|
||||
selectedTracks.insert(currTrack);
|
||||
}
|
||||
currTrack = 0;
|
||||
}
|
||||
}
|
||||
if (currTrack > 0){
|
||||
selectedTracks.insert(currTrack);
|
||||
}
|
||||
}
|
||||
parseData = true;
|
||||
seek(config->getInteger("seek"));
|
||||
}
|
||||
|
||||
OutRaw::~OutRaw() {}
|
||||
|
||||
void OutRaw::init(Util::Config * cfg){
|
||||
capa["name"] = "RAW";
|
||||
capa["desc"] = "Enables raw DTSC over TCP.";
|
||||
capa["deps"] = "";
|
||||
capa["required"]["streamname"]["name"] = "Stream";
|
||||
capa["required"]["streamname"]["help"] = "What streamname to serve. For multiple streams, add this protocol multiple times using different ports.";
|
||||
capa["required"]["streamname"]["type"] = "str";
|
||||
capa["required"]["streamname"]["option"] = "--stream";
|
||||
capa["optional"]["tracks"]["name"] = "Tracks";
|
||||
capa["optional"]["tracks"]["help"] = "The track IDs of the stream that this connector will transmit separated by spaces";
|
||||
capa["optional"]["tracks"]["type"] = "str";
|
||||
capa["optional"]["tracks"]["option"] = "--tracks";
|
||||
capa["optional"]["seek"]["name"] = "Seek point";
|
||||
capa["optional"]["seek"]["help"] = "The time in milliseconds to seek to, 0 by default.";
|
||||
capa["optional"]["seek"]["type"] = "int";
|
||||
capa["optional"]["seek"]["option"] = "--seek";
|
||||
capa["codecs"][0u][0u].append("H264");
|
||||
capa["codecs"][0u][1u].append("AAC");
|
||||
cfg->addOption("streamname",
|
||||
JSON::fromString("{\"arg\":\"string\",\"short\":\"s\",\"long\":\"stream\",\"help\":\"The name of the stream that this connector will transmit.\"}"));
|
||||
cfg->addOption("tracks",
|
||||
JSON::fromString("{\"arg\":\"string\",\"value\":[\"\"],\"short\": \"t\",\"long\":\"tracks\",\"help\":\"The track IDs of the stream that this connector will transmit separated by spaces.\"}"));
|
||||
cfg->addOption("seek",
|
||||
JSON::fromString("{\"arg\":\"integer\",\"value\":[0],\"short\": \"S\",\"long\":\"seek\",\"help\":\"The time in milliseconds to seek to, 0 by default.\"}"));
|
||||
cfg->addConnectorOptions(666, capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
void OutRaw::sendNext(){
|
||||
myConn.SendNow(currentPacket.getData(), currentPacket.getDataLen());
|
||||
}
|
||||
|
||||
void OutRaw::sendHeader(){
|
||||
myMeta.send(myConn);
|
||||
sentHeader = true;
|
||||
}
|
||||
|
||||
}
|
15
src/output/output_raw.h
Normal file
@@ -0,0 +1,15 @@
#include "output.h"
|
||||
|
||||
|
||||
namespace Mist {
|
||||
class OutRaw : public Output {
|
||||
public:
|
||||
OutRaw(Socket::Connection & conn);
|
||||
~OutRaw();
|
||||
static void init(Util::Config * cfg);
|
||||
void sendNext();
|
||||
void sendHeader();
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutRaw mistOut;
|
754
src/output/output_rtmp.cpp
Normal file
@@ -0,0 +1,754 @@
#include "output_rtmp.h"
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/defines.h>
|
||||
#include <mist/stream.h>
|
||||
#include <cstring>
|
||||
#include <cstdlib>
|
||||
|
||||
namespace Mist {
|
||||
OutRTMP::OutRTMP(Socket::Connection & conn) : Output(conn) {
|
||||
playTransaction = -1;
|
||||
playMessageType = -1;
|
||||
playStreamId = -1;
|
||||
setBlocking(false);
|
||||
while (!conn.Received().available(1537) && conn.connected()) {
|
||||
conn.spool();
|
||||
Util::sleep(5);
|
||||
}
|
||||
RTMPStream::handshake_in = conn.Received().remove(1537);
|
||||
RTMPStream::rec_cnt += 1537;
|
||||
|
||||
if (RTMPStream::doHandshake()) {
|
||||
conn.SendNow(RTMPStream::handshake_out);
|
||||
while (!conn.Received().available(1536) && conn.connected()) {
|
||||
conn.spool();
|
||||
Util::sleep(5);
|
||||
}
|
||||
conn.Received().remove(1536);
|
||||
RTMPStream::rec_cnt += 1536;
|
||||
DEBUG_MSG(DLVL_HIGH, "Handshake success!");
|
||||
} else {
|
||||
DEBUG_MSG(DLVL_DEVEL, "Handshake fail!");
|
||||
}
|
||||
counter = 0;
|
||||
sending = false;
|
||||
streamReset = false;
|
||||
}
|
||||
|
||||
OutRTMP::~OutRTMP() {}
|
||||
|
||||
void OutRTMP::init(Util::Config * cfg) {
|
||||
capa["name"] = "RTMP";
|
||||
capa["desc"] = "Enables the RTMP protocol which is used by Adobe Flash Player.";
|
||||
capa["deps"] = "";
|
||||
capa["url_rel"] = "/play/$";
|
||||
capa["codecs"][0u][0u].append("H264");
|
||||
capa["codecs"][0u][0u].append("H263");
|
||||
capa["codecs"][0u][0u].append("VP6");
|
||||
capa["codecs"][0u][1u].append("AAC");
|
||||
capa["codecs"][0u][1u].append("MP3");
|
||||
capa["methods"][0u]["handler"] = "rtmp";
|
||||
capa["methods"][0u]["type"] = "flash/10";
|
||||
capa["methods"][0u]["priority"] = 6ll;
|
||||
cfg->addConnectorOptions(1935, capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
void OutRTMP::sendNext() {
|
||||
//send a tag
|
||||
FLV::Tag tag;
|
||||
if (tag.DTSCLoader(currentPacket, myMeta.tracks[currentPacket.getTrackId()])) {
|
||||
if (tag.len) {
|
||||
myConn.SendNow(RTMPStream::SendMedia(tag));
|
||||
#if DEBUG >= 8
|
||||
fprintf(stderr, "Sent tag to %i: [%u] %s\n", myConn.getSocket(), tag.tagTime(), tag.tagType().c_str());
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void OutRTMP::sendHeader() {
|
||||
FLV::Tag tag;
|
||||
tag.DTSCMetaInit(myMeta, selectedTracks);
|
||||
if (tag.len) {
|
||||
myConn.SendNow(RTMPStream::SendMedia(tag));
|
||||
}
|
||||
|
||||
for (std::set<long unsigned int>::iterator it = selectedTracks.begin(); it != selectedTracks.end(); it++) {
|
||||
if (myMeta.tracks[*it].type == "video") {
|
||||
tag.DTSCVideoInit(myMeta.tracks[*it]);
|
||||
if (tag.len) {
|
||||
myConn.SendNow(RTMPStream::SendMedia(tag));
|
||||
}
|
||||
}
|
||||
if (myMeta.tracks[*it].type == "audio") {
|
||||
tag.DTSCAudioInit(myMeta.tracks[*it]);
|
||||
if (tag.len) {
|
||||
myConn.SendNow(RTMPStream::SendMedia(tag));
|
||||
}
|
||||
}
|
||||
}
|
||||
sentHeader = true;
|
||||
}
|
||||
|
||||
void OutRTMP::onRequest() {
|
||||
parseChunk(myConn.Received());
|
||||
}
|
||||
|
||||
///\brief Sends a RTMP command either in AMF or AMF3 mode.
|
||||
///\param amfReply The data to be sent over RTMP.
|
||||
///\param messageType The type of message.
|
||||
///\param streamId The ID of the AMF stream.
|
||||
void OutRTMP::sendCommand(AMF::Object & amfReply, int messageType, int streamId) {
|
||||
#if DEBUG >= 8
|
||||
std::cerr << amfReply.Print() << std::endl;
|
||||
#endif
|
||||
if (messageType == 17) {
|
||||
myConn.SendNow(RTMPStream::SendChunk(3, messageType, streamId, (char)0 + amfReply.Pack()));
|
||||
} else {
|
||||
myConn.SendNow(RTMPStream::SendChunk(3, messageType, streamId, amfReply.Pack()));
|
||||
}
|
||||
} //sendCommand
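//Usage note: messageType 17 is an AMF3 command message; RTMP expects it to start with a single 0x00 byte
//(signalling that AMF0-encoded data follows), which is what the (char)0 prepend above does. Plain AMF0
//commands (typically messageType 20) send the packed reply unmodified. Both are sent through
//RTMPStream::SendChunk() on channel 3 with the caller's stream ID.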
|
||||
|
||||
///\brief Parses a single AMF command message, and sends a direct response through sendCommand().
|
||||
///\param amfData The received request.
|
||||
///\param messageType The type of message.
|
||||
///\param streamId The ID of the AMF stream.
|
||||
void OutRTMP::parseAMFCommand(AMF::Object & amfData, int messageType, int streamId) {
|
||||
#if DEBUG >= 5
|
||||
fprintf(stderr, "Received command: %s\n", amfData.Print().c_str());
|
||||
#endif
|
||||
#if DEBUG >= 8
|
||||
fprintf(stderr, "AMF0 command: %s\n", amfData.getContentP(0)->StrValue().c_str());
|
||||
#endif
|
||||
if (amfData.getContentP(0)->StrValue() == "connect") {
|
||||
double objencoding = 0;
|
||||
if (amfData.getContentP(2)->getContentP("objectEncoding")) {
|
||||
objencoding = amfData.getContentP(2)->getContentP("objectEncoding")->NumValue();
|
||||
}
|
||||
#if DEBUG >= 6
|
||||
int tmpint;
|
||||
if (amfData.getContentP(2)->getContentP("videoCodecs")) {
|
||||
tmpint = (int)amfData.getContentP(2)->getContentP("videoCodecs")->NumValue();
|
||||
if (tmpint & 0x04) {
|
||||
fprintf(stderr, "Sorensen video support detected\n");
|
||||
}
|
||||
if (tmpint & 0x80) {
|
||||
fprintf(stderr, "H264 video support detected\n");
|
||||
}
|
||||
}
|
||||
if (amfData.getContentP(2)->getContentP("audioCodecs")) {
|
||||
tmpint = (int)amfData.getContentP(2)->getContentP("audioCodecs")->NumValue();
|
||||
if (tmpint & 0x04) {
|
||||
fprintf(stderr, "MP3 audio support detected\n");
|
||||
}
|
||||
if (tmpint & 0x400) {
|
||||
fprintf(stderr, "AAC audio support detected\n");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
app_name = amfData.getContentP(2)->getContentP("tcUrl")->StrValue();
|
||||
app_name = app_name.substr(app_name.find('/', 7) + 1);
|
||||
RTMPStream::chunk_snd_max = 4096;
|
||||
myConn.Send(RTMPStream::SendCTL(1, RTMPStream::chunk_snd_max)); //send chunk size max (msg 1)
|
||||
myConn.Send(RTMPStream::SendCTL(5, RTMPStream::snd_window_size)); //send window acknowledgement size (msg 5)
|
||||
myConn.Send(RTMPStream::SendCTL(6, RTMPStream::rec_window_size)); //send rec window acknowledgement size (msg 6)
|
||||
myConn.Send(RTMPStream::SendUSR(0, 1)); //send UCM StreamBegin (0), stream 1
|
||||
//send a _result reply
|
||||
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
|
||||
amfReply.addContent(AMF::Object("", "_result")); //result success
|
||||
amfReply.addContent(amfData.getContent(1)); //same transaction ID
|
||||
amfReply.addContent(AMF::Object("")); //server properties
|
||||
amfReply.getContentP(2)->addContent(AMF::Object("fmsVer", "FMS/3,5,5,2004"));
|
||||
amfReply.getContentP(2)->addContent(AMF::Object("capabilities", (double)31));
|
||||
amfReply.getContentP(2)->addContent(AMF::Object("mode", (double)1));
|
||||
amfReply.addContent(AMF::Object("")); //info
|
||||
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
|
||||
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetConnection.Connect.Success"));
|
||||
amfReply.getContentP(3)->addContent(AMF::Object("description", "Connection succeeded."));
|
||||
amfReply.getContentP(3)->addContent(AMF::Object("clientid", 1337));
|
||||
amfReply.getContentP(3)->addContent(AMF::Object("objectEncoding", objencoding));
|
||||
//amfReply.getContentP(3)->addContent(AMF::Object("data", AMF::AMF0_ECMA_ARRAY));
|
||||
//amfReply.getContentP(3)->getContentP(4)->addContent(AMF::Object("version", "3,5,4,1004"));
|
||||
sendCommand(amfReply, messageType, streamId);
|
||||
//send onBWDone packet - no clue what it is, but real server sends it...
|
||||
//amfReply = AMF::Object("container", AMF::AMF0_DDV_CONTAINER);
|
||||
//amfReply.addContent(AMF::Object("", "onBWDone"));//result
|
||||
//amfReply.addContent(amfData.getContent(1));//same transaction ID
|
||||
//amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL));//null
|
||||
//sendCommand(amfReply, messageType, streamId);
|
||||
return;
|
||||
} //connect
|
||||
if (amfData.getContentP(0)->StrValue() == "createStream") {
|
||||
//send a _result reply
|
||||
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
|
||||
amfReply.addContent(AMF::Object("", "_result")); //result success
|
||||
amfReply.addContent(amfData.getContent(1)); //same transaction ID
|
||||
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
|
||||
amfReply.addContent(AMF::Object("", (double)1)); //stream ID - we use 1
|
||||
sendCommand(amfReply, messageType, streamId);
|
||||
myConn.Send(RTMPStream::SendUSR(0, 1)); //send UCM StreamBegin (0), stream 1
|
||||
return;
|
||||
} //createStream
|
||||
if ((amfData.getContentP(0)->StrValue() == "closeStream") || (amfData.getContentP(0)->StrValue() == "deleteStream")) {
|
||||
stop();
|
||||
return;
|
||||
}
|
||||
if ((amfData.getContentP(0)->StrValue() == "FCUnpublish") || (amfData.getContentP(0)->StrValue() == "releaseStream")) {
|
||||
// ignored
|
||||
return;
|
||||
}
|
||||
if ((amfData.getContentP(0)->StrValue() == "FCPublish")) {
|
||||
//send an onFCPublish reply
|
||||
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
|
||||
amfReply.addContent(AMF::Object("", "onFCPublish")); //status reply
|
||||
amfReply.addContent(AMF::Object("", 0, AMF::AMF0_NUMBER)); //same transaction ID
|
||||
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
|
||||
amfReply.addContent(AMF::Object("")); //info
|
||||
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Publish.Start"));
|
||||
amfReply.getContentP(3)->addContent(AMF::Object("description", "Please followup with publish command..."));
|
||||
sendCommand(amfReply, messageType, streamId);
|
||||
return;
|
||||
} //FCPublish
|
||||
if (amfData.getContentP(0)->StrValue() == "releaseStream") {
|
||||
//send a _result reply
|
||||
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
|
||||
amfReply.addContent(AMF::Object("", "_result")); //result success
|
||||
amfReply.addContent(amfData.getContent(1)); //same transaction ID
|
||||
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
|
||||
amfReply.addContent(AMF::Object("", AMF::AMF0_UNDEFINED)); //stream ID?
|
||||
sendCommand(amfReply, messageType, streamId);
|
||||
return;
|
||||
}//releaseStream
|
||||
if ((amfData.getContentP(0)->StrValue() == "getStreamLength") || (amfData.getContentP(0)->StrValue() == "getMovLen")) {
//send a _result reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "_result")); //result success
amfReply.addContent(amfData.getContent(1)); //same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfReply.addContent(AMF::Object("", (double)0)); //zero length
sendCommand(amfReply, messageType, streamId);
return;
} //getStreamLength
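//The publish handler below accepts an incoming RTMP push: the stream must exist in the
//server's stream list, its configured source must start with "push://", and if a host is
//given before the '@' in that source, the pushing connection must come from that address.
//Only then is the stream initialized and NetStream.Publish.Start sent.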
if ((amfData.getContentP(0)->StrValue() == "publish")) {
if (amfData.getContentP(3)) {
streamName = amfData.getContentP(3)->StrValue();
//pull the server configuration
JSON::Value servConf = JSON::fromFile(Util::getTmpFolder() + "streamlist");
if (servConf.isMember("streams") && servConf["streams"].isMember(streamName)){
JSON::Value & streamConfig = servConf["streams"][streamName];
if (!streamConfig.isMember("source") || streamConfig["source"].asStringRef().substr(0, 7) != "push://"){
DEBUG_MSG(DLVL_FAIL, "Push rejected - stream not a push-able stream. (%s != push://*)", streamConfig["source"].asStringRef().c_str());
myConn.close();
return;
}
std::string source = streamConfig["source"].asStringRef().substr(7);
std::string IP = source.substr(0, source.find('@'));
if (IP != ""){
if (!myConn.isAddress(IP)){
DEBUG_MSG(DLVL_FAIL, "Push rejected - source host not whitelisted");
myConn.close();
return;
}
}
}else{
DEBUG_MSG(DLVL_FAIL, "Push rejected - stream not configured.");
myConn.close();
return;
}
initialize();
}
//send a _result reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "_result")); //result success
amfReply.addContent(amfData.getContent(1)); //same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfReply.addContent(AMF::Object("", 1, AMF::AMF0_BOOL)); //publish success?
sendCommand(amfReply, messageType, streamId);
myConn.Send(RTMPStream::SendUSR(0, 1)); //send UCM StreamBegin (0), stream 1
//send a status reply
amfReply = AMF::Object("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onStatus")); //status reply
amfReply.addContent(AMF::Object("", 0, AMF::AMF0_NUMBER)); //same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfReply.addContent(AMF::Object("")); //info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Publish.Start"));
amfReply.getContentP(3)->addContent(AMF::Object("description", "Stream is now published!"));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
sendCommand(amfReply, messageType, streamId);
return;
} //publish
if (amfData.getContentP(0)->StrValue() == "checkBandwidth") {
//send a _result reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "_result")); //result success
amfReply.addContent(amfData.getContent(1)); //same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
sendCommand(amfReply, messageType, streamId);
return;
} //checkBandwidth
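//The play/play2 handler below selects the requested stream, answers with
//NetStream.Play.Reset and NetStream.Play.Start, raises the outgoing chunk size to 100KiB,
//and sets parseData so media sending can begin.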
if ((amfData.getContentP(0)->StrValue() == "play") || (amfData.getContentP(0)->StrValue() == "play2")) {
//set reply number and stream name for the replies below
int playTransaction = amfData.getContentP(1)->NumValue();
int playMessageType = messageType;
int playStreamId = streamId;
streamName = amfData.getContentP(3)->StrValue();
initialize();

//send a status reply
AMF::Object amfreply("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); //status reply
amfreply.addContent(AMF::Object("", (double)playTransaction)); //same transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfreply.addContent(AMF::Object("")); //info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Reset"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Playing and resetting..."));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
sendCommand(amfreply, playMessageType, playStreamId);
//send streamisrecorded if stream, well, is recorded.
if (myMeta.vod) { //isMember("length") && Strm.metadata["length"].asInt() > 0){
myConn.Send(RTMPStream::SendUSR(4, 1)); //send UCM StreamIsRecorded (4), stream 1
}
//send streambegin
myConn.Send(RTMPStream::SendUSR(0, 1)); //send UCM StreamBegin (0), stream 1
//and more reply
amfreply = AMF::Object("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); //status reply
amfreply.addContent(AMF::Object("", (double)playTransaction)); //same transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfreply.addContent(AMF::Object("")); //info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Start"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Playing!"));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
sendCommand(amfreply, playMessageType, playStreamId);
RTMPStream::chunk_snd_max = 102400; //100KiB
myConn.Send(RTMPStream::SendCTL(1, RTMPStream::chunk_snd_max)); //send chunk size max (msg 1)
//send dunno?
myConn.Send(RTMPStream::SendUSR(32, 1)); //send UCM no clue?, stream 1

parseData = true;
return;
} //play
if ((amfData.getContentP(0)->StrValue() == "seek")) {
//set reply number and stream id for the replies below
int playTransaction = amfData.getContentP(1)->NumValue();
int playMessageType = messageType;
int playStreamId = streamId;

AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onStatus")); //status reply
amfReply.addContent(amfData.getContent(1)); //same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfReply.addContent(AMF::Object("")); //info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Seek.Notify"));
amfReply.getContentP(3)->addContent(AMF::Object("description", "Seeking to the specified time"));
amfReply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
sendCommand(amfReply, playMessageType, playStreamId);
seek((long long int)amfData.getContentP(3)->NumValue());

//send a status reply
AMF::Object amfreply("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); //status reply
amfreply.addContent(AMF::Object("", (double)playTransaction)); //same transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfreply.addContent(AMF::Object("")); //info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Reset"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Playing and resetting..."));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
sendCommand(amfreply, playMessageType, playStreamId);
//send streamisrecorded if stream, well, is recorded.
if (myMeta.vod) { //isMember("length") && Strm.metadata["length"].asInt() > 0){
myConn.Send(RTMPStream::SendUSR(4, 1)); //send UCM StreamIsRecorded (4), stream 1
}
//send streambegin
myConn.Send(RTMPStream::SendUSR(0, 1)); //send UCM StreamBegin (0), stream 1
//and more reply
amfreply = AMF::Object("container", AMF::AMF0_DDV_CONTAINER);
amfreply.addContent(AMF::Object("", "onStatus")); //status reply
amfreply.addContent(AMF::Object("", (double)playTransaction)); //same transaction ID
amfreply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfreply.addContent(AMF::Object("")); //info
amfreply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfreply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Play.Start"));
amfreply.getContentP(3)->addContent(AMF::Object("description", "Playing!"));
amfreply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfreply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
sendCommand(amfreply, playMessageType, playStreamId);
RTMPStream::chunk_snd_max = 102400; //100KiB
myConn.Send(RTMPStream::SendCTL(1, RTMPStream::chunk_snd_max)); //send chunk size max (msg 1)
//send dunno?
myConn.Send(RTMPStream::SendUSR(32, 1)); //send UCM no clue?, stream 1

return;
} //seek
if ((amfData.getContentP(0)->StrValue() == "pauseRaw") || (amfData.getContentP(0)->StrValue() == "pause")) {
if (amfData.getContentP(3)->NumValue()) {
parseData = false;
//send a status reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onStatus")); //status reply
amfReply.addContent(amfData.getContent(1)); //same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfReply.addContent(AMF::Object("")); //info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Pause.Notify"));
amfReply.getContentP(3)->addContent(AMF::Object("description", "Pausing playback"));
amfReply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
sendCommand(amfReply, playMessageType, playStreamId);
} else {
parseData = true;
//send a status reply
AMF::Object amfReply("container", AMF::AMF0_DDV_CONTAINER);
amfReply.addContent(AMF::Object("", "onStatus")); //status reply
amfReply.addContent(amfData.getContent(1)); //same transaction ID
amfReply.addContent(AMF::Object("", (double)0, AMF::AMF0_NULL)); //null - command info
amfReply.addContent(AMF::Object("")); //info
amfReply.getContentP(3)->addContent(AMF::Object("level", "status"));
amfReply.getContentP(3)->addContent(AMF::Object("code", "NetStream.Unpause.Notify"));
amfReply.getContentP(3)->addContent(AMF::Object("description", "Resuming playback"));
amfReply.getContentP(3)->addContent(AMF::Object("details", "DDV"));
amfReply.getContentP(3)->addContent(AMF::Object("clientid", (double)1337));
sendCommand(amfReply, playMessageType, playStreamId);
}
return;
} //pause

#if DEBUG >= 2
fprintf(stderr, "AMF0 command not processed!\n%s\n", amfData.Print().c_str());
#endif
} //parseAMFCommand
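
//bufferPacket() writes incoming packets, in DTSC network-packed form, into the current
//shared memory page of their (remapped) track. Once more than 8MiB has been written and a
//keyframe arrives, it switches to the next page; packets that do not fit on the current
//page are dropped with a warning until that switch happens.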
void OutRTMP::bufferPacket(JSON::Value & pack){
if (!trackMap.count(pack["trackid"].asInt())){
//declined track;
return;
}
pack["trackid"] = trackMap[pack["trackid"].asInt()];
long long unsigned int tNum = pack["trackid"].asInt();
if (!bookKeeping.count(tNum)){
return;
}
int pageNum = bookKeeping[tNum].pageNum;
std::string tmp = pack.toNetPacked();
if (bookKeeping[tNum].curOffset > 8388608 && pack.isMember("keyframe") && pack["keyframe"]){
Util::sleep(500);
//open new page
char nextPage[100];
sprintf(nextPage, "%s%llu_%d", streamName.c_str(), tNum, bookKeeping[tNum].pageNum + bookKeeping[tNum].keyNum);
curPages[tNum].init(nextPage, 0, false);
bookKeeping[tNum].pageNum += bookKeeping[tNum].keyNum;
bookKeeping[tNum].keyNum = 0;
bookKeeping[tNum].curOffset = 0;
}
if (bookKeeping[tNum].curOffset + tmp.size() < curPages[tNum].len){
bookKeeping[tNum].keyNum += (pack.isMember("keyframe") && pack["keyframe"]);
memcpy(curPages[tNum].mapped + bookKeeping[tNum].curOffset, tmp.data(), tmp.size());
bookKeeping[tNum].curOffset += tmp.size();
}else{
bookKeeping[tNum].curOffset += tmp.size();
DEBUG_MSG(DLVL_WARN, "Can't buffer frame on page %d, track %llu, time %lld, keyNum %d, offset %llu", pageNum, tNum, pack["time"].asInt(), bookKeeping[tNum].pageNum + bookKeeping[tNum].keyNum, bookKeeping[tNum].curOffset);
///\todo Open next page plx
}
playerConn.keepAlive();
}
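
//negotiatePushTracks() talks to the buffer process through the shared user page: for each
//track it writes a 6-byte record (4 bytes with the high bit set as a "pending" marker,
//then the 2-byte original track ID), waits until the buffer replaces the first 4 bytes
//with either the assigned track number or 0xFFFFFFFF for a declined track, writes a
//metadata page per accepted track, and finally opens the first data page for each of them.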
void OutRTMP::negotiatePushTracks() {
char * tmp = playerConn.getData();
if (!tmp){
DEBUG_MSG(DLVL_FAIL, "No userpage allocated");
return;
}
memset(tmp, 0, 30);
unsigned int i = 0;
for (std::map<int, DTSC::Track>::iterator it = meta_out.tracks.begin(); it != meta_out.tracks.end() && i < 5; it++){
DEBUG_MSG(DLVL_DEVEL, "Negotiating tracknum for id %d", it->first);
(tmp + 6 * i)[0] = 0x80;
(tmp + 6 * i)[1] = 0x00;
(tmp + 6 * i)[2] = 0x00;
(tmp + 6 * i)[3] = 0x00;
(tmp + 6 * i)[4] = (it->first >> 8) & 0xFF;
(tmp + 6 * i)[5] = (it->first) & 0xFF;
i++;
}
playerConn.keepAlive();
bool gotAllNumbers = false;
while (!gotAllNumbers){
Util::sleep(100);
gotAllNumbers = true;
i = 0;
for (std::map<int, DTSC::Track>::iterator it = meta_out.tracks.begin(); it != meta_out.tracks.end() && i < 5; it++){
unsigned long tNum = (((long)(tmp + (6 * i))[0]) << 24) | (((long)(tmp + (6 * i))[1]) << 16) | (((long)(tmp + (6 * i))[2]) << 8) | (long)(tmp + (6 * i))[3];
unsigned short oldNum = (((long)(tmp + (6 * i))[4]) << 8) | (long)(tmp + (6 * i))[5];
if (tNum & 0x80000000){
gotAllNumbers = false;
break;
}else{
DEBUG_MSG(DLVL_DEVEL, "Mapped %d -> %lu", oldNum, tNum);
trackMap[oldNum] = tNum;
}
i++;
}
}
for (std::map<int, int>::iterator it = trackMap.begin(); it != trackMap.end(); it++){
char tmp[100];
sprintf(tmp, "liveStream_%s%d", streamName.c_str(), it->second);
metaPages[it->second].init(std::string(tmp), 0, false);
DTSC::Meta tmpMeta = meta_out;
tmpMeta.tracks.clear();
tmpMeta.tracks[it->second] = meta_out.tracks[it->first];
tmpMeta.tracks[it->second].trackID = it->second;
JSON::Value tmpVal = tmpMeta.toJSON();
std::string tmpStr = tmpVal.toNetPacked();
memcpy(metaPages[it->second].mapped, tmpStr.data(), tmpStr.size());
DEBUG_MSG(DLVL_DEVEL, "Written meta for track %d", it->second);
}
gotAllNumbers = false;
while (!gotAllNumbers){
Util::sleep(100);
gotAllNumbers = true;
i = 0;
unsigned int j = 0;
//update Metadata;
JSON::Value jsonMeta;
JSON::fromDTMI((const unsigned char*)streamIndex.mapped + 8, streamIndex.len - 8, j, jsonMeta);
myMeta = DTSC::Meta(jsonMeta);
tmp = playerConn.getData();
for (std::map<int, DTSC::Track>::iterator it = meta_out.tracks.begin(); it != meta_out.tracks.end() && i < 5; it++){
unsigned long tNum = (((long)(tmp + (6 * i))[0]) << 24) | (((long)(tmp + (6 * i))[1]) << 16) | (((long)(tmp + (6 * i))[2]) << 8) | (long)(tmp + (6 * i))[3];
if (tNum == 0xFFFFFFFF){
DEBUG_MSG(DLVL_DEVEL, "Skipping a declined track");
i++;
continue;
}
if (!myMeta.tracks.count(tNum)){
gotAllNumbers = false;
break;
}
i++;
}
}
i = 0;
tmp = playerConn.getData();
for (std::map<int, DTSC::Track>::iterator it = meta_out.tracks.begin(); it != meta_out.tracks.end() && i < 5; it++){
unsigned long tNum = ((long)(tmp[6*i]) << 24) | ((long)(tmp[6 * i + 1]) << 16) | ((long)(tmp[6 * i + 2]) << 8) | tmp[6 * i + 3];
if (tNum == 0xFFFFFFFF){
tNum = ((long)(tmp[6 * i + 4]) << 8) | (long)tmp[6 * i + 5];
DEBUG_MSG(DLVL_WARN, "Buffer declined track %i", trackMap[tNum]);
trackMap.erase(tNum);
tmp[6*i] = 0;
tmp[6*i+1] = 0;
tmp[6*i+2] = 0;
tmp[6*i+3] = 0;
tmp[6*i+4] = 0;
tmp[6*i+5] = 0;
}else{
char firstPage[100];
sprintf(firstPage, "%s%lu_%d", streamName.c_str(), tNum, 0);
curPages[tNum].init(firstPage, 0, false);
bookKeeping[tNum] = DTSCPageData();
DEBUG_MSG(DLVL_WARN, "Buffer accepted track %lu", tNum);
}
i++;
}
}
///\brief Gets and parses one RTMP chunk at a time.
///\param inputBuffer A buffer filled with chunk data.
void OutRTMP::parseChunk(Socket::Buffer & inputBuffer) {
//for DTSC conversion
static std::stringstream prebuffer; // Temporary buffer before sending real data
//for chunk parsing
static RTMPStream::Chunk next;
static FLV::Tag F;
static AMF::Object amfdata("empty", AMF::AMF0_DDV_CONTAINER);
static AMF::Object amfelem("empty", AMF::AMF0_DDV_CONTAINER);
static AMF::Object3 amf3data("empty", AMF::AMF3_DDV_CONTAINER);
static AMF::Object3 amf3elem("empty", AMF::AMF3_DDV_CONTAINER);

while (next.Parse(inputBuffer)) {

//send ACK if we received a whole window
if ((RTMPStream::rec_cnt - RTMPStream::rec_window_at > RTMPStream::rec_window_size)) {
RTMPStream::rec_window_at = RTMPStream::rec_cnt;
myConn.Send(RTMPStream::SendCTL(3, RTMPStream::rec_cnt)); //send ack (msg 3)
}

switch (next.msg_type_id) {
case 0: //does not exist
#if DEBUG >= 2
fprintf(stderr, "UNKN: Received a zero-type message. Possible data corruption? Aborting!\n");
#endif
while (inputBuffer.size()) {
inputBuffer.get().clear();
}
stop();
myConn.close();
break; //happens when connection breaks unexpectedly
case 1: //set chunk size
RTMPStream::chunk_rec_max = ntohl(*(int *)next.data.c_str());
#if DEBUG >= 5
fprintf(stderr, "CTRL: Set chunk size: %i\n", RTMPStream::chunk_rec_max);
#endif
break;
case 2: //abort message - we ignore this one
#if DEBUG >= 5
fprintf(stderr, "CTRL: Abort message\n");
#endif
//4 bytes of stream id to drop
break;
case 3: //ack
#if DEBUG >= 8
fprintf(stderr, "CTRL: Acknowledgement\n");
#endif
RTMPStream::snd_window_at = ntohl(*(int *)next.data.c_str());
RTMPStream::snd_window_at = RTMPStream::snd_cnt;
break;
case 4: {
//2 bytes event type, rest = event data
//types:
//0 = stream begin, 4 bytes ID
//1 = stream EOF, 4 bytes ID
//2 = stream dry, 4 bytes ID
//3 = setbufferlen, 4 bytes ID, 4 bytes length
//4 = streamisrecorded, 4 bytes ID
//6 = pingrequest, 4 bytes data
//7 = pingresponse, 4 bytes data
//we don't need to process this
#if DEBUG >= 5
short int ucmtype = ntohs(*(short int *)next.data.c_str());
switch (ucmtype) {
case 0:
fprintf(stderr, "CTRL: UCM StreamBegin %i\n", ntohl(*((int *)(next.data.c_str() + 2))));
break;
case 1:
fprintf(stderr, "CTRL: UCM StreamEOF %i\n", ntohl(*((int *)(next.data.c_str() + 2))));
break;
case 2:
fprintf(stderr, "CTRL: UCM StreamDry %i\n", ntohl(*((int *)(next.data.c_str() + 2))));
break;
case 3:
fprintf(stderr, "CTRL: UCM SetBufferLength %i %i\n", ntohl(*((int *)(next.data.c_str() + 2))), ntohl(*((int *)(next.data.c_str() + 6))));
break;
case 4:
fprintf(stderr, "CTRL: UCM StreamIsRecorded %i\n", ntohl(*((int *)(next.data.c_str() + 2))));
break;
case 6:
fprintf(stderr, "CTRL: UCM PingRequest %i\n", ntohl(*((int *)(next.data.c_str() + 2))));
break;
case 7:
fprintf(stderr, "CTRL: UCM PingResponse %i\n", ntohl(*((int *)(next.data.c_str() + 2))));
break;
default:
fprintf(stderr, "CTRL: UCM Unknown (%hi)\n", ucmtype);
break;
}
#endif
}
break;
case 5: //window size of other end
#if DEBUG >= 5
fprintf(stderr, "CTRL: Window size\n");
#endif
RTMPStream::rec_window_size = ntohl(*(int *)next.data.c_str());
RTMPStream::rec_window_at = RTMPStream::rec_cnt;
myConn.Send(RTMPStream::SendCTL(3, RTMPStream::rec_cnt)); //send ack (msg 3)
break;
case 6:
#if DEBUG >= 5
fprintf(stderr, "CTRL: Set peer bandwidth\n");
#endif
//4 bytes window size, 1 byte limit type (ignored)
RTMPStream::snd_window_size = ntohl(*(int *)next.data.c_str());
myConn.Send(RTMPStream::SendCTL(5, RTMPStream::snd_window_size)); //send window acknowledgement size (msg 5)
break;
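//Media data from a pushed stream: the first few packets are converted and held in preBuf;
//after more than 8 packets have arrived, negotiatePushTracks() runs, the prebuffer is
//flushed through bufferPacket(), and every later packet is buffered directly.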
case 8: //audio data
case 9: //video data
case 18: {//meta data
if (!isInitialized) {
DEBUG_MSG(DLVL_MEDIUM, "Received useless media data\n");
myConn.close();
break;
}
if (streamReset) {
//reset push data to empty, in case stream properties change
meta_out.reset();
preBuf.clear();
sending = false;
counter = 0;
streamReset = false;
}
F.ChunkLoader(next);
JSON::Value pack_out = F.toJSON(meta_out);
if (!pack_out.isNull()){
if (!sending){
counter++;
if (counter > 8){
sending = true;
negotiatePushTracks();
for (std::deque<JSON::Value>::iterator it = preBuf.begin(); it != preBuf.end(); it++){
bufferPacket((*it));
}
preBuf.clear(); //clear buffer
bufferPacket(pack_out);
}else{
preBuf.push_back(pack_out);
}
}else{
bufferPacket(pack_out);
}
}
break;
}
case 15:
DEBUG_MSG(DLVL_MEDIUM, "Received AMF3 data message");
break;
case 16:
DEBUG_MSG(DLVL_MEDIUM, "Received AMF3 shared object");
break;
case 17: {
DEBUG_MSG(DLVL_MEDIUM, "Received AMF3 command message");
if (next.data[0] != 0) {
next.data = next.data.substr(1);
amf3data = AMF::parse3(next.data);
#if DEBUG >= 5
amf3data.Print();
#endif
} else {
DEBUG_MSG(DLVL_MEDIUM, "Received AMF3-0 command message");
next.data = next.data.substr(1);
amfdata = AMF::parse(next.data);
parseAMFCommand(amfdata, 17, next.msg_stream_id);
} //parsing AMF0-style
}
break;
case 19:
DEBUG_MSG(DLVL_MEDIUM, "Received AMF0 shared object");
break;
case 20: { //AMF0 command message
amfdata = AMF::parse(next.data);
parseAMFCommand(amfdata, 20, next.msg_stream_id);
}
break;
case 22:
DEBUG_MSG(DLVL_MEDIUM, "Received aggregate message");
break;
default:
DEBUG_MSG(DLVL_FAIL, "Unknown chunk received! Probably protocol corruption, stopping parsing of incoming data.");
break;
}
}
}
}
48
src/output/output_rtmp.h
Normal file
@@ -0,0 +1,48 @@
#include "output.h"
|
||||
#include <mist/flv_tag.h>
|
||||
#include <mist/amf.h>
|
||||
#include <mist/rtmpchunks.h>
|
||||
|
||||
|
||||
namespace Mist {
|
||||
struct DTSCPageData {
|
||||
DTSCPageData() : pageNum(0), keyNum(0), partNum(0), dataSize(0), curOffset(0), firstTime(0){}
|
||||
int pageNum;///<The current page number
|
||||
int keyNum;///<The number of keyframes in this page.
|
||||
int partNum;///<The number of parts in this page.
|
||||
unsigned long long int dataSize;///<The full size this page should be.
|
||||
unsigned long long int curOffset;///<The current write offset in the page.
|
||||
unsigned long long int firstTime;///<The first timestamp of the page.
|
||||
};
|
||||
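//One DTSCPageData entry is kept in OutRTMP::bookKeeping per negotiated track; it tracks
//the write position on the shared memory pages that bufferPacket() fills.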

class OutRTMP : public Output {
public:
OutRTMP(Socket::Connection & conn);
~OutRTMP();
static void init(Util::Config * cfg);
void onRequest();
void sendNext();
void sendHeader();
void bufferPacket(JSON::Value & pack);
protected:
DTSC::Meta meta_out;
void negotiatePushTracks();
std::string app_name;
bool sending;
int counter;
bool streamReset;
int playTransaction;///<The transaction number of the reply.
int playStreamId;///<The stream id of the reply.
int playMessageType;///<The message type of the reply.
void parseChunk(Socket::Buffer & inputBuffer);
void parseAMFCommand(AMF::Object & amfData, int messageType, int streamId);
void sendCommand(AMF::Object & amfReply, int messageType, int streamId);
std::deque<JSON::Value> preBuf;
std::map<int,int> trackMap;
std::map<int,IPC::sharedPage> metaPages;
std::map<int,DTSCPageData> bookKeeping;
};
}

typedef Mist::OutRTMP mistOut;
82
src/output/output_srt.cpp
Normal file
@@ -0,0 +1,82 @@
#include "output_srt.h"
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/defines.h>
|
||||
#include <iomanip>
|
||||
|
||||
namespace Mist {
|
||||
OutProgressiveSRT::OutProgressiveSRT(Socket::Connection & conn) : Output(conn) {
|
||||
realTime = 0;
|
||||
}
|
||||
|
||||
void OutProgressiveSRT::onFail(){
|
||||
HTTP::Parser HTTP_S;
|
||||
HTTP_S.Clean(); //make sure no parts of old requests are left in any buffers
|
||||
HTTP_S.SetBody("Stream not found. Sorry, we tried.");
|
||||
HTTP_S.SendResponse("404", "Stream not found", myConn);
|
||||
Output::onFail();
|
||||
}
|
||||
|
||||
OutProgressiveSRT::~OutProgressiveSRT() {}
|
||||
|
||||
void OutProgressiveSRT::init(Util::Config * cfg){
|
||||
capa["desc"] = "Enables HTTP protocol subtitle streaming.";
|
||||
capa["deps"] = "HTTP";
|
||||
capa["url_rel"] = "/$.srt";
|
||||
capa["url_match"] = "/$.srt";
|
||||
capa["url_handler"] = "http";
|
||||
capa["url_type"] = "subtitle";
|
||||
capa["socket"] = "http_srt";
|
||||
|
||||
cfg->addBasicConnectorOptions(capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
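
//sendNext() below emits one subtitle cue per packet: an optional sequence number (omitted
//for WebVTT requests), a "HH:MM:SS,mmm --> HH:MM:SS,mmm" timing line built from the packet
//time and duration, the cue text, and a terminating newline.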
void OutProgressiveSRT::sendNext(){
char * dataPointer = 0;
int len = 0;
currentPacket.getString("data", dataPointer, len);
std::stringstream tmp;
if (!webVTT) {
tmp << lastNum++ << std::endl;
}
long long unsigned int time = currentPacket.getTime();
char tmpBuf[50];
int tmpLen = sprintf(tmpBuf, "%0.2llu:%0.2llu:%0.2llu,%0.3llu", (time / 3600000), ((time % 3600000) / 60000), (((time % 3600000) % 60000) / 1000), time % 1000);
tmp.write(tmpBuf, tmpLen);
tmp << " --> ";
time += currentPacket.getInt("duration");
tmpLen = sprintf(tmpBuf, "%0.2llu:%0.2llu:%0.2llu,%0.3llu", (time / 3600000), ((time % 3600000) / 60000), (((time % 3600000) % 60000) / 1000), time % 1000);
tmp.write(tmpBuf, tmpLen);
tmp << std::endl;
myConn.SendNow(tmp.str());
myConn.SendNow(dataPointer, len);
myConn.SendNow("\n");
}

void OutProgressiveSRT::sendHeader(){
HTTP::Parser HTTP_S;
FLV::Tag tag;
HTTP_S.SetHeader("Content-Type", "text/plain");
HTTP_S.protocol = "HTTP/1.0";
myConn.SendNow(HTTP_S.BuildResponse("200", "OK"));
sentHeader = true;
}

void OutProgressiveSRT::onRequest(){
HTTP::Parser HTTP_R;
while (HTTP_R.Read(myConn)){
DEBUG_MSG(DLVL_DEVEL, "Received request %s", HTTP_R.getUrl().c_str());
lastNum = 0;
webVTT = (HTTP_R.url.find(".webvtt") != std::string::npos);
if (HTTP_R.GetVar("track") != ""){
selectedTracks.insert(JSON::Value(HTTP_R.GetVar("track")).asInt());
}
myConn.setHost(HTTP_R.GetHeader("X-Origin"));
streamName = HTTP_R.GetHeader("X-Stream");
parseData = true;
wantRequest = false;
HTTP_R.Clean();
}
}

}
20
src/output/output_srt.h
Normal file
@@ -0,0 +1,20 @@
#include "output.h"
|
||||
|
||||
|
||||
namespace Mist {
|
||||
class OutProgressiveSRT : public Output {
|
||||
public:
|
||||
OutProgressiveSRT(Socket::Connection & conn);
|
||||
~OutProgressiveSRT();
|
||||
static void init(Util::Config * cfg);
|
||||
void onRequest();
|
||||
void sendNext();
|
||||
void onFail();
|
||||
void sendHeader();
|
||||
protected:
|
||||
bool webVTT;
|
||||
int lastNum;
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutProgressiveSRT mistOut;
|
134
src/output/output_ts.cpp
Normal file
@@ -0,0 +1,134 @@
#include "output_ts.h"
|
||||
#include <mist/http_parser.h>
|
||||
#include <mist/defines.h>
|
||||
|
||||
namespace Mist {
|
||||
OutTS::OutTS(Socket::Connection & conn) : Output(conn){
|
||||
haveAvcc = false;
|
||||
AudioCounter = 0;
|
||||
VideoCounter = 0;
|
||||
std::string tracks = config->getString("tracks");
|
||||
unsigned int currTrack = 0;
|
||||
//loop over tracks, add any found track IDs to selectedTracks
|
||||
if (tracks != ""){
|
||||
for (unsigned int i = 0; i < tracks.size(); ++i){
|
||||
if (tracks[i] >= '0' && tracks[i] <= '9'){
|
||||
currTrack = currTrack*10 + (tracks[i] - '0');
|
||||
}else{
|
||||
if (currTrack > 0){
|
||||
selectedTracks.insert(currTrack);
|
||||
}
|
||||
currTrack = 0;
|
||||
}
|
||||
}
|
||||
if (currTrack > 0){
|
||||
selectedTracks.insert(currTrack);
|
||||
}
|
||||
}
|
||||
streamName = config->getString("streamname");
|
||||
parseData = true;
|
||||
wantRequest = false;
|
||||
initialize();
|
||||
}
|
||||
|
||||
OutTS::~OutTS() {}
|
||||
|
||||
void OutTS::init(Util::Config * cfg){
|
||||
capa["name"] = "TS";
|
||||
capa["desc"] = "Enables the raw MPEG Transport Stream protocol over TCP.";
|
||||
capa["deps"] = "";
|
||||
capa["required"]["streamname"]["name"] = "Stream";
|
||||
capa["required"]["streamname"]["help"] = "What streamname to serve. For multiple streams, add this protocol multiple times using different ports.";
|
||||
capa["required"]["streamname"]["type"] = "str";
|
||||
capa["required"]["streamname"]["option"] = "--stream";
|
||||
capa["optional"]["tracks"]["name"] = "Tracks";
|
||||
capa["optional"]["tracks"]["help"] = "The track IDs of the stream that this connector will transmit separated by spaces";
|
||||
capa["optional"]["tracks"]["type"] = "str";
|
||||
capa["optional"]["tracks"]["option"] = "--tracks";
|
||||
capa["codecs"][0u][0u].append("H264");
|
||||
capa["codecs"][0u][1u].append("AAC");
|
||||
cfg->addOption("streamname",
|
||||
JSON::fromString("{\"arg\":\"string\",\"short\":\"s\",\"long\":\"stream\",\"help\":\"The name of the stream that this connector will transmit.\"}"));
|
||||
cfg->addOption("tracks",
|
||||
JSON::fromString("{\"arg\":\"string\",\"value\":[\"\"],\"short\": \"t\",\"long\":\"tracks\",\"help\":\"The track IDs of the stream that this connector will transmit separated by spaces.\"}"));
|
||||
cfg->addConnectorOptions(8888, capa);
|
||||
config = cfg;
|
||||
}
|
||||
|
||||
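
//sendNext() below remuxes one packet into MPEG-TS: video NAL units are rewritten from
//length-prefixed form to Annex B start codes (with the parameter sets from the AVCC box
//inserted before keyframes), audio data is prefixed with the header from
//TS::GetAudioHeader(), timestamps are scaled to the 90kHz PES clock (27MHz for the PCR),
//and the result is cut into 188-byte TS packets on PID 0x100 (video) or 0x101 (audio).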
void OutTS::sendNext(){
Socket::Buffer ToPack;
char * ContCounter = 0;
bool IsKeyFrame = false;

char * dataPointer = 0;
int dataLen = 0;
currentPacket.getString("data", dataPointer, dataLen);

//detect packet type, and put converted data into ToPack.
if (myMeta.tracks[currentPacket.getTrackId()].type == "video"){
ToPack.append(TS::Packet::getPESVideoLeadIn(0ul, currentPacket.getTime() * 90));

IsKeyFrame = currentPacket.getInt("keyframe");
if (IsKeyFrame){
if (!haveAvcc){
avccbox.setPayload(myMeta.tracks[currentPacket.getTrackId()].init);
haveAvcc = true;
}
ToPack.append(avccbox.asAnnexB());
}
unsigned int i = 0;
while (i + 4 < (unsigned int)dataLen){
unsigned int ThisNaluSize = (dataPointer[i] << 24) + (dataPointer[i+1] << 16) + (dataPointer[i+2] << 8) + dataPointer[i+3];
if (ThisNaluSize + i + 4 > (unsigned int)dataLen){
DEBUG_MSG(DLVL_WARN, "Too big NALU detected (%u > %d) - skipping!", ThisNaluSize + i + 4, dataLen);
break;
}
ToPack.append("\000\000\000\001", 4);
i += 4;
ToPack.append(dataPointer + i, ThisNaluSize);
i += ThisNaluSize;
}
ContCounter = &VideoCounter;
}else if (myMeta.tracks[currentPacket.getTrackId()].type == "audio"){
ToPack.append(TS::Packet::getPESAudioLeadIn(7+dataLen, currentPacket.getTime() * 90));
ToPack.append(TS::GetAudioHeader(dataLen, myMeta.tracks[currentPacket.getTrackId()].init));
ToPack.append(dataPointer, dataLen);
ContCounter = &AudioCounter;
}

bool first = true;
//send TS packets
while (ToPack.size()){
PackData.Clear();
/// \todo Update according to sendHeader()'s generated data.
//0x100 - 1 + currentPacket.getTrackId()
if (myMeta.tracks[currentPacket.getTrackId()].type == "video"){
PackData.PID(0x100);
}else{
PackData.PID(0x101);
}
PackData.ContinuityCounter((*ContCounter)++);
if (first){
PackData.UnitStart(1);
if (IsKeyFrame){
PackData.RandomAccess(1);
PackData.PCR(currentPacket.getTime() * 27000);
}
first = false;
}
unsigned int toSend = PackData.AddStuffing(ToPack.bytes(184));
std::string gonnaSend = ToPack.remove(toSend);
PackData.FillFree(gonnaSend);
myConn.SendNow(PackData.ToString(), 188);
}
}

void OutTS::sendHeader(){
/// \todo Update this to actually generate these from the selected tracks.
/// \todo ts_packet.h contains all necessary info for this
myConn.SendNow(TS::PAT, 188);
myConn.SendNow(TS::PMT, 188);
sentHeader = true;
}

}
23
src/output/output_ts.h
Normal file
@@ -0,0 +1,23 @@
#include "output.h"
|
||||
#include <mist/mp4_generic.h>
|
||||
#include <mist/ts_packet.h>
|
||||
|
||||
namespace Mist {
|
||||
class OutTS : public Output {
|
||||
public:
|
||||
OutTS(Socket::Connection & conn);
|
||||
~OutTS();
|
||||
static void init(Util::Config * cfg);
|
||||
void sendNext();
|
||||
void sendHeader();
|
||||
protected:
|
||||
TS::Packet PackData;
|
||||
unsigned int PacketNumber;
|
||||
bool haveAvcc;
|
||||
char VideoCounter;
|
||||
char AudioCounter;
|
||||
MP4::AVCC avccbox;
|
||||
};
|
||||
}
|
||||
|
||||
typedef Mist::OutTS mistOut;
|