V4L2 camera support, raw pixel video support, added MistProcAV, improved MistProcFFMPEG

Co-authored-by: Thulinma <jaron@vietors.com>
Co-authored-by: Balder <balder.vietor@ddvtech.com>
Marco van Dijk 2023-11-22 16:33:45 +01:00 committed by Thulinma
parent c990f49b0e
commit f009856b64
35 changed files with 3934 additions and 633 deletions

View file

@ -468,10 +468,6 @@ makeInput(FLV flv)
makeInput(FLAC flac)
option(WITH_AV "Build a generic libav-based input (not distributable!)")
if (WITH_AV)
makeInput(AV av)
target_link_libraries(MistInAV avformat avcodec avutil)
endif()
makeInput(OGG ogg)
makeInput(Buffer buffer)
makeInput(H264 h264)
@ -494,6 +490,7 @@ if(RIST_LIB)
endif()
makeInput(AAC aac)
makeInput(V4L2 v4l2)
########################################
# MistServer - Outputs #

View file

@ -173,6 +173,9 @@ static inline void show_stackframe(){}
// Pages get marked for deletion after X seconds of no one watching
#define DEFAULT_PAGE_TIMEOUT 15
/// Count of raw frames for raw frame buffers
#define RAW_FRAME_COUNT 30
/// \TODO These values are hardcoded for now, but the dtsc_sizing_test binary can calculate them accurately.
#define META_META_OFFSET 138
#define META_META_RECORDSIZE 548

View file

@ -8,6 +8,7 @@
#include "shared_memory.h"
#include "util.h"
#include "stream.h"
#include "timing.h"
#include <arpa/inet.h> //for htonl/ntohl
#include <cstdlib>
#include <cstring>
@ -1240,10 +1241,6 @@ namespace DTSC{
Track &t = tracks[i];
t.track = Util::RelAccX(p.mapped, true);
t.parts = Util::RelAccX(t.track.getPointer("parts"), true);
t.keys = Util::RelAccX(t.track.getPointer("keys"), true);
t.fragments = Util::RelAccX(t.track.getPointer("fragments"), true);
t.pages = Util::RelAccX(t.track.getPointer("pages"), true);
t.trackIdField = t.track.getFieldData("id");
t.trackTypeField = t.track.getFieldData("type");
@ -1265,22 +1262,39 @@ namespace DTSC{
t.trackFpksField = t.track.getFieldData("fpks");
t.trackMissedFragsField = t.track.getFieldData("missedFrags");
t.partSizeField = t.parts.getFieldData("size");
t.partDurationField = t.parts.getFieldData("duration");
t.partOffsetField = t.parts.getFieldData("offset");
if (t.track.hasField("frames")){
t.parts = Util::RelAccX();
t.keys = Util::RelAccX();
t.fragments = Util::RelAccX();
t.pages = Util::RelAccX();
t.frames = Util::RelAccX(t.track.getPointer("frames"), true);
t.keyFirstPartField = t.keys.getFieldData("firstpart");
t.keyBposField = t.keys.getFieldData("bpos");
t.keyDurationField = t.keys.getFieldData("duration");
t.keyNumberField = t.keys.getFieldData("number");
t.keyPartsField = t.keys.getFieldData("parts");
t.keyTimeField = t.keys.getFieldData("time");
t.keySizeField = t.keys.getFieldData("size");
t.framesTimeField = t.frames.getFieldData("time");
t.framesDataField = t.frames.getFieldData("data");
}else{
t.frames = Util::RelAccX();
t.parts = Util::RelAccX(t.track.getPointer("parts"), true);
t.keys = Util::RelAccX(t.track.getPointer("keys"), true);
t.fragments = Util::RelAccX(t.track.getPointer("fragments"), true);
t.pages = Util::RelAccX(t.track.getPointer("pages"), true);
t.fragmentDurationField = t.fragments.getFieldData("duration");
t.fragmentKeysField = t.fragments.getFieldData("keys");
t.fragmentFirstKeyField = t.fragments.getFieldData("firstkey");
t.fragmentSizeField = t.fragments.getFieldData("size");
t.partSizeField = t.parts.getFieldData("size");
t.partDurationField = t.parts.getFieldData("duration");
t.partOffsetField = t.parts.getFieldData("offset");
t.keyFirstPartField = t.keys.getFieldData("firstpart");
t.keyBposField = t.keys.getFieldData("bpos");
t.keyDurationField = t.keys.getFieldData("duration");
t.keyNumberField = t.keys.getFieldData("number");
t.keyPartsField = t.keys.getFieldData("parts");
t.keyTimeField = t.keys.getFieldData("time");
t.keySizeField = t.keys.getFieldData("size");
t.fragmentDurationField = t.fragments.getFieldData("duration");
t.fragmentKeysField = t.fragments.getFieldData("keys");
t.fragmentFirstKeyField = t.fragments.getFieldData("firstkey");
t.fragmentSizeField = t.fragments.getFieldData("size");
}
}
}
@ -1334,10 +1348,6 @@ namespace DTSC{
}
t.track = Util::RelAccX(p.mapped, true);
t.parts = Util::RelAccX(t.track.getPointer("parts"), true);
t.keys = Util::RelAccX(t.track.getPointer("keys"), true);
t.fragments = Util::RelAccX(t.track.getPointer("fragments"), true);
t.pages = Util::RelAccX(t.track.getPointer("pages"), true);
t.trackIdField = t.track.getFieldData("id");
t.trackTypeField = t.track.getFieldData("type");
@ -1359,22 +1369,39 @@ namespace DTSC{
t.trackFpksField = t.track.getFieldData("fpks");
t.trackMissedFragsField = t.track.getFieldData("missedFrags");
t.partSizeField = t.parts.getFieldData("size");
t.partDurationField = t.parts.getFieldData("duration");
t.partOffsetField = t.parts.getFieldData("offset");
if (t.track.hasField("frames")){
t.parts = Util::RelAccX();
t.keys = Util::RelAccX();
t.fragments = Util::RelAccX();
t.pages = Util::RelAccX();
t.frames = Util::RelAccX(t.track.getPointer("frames"), true);
t.keyFirstPartField = t.keys.getFieldData("firstpart");
t.keyBposField = t.keys.getFieldData("bpos");
t.keyDurationField = t.keys.getFieldData("duration");
t.keyNumberField = t.keys.getFieldData("number");
t.keyPartsField = t.keys.getFieldData("parts");
t.keyTimeField = t.keys.getFieldData("time");
t.keySizeField = t.keys.getFieldData("size");
t.framesTimeField = t.frames.getFieldData("time");
t.framesDataField = t.frames.getFieldData("data");
}else{
t.frames = Util::RelAccX();
t.parts = Util::RelAccX(t.track.getPointer("parts"), true);
t.keys = Util::RelAccX(t.track.getPointer("keys"), true);
t.fragments = Util::RelAccX(t.track.getPointer("fragments"), true);
t.pages = Util::RelAccX(t.track.getPointer("pages"), true);
t.fragmentDurationField = t.fragments.getFieldData("duration");
t.fragmentKeysField = t.fragments.getFieldData("keys");
t.fragmentFirstKeyField = t.fragments.getFieldData("firstkey");
t.fragmentSizeField = t.fragments.getFieldData("size");
t.partSizeField = t.parts.getFieldData("size");
t.partDurationField = t.parts.getFieldData("duration");
t.partOffsetField = t.parts.getFieldData("offset");
t.keyFirstPartField = t.keys.getFieldData("firstpart");
t.keyBposField = t.keys.getFieldData("bpos");
t.keyDurationField = t.keys.getFieldData("duration");
t.keyNumberField = t.keys.getFieldData("number");
t.keyPartsField = t.keys.getFieldData("parts");
t.keyTimeField = t.keys.getFieldData("time");
t.keySizeField = t.keys.getFieldData("size");
t.fragmentDurationField = t.fragments.getFieldData("duration");
t.fragmentKeysField = t.fragments.getFieldData("keys");
t.fragmentFirstKeyField = t.fragments.getFieldData("firstkey");
t.fragmentSizeField = t.fragments.getFieldData("size");
}
}
}
@ -1485,7 +1512,7 @@ namespace DTSC{
/// Resizes a given track to be able to hold the given number of fragments, keys, parts and pages.
/// Currently called exclusively from Meta::update(), to resize the internal structures.
void Meta::resizeTrack(size_t source, size_t fragCount, size_t keyCount, size_t partCount, size_t pageCount, const char * reason){
void Meta::resizeTrack(size_t source, size_t fragCount, size_t keyCount, size_t partCount, size_t pageCount, const char * reason, size_t frameSize){
IPC::semaphore resizeLock;
if (!isMemBuf){
@ -1513,6 +1540,12 @@ namespace DTSC{
(TRACK_PART_OFFSET + (TRACK_PART_RECORDSIZE * partCount)) +
(TRACK_PAGE_OFFSET + (TRACK_PAGE_RECORDSIZE * pageCount));
// Raw track! Embed the data instead
if (frameSize){
// Reserve room for RAW_FRAME_COUNT frames
newPageSize = TRACK_TRACK_OFFSET + TRACK_TRACK_RECORDSIZE + (8 + frameSize) * RAW_FRAME_COUNT;
}
if (isMemBuf){
free(tMemBuf[source]);
tMemBuf.erase(source);
@ -1537,21 +1570,9 @@ namespace DTSC{
t.track = Util::RelAccX(tM[source].mapped, false);
}
initializeTrack(t, fragCount, keyCount, partCount, pageCount);
initializeTrack(t, fragCount, keyCount, partCount, pageCount, frameSize);
Util::RelAccX origAccess(orig);
Util::RelAccX origFragments(origAccess.getPointer("fragments"));
Util::RelAccX origKeys(origAccess.getPointer("keys"));
Util::RelAccX origParts(origAccess.getPointer("parts"));
Util::RelAccX origPages(origAccess.getPointer("pages"));
MEDIUM_MSG("Track %zu resizing (reason: %s): frags %" PRIu32 "->%zu, keys %" PRIu32 "->%zu, parts %" PRIu32 "->%zu, pages %" PRIu32 "->%zu",
source, reason,
origFragments.getRCount(), fragCount,
origKeys.getRCount(), keyCount,
origParts.getRCount(), partCount,
origPages.getRCount(), pageCount);
t.track.setInt(t.trackIdField, origAccess.getInt("id"));
t.track.setString(t.trackTypeField, origAccess.getPointer("type"));
t.track.setString(t.trackCodecField, origAccess.getPointer("codec"));
@ -1574,115 +1595,154 @@ namespace DTSC{
t.track.setInt(t.trackFpksField, origAccess.getInt("fpks"));
t.track.setInt(t.trackMissedFragsField, origAccess.getInt("missedFrags"));
t.parts.setEndPos(origParts.getEndPos());
t.parts.setStartPos(origParts.getStartPos());
t.parts.setDeleted(origParts.getDeleted());
t.parts.setPresent(origParts.getPresent());
if (frameSize){
Util::RelAccX origFrames(origAccess.getPointer("frames"));
Util::FieldAccX origPartSizeAccX = origParts.getFieldAccX("size");
Util::FieldAccX origPartDurationAccX = origParts.getFieldAccX("duration");
Util::FieldAccX origPartOffsetAccX = origParts.getFieldAccX("offset");
MEDIUM_MSG("Track %zu resizing (reason: %s): frameSize %zu -> %zu", source, reason,
(size_t)origFrames.getFieldData("data").size, frameSize);
Util::FieldAccX partSizeAccX = t.parts.getFieldAccX("size");
Util::FieldAccX partDurationAccX = t.parts.getFieldAccX("duration");
Util::FieldAccX partOffsetAccX = t.parts.getFieldAccX("offset");
t.frames.setEndPos(origFrames.getEndPos());
t.frames.setStartPos(origFrames.getStartPos());
t.frames.setDeleted(origFrames.getDeleted());
t.frames.setPresent(origFrames.getPresent());
size_t firstPart = origParts.getStartPos();
size_t endPart = origParts.getEndPos();
for (size_t i = firstPart; i < endPart; i++){
partSizeAccX.set(origPartSizeAccX.uint(i), i);
partDurationAccX.set(origPartDurationAccX.uint(i), i);
partOffsetAccX.set(origPartOffsetAccX.uint(i), i);
}
Util::FieldAccX origFramesTimeAccX = origFrames.getFieldAccX("time");
Util::FieldAccX origFramesDataAccX = origFrames.getFieldAccX("data");
t.keys.setEndPos(origKeys.getEndPos());
t.keys.setStartPos(origKeys.getStartPos());
t.keys.setDeleted(origKeys.getDeleted());
t.keys.setPresent(origKeys.getPresent());
Util::FieldAccX framesTimeAccX = t.frames.getFieldAccX("time");
Util::FieldAccX framesDataAccX = t.frames.getFieldAccX("data");
Util::FieldAccX origKeyFirstpartAccX = origKeys.getFieldAccX("firstpart");
Util::FieldAccX origKeyBposAccX = origKeys.getFieldAccX("bpos");
Util::FieldAccX origKeyDurationAccX = origKeys.getFieldAccX("duration");
Util::FieldAccX origKeyNumberAccX = origKeys.getFieldAccX("number");
Util::FieldAccX origKeyPartsAccX = origKeys.getFieldAccX("parts");
Util::FieldAccX origKeyTimeAccX = origKeys.getFieldAccX("time");
Util::FieldAccX origKeySizeAccX = origKeys.getFieldAccX("size");
size_t firstPage = origFrames.getStartPos();
size_t endPage = origFrames.getEndPos();
size_t minData = origFrames.getFieldData("data").size;
if (minData > frameSize){minData = frameSize;}
for (size_t i = firstPage; i < endPage; i++){
framesTimeAccX.set(origFramesTimeAccX.uint(i), i);
memcpy((void*)framesDataAccX.ptr(i), origFramesDataAccX.ptr(i), minData);
}
}else{
Util::RelAccX origFragments(origAccess.getPointer("fragments"));
Util::RelAccX origKeys(origAccess.getPointer("keys"));
Util::RelAccX origParts(origAccess.getPointer("parts"));
Util::RelAccX origPages(origAccess.getPointer("pages"));
Util::FieldAccX keyFirstpartAccX = t.keys.getFieldAccX("firstpart");
Util::FieldAccX keyBposAccX = t.keys.getFieldAccX("bpos");
Util::FieldAccX keyDurationAccX = t.keys.getFieldAccX("duration");
Util::FieldAccX keyNumberAccX = t.keys.getFieldAccX("number");
Util::FieldAccX keyPartsAccX = t.keys.getFieldAccX("parts");
Util::FieldAccX keyTimeAccX = t.keys.getFieldAccX("time");
Util::FieldAccX keySizeAccX = t.keys.getFieldAccX("size");
MEDIUM_MSG("Track %zu resizing (reason: %s): frags %" PRIu32 "->%zu, keys %" PRIu32 "->%zu, parts %" PRIu32 "->%zu, pages %" PRIu32 "->%zu",
source, reason,
origFragments.getRCount(), fragCount,
origKeys.getRCount(), keyCount,
origParts.getRCount(), partCount,
origPages.getRCount(), pageCount);
size_t firstKey = origKeys.getStartPos();
size_t endKey = origKeys.getEndPos();
for (size_t i = firstKey; i < endKey; i++){
keyFirstpartAccX.set(origKeyFirstpartAccX.uint(i), i);
keyBposAccX.set(origKeyBposAccX.uint(i), i);
keyDurationAccX.set(origKeyDurationAccX.uint(i), i);
keyNumberAccX.set(origKeyNumberAccX.uint(i), i);
keyPartsAccX.set(origKeyPartsAccX.uint(i), i);
keyTimeAccX.set(origKeyTimeAccX.uint(i), i);
keySizeAccX.set(origKeySizeAccX.uint(i), i);
}
t.parts.setEndPos(origParts.getEndPos());
t.parts.setStartPos(origParts.getStartPos());
t.parts.setDeleted(origParts.getDeleted());
t.parts.setPresent(origParts.getPresent());
t.fragments.setEndPos(origFragments.getEndPos());
t.fragments.setStartPos(origFragments.getStartPos());
t.fragments.setDeleted(origFragments.getDeleted());
t.fragments.setPresent(origFragments.getPresent());
Util::FieldAccX origPartSizeAccX = origParts.getFieldAccX("size");
Util::FieldAccX origPartDurationAccX = origParts.getFieldAccX("duration");
Util::FieldAccX origPartOffsetAccX = origParts.getFieldAccX("offset");
Util::FieldAccX origFragmentDurationAccX = origFragments.getFieldAccX("duration");
Util::FieldAccX origFragmentKeysAccX = origFragments.getFieldAccX("keys");
Util::FieldAccX origFragmentFirstkeyAccX = origFragments.getFieldAccX("firstkey");
Util::FieldAccX origFragmentSizeAccX = origFragments.getFieldAccX("size");
Util::FieldAccX partSizeAccX = t.parts.getFieldAccX("size");
Util::FieldAccX partDurationAccX = t.parts.getFieldAccX("duration");
Util::FieldAccX partOffsetAccX = t.parts.getFieldAccX("offset");
Util::FieldAccX fragmentDurationAccX = t.fragments.getFieldAccX("duration");
Util::FieldAccX fragmentKeysAccX = t.fragments.getFieldAccX("keys");
Util::FieldAccX fragmentFirstkeyAccX = t.fragments.getFieldAccX("firstkey");
Util::FieldAccX fragmentSizeAccX = t.fragments.getFieldAccX("size");
size_t firstPart = origParts.getStartPos();
size_t endPart = origParts.getEndPos();
for (size_t i = firstPart; i < endPart; i++){
partSizeAccX.set(origPartSizeAccX.uint(i), i);
partDurationAccX.set(origPartDurationAccX.uint(i), i);
partOffsetAccX.set(origPartOffsetAccX.uint(i), i);
}
size_t firstFragment = origFragments.getStartPos();
size_t endFragment = origFragments.getEndPos();
for (size_t i = firstFragment; i < endFragment; i++){
fragmentDurationAccX.set(origFragmentDurationAccX.uint(i), i);
fragmentKeysAccX.set(origFragmentKeysAccX.uint(i), i);
fragmentFirstkeyAccX.set(origFragmentFirstkeyAccX.uint(i), i);
fragmentSizeAccX.set(origFragmentSizeAccX.uint(i), i);
}
t.keys.setEndPos(origKeys.getEndPos());
t.keys.setStartPos(origKeys.getStartPos());
t.keys.setDeleted(origKeys.getDeleted());
t.keys.setPresent(origKeys.getPresent());
t.pages.setEndPos(origPages.getEndPos());
t.pages.setStartPos(origPages.getStartPos());
t.pages.setDeleted(origPages.getDeleted());
t.pages.setPresent(origPages.getPresent());
Util::FieldAccX origKeyFirstpartAccX = origKeys.getFieldAccX("firstpart");
Util::FieldAccX origKeyBposAccX = origKeys.getFieldAccX("bpos");
Util::FieldAccX origKeyDurationAccX = origKeys.getFieldAccX("duration");
Util::FieldAccX origKeyNumberAccX = origKeys.getFieldAccX("number");
Util::FieldAccX origKeyPartsAccX = origKeys.getFieldAccX("parts");
Util::FieldAccX origKeyTimeAccX = origKeys.getFieldAccX("time");
Util::FieldAccX origKeySizeAccX = origKeys.getFieldAccX("size");
Util::FieldAccX origPageFirstkeyAccX = origPages.getFieldAccX("firstkey");
Util::FieldAccX origPageKeycountAccX = origPages.getFieldAccX("keycount");
Util::FieldAccX origPagePartsAccX = origPages.getFieldAccX("parts");
Util::FieldAccX origPageSizeAccX = origPages.getFieldAccX("size");
Util::FieldAccX origPageAvailAccX = origPages.getFieldAccX("avail");
Util::FieldAccX origPageFirsttimeAccX = origPages.getFieldAccX("firsttime");
Util::FieldAccX origPageLastkeytimeAccX = origPages.getFieldAccX("lastkeytime");
Util::FieldAccX keyFirstpartAccX = t.keys.getFieldAccX("firstpart");
Util::FieldAccX keyBposAccX = t.keys.getFieldAccX("bpos");
Util::FieldAccX keyDurationAccX = t.keys.getFieldAccX("duration");
Util::FieldAccX keyNumberAccX = t.keys.getFieldAccX("number");
Util::FieldAccX keyPartsAccX = t.keys.getFieldAccX("parts");
Util::FieldAccX keyTimeAccX = t.keys.getFieldAccX("time");
Util::FieldAccX keySizeAccX = t.keys.getFieldAccX("size");
Util::FieldAccX pageFirstkeyAccX = t.pages.getFieldAccX("firstkey");
Util::FieldAccX pageKeycountAccX = t.pages.getFieldAccX("keycount");
Util::FieldAccX pagePartsAccX = t.pages.getFieldAccX("parts");
Util::FieldAccX pageSizeAccX = t.pages.getFieldAccX("size");
Util::FieldAccX pageAvailAccX = t.pages.getFieldAccX("avail");
Util::FieldAccX pageFirsttimeAccX = t.pages.getFieldAccX("firsttime");
Util::FieldAccX pageLastkeytimeAccX = t.pages.getFieldAccX("lastkeytime");
size_t firstKey = origKeys.getStartPos();
size_t endKey = origKeys.getEndPos();
for (size_t i = firstKey; i < endKey; i++){
keyFirstpartAccX.set(origKeyFirstpartAccX.uint(i), i);
keyBposAccX.set(origKeyBposAccX.uint(i), i);
keyDurationAccX.set(origKeyDurationAccX.uint(i), i);
keyNumberAccX.set(origKeyNumberAccX.uint(i), i);
keyPartsAccX.set(origKeyPartsAccX.uint(i), i);
keyTimeAccX.set(origKeyTimeAccX.uint(i), i);
keySizeAccX.set(origKeySizeAccX.uint(i), i);
}
size_t firstPage = origPages.getStartPos();
size_t endPage = origPages.getEndPos();
for (size_t i = firstPage; i < endPage; i++){
pageFirstkeyAccX.set(origPageFirstkeyAccX.uint(i), i);
pageKeycountAccX.set(origPageKeycountAccX.uint(i), i);
pagePartsAccX.set(origPagePartsAccX.uint(i), i);
pageSizeAccX.set(origPageSizeAccX.uint(i), i);
pageAvailAccX.set(origPageAvailAccX.uint(i), i);
pageFirsttimeAccX.set(origPageFirsttimeAccX.uint(i), i);
pageLastkeytimeAccX.set(origPageLastkeytimeAccX.uint(i), i);
t.fragments.setEndPos(origFragments.getEndPos());
t.fragments.setStartPos(origFragments.getStartPos());
t.fragments.setDeleted(origFragments.getDeleted());
t.fragments.setPresent(origFragments.getPresent());
Util::FieldAccX origFragmentDurationAccX = origFragments.getFieldAccX("duration");
Util::FieldAccX origFragmentKeysAccX = origFragments.getFieldAccX("keys");
Util::FieldAccX origFragmentFirstkeyAccX = origFragments.getFieldAccX("firstkey");
Util::FieldAccX origFragmentSizeAccX = origFragments.getFieldAccX("size");
Util::FieldAccX fragmentDurationAccX = t.fragments.getFieldAccX("duration");
Util::FieldAccX fragmentKeysAccX = t.fragments.getFieldAccX("keys");
Util::FieldAccX fragmentFirstkeyAccX = t.fragments.getFieldAccX("firstkey");
Util::FieldAccX fragmentSizeAccX = t.fragments.getFieldAccX("size");
size_t firstFragment = origFragments.getStartPos();
size_t endFragment = origFragments.getEndPos();
for (size_t i = firstFragment; i < endFragment; i++){
fragmentDurationAccX.set(origFragmentDurationAccX.uint(i), i);
fragmentKeysAccX.set(origFragmentKeysAccX.uint(i), i);
fragmentFirstkeyAccX.set(origFragmentFirstkeyAccX.uint(i), i);
fragmentSizeAccX.set(origFragmentSizeAccX.uint(i), i);
}
t.pages.setEndPos(origPages.getEndPos());
t.pages.setStartPos(origPages.getStartPos());
t.pages.setDeleted(origPages.getDeleted());
t.pages.setPresent(origPages.getPresent());
Util::FieldAccX origPageFirstkeyAccX = origPages.getFieldAccX("firstkey");
Util::FieldAccX origPageKeycountAccX = origPages.getFieldAccX("keycount");
Util::FieldAccX origPagePartsAccX = origPages.getFieldAccX("parts");
Util::FieldAccX origPageSizeAccX = origPages.getFieldAccX("size");
Util::FieldAccX origPageAvailAccX = origPages.getFieldAccX("avail");
Util::FieldAccX origPageFirsttimeAccX = origPages.getFieldAccX("firsttime");
Util::FieldAccX origPageLastkeytimeAccX = origPages.getFieldAccX("lastkeytime");
Util::FieldAccX pageFirstkeyAccX = t.pages.getFieldAccX("firstkey");
Util::FieldAccX pageKeycountAccX = t.pages.getFieldAccX("keycount");
Util::FieldAccX pagePartsAccX = t.pages.getFieldAccX("parts");
Util::FieldAccX pageSizeAccX = t.pages.getFieldAccX("size");
Util::FieldAccX pageAvailAccX = t.pages.getFieldAccX("avail");
Util::FieldAccX pageFirsttimeAccX = t.pages.getFieldAccX("firsttime");
Util::FieldAccX pageLastkeytimeAccX = t.pages.getFieldAccX("lastkeytime");
size_t firstPage = origPages.getStartPos();
size_t endPage = origPages.getEndPos();
for (size_t i = firstPage; i < endPage; i++){
pageFirstkeyAccX.set(origPageFirstkeyAccX.uint(i), i);
pageKeycountAccX.set(origPageKeycountAccX.uint(i), i);
pagePartsAccX.set(origPagePartsAccX.uint(i), i);
pageSizeAccX.set(origPageSizeAccX.uint(i), i);
pageAvailAccX.set(origPageAvailAccX.uint(i), i);
pageFirsttimeAccX.set(origPageFirsttimeAccX.uint(i), i);
pageLastkeytimeAccX.set(origPageLastkeytimeAccX.uint(i), i);
}
}
t.track.setReady();
@ -1696,7 +1756,7 @@ namespace DTSC{
/// Adds a track to the metadata structure.
/// To be called from the various inputs/outputs whenever they want to add a track.
size_t Meta::addTrack(size_t fragCount, size_t keyCount, size_t partCount, size_t pageCount, bool setValid){
size_t Meta::addTrack(size_t fragCount, size_t keyCount, size_t partCount, size_t pageCount, bool setValid, size_t frameSize){
char pageName[NAME_BUFFER_SIZE];
IPC::semaphore trackLock;
if (!isMemBuf){
@ -1719,6 +1779,11 @@ namespace DTSC{
(TRACK_KEY_OFFSET + (TRACK_KEY_RECORDSIZE * keyCount)) +
(TRACK_PART_OFFSET + (TRACK_PART_RECORDSIZE * partCount)) +
(TRACK_PAGE_OFFSET + (TRACK_PAGE_RECORDSIZE * pageCount));
// Raw track! Embed the data instead
if (frameSize){
// Reserve room for RAW_FRAME_COUNT frames
pageSize = TRACK_TRACK_OFFSET + TRACK_TRACK_RECORDSIZE + (8 + frameSize) * RAW_FRAME_COUNT;
}
size_t tNumber = trackList.getPresent();
@ -1738,7 +1803,7 @@ namespace DTSC{
t.track = Util::RelAccX(tM[tNumber].mapped, false);
}
initializeTrack(t, fragCount, keyCount, partCount, pageCount);
initializeTrack(t, fragCount, keyCount, partCount, pageCount, frameSize);
t.track.setReady();
trackList.setString(trackPageField, pageName, tNumber);
trackList.setInt(trackPidField, getpid(), tNumber);
@ -1769,9 +1834,30 @@ namespace DTSC{
trackList.setInt(trackPidField, 0, trackIdx);
}
bool Meta::hasEmbeddedFrames(size_t trackIdx) const{
return tracks.at(trackIdx).frames.isReady();
}
bool Meta::getEmbeddedData(size_t trackIdx, size_t num, char * & dataPtr, size_t & dataLen) const{
const Track & t = tracks.at(trackIdx);
const Util::RelAccX & R = t.frames;
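// Only the newest ~75% of the RAW_FRAME_COUNT ring is handed out; older slots are
// refused (presumably as a safety margin against being overwritten by storeFrame).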
if (R.getEndPos() <= num || R.getEndPos() > num + RAW_FRAME_COUNT*0.75){return false;}
dataPtr = R.getPointer(t.framesDataField, num);
dataLen = t.framesDataField.size;
return true;
}
bool Meta::getEmbeddedTime(size_t trackIdx, size_t num, uint64_t & time) const{
const Track & t = tracks.at(trackIdx);
const Util::RelAccX & R = t.frames;
if (R.getEndPos() <= num || R.getEndPos() > num + RAW_FRAME_COUNT*0.75){return false;}
time = R.getInt(t.framesTimeField, num);
return true;
}
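// Hedged usage sketch (editorial illustration, not part of this commit): reading the
// newest embedded frame of a raw track `trk` from a DTSC::Meta `M`. For embedded
// tracks the key index equals the frame index, so getKeyNumForTime() locates a frame:
//   uint64_t time = 0; char * data = 0; size_t len = 0;
//   if (M.hasEmbeddedFrames(trk)){
//     size_t num = M.getKeyNumForTime(trk, M.getLastms(trk));
//     if (M.getEmbeddedTime(trk, num, time) && M.getEmbeddedData(trk, num, data, len)){
//       // `data` points into shared memory and is only valid while this frame
//       // stays within the newest ~75% of the RAW_FRAME_COUNT ring buffer.
//     }
//   }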
/// Internal function that is called whenever a track is (re)written to the memory structures.
/// Adds the needed fields and sets all the RelAccXFieldData members to point to them.
void Meta::initializeTrack(Track &t, size_t fragCount, size_t keyCount, size_t partCount, size_t pageCount){
void Meta::initializeTrack(Track &t, size_t fragCount, size_t keyCount, size_t partCount, size_t pageCount, size_t frameSize){
t.track.addField("id", RAX_32UINT);
t.track.addField("type", RAX_STRING, 8);
t.track.addField("codec", RAX_STRING, 8);
@ -1781,7 +1867,11 @@ namespace DTSC{
t.track.addField("bps", RAX_32UINT);
t.track.addField("maxbps", RAX_32UINT);
t.track.addField("lang", RAX_STRING, 4);
t.track.addField("init", RAX_RAW, 1 * 1024 * 1024); // 1megabyte init data
if (!frameSize){
t.track.addField("init", RAX_RAW, 1 * 1024 * 1024); // 1megabyte init data
}else{
t.track.addField("init", RAX_RAW, 1); // 1 byte init data
}
t.track.addField("rate", RAX_16UINT);
t.track.addField("size", RAX_16UINT);
t.track.addField("channels", RAX_16UINT);
@ -1789,40 +1879,20 @@ namespace DTSC{
t.track.addField("height", RAX_32UINT);
t.track.addField("fpks", RAX_16UINT);
t.track.addField("missedFrags", RAX_32UINT);
t.track.addField("parts", RAX_NESTED, TRACK_PART_OFFSET + (TRACK_PART_RECORDSIZE * partCount));
t.track.addField("keys", RAX_NESTED, TRACK_KEY_OFFSET + (TRACK_KEY_RECORDSIZE * keyCount));
t.track.addField("fragments", RAX_NESTED, TRACK_FRAGMENT_OFFSET + (TRACK_FRAGMENT_RECORDSIZE * fragCount));
t.track.addField("pages", RAX_NESTED, TRACK_PAGE_OFFSET + (TRACK_PAGE_RECORDSIZE * pageCount));
if (!frameSize){
t.track.addField("parts", RAX_NESTED, TRACK_PART_OFFSET + (TRACK_PART_RECORDSIZE * partCount));
t.track.addField("keys", RAX_NESTED, TRACK_KEY_OFFSET + (TRACK_KEY_RECORDSIZE * keyCount));
t.track.addField("fragments", RAX_NESTED, TRACK_FRAGMENT_OFFSET + (TRACK_FRAGMENT_RECORDSIZE * fragCount));
t.track.addField("pages", RAX_NESTED, TRACK_PAGE_OFFSET + (TRACK_PAGE_RECORDSIZE * pageCount));
}else{
// 36 = RAX_REQDFIELDS_LEN
// 6 bytes for "time" uint64_t + 10 bytes for "data" raw field = 16
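// Worked example (values assumed for illustration): a 1920x1080 UYVY raw track has
// frameSize = 1920*1080*2 = 4147200, so this nested field takes
// 36 + 16 + (8 + 4147200) * 30 = 124416292 bytes (roughly 119 MiB).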
t.track.addField("frames", RAX_NESTED, 36 + 16 + (8+frameSize)*RAW_FRAME_COUNT);
}
t.track.setRCount(1);
t.track.addRecords(1);
t.parts = Util::RelAccX(t.track.getPointer("parts"), false);
t.parts.addField("size", RAX_32UINT);
t.parts.addField("duration", RAX_16UINT);
t.parts.addField("offset", RAX_16INT);
t.parts.setRCount(partCount);
t.parts.setReady();
t.keys = Util::RelAccX(t.track.getPointer("keys"), false);
t.keys.addField("firstpart", RAX_64UINT);
t.keys.addField("bpos", RAX_64UINT);
t.keys.addField("duration", RAX_32UINT);
t.keys.addField("number", RAX_32UINT);
t.keys.addField("parts", RAX_32UINT);
t.keys.addField("time", RAX_64UINT);
t.keys.addField("size", RAX_32UINT);
t.keys.setRCount(keyCount);
t.keys.setReady();
t.fragments = Util::RelAccX(t.track.getPointer("fragments"), false);
t.fragments.addField("duration", RAX_32UINT);
t.fragments.addField("keys", RAX_16UINT);
t.fragments.addField("firstkey", RAX_32UINT);
t.fragments.addField("size", RAX_32UINT);
t.fragments.setRCount(fragCount);
t.fragments.setReady();
t.trackIdField = t.track.getFieldData("id");
t.trackTypeField = t.track.getFieldData("type");
t.trackCodecField = t.track.getFieldData("codec");
@ -1843,10 +1913,38 @@ namespace DTSC{
t.trackFpksField = t.track.getFieldData("fpks");
t.trackMissedFragsField = t.track.getFieldData("missedFrags");
if (frameSize){
t.frames = Util::RelAccX(t.track.getPointer("frames"), false);
t.frames.addField("time", RAX_64UINT);
t.frames.addField("data", RAX_RAW, frameSize);
t.frames.setRCount(RAW_FRAME_COUNT);
t.frames.setReady();
t.framesTimeField = t.frames.getFieldData("time");
t.framesDataField = t.frames.getFieldData("data");
return;
}
t.parts = Util::RelAccX(t.track.getPointer("parts"), false);
t.parts.addField("size", RAX_32UINT);
t.parts.addField("duration", RAX_16UINT);
t.parts.addField("offset", RAX_16INT);
t.parts.setRCount(partCount);
t.parts.setReady();
t.partSizeField = t.parts.getFieldData("size");
t.partDurationField = t.parts.getFieldData("duration");
t.partOffsetField = t.parts.getFieldData("offset");
t.keys = Util::RelAccX(t.track.getPointer("keys"), false);
t.keys.addField("firstpart", RAX_64UINT);
t.keys.addField("bpos", RAX_64UINT);
t.keys.addField("duration", RAX_32UINT);
t.keys.addField("number", RAX_32UINT);
t.keys.addField("parts", RAX_32UINT);
t.keys.addField("time", RAX_64UINT);
t.keys.addField("size", RAX_32UINT);
t.keys.setRCount(keyCount);
t.keys.setReady();
t.keyFirstPartField = t.keys.getFieldData("firstpart");
t.keyBposField = t.keys.getFieldData("bpos");
t.keyDurationField = t.keys.getFieldData("duration");
@ -1855,6 +1953,13 @@ namespace DTSC{
t.keyTimeField = t.keys.getFieldData("time");
t.keySizeField = t.keys.getFieldData("size");
t.fragments = Util::RelAccX(t.track.getPointer("fragments"), false);
t.fragments.addField("duration", RAX_32UINT);
t.fragments.addField("keys", RAX_16UINT);
t.fragments.addField("firstkey", RAX_32UINT);
t.fragments.addField("size", RAX_32UINT);
t.fragments.setRCount(fragCount);
t.fragments.setReady();
t.fragmentDurationField = t.fragments.getFieldData("duration");
t.fragmentKeysField = t.fragments.getFieldData("keys");
t.fragmentFirstKeyField = t.fragments.getFieldData("firstkey");
@ -2149,6 +2254,7 @@ namespace DTSC{
for (std::set<size_t>::iterator it = vTracks.begin(); it != vTracks.end(); it++){
if (idx != INVALID_TRACK_ID && idx != *it){continue;}
if (getType(*it) != "video"){continue;}
if (hasEmbeddedFrames(*it)){continue;}
DTSC::Parts p(parts(*it));
size_t ctr = 0;
int64_t prevOffset = 0;
@ -2201,15 +2307,20 @@ namespace DTSC{
uint64_t firstValid = trackList.getDeleted();
uint64_t beyondLast = trackList.getEndPos();
for (size_t i = firstValid; i < beyondLast; i++){
if (trackList.getInt(trackValidField, i) & trackValidMask){res.insert(i);}
if (!(trackList.getInt(trackValidField, i) & trackValidMask)){continue;}
if (!tracks.count(i)){continue;}
const Track & t = tracks.at(i);
if (!t.track.isReady()){continue;}
if (skipEmpty){
if (t.frames.isReady() && !t.frames.getPresent()){continue;}
if (t.parts.isReady() && !t.parts.getPresent()){continue;}
}
// Remove the track this one is based on, if this track is encrypted
if (trackList.getInt(trackSourceTidField, i) != INVALID_TRACK_ID &&
std::string(trackList.getPointer(trackEncryptionField, i)) != ""){
res.erase(trackList.getInt(trackSourceTidField, i));
}
if (!tracks.count(i) || !tracks.at(i).track.isReady()){res.erase(i);}
if (skipEmpty){
if (res.count(i) && !tracks.at(i).parts.getPresent()){res.erase(i);}
}
res.insert(i);
}
return res;
}
@ -2245,13 +2356,15 @@ namespace DTSC{
void Meta::removeTrack(size_t trackIdx){
if (!getValidTracks().count(trackIdx)){return;}
Track &t = tracks[trackIdx];
for (uint64_t i = t.pages.getDeleted(); i < t.pages.getEndPos(); i++){
if (t.pages.getInt("avail", i) == 0){continue;}
char thisPageName[NAME_BUFFER_SIZE];
snprintf(thisPageName, NAME_BUFFER_SIZE, SHM_TRACK_DATA, streamName.c_str(), trackIdx,
(uint32_t)t.pages.getInt("firstkey", i));
IPC::sharedPage p(thisPageName, 20971520);
p.master = true;
if (t.pages.isReady()){
for (uint64_t i = t.pages.getDeleted(); i < t.pages.getEndPos(); i++){
if (t.pages.getInt("avail", i) == 0){continue;}
char thisPageName[NAME_BUFFER_SIZE];
snprintf(thisPageName, NAME_BUFFER_SIZE, SHM_TRACK_DATA, streamName.c_str(), trackIdx,
(uint32_t)t.pages.getInt("firstkey", i));
IPC::sharedPage p(thisPageName, 20971520);
p.master = true;
}
}
tM[trackIdx].master = true;
tM.erase(trackIdx);
@ -2609,9 +2722,54 @@ namespace DTSC{
const Util::RelAccX &Meta::keys(size_t idx) const{return tracks.at(idx).keys;}
const Keys Meta::getKeys(size_t trackIdx) const{
DTSC::Keys k(keys(trackIdx));
if (isLimited){k.applyLimiter(limitMin, limitMax, DTSC::Parts(parts(trackIdx)));}
return k;
const Track & t = tracks.at(trackIdx);
if (t.frames.isReady()){
DTSC::Keys k(t.frames);
if (isLimited){k.applyLimiter(limitMin, limitMax);}
return k;
}else{
DTSC::Keys k(t.keys);
if (isLimited){k.applyLimiter(limitMin, limitMax, DTSC::Parts(t.parts));}
return k;
}
}
void Meta::storeFrame(size_t trackIdx, uint64_t time, const char * data, size_t dataSize){
Track & t = tracks.at(trackIdx);
if (time < getLastms(trackIdx)){
static bool warned = false;
if (!warned){
ERROR_MSG("Received packets for track %zu in wrong order (%" PRIu64 " < %" PRIu64
") - ignoring! Further messages on HIGH level.",
trackIdx, time, getLastms(trackIdx));
warned = true;
}else{
HIGH_MSG("Received packets for track %zu in wrong order (%" PRIu64 " < %" PRIu64
") - ignoring!",
trackIdx, time, getLastms(trackIdx));
}
return;
}
uint64_t endPos = t.frames.getEndPos();
if (!endPos){
setFirstms(trackIdx, time);
}
if ((endPos - t.frames.getDeleted()) >= t.frames.getRCount()){
t.frames.deleteRecords(1);
setFirstms(trackIdx, t.frames.getInt(t.framesTimeField, t.frames.getDeleted()));
}
t.frames.setInt(t.framesTimeField, time, endPos);
if (t.framesDataField.size < dataSize){dataSize = t.framesDataField.size;}
memcpy(t.frames.getPointer(t.framesDataField, endPos), data, dataSize);
t.frames.addRecords(1);
setMinKeepAway(trackIdx, theJitters[trackIdx].addPack(time));
t.track.setInt(t.trackLastmsField, time);
t.track.setInt(t.trackNowmsField, time);
markUpdated(trackIdx);
}
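// Hedged sketch (editorial illustration, not from this commit) of how a raw input
// could use the embedded-frame path for a 1280x720 UYVY source:
//   size_t frameSize = Util::pixfmtToSize("UYVY", 1280, 720); // 1843200 bytes
//   size_t tid = meta.addTrack(DEFAULT_FRAGMENT_COUNT, DEFAULT_KEY_COUNT,
//                              DEFAULT_PART_COUNT, DEFAULT_PAGE_COUNT, true, frameSize);
//   meta.setType(tid, "video");
//   // ... codec/width/height setup omitted ...
//   meta.storeFrame(tid, Util::bootMS(), frameBuf, frameSize); // frameBuf: captured pixels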
const Util::RelAccX &Meta::fragments(size_t idx) const{return tracks.at(idx).fragments;}
@ -3034,7 +3192,9 @@ namespace DTSC{
if (!trackList.getPresent()){return 0;}
uint32_t trackIdx = (idx == INVALID_TRACK_ID ? mainTrack() : idx);
if (!tM.count(trackIdx)){return 0;}
DTSC::Fragments fragments(tracks.at(trackIdx).fragments);
const Util::RelAccX & fRela = tracks.at(trackIdx).fragments;
if (!fRela.isReady()){return 0;}
DTSC::Fragments fragments(fRela);
uint64_t firstFragment = fragments.getFirstValid();
uint64_t endFragment = fragments.getEndValid();
uint32_t ret = 0;
@ -3086,7 +3246,7 @@ namespace DTSC{
/// Gets the index of the fragment containing timestamp, or last fragment if nowhere.
uint32_t Meta::getFragmentIndexForTime(uint32_t idx, uint64_t timestamp) const{
DTSC::Fragments fragments(tracks.at(idx).fragments);
DTSC::Keys keys(tracks.at(idx).keys);
DTSC::Keys keys(getKeys(idx));
uint32_t firstFragment = fragments.getFirstValid();
uint32_t endFragment = fragments.getEndValid();
for (size_t i = firstFragment; i < endFragment; i++){
@ -3102,13 +3262,12 @@ namespace DTSC{
/// Returns the timestamp for the given key index in the given track index
uint64_t Meta::getTimeForKeyIndex(uint32_t idx, uint32_t keyIdx) const{
DTSC::Keys keys(tracks.at(idx).keys);
return keys.getTime(keyIdx);
return getKeys(idx).getTime(keyIdx);
}
/// Returns the index of the key containing timestamp, or last key if nowhere.
uint32_t Meta::getKeyIndexForTime(uint32_t idx, uint64_t timestamp) const{
DTSC::Keys keys(tracks.at(idx).keys);
DTSC::Keys keys(getKeys(idx));
uint32_t firstKey = keys.getFirstValid();
uint32_t endKey = keys.getEndValid();
@ -3121,8 +3280,7 @@ namespace DTSC{
/// Returns the timestamp for the given fragment index in the given track index.
uint64_t Meta::getTimeForFragmentIndex(uint32_t idx, uint32_t fragmentIdx) const{
DTSC::Fragments fragments(tracks.at(idx).fragments);
DTSC::Keys keys(tracks.at(idx).keys);
return keys.getTime(fragments.getFirstKey(fragmentIdx));
return getKeys(idx).getTime(fragments.getFirstKey(fragmentIdx));
}
/// Returns the part index for the given timestamp.
@ -3135,7 +3293,7 @@ namespace DTSC{
uint32_t res = 0;
uint32_t keyIdx = getKeyIndexForTime(idx, timestamp);
DTSC::Keys Keys(keys(idx));
DTSC::Keys Keys(getKeys(idx));
DTSC::Parts Parts(parts(idx));
uint64_t currentTime = Keys.getTime(keyIdx);
res = Keys.getFirstPart(keyIdx);
@ -3155,7 +3313,7 @@ namespace DTSC{
/// index is invalid or if the timestamp cannot be found.
uint64_t Meta::getPartTime(uint32_t partIndex, size_t idx) const{
if (idx == INVALID_TRACK_ID){return 0;}
DTSC::Keys Keys(keys(idx));
DTSC::Keys Keys(getKeys(idx));
DTSC::Parts Parts(parts(idx));
size_t kId = 0;
for (kId = 0; kId < Keys.getEndValid(); ++kId){
@ -3218,6 +3376,17 @@ namespace DTSC{
/// If the time is in the gap before a key, returns the next key instead.
size_t Meta::getKeyNumForTime(uint32_t idx, uint64_t time) const{
const Track &trk = tracks.at(idx);
if (trk.frames.isReady()){
if (!trk.frames.getEndPos()){return INVALID_KEY_NUM;}
size_t res = trk.frames.getDeleted();
for (size_t i = res; i < trk.frames.getEndPos(); i++){
if (trk.frames.getInt(trk.framesTimeField, i) > time){
return res;
}
res = i;
}
return res;
}
const Util::RelAccX &keys = trk.keys;
const Util::RelAccX &parts = trk.parts;
if (!keys.getEndPos()){return INVALID_KEY_NUM;}
@ -3266,7 +3435,7 @@ namespace DTSC{
uint32_t longest_prt = 0;
uint32_t shrtest_cnt = 0xFFFFFFFFul;
uint32_t longest_cnt = 0;
DTSC::Keys Mkeys(keys(i));
DTSC::Keys Mkeys(getKeys(i));
uint32_t firstKey = Mkeys.getFirstValid();
uint32_t endKey = Mkeys.getEndValid();
for (uint32_t k = firstKey; k+1 < endKey; k++){
@ -3396,24 +3565,38 @@ namespace DTSC{
int64_t Parts::getOffset(size_t idx) const{return parts.getInt(offsetField, idx);}
Keys::Keys(Util::RelAccX &_keys) : isConst(false), keys(_keys), cKeys(_keys){
firstPartField = cKeys.getFieldData("firstpart");
bposField = cKeys.getFieldData("bpos");
durationField = cKeys.getFieldData("duration");
numberField = cKeys.getFieldData("number");
partsField = cKeys.getFieldData("parts");
timeField = cKeys.getFieldData("time");
sizeField = cKeys.getFieldData("size");
if (cKeys.hasField("firstpart")){
isFrames = false;
firstPartField = cKeys.getFieldData("firstpart");
bposField = cKeys.getFieldData("bpos");
durationField = cKeys.getFieldData("duration");
numberField = cKeys.getFieldData("number");
partsField = cKeys.getFieldData("parts");
timeField = cKeys.getFieldData("time");
sizeField = cKeys.getFieldData("size");
}else{
isFrames = true;
timeField = cKeys.getFieldData("time");
sizeField = cKeys.getFieldData("data");
}
isLimited = false;
}
Keys::Keys(const Util::RelAccX &_keys) : isConst(true), keys(empty), cKeys(_keys){
firstPartField = cKeys.getFieldData("firstpart");
bposField = cKeys.getFieldData("bpos");
durationField = cKeys.getFieldData("duration");
numberField = cKeys.getFieldData("number");
partsField = cKeys.getFieldData("parts");
timeField = cKeys.getFieldData("time");
sizeField = cKeys.getFieldData("size");
if (cKeys.hasField("firstpart")){
isFrames = false;
firstPartField = cKeys.getFieldData("firstpart");
bposField = cKeys.getFieldData("bpos");
durationField = cKeys.getFieldData("duration");
numberField = cKeys.getFieldData("number");
partsField = cKeys.getFieldData("parts");
timeField = cKeys.getFieldData("time");
sizeField = cKeys.getFieldData("size");
}else{
isFrames = true;
timeField = cKeys.getFieldData("time");
sizeField = cKeys.getFieldData("data");
}
isLimited = false;
}
@ -3426,17 +3609,26 @@ namespace DTSC{
size_t Keys::getValidCount() const{return getEndValid() - getFirstValid();}
size_t Keys::getFirstPart(size_t idx) const{
if (isFrames){return idx;}
if (isLimited && idx == limMin){return limMinFirstPart;}
return cKeys.getInt(firstPartField, idx);
}
size_t Keys::getBpos(size_t idx) const{return cKeys.getInt(bposField, idx);}
size_t Keys::getBpos(size_t idx) const{
if (isFrames){return 0;}
return cKeys.getInt(bposField, idx);
}
uint64_t Keys::getDuration(size_t idx) const{
if (isFrames){return 0;}
if (isLimited && idx + 1 == limMax){return limMaxDuration;}
if (isLimited && idx == limMin){return limMinDuration;}
return cKeys.getInt(durationField, idx);
}
size_t Keys::getNumber(size_t idx) const{return cKeys.getInt(numberField, idx);}
size_t Keys::getNumber(size_t idx) const{
if (isFrames){return idx;}
return cKeys.getInt(numberField, idx);
}
size_t Keys::getParts(size_t idx) const{
if (isFrames){return 1;}
if (isLimited && idx + 1 == limMax){return limMaxParts;}
if (isLimited && idx == limMin){return limMinParts;}
return cKeys.getInt(partsField, idx);
@ -3446,16 +3638,19 @@ namespace DTSC{
return cKeys.getInt(timeField, idx);
}
void Keys::setSize(size_t idx, size_t _size){
if (isFrames){return;}
if (isConst){return;}
keys.setInt(sizeField, _size, idx);
}
size_t Keys::getSize(size_t idx) const{
if (isFrames){return sizeField.size;}
if (isLimited && idx + 1 == limMax){return limMaxSize;}
if (isLimited && idx == limMin){return limMinSize;}
return cKeys.getInt(sizeField, idx);
}
uint64_t Keys::getTotalPartCount(){
if (isFrames){return getValidCount();}
return getParts(getEndValid()-1) + getFirstPart(getEndValid()-1) - getFirstPart(getFirstValid());
}
@ -3464,7 +3659,8 @@ namespace DTSC{
uint32_t endKey = getEndValid();
for (size_t i = firstKey; i < endKey; i++){
if (getTime(i) + getDuration(i) > timestamp){return i;}
const uint64_t t = getTime(i);
if (t >= timestamp || t + getDuration(i) > timestamp){return i;}
}
return endKey;
}
@ -3551,6 +3747,25 @@ namespace DTSC{
isLimited = true;
}
void Keys::applyLimiter(uint64_t _min, uint64_t _max){
// Determine first and last key available within the limits
// Note: limMax replaces getEndValid(), and is thus one _past_ the end key index!
limMin = getFirstValid();
limMax = getEndValid();
for (size_t i = limMin; i < limMax; i++){
if (getTime(i) <= _min){limMin = i;}
if (getTime(i) >= _max){
limMax = i;
break;
}
}
// We can't have 0 keys, so force at least 1 key in cases where min >= max.
if (limMin >= limMax){limMax = limMin + 1;}
HIGH_MSG("Frame limiter applied from %" PRIu64 " to %" PRIu64 ", key times %" PRIu64 " to %" PRIu64 ", %lld parts, %lld parts", _min, _max, getTime(limMin), getTime(limMax-1), (long long)limMinParts-(long long)getParts(limMin), (long long)limMaxParts-(long long)getParts(limMax-1));
isLimited = true;
}
Fragments::Fragments(const Util::RelAccX &_fragments) : fragments(_fragments){}
size_t Fragments::getFirstValid() const{return fragments.getDeleted();}
size_t Fragments::getEndValid() const{return fragments.getEndPos();}

View file

@ -8,13 +8,11 @@
#include "socket.h"
#include "timing.h"
#include "util.h"
#include <deque>
#include <iostream>
#include <set>
#include <stdint.h> //for uint64_t
#include <stdio.h> //for FILE
#include <string>
#include <vector>
#define DTSC_INT 0x01
#define DTSC_STR 0x02
@ -195,10 +193,12 @@ namespace DTSC{
uint32_t getIndexForTime(uint64_t timestamp);
void applyLimiter(uint64_t _min, uint64_t _max, DTSC::Parts _p);
void applyLimiter(uint64_t _min, uint64_t _max);
private:
bool isConst;
bool isLimited;
bool isFrames;
size_t limMin;
size_t limMax;
//Overrides for max key
@ -243,13 +243,12 @@ namespace DTSC{
class Track{
public:
Util::RelAccX track;
Util::RelAccX parts;
Util::RelAccX keys;
Util::RelAccX fragments;
Util::RelAccX pages;
Util::RelAccX track;
Util::RelAccX frames;
// Internal buffers so we don't always need to search for everything
Util::RelAccXFieldData trackIdField;
@ -286,6 +285,9 @@ namespace DTSC{
Util::RelAccXFieldData fragmentKeysField;
Util::RelAccXFieldData fragmentFirstKeyField;
Util::RelAccXFieldData fragmentSizeField;
Util::RelAccXFieldData framesTimeField;
Util::RelAccXFieldData framesDataField;
};
@ -334,11 +336,12 @@ namespace DTSC{
size_t partCount = DEFAULT_PART_COUNT, size_t pageCount = DEFAULT_PAGE_COUNT);
size_t addTrack(size_t fragCount = DEFAULT_FRAGMENT_COUNT, size_t keyCount = DEFAULT_KEY_COUNT,
size_t partCount = DEFAULT_PART_COUNT, size_t pageCount = DEFAULT_PAGE_COUNT,
bool setValid = true);
bool setValid = true, size_t frameSize = 0);
void resizeTrack(size_t source, size_t fragCount = DEFAULT_FRAGMENT_COUNT, size_t keyCount = DEFAULT_KEY_COUNT,
size_t partCount = DEFAULT_PART_COUNT, size_t pageCount = DEFAULT_PAGE_COUNT, const char * reason = "");
size_t partCount = DEFAULT_PART_COUNT, size_t pageCount = DEFAULT_PAGE_COUNT, const char * reason = "",
size_t frameSize = 0);
void initializeTrack(Track &t, size_t fragCount = DEFAULT_FRAGMENT_COUNT, size_t keyCount = DEFAULT_KEY_COUNT,
size_t parCount = DEFAULT_PART_COUNT, size_t pageCount = DEFAULT_PAGE_COUNT);
size_t parCount = DEFAULT_PART_COUNT, size_t pageCount = DEFAULT_PAGE_COUNT, size_t frameSize = 0);
void merge(const DTSC::Meta &M, bool deleteTracks = true, bool copyData = true);
@ -422,6 +425,9 @@ namespace DTSC{
void claimTrack(size_t trackIdx);
bool isClaimed(size_t trackIdx) const;
void abandonTrack(size_t trackIdx);
bool hasEmbeddedFrames(size_t trackIdx) const;
bool getEmbeddedData(size_t trackIdx, size_t num, char * & dataPtr, size_t & dataLen) const;
bool getEmbeddedTime(size_t trackIdx, size_t num, uint64_t & time) const;
/*LTS-START*/
void setSourceTrack(size_t trackIdx, size_t sourceTrack);
@ -499,6 +505,8 @@ namespace DTSC{
const Keys getKeys(size_t trackIdx) const;
void storeFrame(size_t trackIdx, uint64_t time, const char * data, size_t dataSize);
std::string toPrettyString() const;
void remap(const std::string &_streamName = "");

View file

@ -146,6 +146,7 @@ namespace EBML{
case 0x1A: return "FlagInterlaced";
case EID_DISPLAYWIDTH: return "DisplayWidth";
case EID_DISPLAYHEIGHT: return "DisplayHeight";
case EID_UNCOMPRESSEDFOURCC: return "UncompressedFourCC";
case 0x15B0: return "Colour";
case 0x15B7: return "ChromaSitingHorz";
case 0x15B8: return "ChromaSitingVert";
@ -329,6 +330,7 @@ namespace EBML{
case EID_LANGUAGE:
case 0x660:
case 0x37C:
case EID_UNCOMPRESSEDFOURCC:
case EID_CODECID: return ELEM_STRING;
case EID_MUXINGAPP:
case EID_WRITINGAPP:

View file

@ -50,6 +50,7 @@ namespace EBML{
EID_PIXELHEIGHT = 0x3A,
EID_DISPLAYWIDTH = 0x14B0,
EID_DISPLAYHEIGHT = 0x14BA,
EID_UNCOMPRESSEDFOURCC = 0xEB524,
EID_TRACKNUMBER = 0x57,
EID_CODECPRIVATE = 0x23A2,
EID_LANGUAGE = 0x2B59C,

View file

@ -699,7 +699,7 @@ namespace HLS{
u_int64_t getInitFragment(const DTSC::Meta &M, const MasterData &masterData){
if (M.getLive()){
DTSC::Fragments fragments(M.fragments(masterData.mainTrack));
DTSC::Keys keys(M.keys(masterData.mainTrack));
DTSC::Keys keys(M.getKeys(masterData.mainTrack));
u_int64_t iFrag = std::max(fragments.getEndValid() -
(masterData.noLLHLS ? 10 : getLiveLengthLimit(masterData)),
fragments.getFirstValid());

View file

@ -598,6 +598,7 @@ void Socket::Connection::clear(){
Error = false;
Blocking = false;
skipCount = 0;
memset(&remoteaddr, 0, sizeof(remoteaddr));
#ifdef SSL
sslConnected = false;
server_fd = 0;

View file

@ -926,10 +926,13 @@ namespace Util{
std::stringstream r;
uint64_t delled = getDeleted();
uint64_t max = getEndPos();
if (max - delled > getRCount()){max = delled + getRCount();}
if (delled >= max || max - delled > getRCount()){
r << std::string(indent, ' ') << "(Note: deleted count (" << delled << ") >= total count (" << max << "))" << std::endl;
delled = max - getRCount();
}
if (max == 0){max = getRCount();}
r << std::string(indent, ' ') << "RelAccX: " << getRCount() << " x " << getRSize() << "b @"
<< getOffset() << " (#" << getDeleted() << " - #" << getEndPos() - 1 << ")" << std::endl;
<< getOffset() << " (#" << delled << " - #" << max - 1 << ")" << std::endl;
for (uint64_t i = delled; i < max; ++i){
r << std::string(indent + 2, ' ') << "#" << i << ":" << std::endl;
for (std::map<std::string, RelAccXFieldData>::const_iterator it = fields.begin();
@ -1318,4 +1321,20 @@ namespace Util{
return true;
}
/// Converts a width and height in a given pixel format to the size needed to store those pixels.
/// Returns zero if the pixel format is non-constant or unknown
size_t pixfmtToSize(const std::string & pixfmt, size_t width, size_t height){
if (pixfmt == "UYVY" || pixfmt == "YUYV"){
// 8-bit YUV422, 2 bytes per pixel, no padding
return width*height*2;
}
if (pixfmt == "V210"){
// 10-bit YUV422, 16 bytes per 6 pixels, width padded to 128-byte multiple
size_t rowBytes = width * 16 / 6;
if (rowBytes % 128){rowBytes += 128 - (rowBytes % 128);}
return rowBytes*height;
}
return 0;
}
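// Hedged worked examples of the formulas above (integer math as written):
//   pixfmtToSize("UYVY", 1920, 1080) = 1920*1080*2 = 4147200 bytes
//   pixfmtToSize("V210", 1280, 720): rowBytes = 1280*16/6 = 3413, padded to 3456; 3456*720 = 2488320 bytes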
}// namespace Util

View file

@ -27,6 +27,8 @@ namespace Util{
bool sysSetNrOpenFiles(int n);
size_t pixfmtToSize(const std::string & pixfmt, size_t width, size_t height);
class DataCallback{
public:
virtual void dataCallback(const char *ptr, size_t size){

View file

@ -168,6 +168,15 @@ if not get_option('NORIST')
endif
have_librist = not get_option('NORIST') and librist.found()
av_libs = []
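# Optional libav-based components; typically enabled with e.g. `-DWITH_AV=true` at meson setup/configure time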
if get_option('WITH_AV')
av_libs += dependency('libswscale')
av_libs += dependency('libavformat')
av_libs += dependency('libavcodec')
av_libs += dependency('libavutil')
av_libs += dependency('libswresample')
endif
# Add thread dependency since we always have thread code in libmist
mist_deps += dependency('threads')

View file

@ -883,8 +883,30 @@ void Controller::handleAPICommands(JSON::Value &Request, JSON::Value &Response){
}
if (Request.isMember("capabilities")){
Controller::checkCapable(capabilities);
Response["capabilities"] = capabilities;
if (Request["capabilities"].isString()){
Response["capabilities"].null();
const std::string & tmpFn = Request["capabilities"].asStringRef();
jsonForEachConst(capabilities["inputs"], it){
if (it->isMember("dynamic_capa")){
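// Illustrative note: when e.g. MistInV4L2 sets dynamic_capa with source_match "v4l2:*",
// a request of {"capabilities": "v4l2:/dev/video0"} makes the controller run
// `MistInV4L2 --getcapa v4l2:/dev/video0` below and return its JSON output.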
std::string source = (*it)["source_match"].asStringRef();
std::string front = source.substr(0, source.find('*'));
std::string back = source.substr(source.find('*') + 1);
if (tmpFn.size() >= front.size()+back.size() && tmpFn.substr(0, front.size()) == front && tmpFn.substr(tmpFn.size() - back.size()) == back){
std::string arg_one = Util::getMyPath() + "MistIn" + it.key();
char const *conn_args[] ={0, "--getcapa", 0, 0};
conn_args[0] = arg_one.c_str();
conn_args[2] = Request["capabilities"].asStringRef().c_str();
configMutex.unlock();
Response["capabilities"] = JSON::fromString(Util::Procs::getOutputOf((char **)conn_args));
configMutex.lock();
break;
}
}
}
}else{
Controller::checkCapable(capabilities);
Response["capabilities"] = capabilities;
}
}
if (Request.isMember("browse")){
@ -1320,6 +1342,25 @@ void Controller::handleAPICommands(JSON::Value &Request, JSON::Value &Response){
Controller::listExternalWriters(Response["external_writer_list"]);
}
if (Request.isMember("enumerate_sources")){
if (!Request["enumerate_sources"].isString()){
Response["enumerate_sources"].null();
}else{
jsonForEachConst(capabilities["inputs"], it){
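// Illustrative note: an input that sets enum_static_prefix (e.g. "v4l2:" for MistInV4L2)
// handles sources starting with that prefix; the controller runs
// `MistIn<name> --enumerate <source>` and passes its JSON output through.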
if (it->isMember("enum_static_prefix") && (*it)["enum_static_prefix"].asStringRef().size() <= Request["enumerate_sources"].asStringRef().size() && Request["enumerate_sources"].asStringRef().substr(0, (*it)["enum_static_prefix"].asStringRef().size()) == (*it)["enum_static_prefix"].asStringRef()){
std::string arg_one = Util::getMyPath() + "MistIn" + it.key();
char const *conn_args[] ={0, "--enumerate", 0, 0};
conn_args[0] = arg_one.c_str();
conn_args[2] = Request["enumerate_sources"].asStringRef().c_str();
configMutex.unlock();
Response["enumerate_sources"] = JSON::fromString(Util::Procs::getOutputOf((char **)conn_args));
configMutex.lock();
break;
}
}
}
}
Controller::writeConfig();
if (Request.isMember("save")){

View file

@ -391,6 +391,16 @@ namespace Mist{
return 0;
}
if (config->hasOption("enumerate") && config->getString("enumerate").size()){
std::cout << enumerateSources(config->getString("enumerate")).toString() << std::endl;
return 0;
}
if (config->hasOption("getcapa") && config->getString("getcapa").size()){
std::cout << getSourceCapa(config->getString("getcapa")).toString() << std::endl;
return 0;
}
INFO_MSG("Input booting");
//Check if the input uses the name-based-override, and strip it

View file

@ -98,6 +98,8 @@ namespace Mist{
virtual void userLeadOut();
virtual void connStats(Comms::Connections & statComm);
virtual void parseHeader();
virtual JSON::Value enumerateSources(const std::string &){ return JSON::Value(); };
virtual JSON::Value getSourceCapa(const std::string &){ return JSON::Value(); };
bool bufferFrame(size_t track, uint32_t keyNum);
void doInputAbortTrigger(pid_t pid, char *mRExitReason, char *exitReason);
bool exitAndLogReason();

View file

@ -143,7 +143,11 @@ namespace Mist{
meta.setType(idx, "audio");
meta.setRate(idx, strm->codecpar->sample_rate);
meta.setSize(idx, strm->codecpar->frame_size);
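// libavutil 57.24+ (FFmpeg 5.1) replaced codecpar->channels with the ch_layout API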
#if (LIBAVUTIL_VERSION_MAJOR < 57 || (LIBAVUTIL_VERSION_MAJOR == 57 && LIBAVUTIL_VERSION_MINOR < 24))
meta.setChannels(idx, strm->codecpar->channels);
#else
meta.setChannels(idx, strm->codecpar->ch_layout.nb_channels);
#endif
}
}

View file

@ -209,8 +209,12 @@ namespace Mist{
}else{
if (initData.count(i)){meta.setInit(i, initData[i]);}
}
DTSC::Fragments fragments(M.fragments(i));
if (fragments.getEndValid() < fragCount){fragCount = fragments.getEndValid();}
if (M.hasEmbeddedFrames(i)){
fragCount = FRAG_BOOT;
}else{
DTSC::Fragments fragments(M.fragments(i));
if (fragments.getEndValid() < fragCount){fragCount = fragments.getEndValid();}
}
if (M.getFirstms(i) < firstms){firstms = M.getFirstms(i);}
if (M.getLastms(i) > lastms){lastms = M.getLastms(i);}
}
@ -404,6 +408,7 @@ namespace Mist{
}
for (std::set<size_t>::iterator idx = tracks.begin(); idx != tracks.end(); idx++){
size_t i = *idx;
if (M.hasEmbeddedFrames(i)){continue;}
std::string type = M.getType(i);
DTSC::Keys keys(M.keys(i));
// non-video tracks need to have a second keyframe that is <= firstVideo

View file

@ -46,6 +46,7 @@ namespace Mist{
capa["codecs"]["video"].append("AV1");
capa["codecs"]["video"].append("theora");
capa["codecs"]["video"].append("MPEG2");
capa["codecs"]["video"].append("JPEG");
capa["codecs"]["audio"].append("opus");
capa["codecs"]["audio"].append("vorbis");
capa["codecs"]["audio"].append("AAC");
@ -376,6 +377,10 @@ namespace Mist{
trueCodec = "MPEG2";
trueType = "video";
}
if (codec == "V_MJPEG"){
trueCodec = "JPEG";
trueType = "video";
}
if (codec == "A_PCM/FLOAT/IEEE"){
trueCodec = "FLOAT";
trueType = "audio";

src/input/input_v4l2.cpp (new file, 531 changes)
View file

@ -0,0 +1,531 @@
#include <mist/defines.h>
#include <mist/stream.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdio.h>
#include <fcntl.h>
#include <sstream>
#include "input_v4l2.h"
namespace Mist{
inputVideo4Linux::inputVideo4Linux(Util::Config *cfg) : Input(cfg){
capa["name"] = "V4L2";
capa["desc"] = "";
capa["source_match"] = "v4l2:*";
capa["always_match"] = capa["source_match"];
capa["priority"] = 10;
width = 0;
height = 0;
fpsDenominator = 0;
fpsNumerator = 0;
pixelFmt = 0;
JSON::Value option;
option["arg"] = "string";
option["long"] = "format";
option["short"] = "F";
option["help"] = "Requested resolution, framerate and pixel format, like 'MJPG-1920x1080@90.00'. FPS is optional. Defaults to using the highest surface area and FPS if not given";
option["value"].append("");
config->addOption("format", option);
capa["optional"]["format"]["name"] = "Device resolution, framerate and pixel format";
capa["optional"]["format"]["help"] = "Requested format, like 'MJPG-1920x1080@90.00'. FPS is optional. Defaults to using the highest surface area and FPS if not given";
capa["optional"]["format"]["option"] = "--format";
capa["optional"]["format"]["short"] = "F";
capa["optional"]["format"]["default"] = "";
capa["optional"]["format"]["type"] = "string";
capa["enum_static_prefix"] = "v4l2:";
option.null();
option["long"] = "enumerate";
option["short"] = "e";
option["help"] = "Output MistIn supported devices in JSON format, then exit";
option["value"].append("");
config->addOption("enumerate", option);
capa["dynamic_capa"] = true;
option.null();
option["long"] = "getcapa";
option["arg"] = "string";
option["short"] = "q";
option["help"] = "(string) Output device capabilities for given device in JSON format, then exit";
option["value"].append("");
config->addOption("getcapa", option);
}
/// @brief Returns a JSON list of connected video capture devices
JSON::Value inputVideo4Linux::enumerateSources(const std::string & device){
JSON::Value output;
DIR *d = opendir("/sys/class/video4linux");
if (!d){
FAIL_MSG("Unable to enumerate video devices. Is v4l2 available on the system?");
return output;
}
// Cycle through all devices
struct dirent *dp;
do{
errno = 0;
if ((dp = readdir(d))){
// Only consider devices starting with video
if (dp->d_type != DT_LNK || strncmp(dp->d_name, "video", 5) != 0){continue;}
// Open FD to the corresponding /dev/videoN device
std::string path = "/dev/" + std::string(dp->d_name);
fd = open(path.c_str() ,O_RDWR);
if(fd < 0){
FAIL_MSG("Failed to check device %s, continuing", dp->d_name);
continue;
}
// Query the device for any video input capabilities
struct v4l2_fmtdesc fmt;
fmt.index = 0;
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) >= 0) {
output.append("v4l2:"+path);
}
close(fd);
}
}while (dp != NULL);
closedir(d);
return output;
}
/// @brief Returns a JSON list of compatible pixel formats, resolutions and frame rates for a video input
/// \param device: path to the device to query
JSON::Value inputVideo4Linux::getSourceCapa(const std::string & device){
JSON::Value output = capa;
std::string input = getInput(device);
// Open FD to the corresponding device
fd = open(input.c_str(), O_RDWR);
if(fd < 0){
FAIL_MSG("Failed to open device, aborting");
return output;
}
output["optional"]["format"]["short"] = "F";
output["optional"]["format"]["type"] = "string";
JSON::Value & opts = output["optional"]["format"]["datalist"];
// Query the device for pixel formats
struct v4l2_fmtdesc fmt;
fmt.index = 0;
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) >= 0) {
// For each pixel format, query supported resolutions
struct v4l2_frmsizeenum frmSizes;
frmSizes.pixel_format = fmt.pixelformat;
frmSizes.index = 0;
while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frmSizes) >= 0) {
// Only support discrete frame size types for now
if (frmSizes.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
// For each frame size, query supported FPS values
struct v4l2_frmivalenum frmIntervals;
memset(&frmIntervals, 0, sizeof(frmIntervals));
frmIntervals.pixel_format = fmt.pixelformat;
frmIntervals.width = frmSizes.discrete.width;
frmIntervals.height = frmSizes.discrete.height;
bool setHighestFPS = false;
if (frmSizes.discrete.width * frmSizes.discrete.height > width * height){
width = frmSizes.discrete.width;
height = frmSizes.discrete.height;
setHighestFPS = true;
}
ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frmIntervals);
double maxFPS = 0;
while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frmIntervals) != -1) {
if (frmIntervals.type == V4L2_FRMIVAL_TYPE_DISCRETE){
double fps = (double)frmIntervals.discrete.denominator / (double)frmIntervals.discrete.numerator;
std::stringstream ss;
ss << intToString(fmt.pixelformat) << "-" << frmSizes.discrete.width << "x" << frmSizes.discrete.height << "@";
ss.setf(std::ios::fixed);
ss.precision(2);
// Use a human readable format for FPS
ss << fps;
opts.append(ss.str());
if (setHighestFPS && fps >= maxFPS){
maxFPS = fps;
output["optional"]["format"]["default"] = ss.str();
}
}
frmIntervals.index += 1;
}
}
frmSizes.index++;
}
fmt.index++;
}
close(fd);
return output;
}
/// \brief Checks whether the device supports the given config and sets defaults for any missing properties
bool inputVideo4Linux::checkArguments(){
std::string input = getInput(config->getString("input"));
// Open file descriptor to the requested device
INFO_MSG("Opening video device %s", input.c_str());
fd = open(input.c_str() ,O_RDWR);
if(fd < 0){
FAIL_MSG("Failed to open device %s, aborting", config->getString("input").c_str());
return false;
}
// Init params to requested format if it was given
// If not set, we will default to the highest surface area and pick
// the highest FPS the camera supports for that resolution
std::string format = "";
if (config->hasOption("format") && config->getString("format").size()){
format = config->getString("format");
// Anything before a - is the requested pixel format
size_t fmtDelPos = format.find('-');
if (fmtDelPos != std::string::npos){
pixelFmt = strToInt(format.substr(0, fmtDelPos));
format = format.substr(fmtDelPos + 1);
}else{
FAIL_MSG("Unable to find pixel format in requested format %s", config->getString("format").c_str());
close(fd);
return false;
}
// Anything before the @ sign is the resolution
size_t resolutionDelPos = format.find('@');
size_t widthDelPos = format.find('x');
if (resolutionDelPos != std::string::npos && widthDelPos != std::string::npos){
width = atoi(format.substr(0, widthDelPos).c_str());
format = format.substr(widthDelPos + 1);
height = atoi(format.substr(0, resolutionDelPos - widthDelPos - 1).c_str());
format = format.substr(resolutionDelPos - widthDelPos);
}else{
FAIL_MSG("Unable to find resolution in requested format %s", config->getString("format").c_str());
close(fd);
return false;
}
// Remaining string is the target FPS, which we will match to a fraction in the following loop
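// e.g. "MJPG-1920x1080@90.00" leaves pixelFmt = FourCC 'MJPG', width = 1920, height = 1080 and format = "90.00" at this point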
}
// Set defaults for unset parameters, set FPS and sanity checks
struct v4l2_fmtdesc fmt;
fmt.index = 0;
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
bool hasFPS = format.size(); //< Automatically adjust FPS if none was set
bool hasResolution = width && height; //< Automatically adjust resolution if none was set
bool hasPixFmt = pixelFmt; //< Automatically adjust pixel format if none was set
while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) >= 0) {
// If we have a requested pixelFmt, skip any non-matching formats
if (hasPixFmt && fmt.pixelformat != pixelFmt){
fmt.index++;
continue;
}
// Else go through supported resolution and FPS combos
struct v4l2_frmsizeenum frmSizes;
frmSizes.pixel_format = fmt.pixelformat;
frmSizes.index = 0;
while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frmSizes) >= 0) {
if (frmSizes.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
if (!hasResolution){
// If we have no resolution set, select the largest supported surface area
if (frmSizes.discrete.width * frmSizes.discrete.height > width * height){
width = frmSizes.discrete.width;
height = frmSizes.discrete.height;
pixelFmt = fmt.pixelformat;
}else{
// Current surface area is lower, so skip it
frmSizes.index++;
continue;
}
}else if (frmSizes.discrete.width != width || frmSizes.discrete.height != height){
// Current resolution does not match requested resolution, so skip it
frmSizes.index++;
continue;
}
// At this point we found the requested resolution or adjusted it upwards, so check supported FPS values
struct v4l2_frmivalenum frmIntervals;
memset(&frmIntervals, 0, sizeof(frmIntervals));
frmIntervals.pixel_format = pixelFmt;
frmIntervals.width = width;
frmIntervals.height = height;
ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frmIntervals);
while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frmIntervals) != -1) {
if (frmIntervals.type == V4L2_FRMIVAL_TYPE_DISCRETE){
if (!hasFPS){
// If we have no FPS set, select the largest FPS we can get for the current resolution
if (fpsNumerator && (float)frmIntervals.discrete.denominator / (float)frmIntervals.discrete.numerator
<= (float)fpsDenominator / (float)fpsNumerator){
// Current FPS is lower, so skip it
frmIntervals.index++;
continue;
}
}else if (int(frmIntervals.discrete.denominator / frmIntervals.discrete.numerator) != atoi(format.c_str())){
// Current FPS does not match requested FPS, so skip it
frmIntervals.index++;
continue;
}
// Store the denominator and numerator for the requested FPS
fpsDenominator = frmIntervals.discrete.denominator;
fpsNumerator = frmIntervals.discrete.numerator;
}
frmIntervals.index++;
}
}
frmSizes.index++;
}
fmt.index++;
}
// Abort if this input does not support the requested pixel format
std::string pixFmtStr = intToString(pixelFmt);
if (pixFmtStr != "MJPG" && pixFmtStr != "YUYV" && pixFmtStr != "UYVY") {
FAIL_MSG("Unsupported pixel format %s, aborting", pixFmtStr.c_str());
close(fd);
return false;
}
// Abort if we have no resolution
if (!width || !height) {
FAIL_MSG("Unable to determine resolution, aborting");
close(fd);
return false;
}
// Abort if we have no FPS
if (!fpsDenominator || !fpsNumerator) {
FAIL_MSG("Unable to determine FPS, aborting");
close(fd);
return false;
}
return true;
}
/// \brief Applies config to the video device and maps its buffer to a local pointer
bool inputVideo4Linux::openStreamSource(){
if(fd < 0){
FAIL_MSG("Lost connection to the device, aborting");
return false;
}
std::string pixFmtStr = intToString(pixelFmt);
INFO_MSG("Opening video device with pixel format %s, resolution %lux%lu @ %.1f fps", pixFmtStr.c_str(), width, height, (float)fpsDenominator / (float)fpsNumerator);
// Set requested pixel format and resolution
struct v4l2_format imageFormat;
imageFormat.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
imageFormat.fmt.pix.width = width;
imageFormat.fmt.pix.height = height;
imageFormat.fmt.pix.pixelformat = pixelFmt;
imageFormat.fmt.pix.field = V4L2_FIELD_NONE;
if(ioctl(fd, VIDIOC_S_FMT, &imageFormat) < 0){
FAIL_MSG("Could not apply image parameters, aborting");
close(fd);
return false;
}
// Set requested framerate
struct v4l2_streamparm streamParam;
streamParam.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(fd, VIDIOC_G_PARM, &streamParam) != 0){
FAIL_MSG("Could not apply stream parameters, aborting");
close(fd);
return false;
}
streamParam.parm.capture.capturemode |= V4L2_CAP_TIMEPERFRAME;
streamParam.parm.capture.timeperframe.denominator = fpsDenominator;
streamParam.parm.capture.timeperframe.numerator = fpsNumerator;
if(ioctl(fd, VIDIOC_S_PARM, &streamParam) != 0){
FAIL_MSG("Could not apply stream parameters, aborting");
close(fd);
return false;
}
// Initiate memory mapping
v4l2_requestbuffers requestBuffer = {0};
requestBuffer.count = 1;
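// A single buffer is enough here: streamMainLoop copies each frame out (via bufferLivePacket) before queueing the buffer again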
requestBuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
requestBuffer.memory = V4L2_MEMORY_MMAP;
if(ioctl(fd, VIDIOC_REQBUFS, &requestBuffer) < 0){
FAIL_MSG("Could not initiate memory mapping, aborting");
close(fd);
return false;
}
// Query location of the buffers in device memory
v4l2_buffer queryBuffer = {0};
queryBuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
queryBuffer.memory = V4L2_MEMORY_MMAP;
queryBuffer.index = 0;
if(ioctl(fd, VIDIOC_QUERYBUF, &queryBuffer) < 0){
FAIL_MSG("Unable to query buffer information, aborting");
close(fd);
return false;
}
// Map buffer to local address space
buffer = (char*)mmap(NULL, queryBuffer.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, queryBuffer.m.offset);
memset(buffer, 0, queryBuffer.length);
// Init buffer info struct, which is going to contain pointers to buffers and meta information
memset(&bufferinfo, 0, sizeof(bufferinfo));
bufferinfo.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
bufferinfo.memory = V4L2_MEMORY_MMAP;
bufferinfo.index = 0;
// Activate streaming I/O
int type = bufferinfo.type;
if(ioctl(fd, VIDIOC_STREAMON, &type) < 0){
FAIL_MSG("Unable to start streaming I/O, aborting");
close(fd);
return false;
}
// Create video track
// Note: pixFmtStr is not always identical to "codec", but it is for all currently-supported raw formats at least
size_t staticSize = Util::pixfmtToSize(pixFmtStr, imageFormat.fmt.pix.width, imageFormat.fmt.pix.height);
if (staticSize){
//Known static frame sizes: raw track mode
tNumber = meta.addTrack(0, 0, 0, 0, true, staticSize);
}else{
// Other cases: standard track mode
tNumber = meta.addTrack();
}
meta.setLive(true);
meta.setVod(false);
meta.setID(tNumber, tNumber);
meta.setType(tNumber, "video");
meta.setWidth(tNumber, imageFormat.fmt.pix.width);
meta.setHeight(tNumber, imageFormat.fmt.pix.height);
meta.setFpks(tNumber, 1000 * fpsDenominator / fpsNumerator);
if (pixFmtStr == "MJPG"){
meta.setCodec(tNumber, "JPEG");
}else if (pixFmtStr == "YUYV"){
meta.setCodec(tNumber, "YUYV");
}else if (pixFmtStr == "UYVY"){
meta.setCodec(tNumber, "UYVY");
}else{
FAIL_MSG("Unsupported pixel format %s, aborting", pixFmtStr.c_str());
closeStreamSource();
return false;
}
return true;
}
void inputVideo4Linux::streamMainLoop(){
uint64_t statTimer = 0;
uint64_t startTime = Util::bootSecs();
uint64_t timeOffset = 0;
if (tNumber){
timeOffset = meta.getBootMsOffset();
}else{
timeOffset = Util::bootMS();
meta.setBootMsOffset(timeOffset);
}
Comms::Connections statComm;
thisIdx = tNumber;
if (!userSelect.count(thisIdx)){
userSelect[thisIdx].reload(streamName, thisIdx, COMM_STATUS_ACTIVE | COMM_STATUS_SOURCE | COMM_STATUS_DONOTTRACK);
}
while (config->is_active && userSelect[thisIdx]){
if (userSelect[thisIdx].getStatus() & COMM_STATUS_REQDISCONNECT){
Util::logExitReason(ER_CLEAN_LIVE_BUFFER_REQ, "buffer requested shutdown");
break;
}
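// Streaming I/O with the single mmap'ed buffer: the driver fills it after we queue it, and the frame is copied out below before the next iteration queues it again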
// Enqueue an empty buffer to the driver's incoming queue
if(ioctl(fd, VIDIOC_QBUF, &bufferinfo) < 0){
ERROR_MSG("Could not enqueue buffer, aborting");
return;
}
// Dequeue the filled buffer from the drivers outgoing queue
if(ioctl(fd, VIDIOC_DQBUF, &bufferinfo) < 0){
ERROR_MSG("Could not dequeue the buffer, aborting");
return;
}
if (!bufferinfo.bytesused){
Util::logExitReason(ER_CLEAN_EOF, "no more data");
break;
}
INSANE_MSG("Buffer has %f KBytes of data", (double)bufferinfo.bytesused / 1024);
thisIdx = tNumber;
thisTime = Util::bootMS() - timeOffset;
bufferLivePacket(thisTime, 0, tNumber, buffer, bufferinfo.bytesused, 0, true);
if (!userSelect.count(thisIdx)){
userSelect[thisIdx].reload(streamName, thisIdx, COMM_STATUS_ACTIVE | COMM_STATUS_SOURCE | COMM_STATUS_DONOTTRACK);
}
if (Util::bootSecs() - statTimer > 1){
// Connect to stats for INPUT detection
if (!statComm){statComm.reload(streamName, getConnectedBinHost(), JSON::Value(getpid()).asString(), "INPUT:" + capa["name"].asStringRef(), "");}
if (statComm){
if (statComm.getStatus() == COMM_STATUS_REQDISCONNECT){
config->is_active = false;
Util::logExitReason(ER_CLEAN_CONTROLLER_REQ, "received shutdown request from controller");
return;
}
uint64_t now = Util::bootSecs();
statComm.setNow(now);
statComm.setStream(streamName);
statComm.setTime(now - startTime);
statComm.setLastSecond(0);
connStats(statComm);
}
statTimer = Util::bootSecs();
}
}
}
void inputVideo4Linux::closeStreamSource(){
if (fd >= 0){
int type = bufferinfo.type;
if(ioctl(fd, VIDIOC_STREAMOFF, &type) < 0){
ERROR_MSG("Could not stop camera streaming I/O");
}
close(fd);
}
}
/// \brief Converts an int holding a packed four-character code back to its original string form
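/// e.g. intToString(strToInt("MJPG")) == "MJPG"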
std::string inputVideo4Linux::intToString(int n){
std::string output;
while(n){
output += (char)n & 0xFF;
n >>= 8;
}
return output;
}
/// \brief Converts a (four-character) string to its packed integer form
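/// The first character ends up in the least significant byte, matching how V4L2 packs its FourCC pixel format codes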
int inputVideo4Linux::strToInt(std::string str){
int output = 0;
for (int i = str.size() - 1; i >= 0; i--){
output <<= 8;
output += (char)str[i];
}
return output;
}
/// \brief Translates an input string to its matching device path
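/// e.g. "v4l2:video0", "video0" and "/dev/video0" all resolve to "/dev/video0"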
std::string inputVideo4Linux::getInput(std::string input){
// Remove 'v4l2:' prefix to get the requested video device
if (input.substr(0, 5) == "v4l2:"){
input = input.substr(5);
}
// If /dev/ is not prepended to the input, add it
if (input.substr(0, 5) != "/dev/"){
input = "/dev/" + input;
}
return input;
}
}// namespace Mist

41
src/input/input_v4l2.h Normal file
View file

@ -0,0 +1,41 @@
#include "input.h"
#include <mist/dtsc.h>
#include <fstream>
#include <linux/videodev2.h>
namespace Mist{
class inputVideo4Linux : public Input{
public:
inputVideo4Linux(Util::Config *cfg);
protected:
bool checkArguments();
virtual bool needHeader(){return false;}
virtual bool isSingular(){return true;}
bool needsLock(){return false;}
JSON::Value enumerateSources(const std::string & device);
JSON::Value getSourceCapa(const std::string & device);
void parseStreamHeader(){};
bool openStreamSource();
void closeStreamSource();
void streamMainLoop();
std::string intToString(int n);
int strToInt(std::string str);
std::string getInput(std::string input);
uint64_t width;
uint64_t height;
uint64_t fpsDenominator;
uint64_t fpsNumerator;
uint pixelFmt;
uint64_t startTime;
size_t tNumber;
int fd;
v4l2_buffer bufferinfo;
char* buffer;
};
}// namespace Mist
typedef Mist::inputVideo4Linux mistIn;

View file

@ -32,15 +32,12 @@ if have_srt
inputs += {'name' : 'TSSRT', 'format' : 'tssrt', 'extra' : 'with_srt'}
endif
av_libs = []
if get_option('WITH_AV')
inputs += {'name' : 'AV', 'format' : 'av'}
av_libs = [
dependency('libavformat'),
dependency('libavcodec'),
dependency('libavutil'),
]
inputs += {'name' : 'AV', 'format' : 'av', 'extra': 'with_av'}
endif
if ccpp.has_header('linux/videodev2.h')
inputs += {'name' : 'V4L2', 'format' : 'v4l2'}
endif
inputs_tgts = []
@ -56,6 +53,9 @@ foreach input : inputs
deps += libmist_srt_dep
deps += libsrt
endif
if input.get('extra').contains('with_av')
deps += av_libs
endif
endif
if input.get('name').contains('AV')
deps += av_libs

View file

@ -23,7 +23,7 @@ namespace Mist{
size_t bestSoFar = INVALID_TRACK_ID;
meta.reloadReplacedPagesIfNeeded();
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
if (meta.trackValid(it->first)){
if (meta.trackLoaded(it->first)){
if (meta.getType(it->first) == "video"){return it->first;}
bestSoFar = it->first;
}
@ -60,8 +60,9 @@ namespace Mist{
Util::RelAccX &tPages = aMeta.pages(idx);
uint32_t pageIdx = INVALID_KEY_NUM;
Util::RelAccXFieldData firstKeyAccx = tPages.getFieldData("firstkey");
for (uint32_t i = tPages.getDeleted(); i < tPages.getEndPos(); i++){
if (tPages.getInt("firstkey", i) == pageNumber){
if (tPages.getInt(firstKeyAccx, i) == pageNumber){
pageIdx = i;
break;
}
@ -358,6 +359,11 @@ namespace Mist{
// Do nothing if the trackid is invalid
if (packTrack == INVALID_TRACK_ID){return;}
if (aMeta.hasEmbeddedFrames(packTrack)){
aMeta.storeFrame(packTrack, packTime, packData, packDataSize);
return;
}
// Store the trackid for easier access
Util::RelAccX &tPages = aMeta.pages(packTrack);

View file

@ -79,6 +79,9 @@ foreach output : outputs
endif
if extra.contains('jpg')
endif
if extra.contains('avlibs')
deps += av_libs
endif
if extra.contains('ts')
sources += files('output_ts_base.cpp')
endif

View file

@ -248,7 +248,7 @@ namespace Mist{
if (!userSelect.size()){selectDefaultTracks();}
size_t mainTrack = getMainSelectedTrack();
if (mainTrack != INVALID_TRACK_ID){
DTSC::Keys keys(M.keys(mainTrack));
DTSC::Keys keys(M.getKeys(mainTrack));
if (keys.getValidCount() >= minTracks || M.getNowms(mainTrack) - M.getFirstms(mainTrack) > minMs){
return true;
}
@ -368,6 +368,10 @@ namespace Mist{
}
Util::wait(500);
meta.reloadReplacedPagesIfNeeded();
if (!meta){
isInitialized = true;
return;
}
stats();
}
}
@ -496,7 +500,7 @@ namespace Mist{
}
//Abort if the track is not loaded
if (!M.trackLoaded(trk)){return 0;}
const DTSC::Keys &keys = M.keys(trk);
const DTSC::Keys &keys = M.getKeys(trk);
//Abort if there are no keys
if (!keys.getValidCount()){return 0;}
//Get the key for the current time
@ -650,6 +654,7 @@ namespace Mist{
return;
}
if (!M.trackLoaded(trackId)){meta.reloadReplacedPagesIfNeeded();}
// Note: specifically uses `keys` rather than `getKeys` because pages must not be limited-based
DTSC::Keys keys(M.keys(trackId));
if (!keys.getValidCount()){
WARN_MSG("Load for track %zu key %zu aborted - track is empty", trackId, keyNum);
@ -814,13 +819,13 @@ namespace Mist{
return seek(pos);
}
if (M.getType(mainTrack) == "video"){
DTSC::Keys keys(M.keys(mainTrack));
uint32_t keyNum = M.getKeyNumForTime(mainTrack, pos);
if (keyNum == INVALID_KEY_NUM){
DTSC::Keys keys(M.getKeys(mainTrack));
if (!keys.getValidCount()){
FAIL_MSG("Attempted seek on empty track %zu", mainTrack);
return false;
}
pos = keys.getTime(keyNum);
// Snap to the keyframe's exact starting time
pos = keys.getTime(keys.getIndexForTime(pos));
}
}
MEDIUM_MSG("Seeking to %" PRIu64 "ms (%s)", pos, toKey ? "sync" : "direct");
@ -867,7 +872,7 @@ namespace Mist{
userSelect.erase(tid);
return false;
}
DTSC::Keys keys(M.keys(tid));
DTSC::Keys keys(M.getKeys(tid));
if (M.getLive() && !pos && !buffer.getSyncMode()){
uint64_t tmpTime = (M.getFirstms(tid) + M.getLastms(tid))/2;
uint32_t tmpKey = M.getKeyNumForTime(tid, tmpTime);
@ -885,6 +890,17 @@ namespace Mist{
pos = actualKeyTime;
userSelect[tid].setKeyNum(keyNum);
}
if (M.hasEmbeddedFrames(tid)){
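// Raw tracks keep their frames embedded in the stream metadata rather than on data pages, so seeking only needs to point the buffer entry at the right frame index and timestamp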
Util::sortedPageInfo tmp;
tmp.tid = tid;
tmp.offset = 0;
tmp.partIndex = keyNum;
tmp.time = actualKeyTime;
tmp.ghostPacket = false;
buffer.insert(tmp);
INFO_MSG("Sought to #%zu, T %" PRIu64, tmp.partIndex, tmp.time);
return true;
}
loadPageForKey(tid, keyNum + (getNextKey ? 1 : 0));
if (!curPage.count(tid) || !curPage[tid].mapped){
//Sometimes the page load fails because of a connection loss to the user. This is fine.
@ -951,7 +967,7 @@ namespace Mist{
if (meta.getLive() && buffer.getSyncMode()){
size_t mainTrack = getMainSelectedTrack();
if (mainTrack == INVALID_TRACK_ID){return;}
DTSC::Keys keys(M.keys(mainTrack));
DTSC::Keys keys(M.getKeys(mainTrack));
if (!keys.getValidCount()){return;}
// seek to the newest keyframe, unless that is <5s, then seek to the oldest keyframe
uint32_t firstKey = keys.getFirstValid();
@ -1072,7 +1088,7 @@ namespace Mist{
targetParams["recstart"] = JSON::Value(startRec).asString();
}
size_t mainTrack = getMainSelectedTrack();
if (M.getType(mainTrack) == "video"){
if (mainTrack != INVALID_TRACK_ID && M.getType(mainTrack) == "video"){
seekPos = M.getTimeForKeyIndex(mainTrack, M.getKeyIndexForTime(mainTrack, startRec));
if (seekPos != startRec){
INFO_MSG("Shifting recording start from %" PRIu64 " to %" PRIu64 " so that it starts with a keyframe", startRec, seekPos);
@ -1131,9 +1147,11 @@ namespace Mist{
// apply a limiter to the stream to make it appear like a VoD asset
if (targetParams.count("recstop") || !M.getLive()){
size_t mainTrack = getMainSelectedTrack();
uint64_t stopPos = M.getLastms(mainTrack);
if (targetParams.count("recstop")){stopPos = atoll(targetParams["recstop"].c_str());}
if (!M.getLive() || stopPos <= M.getLastms(mainTrack)){meta.applyLimiter(seekPos, stopPos);}
if (mainTrack != INVALID_TRACK_ID){
uint64_t stopPos = M.getLastms(mainTrack);
if (targetParams.count("recstop")){stopPos = atoll(targetParams["recstop"].c_str());}
if (!M.getLive() || stopPos <= M.getLastms(mainTrack)){meta.applyLimiter(seekPos, stopPos);}
}
}
}else{
if (M.getLive() && targetParams.count("pushdelay")){
@ -1185,40 +1203,42 @@ namespace Mist{
}
if (targetParams.count("start") && atoll(targetParams["start"].c_str()) != 0){
size_t mainTrack = getMainSelectedTrack();
int64_t startRec = atoll(targetParams["start"].c_str());
if (startRec > M.getNowms(mainTrack)){
if (!M.getLive()){
onFail("Playback start past end of non-live source", true);
return;
}
int64_t streamAvail = M.getNowms(mainTrack);
int64_t lastUpdated = Util::getMS();
INFO_MSG("Waiting for stream to reach playback starting point (%" PRIu64 " -> %" PRIu64 "). Time left: " PRETTY_PRINT_MSTIME, startRec, streamAvail, PRETTY_ARG_MSTIME(startRec - streamAvail));
while (Util::getMS() - lastUpdated < 5000 && startRec > streamAvail && keepGoing()){
Util::sleep(500);
if (M.getNowms(mainTrack) > streamAvail){
HIGH_MSG("Waiting for stream to reach playback starting point (%" PRIu64 " -> %" PRIu64 "). Time left: " PRETTY_PRINT_MSTIME, startRec, streamAvail, PRETTY_ARG_MSTIME(startRec - streamAvail));
stats();
streamAvail = M.getNowms(mainTrack);
lastUpdated = Util::getMS();
if (mainTrack != INVALID_TRACK_ID){
int64_t startRec = atoll(targetParams["start"].c_str());
if (startRec > M.getNowms(mainTrack)){
if (!M.getLive()){
onFail("Playback start past end of non-live source", true);
return;
}
int64_t streamAvail = M.getNowms(mainTrack);
int64_t lastUpdated = Util::getMS();
INFO_MSG("Waiting for stream to reach playback starting point (%" PRIu64 " -> %" PRIu64 "). Time left: " PRETTY_PRINT_MSTIME, startRec, streamAvail, PRETTY_ARG_MSTIME(startRec - streamAvail));
while (Util::getMS() - lastUpdated < 5000 && startRec > streamAvail && keepGoing()){
Util::sleep(500);
if (M.getNowms(mainTrack) > streamAvail){
HIGH_MSG("Waiting for stream to reach playback starting point (%" PRIu64 " -> %" PRIu64 "). Time left: " PRETTY_PRINT_MSTIME, startRec, streamAvail, PRETTY_ARG_MSTIME(startRec - streamAvail));
stats();
streamAvail = M.getNowms(mainTrack);
lastUpdated = Util::getMS();
}
}
}
}
if (startRec < 0 || startRec < startTime()){
WARN_MSG("Playback begin at %" PRId64 " ms not available, starting at %" PRIu64
" ms instead",
startRec, startTime());
startRec = startTime();
}
if (M.getType(mainTrack) == "video"){
seekPos = M.getTimeForKeyIndex(mainTrack, M.getKeyIndexForTime(mainTrack, startRec));
if (seekPos != startRec){
INFO_MSG("Shifting recording start from %" PRIu64 " to %" PRIu64 " so that it starts with a keyframe", startRec, seekPos);
if (startRec < 0 || startRec < startTime()){
WARN_MSG("Playback begin at %" PRId64 " ms not available, starting at %" PRIu64
" ms instead",
startRec, startTime());
startRec = startTime();
}
}else{
seekPos = startRec;
if (M.getType(mainTrack) == "video"){
seekPos = M.getTimeForKeyIndex(mainTrack, M.getKeyIndexForTime(mainTrack, startRec));
if (seekPos != startRec){
INFO_MSG("Shifting recording start from %" PRIu64 " to %" PRIu64 " so that it starts with a keyframe", startRec, seekPos);
}
}else{
seekPos = startRec;
}
INFO_MSG("Playback will start at %" PRIu64, seekPos);
}
INFO_MSG("Playback will start at %" PRIu64, seekPos);
}
// Duration to record in seconds. Overrides stop.
if (targetParams.count("duration")){
@ -1329,7 +1349,7 @@ namespace Mist{
}
// cancel if there are no keys in the main track
if (mainTrack == INVALID_TRACK_ID){return false;}
DTSC::Keys mainKeys(meta.keys(mainTrack));
DTSC::Keys mainKeys(meta.getKeys(mainTrack));
if (!mainKeys.getValidCount()){return false;}
for (uint32_t keyNum = mainKeys.getEndValid() - 1; keyNum >= mainKeys.getFirstValid(); keyNum--){
@ -1966,18 +1986,26 @@ namespace Mist{
size_t printLevel = (probablyBad ? DLVL_WARN : DLVL_INFO);
//The rest of the operations depends on userSelect, so we ignore it if it doesn't exist.
if (!userSelect.count(trackId)){
DEBUG_MSG(printLevel, "Dropping %s track %zu (lastP=%" PRIu64 "): %s",
meta.getCodec(trackId).c_str(), trackId, pageNumMax(trackId), reason.c_str());
if (M.hasEmbeddedFrames(trackId)){
DEBUG_MSG(printLevel, "Dropping %s track %zu (raw): %s", meta.getCodec(trackId).c_str(), trackId, reason.c_str());
}else{
DEBUG_MSG(printLevel, "Dropping %s track %zu (lastP=%" PRIu64 "): %s",
meta.getCodec(trackId).c_str(), trackId, pageNumMax(trackId), reason.c_str());
}
return;
}
const Comms::Users &usr = userSelect.at(trackId);
if (!usr){
DEBUG_MSG(printLevel, "Dropping %s track %zu (lastP=%" PRIu64 "): %s",
meta.getCodec(trackId).c_str(), trackId, pageNumMax(trackId), reason.c_str());
if (M.hasEmbeddedFrames(trackId)){
DEBUG_MSG(printLevel, "Dropping %s track %zu (raw): %s", meta.getCodec(trackId).c_str(), trackId, reason.c_str());
}else{
DEBUG_MSG(printLevel, "Dropping %s track %zu@k%zu (nextP=%" PRIu64 ", lastP=%" PRIu64 "): %s",
meta.getCodec(trackId).c_str(), trackId, usr.getKeyNum() + 1,
pageNumForKey(trackId, usr.getKeyNum() + 1), pageNumMax(trackId), reason.c_str());
if (!usr){
DEBUG_MSG(printLevel, "Dropping %s track %zu (lastP=%" PRIu64 "): %s",
meta.getCodec(trackId).c_str(), trackId, pageNumMax(trackId), reason.c_str());
}else{
DEBUG_MSG(printLevel, "Dropping %s track %zu@k%zu (nextP=%" PRIu64 ", lastP=%" PRIu64 "): %s",
meta.getCodec(trackId).c_str(), trackId, usr.getKeyNum() + 1,
pageNumForKey(trackId, usr.getKeyNum() + 1), pageNumMax(trackId), reason.c_str());
}
}
userSelect.erase(trackId);
}
@ -1997,7 +2025,7 @@ namespace Mist{
// find the main track, check if it is video. Abort if not.
size_t mainTrack = getMainSelectedTrack();
if (M.getType(mainTrack) != "video"){return false;}
if (mainTrack == INVALID_TRACK_ID || M.getType(mainTrack) != "video"){return false;}
// we now know that mainTrack is a video track - let's do some work!
// first, we remove all selected tracks and the buffer. Then we select only the main track.
@ -2006,13 +2034,12 @@ namespace Mist{
userSelect.clear();
userSelect[mainTrack].reload(streamName, mainTrack);
// now, seek to the exact timestamp of the keyframe
DTSC::Keys keys(M.keys(mainTrack));
uint32_t targetKey = M.getKeyNumForTime(mainTrack, currTime);
DTSC::Keys keys(M.getKeys(mainTrack));
bool ret = false;
if (targetKey == INVALID_KEY_NUM){
if (!keys.getValidCount()){
FAIL_MSG("No keyframes available on track %zu", mainTrack);
}else{
seek(keys.getTime(targetKey));
seek(keys.getTime(keys.getIndexForTime(currTime)));
// attempt to load the key into thisPacket
ret = prepareNext();
if (!ret){
@ -2089,6 +2116,49 @@ namespace Mist{
return false;
}
if (M.hasEmbeddedFrames(nxt.tid)){
if (nxt.ghostPacket){
if (M.getEmbeddedTime(nxt.tid, nxt.partIndex, nxt.time)){
nxt.ghostPacket = false;
}else{
nxt.time = M.getNowms(nxt.tid);
uint64_t newPart = M.getKeyNumForTime(nxt.tid, nxt.time);
if (newPart >= nxt.partIndex){
seek(nxt.time);
}else{
buffer.replaceFirst(nxt);
playbackSleep(5);
}
return false;
}
}
thisTime = nxt.time;
thisIdx = nxt.tid;
char * d = 0;
size_t dz = 0;
M.getEmbeddedData(nxt.tid, nxt.partIndex, d, dz);
thisPacket.genericFill(thisTime, 0, thisIdx, d, dz, 0, true);
userSelect[nxt.tid].setKeyNum(nxt.partIndex);
++nxt.partIndex;
// exchange the current packet in the buffer for the next one
if (M.getEmbeddedTime(nxt.tid, nxt.partIndex, nxt.time)){
nxt.ghostPacket = false;
}else{
nxt.time = M.getNowms(nxt.tid);
uint64_t newPart = M.getKeyNumForTime(nxt.tid, nxt.time);
if (newPart >= nxt.partIndex){
seek(nxt.time);
return false;
}else{
nxt.ghostPacket = true;
}
}
buffer.replaceFirst(nxt);
return true;
}
// if we're going to read past the end of the data page...
if (nxt.offset >= curPage[nxt.tid].len ||
(!memcmp(curPage[nxt.tid].mapped + nxt.offset, "\000\000\000\000", 4))){
@ -2164,7 +2234,8 @@ namespace Mist{
//Check if there exists a different page for the next key
uint32_t thisKey = M.getKeyNumForTime(nxt.tid, nxt.time);
uint32_t nextKeyPage = INVALID_KEY_NUM;
//Make sure we only try to read the page for the next key if it actually should be available
// Make sure we only try to read the page for the next key if it actually should be available
// Note: specifically uses `keys` instead of `getKeys` because these are page-related operations
DTSC::Keys keys(M.keys(nxt.tid));
if (keys.getEndValid() >= thisKey+1){nextKeyPage = M.getPageNumberForKey(nxt.tid, thisKey + 1);}
if (nextKeyPage != INVALID_KEY_NUM && nextKeyPage != currentPage[nxt.tid]){
@ -2174,12 +2245,23 @@ namespace Mist{
//If the next packet should've been before the current packet, something is wrong. Abort, abort!
if (nextTime < nxt.time){
std::stringstream errMsg;
errMsg << "next key (" << (thisKey+1) << ") time " << nextTime << " but current time " << nxt.time;
errMsg << "; currPage=" << currentPage[nxt.tid] << ", nxtPage=" << nextKeyPage;
errMsg << ", firstKey=" << keys.getFirstValid() << ", endKey=" << keys.getEndValid();
dropTrack(nxt.tid, errMsg.str().c_str());
return false;
//Re-try the read in ~50ms, hoping this is a race condition we missed somewhere.
Util::sleep(50);
meta.reloadReplacedPagesIfNeeded();
// Note: specifically uses `keys` instead of `getKeys` because these are page-related operations
DTSC::Keys keys(M.keys(nxt.tid));
nextTime = keys.getTime(thisKey + 1);
//Still wrong? Abort, abort!
if (nextTime < nxt.time){
std::stringstream errMsg;
errMsg << "next key (" << (thisKey+1) << ") time " << nextTime << " but current time " << nxt.time;
errMsg << "; currPage=" << currentPage[nxt.tid] << ", nxtPage=" << nextKeyPage;
errMsg << ", firstKey=" << keys.getFirstValid() << ", endKey=" << keys.getEndValid();
dropTrack(nxt.tid, errMsg.str().c_str());
return false;
}else{
WARN_MSG("Recovered from race condition");
}
}
break;//Valid packet!
}

View file

@ -284,7 +284,7 @@ namespace Mist{
// Fragment & Key handlers
DTSC::Fragments fragments(M.fragments(trackData.timingTrackId));
DTSC::Keys keys(M.keys(trackData.timingTrackId));
DTSC::Keys keys(M.getKeys(trackData.timingTrackId));
uint32_t bprErrCode = HLS::blockPlaylistReload(M, userSelect, trackData, hlsSpec, fragments, keys);
if (bprErrCode == 400){
@ -508,7 +508,7 @@ namespace Mist{
// skip the first two fragments if live
if (M.getLive() && (lastFragment - firstFragment) > 6){firstFragment += 2;}
DTSC::Keys keys(M.keys(mainTrack));
DTSC::Keys keys(M.getKeys(mainTrack));
for (; firstFragment < lastFragment; ++firstFragment){
uint32_t duration = fragments.getDuration(firstFragment);
uint64_t starttime = keys.getTime(fragments.getFirstKey(firstFragment));
@ -765,7 +765,7 @@ namespace Mist{
void OutCMAF::smoothAdaptation(const std::string &type, std::set<size_t> tracks,
std::stringstream &r){
if (!tracks.size()){return;}
DTSC::Keys keys(M.keys(*tracks.begin()));
DTSC::Keys keys(M.getKeys(*tracks.begin()));
r << "<StreamIndex Type=\"" << type << "\" QualityLevels=\"" << tracks.size() << "\" Name=\""
<< type << "\" Chunks=\"" << keys.getValidCount() << "\" Url=\"Q({bitrate})/"
<< "chunk_{start_time}.m4s\" ";
@ -887,7 +887,7 @@ namespace Mist{
uint64_t mTrk = getMainSelectedTrack();
size_t currentKey = M.getKeyIndexForTime(mTrk, thisTime);
uint64_t startTime = Util::bootMS();
DTSC::Keys keys(M.keys(mTrk));
DTSC::Keys keys(M.getKeys(mTrk));
while (startTime + maxWait > Util::bootMS() && keepGoing()){
if (keys.getEndValid() > currentKey + 1 &&
M.getLastms(thisIdx) >= M.getTimeForKeyIndex(mTrk, currentKey + 1)){

View file

@ -212,6 +212,7 @@ namespace Mist{
return;
}
uint32_t longest_key = 0;
// Note: specifically uses `keys` instead of `getKeys` since we want _all_ data, regardless of limiting
DTSC::Keys Mkeys(M.keys(idx));
uint32_t firstKey = Mkeys.getFirstValid();
uint32_t endKey = Mkeys.getEndValid();

View file

@ -67,6 +67,10 @@ namespace Mist{
capa["codecs"][0u][0u].append("theora");
capa["codecs"][0u][0u].append("MPEG2");
capa["codecs"][0u][0u].append("AV1");
capa["codecs"][0u][0u].append("JPEG");
capa["codecs"][0u][0u].append("YUYV");
capa["codecs"][0u][0u].append("UYVY");
capa["codecs"][0u][0u].append("NV12");
capa["codecs"][0u][1u].append("AAC");
capa["codecs"][0u][1u].append("FLAC");
capa["codecs"][0u][1u].append("vorbis");
@ -123,24 +127,35 @@ namespace Mist{
size_t OutEBML::clusterSize(uint64_t start, uint64_t end){
size_t sendLen = EBML::sizeElemUInt(EBML::EID_TIMECODE, start);
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
DTSC::Keys keys(M.keys(it->first));
DTSC::Parts parts(M.parts(it->first));
DTSC::Keys keys(M.getKeys(it->first));
uint32_t firstPart = keys.getFirstPart(keys.getFirstValid());
uint64_t curMS = 0;
for (size_t i = keys.getFirstValid(); i < keys.getEndValid(); ++i){
if (keys.getTime(i) > start){break;}
firstPart = keys.getFirstPart(i);
curMS = keys.getTime(i);
}
for (size_t i = firstPart; i < parts.getEndValid(); ++i){
if (curMS >= end){break;}
if (curMS >= start){
uint32_t blkLen = EBML::sizeSimpleBlock(it->first + 1, parts.getSize(i));
sendLen += blkLen;
if (M.hasEmbeddedFrames(it->first)){
for (size_t i = keys.getFirstValid(); i < keys.getEndValid(); ++i){
curMS = keys.getTime(i);
if (curMS > end){break;}
if (curMS >= start){
uint32_t blkLen = EBML::sizeSimpleBlock(it->first + 1, keys.getSize(i));
sendLen += blkLen;
}
}
}else{
for (size_t i = keys.getFirstValid(); i < keys.getEndValid(); ++i){
if (keys.getTime(i) > start){break;}
firstPart = keys.getFirstPart(i);
curMS = keys.getTime(i);
}
DTSC::Parts parts(M.parts(it->first));
for (size_t i = firstPart; i < parts.getEndValid(); ++i){
if (curMS >= end){break;}
if (curMS >= start){
uint32_t blkLen = EBML::sizeSimpleBlock(it->first + 1, parts.getSize(i));
sendLen += blkLen;
}
curMS += parts.getDuration(i);
}
curMS += parts.getDuration(i);
}
}
return sendLen;
@ -193,6 +208,7 @@ namespace Mist{
if (codec == "vorbis"){return "A_VORBIS";}
if (codec == "theora"){return "V_THEORA";}
if (codec == "MPEG2"){return "V_MPEG2";}
if (codec == "JPEG"){return "V_MJPEG";}
if (codec == "PCM"){return "A_PCM/INT/BIG";}
if (codec == "MP2"){return "A_MPEG/L2";}
if (codec == "MP3"){return "A_MPEG/L3";}
@ -203,6 +219,9 @@ namespace Mist{
if (codec == "FLOAT"){return "A_PCM/FLOAT/IEEE";}
if (codec == "DTS"){return "A_DTS";}
if (codec == "JSON"){return "M_JSON";}
if (codec == "YUYV"){return "V_UNCOMPRESSED";}
if (codec == "NV12"){return "V_UNCOMPRESSED";}
if (codec == "UYVY"){return "V_UNCOMPRESSED";}
return "E_UNKNOWN";
}
@ -234,6 +253,9 @@ namespace Mist{
subLen += EBML::sizeElemUInt(EBML::EID_PIXELHEIGHT, M.getHeight(idx));
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYWIDTH, M.getWidth(idx));
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYHEIGHT, M.getHeight(idx));
if (codec == "YUYV" || codec == "NV12" || codec == "UYVY"){
subLen += EBML::sizeElemStr(EBML::EID_UNCOMPRESSEDFOURCC, codec);
}
sendLen += EBML::sizeElemHead(EBML::EID_VIDEO, subLen);
}
if (type == "audio"){
@ -274,6 +296,9 @@ namespace Mist{
EBML::sendElemUInt(myConn, EBML::EID_PIXELHEIGHT, M.getHeight(idx));
EBML::sendElemUInt(myConn, EBML::EID_DISPLAYWIDTH, M.getWidth(idx));
EBML::sendElemUInt(myConn, EBML::EID_DISPLAYHEIGHT, M.getHeight(idx));
if (codec == "YUYV" || codec == "NV12" || codec == "UYVY"){
EBML::sendElemStr(myConn, EBML::EID_UNCOMPRESSEDFOURCC, codec);
}
}
if (type == "audio"){
EBML::sendElemUInt(myConn, EBML::EID_TRACKTYPE, 2);
@ -313,6 +338,9 @@ namespace Mist{
subLen += EBML::sizeElemUInt(EBML::EID_PIXELHEIGHT, M.getHeight(idx));
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYWIDTH, M.getWidth(idx));
subLen += EBML::sizeElemUInt(EBML::EID_DISPLAYHEIGHT, M.getHeight(idx));
if (codec == "YUYV" || codec == "NV12" || codec == "UYVY"){
subLen += EBML::sizeElemStr(EBML::EID_UNCOMPRESSEDFOURCC, codec);
}
sendLen += EBML::sizeElemHead(EBML::EID_VIDEO, subLen);
}
if (type == "audio"){

View file

@ -130,7 +130,7 @@ namespace Mist{
}
if (config->getBool("keyframeonly")){
size_t tid = userSelect.begin()->first;
DTSC::Keys keys(M.keys(tid));
DTSC::Keys keys(M.getKeys(tid));
uint32_t endKey = keys.getEndValid();
uint64_t keyTime = keys.getTime(endKey - 1);
INFO_MSG("Seeking for time %" PRIu64 " on track %zu key %" PRIu32, keyTime, tid, endKey - 1);

View file

@ -35,7 +35,7 @@ namespace Mist{
///\return The generated bootstrap.
std::string OutHDS::dynamicBootstrap(size_t idx){
DTSC::Fragments fragments(M.fragments(idx));
DTSC::Keys keys(M.keys(idx));
DTSC::Keys keys(M.getKeys(idx));
std::string empty;
MP4::ASRT asrt;
@ -248,7 +248,7 @@ namespace Mist{
// delay if we don't have the next fragment available yet
unsigned int timeout = 0;
DTSC::Fragments fragments(M.fragments(idx));
DTSC::Keys keys(M.keys(idx));
DTSC::Keys keys(M.getKeys(idx));
while (myConn && fragIdx >= fragments.getEndValid() - 1){
// time out after 21 seconds
if (++timeout > 42){

View file

@ -149,29 +149,31 @@ namespace Mist{
}
}
size_t keyIndex = M.getKeyIndexForTime(thisIdx, thisTime);
uint64_t keyTime = M.getTimeForKeyIndex(thisIdx, keyIndex);
bool isKey = thisPacket.getFlag("keyframe");
if (keyTime > thisTime){
std::cout << "Corruption? Our time is " << thisTime << ", but our key time is " << keyTime << std::endl;
writeContext();
myConn.close();
return;
}else{
if (M.getType(thisIdx) == "video"){
if (keyTime == thisTime){
if (!isKey){
std::cout << "Corruption? Video packet at time " << thisTime << " should be a keyframe, but isn't!" << std::endl;
writeContext();
myConn.close();
return;
}
}else{
if (isKey){
std::cout << "Corruption? Video packet at time " << thisTime << " should not be a keyframe, but is!" << std::endl;
writeContext();
myConn.close();
return;
if (!M.hasEmbeddedFrames(thisIdx)){
size_t keyIndex = M.getKeyIndexForTime(thisIdx, thisTime);
uint64_t keyTime = M.getTimeForKeyIndex(thisIdx, keyIndex);
bool isKey = thisPacket.getFlag("keyframe");
if (keyTime > thisTime){
std::cout << "Corruption? Our time is " << thisTime << ", but our key time is " << keyTime << std::endl;
writeContext();
myConn.close();
return;
}else{
if (M.getType(thisIdx) == "video"){
if (keyTime == thisTime){
if (!isKey){
std::cout << "Corruption? Video packet at time " << thisTime << " should be a keyframe, but isn't!" << std::endl;
writeContext();
myConn.close();
return;
}
}else{
if (isKey){
std::cout << "Corruption? Video packet at time " << thisTime << " should not be a keyframe, but is!" << std::endl;
writeContext();
myConn.close();
return;
}
}
}
}

View file

@ -78,10 +78,10 @@ namespace Mist{
// VoD size of the whole thing is RIFF(4)+fmt(26)+fact(12)+LIST(30)+data(8)+data itself
uint32_t total_data = 0xFFFFFFFFul - 80;
if (!M.getLive()){
DTSC::Keys keys(M.keys(mainTrack));
DTSC::Keys keys(M.getKeys(mainTrack));
total_data = 0;
size_t keyCount = keys.getEndValid();
for (size_t i = 0; i < keyCount; ++i){total_data += keys.getSize(i);}
for (size_t i = keys.getFirstValid(); i < keyCount; ++i){total_data += keys.getSize(i);}
}
Bit::htobl_le(riffHeader + 4, 80 + total_data);
myConn.SendNow(riffHeader, 12);

View file

@ -46,3 +46,17 @@ executables += {
'defines': [],
}
if get_option('WITH_AV')
executables += {
'name' : 'MistProcAV',
'sources' : [
files('process_av.cpp'),
header_tgts,
io_cpp,
input_cpp,
output_cpp,
],
'deps' :[libmist_dep, av_libs],
'defines': [],
}
endif

2038
src/process/process_av.cpp Normal file

File diff suppressed because it is too large

19
src/process/process_av.h Normal file
View file

@ -0,0 +1,19 @@
#include <mist/defines.h>
#include <mist/json.h>
namespace Mist{
bool getFirst = false;
bool sendFirst = false;
uint64_t packetTimeDiff;
uint64_t sendPacketTime;
JSON::Value opt; /// Options
class ProcAV{
public:
ProcAV(){};
bool CheckConfig();
void Run();
};
}// namespace Mist

View file

@ -30,8 +30,146 @@ uint32_t res_x = 0;
uint32_t res_y = 0;
Mist::OutENC Enc;
//Stat related stuff
JSON::Value pStat;
JSON::Value & pData = pStat["proc_status_update"]["status"];
tthread::mutex statsMutex;
uint64_t statSinkMs = 0;
uint64_t statSourceMs = 0;
int64_t bootMsOffset = 0;
namespace Mist{
class ProcessSink : public InputEBML{
public:
ProcessSink(Util::Config *cfg) : InputEBML(cfg){
capa["name"] = "FFMPEG";
};
void getNext(size_t idx = INVALID_TRACK_ID){
{
tthread::lock_guard<tthread::mutex> guard(statsMutex);
if (pData["sink_tracks"].size() != userSelect.size()){
pData["sink_tracks"].null();
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
pData["sink_tracks"].append((uint64_t)it->first);
}
}
}
static bool recurse = false;
if (recurse){return InputEBML::getNext(idx);}
recurse = true;
InputEBML::getNext(idx);
recurse = false;
uint64_t pTime = thisPacket.getTime();
if (thisPacket){
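// Align the encoder output with the source timeline: the first packet coming back is shifted to the timestamp of the first packet sent out, and that same offset is applied to every following packet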
if (!getFirst){
packetTimeDiff = sendPacketTime - pTime;
getFirst = true;
}
pTime += packetTimeDiff;
// change packettime
char *data = thisPacket.getData();
Bit::htobll(data + 12, pTime);
if (pTime >= statSinkMs){statSinkMs = pTime;}
if (meta && meta.getBootMsOffset() != bootMsOffset){meta.setBootMsOffset(bootMsOffset);}
}
}
void setInFile(int stdin_val){
inFile.open(stdin_val);
streamName = opt["sink"].asString();
if (!streamName.size()){streamName = opt["source"].asString();}
Util::streamVariables(streamName, opt["source"].asString());
Util::setStreamName(opt["source"].asString() + "" + streamName);
{
tthread::lock_guard<tthread::mutex> guard(statsMutex);
pStat["proc_status_update"]["sink"] = streamName;
pStat["proc_status_update"]["source"] = opt["source"];
}
if (opt.isMember("target_mask") && !opt["target_mask"].isNull() && opt["target_mask"].asString() != ""){
DTSC::trackValidDefault = opt["target_mask"].asInt();
}
}
bool needsLock(){return false;}
bool isSingular(){return false;}
void connStats(Comms::Connections &statComm){
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
if (it->second){it->second.setStatus(COMM_STATUS_DONOTTRACK | it->second.getStatus());}
}
InputEBML::connStats(statComm);
}
};
class ProcessSource : public OutEBML{
public:
bool isRecording(){return false;}
ProcessSource(Socket::Connection &c) : OutEBML(c){
capa["name"] = "FFMPEG";
targetParams["keeptimes"] = true;
realTime = 0;
};
virtual bool onFinish(){
if (opt.isMember("exit_unmask") && opt["exit_unmask"].asBool()){
if (userSelect.size()){
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
INFO_MSG("Unmasking source track %zu" PRIu64, it->first);
meta.validateTrack(it->first, TRACK_VALID_ALL);
}
}
}
return OutEBML::onFinish();
}
virtual void dropTrack(size_t trackId, const std::string &reason, bool probablyBad = true){
if (opt.isMember("exit_unmask") && opt["exit_unmask"].asBool()){
INFO_MSG("Unmasking source track %zu" PRIu64, trackId);
meta.validateTrack(trackId, TRACK_VALID_ALL);
}
OutEBML::dropTrack(trackId, reason, probablyBad);
}
void sendHeader(){
if (opt["source_mask"].asBool()){
for (std::map<size_t, Comms::Users>::iterator ti = userSelect.begin(); ti != userSelect.end(); ++ti){
if (ti->first == INVALID_TRACK_ID){continue;}
INFO_MSG("Masking source track %zu", ti->first);
meta.validateTrack(ti->first, meta.trackValid(ti->first) & ~(TRACK_VALID_EXT_HUMAN | TRACK_VALID_EXT_PUSH));
}
}
realTime = 0;
OutEBML::sendHeader();
};
void connStats(uint64_t now, Comms::Connections &statComm){
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
if (it->second){it->second.setStatus(COMM_STATUS_DONOTTRACK | it->second.getStatus());}
}
OutEBML::connStats(now, statComm);
}
void sendNext(){
{
tthread::lock_guard<tthread::mutex> guard(statsMutex);
if (pData["source_tracks"].size() != userSelect.size()){
pData["source_tracks"].null();
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
pData["source_tracks"].append((uint64_t)it->first);
}
}
}
if (thisTime > statSourceMs){statSourceMs = thisTime;}
needsLookAhead = 0;
maxSkipAhead = 0;
if (!sendFirst){
sendPacketTime = thisPacket.getTime();
bootMsOffset = M.getBootMsOffset();
sendFirst = true;
}
OutEBML::sendNext();
}
};
}
void sinkThread(void *){
Mist::EncodeInputEBML in(&co);
Mist::ProcessSink in(&co);
co.getOption("output", true).append("-");
co.activate();
@ -45,7 +183,7 @@ void sinkThread(void *){
}
void sourceThread(void *){
Mist::EncodeOutputEBML::init(&conf);
Mist::ProcessSource::init(&conf);
conf.getOption("streamname", true).append(opt["source"].c_str());
if (Enc.isAudio){
@ -60,7 +198,7 @@ void sourceThread(void *){
conf.is_active = true;
Socket::Connection c(pipein[1], 0);
Mist::EncodeOutputEBML out(c);
Mist::ProcessSource out(c);
MEDIUM_MSG("Running source thread...");
out.run();
@ -96,7 +234,7 @@ int main(int argc, char *argv[]){
capa["name"] = "FFMPEG"; // internal name of process
capa["hrn"] = "Encoder: FFMPEG"; // human readable name
capa["desc"] = "Use a local FFMPEG installed binary to do encoding"; // description
capa["sort"] = "n"; // sort the parameters by this key
capa["sort"] = "sort"; // sort the parameters by this key
capa["optional"]["source_mask"]["name"] = "Source track mask";
capa["optional"]["source_mask"]["help"] = "What internal processes should have access to the source track(s)";
@ -112,6 +250,7 @@ int main(int argc, char *argv[]){
capa["optional"]["source_mask"]["select"][4u][0u] = 5;
capa["optional"]["source_mask"]["select"][4u][1u] = "Processing and viewer tasks (not pushes)";
capa["optional"]["source_mask"]["default"] = "";
capa["optional"]["source_mask"]["sort"] = "dba";
capa["optional"]["target_mask"]["name"] = "Output track mask";
capa["optional"]["target_mask"]["help"] = "What internal processes should have access to the output track(s)";
@ -135,10 +274,12 @@ int main(int argc, char *argv[]){
capa["optional"]["target_mask"]["select"][8u][0u] = 0;
capa["optional"]["target_mask"]["select"][8u][1u] = "Nothing";
capa["optional"]["target_mask"]["default"] = "";
capa["optional"]["target_mask"]["sort"] = "dca";
capa["optional"]["exit_unmask"]["name"] = "Undo masks on process exit/fail";
capa["optional"]["exit_unmask"]["help"] = "If/when the process exits or fails, the masks for input tracks will be reset to defaults. (NOT to previous value, but to defaults!)";
capa["optional"]["exit_unmask"]["default"] = false;
capa["optional"]["exit_unmask"]["sort"] = "dda";
capa["required"]["x-LSP-kind"]["name"] = "Input type"; // human readable name of option
capa["required"]["x-LSP-kind"]["help"] = "The type of input to use"; // extra information
@ -147,19 +288,21 @@ int main(int argc, char *argv[]){
capa["required"]["x-LSP-kind"]["select"][0u][1u] = "Video"; // label of first select field
capa["required"]["x-LSP-kind"]["select"][1u][0u] = "audio";
capa["required"]["x-LSP-kind"]["select"][1u][1u] = "Audio";
capa["required"]["x-LSP-kind"]["n"] = 0; // sorting index
capa["required"]["x-LSP-kind"]["influences"][0u] =
"codec"; // changing this parameter influences the parameters listed here
capa["required"]["x-LSP-kind"]["influences"][1u] = "resolution";
capa["required"]["x-LSP-kind"]["influences"][2u] = "sources";
capa["required"]["x-LSP-kind"]["influences"][3u] = "x-LSP-rate_or_crf";
capa["required"]["x-LSP-kind"]["sort"] = "aaaa"; // sorting index
capa["required"]["x-LSP-kind"]["influences"].append("codec");
capa["required"]["x-LSP-kind"]["influences"].append("resolution");
capa["required"]["x-LSP-kind"]["influences"].append("sources");
capa["required"]["x-LSP-kind"]["influences"].append("x-LSP-rate_or_crf");
capa["required"]["x-LSP-kind"]["influences"].append("keys");
capa["required"]["x-LSP-kind"]["influences"].append("keyfrms");
capa["required"]["x-LSP-kind"]["influences"].append("keysecs");
capa["required"]["x-LSP-kind"]["value"] = "video"; // preselect this value
capa["optional"]["source_track"]["name"] = "Input selection";
capa["optional"]["source_track"]["help"] =
"Track ID, codec or language of the source stream to encode.";
capa["optional"]["source_track"]["type"] = "string";
capa["optional"]["source_track"]["n"] = 1;
capa["optional"]["source_track"]["sort"] = "aaa";
capa["optional"]["source_track"]["default"] = "automatic";
capa["optional"]["source_track"]["validate"][0u] = "track_selector_parameter";
@ -170,7 +313,7 @@ int main(int argc, char *argv[]){
capa["required"]["codec"][0u]["select"][0u] = "H264";
capa["required"]["codec"][0u]["select"][1u] = "VP9";
capa["required"]["codec"][0u]["influences"][0u] = "crf";
capa["required"]["codec"][0u]["n"] = 2;
capa["required"]["codec"][0u]["sort"] = "aaab";
capa["required"]["codec"][0u]["dependent"]["x-LSP-kind"] =
"video"; // this field is only shown if x-LSP-kind is set to "video"
@ -182,7 +325,7 @@ int main(int argc, char *argv[]){
capa["required"]["codec"][1u]["select"][2u][0u] = "opus";
capa["required"]["codec"][1u]["select"][2u][1u] = "Opus";
capa["required"]["codec"][1u]["influences"][0u] = "x-LSP-rate_or_crf";
capa["required"]["codec"][1u]["n"] = 2;
capa["required"]["codec"][1u]["sort"] = "aaab";
capa["required"]["codec"][1u]["dependent"]["x-LSP-kind"] = "audio";
capa["optional"]["sink"]["name"] = "Target stream";
@ -191,12 +334,13 @@ int main(int argc, char *argv[]){
capa["optional"]["sink"]["placeholder"] = "source stream";
capa["optional"]["sink"]["type"] = "str";
capa["optional"]["sink"]["validate"][0u] = "streamname_with_wildcard_and_variables";
capa["optional"]["sink"]["n"] = 3;
capa["optional"]["sink"]["sort"] = "daa";
capa["optional"]["resolution"]["name"] = "resolution";
capa["optional"]["resolution"]["help"] = "Resolution of the output stream";
capa["optional"]["resolution"]["help"] = "Resolution of the output stream, e.g. 1920x1080";
capa["optional"]["resolution"]["type"] = "str";
capa["optional"]["resolution"]["n"] = 4;
capa["optional"]["resolution"]["default"] = "keep source resolution";
capa["optional"]["resolution"]["sort"] = "aca";
capa["optional"]["resolution"]["dependent"]["x-LSP-kind"] = "video";
capa["optional"]["x-LSP-rate_or_crf"][0u]["name"] = "Quality";
@ -207,7 +351,7 @@ int main(int argc, char *argv[]){
capa["optional"]["x-LSP-rate_or_crf"][0u]["select"][1u][1u] = "Target bitrate";
capa["optional"]["x-LSP-rate_or_crf"][0u]["select"][2u][0u] = "crf";
capa["optional"]["x-LSP-rate_or_crf"][0u]["select"][2u][1u] = "Target constant rate factor";
capa["optional"]["x-LSP-rate_or_crf"][0u]["n"] = 5;
capa["optional"]["x-LSP-rate_or_crf"][0u]["sort"] = "caa";
capa["optional"]["x-LSP-rate_or_crf"][0u]["influences"][0u] = "crf";
capa["optional"]["x-LSP-rate_or_crf"][0u]["influences"][1u] = "rate";
capa["optional"]["x-LSP-rate_or_crf"][0u]["dependent"]["x-LSP-kind"] = "video";
@ -218,31 +362,123 @@ int main(int argc, char *argv[]){
capa["optional"]["x-LSP-rate_or_crf"][1u]["select"][0u][1u] = "automatic";
capa["optional"]["x-LSP-rate_or_crf"][1u]["select"][1u][0u] = "rate";
capa["optional"]["x-LSP-rate_or_crf"][1u]["select"][1u][1u] = "Target bitrate";
capa["optional"]["x-LSP-rate_or_crf"][1u]["n"] = 5;
capa["optional"]["x-LSP-rate_or_crf"][0u]["sort"] = "caa";
capa["optional"]["x-LSP-rate_or_crf"][1u]["influences"][0u] = "rate";
capa["optional"]["x-LSP-rate_or_crf"][1u]["dependent"]["x-LSP-kind"] = "audio";
capa["optional"]["crf"][0u]["help"] = "Video quality";
capa["optional"]["crf"][0u]["help"] = "Video quality, ranging from 0 (best) to 51 (worst). This value automatically scales with resolution. Around 17 is 'visually lossless', and we find 25 to be a reasonable trade off between quality and bit rate but your mileage may vary.";
capa["optional"]["crf"][0u]["min"] = "0";
capa["optional"]["crf"][0u]["max"] = "51";
capa["optional"]["crf"][0u]["type"] = "int";
capa["optional"]["crf"][0u]["dependent"]["codec"] = "H264";
capa["optional"]["crf"][0u]["dependent"]["x-LSP-rate_or_crf"] = "crf";
capa["optional"]["crf"][0u]["n"] = 6;
capa["optional"]["crf"][0u]["sort"] = "cba";
capa["optional"]["crf"][1u]["help"] = "Video quality";
capa["optional"]["crf"][1u]["help"] = "Video quality, ranging from 0 (best) to 63 (worst). Higher resolution requires a better quality to match, and HD (720p/1080p) generally looks good around 31, but your mileage may vary.";
capa["optional"]["crf"][1u]["min"] = "0";
capa["optional"]["crf"][1u]["max"] = "63";
capa["optional"]["crf"][1u]["type"] = "int";
capa["optional"]["crf"][1u]["dependent"]["codec"] = "VP9";
capa["optional"]["crf"][1u]["dependent"]["x-LSP-rate_or_crf"] = "crf";
capa["optional"]["crf"][1u]["n"] = 7;
capa["optional"]["crf"][1u]["sort"] = "cba";
capa["optional"]["rate"]["name"] = "rate";
capa["optional"]["rate"]["name"] = "Bitrate";
capa["optional"]["rate"]["help"] = "Bitrate of the encoding";
capa["optional"]["rate"]["type"] = "str";
capa["optional"]["rate"]["dependent"]["x-LSP-rate_or_crf"] = "rate";
capa["optional"]["rate"]["n"] = 8;
capa["optional"]["rate"]["sort"] = "cba";
capa["optional"]["min_rate"]["name"] = "Minimum bitrate";
capa["optional"]["min_rate"]["help"] = "Minimum bitrate of the encoding";
capa["optional"]["min_rate"]["type"] = "str";
capa["optional"]["min_rate"]["dependent"]["x-LSP-rate_or_crf"] = "rate";
capa["optional"]["min_rate"]["sort"] = "cbb";
capa["optional"]["max_rate"]["name"] = "Maximum bitrate";
capa["optional"]["max_rate"]["help"] = "Maximum bitrate of the encoding";
capa["optional"]["max_rate"]["type"] = "str";
capa["optional"]["max_rate"]["dependent"]["x-LSP-rate_or_crf"] = "rate";
capa["optional"]["max_rate"]["sort"] = "cbc";
capa["optional"]["profile"]["name"] = "Transcode profile";
capa["optional"]["profile"]["help"] = "Limits the output to a specific H.264 profile";
capa["optional"]["profile"]["type"] = "select";
capa["optional"]["profile"]["select"][0u][0u] = "";
capa["optional"]["profile"]["select"][0u][1u] = "automatic";
capa["optional"]["profile"]["select"][1u][0u] = "baseline";
capa["optional"]["profile"]["select"][1u][1u] = "baseline";
capa["optional"]["profile"]["select"][2u][0u] = "main";
capa["optional"]["profile"]["select"][2u][1u] = "main";
capa["optional"]["profile"]["select"][3u][0u] = "high";
capa["optional"]["profile"]["select"][3u][1u] = "high";
capa["optional"]["profile"]["select"][4u][0u] = "high10";
capa["optional"]["profile"]["select"][4u][1u] = "high10";
capa["optional"]["profile"]["select"][5u][0u] = "high422";
capa["optional"]["profile"]["select"][5u][1u] = "high422";
capa["optional"]["profile"]["select"][6u][0u] = "high444";
capa["optional"]["profile"]["select"][6u][1u] = "high444";
capa["optional"]["profile"]["default"] = "";
capa["optional"]["profile"]["sort"] = "cca";
capa["optional"]["preset"]["name"] = "Transcode preset";
capa["optional"]["preset"]["help"] = "Preset for encoding speed and compression ratio";
capa["optional"]["preset"]["type"] = "select";
capa["optional"]["preset"]["select"][0u][0u] = "ultrafast";
capa["optional"]["preset"]["select"][0u][1u] = "ultrafast";
capa["optional"]["preset"]["select"][1u][0u] = "superfast";
capa["optional"]["preset"]["select"][1u][1u] = "superfast";
capa["optional"]["preset"]["select"][2u][0u] = "veryfast";
capa["optional"]["preset"]["select"][2u][1u] = "veryfast";
capa["optional"]["preset"]["select"][3u][0u] = "faster";
capa["optional"]["preset"]["select"][3u][1u] = "faster";
capa["optional"]["preset"]["select"][4u][0u] = "fast";
capa["optional"]["preset"]["select"][4u][1u] = "fast";
capa["optional"]["preset"]["select"][5u][0u] = "medium";
capa["optional"]["preset"]["select"][5u][1u] = "medium";
capa["optional"]["preset"]["select"][6u][0u] = "slow";
capa["optional"]["preset"]["select"][6u][1u] = "slow";
capa["optional"]["preset"]["select"][7u][0u] = "slower";
capa["optional"]["preset"]["select"][7u][1u] = "slower";
capa["optional"]["preset"]["select"][8u][0u] = "veryslow";
capa["optional"]["preset"]["select"][8u][1u] = "veryslow";
capa["optional"]["preset"]["default"] = "medium";
capa["optional"]["preset"]["sort"] = "ccb";
capa["optional"]["keys"]["name"] = "Keyframes";
capa["optional"]["keys"]["help"] = "What to do with keyframes";
capa["optional"]["keys"]["type"] = "select";
capa["optional"]["keys"]["select"][0u][0u] = "";
capa["optional"]["keys"]["select"][0u][1u] = "Match input keyframes";
capa["optional"]["keys"]["select"][1u][0u] = "frames";
capa["optional"]["keys"]["select"][1u][1u] = "Every X frames";
capa["optional"]["keys"]["select"][2u][0u] = "secs";
capa["optional"]["keys"]["select"][2u][1u] = "Every X seconds";
capa["optional"]["keys"]["default"] = "";
capa["optional"]["keys"]["sort"] = "cda";
capa["optional"]["keys"]["influences"][0u] = "keyfrms";
capa["optional"]["keys"]["influences"][0u] = "keysecs";
capa["optional"]["keys"]["dependent"]["X-LSP-kind"] = "video";
capa["optional"]["keyfrms"]["name"] = "Key interval";
capa["optional"]["keyfrms"]["type"] = "int";
capa["optional"]["keyfrms"]["help"] = "Key interval in frames";
capa["optional"]["keyfrms"]["unit"] = "frames";
capa["optional"]["keyfrms"]["dependent"]["X-LSP-kind"] = "video";
capa["optional"]["keyfrms"]["dependent"]["keys"] = "frames";
capa["optional"]["keyfrms"]["sort"] = "cdb";
capa["optional"]["keysecs"]["name"] = "Key interval";
capa["optional"]["keysecs"]["type"] = "float";
capa["optional"]["keysecs"]["help"] = "Key interval in seconds";
capa["optional"]["keysecs"]["unit"] = "seconds";
capa["optional"]["keysecs"]["dependent"]["X-LSP-kind"] = "video";
capa["optional"]["keysecs"]["dependent"]["keys"] = "secs";
capa["optional"]["keysecs"]["sort"] = "cdb";
capa["optional"]["flags"]["name"] = "Flags";
capa["optional"]["flags"]["help"] = "Extra flags to add to the end of the transcode command";
capa["optional"]["flags"]["type"] = "str";
capa["optional"]["flags"]["sort"] = "cea";
capa["optional"]["sources"]["name"] = "Layers";
capa["optional"]["sources"]["type"] = "sublist";
@ -250,11 +486,11 @@ int main(int argc, char *argv[]){
capa["optional"]["sources"]["help"] =
"List of sources to overlay on top of each other, in order. If left empty, simply uses the "
"input track without modifications and nothing else.";
capa["optional"]["sources"]["n"] = 9;
capa["optional"]["sources"]["sort"] = "n";
capa["optional"]["sources"]["sort"] = "baa";
capa["optional"]["sources"]["dependent"]["x-LSP-kind"] = "video";
capa["optional"]["track_inhibit"]["name"] = "Track inhibitor(s)";
capa["optional"]["track_inhibit"]["sort"] = "aba";
capa["optional"]["track_inhibit"]["help"] =
"What tracks to use as inhibitors. If this track selector is able to select a track, the "
"process does not start. Defaults to none.";
@ -269,6 +505,9 @@ int main(int argc, char *argv[]){
capa["codecs"][0u][0u].append("theora");
capa["codecs"][0u][0u].append("MPEG2");
capa["codecs"][0u][0u].append("AV1");
capa["codecs"][0u][0u].append("YUYV");
capa["codecs"][0u][0u].append("UYVY");
capa["codecs"][0u][0u].append("JPEG");
capa["codecs"][0u][1u].append("AAC");
capa["codecs"][0u][1u].append("vorbis");
capa["codecs"][0u][1u].append("opus");
@ -344,19 +583,88 @@ int main(int argc, char *argv[]){
}
Enc.SetConfig(opt);
// check config for generic options
if (!Enc.CheckConfig()){
FAIL_MSG("Error config syntax error!");
return 1;
}
const std::string & srcStrm = opt["source"].asStringRef();
//connect to source metadata
DTSC::Meta M(srcStrm, false);
//find source video track
std::map<std::string, std::string> targetParams;
targetParams["video"] = "maxbps";
JSON::Value sourceCapa;
sourceCapa["name"] = "FFMPEG";
sourceCapa["codecs"][0u][0u].append("H264");
sourceCapa["codecs"][0u][0u].append("HEVC");
sourceCapa["codecs"][0u][0u].append("VP8");
sourceCapa["codecs"][0u][0u].append("VP9");
sourceCapa["codecs"][0u][0u].append("theora");
sourceCapa["codecs"][0u][0u].append("MPEG2");
sourceCapa["codecs"][0u][0u].append("AV1");
sourceCapa["codecs"][0u][0u].append("JPEG");
sourceCapa["codecs"][0u][0u].append("YUYV");
sourceCapa["codecs"][0u][0u].append("UYVY");
sourceCapa["codecs"][0u][0u].append("NV12");
sourceCapa["codecs"][0u][1u].append("AAC");
sourceCapa["codecs"][0u][1u].append("FLAC");
sourceCapa["codecs"][0u][1u].append("vorbis");
sourceCapa["codecs"][0u][1u].append("opus");
sourceCapa["codecs"][0u][1u].append("PCM");
sourceCapa["codecs"][0u][1u].append("ALAW");
sourceCapa["codecs"][0u][1u].append("ULAW");
sourceCapa["codecs"][0u][1u].append("MP2");
sourceCapa["codecs"][0u][1u].append("MP3");
sourceCapa["codecs"][0u][1u].append("FLOAT");
sourceCapa["codecs"][0u][1u].append("AC3");
sourceCapa["codecs"][0u][1u].append("DTS");
if (Enc.isVideo){
if (opt.isMember("source_track") && opt["source_track"].isString() && opt["source_track"]){
targetParams["video"] = opt["source_track"].asStringRef();
}else{
targetParams["video"] = "";
}
}else{
targetParams["video"] = "none";
}
if (Enc.isAudio){
if (opt.isMember("source_track") && opt["source_track"].isString() && opt["source_track"]){
targetParams["audio"] = opt["source_track"].asStringRef();
}else{
targetParams["audio"] = "";
}
}else{
targetParams["audio"] = "none";
}
size_t sourceIdx = INVALID_TRACK_ID;
size_t sleeps = 0;
while (++sleeps < 60 && (sourceIdx == INVALID_TRACK_ID || (Enc.isVideo && (!M.getWidth(sourceIdx) || !M.getHeight(sourceIdx))))){
M.reloadReplacedPagesIfNeeded();
std::set<size_t> vidTrack = Util::wouldSelect(M, targetParams, sourceCapa);
sourceIdx = vidTrack.size() ? (*(vidTrack.begin())) : INVALID_TRACK_ID;
if (sourceIdx == INVALID_TRACK_ID || (Enc.isVideo && (!M.getWidth(sourceIdx) || !M.getHeight(sourceIdx)))){
Util::sleep(250);
}
}
if (sourceIdx == INVALID_TRACK_ID || (Enc.isVideo && (!M.getWidth(sourceIdx) || !M.getHeight(sourceIdx)))){
FAIL_MSG("No valid source track!");
return 1;
}
if (Enc.isVideo){
Enc.setResolution(M.getWidth(sourceIdx), M.getHeight(sourceIdx));
}
// create pipe pair before thread
if (pipe(pipein) || pipe(pipeout)){
FAIL_MSG("Could not create pipes for process!");
return 1;
}
Util::Procs::socketList.insert(pipeout[0]);
Util::Procs::socketList.insert(pipeout[1]);
Util::Procs::socketList.insert(pipein[0]);
Util::Procs::socketList.insert(pipein[1]);
// stream which connects to input
@ -392,107 +700,6 @@ int main(int argc, char *argv[]){
namespace Mist{
bool EncodeOutputEBML::onFinish(){
if (opt.isMember("exit_unmask") && opt["exit_unmask"].asBool()){
if (userSelect.size()){
for (std::map<size_t, Comms::Users>::iterator it = userSelect.begin(); it != userSelect.end(); it++){
INFO_MSG("Unmasking source track %zu" PRIu64, it->first);
meta.validateTrack(it->first, TRACK_VALID_ALL);
}
}
}
return OutEBML::onFinish();
}
void EncodeOutputEBML::dropTrack(size_t trackId, const std::string &reason, bool probablyBad){
if (opt.isMember("exit_unmask") && opt["exit_unmask"].asBool()){
INFO_MSG("Unmasking source track %zu" PRIu64, trackId);
meta.validateTrack(trackId, TRACK_VALID_ALL);
}
OutEBML::dropTrack(trackId, reason, probablyBad);
}
void EncodeInputEBML::getNext(size_t idx){
static bool recurse = false;
// getNext is called recursively, only process the first call
if (recurse){return InputEBML::getNext(idx);}
recurse = true;
InputEBML::getNext(idx);
if (thisPacket){
if (!getFirst){
packetTimeDiff = sendPacketTime - thisPacket.getTime();
getFirst = true;
}
uint64_t tmpLong;
uint64_t packTime = thisPacket.getTime() + packetTimeDiff;
// change packettime
char *data = thisPacket.getData();
tmpLong = htonl((int)(packTime >> 32));
memcpy(data + 12, (char *)&tmpLong, 4);
tmpLong = htonl((int)(packTime & 0xFFFFFFFF));
memcpy(data + 16, (char *)&tmpLong, 4);
}
recurse = false;
}
void EncodeInputEBML::setInFile(int stdin_val){
inFile.open(stdin_val);
streamName = opt["sink"].asString();
if (!streamName.size()){streamName = opt["source"].asString();}
Util::streamVariables(streamName, opt["source"].asString());
Util::setStreamName(opt["source"].asString() + "" + streamName);
if (opt.isMember("target_mask") && !opt["target_mask"].isNull() && opt["target_mask"].asString() != ""){
DTSC::trackValidDefault = opt["target_mask"].asInt();
}
}
std::string EncodeOutputEBML::getTrackType(int tid){return M.getType(tid);}
void EncodeOutputEBML::setVideoTrack(std::string tid){
std::set<size_t> tracks = Util::findTracks(M, capa, "video", tid);
for (std::set<size_t>::iterator it = tracks.begin(); it != tracks.end(); it++){
userSelect[*it].reload(streamName, *it);
}
}
void EncodeOutputEBML::setAudioTrack(std::string tid){
std::set<size_t> tracks = Util::findTracks(M, capa, "audio", tid);
for (std::set<size_t>::iterator it = tracks.begin(); it != tracks.end(); it++){
userSelect[*it].reload(streamName, *it);
}
}
void EncodeOutputEBML::sendHeader(){
realTime = 0;
size_t idx = getMainSelectedTrack();
if (opt.isMember("source_mask") && !opt["source_mask"].isNull() && opt["source_mask"].asString() != ""){
uint64_t sourceMask = opt["source_mask"].asInt();
INFO_MSG("Masking source track %zu to %" PRIu64, idx, sourceMask);
meta.validateTrack(idx, sourceMask);
}
res_x = M.getWidth(idx);
res_y = M.getHeight(idx);
Enc.setResolution(res_x, res_y);
OutEBML::sendHeader();
}
void EncodeOutputEBML::sendNext(){
if (!sendFirst){
sendPacketTime = thisPacket.getTime();
sendFirst = true;
}
OutEBML::sendNext();
}
void OutENC::SetConfig(JSON::Value &config){opt = config;}
bool OutENC::checkAudioConfig(){
@ -531,128 +738,134 @@ namespace Mist{
}
bool OutENC::buildVideoCommand(){
uint64_t t_limiter = Util::bootSecs();
while (res_x == 0){
if (Util::bootSecs() < t_limiter + 5){
Util::sleep(100);
MEDIUM_MSG("waiting res_x to be set!");
}else{
FAIL_MSG("timeout, resolution is not set!");
return false;
}
MEDIUM_MSG("source resolution: %dx%d", res_x, res_y);
if (!res_x){
FAIL_MSG("Resolution is not set!");
return false;
}
std::string s_input = "";
std::string s_overlay = "";
std::string s_scale = "";
std::string options = "";
MEDIUM_MSG("source resolution: %dx%d", res_x, res_y);
// load all sources and construct overlay code
if (opt["sources"].isArray()){
char in[255] = "";
char ov[255] = "";
// Init variables used to construct the FFMPEG command
char in[255] = "";
char ov[255] = "";
std::string s_base = "ffmpeg -fflags nobuffer -probesize 32 -max_probe_packets 1 -hide_banner -loglevel warning"; //< Base FFMPEG command
std::string s_input = ""; //< Inputs of the filter graph
std::string s_filter = ""; //< Filter graph to use
std::string s_scale = ""; //< Scaling inputs of the filter graph
std::string s_overlay = ""; //< Positioning inputs of the filter graph
std::string options = ""; //< Transcode params
for (JSON::Iter it(opt["sources"]); it; ++it){
if ((*it).isMember("src") && (*it)["src"].isString() && (*it)["src"].asString().size() > 3){
std::string src = (*it)["src"].asString();
std::string ext = src.substr(src.length() - 3);
if (ext == "gif"){// for animated gif files, prepend extra parameter
sprintf(in, " -ignore_loop 0 -i %s", src.c_str());
}else{
sprintf(in, " -i %s", src.c_str());
}
MEDIUM_MSG("Loading Input: %s", src.c_str());
}else{
sprintf(in, " -i %s", "-");
INFO_MSG("no src given, asume reading data from stdin");
MEDIUM_MSG("Loading Input: -");
}
s_input += in;
uint32_t i_width = -1;
uint32_t i_height = -1;
int32_t i_x = 0;
int32_t i_y = 0;
std::string i_anchor = "topleft";
if ((*it).isMember("width") && (*it)["width"].asInt()){i_width = (*it)["width"].asInt();}
if ((*it).isMember("height") && (*it)["height"].asInt()){
i_height = (*it)["height"].asInt();
}
if ((*it).isMember("x")){i_x = (*it)["x"].asInt();}
if ((*it).isMember("y")){i_y = (*it)["y"].asInt();}
if ((*it).isMember("anchor") && (*it)["anchor"].isString()){
i_anchor = (*it)["anchor"].asString();
}
char scale[200];
sprintf(scale, ";[%d:v]scale=%d:%d[s%d]", it.num() + 1, i_width, i_height, it.num());
s_scale.append(scale);
char in_chain[16];
if (it.num() == 0){
sprintf(in_chain, ";[0:v][s%d]", it.num());
}else{
sprintf(in_chain, ";[out][s%d]", it.num());
}
if ((*it)["anchor"] == "topright"){
sprintf(ov, "overlay=W-w-%d:%d[out]", i_x, i_y);
}else if ((*it)["anchor"] == "bottomleft"){
sprintf(ov, "overlay=%d:H-h-%d[out]", i_x, i_y);
}else if ((*it)["anchor"] == "bottomright"){
sprintf(ov, "overlay=W-w-%d:H-h-%d[out]", i_x, i_y);
}else if ((*it)["anchor"] == "center"){
sprintf(ov, "overlay=(W-w)/2:(H-h)/2[out]");
}else{// topleft default
sprintf(ov, "overlay=%d:%d[out]", i_x, i_y);
}
s_overlay.append(in_chain);
s_overlay.append(ov);
}
s_scale = s_scale.substr(1);
s_overlay = s_scale + s_overlay;
if (res_x > 0 || res_y > 0){// video scaling
sprintf(ov, ";[out]scale=%d:%d,setsar=1:1[out]", res_x, res_y);
}
s_overlay.append(ov);
HIGH_MSG("overlay: %s", s_overlay.c_str());
}
// video scaling
if (res_x > 0 || res_y > 0){
if (s_overlay.size() == 0){
char ov[100];
sprintf(ov, " -filter_complex '[0:v]scale=%d:%d,setsar=1:1[out]' -map [out]", res_x, res_y);
s_overlay.append(ov);
}else{
s_overlay = "-filter_complex " + s_overlay + " -map [out]";
}
// Init filter graph
bool requiresOverlay = opt["sources"].size() > 1;
if (requiresOverlay){
// Complex filter graph: overlay each source over a black background
s_base.append(" -f lavfi");
s_filter = " -filter_complex ";
char in[50] = "";
sprintf(in, " -i color=c=black:s=%dx%d", res_x, res_y);
s_input = in;
}else{
if (s_overlay.size() > 0){s_overlay = "-filter_complex '" + s_overlay + "' -map [out]";}
// Simple filter graph
s_filter = " -vf ";
}
// Add sources to input, scaling and positioning strings
for (JSON::Iter it(opt["sources"]); it; ++it){
// Add source to input string
if ((*it).isMember("src") && (*it)["src"].isString() && (*it)["src"].asString().size() > 3){
std::string src = (*it)["src"].asString();
std::string ext = src.substr(src.length() - 3);
if (ext == "gif"){// for animated gif files, prepend extra parameter
sprintf(in, " -ignore_loop 0 -i %s", src.c_str());
}else{
sprintf(in, " -i %s", src.c_str());
}
MEDIUM_MSG("Loading Input: %s", src.c_str());
}else{
sprintf(in, " -i %s", "-");
INFO_MSG("no src given, assume reading data from stdin");
MEDIUM_MSG("Loading Input: -");
}
s_input += in;
// No complex scaling and positioning required if there's only one source
if(!requiresOverlay){ continue; }
// Init scaling and positioning params
uint32_t i_width = -1;
uint32_t i_height = -1;
int32_t i_x = 0;
int32_t i_y = 0;
std::string i_anchor = "topleft";
if ((*it).isMember("width") && (*it)["width"].asInt()){i_width = (*it)["width"].asInt();}
if ((*it).isMember("height") && (*it)["height"].asInt()){i_height = (*it)["height"].asInt();}
if ((*it).isMember("x")){i_x = (*it)["x"].asInt();}
if ((*it).isMember("y")){i_y = (*it)["y"].asInt();}
if ((*it).isMember("anchor") && (*it)["anchor"].isString()){
i_anchor = (*it)["anchor"].asString();
}
// Scale input
char scale[200];
sprintf(scale, ";[%d:v]scale=%d:%d[s%d]", it.num() + 1, i_width, i_height, it.num());
s_scale.append(scale);
// Position input
char in_chain[16];
if (it.num() == 0){
sprintf(in_chain, ";[0:v][s%d]", it.num());
}else{
sprintf(in_chain, ";[out][s%d]", it.num());
}
if ((*it)["anchor"] == "topright"){
sprintf(ov, "overlay=W-w-%d:%d[out]", i_x, i_y);
}else if ((*it)["anchor"] == "bottomleft"){
sprintf(ov, "overlay=%d:H-h-%d[out]", i_x, i_y);
}else if ((*it)["anchor"] == "bottomright"){
sprintf(ov, "overlay=W-w-%d:H-h-%d[out]", i_x, i_y);
}else if ((*it)["anchor"] == "center"){
sprintf(ov, "overlay=(W-w)/2:(H-h)/2[out]");
}else{// topleft default
sprintf(ov, "overlay=%d:%d[out]", i_x, i_y);
}
s_overlay.append(in_chain);
s_overlay.append(ov);
}
// Finish filter graph
if (requiresOverlay){
s_scale = s_scale.substr(1); //< Remove `;` char at the start
sprintf(ov, ";[out]scale=%d:%d,setsar=1:1[out] -map [out]", res_x, res_y);
s_overlay.append(ov);
}else{
sprintf(ov, "scale=%d:%d,setsar=1:1", res_x, res_y);
s_scale.append(ov);
}
// Set transcode parameters
options = codec;
if (!profile.empty()){options.append(" -profile:v " + profile);}
if (!preset.empty()){options.append(" -preset " + preset);}
std::string bitrateSettings = getBitrateSetting();
if (!bitrateSettings.empty()){options.append(" " + bitrateSettings);}
if (!flags.empty()){options.append(" " + flags);}
if (!opt.isMember("keys") || !opt["keys"].asStringRef().size()){
options += " -force_key_frames source";
}else if (opt["keys"].asStringRef() == "frames"){
options += " -g ";
options += opt["keyfrms"].asString();
}else if (opt["keys"].asStringRef() == "secs"){
options += " -force_key_frames expr:gte(t,n_forced*";
options += opt["keysecs"].asString();
options += ")";
}else{
options += " -force_key_frames source";
}
snprintf(ffcmd, 10240, "ffmpeg -fflags nobuffer -hide_banner -loglevel warning -f lavfi -i color=c=black:s=%dx%d %s %s -c:v %s %s %s %s -an -force_key_frames source -f matroska - ",
res_x, res_y, s_input.c_str(), s_overlay.c_str(), codec.c_str(), options.c_str(),
getBitrateSetting().c_str(), flags.c_str());
// Construct final command
snprintf(ffcmd, 10240, "%s%s%s%s%s -c:v %s -an -f matroska - ",
s_base.c_str(), s_input.c_str(), s_filter.c_str(), s_scale.c_str(), s_overlay.c_str(), options.c_str());
INFO_MSG("Constructed FFMPEG video command: %s", ffcmd);
return true;
}
@ -740,7 +953,9 @@ namespace Mist{
profile = opt["profile"].asString();
}
if (opt.isMember("preset") && opt["preset"].isString()){preset = opt["preset"].asString();}
if (opt.isMember("preset") && opt["preset"].isString()){
preset = opt["preset"].asString();
}
if (opt.isMember("crf") && opt["crf"].isInt()){setCRF(opt["crf"].asInt());}
@ -809,22 +1024,22 @@ namespace Mist{
std::string min_rate;
std::string max_rate;
if (opt.isMember("bitrate") && opt["bitrate"].isString()){
b_rate = opt["bitrate"].asString();
if (opt.isMember("rate") && opt["rate"].isString()){
b_rate = opt["rate"].asString();
}
if (opt.isMember("min_bitrate") && opt["min_bitrate"].isString()){
min_rate = opt["min_bitrate"].asString();
if (opt.isMember("min_rate") && opt["min_rate"].isString()){
min_rate = opt["min_rate"].asString();
}
if (opt.isMember("max_bitrate") && opt["max_bitrate"].isString()){
max_rate = opt["max_bitrate"].asString();
if (opt.isMember("max_rate") && opt["max_rate"].isString()){
max_rate = opt["max_rate"].asString();
}
setBitrate(b_rate, min_rate, max_rate);
// extra ffmpeg flags
if (opt.isMember("flags") && opt["flags"].isString()){flags = opt["bitrate"].asString();}
if (opt.isMember("flags") && opt["flags"].isString()){flags = opt["flags"].asString();}
// Check configuration and construct ffmpeg command based on audio or video encoding
if (isVideo){
@ -866,7 +1081,6 @@ namespace Mist{
}
void OutENC::Run(){
Util::Procs p;
int ffer = 2;
pid_t ffout = -1;
@ -887,13 +1101,32 @@ namespace Mist{
}
prepareCommand();
ffout = p.StartPiped(args, &pipein[0], &pipeout[1], &ffer);
ffout = Util::Procs::StartPiped(args, &pipein[0], &pipeout[1], &ffer);
while (conf.is_active && p.isRunning(ffout)){Util::sleep(200);}
uint64_t lastProcUpdate = Util::bootSecs();
{
tthread::lock_guard<tthread::mutex> guard(statsMutex);
pStat["proc_status_update"]["id"] = getpid();
pStat["proc_status_update"]["proc"] = "FFMPEG";
pData["ainfo"]["child_pid"] = ffout;
//pData["ainfo"]["cmd"] = opt["exec"];
}
uint64_t startTime = Util::bootSecs();
while (conf.is_active && Util::Procs::isRunning(ffout)){
Util::sleep(200);
if (lastProcUpdate + 5 <= Util::bootSecs()){
tthread::lock_guard<tthread::mutex> guard(statsMutex);
pData["active_seconds"] = (Util::bootSecs() - startTime);
pData["ainfo"]["sourceTime"] = statSourceMs;
pData["ainfo"]["sinkTime"] = statSinkMs;
Util::sendUDPApi(pStat);
lastProcUpdate = Util::bootSecs();
}
}
while (p.isRunning(ffout)){
MEDIUM_MSG("stopping ffmpeg...");
p.StopAll();
while (Util::Procs::isRunning(ffout)){
INFO_MSG("Stopping process...");
Util::Procs::StopAll();
Util::sleep(200);
}

View file

@ -40,25 +40,4 @@ namespace Mist{
std::set<std::string> supportedAudioCodecs;
};
class EncodeInputEBML : public InputEBML{
public:
EncodeInputEBML(Util::Config *cfg) : InputEBML(cfg){};
void getNext(size_t idx = INVALID_TRACK_ID);
void setInFile(int stdin_val);
bool needsLock(){return false;}
bool isSingular(){return false;}
};
class EncodeOutputEBML : public OutEBML{
public:
virtual bool onFinish();
virtual void dropTrack(size_t trackId, const std::string &reason, bool probablyBad = true);
EncodeOutputEBML(Socket::Connection &c) : OutEBML(c){}; // realTime = 0;};
bool isRecording(){return false;}
void setVideoTrack(std::string tid);
void setAudioTrack(std::string tid);
void sendNext();
void sendHeader();
std::string getTrackType(int tid);
};
}// namespace Mist