Validators for load tests, and load test scripts.

Thulinma 2016-05-28 17:43:49 +02:00
parent 2cec1f7836
commit 482da790ea
18 changed files with 2222 additions and 11 deletions


@ -277,6 +277,8 @@ makeAnalyser(RAX rax)
makeAnalyser(RTSP rtsp_rtp) #LTS
makeAnalyser(TS ts) #LTS
makeAnalyser(H264 h264) #LTS
makeAnalyser(HLS hls) #LTS
makeAnalyser(DASH dash) #LTS
makeAnalyser(TSStream tsstream) #LTS
makeAnalyser(Stats stats) #LTS

233
scripts/capa_service.cpp Executable file

@ -0,0 +1,233 @@
#include <stdio.h>
#include <string.h>
#include <fstream>
#include <set>
#include <iostream>
#include <mist/json.h>
#include <mist/timing.h>
///\brief A class storing information about the CPU the server is running on.
class cpudata{
public:
std::string model;///<A string describing the CPU model.
int cores;///<The number of cores in the CPU.
int threads;///<The number of threads this CPU can run.
int mhz;///<The speed of the CPU in MHz.
int id;///<The id of the CPU in the system.
///\brief The default constructor
cpudata(){
model = "Unknown";
cores = 1;
threads = 1;
mhz = 0;
id = 0;
}
///\brief Fills the structure by parsing a given description.
///\param data A description of the cpu.
void fill(char * data){
int i;
i = 0;
if (sscanf(data, "model name : %n", &i) != EOF && i > 0){
model = (data + i);
}
if (sscanf(data, "cpu cores : %d", &i) == 1){
cores = i;
}
if (sscanf(data, "siblings : %d", &i) == 1){
threads = i;
}
if (sscanf(data, "physical id : %d", &i) == 1){
id = i;
}
if (sscanf(data, "cpu MHz : %d", &i) == 1){
mhz = i;
}
}
};
///\brief Checks the capabilities of the system.
///\param capa The location to store the capabilities.
void checkCapable(JSON::Value & capa){
//capa.null();
capa.removeMember("cpu");
std::ifstream cpuinfo("/proc/cpuinfo");
if (cpuinfo){
std::map<int, cpudata> cpus;
char line[300];
int proccount = -1;
while (cpuinfo.good()){
cpuinfo.getline(line, 300);
if (cpuinfo.fail()){
//empty lines? ignore them, clear flags, continue
if ( !cpuinfo.eof()){
cpuinfo.ignore();
cpuinfo.clear();
}
continue;
}
if (memcmp(line, "processor", 9) == 0){
proccount++;
}
cpus[proccount].fill(line);
}
//fix wrong core counts
std::map<int, int> corecounts;
for (int i = 0; i <= proccount; ++i){
corecounts[cpus[i].id]++;
}
//remove double physical IDs - we only want real CPUs.
std::set<int> used_physids;
int total_speed = 0;
int total_threads = 0;
for (int i = 0; i <= proccount; ++i){
if ( !used_physids.count(cpus[i].id)){
used_physids.insert(cpus[i].id);
JSON::Value thiscpu;
thiscpu["model"] = cpus[i].model;
thiscpu["cores"] = cpus[i].cores;
if (cpus[i].cores < 2 && corecounts[cpus[i].id] > cpus[i].cores){
thiscpu["cores"] = corecounts[cpus[i].id];
}
thiscpu["threads"] = cpus[i].threads;
if (thiscpu["cores"].asInt() > thiscpu["threads"].asInt()){
thiscpu["threads"] = thiscpu["cores"];
}
thiscpu["mhz"] = cpus[i].mhz;
capa["cpu"].append(thiscpu);
total_speed += cpus[i].cores * cpus[i].mhz;
total_threads += cpus[i].threads;
}
}
capa["speed"] = total_speed;
capa["threads"] = total_threads;
}
std::ifstream cpuUsage("/proc/stat");
if (cpuUsage){
char line[300];
cpuUsage.getline(line, 300);
long long int i, o, p, q;
if (sscanf(line, "cpu %lli %lli %lli %lli ", &i, &o, &p, &q) == 4){
capa["usage"]["user"] = i;
capa["usage"]["nice"] = o;
capa["usage"]["system"] = p;
capa["usage"]["idle"] = q;
}else{
std::cerr << "HALP!" << std::endl;
}
}
std::ifstream meminfo("/proc/meminfo");
if (meminfo){
char line[300];
int bufcache = 0;
while (meminfo.good()){
meminfo.getline(line, 300);
if (meminfo.fail()){
//empty lines? ignore them, clear flags, continue
if ( !meminfo.eof()){
meminfo.ignore();
meminfo.clear();
}
continue;
}
long long int i;
if (sscanf(line, "MemTotal : %lli kB", &i) == 1){
capa["mem"]["total"] = i;
}
if (sscanf(line, "MemFree : %lli kB", &i) == 1){
capa["mem"]["free"] = i;
}
if (sscanf(line, "SwapTotal : %lli kB", &i) == 1){
capa["mem"]["swaptotal"] = i;
}
if (sscanf(line, "SwapFree : %lli kB", &i) == 1){
capa["mem"]["swapfree"] = i;
}
if (sscanf(line, "Buffers : %lli kB", &i) == 1){
bufcache += i;
}
if (sscanf(line, "Cached : %lli kB", &i) == 1){
bufcache += i;
}
}
capa["mem"]["used"] = capa["mem"]["total"].asInt() - capa["mem"]["free"].asInt() - bufcache;
capa["mem"]["cached"] = bufcache;
capa["load"]["memory"] = ((capa["mem"]["used"].asInt() + (capa["mem"]["swaptotal"].asInt() - capa["mem"]["swapfree"].asInt())) * 100) / capa["mem"]["total"].asInt();
}
std::ifstream loadavg("/proc/loadavg");
if (loadavg){
char line[300];
loadavg.getline(line, 300);
//parse lines here
float onemin;
if (sscanf(line, "%f %*f %*f", &onemin) == 1){
capa["load"]["one"] = (long long int)(onemin*1000);
}
}
std::ifstream netUsage("/proc/net/dev");
capa["net"]["sent"] = 0;
capa["net"]["recv"] = 0;
while (netUsage){
char line[300];
netUsage.getline(line, 300);
long long unsigned sent = 0;
long long unsigned recv = 0;
//std::cout << line;
if (sscanf(line, "%*s %llu %*u %*u %*u %*u %*u %*u %*u %llu", &recv, &sent) == 2){
//std::cout << "Net: " << recv << ", " << sent << std::endl;
capa["net"]["recv"] = (long long int)(capa["net"]["recv"].asInt() + recv);
capa["net"]["sent"] = (long long int)(capa["net"]["sent"].asInt() + sent);
}
}
}
int main(int argc, char** argv){
if (argc < 2){
std::cerr << "Usage: " << argv[0] << " <output.csv>" << std::endl;
return 1;
}
JSON::Value stats;
checkCapable(stats);
std::ofstream file(argv[1]);
file << "Time in seconds,1m load average,Memory use in bytes,CPU percentage,Uploaded bytes,Downloaded bytes" << std::endl;
long long int totalCpu = 0;
long long int grandTotal = 0;
long long int usrCpu = 0;
long long int niceCpu = 0;
long long int systemCpu = 0;
long long int prevUsrCpu = stats["usage"]["user"].asInt();
long long int prevNiceCpu = stats["usage"]["nice"].asInt();
long long int prevSystemCpu = stats["usage"]["system"].asInt();
long long int prevIdleCpu = stats["usage"]["idle"].asInt();
long long int startUpload = stats["net"]["sent"].asInt();
long long int startDownload = stats["net"]["recv"].asInt();
long long int startTime = Util::epoch();
long long int lastTime = 0;
while (true){
Util::sleep(500);//faster than once per second, just in case we go out of sync somewhere
if (lastTime == Util::epoch()){
continue;//if a second hasn't passed yet, skip this run
}
lastTime = Util::epoch();
checkCapable(stats);
file << (lastTime - startTime) << ",";//time since start
file << (double)stats["load"]["one"].asInt()/1000.0 << ","; //LoadAvg
file << stats["mem"]["used"].asString() << ","; //MemUse
usrCpu = stats["usage"]["user"].asInt() - prevUsrCpu;
niceCpu = stats["usage"]["nice"].asInt() - prevNiceCpu;
systemCpu = stats["usage"]["system"].asInt() - prevSystemCpu;
totalCpu = usrCpu + niceCpu + systemCpu;
grandTotal = totalCpu + stats["usage"]["idle"].asInt() - prevIdleCpu;
if (grandTotal != 0){
file << 100 * (double)totalCpu / grandTotal << ",";//totalCpu
}else{
file << "," << std::endl;//unknown CPU usage
}
file << (stats["net"]["sent"].asInt() - startUpload) << "," << (stats["net"]["recv"].asInt() - startDownload) << std::endl;
prevUsrCpu = stats["usage"]["user"].asInt();
prevNiceCpu = stats["usage"]["nice"].asInt();
prevSystemCpu = stats["usage"]["system"].asInt();
prevIdleCpu = stats["usage"]["idle"].asInt();
}
return 0;
}
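
For reference, a minimal sketch of building and running this logger by hand (assuming libmist and its headers are installed; the output path is only an example) — install_capa.sh further down wires the same binary up as a systemd unit:

  g++ -Wall -o capa_service capa_service.cpp -lmist
  ./capa_service ./log.csv &
  head -n 3 ./log.csv   # CSV header plus roughly one sample per second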

53
scripts/crashTests Executable file

@ -0,0 +1,53 @@
#!/bin/bash
#Q3 2014 style
#BATCHES="-b \"1 1\" -b \"1 300\" -b \"1 600\" -b \"1 1200\""
#TIMES="-t 60 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#Q2 2014 style
#BATCHES="-b \"1 1\" -b \"1 100\" -b \"1 1000\" -b \"1 1200\""
#TIMES="-t 60 -t 180 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#Q1 2014 style
#BATCHES="-b \"2 50\" -b \"4 50\" -b \"8 50\" -b \"16 50\" -b \"24 50\""
#TIMES="-t 60 -t 300 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#single run
BATCHES="-b \"1 42\""
TIMES="-t 60"
#BATCHES="-b \"1 1000\" -b \"1 5000\" -b \"1 10000\""
#TIMES="-t 5"
SERVCONF="-s krabs -l /home/thulinma/capa_service/"
#empty the run-script
rm scriptje.sh
touch scriptje.sh
#DASH
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m http://krabs:1935/vod/_definst_/mp4:example1_low.mp4/manifest_mvtime_w1216364513.mpd" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/dash/example1_low/index.mpd" >> scriptje.sh
#HLS
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m http://krabs:1935/vod/mp4:example1_low.mp4/playlist.m3u8" >> scriptje.sh
echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/hls/example1_low/index.m3u8" >> scriptje.sh
#echo "./loadTest $SERVCONF -p flussonic $BATCHES $TIMES -m http://krabs/vod/example1_low.mp4/tracks-1,2/index.m3u8" >> scriptje.sh
#RTMP
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m rtmp://krabs/vod/mp4:example1_low.mp4" >> scriptje.sh
#echo "./loadTest $SERVCONF -p nginx $BATCHES $TIMES -m rtmp://krabs/vod2/example1_low.mp4" >> scriptje.sh
#echo "./loadTest $SERVCONF -p adobe $BATCHES $TIMES -m rtmp://krabs/vod/mp4:tests/example1_low.flv" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m rtmp://krabs/a/example1_low" >> scriptje.sh
#echo "./loadTest $SERVCONF -p flussonic $BATCHES $TIMES -m rtmp://krabs/vod/example1_low.mp4.mp4" >> scriptje.sh
#FLV
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/example1_low.flv" >> scriptje.sh
#run it
. scriptje.sh
~/yup

56
scripts/genericTests Executable file

@ -0,0 +1,56 @@
#!/bin/bash
#Q3 2014 style
#BATCHES="-b \"1 1\" -b \"1 300\" -b \"1 600\" -b \"1 1200\""
#TIMES="-t 60 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#Q2 2014 style
#BATCHES="-b \"1 1\" -b \"1 100\" -b \"1 1000\" -b \"1 1200\""
#TIMES="-t 60 -t 180 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#Q1 2014 style
#BATCHES="-b \"2 50\" -b \"4 50\" -b \"8 50\" -b \"16 50\" -b \"24 50\""
#TIMES="-t 60 -t 300 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#single run
#BATCHES="-b \"1 1200\""
BATCHES="-b \"600 1\""
TIMES="-t 10"
#BATCHES="-b \"1 1000\" -b \"1 5000\" -b \"1 10000\""
#TIMES="-t 5"
SERVCONF="-s krabs -l /home/thulinma/capa_service/"
#empty the run-script
rm scriptje.sh
touch scriptje.sh
#DASH
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m http://krabs:1935/vod/_definst_/mp4:example1_low.mp4/manifest_mvtime_w1216364513.mpd" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/dash/example1_low/index.mpd" >> scriptje.sh
#HLS
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m http://krabs:1935/vod/mp4:example1_low.mp4/playlist.m3u8" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/hls/example1_low/index.m3u8" >> scriptje.sh
#echo "./loadTest $SERVCONF -p flussonic $BATCHES $TIMES -m http://krabs/vod/example1_low.mp4/tracks-1,2/index.m3u8" >> scriptje.sh
#RTMP
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m rtmp://krabs/vod/mp4:example1_low.mp4" >> scriptje.sh
#echo "./loadTest $SERVCONF -p nginx $BATCHES $TIMES -m rtmp://krabs/vod2/example1_low.mp4" >> scriptje.sh
#echo "./loadTest $SERVCONF -p adobe $BATCHES $TIMES -m rtmp://krabs/vod/mp4:tests/example1_low.flv" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m rtmp://krabs/a/example1_low" >> scriptje.sh
#echo "./loadTest $SERVCONF -p flussonic $BATCHES $TIMES -m rtmp://krabs/vod/example1_low.mp4.mp4" >> scriptje.sh
#FLV
echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/example1_low.flv" >> scriptje.sh
#MP4
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/example1_low.mp4" >> scriptje.sh
#run it
. scriptje.sh
~/yup

58
scripts/genericTestsSimple Executable file

@ -0,0 +1,58 @@
#!/bin/bash
#Q3 2014 style
#BATCHES="-b \"1 1\" -b \"1 300\" -b \"1 600\" -b \"1 1200\""
#TIMES="-t 60 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#Q2 2014 style
#BATCHES="-b \"1 1\" -b \"1 100\" -b \"1 1000\" -b \"1 1200\""
#TIMES="-t 60 -t 180 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#Q1 2014 style
#BATCHES="-b \"2 50\" -b \"4 50\" -b \"8 50\" -b \"16 50\" -b \"24 50\""
#TIMES="-t 60 -t 300 -t 600"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#single run
#BATCHES="-b \"1 127\""
#TIMES="-t 30"
BATCHES="-b \"1 50\" -b \"1 100\" -b \"1 150\" -b \"1 200\" -b \"1 250\" -b \"1 300\" -b \"1 350\""
TIMES="-t 30"
#BATCHES="-b \"1 1000\" -b \"1 5000\" -b \"1 10000\""
#TIMES="-t 5"
#empty the run-script
rm scriptje.sh
touch scriptje.sh
#DASH
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m http://krabs:1935/vod/_definst_/mp4:example1_low.mp4/manifest_mvtime_w1216364513.mpd" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/dash/example1_low/index.mpd" >> scriptje.sh
#HLS
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m http://krabs:1935/vod/mp4:example1_low.mp4/playlist.m3u8" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/hls/example1_low/index.m3u8" >> scriptje.sh
#echo "./loadTest $SERVCONF -p flussonic $BATCHES $TIMES -m http://krabs/vod/example1_low.mp4/tracks-1,2/index.m3u8" >> scriptje.sh
#RTMP
#echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m rtmp://krabs/vod/mp4:example1_low.mp4" >> scriptje.sh
#echo "./loadTest $SERVCONF -p nginx $BATCHES $TIMES -m rtmp://krabs/vod2/example1_low.mp4" >> scriptje.sh
#echo "./loadTest $SERVCONF -p adobe $BATCHES $TIMES -m rtmp://krabs/vod/mp4:tests/example1_low.flv" >> scriptje.sh
#echo "./loadTestSimple $BATCHES $TIMES -m rtmp://mistpuma/a/example1_low" >> scriptje.sh
echo "./loadTestSimple $BATCHES $TIMES -m http://192.168.137.86:8080/schijtaap.flv" >> scriptje.sh
#echo "./loadTestSimple $BATCHES $TIMES -m rtmp://poema/a/b1" >> scriptje.sh
#echo "./loadTestSimple $BATCHES $TIMES -m rtmp://poema/a/b2" >> scriptje.sh
#echo "./loadTestSimple $BATCHES $TIMES -m rtmp://poema/a/b3" >> scriptje.sh
#echo "./loadTest $SERVCONF -p flussonic $BATCHES $TIMES -m rtmp://krabs/vod/example1_low.mp4.mp4" >> scriptje.sh
#run it
. scriptje.sh
~/yup

32
scripts/genericTests_larry Executable file

@ -0,0 +1,32 @@
#!/bin/bash
BATCHES="-b \"1 1\" -b \"1 100\" -b \"1 1000\" -b \"1 1200\""
TIMES="-t 60 -t 180 -t 600"
#BATCHES="-b \"1 500\""
#TIMES="-t 30"
#BATCHES="-b \"16 50\" -b \"24 50\""
#BATCHES="-b \"10 1\""
#TIMES="-t 10 -t 60 -t 180 -t 600"
SERVCONF="-s krabs -l /home/thulinma/capa_service/ -fresh"
#SERVCONF="-s krabs -l /home/thulinma/capa_service/"
rm scriptje.sh
touch scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver -b \"20 27\" -t 40 -m http://krabs:8080/example1_low.mp4" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m http://krabs:8080/example1_low.flv" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m rtmp://krabs/a/example1_low -m http://krabs:8080/example1_low.flv" >> scriptje.sh
#echo "./loadTest $SERVCONF -p mistserver -b \"16 50\" -t 600 -m rtmp://krabs/a/example1_low" >> scriptje.sh
echo "./loadTest $SERVCONF -p wowza4 $BATCHES $TIMES -m rtmp://krabs/vod/mp4:example1_low.mp4" >> scriptje.sh
echo "./loadTest $SERVCONF -p nginx $BATCHES $TIMES -m rtmp://krabs/vod2/example1_low.mp4" >> scriptje.sh
echo "./loadTest $SERVCONF -p adobe $BATCHES $TIMES -m rtmp://krabs/vod/mp4:tests/example1_low.flv" >> scriptje.sh
echo "./loadTest $SERVCONF -p mistserver $BATCHES $TIMES -m rtmp://krabs/a/example1_low" >> scriptje.sh
. scriptje.sh

20
scripts/install_capa.sh Executable file

@ -0,0 +1,20 @@
#!/bin/bash
g++ -Wall -o capa_service capa_service.cpp -lmist
if [ $? -eq 0 ]; then
bestand=/etc/systemd/system/capa_service.service
echo "[Unit]" > $bestand
echo "Description=capa_service" >> $bestand
echo "After=networkmanager.service" >> $bestand
echo "" >> $bestand
echo "[Service]" >> $bestand
echo "Type=simple" >> $bestand
echo "User=$USER" >> $bestand
echo "ExecStart="`pwd`"/capa_service" `pwd`/log.csv >> $bestand
echo "Restart=always" >> $bestand
echo "RestartSec=5" >> $bestand
echo "" >> $bestand
echo "[Install]" >> $bestand
echo "WantedBy=multi-user.target" >> $bestand
systemctl daemon-reload
fi
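
Writing the unit file and running daemon-reload is all this script does; enabling and starting the service is a separate, manual step (standard systemctl usage, not part of this script):

  sudo systemctl enable capa_service
  sudo systemctl start capa_service
  systemctl status capa_service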

17
scripts/install_mist_service.sh Executable file

@ -0,0 +1,17 @@
#!/bin/bash
bestand=/etc/systemd/system/mistserver.service
echo "[Unit]" > $bestand
echo "Description=MistServer" >> $bestand
echo "After=networkmanager.service" >> $bestand
echo "" >> $bestand
echo "[Service]" >> $bestand
echo "Type=simple" >> $bestand
echo "User=$USER" >> $bestand
echo "ExecStart=/usr/bin/MistController -nc "`pwd`"/config.json" >> $bestand
echo "Restart=always" >> $bestand
echo "RestartSec=5" >> $bestand
echo "" >> $bestand
echo "[Install]" >> $bestand
echo "WantedBy=multi-user.target" >> $bestand

263
scripts/loadTest Executable file

@ -0,0 +1,263 @@
#!/bin/bash
#starting with define of stream getter functions
#for all functions: $1 = stream, $2 = time to download
function genericGetter {
echo filesize `curl -s -m $2 $1| wc -c` 1>&2
}
function flvGetter {
curl -s -m $2 $1 2> /dev/null | ../MistAnalyserFLV -m validate 2> /dev/null
}
function hlsGetter {
../MistAnalyserHLS -m validate -a $2 $1 2>/dev/null
}
function oggGetter {
curl -s -m $2 $1 | ../MistAnalyserOGG -m validate
}
function dashGetter {
../MistAnalyserDASH -m validate -a $2 $1 2>/dev/null
}
function rtmpGetter {
#rtmpGetter ignores $2, because rtmpdump has no time-limit setting
#rtmpdump is ended with killall in the parent function
rtmpdump -qRr $1 -o - 2> /dev/null | ../MistAnalyserFLV -m validate 2> /dev/null
}
function serverTest {
rm /tmp/res*_*
#this function creates a csv file with all statistics during the tests
#$1 = number of stream batches
#$2 = amount of streams per batch (total amount of streams = $1 * $2)
#$3 = current stream test
#$4 = ssh access to server where our log file is located
#$5 = duration of test in seconds
#$6 = location of log file on server
#$7 = getter used for stream testing
#$8 = Extra comments added to .csv file
#$9 = Output basename
echo "Test variables:" > $9.info
echo "Start time: `date`" >> $9.info
echo "Client count: `echo "$1 * $2" | bc`" >> $9.info
echo "Batch Size: $2" >> $9.info
echo "Stream URL: $3" >> $9.info
echo "Duration: $5 seconds" >> $9.info
val="none"
logdir="$6"
if [ -n "$7" ] ; then
echo "Validator: $7" >> $9.info
val="$7"
fi
ssh $4 sudo systemctl restart capa_service
#sleeping, so service can properly start
sleep 2
getter="${val}Getter"
#starting all tests
prefix="res"`date +%s`_
for x in `seq 1 1 $1`; do
for y in `seq 1 1 $2`; do
eval "$getter $3 $5" >& /tmp/$prefix`echo "$x * $2 + $y" | bc`.txt &
done
sleep 1
done
start=`date +%s`
f=$(( `date +%s` - $start ))
while [ $f -lt $5 ]; do
sleep 2
f=$(( `date +%s` - $start ))
done
if [ "$val" == "rtmp" ] ; then
killall rtmpdump
fi
#wait 20 seconds after terminating
start=`date +%s`
f=$(( `date +%s` - $start ))
while [ $f -lt 20 ]; do
sleep 2
f=$(( `date +%s` - $start ))
done
cat /tmp/$prefix* > $9.times
ssh $4 sudo systemctl stop capa_service
ssh $4 "cat $logdir/log.csv" > $9.csv
wait
}
function rebootHost {
nowtime=`ssh $1 uptime -s`
timeOut=$(( `date +%s` + 3600 ))
echo "Rebooting host $1..."
ssh $1 sudo shutdown -r 3
echo "Waiting for host to come back..."
sleep 3
while [ true ]; do
sleep 5
#ping for connection
ping -c1 $1 > /dev/null
if [ $? -eq 0 ]; then
#check ssh
if [ "`ssh -o 'ConnectTimeout 5' $1 echo DDVtesting`" == "DDVtesting" ]; then
#check uptime
if [ "`ssh -o 'ConnectTimeout 5' $1 uptime -s`" != "$nowtime" ]; then
echo host successfully rebooted
return 0
fi
fi
fi
if [ $timeOut -lt `date +%s` ]; then
echo timeout
return 1
fi
done
}
function checkService {
echo "doing runcheck"
echo "ssh $1 systemctl status $2 | grep Active\:"
status="`ssh $1 systemctl status $2 | grep Active\:`"
if [ "${status:11:6}" != "active" ]; then
echo starting ${2}...
ssh $1 sudo systemctl stop wowza
ssh $1 sudo systemctl stop wowza4
ssh $1 sudo systemctl stop nginx
ssh $1 sudo systemctl stop mistserver
ssh $1 sudo systemctl stop adobe
ssh $1 sudo systemctl disable wowza
ssh $1 sudo systemctl disable wowza4
ssh $1 sudo systemctl disable nginx
ssh $1 sudo systemctl disable mistserver
ssh $1 sudo systemctl disable adobe
ssh $1 sudo systemctl enable $2
ssh $1 sudo systemctl start $2
else
echo $2 is running...
fi
}
#setting default values
server="localhost"
comment=""
logdir="`pwd`"
freshBoot="y"
product="mistserver"
declare -a timegroup
declare -a request
declare -a stream
#parsing arguments
red=1
while [ $red -le $# ]; do
case ${!red} in
"-s") #defines server
red=$(( $red + 1 ))
server=${!red}
;;
"-l") #defines directory on the server with the capabilities log
red=$(( $red + 1 ))
logdir=${!red}
;;
"-t") #defines timelengths for tests
red=$(( $red + 1 ))
timegroup+=( ${!red} )
;;
"-b") #defines user batches for tests (must be in format "batchamount batchsize")
red=$(( $red + 1 ))
request+=( "${!red}" )
;;
"-c") #add a comment
red=$(( $red + 1 ))
comment=${!red}
;;
"-m") #defines a media stream and validator
red=$(( $red + 1 ))
stream+=( "${!red}" )
;;
"-p") #defines the product to be tested, default is mistserver
red=$(( $red + 1 ))
product=${!red}
;;
"-fresh")
freshBoot="x"
;;
*)
comment=`echo $comment ${!red}`
esac
red=$(( $red + 1 ))
done
#determining validators
for (( i=0; i<${#stream[@]}; i++ )) ; do
if [ "${stream[$i]:0:4}" == "rtmp" ]; then
validator[$i]="rtmp"
else
tempFile=$(basename "${stream[$i]}")
tempExt="${tempFile##*.}"
case $tempExt in
"flv")
validator[$i]="flv"
;;
"ogg")
validator[$i]="ogg"
;;
"m3u8")
validator[$i]="hls"
;;
"m3u")
validator[$i]="hls"
;;
"mpd")
validator[$i]="dash"
;;
*)
validator[$i]="generic"
esac
fi
done
if [ ${#request[@]} == 0 ]; then
request=( "1 1000" )
fi
if [ ${#timegroup[@]} == 0 ]; then
timegroup=( 60 )
fi
#check that the right product is enabled on the server
checkService $server $product
#making directory for test
temp="$product"`date +%y%m%d%H%M%S`
mkdir $temp
#starting test
for (( i=0; i<${#stream[@]}; i++ )) ; do
for (( p=0; p<${#timegroup[@]}; p++ )) ; do
for (( o=0; o<${#request[@]}; o++ )) ; do
if [ $freshBoot == "x" ]; then
rebootHost $server
if [ $? -ne 0 ] ; then
echo lost host in reboot process, exiting...
exit $?;
fi
checkService $server $product
fi
tnr=` echo "$i * ${#timegroup[@]} * ${#request[@]} + $o * ${#timegroup[@]} + $p" | bc `
name=$temp"/run"
if [ $tnr -lt 100 ] ; then
name="${name}0"
fi
if [ $tnr -lt 10 ] ; then
name="${name}0"
fi
amount=`echo ${request[$o]} | sed -e 's/ /*/g' | bc`
name="${name}${tnr}_${amount}_${validator[$i]}_${timegroup[$p]}"
echo "$name ${request[$o]} ${stream[$i]} $server ${timegroup[$p]} ${validator[$i]}"
serverTest ${request[$o]} ${stream[$i]} $server ${timegroup[$p]} $logdir ${validator[$i]} "$comment" $name
done
done
done
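
Putting the options together, a typical invocation looks like the sketch below; the hostname, log directory and stream URL are placeholders, not values from this commit. -s names the server to SSH into, -l the directory holding capa_service's log.csv on that server, -p the product/service to enable, -b a "batches batchsize" pair, -t a test duration in seconds (both -b and -t may be repeated), and -m a stream URL whose scheme/extension selects the validator:

  ./loadTest -s streamhost -l /home/user/capa_service/ -p mistserver \
    -b "2 50" -t 60 -t 300 -m http://streamhost:8080/example1_low.flv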

200
scripts/loadTestSimple Executable file

@ -0,0 +1,200 @@
#!/bin/bash
#starting with define of stream getter functions
#for all functions: $1 = stream, $2 = time to download
function genericGetter {
echo filesize `curl -s -m $2 $1| wc -c` 1>&2
}
function flvGetter {
curl -s -m $2 $1 2> /dev/null | ../MistAnalyserFLV -m validate 2> /dev/null
}
function hlsGetter {
../MistAnalyserHLS -m validate -a $2 $1 2>/dev/null
}
function oggGetter {
curl -s -m $2 $1 | ../MistAnalyserOGG -m validate
}
function dashGetter {
../MistAnalyserDASH -m validate -a $2 $1 2>/dev/null
}
function rtmpGetter {
#rtmpGetter ignores $2, because rtmpdump has no time-limit setting
#rtmpdump is ended with killall in the parent function
rtmpdump -qRr $1 -o - 2> /dev/null | ../MistAnalyserFLV -m validate 2> /dev/null
}
function serverTest {
rm /tmp/res*_*
#this function creates a csv file with all statistics during the tests
#$1 = number of stream batches
#$2 = amount of streams per batch (total amount of streams = $1 * $2)
#$3 = current stream test
#$4 = ssh access to server where our log file is located
#$5 = duration of test in seconds
#$6 = location of log file on server
#$7 = getter used for stream testing
#$8 = Extra comments added to .csv file
#$9 = Output basename
echo "Test variables:" > $9.info
echo "Start time: `date`" >> $9.info
echo "Client count: `echo "$1 * $2" | bc`" >> $9.info
echo "Batch Size: $2" >> $9.info
echo "Stream URL: $3" >> $9.info
echo "Duration: $5 seconds" >> $9.info
val="none"
logdir="$6"
if [ -n "$7" ] ; then
echo "Validator: $7" >> $9.info
val="$7"
fi
getter="${val}Getter"
#starting all tests
prefix="res"`date +%s`_
for x in `seq 1 1 $1`; do
for y in `seq 1 1 $2`; do
eval "$getter $3 $5" >& /tmp/$prefix`echo "$x * $2 + $y" | bc`.txt &
done
sleep 1
done
start=`date +%s`
f=$(( `date +%s` - $start ))
while [ $f -lt $5 ]; do
sleep 2
f=$(( `date +%s` - $start ))
done
if [ "$val" == "rtmp" ] ; then
killall rtmpdump
fi
#wait 20 seconds after terminating
start=`date +%s`
f=$(( `date +%s` - $start ))
while [ $f -lt 20 ]; do
sleep 2
f=$(( `date +%s` - $start ))
done
cat /tmp/$prefix* > $9.times
wait
}
#setting default values
server="localhost"
comment=""
logdir="`pwd`"
freshBoot="y"
product="mistserver"
declare -a timegroup
declare -a request
declare -a stream
#parsing arguments
red=1
while [ $red -le $# ]; do
case ${!red} in
"-s") #defines server
red=$(( $red + 1 ))
server=${!red}
;;
"-l") #defines directory on the server with the capabilities log
red=$(( $red + 1 ))
logdir=${!red}
;;
"-t") #defines timelengths for tests
red=$(( $red + 1 ))
timegroup+=( ${!red} )
;;
"-b") #defines user batches for tests (must be in format "batchamount batchsize")
red=$(( $red + 1 ))
request+=( "${!red}" )
;;
"-c") #add a comment
red=$(( $red + 1 ))
comment=${!red}
;;
"-m") #defines a media stream and validator
red=$(( $red + 1 ))
stream+=( "${!red}" )
;;
"-p") #defines the product to be tested, default is mistserver
red=$(( $red + 1 ))
product=${!red}
;;
"-fresh")
freshBoot="x"
;;
*)
comment=`echo $comment ${!red}`
esac
red=$(( $red + 1 ))
done
#determining validators
for (( i=0; i<${#stream[@]}; i++ )) ; do
if [ "${stream[$i]:0:4}" == "rtmp" ]; then
validator[$i]="rtmp"
else
tempFile=$(basename "${stream[$i]}")
tempExt="${tempFile##*.}"
case $tempExt in
"flv")
validator[$i]="flv"
;;
"ogg")
validator[$i]="ogg"
;;
"m3u8")
validator[$i]="hls"
;;
"m3u")
validator[$i]="hls"
;;
"mpd")
validator[$i]="dash"
;;
*)
validator[$i]="generic"
esac
fi
done
if [ ${#request[@]} == 0 ]; then
request=( "1 1000" )
fi
if [ ${#timegroup[@]} == 0 ]; then
timegroup=( 60 )
fi
#making directory for test
temp="$product"`date +%y%m%d%H%M%S`
mkdir $temp
#starting test
for (( i=0; i<${#stream[@]}; i++ )) ; do
for (( p=0; p<${#timegroup[@]}; p++ )) ; do
for (( o=0; o<${#request[@]}; o++ )) ; do
tnr=` echo "$i * ${#timegroup[@]} * ${#request[@]} + $o * ${#timegroup[@]} + $p" | bc `
name=$temp"/run"
if [ $tnr -lt 100 ] ; then
name="${name}0"
fi
if [ $tnr -lt 10 ] ; then
name="${name}0"
fi
amount=`echo ${request[$o]} | sed -e 's/ /*/g' | bc`
name="${name}${tnr}_${amount}_${validator[$i]}_${timegroup[$p]}"
echo "$name ${request[$o]} ${stream[$i]} $server ${timegroup[$p]} ${validator[$i]}"
serverTest ${request[$o]} ${stream[$i]} $server ${timegroup[$p]} $logdir ${validator[$i]} "$comment" $name
done
done
done
./plotDieShit $temp
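
loadTestSimple skips the SSH, reboot and capa_service handling of loadTest and only collects the per-client .times files before handing the result directory to plotDieShit. The validator is still chosen from the stream URL: rtmp:// URLs are pulled with rtmpdump and piped into MistAnalyserFLV, otherwise the extension decides (flv, ogg, m3u8/m3u, mpd), and anything else falls back to the generic byte-count getter. A few example invocations (URLs are placeholders):

  ./loadTestSimple -b "1 100" -t 30 -m rtmp://host/app/stream               # rtmp validator
  ./loadTestSimple -b "1 100" -t 30 -m http://host:8080/stream/index.m3u8   # hls validator
  ./loadTestSimple -b "1 100" -t 30 -m http://host:8080/stream.mp4          # generic getter (byte count only)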

187
scripts/makeplots Executable file

@ -0,0 +1,187 @@
#!/bin/bash
MBIT="0.6Mbit stream"
T="pngcairo size 1000,600 enhanced font \"LiberationSerif,20\""
EXT=png
#T="pdfcairo"
#EXT=pdf
function runplot {
for R in `seq -w 0 100`; do
FLDS=( $1 )
NAMS=( $2 )
FN=`ls ${FLDS[0]}/run${R}_*.csv 2> /dev/null`
if [ -z $FN ] ; then
return
fi
FILE=`basename $FN`
FN=`ls ${FLDS[0]}/run${R}_*.times 2> /dev/null`
TIMES=`basename $FN`
VIEWERS=`echo $FILE | cut -d '_' -f 2`
TECH=`echo $FILE | cut -d '_' -f 3`
SECS=`echo $FILE | cut -d '.' -f 1 | cut -d '_' -f 4`
TIME=$(( $SECS / 60 ))m
FILES=()
TIME_FN=()
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
FN=`ls ${FLDS[$i]}/run${R}_*.csv 2> /dev/null`
if [ -z $FN ] ; then
return
fi
FILES[$i]="${FLDS[$i]}/`basename $FN`"
FN=`ls ${FLDS[$i]}/run${R}_*.times 2> /dev/null`
TIME_FN[$i]="${FLDS[$i]}/`basename $FN`"
done
if [ -n "$3" ] ; then
COMMON_DATA="${TIME}, ${MBIT}, ${VIEWERS} $3"
CFN="$3_${VIEWERS}_${TIME}"
COMMON_FILENAME=${CFN,,}
else
COMMON_DATA="${TIME}, ${MBIT}, ${VIEWERS} ${TECH}"
COMMON_FILENAME="${TECH}_${VIEWERS}_${TIME}"
fi
PLOT1=""
PLOT2=""
PLOT3=""
PLOT4=""
PLOT5=""
PLOT6=""
PLOT7=""
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
if [ -n "${PLOT1}" ] ; then
PLOT1="${PLOT1}, "
PLOT2="${PLOT2}, "
PLOT3="${PLOT3}, "
PLOT4="${PLOT4}, "
PLOT5="${PLOT5}, "
PLOT6="${PLOT6}, "
PLOT7="${PLOT7}, "
fi
sort --field-separator=',' --key=4g < "${TIME_FN[$i]}" > "${TIME_FN[$i]}_sorted"
VC=`cut -f 4 -d ',' < "${TIME_FN[$i]}" | awk "(int(\\\$1) >= ${SECS}000-5000) {sum++} END{print sum}"`
SC=`cut -f 4 -d ',' < "${TIME_FN[$i]}" | awk "(int(\\\$1) >= ${SECS}000-30000) {sum++} END{print sum}"`
#smooth CPU and network
PLOT6="${PLOT6}'${FILES[$i]}' using 1:4:(1) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})' smooth acsplines"
PLOT7="${PLOT7}'${FILES[$i]}' using (\$1-dx):(d(\$1,\$5/131072.0)):(1) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})' smooth acsplines"
#actual CPU and network
PLOT1="${PLOT1}'${FILES[$i]}' using 1:4 with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})'"
PLOT4="${PLOT4}'${FILES[$i]}' using (\$1-dx):(d(\$1,\$5/131072.0)) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})'"
#memory - no need for smoothing, it's already pretty smooth
PLOT2="${PLOT2}'${FILES[$i]}' using 1:(\$3/1024.0) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})'"
#total upload - same here, no need to smooth anything over
PLOT3="${PLOT3}'${FILES[$i]}' using 1:(\$5/134217728.0) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})'"
#and buffer times. Smoothing makes no sense here. Don't.
PLOT5="${PLOT5}'${TIME_FN[$i]}_sorted' using (\$4-${SECS}000)/1000 with linespoints lw 1 title '${NAMS[$i]} (${VC}, ${SC})'"
done
gnuplot << EOF
set terminal ${T}
set datafile separator ","
set key on under center
#set style fill solid 0.25 border -1
#set style boxplot outliers pointtype 7
#set style data boxplot
#set boxwidth 0.5
set pointsize 0.2
set arrow from 0,-5 to ${VIEWERS}-1,-5 nohead
set title "Available buffer time, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_buffers.${EXT}'
set format y "%gs"
set ylabel "Buffer size (above line = success)"
set xlabel "Each point represents a single connection"
unset xtics
plot ${PLOT5}
unset style
unset xlabel
unset arrow
set timefmt "%s"
set xtics
set xdata time
#set xlabel "Time (mm:ss)"
set format x "%M:%S"
set grid
set title "CPU usage, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_cpu.${EXT}'
set yrange [0:*<100]
set format y "%.0f %%"
set ylabel "CPU use"
plot ${PLOT1}
set title "Memory usage, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_mem.${EXT}'
set yrange [0<*<0:0<*]
set ytics auto
set format y "%g MiB"
set ylabel "Memory use"
plot ${PLOT2}
set title "Network upload, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_upload.${EXT}'
set yrange [0<*<0:0<*]
set ytics auto
set format y "%g Gbit"
set ylabel "Total uploaded data"
plot ${PLOT3}
# derivative function. Return 1/0 for first point, otherwise delta y or (delta y)/(delta x)
d(x,y) = (\$0 == 1) ? (x1 = x, y1 = y, 1/0) : (x2 = x1, x1 = x, y2 = y1, y1 = y, (y1-y2)/(x1-x2))
set title "Network speed, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_netspeed.${EXT}'
set yrange [0<*<0:0<*]
set ytics auto
set format y "%g Mbps"
set ylabel "Upload speed"
dx=0.5
plot ${PLOT4}
set title "Smoothed CPU usage, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_cpu_smooth.${EXT}'
set yrange [0:*<100]
set format y "%.0f %%"
set ylabel "CPU use"
plot ${PLOT6}
set title "Smoothed network speed, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_netspeed_smooth.${EXT}'
set yrange [0<*<0:0<*]
set ytics auto
set format y "%g Mbps"
set ylabel "Upload speed"
dx=0.5
plot ${PLOT7}
EOF
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
rm -f "${FLDS[$i]}/${TIMES}_sorted"
done
done
}
#runplot "adobe_rtmp nginx_rtmp wowza_rtmp mist_rtmp flus_rtmp" "Adobe Nginx Wowza MistServer Flussonic"
#runplot "wowza_hls flus_hls mist_hls" "Wowza Flussonic MistServer"
#runplot "wowza_dash mist_dash" "Wowza MistServer"
#runplot "wowza_rtmp wowza_hls wowza_dash" "RTMP HLS DASH" "Wowza"
#runplot "mist_rtmp mist_hls mist_dash" "RTMP HLS DASH" "MistServer"
runplot "mistserver*" "test" "RTMP"
#runplot "wowza_dash mist_dash wowza_dash_new mist_dash_new mist_dash3" "Wowza Mist WowNew MistNew MistNewer"
#runplot "mist_dash mist_dash_new mist_dash3" "Old New Newer" "MistDash"
#runplot "wowza_hls_new flus_hls_new mist_hls_new mist_hls5" "Wowza Flus Mist MistNew"
#runplot "adobe_rtmp nginx_rtmp wowza_rtmp mist_rtmp flus_rtmp mist_rtmp5" "Adobe Nginx Wowza MistServer Flussonic MistNew"

101
scripts/plotDieShit Executable file

@ -0,0 +1,101 @@
#!/bin/bash
FOLDER=$1
SPEED=$1
LABEL=$1
MBIT="$SPEED stream"
T="pngcairo size 1000,600 enhanced font \"LiberationSerif,20\""
EXT=png
#T="pdfcairo"
#EXT=pdf
function runplot {
for R in `seq -w 0 100`; do
FLDS=( $1 )
NAMS=( $2 )
FN=`ls ${FLDS[0]}/run${R}_*.times 2> /dev/null`
if [ -z $FN ] ; then
return
fi
FILE=`basename $FN`
FN=`ls ${FLDS[0]}/run${R}_*.times 2> /dev/null`
TIMES=`basename $FN`
VIEWERS=`echo $FILE | cut -d '_' -f 2`
TECH=`echo $FILE | cut -d '_' -f 3`
SECS=`echo $FILE | cut -d '.' -f 1 | cut -d '_' -f 4`
TIME=$(( $SECS / 60 ))m
FILES=()
TIME_FN=()
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
FN=`ls ${FLDS[$i]}/run${R}_*.times 2> /dev/null`
if [ -z $FN ] ; then
return
fi
FILES[$i]="${FLDS[$i]}/`basename $FN`"
FN=`ls ${FLDS[$i]}/run${R}_*.times 2> /dev/null`
TIME_FN[$i]="${FLDS[$i]}/`basename $FN`"
done
if [ -n "$3" ] ; then
COMMON_DATA="${TIME}, ${MBIT}, ${VIEWERS} $3"
CFN="$3_${VIEWERS}_${TIME}"
COMMON_FILENAME=${CFN,,}
else
COMMON_DATA="${TIME}, ${MBIT}, ${VIEWERS} ${TECH}"
COMMON_FILENAME="${TECH}_${VIEWERS}_${TIME}"
fi
PLOT5=""
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
if [ -n "${PLOT5}" ] ; then
PLOT5="${PLOT5}, "
fi
sort --field-separator=',' --key=4g < "${TIME_FN[$i]}" > "${TIME_FN[$i]}_sorted"
VC=`cut -f 4 -d ',' < "${TIME_FN[$i]}" | awk "(int(\\\$1) >= ${SECS}000-5000) {sum++} END{print sum}"`
SC=`cut -f 4 -d ',' < "${TIME_FN[$i]}" | awk "(int(\\\$1) >= ${SECS}000-30000) {sum++} END{print sum}"`
#and buffer times. Smoothing makes no sense here. Don't.
PLOT5="${PLOT5}'${TIME_FN[$i]}_sorted' using (\$4-${SECS}000)/1000 with linespoints lw 1 title '${NAMS[$i]} (${VC}, ${SC})'"
done
gnuplot << EOF
set terminal ${T}
set datafile separator ","
set key on under center
#set style fill solid 0.25 border -1
#set style boxplot outliers pointtype 7
#set style data boxplot
#set boxwidth 0.5
set pointsize 0.2
set arrow from 0,-5 to ${VIEWERS}-1,-5 nohead
set title "Available buffer time, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_buffers.${EXT}'
set format y "%gs"
set ylabel "Buffer size (above line = success)"
set xlabel "Each point represents a single connection"
unset xtics
plot ${PLOT5}
EOF
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
rm -f "${FLDS[$i]}/${TIMES}_sorted"
done
done
}
#runplot "adobe_rtmp nginx_rtmp wowza_rtmp mist_rtmp flus_rtmp" "Adobe Nginx Wowza MistServer Flussonic"
runplot "$LABEL" "$FOLDER" "$SPEED"
#runplot "wowza_dash mist_dash wowza_dash_new mist_dash_new mist_dash3" "Wowza Mist WowNew MistNew MistNewer"
#runplot "mist_dash mist_dash_new mist_dash3" "Old New Newer" "MistDash"
#runplot "wowza_hls_new flus_hls_new mist_hls_new mist_hls5" "Wowza Flus Mist MistNew"
#runplot "adobe_rtmp nginx_rtmp wowza_rtmp mist_rtmp flus_rtmp mist_rtmp5" "Adobe Nginx Wowza MistServer Flussonic MistNew"

192
scripts/plot_run Executable file

@ -0,0 +1,192 @@
#!/bin/bash
if [ -z $R ] ; then
R=000
fi
if [ -z $MBIT ] ; then
MBIT="0.6Mbit stream"
fi
T="pngcairo size 1000,600 enhanced font \"LiberationSerif,20\""
EXT=png
#T="pdfcairo"
#EXT=pdf
if [ -z "$FOLDERS" ] ; then
FOLDERS="wowza nginx mist adobe"
fi
if [ -z "$NAMES" ] ; then
NAMES="Wowza Nginx Mist Adobe"
fi
FLDS=( $FOLDERS )
NAMS=( $NAMES )
FN=`ls ${FLDS[0]}/run${R}_*.csv 2> /dev/null`
if [ -z $FN ] ; then
exit
fi
FILE=`basename $FN`
FN=`ls ${FLDS[0]}/run${R}_*.times 2> /dev/null`
TIMES=`basename $FN`
VIEWERS=`echo $FILE | cut -d '_' -f 2`
TECH=`echo $FILE | cut -d '_' -f 3`
SECS=`echo $FILE | cut -d '.' -f 1 | cut -d '_' -f 4`
TIME=$(( $SECS / 60 ))m
FILES=()
TIME_FN=()
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
FN=`ls ${FLDS[$i]}/run${R}_*.csv 2> /dev/null`
if [ -z $FN ] ; then
exit
fi
FILES[$i]="${FLDS[$i]}/`basename $FN`"
FN=`ls ${FLDS[$i]}/run${R}_*.times 2> /dev/null`
TIME_FN[$i]="${FLDS[$i]}/`basename $FN`"
done
# FN=`ls ${FLDS[0]}/run${R}_*.times 2> /dev/null`
# VNAME="TIMES_${i}"
# ${!VNAME}=`basename $FN`
# VNAME="VIEWERS_${i}"
# ${!VNAME}=`echo $FILE | cut -d '_' -f 2`
# VNAME="TECH_${i}"
# ${!VNAME}=`echo $FILE | cut -d '_' -f 3`
# SECS=`echo $FILE | cut -d '.' -f 1 | cut -d '_' -f 4`
# VNAME="TIME_${i}"
# ${!VNAME}=$(( $SECS / 60 ))m
#
# done
COMMON_DATA="${TIME}, ${MBIT}, ${VIEWERS} ${TECH}"
COMMON_FILENAME="${TECH}_${VIEWERS}_${TIME}"
PLOT1=""
PLOT2=""
PLOT3=""
PLOT4=""
PLOT5=""
PLOT6=""
PLOT7=""
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
if [ -n "${PLOT1}" ] ; then
PLOT1="${PLOT1}, "
PLOT2="${PLOT2}, "
PLOT3="${PLOT3}, "
PLOT4="${PLOT4}, "
PLOT5="${PLOT5}, "
PLOT6="${PLOT6}, "
PLOT7="${PLOT7}, "
fi
sort --field-separator=',' --key=4g < "${TIME_FN[$i]}" > "${TIME_FN[$i]}_sorted"
VC=`cut -f 4 -d ',' < "${TIME_FN[$i]}" | awk "(int(\\\$1) >= ${SECS}000-5000) {sum++} END{print sum}"`
SC=`cut -f 4 -d ',' < "${TIME_FN[$i]}" | awk "(int(\\\$1) >= ${SECS}000-30000) {sum++} END{print sum}"`
#smooth CPU and network
PLOT6="${PLOT6}'${FLDS[$i]}/${FILE}' using 1:4:(1) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})' smooth acsplines"
PLOT7="${PLOT7}'${FLDS[$i]}/${FILE}' using (\$1-dx):(d(\$1,\$5/131072.0)):(1) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})' smooth acsplines"
#actual CPU and network
PLOT1="${PLOT1}'${FILES[$i]}' using 1:4 with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})'"
PLOT4="${PLOT4}'${FILES[$i]}' using (\$1-dx):(d(\$1,\$5/131072.0)) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})'"
#memory - no need for smoothing, it's already pretty smooth
PLOT2="${PLOT2}'${FILES[$i]}' using 1:(\$3/1024.0) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})'"
#total upload - same here, no need to smooth anything over
PLOT3="${PLOT3}'${FILES[$i]}' using 1:(\$5/134217728.0) with lines lw 2 title '${NAMS[$i]} (${VC}, ${SC})'"
#and buffer times. Smoothing makes no sense here. Don't.
PLOT5="${PLOT5}'${TIME_FN[$i]}_sorted' using (\$4-${SECS}000)/1000 with linespoints lw 1 title '${NAMS[$i]} (${VC}, ${SC})'"
done
gnuplot << EOF
set terminal ${T}
set datafile separator ","
set key on under center
#set style fill solid 0.25 border -1
#set style boxplot outliers pointtype 7
#set style data boxplot
#set boxwidth 0.5
set pointsize 0.2
set arrow from 0,-5 to ${VIEWERS}-1,-5 nohead
set title "Available buffer time, ${COMMON_DATA} clients"
set output '${TECH}_${VIEWERS}_${TIME}_buffers.${EXT}'
set format y "%gs"
set ylabel "Buffer size (above line = success)"
set xlabel "Each point represents a single connection"
unset xtics
plot ${PLOT5}
unset style
unset xlabel
unset arrow
set timefmt "%s"
set xtics
set xdata time
#set xlabel "Time (mm:ss)"
set format x "%M:%S"
set grid
set title "CPU usage, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_cpu.${EXT}'
set yrange [0:*<100]
set format y "%.0f %%"
set ylabel "CPU use"
plot ${PLOT1}
set title "Memory usage, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_mem.${EXT}'
set yrange [0<*<0:0<*]
set ytics auto
set format y "%g MiB"
set ylabel "Memory use"
plot ${PLOT2}
set title "Network upload, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_upload.${EXT}'
set yrange [0<*<0:0<*]
set ytics auto
set format y "%g Gbit"
set ylabel "Total uploaded data"
plot ${PLOT3}
# derivative function. Return 1/0 for first point, otherwise delta y or (delta y)/(delta x)
d(x,y) = (\$0 == 1) ? (x1 = x, y1 = y, 1/0) : (x2 = x1, x1 = x, y2 = y1, y1 = y, (y1-y2)/(x1-x2))
set title "Network speed, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_netspeed.${EXT}'
set yrange [0<*<0:0<*]
set ytics auto
set format y "%g Mbps"
set ylabel "Upload speed"
dx=0.5
plot ${PLOT4}
set title "Smoothed CPU usage, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_cpu_smooth.${EXT}'
set yrange [0:*<100]
set format y "%.0f %%"
set ylabel "CPU use"
plot ${PLOT6}
set title "Smoothed network speed, ${COMMON_DATA} clients"
set output '${COMMON_FILENAME}_netspeed_smooth.${EXT}'
set yrange [0<*<0:0<*]
set ytics auto
set format y "%g Mbps"
set ylabel "Upload speed"
dx=0.5
plot ${PLOT7}
EOF
for i in `seq $(( ${#FLDS[@]} - 1 )) -1 0`; do
rm -f "${FLDS[$i]}/${TIMES}_sorted"
done
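
plot_run is configured through environment variables rather than command-line options: R selects the run number, MBIT the stream-rate text used in titles, and FOLDERS/NAMES the result directories and their legend names (defaults are defined at the top of the script). A sketch with placeholder directory names:

  R=003 MBIT="2Mbit stream" FOLDERS="mist wowza" NAMES="MistServer Wowza" ./plot_run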

6
scripts/plot_runs Executable file

@ -0,0 +1,6 @@
#!/bin/bash
for i in `seq -w 0 100`; do
R=$i ./plot_run
done


@ -0,0 +1,490 @@
/// \file dash_analyzer.cpp
/// Contains the code for the DASH Analysing tool.
/// Currently, only mp4 is supported, and the xml parser assumes a representation id tag exists
#include <mist/config.h>
#include <mist/timing.h>
#include <mist/defines.h>
#include <mist/http_parser.h>
#include <iostream>
#include <fstream>
#include <set>
#include <mist/mp4.h>
#include <mist/mp4_generic.h>
#define OTHER 0x00
#define VIDEO 0x01
#define AUDIO 0x02
///\brief simple struct for storage of stream-specific data
struct StreamData{
long timeScale;
std::string media;
std::string initialization;
std::string initURL;
long trackID;
unsigned int adaptationSet;
unsigned char trackType;
};
StreamData tempSD; //temp global
///\brief another simple structure used for ordering byte seek positions.
struct seekPos {
///\brief Less-than comparison for seekPos structures.
///\param rhs The seekPos to compare with.
///\return Whether this object is smaller than rhs.
bool operator < (const seekPos & rhs) const {
if ((seekTime*rhs.timeScale) < (rhs.seekTime*timeScale)) {
return true;
} else {
if ( (seekTime*rhs.timeScale) == (rhs.seekTime*timeScale)){
if (adaptationSet < rhs.adaptationSet){
return true;
} else if (adaptationSet == rhs.adaptationSet){
if (trackID < rhs.trackID) {
return true;
}
}
}
}
return false;
}
long timeScale;
long long unsigned int bytePos; /// ?
long long unsigned int seekTime; ///start
long long unsigned int duration; ///duration
unsigned int trackID; ///stores representation ID
unsigned int adaptationSet; ///stores type
unsigned char trackType; ///stores type
std::string url;
};
bool getDelimBlock(std::string & data, std::string name, size_t &blockStart, size_t &blockEnd, std::string delim){
size_t offset=data.find(name);
if(offset==std::string::npos){
return false; //name string not found.
}
//expected: delim character BEFORE blockstart.
offset--;
blockStart=data.find(delim,offset);
//DEBUG_MSG(DLVL_INFO, "offset: %i blockStart: %i ", offset, blockStart);
offset=blockStart+1;//skip single character!
blockEnd=data.find(delim,offset);
//DEBUG_MSG(DLVL_INFO, "offset: %i blockEnd: %i ", offset, blockEnd);
if(blockStart==std::string::npos || blockEnd==std::string::npos){
return false; //no start/end quotes found
}
blockEnd++; //include delim
//DEBUG_MSG(DLVL_INFO, "getDelimPos: data.size() %i start %i end %i num %i", data.size(), blockStart,blockEnd,(blockEnd-blockStart) );
return true;
}
bool getValueBlock(std::string & data, std::string name, size_t &blockStart, size_t &blockEnd, std::string delim){
size_t offset=data.find(name);
if(offset==std::string::npos){
return false; //name string not found.
}
blockStart=data.find(delim,offset);
//DEBUG_MSG(DLVL_INFO, "offset: %i blockStart: %i ", offset, blockStart);
blockStart++; //clip off quote characters
offset=blockStart;//skip single character!
blockEnd=data.find(delim,offset);
//DEBUG_MSG(DLVL_INFO, "offset: %i blockEnd: %i ", offset, blockEnd);
if(blockStart==std::string::npos || blockEnd==std::string::npos){
return false; //no start/end quotes found
}
//DEBUG_MSG(DLVL_INFO, "getValueBlock: data.size() %i start %i end %i num %i", data.size(), blockStart,blockEnd,(blockEnd-blockStart) );
return true;
}
bool getString(std::string &data, std::string name, std::string &output){
size_t blockStart=0;
size_t blockEnd=0;
if(!getValueBlock(data, name, blockStart,blockEnd, "\"")){
//DEBUG_MSG(DLVL_FAIL, "could not find \"%s\" in data block", name.c_str());
return false; //could not find value in this data block.
}
//DEBUG_MSG(DLVL_INFO, "data.size() %i start %i end %i num %i", data.size(), blockStart,blockEnd,(blockEnd-blockStart) )
output=data.substr(blockStart,(blockEnd-blockStart));
//looks like this function is working as expected
//DEBUG_MSG(DLVL_INFO, "data in getstring %s", (data.substr(blockStart,(blockEnd-blockStart))).c_str());
return true;
}
bool getLong(std::string &data, std::string name, long &output){
size_t blockStart, blockEnd;
if(!getValueBlock(data, name, blockStart,blockEnd, "\"")){
//DEBUG_MSG(DLVL_FAIL, "could not find \"%s\" in data block", name.c_str());
return false; //could not find value in this data block.
}
//DEBUG_MSG(DLVL_INFO, "name: %s data in atol %s",name.c_str(), (data.substr(blockStart,(blockEnd-blockStart))).c_str());
output=atol( (data.substr(blockStart,(blockEnd-blockStart))).c_str() );
return true;
}
//block expects a separate name and /name occurrence, or name and /> before another occurrence of <.
bool getBlock(std::string & data, std::string name, int offset, size_t &blockStart, size_t &blockEnd){
blockStart=data.find("<"+name+">",offset);
if(blockStart==std::string::npos){
blockStart=data.find("<"+name+" ",offset); //this considers both valid situations <name> and <name bla="bla"/>
}
if(blockStart==std::string::npos){
DEBUG_MSG(DLVL_INFO, "no block start found for name: %s at offset: %i",name.c_str(), offset);
return false;
}
blockEnd=data.find("/" + name+ ">", blockStart);
if(blockEnd==std::string::npos){
blockEnd=data.find("/>", blockStart);
if(blockEnd==std::string::npos){
DEBUG_MSG(DLVL_INFO, "no block end found.");
return false;
}
size_t temp=data.find("<", blockStart+1, (blockEnd-blockStart-1)); //the +1 is to avoid re-interpreting the starting < //TODO!!
if(temp!=std::string::npos){ //all info is expected between <name ... />
DEBUG_MSG(DLVL_FAIL, "block start found before block end. offset: %lu block: %s", temp, data.c_str());
return false;
}
//DEBUG_MSG(DLVL_FAIL, "special block end found");
blockEnd+=2; //position after />
} else {
blockEnd += name.size()+2; //position after /name>
}
//DEBUG_MSG(DLVL_INFO, "getBlock: start: %i end: %i",blockStart,blockEnd);
return true;
}
bool parseAdaptationSet(std::string & data, std::set<seekPos> &currentPos){
//DEBUG_MSG(DLVL_INFO, "Parsing adaptationSet: %s", data.c_str());
size_t offset =0;
size_t blockStart, blockEnd;
tempSD.trackType=OTHER;
//get value: mimetype //todo: handle this!
std::string mimeType;
if(!getString(data,"mimeType", mimeType)){ //get first occurence of mimeType. --> this will break if multiple mimetypes should be read from this block because no offset is provided. solution: use this on a substring containing the desired information.
DEBUG_MSG(DLVL_FAIL, "mimeType not found");
return false;
}
DEBUG_MSG(DLVL_INFO, "mimeType: %s",mimeType.c_str()); //checked, OK
if(mimeType.find("video")!=std::string::npos){tempSD.trackType=VIDEO;}
if(mimeType.find("audio")!=std::string::npos){tempSD.trackType=AUDIO;}
if(tempSD.trackType==OTHER){
DEBUG_MSG(DLVL_FAIL, "no audio or video type found. giving up.");
return false;
}
//find an ID within this adaptationSet block.
if(!getBlock(data,(std::string)"Representation", offset, blockStart, blockEnd)){
DEBUG_MSG(DLVL_FAIL, "Representation not found");
return false;
}
//representation string
std::string block=data.substr(blockStart,(blockEnd-blockStart));
DEBUG_MSG(DLVL_INFO, "Representation block: %s",block.c_str());
//check if block is not junk?
if(!getLong(block,"id", tempSD.trackID) ){
DEBUG_MSG(DLVL_FAIL, "Representation id not found in block %s",block.c_str());
return false;
}
DEBUG_MSG(DLVL_INFO, "Representation/id: %li",tempSD.trackID); //checked, OK
offset =0;
//get values from SegmentTemplate
if(!getBlock(data,(std::string)"SegmentTemplate", offset, blockStart, blockEnd)){
DEBUG_MSG(DLVL_FAIL, "SegmentTemplate not found");
return false;
}
block=data.substr(blockStart,(blockEnd-blockStart));
//DEBUG_MSG(DLVL_INFO, "SegmentTemplate block: %s",block.c_str()); //OK
getLong(block,"timescale", tempSD.timeScale);
getString(block,"media", tempSD.media);
getString(block,"initialization", tempSD.initialization);
size_t tmpBlockStart=0;
size_t tmpBlockEnd=0;
if(!getDelimBlock(tempSD.media,"RepresentationID",tmpBlockStart,tmpBlockEnd, "$")){
DEBUG_MSG(DLVL_FAIL, "Failed to find and replace $RepresentationID$ in %s",tempSD.media.c_str());
return false;
}
tempSD.media.replace(tmpBlockStart,(tmpBlockEnd-tmpBlockStart),"%d");
if(!getDelimBlock(tempSD.media,"Time",tmpBlockStart,tmpBlockEnd, "$")){
DEBUG_MSG(DLVL_FAIL, "Failed to find and replace $Time$ in %s",tempSD.media.c_str());
return false;
}
tempSD.media.replace(tmpBlockStart,(tmpBlockEnd-tmpBlockStart),"%d");
if(!getDelimBlock(tempSD.initialization,"RepresentationID",tmpBlockStart,tmpBlockEnd, "$")){
DEBUG_MSG(DLVL_FAIL, "Failed to find and replace $RepresentationID$ in %s",tempSD.initialization.c_str());
return false;
}
tempSD.initialization.replace(tmpBlockStart,(tmpBlockEnd-tmpBlockStart),"%d");
//get segment timeline block from within segment template:
size_t blockOffset=0; //offset should be 0 because this is a new block
if(!getBlock(block,"SegmentTimeline", blockOffset, blockStart, blockEnd)){
DEBUG_MSG(DLVL_FAIL, "SegmentTimeline block not found");
return false;
}
std::string block2=block.substr(blockStart,(blockEnd-blockStart)); //overwrites previous block (takes just the segmentTimeline part)
//DEBUG_MSG(DLVL_INFO, "SegmentTimeline block: %s",block2.c_str()); //OK
int numS=0;
offset=0;
long long unsigned int totalDuration=0;
long timeValue;
while(1){
if(!getBlock(block2,"S",offset, blockStart, blockEnd)){
if(numS==0){
DEBUG_MSG(DLVL_FAIL, "no S found within SegmentTimeline");
return false;
} else {
DEBUG_MSG(DLVL_INFO, "all S found within SegmentTimeline %i", numS);
return true; //break; //escape from while loop (to return true)
}
}
numS++;
//stuff S data into: currentPos
//searching for t(start position)
std::string sBlock=block2.substr(blockStart,(blockEnd-blockStart));
//DEBUG_MSG(DLVL_INFO, "S found. offset: %i blockStart: %i blockend: %i block: %s",offset,blockStart, blockEnd, sBlock.c_str()); //OK!
if(getLong(sBlock,"t", timeValue)){
totalDuration=timeValue; //reset totalDuration to value of t
}
if(!getLong(sBlock,"d", timeValue)){ //expected duration in every S.
DEBUG_MSG(DLVL_FAIL, "no d found within S");
return false;
}
//stuff data with old value (start of block)
//DEBUG_MSG(DLVL_INFO, "stuffing info from S into set");
seekPos thisPos;
thisPos.trackType=tempSD.trackType;
thisPos.trackID=tempSD.trackID;
thisPos.adaptationSet=tempSD.adaptationSet;
//thisPos.trackID=id;
thisPos.seekTime=totalDuration; //previous total duration is start time of this S.
thisPos.duration=timeValue;
thisPos.timeScale=tempSD.timeScale;
static char charBuf[512];
snprintf(charBuf, 512, tempSD.media.c_str(), tempSD.trackID, totalDuration);
thisPos.url.assign(charBuf);
//DEBUG_MSG(DLVL_INFO, "media url (from rep.ID %d, startTime %d): %s", tempSD.trackID, totalDuration,thisPos.url.c_str());
currentPos.insert(thisPos); //assumes insert copies all data in seekPos struct.
totalDuration+=timeValue;//update totalDuration
offset=blockEnd; //blockEnd and blockStart are absolute values within string, offset is not relevant.
}
return true;
}
bool parseXML(std::string & body, std::set<seekPos> &currentPos, std::vector<StreamData> &streamData){
//for all adaptation sets
//representation ID
int numAdaptationSet=0;
size_t currentOffset=0;
size_t adaptationSetStart;
size_t adaptationSetEnd;
//DEBUG_MSG(DLVL_INFO, "body received: %s", body.c_str());
while(getBlock(body,"AdaptationSet",currentOffset, adaptationSetStart, adaptationSetEnd)){
tempSD.adaptationSet=numAdaptationSet;
numAdaptationSet++;
DEBUG_MSG(DLVL_INFO, "adaptationSet found. start: %lu end: %lu num: %lu ",adaptationSetStart,adaptationSetEnd,(adaptationSetEnd-adaptationSetStart));
//get substring: from <adaptationSet... to /adaptationSet>
std::string adaptationSet=body.substr(adaptationSetStart,(adaptationSetEnd-adaptationSetStart));
//function was verified: output as expected.
if(!parseAdaptationSet(adaptationSet, currentPos)){
DEBUG_MSG(DLVL_FAIL, "parseAdaptationSet returned false."); //this also happens in the case of OTHER mimetype. in that case it might be desirable to continue searching for valid data instead of quitting.
return false;
}
streamData.push_back(tempSD); //put temp values into adaptation set vector
currentOffset=adaptationSetEnd;//the getblock function should make sure End is at the correct offset.
}
if(numAdaptationSet==0){
DEBUG_MSG(DLVL_FAIL, "no adaptationSet found.");
return false;
}
DEBUG_MSG(DLVL_INFO, "all adaptation sets found. total: %i", numAdaptationSet);
return true;
}
int main(int argc, char ** argv) {
Util::Config conf = Util::Config(argv[0]);
conf.addOption("mode", JSON::fromString("{\"long\":\"mode\", \"arg\":\"string\", \"short\":\"m\", \"default\":\"analyse\", \"help\":\"What to do with the stream. Valid modes are 'analyse', 'validate', 'output'.\"}"));
conf.addOption("url", JSON::fromString("{\"arg_num\":1, \"arg\":\"string\", \"help\":\"URL to HLS stream index file to retrieve.\"}"));
conf.addOption("abort", JSON::fromString("{\"long\":\"abort\", \"short\":\"a\", \"arg\":\"integer\", \"default\":-1, \"help\":\"Abort after this many seconds of downloading. Negative values mean unlimited, which is the default.\"}"));
conf.parseArgs(argc, argv);
conf.activate();
unsigned int port = 80;
std::string url = conf.getString("url");
if (url.substr(0, 7) != "http://") {
DEBUG_MSG(DLVL_FAIL, "The URL must start with http://");
return -1;
}
url = url.substr(7);
if (url.find('/') == std::string::npos){url += "/";} //guard: a URL like http://meh.meh (no path) would otherwise throw out_of_range below
std::string server = url.substr(0, url.find('/'));
url = url.substr(url.find('/'));
if (server.find(':') != std::string::npos) {
port = atoi(server.substr(server.find(':') + 1).c_str());
server = server.substr(0, server.find(':'));
}
long long int startTime = Util::bootSecs();
long long int abortTime = conf.getInteger("abort");
Socket::Connection conn(server, port, false);
//url:
DEBUG_MSG(DLVL_INFO, "url %s server: %s port: %d", url.c_str(), server.c_str(), port);
std::string urlPrependStuff= url.substr(0, url.rfind("/")+1);
DEBUG_MSG(DLVL_INFO, "prepend stuff: %s", urlPrependStuff.c_str());
if (!conn) {
conn = Socket::Connection(server, port, false);
}
unsigned int pos = 0;
HTTP::Parser H;
H.url = url;
H.SetHeader("Host", server + ":" + JSON::Value((long long)port).toString());
H.SendRequest(conn);
H.Clean();
while (conn && (!conn.spool() || !H.Read(conn))) {}
H.BuildResponse();
std::set<seekPos> currentPos;
std::vector<StreamData> streamData;
//DEBUG_MSG(DLVL_INFO, "body received: %s", H.body.c_str()); //keeps giving empty stuff :(
// DEBUG_MSG(DLVL_INFO, "url %s ", url.c_str());
//std::ifstream in(url.c_str());
//std::string s((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());
if(!parseXML(H.body, currentPos,streamData)){
DEBUG_MSG(DLVL_FAIL, "Manifest parsing failed. body: \n %s", H.body.c_str());
if (conf.getString("mode") == "validate") {
long long int endTime = Util::bootSecs();
std::cout << startTime << ", " << endTime << ", " << (endTime - startTime) << ", " << pos << std::endl;
}
return -1;
}
H.Clean();
DEBUG_MSG(DLVL_INFO, "*********");
DEBUG_MSG(DLVL_INFO, "*SUMMARY*");
DEBUG_MSG(DLVL_INFO, "*********");
DEBUG_MSG(DLVL_INFO, "num streams: %lu", streamData.size());
for(unsigned int i=0; i<streamData.size();i++){
DEBUG_MSG(DLVL_INFO, "");
DEBUG_MSG(DLVL_INFO, "ID in vector %d", i);
DEBUG_MSG(DLVL_INFO, "trackID %ld", streamData[i].trackID);
DEBUG_MSG(DLVL_INFO, "adaptationSet %d", streamData[i].adaptationSet);
DEBUG_MSG(DLVL_INFO, "trackType (audio 0x02, video 0x01) %d", streamData[i].trackType);
DEBUG_MSG(DLVL_INFO, "TimeScale %ld", streamData[i].timeScale);
DEBUG_MSG(DLVL_INFO, "Media string %s", streamData[i].media.c_str());
DEBUG_MSG(DLVL_INFO, "Init string %s", streamData[i].initialization.c_str());
}
DEBUG_MSG(DLVL_INFO, "");
for(unsigned int i=0; i<streamData.size();i++){ //get init url
static char charBuf[512];
snprintf(charBuf, 512, streamData[i].initialization.c_str(), streamData[i].trackID);
streamData[i].initURL.assign(charBuf);
DEBUG_MSG(DLVL_INFO, "init url for adaptationSet %d trackID %ld: %s ", streamData[i].adaptationSet, streamData[i].trackID, streamData[i].initURL.c_str());
}
while(currentPos.size() && (abortTime <= 0 || Util::bootSecs() < startTime + abortTime)){
//DEBUG_MSG(DLVL_INFO, "next url: %s", currentPos.begin()->url.c_str());
//match adaptation set and track id?
int tempID=0;
for(unsigned int i=0; i<streamData.size();i++){
if( streamData[i].trackID == currentPos.begin()->trackID && streamData[i].adaptationSet == currentPos.begin()->adaptationSet ) tempID=i;
}
if (!conn) {
conn = Socket::Connection(server,port, false);
}
HTTP::Parser H;
H.url = urlPrependStuff;
H.url.append(currentPos.begin()->url);
DEBUG_MSG(DLVL_INFO, "Retrieving segment: %s (%llu-%llu)", H.url.c_str(),currentPos.begin()->seekTime, currentPos.begin()->seekTime+currentPos.begin()->duration);
H.SetHeader("Host", server + ":" + JSON::Value((long long)port).toString()); //wut?
H.SendRequest(conn);
H.Clean();
while (conn && (!conn.spool() || !H.Read(conn))) {} //block until the full response has been read
//H.body now holds the raw segment data
if(!H.body.size()){
DEBUG_MSG(DLVL_FAIL, "No data downloaded from %s",H.url.c_str());
break;
}
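//Parse the downloaded segment as a sequence of MP4 boxes; a valid media segment must contain at least one mdat (media data) box.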
size_t beforeParse = H.body.size();
MP4::Box mp4Data;
bool mdatSeen = false;
while(mp4Data.read(H.body)){
if (mp4Data.isType("mdat")){
mdatSeen = true;
}
}
if (!mdatSeen){
DEBUG_MSG(DLVL_FAIL, "No mdat present. Sadface. :-(");
break;
}
if(H.body.size()){
DEBUG_MSG(DLVL_FAIL, "%lu bytes left in body. Assuming horrible things...", H.body.size());//,H.body.c_str());
std::cerr << H.body << std::endl;
if (beforeParse == H.body.size()){
break;
}
}
H.Clean();
pos = 1000*(currentPos.begin()->seekTime+currentPos.begin()->duration)/streamData[tempID].timeScale;
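//Pacing sketch (illustrative numbers): with timeScale 90000 and seekTime+duration 900000 ticks,
//pos = 1000*900000/90000 = 10000 ms. If only 2 s of wall-clock time have passed, (2+5)*1000 = 7000 < 10000,
//so validate mode sleeps 3000 ms below, keeping the download roughly 5 seconds ahead of real-time playback.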
if (conf.getString("mode") == "validate" && (Util::bootSecs()-startTime+5)*1000 < pos) {
Util::wait(pos - (Util::bootSecs()-startTime+5)*1000);
}
currentPos.erase(currentPos.begin());
}
if (conf.getString("mode") == "validate") {
long long int endTime = Util::bootSecs();
std::cout << startTime << ", " << endTime << ", " << (endTime - startTime) << ", " << pos << std::endl;
}
return 0;
}

View file

@@ -13,23 +13,37 @@
#include <signal.h>
#include <mist/flv_tag.h> //FLV support
#include <mist/config.h>
#include <mist/timing.h>
#include <sys/sysinfo.h>
///Debugging tool for FLV data.
/// Expects FLV data through stdin, outputs human-readable information to stderr.
int main(int argc, char ** argv){
Util::Config conf = Util::Config(argv[0]);
conf.addOption("filter", JSON::fromString("{\"arg\":\"num\", \"short\":\"f\", \"long\":\"filter\", \"default\":0, \"help\":\"Only print info about this tag type (8 = audio, 9 = video, 0 = all)\"}"));
conf.addOption("mode", JSON::fromString("{\"long\":\"mode\", \"arg\":\"string\", \"short\":\"m\", \"default\":\"analyse\", \"help\":\"What to do with the stream. Valid modes are 'analyse', 'validate', 'output'.\"}"));
conf.parseArgs(argc, argv);
bool analyse = conf.getString("mode") == "analyse";
bool validate = conf.getString("mode") == "validate";
long long filter = conf.getInteger("filter");
FLV::Tag flvData; // Temporary storage for incoming FLV data.
long long int endTime = 0;
long long int upTime = Util::bootSecs();
while ( !feof(stdin)){
if (flvData.FileLoader(stdin)){
if (!filter || filter == flvData.data[0]){
std::cout << "[" << flvData.tagTime() << "+" << flvData.offset() << "] " << flvData.tagType() << std::endl;
if (analyse){
if (!filter || filter == flvData.data[0]){
std::cout << "[" << flvData.tagTime() << "+" << flvData.offset() << "] " << flvData.tagType() << std::endl;
}
}
endTime = flvData.tagTime();
}
}
long long int finTime = Util::bootSecs();
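//validate output columns: start and end of the run in seconds since boot, wall-clock duration of the
//transfer in seconds, and the last tag timestamp in milliseconds (the media duration).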
if (validate){
std::cout << upTime << ", " << finTime << ", " << (finTime-upTime) << ", " << endTime << std::endl;
}
return 0;
}

View file

@@ -0,0 +1,199 @@
/// \file hls_analyser.cpp
/// Contains the code for the HLS Analysing tool.
#include <mist/config.h>
#include <mist/timing.h>
#include <mist/defines.h>
#include <mist/http_parser.h>
#include <iostream>
class HLSPart {
public:
HLSPart(std::string u, unsigned int s, unsigned int d) {
uri = u;
start = s;
dur = d;
}
std::string uri;
unsigned int start;
unsigned int dur;
};
std::deque<HLSPart> getParts(std::string & body, std::string & uri) {
size_t slashPos = uri.rfind('/');
std::string uri_prefix = uri.substr(0, slashPos + 1);
std::deque<HLSPart> out;
std::stringstream data(body);
std::string line;
unsigned int start = 0;
unsigned int durat = 0;
do {
line = "";
std::getline(data, line);
if (line.size() && *line.rbegin() == '\r'){
line.resize(line.size() - 1);
}
if (line != "") {
if (line[0] != '#') {
out.push_back(HLSPart(uri_prefix + line, start, durat));
start += durat;
} else {
if (line.substr(0, 8) == "#EXTINF:") {
durat = atof(line.substr(8).c_str()) * 1000;
}
}
}
} while (line != "");
return out;
}
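//Example (sketch, hypothetical segment names): for a playlist body such as
//  #EXTM3U
//  #EXTINF:10.000,
//  segment0.ts
//  #EXTINF:10.000,
//  segment1.ts
//and uri "/hls/test/index.m3u8", getParts yields two parts:
//  {uri: "/hls/test/segment0.ts", start: 0, dur: 10000}
//  {uri: "/hls/test/segment1.ts", start: 10000, dur: 10000}
//(#EXTINF durations are given in seconds and stored in milliseconds).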
int main(int argc, char ** argv) {
Util::Config conf = Util::Config(argv[0]);
conf.addOption("mode", JSON::fromString("{\"long\":\"mode\", \"arg\":\"string\", \"short\":\"m\", \"default\":\"analyse\", \"help\":\"What to do with the stream. Valid modes are 'analyse', 'validate', 'output'.\"}"));
conf.addOption("url", JSON::fromString("{\"arg_num\":1, \"arg\":\"string\", \"help\":\"URL to HLS stream index file to retrieve.\"}"));
conf.addOption("abort", JSON::fromString("{\"long\":\"abort\", \"short\":\"a\", \"arg\":\"integer\", \"default\":-1, \"help\":\"Abort after this many seconds of downloading. Negative values mean unlimited, which is the default.\"}"));
conf.parseArgs(argc, argv);
conf.activate();
unsigned int port = 80;
std::string url = conf.getString("url");
if (url.substr(0, 7) != "http://") {
DEBUG_MSG(DLVL_FAIL, "The URL must start with http://");
return -1;
}
url = url.substr(7);
if (url.find('/') == std::string::npos){
url += '/'; //no path after the host would make the substr calls below throw out_of_range
}
std::string server = url.substr(0, url.find('/'));
url = url.substr(url.find('/'));
if (server.find(':') != std::string::npos) {
port = atoi(server.substr(server.find(':') + 1).c_str());
server = server.substr(0, server.find(':'));
}
long long int startTime = Util::bootSecs();
long long int abortTime = conf.getInteger("abort");
std::deque<HLSPart> parts;
Socket::Connection conn;
std::string playlist = url;
bool repeat = false;
std::string lastDown = "";
unsigned int pos = 0;
bool output = (conf.getString("mode") == "output");
do {
repeat = false;
while (url.size() > 4 && (url.find(".m3u") != std::string::npos || url.find(".m3u8") != std::string::npos)) {
playlist = url;
DEBUG_MSG(DLVL_DEVEL, "Retrieving playlist: %s", url.c_str());
if (!conn) {
conn = Socket::Connection(server, port, false);
}
HTTP::Parser H;
H.url = url;
H.SetHeader("Host", server + ":" + JSON::Value((long long)port).toString());
H.SendRequest(conn);
H.Clean();
while (conn && (abortTime <= 0 || Util::bootSecs() < startTime + abortTime) && (!conn.spool() || !H.Read(conn))) {}
parts = getParts(H.body, url);
if (!parts.size()) {
DEBUG_MSG(DLVL_FAIL, "Playlist parsing error - cancelling. state: %s/%s body size: %u", conn ? "Conn" : "Disconn", (Util::bootSecs() < (startTime + abortTime))?"NoTimeout":"TimedOut", H.body.size());
if (conf.getString("mode") == "validate") {
long long int endTime = Util::bootSecs();
std::cout << startTime << ", " << endTime << ", " << (endTime - startTime) << ", " << pos << std::endl;
}
return -1;
}
H.Clean();
url = parts.begin()->uri;
}
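//Live playlists: drop every entry up to the segment we already downloaded last. If no newer segments
//follow it yet, wait a second and re-fetch the playlist; otherwise drop the already-downloaded entry
//and continue with the new segments.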
if (lastDown != "") {
while (parts.size() && parts.begin()->uri != lastDown) {
parts.pop_front();
}
if (parts.size() < 2) {
repeat = true;
Util::sleep(1000);
continue;
}
parts.pop_front();
}
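//lastRepeat/numRepeat track retries of a single segment (identified by its start time); the same
//segment is re-queued up to 500 times before the download is aborted.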
unsigned int lastRepeat = 0;
unsigned int numRepeat = 0;
while (parts.size() > 0 && (abortTime <= 0 || Util::bootSecs() < startTime + abortTime)) {
HLSPart part = *parts.begin();
parts.pop_front();
DEBUG_MSG(DLVL_DEVEL, "Retrieving segment: %s (%u-%u)", part.uri.c_str(), part.start, part.start + part.dur);
if (!conn) {
conn = Socket::Connection(server, port, false);
}
HTTP::Parser H;
H.url = part.uri;
H.SetHeader("Host", server + ":" + JSON::Value((long long)port).toString());
H.SendRequest(conn);
H.Clean();
while (conn && (abortTime <= 0 || Util::bootSecs() < startTime + abortTime) && (!conn.spool() || !H.Read(conn))) {}
if (H.GetHeader("Content-Length") != "") {
if (H.body.size() != atoi(H.GetHeader("Content-Length").c_str())) {
DEBUG_MSG(DLVL_FAIL, "Expected %s bytes of data, but only received %lu.", H.GetHeader("Content-Length").c_str(), H.body.size());
if (lastRepeat != part.start || numRepeat < 500){
DEBUG_MSG(DLVL_FAIL,"Retrying");
if (lastRepeat != part.start){
numRepeat = 0;
lastRepeat = part.start;
}else{
numRepeat ++;
}
parts.push_front(part);
Util::wait(1000);
continue;
}else{
DEBUG_MSG(DLVL_FAIL,"Aborting further downloading");
repeat = false;
break;
}
}
}
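//An MPEG-TS stream consists of fixed 188-byte packets, so a complete segment must be an exact multiple of 188 bytes.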
if (H.body.size() % 188){
DEBUG_MSG(DLVL_FAIL, "Expected a multiple of 188 bytes, received %d bytes", H.body.size());
if (lastRepeat != part.start || numRepeat < 500){
DEBUG_MSG(DLVL_FAIL,"Retrying");
if (lastRepeat != part.start){
numRepeat = 0;
lastRepeat = part.start;
}else{
numRepeat ++;
}
parts.push_front(part);
Util::wait(1000);
continue;
}else{
DEBUG_MSG(DLVL_FAIL,"Aborting further downloading");
repeat = false;
break;
}
}
pos = part.start + part.dur;
if (conf.getString("mode") == "validate" && (Util::bootSecs()-startTime+5)*1000 < pos) {
Util::wait(pos - (Util::bootSecs()-startTime+5)*1000);
}
lastDown = part.uri;
if (output) {
std::cout << H.body;
}
H.Clean();
}
} while (repeat);
DEBUG_MSG(DLVL_INFO, "mode: %s", conf.getString("mode").c_str());
if (conf.getString("mode") == "validate") {
long long int endTime = Util::bootSecs();
std::cout << startTime << ", " << endTime << ", " << (endTime - startTime) << ", " << pos << std::endl;
}
return 0;
}

View file

@@ -8,9 +8,13 @@
#include <mist/ogg.h>
#include <mist/config.h>
#include <mist/theora.h>
#include <mist/defines.h>
#include <sys/sysinfo.h>
#include <cmath>
///\todo rewrite this analyser.
namespace Analysers {
std::string Opus_prettyPacket(const char * part, int len){
namespace Analysers{
std::string Opus_prettyPacket(const char * part,int len){
if (len < 1){
return "Invalid packet (0 byte length)";
}
@@ -114,12 +118,8 @@ namespace Analysers {
return r.str();
}
int analyseOGG(int argc, char ** argv){
Util::Config conf = Util::Config(argv[0]);
conf.addOption("pages", JSON::fromString("{\"long\":\"pages\", \"short\":\"p\", \"long_off\":\"nopages\", \"short_off\":\"P\", \"default\":0, \"help\":\"Enable/disable printing of Ogg pages\"}"));
conf.parseArgs(argc, argv);
std::map<int, std::string> sn2Codec;
int analyseOGG(Util::Config & conf){
std::map<int,std::string> sn2Codec;
std::string oggBuffer;
OGG::Page oggPage;
int kfgshift;
@@ -223,12 +223,100 @@
}
std::cout << std::endl;
}
return 0;
}
int validateOGG(bool analyse){
std::map<int,std::string> sn2Codec;
std::string oggBuffer;
OGG::Page oggPage;
long long int lastTime =0;
double mspft = 0;
std::map<long long unsigned int,long long int> oggMap;
theora::header * theader = 0;
bool seenIDheader = false;
struct sysinfo sinfo;
sysinfo(&sinfo);
long long int upTime = sinfo.uptime;
while (std::cin.good()){
for (unsigned int i = 0; (i < 1024) && (std::cin.good()); i++){
oggBuffer += std::cin.get();
}
while (oggPage.read(oggBuffer)){//reading ogg to ogg::page
if(oggMap.find(oggPage.getBitstreamSerialNumber()) == oggMap.end()){
//first page for this serial number: parse the header to determine the codec (vorbis or theora)
if (memcmp(oggPage.getSegment(0)+1, "vorbis", 6) == 0){
sn2Codec[oggPage.getBitstreamSerialNumber()] = "vorbis";
vorbis::header vheader((char*)oggPage.getSegment(0),oggPage.getSegmentLen(0));
//if (vheader){
oggMap[oggPage.getBitstreamSerialNumber()] = ntohl(vheader.getAudioSampleRate());
//oggPage.setInternalCodec(sn2Codec[oggPage.getBitstreamSerialNumber()]);
//}
}else if(memcmp(oggPage.getSegment(0)+1, "theora", 6) == 0){
sn2Codec[oggPage.getBitstreamSerialNumber()] = "theora";
if(!seenIDheader){
if (theader){delete theader; theader = 0;}
theader = new theora::header((char*)oggPage.getSegment(0),oggPage.getSegmentLen(0));
if(theader->getHeaderType() == 0){
seenIDheader = true;
}
mspft = (double)(theader->getFRD() * 1000) / theader->getFRN();
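//mspft = milliseconds per frame. Worked example (illustrative): for a 30000/1001 fps stream,
//FRN = 30000 and FRD = 1001, so mspft = 1001*1000/30000 ~ 33.37 ms; a granule position whose
//upper bits (granulePosition >> KFGShift) equal 300 then maps to roughly 300*33.37 ~ 10010 ms below.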
}
}
}
if (sn2Codec[oggPage.getBitstreamSerialNumber()] == "vorbis"){
// std::cout <<oggPage.toPrettyString() << std::endl<< "--------------------------------" << std::endl;
lastTime = (double)oggPage.getGranulePosition()/(double)oggMap[oggPage.getBitstreamSerialNumber()];
}else if(sn2Codec[oggPage.getBitstreamSerialNumber()] == "theora"){
//theora getKFGShift()
if(oggPage.getGranulePosition() != 0xffffffffffffffff){
lastTime = ((oggPage.getGranulePosition()>>(int)theader->getKFGShift())*mspft);
}
}
if(analyse){
std::cout << oggPage.toPrettyString() << std::endl;
}
}
//check whether the data is arriving at least as fast as it plays back
sysinfo(&sinfo);
long long int tTime = sinfo.uptime;
if((tTime-upTime) > 5 && (tTime-upTime)>(int)(lastTime) ){
std::cerr << "data received too slowly" << std::endl;
return 42;
}
}
if (theader){delete theader; theader = 0;}
sysinfo(&sinfo);
long long int finTime = sinfo.uptime;
fprintf(stdout,"time since boot,time at completion,real time duration of data receival,video duration\n");
fprintf(stdout, "%lli000,%lli000,%lli000,%lli \n",upTime,finTime,finTime-upTime,lastTime);
//print last time
return 0;
}
}
int main(int argc, char ** argv){
return Analysers::analyseOGG(argc, argv);
Util::Config conf = Util::Config(argv[0]);
conf.addOption("pages", JSON::fromString("{\"long\":\"pages\", \"short\":\"p\", \"long_off\":\"nopages\", \"short_off\":\"P\", \"default\":0, \"help\":\"Enable/disable printing of Ogg pages\"}"));
conf.addOption("analyse", JSON::fromString("{\"long\":\"analyse\", \"short\":\"a\", \"default\":1, \"long_off\":\"notanalyse\", \"short_off\":\"b\", \"help\":\"Analyse a file's contents (-a), or don't (-b) returning false on error. Default is analyse.\"}"));
conf.addOption("validate", JSON::fromString("{\"long\":\"validate\", \"short\":\"V\", \"default\":0, \"long_off\":\"notvalidate\", \"short_off\":\"x\", \"help\":\"Validate (-V) the file contents or don't validate (-X) its integrity, returning false on error. Default is don't validate.\"}"));
conf.parseArgs(argc, argv);
conf.activate();
if (conf.getBool("validate")){
return Analysers::validateOGG(conf.getBool("analyse"));
}else if(conf.getBool("analyse")){
return Analysers::analyseOGG(conf);
}
}