From c0027d8eae7323fd7cc28f4ae7befcb4a32e4470 Mon Sep 17 00:00:00 2001 From: Gjmustc Date: Tue, 19 Nov 2024 16:46:28 +0000 Subject: [PATCH] first version of pipeline, still disable audio-packet divert analysis --- .gitignore | 11 + README.MD | 4 + code/postprocess/calculate_score.py | 0 code/postprocess/compress_logs.py | 48 ++ code/postprocess/draw.py | 253 +++++++++ code/postprocess/metrics/eval.py | 46 ++ code/postprocess/metrics/eval_audio.py | 121 ++++ code/postprocess/metrics/eval_network.py | 70 +++ code/postprocess/metrics/eval_video.py | 154 +++++ .../metrics/utils/audio_eval_method.py | 60 ++ code/postprocess/metrics/utils/audio_info.py | 58 ++ .../metrics/utils/net_eval_method.py | 75 +++ code/postprocess/metrics/utils/net_info.py | 36 ++ .../metrics/utils/ocr_frame_align.sh | 537 ++++++++++++++++++ .../metrics/utils/video_align_method.py | 61 ++ .../metrics/utils/video_eval_method.py | 52 ++ code/postprocess/metrics/utils/video_info.py | 92 +++ code/scripts/pipeline.sh | 34 ++ code/scripts/postprocess.sh | 78 +++ code/scripts/preparedirs.sh | 58 ++ code/scripts/set_network_configs.sh | 69 +++ code/scripts/transmit.sh | 167 ++++++ code/setconfig/set_media_config.py | 203 +++++++ code/setconfig/set_network_trace.py | 61 ++ .../models/1Mbps/BandwidthEstimator.py | 20 + .../pyinfer/cmdinfer/BandwidthEstimator.py | 20 + .../runtime/pyinfer/cmdinfer/cmdinfer.cc | 38 ++ .../runtime/pyinfer/cmdinfer/cmdinfer.h | 21 + .../runtime/pyinfer/cmdinfer/cmdinfer.py | 58 ++ .../cmdinfer/peerconnection_serverless | 44 ++ code/transmit/tc_setup.py | 72 +++ 31 files changed, 2621 insertions(+) create mode 100644 .gitignore create mode 100644 README.MD create mode 100644 code/postprocess/calculate_score.py create mode 100644 code/postprocess/compress_logs.py create mode 100644 code/postprocess/draw.py create mode 100755 code/postprocess/metrics/eval.py create mode 100755 code/postprocess/metrics/eval_audio.py create mode 100755 code/postprocess/metrics/eval_network.py 
create mode 100755 code/postprocess/metrics/eval_video.py create mode 100644 code/postprocess/metrics/utils/audio_eval_method.py create mode 100644 code/postprocess/metrics/utils/audio_info.py create mode 100644 code/postprocess/metrics/utils/net_eval_method.py create mode 100644 code/postprocess/metrics/utils/net_info.py create mode 100755 code/postprocess/metrics/utils/ocr_frame_align.sh create mode 100644 code/postprocess/metrics/utils/video_align_method.py create mode 100644 code/postprocess/metrics/utils/video_eval_method.py create mode 100644 code/postprocess/metrics/utils/video_info.py create mode 100644 code/scripts/pipeline.sh create mode 100755 code/scripts/postprocess.sh create mode 100755 code/scripts/preparedirs.sh create mode 100755 code/scripts/set_network_configs.sh create mode 100755 code/scripts/transmit.sh create mode 100644 code/setconfig/set_media_config.py create mode 100644 code/setconfig/set_network_trace.py create mode 100644 code/transmit/models/1Mbps/BandwidthEstimator.py create mode 100644 code/transmit/runtime/pyinfer/cmdinfer/BandwidthEstimator.py create mode 100644 code/transmit/runtime/pyinfer/cmdinfer/cmdinfer.cc create mode 100644 code/transmit/runtime/pyinfer/cmdinfer/cmdinfer.h create mode 100755 code/transmit/runtime/pyinfer/cmdinfer/cmdinfer.py create mode 100755 code/transmit/runtime/pyinfer/cmdinfer/peerconnection_serverless create mode 100644 code/transmit/tc_setup.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..89ab417 --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +**/__pycache__/ +code/transmit/runtime/exe/* +code/transmit/runtime/dll/* +data/input/audios/* +data/input/videos/* +data/output/util_logs/* +data/output/results/figures/* +data/output/results/metrics/* +data/temp/network_traces/* +data/temp/raw_logs/* +data/workdir/* \ No newline at end of file diff --git a/README.MD b/README.MD new file mode 100644 index 0000000..63412e5 --- /dev/null +++ b/README.MD @@ -0,0 +1,4 @@ +## 
Implementation of the ONL-EMU +before running the pipeline.sh script, you should change the current directory to the onl-emu directory. +And change the subprocess exe path in peerconnection_serverless to absolute path. +Running this script will complete a series of bandwidth estimation performance tests for various network environments, video scenarios, and different models. \ No newline at end of file diff --git a/code/postprocess/calculate_score.py b/code/postprocess/calculate_score.py new file mode 100644 index 0000000..e69de29 diff --git a/code/postprocess/compress_logs.py b/code/postprocess/compress_logs.py new file mode 100644 index 0000000..f2d63bc --- /dev/null +++ b/code/postprocess/compress_logs.py @@ -0,0 +1,48 @@ +import re +import sys +import argparse +import os + +def compress_logs(log_file, patterns, output_path): + patterns = [re.compile(pattern) for pattern in patterns] + if os.path.exists(output_path): + os.remove(output_path) + with open(output_path, 'w') as outfile: + with open(log_file, 'r') as infile: + for line in infile: + for pattern in patterns: + match = pattern.search(line) + if match: + outfile.write(line) + break + +def get_output_path(input_path, base_input_dir, base_output_dir): + relative_path = os.path.relpath(input_path, base_input_dir) + return os.path.join(base_output_dir, relative_path) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Process and compress log files.') + parser.add_argument('--receiver_log', type=str, required=True, help='receiver log file path') + parser.add_argument('--sender_log', type=str, required=True, help='sender log file path') + parser.add_argument('--receiver_patterns', type=lambda s: [p for p in s.split('|') if p], help='pipe-separated list of receiver log regex patterns') + parser.add_argument('--sender_patterns', type=lambda s: [p for p in s.split('|') if p], help='pipe-separated list of sender log regex patterns') + parser.add_argument('--base_input_dir', type=str, 
required=True, help='Base input directory') + parser.add_argument('--base_output_dir', type=str, required=True, help='Base output directory') + + args = parser.parse_args() + + receiver_log = args.receiver_log + sender_log = args.sender_log + receiver_patterns = args.receiver_patterns + sender_patterns = args.sender_patterns + base_input_dir = args.base_input_dir + base_output_dir = args.base_output_dir + + output_path = get_output_path(receiver_log, base_input_dir, base_output_dir) + os.makedirs(os.path.dirname(output_path), exist_ok=True) + compress_logs(receiver_log, receiver_patterns, output_path) + + output_path = get_output_path(sender_log, base_input_dir, base_output_dir) + os.makedirs(os.path.dirname(output_path), exist_ok=True) + compress_logs(sender_log, sender_patterns, output_path) + diff --git a/code/postprocess/draw.py b/code/postprocess/draw.py new file mode 100644 index 0000000..50a4726 --- /dev/null +++ b/code/postprocess/draw.py @@ -0,0 +1,253 @@ +import argparse +import re +import matplotlib.pyplot as plt +import os +import json +from collections import defaultdict +import itertools + +def process_receiver_log(log_file, patterns, bitrate_interval=1000, loss_interval=1, delay_interval=20, first_delay_=200): + + bitrate = defaultdict(float) + lossrate = defaultdict(float) + delay = defaultdict(float) + time_delta = defaultdict(float) + payload_size_dict = defaultdict(int) + + first_packet_ = True + first_packet_timestamp = 0 + loss_util_count = loss_interval + ed_packet_arrivaltime = 0 + st_packet_sequencenum = 0 + ed_packet_sequencenum = 0 + first_packet_timediff = 0 + delay_util_count = delay_interval + delay_cumulative = 0 + last_time = 0 + with open(log_file, 'r') as file: + for line in file: + for pattern in patterns: + match = pattern.search(line) + if match: + if pattern == patterns[0]: + # print("Matched receiver pattern 1") + log_dict = json.loads(match.group(1)) + packet_info = log_dict.get('packetInfo',{}) + arrival_time = 
packet_info.get('arrivalTimeMs',0) + payload_size = packet_info.get('payloadSize',0) + seqnum = packet_info.get('header').get('sequenceNumber', 0) + sendtime_stamp = packet_info.get('header').get('sendTimestamp', 0) + + loss_util_count -= 1 + delay_util_count -= 1 + if first_packet_: + st_packet_sequencenum = seqnum + first_packet_timediff = arrival_time - sendtime_stamp + first_packet_timestamp = arrival_time + first_packet_ = False + if loss_util_count == loss_interval -1: + st_packet_sequencenum = seqnum + if loss_util_count == 0: + ed_packet_arrivaltime = arrival_time + ed_packet_sequencenum = seqnum + lossrate[ed_packet_arrivaltime - first_packet_timestamp] = (ed_packet_sequencenum - st_packet_sequencenum +1 - loss_interval)/(ed_packet_sequencenum - st_packet_sequencenum+1)*100 + loss_util_count = loss_interval + delay_cumulative += (arrival_time - sendtime_stamp - first_packet_timediff + first_delay_) + if delay_util_count == 0: + delay[arrival_time - first_packet_timestamp] = delay_cumulative / delay_interval + delay_util_count = delay_interval + delay_cumulative = 0 + if last_time != 0: + time_delta[(arrival_time - first_packet_timestamp)/1000] = arrival_time - last_time + last_time = arrival_time + payload_size_dict[(arrival_time - first_packet_timestamp)/bitrate_interval] = payload_size + bitrate[(arrival_time - first_packet_timestamp)//bitrate_interval] += payload_size * 8 + + else: + print("Invalid pattern") + pass + for key in bitrate: + bitrate[key] /= 1e6 + return bitrate, lossrate, delay, time_delta, payload_size_dict + +def process_sender_log(log_file, patterns, bwe_interval=1000, bitrate_interval=1000): + bandwidth_estimate = defaultdict(float) + bitrate = defaultdict(float) + time_delta = defaultdict(float) + resolution_vary = {} + first_packet_pattern1 = True + first_packet_pattern2 = True + first_packet_timestamp_pattern1 = 0 + first_packet_timestamp_pattern2 = 0 + last_time = 0 + with open(log_file, 'r') as file: + for line in file: + for 
pattern in patterns: + match = pattern.search(line) + if match: + if pattern == patterns[0]: + # print("Matched sender pattern 1") + sendtime = float(match.group(2)) + bandwidth = float(match.group(1)) + if first_packet_pattern1: + first_packet_timestamp_pattern1 = sendtime + first_packet_pattern1 = False + bandwidth_estimate[(sendtime - first_packet_timestamp_pattern1)/bwe_interval] = bandwidth/1e6 # convert to Mbps and seconds + elif pattern == patterns[1]: + # print("Matched sender pattern 2") + seqnum = int(match.group(1)) + sendtime = float(match.group(2)) + send_payload_size = float(match.group(3)) + if first_packet_pattern2: + first_packet_timestamp_pattern2 = sendtime + first_packet_pattern2 = False + bitrate[(sendtime - first_packet_timestamp_pattern2)//bitrate_interval] += send_payload_size*8 + if last_time != 0: + time_delta[(sendtime - first_packet_timestamp_pattern2)/1000] = sendtime - last_time + last_time = sendtime + elif pattern == patterns[2]: + # print("Matched sender pattern 3") + resolution_vary[(int(match.group(3))-first_packet_timestamp_pattern2)/1000] = (int(match.group(1)), int(match.group(2))) + else: + print("Invalid pattern") + pass + for key in bitrate: + bitrate[key] /= 1e6 + return bandwidth_estimate, bitrate, time_delta, resolution_vary + + +def draw( receive_bitrate, + receive_lossrate, + receive_delay, + receive_time_delta, + receive_payload_size, + send_bandwidth_estimate, + send_bitrate, + send_time_delta, + send_resolution_vary, + log_path, + output_path, + receive_bitrate_interval=1000, + send_bitrate_interval=1000, + duration=30): + modelname = log_path.split('/')[-2] + network = log_path.split('/')[-3] + videoname = log_path.split('/')[-4] + + receive_bitrate = dict(sorted(receive_bitrate.items())) + receive_lossrate = dict(sorted(receive_lossrate.items())) + receive_delay = dict(sorted(receive_delay.items())) + receive_time_delta = dict(sorted(receive_time_delta.items())) + receive_payload_size = 
dict(sorted(receive_payload_size.items())) + send_bandwidth_estimate = dict(sorted(send_bandwidth_estimate.items())) + send_bitrate = dict(sorted(send_bitrate.items())) + send_time_delta = dict(sorted(send_time_delta.items())) + send_resolution_vary = dict(sorted(send_resolution_vary.items())) + + print(f"Drawing plot for {log_path}") + fig, axs = plt.subplots(4, 2, figsize=(25, 20)) + fig.suptitle('Video Streaming Analysis', fontsize=20) + axs[0, 0].scatter(send_bandwidth_estimate.keys(), send_bandwidth_estimate.values(), marker='o', label=modelname) + axs[0, 0].set_title('Bandwidth Estimate of '+ network + ' on '+ videoname) + axs[0, 0].set_xlabel(f'Time / s') + axs[0, 0].set_ylabel(f'Bandwidth Estimate / Mbps') + axs[0, 1].plot(send_bitrate.keys(), send_bitrate.values(), marker='o', label=modelname+'-Send') + axs[0, 1].plot(receive_bitrate.keys(), receive_bitrate.values(),marker='o', label=modelname+'-Receive') + axs[0, 1].set_title('Bitrate of '+ network + ' on '+ videoname) + axs[0, 1].set_xlabel(f'Time / {receive_bitrate_interval} ms') + axs[0, 1].set_ylabel(f'Bitrate / Mbps') + axs[1, 0].scatter(receive_lossrate.keys(), receive_lossrate.values(), marker='o', label=modelname) + axs[1, 0].set_title('Receiving Loss Rate of '+ network + ' on '+ videoname) + axs[1, 0].set_xlabel(f'Time / ms') + axs[1, 0].set_ylabel(f'Receiving Loss Ratio / %') + axs[1, 1].scatter(receive_delay.keys(), receive_delay.values(), marker='o', label=modelname) + axs[1, 1].set_title('Receiving Delay of '+ network + ' on '+ videoname) + axs[1, 1].set_xlabel(f'Time / ms') + axs[1, 1].set_ylabel(f'Receiving Delay / ms') + axs[2, 0].scatter(receive_time_delta.keys(), receive_time_delta.values(), marker='o', label=modelname) + axs[2, 0].set_title('Receiving Time Delta of '+ network + ' on '+ videoname) + axs[2, 0].set_xlabel(f'Time / s') + axs[2, 0].set_ylabel(f'Packet receiving Time Delta / ms') + axs[2, 1].scatter(send_time_delta.keys(), send_time_delta.values(), marker='o', 
label=modelname) + axs[2, 1].set_title('Sending Time Delta of '+ network + ' on '+ videoname) + axs[2, 1].set_xlabel(f'Time / s') + axs[2, 1].set_ylabel(f'Packet sending Time Delta / ms') + axs[3, 0].scatter(receive_payload_size.keys(), receive_payload_size.values(), marker='o', label=modelname) + axs[3, 0].set_title('Receiving Payload Size of '+ network + ' on '+ videoname) + axs[3, 0].set_xlabel(f'Time / s') + axs[3, 0].set_ylabel(f'Receiving Payload Size / Bytes') + send_resolution_vary_y = {time: resolution[1] for time, resolution in send_resolution_vary.items()} + axs[3, 1].scatter(send_resolution_vary.keys(), send_resolution_vary_y.values(), marker='o', label=modelname) + for time, resolution in send_resolution_vary.items(): + axs[3, 1].annotate(f'({resolution[0]},{resolution[1]})', (time, resolution[1]), textcoords="offset points", xytext=(0,10), ha='center') + axs[3, 1].set_title('Sending Resolution Vary of '+ network + ' on '+ videoname) + axs[3, 1].set_xlabel(f'Time / s') + axs[3, 1].set_xlim(0, duration) + + axs[3, 1].yaxis.set_ticks(list(send_resolution_vary_y.values())) + axs[3, 1].set_ylabel(f'Sending Resolution / px') + + for ax in axs.flat: + ax.legend() + ax.grid(True) + plt.tight_layout() + print(f"Saving plot to {output_path}") + plt.savefig(output_path) + +def get_output_path(input_path, base_input_dir, base_output_dir): + relative_path = os.path.relpath(input_path, base_input_dir) + return os.path.join(base_output_dir, relative_path) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Process and draw figures for log files.') + parser.add_argument('--receiver_log', type=str, required=True, help='receiver log file path') + parser.add_argument('--sender_log', type=str, required=True, help='sender log file path') + parser.add_argument('--receiver_patterns', type=lambda s: [p for p in s.split('|') if p], help='pipe-separated list of receiver log regex patterns') + parser.add_argument('--sender_patterns', type=lambda s: [p 
for p in s.split('|') if p], help='pipe-separated list of sender log regex patterns') + parser.add_argument('--base_input_dir', type=str, required=True, help='Base input directory') + parser.add_argument('--base_output_dir', type=str, required=True, help='Base output directory') + parser.add_argument('--duration', type=int, default=30, help='Duration of the video') + parser.add_argument('--receive_bitrate_interval', type=int, default=1000, help='Interval of receive bitrate') + parser.add_argument('--send_bitrate_interval', type=int, default=1000, help='Interval of send bitrate') + parser.add_argument('--first_delay', type=int, default=200, help='First delay of the video') + parser.add_argument('--loss_interval', type=int, default=1, help='Interval of loss rate') + parser.add_argument('--delay_interval', type=int, default=20, help='Interval of delay') + parser.add_argument('--verbose', action='store_true', help='Print verbose output') + + args = parser.parse_args() + if args.verbose: + print(f"processing logs") + + receiver_log = args.receiver_log + sender_log = args.sender_log + receiver_patterns = args.receiver_patterns + sender_patterns = args.sender_patterns + base_input_dir = args.base_input_dir + base_output_dir = args.base_output_dir + duration = args.duration + receive_bitrate_interval = args.receive_bitrate_interval + send_bitrate_interval = args.send_bitrate_interval + first_delay = args.first_delay + loss_interval = args.loss_interval + delay_interval = args.delay_interval + + receiver_patterns = [re.compile(pattern) for pattern in receiver_patterns] + sender_patterns = [re.compile(pattern) for pattern in sender_patterns] + + if receiver_log and sender_log: + if args.verbose: + print(f"Processing receiver log: {receiver_log} and sender log: {sender_log}") + + receive_bitrate, receive_lossrate, receive_delay, receive_time_delta, receive_payload_size = process_receiver_log( + receiver_log, receiver_patterns, bitrate_interval=receive_bitrate_interval, 
loss_interval=loss_interval, delay_interval=delay_interval, first_delay_=first_delay) + + send_bandwidth_estimate, send_bitrate, send_time_delta, send_resolution_vary = process_sender_log( + sender_log, sender_patterns, bwe_interval=send_bitrate_interval, bitrate_interval=send_bitrate_interval) + + output_path = get_output_path(receiver_log, base_input_dir, base_output_dir).replace('.log', '.png') + draw(receive_bitrate, receive_lossrate, receive_delay, receive_time_delta, receive_payload_size, + send_bandwidth_estimate, send_bitrate, send_time_delta, send_resolution_vary, + receiver_log, output_path, receive_bitrate_interval, send_bitrate_interval, duration) + else: + if args.verbose: + print(f"Skipping unmatched logs: receiver_log={receiver_log}, sender_log={sender_log}") \ No newline at end of file diff --git a/code/postprocess/metrics/eval.py b/code/postprocess/metrics/eval.py new file mode 100755 index 0000000..e3db8be --- /dev/null +++ b/code/postprocess/metrics/eval.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import argparse, json +from eval_video import VideoEvaluation, init_video_argparse, get_video_score +from eval_audio import AudioEvaluation, init_audio_argparse, get_audio_score, get_remote_ground +from eval_network import NetworkEvaluation, init_network_argparse, get_network_score + + +description = \ +''' +This script provide multi methods to evaluate quality of video, audio and network. 
+''' + +def init_argparse(): + video_parser = init_video_argparse() + audio_parser = init_audio_argparse() + network_parser = init_network_argparse() + parser = argparse.ArgumentParser(description=description, parents=[video_parser, audio_parser, network_parser], conflict_handler='resolve') + + args = parser.parse_args() + + return args + + +if __name__ == "__main__": + + args = init_argparse() + if args.scenario: + args = get_remote_ground(args) + + out_dict = {} + + out_dict["video"] = get_video_score(args) + out_dict["network"] = get_network_score(args) + # We don't consider audio now. Give full score for the audio directly. + out_dict["audio"] = 100.0 + # final_score = 0.2 * video + 0.1 * audio + (0.2 * delay_score + 0.2 * recv_rate_score + 0.3 * loss_score) + # We don't consider audio now. Give full score for the audio directly. + out_dict["final_score"] = 0.2 * out_dict["video"] + out_dict["network"] + 10 + if args.output: + with open(args.output, 'w') as f: + f.write(json.dumps(out_dict)) + else: + print(json.dumps(out_dict)) + diff --git a/code/postprocess/metrics/eval_audio.py b/code/postprocess/metrics/eval_audio.py new file mode 100755 index 0000000..8fa2e89 --- /dev/null +++ b/code/postprocess/metrics/eval_audio.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import subprocess, json, argparse, requests, os +from tempfile import NamedTemporaryFile +from utils.audio_info import AudioInfo +from utils.audio_eval_method import AudioEvalMethod, AudioEvalMethodDNSMOS + + +description = \ +''' +This script provide multi methods to evaluate audio quality. +For example, the method of DNSMOS https://github.com/microsoft/DNS-Challenge. 
+''' + + +class AudioEvaluation(): + def __init__(self, eval_method : AudioEvalMethod, args): + self.eval_method = eval_method + self.args = args + + def change_audio_config(self, audio_info : AudioInfo): + if audio_info.sample_rate in self.eval_method.required_sample_rate and audio_info.channel in self.eval_method.required_channel: + return None + output = NamedTemporaryFile('w+t', suffix=".%s" % (audio_info.format_name)) + cmd = ["ffmpeg", "-i", audio_info.audio_path, "-ar", self.eval_method.required_sample_rate[0], \ + "-ac", self.eval_method.required_channel[0], "-vn", "-y", output.name] + subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf8") + + return output + + def eval(self, dst_audio_path): + dst_audio_info = AudioInfo(dst_audio_path) + + # check audio type + fo_new_video = self.change_audio_config(dst_audio_info) + dst_audio_info = AudioInfo(fo_new_video.name) if fo_new_video else dst_audio_info + + audio_score = self.eval_method.eval(dst_audio_info) + # Use ground truth + if self.args.ground_audio < 0: + return audio_score + + return 100.0 if audio_score > self.args.ground_audio * self.args.binarize_bound else .0 + + +def init_audio_argparse(): + parser = argparse.ArgumentParser(description=description) + parser.add_argument("--output", type=str, default=None, help="the path of output file") + parser.add_argument("--scenario", type=str, default=None, help="the name of scenario") + parser.add_argument("--ground_service", type=str, default=None, help="the url where you want to get the score of ground truth") + # for audio evaluation + parser.add_argument("--audio_eval_method", type=str, default="dnsmos", choices=["dnsmos"], help="the method to evaluate audio, like DNSMOS") + parser.add_argument("--dst_audio", type=str, default=None, required=True, help="the path of destination audio") + parser.add_argument("--audio_sample_rate", type=str, default='16000', help="the sample rate of audio") + 
parser.add_argument("--audio_channel", type=str, default='1', help="the numbers of audio channels") + parser.add_argument("--ground_audio", type=float, default=-1, help="the audio score of a special scenario ground truth. -1 means not use ground") + parser.add_argument("--binarize_bound", type=float, default=0.6, help="the bound to binarize audio score") + # for DNSMOS + parser.add_argument("--dnsmos_uri", type=str, default=None, help="the uri to evaluate audio provided by DNSMOS") + parser.add_argument("--dnsmos_key", type=str, default=None, help="the key to evaluate audio provided by DNSMOS") + + return parser + + +def get_audio_score(args): + eval_method = None + + if args.audio_eval_method == "dnsmos": + eval_method = AudioEvalMethodDNSMOS(args.dnsmos_uri, args.dnsmos_key) + else: + raise ValueError("Not supoort such method to evaluate audio") + + audio_eval_tool = AudioEvaluation(eval_method, args) + audio_out = audio_eval_tool.eval(args.dst_audio) + + return audio_out + + +def get_remote_ground(args): + if args.ground_service[-4:] == "json": + with NamedTemporaryFile('w+t') as f: + cmd = ["wget", args.ground_service, "-O", f.name] + subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + resp = f.read() + if len(resp) == 0: + raise ValueError("Error request to ground service %s" % (args.ground_service)) + resp = json.loads(resp) + if args.scenario not in resp: + raise ValueError("Not find scenario of %s" % (args.scenario)) + scenario_score = resp[args.scenario] + else: + resp = requests.get("http://%s/get_ground_truth/%s" % (args.ground_service, args.scenario)).text + if len(resp) == 0: + raise ValueError("Error request to ground service %s" % (args.ground_service)) + scenario_score = json.loads(resp) + + if "ground_video" in args: + args.ground_video = scenario_score["video"] + if "ground_audio" in args: + args.ground_audio = scenario_score["audio"] + if "ground_recv_rate" in args: + args.ground_recv_rate = 
scenario_score["recv_rate"] + + return args + + +if __name__ == "__main__": + parser = init_audio_argparse() + args = parser.parse_args() + if args.scenario and args.ground_service: + args = get_remote_ground(args) + + out_dict = {} + out_dict["audio"] = get_audio_score(args) + + if args.output: + with open(args.output, 'w') as f: + f.write(json.dumps(out_dict)) + else: + print(json.dumps(out_dict)) \ No newline at end of file diff --git a/code/postprocess/metrics/eval_network.py b/code/postprocess/metrics/eval_network.py new file mode 100755 index 0000000..5dcb8f3 --- /dev/null +++ b/code/postprocess/metrics/eval_network.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import argparse, json +import numpy as np +from utils.net_info import NetInfo +from utils.net_eval_method import NetEvalMethod, NetEvalMethodNormal +from eval_audio import get_remote_ground + + +description = \ +''' +This script provide multi methods to evaluate network quality. +''' + + +class NetworkEvaluation(): + def __init__(self, eval_method : NetEvalMethod, args): + self.eval_method = eval_method + self.args = args + + def eval(self, dst_network_path): + dst_network_info = NetInfo(dst_network_path) + ret = self.eval_method.eval(dst_network_info) + + return ret + + +def get_network_score(args): + eval_method = None + + if args.network_eval_method == "normal": + eval_method = NetEvalMethodNormal(args.max_delay, args.ground_recv_rate) + else: + raise ValueError("Not supoort such method to evaluate network") + + network_eval_tool = NetworkEvaluation(eval_method, args) + network_out = network_eval_tool.eval(args.dst_network_log) + + return network_out + + +def init_network_argparse(): + parser = argparse.ArgumentParser(description=description) + parser.add_argument("--output", type=str, default=None, help="the path of output file. 
It will print the result in terminal if you don't specify its value.") + parser.add_argument("--scenario", type=str, default=None, help="the name of scenario") + parser.add_argument("--ground_service", type=str, default=None, help="the url where you want to get the score of ground truth") + # for network evaluation + parser.add_argument("--network_eval_method", type=str, default="normal", choices=["normal"], help="the method to evaluate network.") + parser.add_argument("--dst_network_log", type=str, required=True, default=None, help="the path of network log.") + parser.add_argument("--max_delay", type=float, default=400, help="the max packet delay.") + parser.add_argument("--ground_recv_rate", type=float, default=500, help="the receive rate of a special scenario ground truth.") + + return parser + + +if __name__ == "__main__": + parser = init_network_argparse() + args = parser.parse_args() + if args.scenario: + args = get_remote_ground(args) + + out_dict = {} + out_dict["network"] = get_network_score(args) + + if args.output: + with open(args.output, 'w') as f: + f.write(json.dumps(out_dict)) + else: + print(json.dumps(out_dict)) diff --git a/code/postprocess/metrics/eval_video.py b/code/postprocess/metrics/eval_video.py new file mode 100755 index 0000000..32fb512 --- /dev/null +++ b/code/postprocess/metrics/eval_video.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import argparse, json, subprocess +from tempfile import NamedTemporaryFile +from utils.video_info import VideoInfo +from utils.video_eval_method import VideoEvalMethodVmaf, VideoEvalMethod +from utils.video_align_method import VideoAlignMethodFfmpeg, VideoAlignMethod, VideoAlignMethodOcr +from eval_audio import get_remote_ground + + +description = \ +''' +This script provide multi methods to evaluate network quality. +For example, the method of Vmaf https://github.com/Netflix/vmaf. 
+''' + + +class VideoEvaluation(object): + def __init__(self, eval_method : VideoEvalMethod, align_method : VideoAlignMethod, args): + self.eval_method = eval_method + self.align_method = align_method + self.args = args + + ### ffmpeg orders below ### + + def change_video_type(self, video_info : VideoInfo, new_type): + output = NamedTemporaryFile('w+t', suffix=".%s" % (new_type)) + cmd = ["ffmpeg"] + if video_info.format_name == "rawvideo": + cmd.extend(["-video_size", video_info.video_size]) + cmd.extend(["-i", video_info.video_path, "-y"]) + if new_type == "yuv": + cmd.extend(["-video_size", video_info.video_size]) + cmd.append(output.name) + subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf8") + + return output + + def eval(self, src_video_path, dst_video_path): + # get the correspond VideoInfo according to path + src_video_info = VideoInfo(src_video_path, self.args.video_size) + dst_video_info = VideoInfo(dst_video_path, self.args.video_size) + + # init video size + if not src_video_info.video_size and not dst_video_info.video_size: + raise ValueError("Please specify the arguments --video_size") + + if not dst_video_info.video_size: + dst_video_info.update_video_size(src_video_info.video_size) + elif not src_video_info.video_size: + src_video_info.update_video_size(dst_video_info.video_size) + + # update video info + src_video_info.parse_video_by_ffprobe(video_size=src_video_info.video_size) + dst_video_info.parse_video_by_ffprobe(video_size=dst_video_info.video_size) + video_size = src_video_info.video_size + + # check video type + if src_video_info.format_name not in self.eval_method.support_type: + fo_new_src_video = self.change_video_type(src_video_info, self.eval_method.support_type_abbreviation[0]) + src_video_info = VideoInfo(fo_new_src_video.name) + if dst_video_info.format_name not in self.eval_method.support_type: + fo_new_dst_video = self.change_video_type(dst_video_info, 
self.eval_method.support_type_abbreviation[0]) + dst_video_info = VideoInfo(fo_new_dst_video.name) + + # keep same video type + if src_video_info.format_abbreviation != "y4m": + fo_new_src_video = self.change_video_type(src_video_info, "y4m") + src_video_info = VideoInfo(fo_new_src_video.name, video_size=video_size) + if dst_video_info.format_abbreviation != "y4m": + fo_new_dst_video = self.change_video_type(dst_video_info, "y4m") + dst_video_info = VideoInfo(fo_new_dst_video.name, video_size=video_size) + + if self.args.frame_align_method != "None": + if not src_video_info.fps and not dst_video_info.fps: + raise ValueError("Can't get fps from video") + # get align video from src video + tmp_fo = self.align_method.frame_align(src_video_info, dst_video_info) + # update video if need to do align + if tmp_fo: + fo_new_src_video = tmp_fo + src_video_info = VideoInfo(fo_new_src_video.name) + + tmp_fo = self.align_method.frame_align(dst_video_info, src_video_info) + if tmp_fo: + fo_new_dst_video = tmp_fo + dst_video_info = VideoInfo(fo_new_dst_video.name) + + # Calculate video quality + ret = self.eval_method.eval(src_video_info, dst_video_info) + # Use ground truth + if self.args.ground_video <= 0: + return ret + + return min(100.0, 100 * ret / self.args.ground_video) + + +def get_video_score(args): + eval_method = None + align_method = None + + if args.video_eval_method == "vmaf": + eval_method = VideoEvalMethodVmaf(args.model_path) + else: + raise ValueError("Not supoort such method to evaluate video") + + if args.frame_align_method == "ffmpeg": + align_method = VideoAlignMethodFfmpeg() + elif args.frame_align_method == "ocr": + align_method = VideoAlignMethodOcr() + elif args.frame_align_method != "None": + raise ValueError("Not supoort such method to align video") + + video_eval_tool = VideoEvaluation(eval_method, align_method, args) + video_out = video_eval_tool.eval(args.src_video, args.dst_video) + + return video_out + + +def init_video_argparse(): + parser = 
argparse.ArgumentParser(description=description) + parser.add_argument("--output", type=str, default=None, help="the path of output file") + parser.add_argument("--scenario", type=str, default=None, help="the name of scenario") + parser.add_argument("--ground_service", type=str, default=None, help="the url where you want to get the score of ground truth") + parser.add_argument("--ground_video", type=float, default=-1, help="the video score of a special scenario ground truth. -1 means not use ground") + # for video evaluation + parser.add_argument("--video_eval_method", type=str, default="vmaf", choices=["vmaf"], help="the method to evaluate video, like vmaf") + parser.add_argument("--src_video", type=str, required=True, default=None, help="the path of source video") + parser.add_argument("--dst_video", type=str, required=True, default=None, help="the path of destination video") + parser.add_argument("--frame_align_method", type=str, default="ffmpeg", choices=["None", "ffmpeg", "ocr"], help="how to do frame alignment. None means not to do frame align") + parser.add_argument("--model_path", type=str, default=None, help="the path of vmaf model") + # required by the video format of yuv raw video + parser.add_argument("--video_size", type=str, default=None, help="the size of video, like 1920x1080. 
    def eval(self, dst_audio_info : AudioInfo):
        """Return an audio quality score in [0, 100] for the received audio.

        NOTE(review): the DNSMOS web API request is disabled (see the string
        block below): the service was reported down on Dec 6, 2022, so this
        method currently ALWAYS returns 100.0. The request headers and JSON
        payload are still built but go unused.
        """
        # Set the content type
        headers = {'Content-Type': 'application/json'}
        # If authentication is enabled, set the authorization header
        headers['Authorization'] = f'Basic {self.dnsmos_key}'

        # decode the whole file into memory; fs is the sample rate
        audio, fs = sf.read(dst_audio_info.audio_path)
        if fs != 16000:
            # only warns -- it neither aborts nor resamples
            print('Only sampling rate of 16000 is supported as of now')
        data = {"data": audio.tolist()}
        input_data = json.dumps(data)
        """
        Dec 6, 2022 Updates:
        We submit request to DNSMOS API here and obtain the audio raw scores
        However, the API is down now, we return 100 points directly.

        # Make the request and display the response
        u = urlparse(self.dnsmos_uri)
        resp = requests.post(urljoin("https://" + u.netloc, 'score'), data=input_data, headers=headers)
        score_dict = resp.json()
        # scale [1, 5] -> [0, 100]
        audio_score = (score_dict["mos"] - 1) / 4 * 100
        """
        audio_score = 100.0

        return audio_score
    def eval(self, dst_audio_info : NetInfo):
        """Compute a network score from the per-packet receiver log.

        Combines, per SSRC: a 95th-percentile delay score, an average receive
        rate score (vs. self.ground_recv_rate), and a loss rate estimated from
        sequence-number gaps.

        NOTE(review): the three weights below sum to 0.7, so the maximum
        returned score is 70, not 100 -- presumably the remaining weight lives
        elsewhere in the pipeline; confirm intended.
        Assumes net_data is non-empty (empty input would divide by zero /
        min() over an empty list).
        """
        net_data = dst_audio_info.net_data
        ssrc_info = {}

        # NOTE(review): delay_list here is never used (dead local); the
        # per-ssrc "delay_list" entries below are what is actually consumed.
        delay_list = []
        loss_count = 0
        self.last_seqNo = {}
        for item in net_data:
            ssrc = item["packetInfo"]["header"]["ssrc"]
            sequence_number = item["packetInfo"]["header"]["sequenceNumber"]
            tmp_delay = item["packetInfo"]["arrivalTimeMs"] - item["packetInfo"]["header"]["sendTimestamp"]
            if (ssrc not in ssrc_info):
                # time_delta = -first_delay, so each ssrc's first packet gets a
                # relative delay of 0 and later delays are offsets from it
                ssrc_info[ssrc] = {
                    "time_delta" : -tmp_delay,
                    "delay_list" : [],
                    "received_nbytes" : 0,
                    "start_recv_time" : item["packetInfo"]["arrivalTimeMs"],
                    "avg_recv_rate" : 0
                }
            if ssrc in self.last_seqNo:
                # gaps in sequence numbers count as loss; NOTE(review):
                # reordered packets inflate this estimate -- confirm acceptable
                loss_count += max(0, sequence_number - self.last_seqNo[ssrc] - 1)
            self.last_seqNo[ssrc] = sequence_number

            ssrc_info[ssrc]["delay_list"].append(ssrc_info[ssrc]["time_delta"] + tmp_delay)
            ssrc_info[ssrc]["received_nbytes"] += item["packetInfo"]["payloadSize"]
            if item["packetInfo"]["arrivalTimeMs"] != ssrc_info[ssrc]["start_recv_time"]:
                # bytes per millisecond since the first packet of this ssrc
                ssrc_info[ssrc]["avg_recv_rate"] = ssrc_info[ssrc]["received_nbytes"] / (item["packetInfo"]["arrivalTimeMs"] - ssrc_info[ssrc]["start_recv_time"])

        # scale delay list
        for ssrc in ssrc_info:
            # NOTE(review): min_delay comes from the UNclipped list while the
            # 95th percentile comes from the clipped one, so delay_score is not
            # guaranteed to stay inside [0, 1] -- confirm intended.
            min_delay = min(ssrc_info[ssrc]["delay_list"])
            ssrc_info[ssrc]["scale_delay_list"] = [min(self.max_delay, delay) for delay in ssrc_info[ssrc]["delay_list"]]
            delay_pencentile_95 = np.percentile(ssrc_info[ssrc]["scale_delay_list"], 95)
            ssrc_info[ssrc]["delay_score"] = (self.max_delay - delay_pencentile_95) / (self.max_delay - min_delay)
        # delay score
        avg_delay_score = np.mean([np.mean(ssrc_info[ssrc]["delay_score"]) for ssrc in ssrc_info])

        # receive rate score
        recv_rate_list = [ssrc_info[ssrc]["avg_recv_rate"] for ssrc in ssrc_info if ssrc_info[ssrc]["avg_recv_rate"] > 0]
        avg_recv_rate_score = min(1, np.mean(recv_rate_list) / self.ground_recv_rate)

        # higher loss rate, lower score
        avg_loss_rate = loss_count / (loss_count + len(net_data))

        # calculate result score (weights: 0.2 delay + 0.2 rate + 0.3 loss)
        network_score = 100 * 0.2 * avg_delay_score + \
                        100 * 0.2 * avg_recv_rate_score + \
                        100 * 0.3 * (1 - avg_loss_rate)

        return network_score
self.net_path = net_path + self.net_data = None + + self.parse_net_log() + + def parse_net_log(self): + if not self.net_path or not os.path.exists(self.net_path): + raise ValueError("Error net path") + + ret = [] + with open(self.net_path, 'r') as f: + for line in f.readlines(): + if ("remote_estimator_proxy.cc" not in line): + continue + try: + raw_json = line[line.index('{'):] + json_network = json.loads(raw_json) + # it seems no use + del json_network["mediaInfo"] + ret.append(json_network) + # can not parser json + except ValueError as e: + pass + # other exception that need to care + except Exception as e: + raise ValueError("Exception when parser json log") + + self.net_data = ret \ No newline at end of file diff --git a/code/postprocess/metrics/utils/ocr_frame_align.sh b/code/postprocess/metrics/utils/ocr_frame_align.sh new file mode 100755 index 0000000..92d7a25 --- /dev/null +++ b/code/postprocess/metrics/utils/ocr_frame_align.sh @@ -0,0 +1,537 @@ +#!/bin/bash + +################################################################################## +# DEFAULT VALUES +################################################################################## + +PREFIX=0 +WIDTH=1280 +HEIGHT=720 +SOURCE_FOLDER= +FPS=30 +PESQ_AUDIO_SAMPLE_RATE=16000 +VIDEO_BITRATE=3M +JPG_FOLDER=jpg +FFMPEG_LOG="-loglevel error" +P_SUFFIX="-p.jpg" +V_SUFFIX="-v.jpg" +VIDEO_LENGTH_SEC=30 +YUV_PROFILE=yuv420p +# FFMPEG_OPTIONS="-c:v libvpx -b:v $VIDEO_BITRATE -pix_fmt $YUV_PROFILE" +FFMPEG_OPTIONS="-pix_fmt $YUV_PROFILE" +CALCULATE_AUDIO_QOE=false +CLEANUP=true +ALIGN_OCR=true +ONLY_VMAF=true +USAGE="Usage: `basename $0` [-w=width] [-h=height] [--src=src_video] [-d=duration] [--src_out=src_video_output] \ + [--no_cleanup] [--clean] [--align_ocr] [--suffix=suffix] [--fps=fps]" + +################################################################################## +# FUNCTIONS +################################################################################## + +init() { + mkdir -p $JPG_FOLDER + 
# Convert a (possibly fractional) number of seconds into "HH:MM:SS[.frac]".
# Input:  $1 = seconds, e.g. "125" or "125.48"
# Output: global $retval, e.g. "00:02:05" or "00:02:05.48"
duration() {
    num=$1
    decimals=

    # strindex sets $retval to the position of "." in $1 (-1 when absent)
    strindex $1 .
    if [ $retval -gt 0 ]; then
        # split into integral seconds and fractional digits
        num=$(echo $1 | cut -d'.' -f 1)
        decimals=$(echo $1 | cut -d'.' -f 2)
    fi

    ((h=num/3600))
    ((m=num%3600/60))
    ((s=num%60))

    retval=$(printf "%02d:%02d:%02d\n" $h $m $s)

    # re-attach the fractional part, if any
    if [ ! -z "$decimals" ]; then
        retval="${retval}.${decimals}"
    fi
}
# Decide whether an extracted frame is the lavfi colour-bar padding image.
# Probes six bar positions at 1/3 of the frame height and sets $retval to
# "true" only when ALL probes match their expected colour (threshold 50
# per channel).
# Expected values for lavfi padding video: width,height rgb(r,g,b) [color]:
#   120,240 rgb(0,255,255) [cyan]
#   200,240 rgb(255,0,253) [purple]
#   280,240 rgb(0,0,253)   [blue]
#   360,240 rgb(253,255,0) [yellow]
#   420,240 rgb(0,255,0)   [green]
#   500,240 rgb(253,0,0)   [red]
match_image() {
    image=$1
    threshold=50

    height=$(($HEIGHT / 3))
    bar=$(($WIDTH / 8))
    halfbar=$(($bar / 2))

    cyan=$((halfbar + ($bar * 1)))
    purple=$((halfbar + ($bar * 2)))
    blue=$((halfbar + ($bar * 3)))
    yellow=$((halfbar + ($bar * 4)))
    green=$((halfbar + ($bar * 5)))
    red=$((halfbar + ($bar * 6)))

    match_color $image $cyan $height $threshold 0 255 255
    match_cyan=$retval
    match_color $image $purple $height $threshold 255 0 255
    match_purple=$retval
    match_color $image $blue $height $threshold 0 0 255
    match_blue=$retval
    match_color $image $yellow $height $threshold 255 255 0
    match_yellow=$retval
    match_color $image $green $height $threshold 0 255 0
    match_green=$retval
    match_color $image $red $height $threshold 255 0 0
    match_red=$retval

    # BUG FIX: $match_green was computed but missing from the final check,
    # so a frame with a wrong green bar could still be classified as padding.
    if $match_cyan && $match_purple && $match_blue && $match_yellow \
        && $match_green && $match_red; then
        retval=true
    else
        retval=false
    fi
}
# Set $retval to "true" when $1 is a non-empty integer literal, else "false".
# Relies on `test -eq`, which fails (silenced via 2>/dev/null) on non-numeric
# input. NOTE(review): signed integers like "-5" also pass; callers feed it
# OCR output restricted to digits, so this should not matter -- confirm.
check_number() {
    # removed unused variable: re='^[0-9]+$' was assigned but never referenced
    input=$1

    if [ -n "$input" ] && [ "$input" -eq "$input" ] 2>/dev/null; then
        retval=true
    else
        retval=false
    fi
}
# Parse command-line options. Each --opt=value form strips the prefix with
# ${i#*=}; unknown options print usage and exit.
for i in "$@"; do
    case $i in
        --only_vmaf)
            ONLY_VMAF=true
            shift
            ;;
        --use_default_ref)
            VIDEO_REF="../test-no-padding.yuv"
            AUDIO_REF="../test-no-padding.wav"
            CALCULATE_AUDIO_QOE=true
            shift
            ;;
        --fps=*)
            FPS="${i#*=}"
            shift
            ;;
        -d=*|--duration=*)
            VIDEO_LENGTH_SEC="${i#*=}"
            shift
            ;;
        -vr=*|--video_ref=*)
            VIDEO_REF="${i#*=}"
            shift
            ;;
        --src=*)
            PRESENTER="${i#*=}"
            shift
            ;;
        --src_out=*)
            Y4M_PRESENTER="${i#*=}"
            shift
            ;;
        -p=*|--prefix=*)
            PREFIX="${i#*=}"
            shift
            ;;
        -w=*|--width=*)
            WIDTH="${i#*=}"
            shift
            ;;
        -h=*|--height=*)
            HEIGHT="${i#*=}"
            shift
            ;;
        -sf=*|--suffix=*)
            SUFFIX="${i#*=}"
            shift
            ;;
        --align_ocr)
            ALIGN_OCR=true
            shift
            ;;
        --no_cleanup)
            CLEANUP=false
            shift
            ;;
        --clean)
            init
            cleanup
            # BUG FIX: was `rm $PREFIX_*.csv $PREFIX_*.txt`, which expands the
            # UNDEFINED variable $PREFIX_ (empty) and therefore deleted every
            # *.csv / *.txt in the directory. Braces disambiguate the name.
            # Also dropped the unreachable `shift` that followed `exit 0`.
            rm ${PREFIX}_*.csv ${PREFIX}_*.txt 2>/dev/null
            exit 0
            ;;
        *) # unknown option
            echo "$USAGE"
            echo $i
            exit 0
            ;;
    esac
done
class VideoAlignMethod(ABC):
    """Abstract base for frame-alignment strategies.

    Subclasses set align_method_name and implement frame_align, which returns
    a temp-file object holding the re-aligned source video, or None when no
    alignment is needed.
    """

    @abstractmethod
    def __init__(self):
        self.align_method_name = "base"

    @abstractmethod
    def frame_align(self, src_video_info : VideoInfo, dst_video_info : VideoInfo):
        pass


class VideoAlignMethodFfmpeg(VideoAlignMethod):
    """Frame alignment by re-sampling the source frame rate with ffmpeg."""

    def __init__(self):
        super(VideoAlignMethodFfmpeg, self).__init__()
        self.align_method_name = "ffmpeg"

    def change_video_fps_by_ffmepg(self, video_info : VideoInfo, fps : int):
        """Re-encode the video at `fps` into a temp file and return its handle.

        The caller must keep the returned NamedTemporaryFile alive; the file
        disappears when the object is garbage-collected.
        """
        tmp_out = NamedTemporaryFile('w+t', suffix=".%s" % (video_info.format_abbreviation))
        command = ["ffmpeg", "-i", video_info.video_path, "-r", str(fps), "-y"]
        if video_info.video_size:
            command += ["-s", video_info.video_size]
        command += [tmp_out.name]
        subprocess.run(command, check=True, stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT, encoding="utf8")
        return tmp_out

    def frame_align(self, src_video_info : VideoInfo, dst_video_info : VideoInfo):
        """Return a resampled copy of src, or None when frame counts already agree."""
        needs_resample = (
            not src_video_info.fps
            or abs(src_video_info.get_frame_count() - dst_video_info.get_frame_count()) >= 0.000001
        )
        if not needs_resample:
            return None
        # pick the fps that makes src produce exactly dst's frame count
        target_fps = dst_video_info.get_frame_count() / float(src_video_info.duration_sec)
        return self.change_video_fps_by_ffmepg(src_video_info, target_fps)
class VideoEvalMethodVmaf(VideoEvalMethod):
    """Video quality scoring via the external `vmaf` command-line tool.

    Supports y4m (yuv4mpegpipe) and raw yuv input; both videos must share the
    same container format.
    """

    def __init__(self, model_path=None):
        super(VideoEvalMethodVmaf, self).__init__()
        # NOTE(review): method_name is "ffmpeg" although this class wraps the
        # vmaf CLI -- looks like a copy-paste slip, but nothing visible here
        # reads method_name, so it is left unchanged.
        self.method_name = "ffmpeg"
        self.support_type = ["yuv4mpegpipe", "rawvideo"]
        self.support_type_abbreviation = ["y4m", "yuv"]
        self.model_path = model_path

    def eval(self, src_video_info : VideoInfo, dst_video_info : VideoInfo):
        """Run vmaf (reference=src, distorted=dst) and return the mean score.

        Raises ValueError when formats differ / are unsupported, or when no
        score can be parsed from the vmaf XML report.
        """
        if src_video_info.format_name != dst_video_info.format_name:
            raise ValueError("Can't compare bewteen different video type")
        if src_video_info.format_name not in self.support_type:
            raise ValueError("Video type don't support")

        cmd = ["vmaf", "--reference", src_video_info.video_path, "--distorted", dst_video_info.video_path]
        if self.model_path:
            cmd.extend(["-m", "path=%s" % (self.model_path)])

        # raw yuv has no header, so geometry and pixel layout must be explicit
        if src_video_info.format_name == "rawvideo":
            cmd.extend(["--width", src_video_info.width, "--height", src_video_info.height, \
                "--pixel_format", src_video_info.pixel_format, "--bitdepth", src_video_info.bitdepth])

        with NamedTemporaryFile('w+t', suffix=".xml") as f:
            cmd.extend(["--output", f.name])
            # vmaf writes its XML report to f.name; the already-open handle
            # then reads that same path. The exit code (cmd_result) is not
            # checked: a failed run simply yields no parsable score below.
            cmd_result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf8")
            re_result = re.search(r'metric name="vmaf".*?mean="([\d]+\.[\d]+)"', f.read())
            if not re_result:
                # NOTE(review): message says "terminal output" but the score is
                # parsed from the XML file -- the message is misleading.
                raise ValueError("Can not get vmaf score from terminal output")
            vmaf_score = float(re_result.group(1))

        return vmaf_score
stderr=subprocess.STDOUT, encoding="utf8") + if cmd_result.returncode: + if video_size: + cmd.extend(["-video_size", video_size]) + cmd_result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf8") + + re_video_size = re.search(r'Stream.*?([\d]+x[\d]+),', cmd_result.stdout) + if re_video_size: + if video_size: + assert re_video_size.group(1) == video_size.lower() + self.video_size = re_video_size.group(1) + self.width = self.video_size.split('x')[0] + self.height = self.video_size.split('x')[1] + + duration_sec = re.search(r'duration=([\d\.]+)', cmd_result.stdout) + if duration_sec: + self.duration_sec = duration_sec.group(1) + + format_name = re.search(r'format_name=([\w\d]+)', cmd_result.stdout) + if format_name: + self.format_name = format_name.group(1) + self.format_abbreviation = "yuv" if self.format_name == "rawvideo" else "y4m" + + fps = re.search(r'([\d]+)\sfps', cmd_result.stdout) + if fps: + self.fps = fps.group(1) + + size = re.search(r'size=([\d]+)', cmd_result.stdout) + if size: + self.size = size.group(1) + + bit_rate = re.search(r'bit_rate=([\d]+)', cmd_result.stdout) + if bit_rate: + self.bit_rate = bit_rate.group(1) + + pixel_format = re.search(r'Stream.*?(4[\d]+)p,', cmd_result.stdout) + if pixel_format: + self.pixel_format = pixel_format.group(1) + + def update_video_size(self, size): + if self.video_size: + assert self.video_size == size + return + self.video_size = size + re_result = re.search(r'([\d]+)[xX]([\d]+)', size) + assert re_result + self.width = re_result.group(1) + self.height = re_result.group(2) + + def get_frame_count(self): + assert self.duration_sec + assert self.fps + return float(self.duration_sec) * float(self.fps) + + def check_all_info(self): + assert self.video_size + assert self.duration_sec + assert self.format_name + if "rawvideo" != self.format_name: + assert self.fps + assert self.size + assert self.bit_rate + assert self.pixel_format + print(self.video_size, self.duration_sec, 
#!/bin/bash
# Run the full experiment pipeline:
#   network configs -> directory layout -> transmission -> log post-processing.
# Aborts with a step-specific message as soon as any stage fails.

run_step() {
    # $1 = banner line, $2 = script to run, $3 = failure message
    echo "$1"
    if ! bash "$2"; then
        echo "$3"
        exit 1
    fi
}

run_step "Step 1: Setting network configurations..." set_network_configs.sh "Failed to set network configurations."
run_step "Step 2: Preparing directories..." preparedirs.sh "Failed to prepare directories."
run_step "Step 3: Transmitting data..." transmit.sh "Failed to transmit data."
run_step "Step 4: Post-processing logs..." postprocess.sh "Failed to post-process logs."

echo "Pipeline executed successfully."
# Post-process one sender/receiver log pair: compress the raw logs into
# util logs, then render the figures.
#   $1 receiver log   $2 sender log   $3 raw-log input root
#   $4 util-log output root           $5 figure output root
# Relies on RECEIVER_PATTERNS_STR / SENDER_PATTERNS_STR from the caller.
process_logs() {
    local rx_log=$1
    local tx_log=$2
    local in_root=$3
    local util_root=$4
    local fig_root=$5
    local common_args=(
        --receiver_log "$rx_log"
        --sender_log "$tx_log"
        --receiver_patterns "$RECEIVER_PATTERNS_STR"
        --sender_patterns "$SENDER_PATTERNS_STR"
        --base_input_dir "$in_root"
    )

    # Compress logs
    python3 compress_logs.py "${common_args[@]}" --base_output_dir "$util_root"

    # Draw figures
    python3 draw.py "${common_args[@]}" --base_output_dir "$fig_root" --verbose
}
# Ensure directory $1 exists; abort the whole script if it cannot be made.
create_dir() {
    [ -d "$1" ] && return 0
    if ! mkdir -p "$1"; then
        echo "Failed to create directory: $1"
        exit 1
    fi
}
#!/bin/bash

# Get the current terminal path
CURRENT_PATH=$(pwd)
echo "Current terminal path: $CURRENT_PATH"
# Directory to save the generated network traces.
# Fix: OUTPUT_DIR was previously assigned twice with the same value;
# the redundant duplicate line is removed.
OUTPUT_DIR="${CURRENT_PATH}/data/temp/network_traces"
SETNETWORK_DIR="${CURRENT_PATH}/code/setconfig"
# Check if the output directory exists, if not, create it
if [ ! -d "$OUTPUT_DIR" ]; then
    mkdir -p "$OUTPUT_DIR"
fi
# Remove previously generated traces so only this run's set remains.
rm -rf "$OUTPUT_DIR"/*
# Fix: quote the path and guard the cd. Unquoted $SETNETWORK_DIR breaks on
# paths with spaces, and an unguarded cd would silently generate traces in
# the wrong directory if the setconfig dir is missing.
cd "$SETNETWORK_DIR" || exit 1
# Loop to generate multiple traces with different patterns
for i in {1..1}; do
    output_file="$OUTPUT_DIR/trace_$i.json"
    patterns="${patterns_array[$((i-1))]}"
    echo "Generating trace $i with patterns: $patterns..."
    if ! generate_trace "$trace_type" "$downlink" "$uplink" "$output_file" "$patterns"; then
        echo "Error generating trace $i" >&2
    else
        echo "Trace $i generated successfully"
    fi
done
# Apply the tc/netem shaping described by the trace JSON file given as $1.
# Fix: the script path was previously unquoted (${CURRENT_PATH}/...), which
# word-splits and breaks when the checkout path contains spaces.
apply_network_config() {
    local config_file=$1
    python3 "${CURRENT_PATH}/code/transmit/tc_setup.py" --config "$config_file"
}
-name 'peerconnection_serverless' -delete +} + +# Main loop +printf "Starting transmission\n" +printf "Mounting tmpfs\n" +sudo mount -t tmpfs -o size=20G media "$WORKDIR" +for model in "$MODEL_DIR"/*; do + if [ -d "$model" ]; then + model_name=$(basename "$model") + printf "Running model: %s\n" "$model_name" + # Clean runtime directory and copy model files + clean_runtime_dir + printf "Cleaned runtime directory\n" + printf "Copying model files\n" + cp -r "$model"/* "$CMD_PATH" + + # Loop through each video and audio file + for video_file in "$INPUT_VIDEO_DIR"/*.yuv; do + base_name=$(basename "$video_file" .yuv) + audio_file="$INPUT_AUDIO_DIR/$base_name.wav" + if [ -f "$video_file" ] && [ -f "$audio_file" ]; then + # Clear workdir + rm -rf "$WORKDIR"/* + printf "Cleared workdir\n" + + base_name=$(basename "$video_file" .yuv) + workdir_video="$WORKDIR/$base_name.yuv" + workdir_audio="$WORKDIR/$base_name.wav" + cp "$video_file" "$workdir_video" + cp "$audio_file" "$workdir_audio" + printf "Copied video and audio files\n" + # Loop through each network trace file + for network_trace in "$NETWORK_TRACES_DIR"/*; do + if [ -f "$network_trace" ]; then + trace_name=$(basename "$network_trace" .json) + trace_dir="$WORKDIR/$trace_name" + mkdir -p "$trace_dir" + printf "Created trace directory\n" + + receiver_logging="$trace_dir/receiver.log" + sender_logging="$trace_dir/sender.log" + save_video="$trace_dir/outvideo.yuv" + save_audio="$trace_dir/outaudio.wav" + + receiver_output="$trace_dir/receiver_config.json" + sender_output="$trace_dir/sender_config.json" + + printf "Generated media config\n" + # Generate media config + generate_media_config "$workdir_video" "$workdir_audio" "$save_video" "$save_audio" "$receiver_logging" "$sender_logging" "$receiver_output" "$sender_output" + + # Apply network configuration + printf "Applying network configuration\n" + apply_network_config "$network_trace" & + network_pid=$! 
+ + # Terminate existing screen sessions with the same name + screen -ls | grep receiver_session | awk '{print $1}' | xargs -I {} screen -S {} -X quit + screen -ls | grep sender_session | awk '{print $1}' | xargs -I {} screen -S {} -X quit + + printf "Starting screen sessions\n" + # 注意这里要是端口被占用,会导致screen传输失败 + screen -dmS receiver_session bash -c "cd $CURRENT_PATH; \ + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$DLL_PATH; \ + python $EXE_PATH $receiver_output" & + screen -dmS sender_session bash -c "cd $CURRENT_PATH; \ + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$DLL_PATH; \ + python $EXE_PATH $sender_output" & + printf "Running model: %s, video: %s, trace: %s\n" "$model_name" "$base_name" "$trace_name" + # Wait for both screen sessions to finish + while screen -list | grep -q "sender_session" || screen -list | grep -q "receiver_session"; do + sleep 1 + done + printf "receive-send finished\n" + # Terminate the network configuration process + + if kill -9 $network_pid; then + printf "network process killed finished\n" + else + printf "failed to kill network process with PID %s\n" "$network_pid" + fi + # wait + + # Copy logs to raw logs directory + printf "Copying logs to raw logs directory\n" + raw_log_dir="$RAW_LOGS_DIR/$base_name/$trace_name/$model_name" + mkdir -p "$raw_log_dir" + cp "$receiver_logging" "$raw_log_dir" + cp "$sender_logging" "$raw_log_dir" + printf "Logs copied\n" + fi + done + fi + done + fi +done +sudo umount media +if mountpoint -q media; then + printf "Failed to unmount tmpfs\n" +else + printf "Successfully unmounted tmpfs\n" +fi +printf "Transmission finished\n" \ No newline at end of file diff --git a/code/setconfig/set_media_config.py b/code/setconfig/set_media_config.py new file mode 100644 index 0000000..4e77cdc --- /dev/null +++ b/code/setconfig/set_media_config.py @@ -0,0 +1,203 @@ +import json +import argparse +import os + +AUTOCLOSE = 30 +LISTENING_IP = "0.0.0.0" +LISTENING_PORT = 8000 +BWE_FEEDBACK_DURATION = 200 +BASE_INPUT_DIR = 
"./data/workdir" +BASE_OUTPUT_DIR = "./data/workdir" +INPUT_VIDEO_FILE = os.path.join(BASE_INPUT_DIR, "test.yuv") +INPUT_AUDIO_FILE = os.path.join(BASE_INPUT_DIR, "test.wav") +OUTPUT_VIDEO_FILE = os.path.join(BASE_OUTPUT_DIR, "outvideo.yuv") +OUTPUT_AUDIO_FILE = os.path.join(BASE_OUTPUT_DIR, "outaudio.wav") +RECEIVER_LOGGING = os.path.join(BASE_OUTPUT_DIR, "receiver.log") +SENDER_LOGGING = os.path.join(BASE_OUTPUT_DIR, "sender.log") +VIDEO_HEIGHT = 1080 +VIDEO_WIDTH = 1920 +VIDEO_FPS = 30 + +class MediaConfig: + def __init__(self, + autoclose=AUTOCLOSE, + listening_ip=LISTENING_IP, + listening_port=LISTENING_PORT, + bwe_feedback_duration=BWE_FEEDBACK_DURATION, + video_file=INPUT_VIDEO_FILE, + audio_file=INPUT_AUDIO_FILE, + save_video=OUTPUT_VIDEO_FILE, + save_audio=OUTPUT_AUDIO_FILE, + receiver_logging=RECEIVER_LOGGING, + sender_logging=SENDER_LOGGING, + video_height=VIDEO_HEIGHT, + video_width=VIDEO_WIDTH, + video_fps=VIDEO_FPS, + if_save_media=True + ): + self.autoclose = autoclose + self.listening_ip = listening_ip + self.listening_port = listening_port + self.bwe_feedback_duration = bwe_feedback_duration + self.video_file = video_file + self.audio_file = audio_file + self.save_video = save_video + self.save_audio = save_audio + self.receiver_logging = receiver_logging + self.sender_logging = sender_logging + self.video_height = video_height + self.video_width = video_width + self.video_fps = video_fps + self.if_save_media = if_save_media + + def generate_receiver_config(self): + config = { + "serverless_connection": { + "autoclose": self.autoclose, + "sender": { + "enabled": False + }, + "receiver": { + "enabled": True, + "listening_ip": self.listening_ip, + "listening_port": self.listening_port + } + }, + "bwe_feedback_duration": self.bwe_feedback_duration, + "video_source": { + "video_disabled": { + "enabled": True + }, + "webcam": { + "enabled": False + }, + "video_file": { + "enabled": False, + "height": self.video_height, + "width": self.video_width, + 
def _str2bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is a classic trap: ``bool("false")`` is True,
    so any non-empty string enabled the flag regardless of its text. This
    converter accepts true/false, 1/0, yes/no (case-insensitive) and stays
    backward compatible with callers that pass "true" (e.g. transmit.sh).
    """
    if isinstance(value, bool):
        return value
    lowered = str(value).strip().lower()
    if lowered in ("true", "t", "1", "yes", "y"):
        return True
    if lowered in ("false", "f", "0", "no", "n"):
        return False
    raise argparse.ArgumentTypeError(f"invalid boolean value: {value!r}")


def main():
    """Parse CLI options and emit receiver/sender JSON config files."""
    parser = argparse.ArgumentParser(description="Generate media configuration files.")
    parser.add_argument('--autoclose', type=int, default=AUTOCLOSE, help='Autoclose duration')
    parser.add_argument('--listening_ip', type=str, default=LISTENING_IP, help='Listening IP address')
    parser.add_argument('--listening_port', type=int, default=LISTENING_PORT, help='Listening port')
    parser.add_argument('--bwe_feedback_duration', type=int, default=BWE_FEEDBACK_DURATION,
                        help='BWE feedback duration')
    parser.add_argument('--video_file', type=str, default=INPUT_VIDEO_FILE, help='Path to the video file')
    parser.add_argument('--audio_file', type=str, default=INPUT_AUDIO_FILE, help='Path to the audio file')
    parser.add_argument('--save_video', type=str, default=OUTPUT_VIDEO_FILE, help='Path to save the video file')
    parser.add_argument('--save_audio', type=str, default=OUTPUT_AUDIO_FILE, help='Path to save the audio file')
    parser.add_argument('--receiver_logging', type=str, default=RECEIVER_LOGGING, help='Path to save the receiver log file')
    parser.add_argument('--sender_logging', type=str, default=SENDER_LOGGING, help='Path to save the sender log file')
    parser.add_argument('--receiver_output', type=str, default=os.path.join(BASE_OUTPUT_DIR, "receiver_config.json"), help='Path to save the receiver config file')
    parser.add_argument('--sender_output', type=str, default=os.path.join(BASE_OUTPUT_DIR, "sender_config.json"), help='Path to save the sender config file')
    parser.add_argument('--video_height', type=int, default=VIDEO_HEIGHT, help='Video height')
    parser.add_argument('--video_width', type=int, default=VIDEO_WIDTH, help='Video width')
    parser.add_argument('--video_fps', type=int, default=VIDEO_FPS, help='Video frames per second')
    # Fix: was type=bool, which made '--if_save_media false' evaluate True.
    parser.add_argument('--if_save_media', type=_str2bool, default=True, help='Flag to save media')

    args = parser.parse_args()
    media_config = MediaConfig(
        autoclose=args.autoclose,
        listening_ip=args.listening_ip,
        listening_port=args.listening_port,
        bwe_feedback_duration=args.bwe_feedback_duration,
        video_file=args.video_file,
        audio_file=args.audio_file,
        save_video=args.save_video,
        save_audio=args.save_audio,
        receiver_logging=args.receiver_logging,
        sender_logging=args.sender_logging,
        video_height=args.video_height,
        video_width=args.video_width,
        video_fps=args.video_fps,
        if_save_media=args.if_save_media
    )
    # Write both peer configs next to each other so transmit.sh can launch
    # the receiver and sender processes from the same trace directory.
    receiver_config = media_config.generate_receiver_config()
    media_config.save_config(receiver_config, args.receiver_output)
    sender_config = media_config.generate_sender_config()
    media_config.save_config(sender_config, args.sender_output)


if __name__ == "__main__":
    main()
class Estimator(object):
    """Trivial bandwidth estimator: ignores feedback, always answers 1 Mbps."""

    def report_states(self, stats: dict):
        """Accept one packet's statistics; this baseline ignores them.

        Expected keys (per the transport layer): send_time_ms,
        arrival_time_ms, payload_type, sequence_number, ssrc,
        padding_length, header_length, payload_size.
        """
        pass

    def get_estimated_bandwidth(self) -> int:
        """Return the constant estimate of 1,000,000 bps (1 Mbps)."""
        return 1_000_000
#ifndef MODULES_THIRD_PARTY_CMDINFER_H_
#define MODULES_THIRD_PARTY_CMDINFER_H_

// Fix: the two include directives had lost their header names (bare
// "#include" lines). The declarations below use std::uint*_t and
// std::size_t, so <cstdint> and <cstddef> are required -- TODO confirm
// these match the original intent.
#include <cstdint>
#include <cstddef>

namespace cmdinfer {

    // Serialize one received packet's statistics and hand them to the
    // external estimator process (implementation in cmdinfer.cc emits a
    // JSON line on stdout).
    void ReportStates(
        std::uint64_t sendTimeMs,
        std::uint64_t receiveTimeMs,
        std::size_t payloadSize,
        std::uint8_t payloadType,
        std::uint16_t sequenceNumber,
        std::uint32_t ssrc,
        std::size_t paddingLength,
        std::size_t headerLength);

    // Ask the external estimator for its current bandwidth estimate (bps).
    float GetEstimatedBandwidth();

}  // namespace cmdinfer

#endif  // MODULES_THIRD_PARTY_CMDINFER_H_
def main(ifd=sys.stdin, ofd=sys.stdout):
    """Bridge loop between the WebRTC process and the Python estimator.

    Reads lines from *ifd*: JSON packet statistics are fed to the
    estimator, a bandwidth-request command triggers an estimate written to
    *ofd*, and any other line is echoed to our own stdout unchanged.
    """
    estimator_class = find_estimator_class()
    estimator = estimator_class()
    count = 0
    while True:
        line = ifd.readline()
        if not line:
            # EOF: the peer process closed its end of the pipe.
            break
        if isinstance(line, bytes):
            line = line.decode("utf-8")
        stats = fetch_stats(line)
        if stats:
            estimator.report_states(stats)
            continue
        request, count = request_estimated_bandwidth(line, count)
        if request:
            # Debug marker; newline added so successive markers do not run
            # together with forwarded log lines on the terminal.
            sys.stdout.write(f"-------------------------RequestBandwidth Count:{count}-----------------\n")
            bandwidth = estimator.get_estimated_bandwidth()
            payload = "{}\n".format(int(bandwidth))
            # Fix: ofd.write(bytes) raises TypeError when ofd is a text
            # stream -- notably the default ofd=sys.stdout. Keep the bytes
            # path for the binary pipe used by peerconnection_serverless,
            # and fall back to str for text streams.
            try:
                ofd.write(payload.encode("utf-8"))
            except TypeError:
                ofd.write(payload)
            ofd.flush()
            continue
        sys.stdout.write(line)
        sys.stdout.flush()
def apply_network_config(config):
    """Apply one netem trace segment on INTERFACE for its duration.

    config keys (all optional):
      duration -- segment length in ms (default 60000)
      capacity -- link rate, presumably in kbps (converted to Mbit) -- TODO confirm
      loss     -- packet loss percentage
      rtt      -- round-trip time in ms; halved for one-way delay
      jitter   -- delay jitter in ms
    """
    duration_ms = config.get("duration", 60000)  # Duration in ms, defaulting to 60 s
    capacity = config.get("capacity", 1000) / 1000  # Convert to Mbit
    loss = config.get("loss", 0)
    rtt = config.get("rtt", 0) / 2  # one-way delay is half the RTT
    jitter = config.get("jitter", 0)

    print(f"{current_time()} - Applying: capacity={capacity}Mbit, loss={loss}%, rtt={rtt}ms, jitter={jitter}ms for {duration_ms}ms")

    # Delete any existing configuration on the interface, ignoring errors
    # if none exists. Values come from a local config file (not untrusted
    # input), so shell=True is tolerated here.
    del_command = f"sudo tc qdisc del dev {INTERFACE} root"
    print(f"{current_time()} - Executing: {del_command}")
    subprocess.run(del_command, shell=True, stderr=subprocess.DEVNULL)

    # Apply a netem qdisc with rate limiting, delay, jitter and loss.
    netem_command = f"sudo tc qdisc add dev {INTERFACE} root netem rate {capacity}Mbit"

    # Fix: netem jitter is a parameter of the "delay" clause. The previous
    # code appended a bare "<jitter>ms distribution normal" when rtt == 0
    # but jitter > 0, producing an invalid tc command. Emit a delay clause
    # whenever either is requested; output is byte-identical to before when
    # rtt > 0.
    if rtt > 0 or jitter > 0:
        netem_command += f" delay {rtt}ms"
        if jitter > 0:
            netem_command += f" {jitter}ms distribution normal"

    if loss > 0:
        netem_command += f" loss {loss}%"

    print(f"{current_time()} - Executing: {netem_command}")
    subprocess.run(netem_command, shell=True)

    # Hold the configuration for the segment's duration (ms -> s).
    time.sleep(duration_ms / 1000)

    # Clear the configuration after the duration, ignoring errors if
    # there's no existing qdisc.
    print(f"{current_time()} - Executing: {del_command}")
    subprocess.run(del_command, shell=True, stderr=subprocess.DEVNULL)