Add unit tests and new libraries. Add TypeScript scripts that generate podcasts. Create temporary directories and files during generation. Hook provisional podcast-generation logic into the interface.

Jarosław Wieczorek 2020-12-21 19:08:28 +01:00
parent d4bafdf766
commit f18c4a8eb7
12 changed files with 2901 additions and 24 deletions

56
find-loudness.ts Normal file

@ -0,0 +1,56 @@
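// Scans a raw unsigned 8-bit PCM stream (pcm_u8, as produced by generate.sh)
// and returns the points where the signal flips between "loud" and "quiet".
// A sample counts as loud when its distance from the 128 midpoint exceeds
// threshold_at_point; once loud, the state is held for inertia_samples more
// samples so that short pauses do not split a segment.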
import { createReadStream } from "fs";

export type SwapPoint = {
  position_start: number;
  duration: number;
  loud: boolean;
  label: string;
};

export default async function (
  file: import("fs").PathLike,
  threshold_at_point: number,
  inertia_samples: number,
  label: string
): Promise<SwapPoint[]> {
  const stream = createReadStream(file);
  let position = 0;
  const results: SwapPoint[] = [];
  let last_swap_position = 0;
  let keep_loud_until = 0;
  let was_loud_last_time = false;
  return new Promise((resolve, reject) => {
    stream.on("readable", () => {
      let chunk: Buffer | null;
      while ((chunk = stream.read()) !== null) {
        for (let i = 0; i < chunk.byteLength; i++) {
          position++;
          const byte = chunk[i];
          const volume = Math.abs(byte - 128);
          if (position >= keep_loud_until) {
            const is_loud: boolean = volume > threshold_at_point;
            if (is_loud != was_loud_last_time) {
              const swap_point = {
                position_start: last_swap_position,
                duration: position - last_swap_position,
                loud: was_loud_last_time,
                label,
              };
              results.push(swap_point);
              last_swap_position = position;
              was_loud_last_time = is_loud;
            }
          }
          if (volume > threshold_at_point) {
            keep_loud_until = position + inertia_samples;
          }
        }
      }
    });
    stream.on("end", () => {
      resolve(results);
    });
    stream.on("error", reject);
  });
}
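
// Hypothetical usage (paths and values assumed, not part of this commit):
//   const points = await findLoudness("/tmp/work/leftraw", 1, 2400, "left");
//   points.forEach((p) => console.log(p.label, p.loud, p.duration));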

98
generate-demuxer.ts Normal file

@ -0,0 +1,98 @@
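// Reads <tmp_dir>/leftraw and <tmp_dir>/rightraw (one pcm_u8 stream per
// speaker), merges the loud/quiet swap points of both channels, and prints
// an ffmpeg concat-demuxer script on stdout: for every interval it emits the
// image matching who is speaking (both/left/right/none) and its duration.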
const yargs = require("yargs");
const argv = yargs
  .option("tmp_dir", {
    description: "path to the temporary directory",
    alias: "tmp",
    type: "string",
  })
  .argv;
// console.log(`The current temporary dir is: ${argv.tmp_dir}`);
import findLoudness, { SwapPoint } from "./find-loudness";
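// graph_density must match the aresample rate used in generate.sh (8000 Hz),
// so one "unit" below is one sample = 1/8000 s; inertia holds the loud state
// for 0.3 s.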
const graph_density = 8000;
const threshold_at_point = 1;
const inertia_s = 0.3;
const inertia_samples = inertia_s * graph_density;
const s = (n: number) => n / graph_density;
// minutes within the hour, not total minutes, so formatTime reads as h:m:s
const minutes = (units: number) => Math.floor(s(units) / 60) % 60;
const hours = (units: number) => Math.floor(units / graph_density / 60 / 60);
const formatTime = (units: number) =>
  `${hours(units)}:${minutes(units)}:${Math.floor(s(units) % 60)}`;
type Mode = { left: boolean; right: boolean };

async function run() {
  const [left_breaks, right_breaks] = await Promise.all([
    findLoudness(
      argv.tmp_dir + "/leftraw",
      threshold_at_point,
      inertia_samples,
      "left"
    ),
    findLoudness(
      argv.tmp_dir + "/rightraw",
      threshold_at_point,
      inertia_samples,
      "right"
    ),
  ]);
  const merged = [...left_breaks, ...right_breaks].sort((a, b) =>
    a.position_start < b.position_start
      ? -1
      : a.position_start > b.position_start
      ? 1
      : 0
  );
  // console.log("left breaks:", left_breaks);
  // console.log(`right_breaks`, right_breaks);
  // console.log(`merged`, merged);

  function new_mode(m: Mode, s: SwapPoint): Mode {
    return { ...m, [s.label]: s.loud };
  }

  function mode_to_string(mode: Mode) {
    if (mode.left && mode.right) {
      return "both";
    }
    for (const side of ["left", "right"]) {
      if (mode[side as keyof Mode]) {
        return side;
      }
    }
    return "none";
  }
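
  // Concat-demuxer format: a "file <path>" line followed by a "duration <s>"
  // line per segment; the timing totals go to stderr so stdout stays a valid
  // script for ffmpeg -f concat.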
console.log("file", `${argv.tmp_dir}/pics/none.png`);
let last_point = 0;
let mode: Mode = { left: false, right: false };
let last_file;
let total = 0;
for (let i = 2; i < merged.length; i++) {
const point = merged[i];
mode = new_mode(mode, point);
const file = `${argv.tmp_dir}/pics/${mode_to_string(mode)}.png`;
const duration = (point.position_start - last_point) / graph_density;
console.log(
"duration",
(point.position_start - last_point) / graph_density
);
console.log("file", file);
last_point = point.position_start;
last_file = file;
total += duration * graph_density;
}
console.log("duration", merged[merged.length - 1].duration / graph_density);
console.log("file", last_file);
console.error(total, formatTime(total));
}
run();

43
generate.sh Normal file

@ -0,0 +1,43 @@
#!/bin/bash
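# Usage: generate.sh <tmp_dir> <audio_left> <audio_right> <both_image> \
#                    <none_image> <left_image> <right_image> <aresample>
# Note: audio_right is echoed below but not used yet; the left file is
# expected to carry both speakers on separate stereo channels.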
tmp_dir=$1
audio_left=$2
audio_right=$3
both_image=$4
none_image=$5
left_image=$6
right_image=$7
aresample=$8

echo "tmp_dir: $1";
echo "audio_left: $2";
echo "audio_right: $3";
echo "both_image: $4";
echo "none_image: $5";
echo "left_image: $6";
echo "right_image: $7";
echo "aresample: $8";

echo "splitting the mp3 into two separate wav files"
ffmpeg -i "$audio_left" -map_channel 0.0.0 "$tmp_dir/left.wav" -map_channel 0.0.1 "$tmp_dir/right.wav"

echo "generating the raw files on two threads"
ffmpeg -i "$tmp_dir/left.wav" -ac 1 -filter:a aresample=$aresample -map 0:a -c:a pcm_u8 -f data - > "$tmp_dir/leftraw" &
ffmpeg -i "$tmp_dir/right.wav" -ac 1 -filter:a aresample=$aresample -map 0:a -c:a pcm_u8 -f data - > "$tmp_dir/rightraw" &
# wait until both background jobs finish
wait;

echo "generating the demuxers...";
# generates the track list for ffmpeg to assemble:
mkdir -p ./out
ts-node generate-demuxer.ts --tmp_dir="$tmp_dir" > ./out/demuxer.txt

echo "generating the full video"
ffmpeg -y -f concat -safe 0 -i ./out/demuxer.txt -r 30 -tune stillimage -vsync vfr -pix_fmt yuv420p ./out/video.mp4
# ^ using -safe 0 so that absolute paths are accepted

echo "merging the video with the audio:"
ffmpeg -i ./out/video.mp4 -i "$audio_left" -ac 1 -tune stillimage ./out/video-and-audio.mp4
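
# Hypothetical invocation (paths assumed):
#   bash generate.sh /tmp/podcast ./episode.mp3 "" ./both.png ./none.png \
#        ./left.png ./right.png 8000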

2528
package-lock.json generated Normal file

File diff suppressed because it is too large.

18
package.json Normal file

@ -0,0 +1,18 @@
{
  "name": "vizualizer",
  "version": "1.0.0",
  "description": "ffmpeg -i odcinek1-kuba.wav -ac 1 -filter:a aresample=8000 -map 0:a -c:a pcm_s16le -f data - | hexdump",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "",
  "license": "ISC",
  "devDependencies": {
    "@types/node": "^14.0.1"
  },
  "dependencies": {
    "editly": "^0.3.0",
    "yargs": "^16.2.0"
  }
}

View File

@ -6,8 +6,8 @@
    <rect>
     <x>0</x>
     <y>0</y>
-    <width>496</width>
-    <height>711</height>
+    <width>554</width>
+    <height>745</height>
    </rect>
   </property>
   <property name="windowTitle">
@ -143,6 +143,13 @@
      </item>
     </layout>
    </item>
+   <item>
+    <widget class="QCheckBox" name="check_box_connected_channels">
+     <property name="text">
+      <string>Zaznacz jeśli kanały rozmówców są połączone w jednym nagraniu</string>
+     </property>
+    </widget>
+   </item>
    <item>
     <widget class="Line" name="h_line">
      <property name="orientation">
@ -575,13 +582,14 @@
   <zorder>h_slider</zorder>
   <zorder>h_line_3</zorder>
   <zorder>v_spacer_1</zorder>
+  <zorder>check_box_connected_channels</zorder>
  </widget>
  <widget class="QMenuBar" name="menu_bar">
   <property name="geometry">
    <rect>
     <x>0</x>
     <y>0</y>
-    <width>496</width>
+    <width>554</width>
     <height>32</height>
    </rect>
   </property>

View File

@ -1,10 +1,12 @@
 from PyQt5.QtGui import QPixmap
 from PyQt5.QtWidgets import QMainWindow, QLabel,\
-    QFileDialog, QDialog, QDialogButtonBox, QVBoxLayout
+    QFileDialog, QDialog, QDialogButtonBox, QVBoxLayout, QCheckBox
 from PyQt5.QtCore import Qt
 from src.python.ui.mainwindow_ui import Ui_MainWindow
 import tempfile
 import subprocess
+import os
+import shutil


 class MainWindow(QMainWindow, Ui_MainWindow):
@ -14,11 +16,15 @@ class MainWindow(QMainWindow, Ui_MainWindow):
         self.setup_logic()
         self.setup_detail()
         self.aresample = 8000
+        self.test_data()

     def setup_logic(self):
         self.button_audio_1.clicked.connect(lambda: self.open_audio_import(audio_number=1))
         self.button_audio_2.clicked.connect(lambda: self.open_audio_import(audio_number=2))
+        self.check_box_connected_channels.stateChanged.connect(self.channels_connected)
         self.button_select_avatar_1.clicked.connect(lambda: self.open_image_import(image_number=1))
         self.button_select_avatar_2.clicked.connect(lambda: self.open_image_import(image_number=2))
         self.button_select_avatar_3.clicked.connect(lambda: self.open_image_import(image_number=3))
@ -26,6 +32,8 @@ class MainWindow(QMainWindow, Ui_MainWindow):
         self.button_generate.clicked.connect(self.generate_video_podcast)
+        self.check_box_connected_channels.setChecked(True)
     def setup_detail(self):
         self.line_edit_name_1.setDisabled(True)
         self.line_edit_name_2.setDisabled(True)
@ -35,6 +43,7 @@ class MainWindow(QMainWindow, Ui_MainWindow):
         dialog.setWindowTitle(title)
         label = QLabel()
         label.setText(msg)
+        label.setObjectName('message_box')
         buttons = QDialogButtonBox.Ok
         button_box = QDialogButtonBox(buttons)
         button_box.accepted.connect(dialog.accept)
@ -44,6 +53,13 @@ class MainWindow(QMainWindow, Ui_MainWindow):
         dialog.setLayout(layout)
         return dialog

+    def test_data(self):
+        self.line_edit_audio_1.setText('./src/sounds/oba_kanały.mp3')
+        self.preview_label_avatar_1.setProperty('path', './src/gui/images/both.png')
+        self.preview_label_avatar_2.setProperty('path', './src/gui/images/none.png')
+        self.preview_label_avatar_3.setProperty('path', './src/gui/images/left.png')
+        self.preview_label_avatar_4.setProperty('path', './src/gui/images/right.png')
     def open_image_import(self, image_number: int):
         dialog = QFileDialog()
         dialog.setOption(dialog.DontUseNativeDialog, True)
@ -61,26 +77,29 @@ class MainWindow(QMainWindow, Ui_MainWindow):
             dialog.exec_()
         else:
-            pixmap = QPixmap(file)
-            scaled_pixmap = pixmap.scaled(pixmap.size(), Qt.KeepAspectRatio)
-            if image_number == 1:
-                self.preview_label_avatar_1.setProperty('path', file)
-                self.preview_label_avatar_1.setPixmap(scaled_pixmap)
-            elif image_number == 2:
-                self.preview_label_avatar_2.setProperty('path', file)
-                self.preview_label_avatar_2.setPixmap(scaled_pixmap)
-            elif image_number == 3:
-                self.preview_label_avatar_3.setProperty('path', file)
-                self.preview_label_avatar_3.setPixmap(scaled_pixmap)
-            elif image_number == 4:
-                self.preview_label_avatar_4.setProperty('path', file)
-                self.preview_label_avatar_4.setPixmap(scaled_pixmap)
-            print(f"[*] Imported image {image_number} from file: '{file}'.")
-            print(f"[*] Image resolution: {pixmap.width()}x{pixmap.height()}")
+            if file:
+                pixmap = QPixmap(file)
+                scaled_pixmap = pixmap.scaled(pixmap.size(), Qt.KeepAspectRatio)
+                if image_number == 1:
+                    self.preview_label_avatar_1.setProperty('path', file)
+                    self.preview_label_avatar_1.setPixmap(scaled_pixmap)
+                elif image_number == 2:
+                    self.preview_label_avatar_2.setProperty('path', file)
+                    self.preview_label_avatar_2.setPixmap(scaled_pixmap)
+                elif image_number == 3:
+                    self.preview_label_avatar_3.setProperty('path', file)
+                    self.preview_label_avatar_3.setPixmap(scaled_pixmap)
+                elif image_number == 4:
+                    self.preview_label_avatar_4.setProperty('path', file)
+                    self.preview_label_avatar_4.setPixmap(scaled_pixmap)
+                print(f"[*] Imported image {image_number} from file: '{file}'.")
+                print(f"[*] Image resolution: {pixmap.width()}x{pixmap.height()}")
     def open_audio_import(self, audio_number: int):
         dialog = QFileDialog()
@ -107,12 +126,52 @@ class MainWindow(QMainWindow, Ui_MainWindow):
print(F"[*] Zaimportowano ścieżkę {audio_number} pliku: '{file}'.")
def channels_connected(self, checked):
print(checked)
if checked:
self.button_audio_2.setDisabled(True)
self.line_edit_audio_2.setDisabled(True)
self.line_edit_audio_1.setPlaceholderText("Plik audio rozmówców")
else:
self.button_audio_2.setDisabled(False)
self.line_edit_audio_2.setDisabled(False)
self.line_edit_audio_1.setPlaceholderText("Plik audio 1 rozmówcy")
def generate_video_podcast(self):
# TODO: Change to pure python
"""Generate podcast based on values from UI."""
audio_1 = self.line_edit_audio_1.text()
audio_2 = self.line_edit_audio_2.text()
both_image = self.preview_label_avatar_1.property('path')
none_image = self.preview_label_avatar_2.property('path')
left_image = self.preview_label_avatar_3.property('path')
right_image = self.preview_label_avatar_4.property('path')
with tempfile.TemporaryDirectory() as tmp_dir_name:
print('created temporary directory', tmp_dir_name)
print('[*] Created temporary directory', tmp_dir_name)
pics_dir = tmp_dir_name + "/pics/"
print(f'[*] Created pics directory in {tmp_dir_name}')
os.mkdir(pics_dir)
print(f'[*] Copy images to {pics_dir}')
shutil.copy(both_image, pics_dir + "both.png")
shutil.copy(none_image, pics_dir + "none.png")
shutil.copy(left_image, pics_dir + "left.png")
shutil.copy(right_image, pics_dir + "right.png")
print(f'[*] Images in {pics_dir}: {os.listdir(pics_dir)}')
echo_temp_dir_name = subprocess.check_output(["echo", tmp_dir_name]).decode('utf-8')
ech = subprocess.check_output(["bash", "./generate.sh",
tmp_dir_name,
audio_1, audio_2,
both_image, none_image,
left_image, right_image,
str(self.aresample)]).decode('utf-8')
print(echo_temp_dir_name)
print(ech)
pass

View File

@ -14,7 +14,7 @@ from PyQt5 import QtCore, QtGui, QtWidgets
 class Ui_MainWindow(object):
     def setupUi(self, MainWindow):
         MainWindow.setObjectName("MainWindow")
-        MainWindow.resize(496, 711)
+        MainWindow.resize(554, 745)
         self.central_widget = QtWidgets.QWidget(MainWindow)
         self.central_widget.setMinimumSize(QtCore.QSize(0, 0))
         self.central_widget.setObjectName("central_widget")
@ -74,6 +74,9 @@ class Ui_MainWindow(object):
         self.button_audio_2.setObjectName("button_audio_2")
         self.h_layout_select_audio_2.addWidget(self.button_audio_2)
         self.verticalLayout.addLayout(self.h_layout_select_audio_2)
+        self.check_box_connected_channels = QtWidgets.QCheckBox(self.central_widget)
+        self.check_box_connected_channels.setObjectName("check_box_connected_channels")
+        self.verticalLayout.addWidget(self.check_box_connected_channels)
         self.h_line = QtWidgets.QFrame(self.central_widget)
         self.h_line.setFrameShape(QtWidgets.QFrame.HLine)
         self.h_line.setFrameShadow(QtWidgets.QFrame.Sunken)
@ -246,9 +249,10 @@ class Ui_MainWindow(object):
         self.h_line_2.raise_()
         self.h_slider.raise_()
         self.h_line_3.raise_()
+        self.check_box_connected_channels.raise_()
         MainWindow.setCentralWidget(self.central_widget)
         self.menu_bar = QtWidgets.QMenuBar(MainWindow)
-        self.menu_bar.setGeometry(QtCore.QRect(0, 0, 496, 32))
+        self.menu_bar.setGeometry(QtCore.QRect(0, 0, 554, 32))
         self.menu_bar.setObjectName("menu_bar")
         self.menu_project = QtWidgets.QMenu(self.menu_bar)
         self.menu_project.setObjectName("menu_project")
@ -294,6 +298,7 @@ class Ui_MainWindow(object):
         self.line_edit_audio_2.setPlaceholderText(_translate("MainWindow", "Plik audio 2 rozmówcy"))
         self.line_edit_name_2.setPlaceholderText(_translate("MainWindow", "Imię 2 rozmówcy"))
         self.button_audio_2.setText(_translate("MainWindow", "Dodaj audio 2"))
+        self.check_box_connected_channels.setText(_translate("MainWindow", "Zaznacz jeśli kanały rozmówców są połączone w jednym nagraniu"))
         self.preview_label_avatar_1.setText(_translate("MainWindow", "Podgląd - rozmówcy aktywni"))
         self.button_select_avatar_1.setText(_translate("MainWindow", "Rozmówcy - aktywni"))
         self.preview_label_avatar_2.setText(_translate("MainWindow", "Podgląd - rozmówcy nieaktywni"))

View File

@ -1,8 +1,12 @@
 from src.python.ui.mainwindow_ui import Ui_MainWindow
+from src.python.classes.mainwindow import MainWindow
 import unittest
+from PyQt5.QtWidgets import QApplication, QDialog, QLabel
+import sys


 class Test_Ui_MainWindow(unittest.TestCase):
+    app = QApplication(sys.argv)

     def setUp(self) -> None:
         pass
@ -10,10 +14,19 @@ class Test_Ui_MainWindow(unittest.TestCase):
     def tearDown(self) -> None:
         pass

-    def test_check_create__mainwindow(self):
+    def test_create_mainwindow(self):
         ui_mainwindow = Ui_MainWindow()
         self.assertIsInstance(ui_mainwindow, Ui_MainWindow)

+    def test_create_dialog(self):
+        mainwindow = MainWindow()
+        title = "test create dialog"
+        msg = "test msg"
+        dialog: QDialog = mainwindow.create_custom_dialog(title=title, msg=msg)
+        self.assertIsInstance(dialog, QDialog)
+        self.assertEqual(dialog.windowTitle(), title)
+        self.assertEqual(dialog.findChild(QLabel, name='message_box').text(), msg)


 if __name__ == '__main__':
     unittest.main()

View File

@ -0,0 +1,9 @@
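# Placeholder mock of the main window; note it subclasses Mock rather than
# unittest.TestCase, so unittest will not collect test_nothing.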
from unittest.mock import Mock


class TestMockMainwindown(Mock):
    def __init__(self, module_name=''):
        super().__init__()  # Mock needs its own __init__ before attributes can be set
        self.module_name = 'Test'

    def test_nothing(self):
        print(self.module_name)

14
tsconfig.json Normal file

@ -0,0 +1,14 @@
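// Compiler settings used by ts-node for the helper scripts
// (find-loudness.ts, generate-demuxer.ts, window.ts).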
{
  "compilerOptions": {
    "moduleResolution": "node",
    "noImplicitAny": true,
    "noImplicitThis": true,
    "sourceMap": true,
    "module": "commonjs",
    "target": "es6",
    "allowJs": false,
    "strictNullChecks": true,
    "esModuleInterop": true,
    "outDir": "build"
  }
}

26
window.ts Normal file

@ -0,0 +1,26 @@
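// Fixed-length sliding window over numeric samples. Not referenced by the
// other scripts in this commit yet; presumably intended for smoothing the
// volume signal.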
export default class Window {
  length: number;
  buffer: number[] = [];

  constructor(length: number) {
    this.length = length;
  }

  push(element: number) {
    // evict the oldest element once the window is full
    if (this.buffer.length === this.length) {
      this.buffer.shift();
    }
    this.buffer.push(element);
  }

  sum() {
    return this.buffer.reduce((a, b) => a + b, 0);
  }

  clear() {
    this.buffer = [];
  }

  isFull() {
    return this.buffer.length === this.length;
  }
}
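
// Hypothetical usage (not wired up anywhere yet):
//   const w = new Window(3);
//   [1, 2, 3, 4].forEach((v) => w.push(v));
//   console.log(w.sum(), w.isFull()); // 9 (2+3+4), true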