Remove the TypeScript code from the podcast generation process. Pylama fixes. Add icons.

Jarosław Wieczorek 2021-01-10 11:09:45 +01:00
parent 3ec5ec3e14
commit 2e5202e33d
20 changed files with 304 additions and 25 deletions

Pipfile

@@ -10,6 +10,7 @@ pipfile-requirements = "*"
pylama = "*" pylama = "*"
ffmpeg = "*" ffmpeg = "*"
pip = "*" pip = "*"
ffmpeg-python = "*"
[dev-packages] [dev-packages]

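The new ffmpeg-python dependency is only declared here; the generation steps added below still shell out to ffmpeg through the bash_commands scripts. A minimal sketch of the binding, with a placeholder input path, in case the pipeline is later moved to pure Python as the TODO in generate_video_podcast suggests:

import ffmpeg  # provided by the ffmpeg-python package pinned above

# Placeholder path; probe() wraps ffprobe and returns its JSON output as a dict.
info = ffmpeg.probe("/path/to/podcast.wav")
print(info["streams"][0]["sample_rate"])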
17
Pipfile.lock generated

@ -1,7 +1,7 @@
{ {
"_meta": { "_meta": {
"hash": { "hash": {
"sha256": "a59428c79a266fc1237cdd4e74b0803bb5f39803495f1fc6c134df7333cdc2bc" "sha256": "6c12b28b456a859b8139dd5175876beeec58f47abcfbcb6d98dbfdc3a955320d"
}, },
"pipfile-spec": 6, "pipfile-spec": 6,
"requires": { "requires": {
@ -31,6 +31,14 @@
"index": "pypi", "index": "pypi",
"version": "==1.4" "version": "==1.4"
}, },
"ffmpeg-python": {
"hashes": [
"sha256:65225db34627c578ef0e11c8b1eb528bb35e024752f6f10b78c011f6f64c4127",
"sha256:ac441a0404e053f8b6a1113a77c0f452f1cfc62f6344a769475ffdc0f56c23c5"
],
"index": "pypi",
"version": "==0.2.0"
},
"flask": { "flask": {
"hashes": [ "hashes": [
"sha256:4efa1ae2d7c9865af48986de8aeb8504bf32c7f3d6fdc9353d34b21f4b127060", "sha256:4efa1ae2d7c9865af48986de8aeb8504bf32c7f3d6fdc9353d34b21f4b127060",
@ -39,6 +47,13 @@
"index": "pypi", "index": "pypi",
"version": "==1.1.2" "version": "==1.1.2"
}, },
"future": {
"hashes": [
"sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==0.18.2"
},
"itsdangerous": { "itsdangerous": {
"hashes": [ "hashes": [
"sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19",

5
bash_commands/connect_sound.sh Normal file

@@ -0,0 +1,5 @@
#!/bin/bash
tmp_dir=$1
audio=$2
ffmpeg -i $tmp_dir/video/video.mp4 -i $audio -ac 1 -tune stillimage ./out/video-and-audio.mp4 -y

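For comparison, a hedged ffmpeg-python sketch of the same mux step (not part of this commit; the paths are placeholders mirroring the script above):

import ffmpeg

tmp_dir = "/tmp/example"          # placeholder for the temporary directory
audio = "/path/to/podcast.wav"    # placeholder for the source audio file
video_in = ffmpeg.input(f"{tmp_dir}/video/video.mp4")
audio_in = ffmpeg.input(audio)
ffmpeg.output(video_in, audio_in, "./out/video-and-audio.mp4",
              ac=1, tune="stillimage").overwrite_output().run()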
8
bash_commands/create_raw_files.sh Normal file

@@ -0,0 +1,8 @@
#!/bin/bash
tmp_dir=$1
aresample=$2
ffmpeg -i $tmp_dir/left.wav -ac 1 -filter:a aresample=$aresample -map 0:a -c:a pcm_u8 -f data - > $tmp_dir/leftraw &
ffmpeg -i $tmp_dir/right.wav -ac 1 -filter:a aresample=$aresample -map 0:a -c:a pcm_u8 -f data - > $tmp_dir/rightraw &
wait;

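The same conversion could be done in-process with ffmpeg-python; a sketch under the assumption that the raw bytes are wanted in memory rather than in the leftraw/rightraw files (the -map 0:a flag from the script is omitted for brevity):

import ffmpeg

def read_u8_samples(wav_path: str, aresample: str = "8000") -> bytes:
    # Decode to mono, resample, and capture headerless unsigned 8-bit samples from stdout.
    out, _ = (
        ffmpeg.input(wav_path)
        .output("pipe:", format="data", acodec="pcm_u8", ac=1, af=f"aresample={aresample}")
        .run(capture_stdout=True)
    )
    return out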
5
bash_commands/generate_video_by_demuxer.sh Normal file

@@ -0,0 +1,5 @@
#!/bin/bash
tmp_dir=$1
tmp_video_dir=$2
ffmpeg -y -f concat -safe 0 -i demuxer.txt -r 30 -tune stillimage -vsync vfr -pix_fmt yuv420p $tmp_video_dir/video.mp4

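demuxer.txt (shown below) is an ffmpeg concat-demuxer script of alternating file/duration lines written by generate_demuxer.py; this script renders it into a 30 fps still-image video. A hedged ffmpeg-python equivalent of the same call, with a placeholder output directory (demuxer.txt is read from the working directory, as in the script):

import ffmpeg

tmp_video_dir = "/tmp/example/video"   # placeholder for the temporary video directory
(
    ffmpeg.input("demuxer.txt", format="concat", safe=0)
    .output(f"{tmp_video_dir}/video.mp4",
            r=30, tune="stillimage", vsync="vfr", pix_fmt="yuv420p")
    .overwrite_output()
    .run()
)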
5
bash_commands/split_channels_to_two_ways.sh Normal file

@@ -0,0 +1,5 @@
#!/bin/bash
tmp_dir=$1
both_channels=$2
ffmpeg -i $both_channels -map_channel 0.0.0 $tmp_dir/left.wav -map_channel 0.0.1 $tmp_dir/right.wav

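And the channel split expressed with ffmpeg-python (a sketch with placeholder paths; unlike the single ffmpeg invocation above, this decodes the input once per output):

import ffmpeg

both_channels = "/tmp/example/audio/both_channel.wav"  # placeholder stereo input
audio_dir = "/tmp/example/audio"                        # placeholder output directory
src = ffmpeg.input(both_channels)
# -map_channel 0.0.0 / 0.0.1 pick the left and right channel of the first audio stream.
src.output(f"{audio_dir}/left.wav", **{"map_channel": "0.0.0"}).run()
src.output(f"{audio_dir}/right.wav", **{"map_channel": "0.0.1"}).run()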
33
demuxer.txt Normal file

@@ -0,0 +1,33 @@
file /tmp/tmpz9izgjti/pics/none.png
duration 0.286
file /tmp/tmpz9izgjti/pics/right.png
duration 3.47975
file /tmp/tmpz9izgjti/pics/none.png
duration 0.427375
file /tmp/tmpz9izgjti/pics/right.png
duration 0.300375
file /tmp/tmpz9izgjti/pics/none.png
duration 0.24225
file /tmp/tmpz9izgjti/pics/right.png
duration 1.837375
file /tmp/tmpz9izgjti/pics/both.png
duration 1.1325
file /tmp/tmpz9izgjti/pics/left.png
duration 0.199
file /tmp/tmpz9izgjti/pics/both.png
duration 3.920625
file /tmp/tmpz9izgjti/pics/right.png
duration 0.7935
file /tmp/tmpz9izgjti/pics/both.png
duration 2.446375
file /tmp/tmpz9izgjti/pics/right.png
duration 0.331875
file /tmp/tmpz9izgjti/pics/both.png
duration 0.58725
file /tmp/tmpz9izgjti/pics/right.png
duration 0.082
file /tmp/tmpz9izgjti/pics/both.png
duration 3.36025
file /tmp/tmpz9izgjti/pics/right.png
duration 0.1615
file /tmp/tmpz9izgjti/pics/right.png

52
find_loudness.py Normal file

@@ -0,0 +1,52 @@
def createReadStream(file):
    """Create Bytes Stream"""
    data = open(file, 'rb')
    return data


def process_find_loudness(file_path: str, threshold_at_point: int, inertia_samples: float, label: str):
    print("Start process to find loudness in:")
    print(
        F"\tfile_path: {file_path}\n"
        F"\tthreshold_at_point: {threshold_at_point}\n"
        F"\tinertia_samples: {inertia_samples}\n"
        F"\tlabel: {label}\n"
    )
    stream = createReadStream(file_path)
    position = 0
    results = []
    last_swap_position = 0
    keep_loud_until = -1
    was_loud_last_time = False

    print("Read chunks")
    chunks = stream.read()
    print(F"Length: {len(chunks)}")

    try:
        for i, byte in enumerate(chunks):
            position += 1
            volume = abs(byte - 128)
            if position >= keep_loud_until:
                is_loud = volume > threshold_at_point
                if is_loud != was_loud_last_time:
                    swap_point = {
                        'position_start': last_swap_position,
                        'duration': position - last_swap_position,
                        'loud': was_loud_last_time,
                        'label': label
                    }
                    results.append(swap_point)
                    last_swap_position = position
                    was_loud_last_time = is_loud
            if volume > threshold_at_point:
                keep_loud_until = position + inertia_samples
        return results
    except Exception as err:
        print(err)

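A usage sketch mirroring how generate_demuxer.py calls this below: the input is the headerless mono pcm_u8 stream written by bash_commands/create_raw_files.sh, volume is the distance of each byte from the 128 midpoint, and each returned dict marks one quiet/loud segment measured in samples (the values in the comment are illustrative):

from find_loudness import process_find_loudness

# At 8000 samples per second, 0.3 s of inertia is 2400 samples.
segments = process_find_loudness(
    "/tmp/example/audio/leftraw",   # placeholder path to the raw u8 stream
    threshold_at_point=7,
    inertia_samples=2400,
    label="left",
)
print(segments[0])
# e.g. {'position_start': 0, 'duration': 2288, 'loud': False, 'label': 'left'}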
91
generate_demuxer.py Normal file

@ -0,0 +1,91 @@
import math
from find_loudness import process_find_loudness
graph_density = 8000
threshold_at_point = 7
inertia_s = 0.3
inertia_samples = inertia_s * graph_density
def s(n: int):
global graph_density
return n / graph_density
def seconds(units: int):
return math.floor(s(units) % 60)
def minutes(units: int):
return math.floor(s(units) / 60)
def hours(units: int):
return math.floor(s(units) / 60 / 60)
def formatTime(units: int):
return f"{hours(units)}:{minutes(units)}:{seconds(units)}"
def new_mode(m, s):
data = m
data[s['label']] = s['loud']
return data
def mode_to_string(mode):
if mode['left'] and mode['right']:
return 'both'
elif mode['left']:
return 'left'
elif mode['right']:
return 'right'
else:
return "none"
def run(tmp_dir):
global inertia_samples
out_demuxer = 'demuxer.txt'
with open(out_demuxer, 'w') as demuxer:
# Execute process_find_loudness for left and right side
left_loudness = process_find_loudness(
tmp_dir + "/audio/leftraw",
threshold_at_point=threshold_at_point,
inertia_samples=inertia_samples,
label="left")
right_loudness = process_find_loudness(
tmp_dir + "/audio/rightraw",
threshold_at_point=threshold_at_point,
inertia_samples=inertia_samples,
label="right")
merged = [*left_loudness, *right_loudness]
sorted_list = sorted(merged, key=lambda x: x['position_start'])
demuxer.write(F"file {tmp_dir}/pics/none.png\n")
last_point = 0
mode = {'left': False, 'right': False}
last_file = ''
total = 0
for i in range(2, len(sorted_list)):
point = sorted_list[i]
mode = new_mode(m=mode, s=point)
file = F"{tmp_dir}/pics/{mode_to_string(mode)}.png"
duration = (point['position_start'] - last_point) / graph_density
demuxer.write(F"duration {duration}\n")
demuxer.write(F"file {file}\n")
last_point = point['position_start']
last_file = file
total += duration * graph_density
demuxer.write(F"duration {sorted_list[len(sorted_list) - 1]['duration'] / graph_density}\n")
demuxer.write(F"file {last_file}\n")
print(F"{total} {formatTime(total)}\n")

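The unit conversion in run() is simply samples to seconds via graph_density (the 8000 Hz aresample rate); a small worked check with illustrative numbers, matching the first duration line in the demuxer.txt above:

graph_density = 8000                      # raw samples per second (the aresample rate)
segment_samples = 2288                    # illustrative quiet-segment length in samples
print(segment_samples / graph_density)    # 0.286 -> "duration 0.286" in demuxer.txt
# formatTime() renders a sample count as hours:minutes:seconds,
# e.g. 160000 samples -> 20.0 s -> "0:0:20".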
10 binary image files added (icons; previews not shown): 1.3 KiB, 1.5 KiB, 1.7 KiB, 840 B, 950 B, 1.2 KiB, 2.3 KiB, 1.4 KiB, 1.4 KiB, 780 B.


@@ -2,14 +2,18 @@ import os
 import shutil
 import subprocess
 import tempfile
-import resources_rc
+# import resources_rc
 from PyQt5.QtCore import Qt
 from PyQt5.QtGui import QPixmap
 from PyQt5.QtWidgets import QMainWindow, QLabel, QFileDialog, QDialog, QDialogButtonBox, QVBoxLayout, QApplication
 from src.python.classes.settings_dialog import SettingsDialog
 from src.python.ui.mainwindow_ui import Ui_MainWindow
 from src.python.classes.translate import Translator
+import generate_demuxer
 
 
 class MainWindow(QMainWindow, QApplication, Ui_MainWindow):
     def __init__(self, parent=None):
@@ -22,7 +26,7 @@ class MainWindow(QMainWindow, QApplication, Ui_MainWindow):
         self.setup_logic()
         self.setup_detail()
         self.retranslateUi(self.window())
-        self.aresample = 8000
+        self.aresample = "8000"
         self.test_data()
 
     def setup_logic(self):
@@ -176,36 +180,96 @@ class MainWindow(QMainWindow, QApplication, Ui_MainWindow):
     def generate_video_podcast(self):
         # TODO: Change to pure python
         """Generate podcast based on values from UI."""
-        audio_1 = self.line_edit_audio_1.text()
-        audio_2 = self.line_edit_audio_2.text()
-        both_image = self.preview_label_avatar_1.property('path')
-        none_image = self.preview_label_avatar_2.property('path')
-        left_image = self.preview_label_avatar_3.property('path')
-        right_image = self.preview_label_avatar_4.property('path')
+        connected_channels = self.check_box_connected_channels.isChecked()
+
+        # Setup images
+        image_files = {
+            'both': self.preview_label_avatar_1.property('path'),
+            'none': self.preview_label_avatar_2.property('path'),
+            'left': self.preview_label_avatar_3.property('path'),
+            'right': self.preview_label_avatar_4.property('path')
+        }
+
+        audio_files = []
+        if not connected_channels:
+            audio_files.append({'file': self.line_edit_audio_1.text()})
+            audio_files.append({'file': self.line_edit_audio_2.text()})
+        else:
+            audio_files.append({'file': self.line_edit_audio_1.text()})
+
+        # Split name and extension of the file
+        for dictionary in audio_files:
+            dictionary['ext'] = dictionary['file'].rsplit('.')[-1]
 
         with tempfile.TemporaryDirectory() as tmp_dir_name:
-            print('[*] Created temporary directory', tmp_dir_name)
-            pics_dir = tmp_dir_name + "/pics/"
-            print(f'[*] Created pics directory in {tmp_dir_name}')
+            print(tmp_dir_name)
+            print(f'[*] Create temporary directory: {tmp_dir_name}')
+            tmp_out_dir = tmp_dir_name + "/out"
+            pics_dir = tmp_dir_name + "/pics"
+            audio_dir = tmp_dir_name + "/audio"
+            video_dir = tmp_dir_name + "/video"
+
+            print(F"[!] Create tmp out dir: {tmp_out_dir}")
+            os.mkdir(tmp_out_dir)
+            print(f'[*] Create pics dir: {pics_dir}')
             os.mkdir(pics_dir)
+            print(f'[*] Create audio dir: {audio_dir}')
+            os.mkdir(audio_dir)
+            print(f'[*] Create video dir: {video_dir}\n')
+            os.mkdir(video_dir)
+
             print(f'[*] Copy images to {pics_dir}')
-            shutil.copy(both_image, pics_dir + "both.png")
-            shutil.copy(none_image, pics_dir + "none.png")
-            shutil.copy(left_image, pics_dir + "left.png")
-            shutil.copy(right_image, pics_dir + "right.png")
+            shutil.copy(image_files['both'], pics_dir + "/both.png")
+            shutil.copy(image_files['none'], pics_dir + "/none.png")
+            shutil.copy(image_files['left'], pics_dir + "/left.png")
+            shutil.copy(image_files['right'], pics_dir + "/right.png")
+
+            print(f'[*] Copy audio to {audio_dir}\n')
+            if not self.check_box_connected_channels.isChecked():
+                audio_files[0]['tmp'] = audio_dir + "/left_channel" + "." + audio_files[0]['ext']
+                audio_files[1]['tmp'] = audio_dir + "/right_channel" + "." + audio_files[1]['ext']
+                shutil.copy(audio_files[0]['file'], audio_files[0]['tmp'])
+                shutil.copy(audio_files[1]['file'], audio_files[1]['tmp'])
+            else:
+                audio_files[0]['tmp'] = audio_dir + "/both_channel" + "." + audio_files[0]['ext']
+                shutil.copy(audio_files[0]['file'], audio_files[0]['tmp'])
 
             print(f'[*] Images in {pics_dir}: {os.listdir(pics_dir)}')
-            echo_temp_dir_name = subprocess.check_output(["echo", tmp_dir_name]).decode('utf-8')
-            ech = subprocess.check_output(["bash", "./generate.sh",
-                                           tmp_dir_name,
-                                           audio_1, audio_2,
-                                           both_image, none_image,
-                                           left_image, right_image,
-                                           str(self.aresample)]).decode('utf-8')
-            print(echo_temp_dir_name)
-            print(ech)
+            print(f'[*] Audo files in {audio_dir}: {os.listdir(audio_dir)}')
+            subprocess.check_output(["echo", tmp_dir_name]).decode('utf-8')
+
+            if connected_channels:
+                # Split channels
+                print("[-] Split channels - start:")
+                subprocess.check_output(["bash",
+                                         "bash_commands/split_channels_to_two_ways.sh",
+                                         audio_dir,
+                                         audio_files[0]['tmp']])
+                print("[+] Split channels - done")
+
+            print("[-] Create raw files - start:")
+            subprocess.check_output(['bash',
+                                     'bash_commands/create_raw_files.sh',
+                                     audio_dir, self.aresample])
+            print("[+] Create raw files - done")
+
+            print("[-] Create demuxer - start:")
+            generate_demuxer.run(tmp_dir=tmp_dir_name)
+            print("[+] Create demuxer - done")
+
+            print('[-] Create video - start:')
+            subprocess.check_output(
+                ['bash', 'bash_commands/generate_video_by_demuxer.sh', tmp_dir_name, video_dir])
+            print('[+] Create video - done')
+            # while True: pass
+
+            print('[-] Create final podcast - start:')
+            subprocess.check_output(
+                ['bash', 'bash_commands/connect_sound.sh', tmp_dir_name, audio_files[0]['file']])
+            print('[+] Create final podcast - done')