alpha 0.0.1
commit 55de001c95

generate.py (new file, 22 lines)
@@ -0,0 +1,22 @@
import numpy as np
from keras.layers import Input, Dense, Conv2D
from keras.models import Model

GENERATED_BEAT_PATH = 'data/output/generated_bar'
MODEL_PATH = 'data/autoencoder_model.h5'
SAMPLES_PATH = 'data/samples.npz'

# Rebuild the autoencoder architecture so the saved weights can be loaded.
input = Input(shape=(1, 96, 128))
encoded = Conv2D(filters=32, kernel_size=1)(input)
decoded = Conv2D(filters=128, kernel_size=1)(encoded)
autoencoder = Model(input, decoded)

# load weights into new model
autoencoder.load_weights(MODEL_PATH)
print("Loaded model from disk")

# generate_seed = np.random.rand(1,1,96,128)
generate_seed = np.load(SAMPLES_PATH)['arr_0'][0:]

generated_beat = autoencoder.predict(generate_seed)
np.savez_compressed(GENERATED_BEAT_PATH, generated_beat)
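np.savez_compressed stores the generated bar under the default 'arr_0' key and appends a .npz suffix, so the output of generate.py can be sanity-checked with a few lines of NumPy. A minimal sketch, not part of the commit; the expected shape assumes the Conv2D model above:

import numpy as np

# Path is GENERATED_BEAT_PATH plus the .npz suffix that savez_compressed adds.
generated = np.load('data/output/generated_bar.npz')['arr_0']
print(generated.shape)  # expected: (n_seeds, 1, 96, 128) for the Conv2D autoencoder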

midi_to_samples.py (new file, 76 lines)
@@ -0,0 +1,76 @@
import settings
import pypianoroll as roll
import matplotlib.pyplot as plt
import numpy as np
import os
from math import floor

MIDI_DIRECTORY = settings.midi_path
SAMPLES_DIRECTORY = settings.samples_path
MIDI_RESOLUTION = settings.midi_resolution
BEAT_PER_BATCH = settings.beats_per_sample

samples = np.empty((0, BEAT_PER_BATCH, 96, 128))


def erase_note_lenth(pianoroll):
    # Keep only note onsets: any positive step that directly follows another
    # positive step on the same row is replaced with 0.
    if pianoroll.ndim != 2:
        raise ValueError('pianoroll should be two-dimensional')
    now_block = []
    for x in pianoroll:
        this = None
        prev = None
        new_line = []
        for y in x:
            this = y
            if prev is not None:
                if this > 0 and prev > 0:
                    new_line.append(0)
                else:
                    new_line.append(y)
            else:
                new_line.append(y)
            prev = this
        now_block.append(new_line)
    return np.array(now_block)


print('Start conversion')
for midi_file in os.listdir(MIDI_DIRECTORY):
    try:
        print('Reading file: {}'.format(midi_file))
        song = roll.Multitrack('{}/{}'.format(MIDI_DIRECTORY, midi_file))
        # no_drums_mt = roll.Multitrack(tempo=120.0, downbeat=[0, 96, 192, 288], beat_resolution=24)
        intruments_only = roll.Multitrack(tempo=120.0, beat_resolution=24)

        for track in song.tracks:
            if not track.is_drum:
                print(track.name, track.program)
                intruments_only.append_track(track=track, pianoroll=track.pianoroll)
                instrument_track = track.pianoroll

                # plt.imshow(instrument_track[24*8:24*24].T)
                # plt.savefig('data/0_{}.png'.format(midi_file))

                instrument_track = erase_note_lenth(instrument_track.T).T
                # plt.imshow(instrument_track[24*8:24*24].T)
                # plt.savefig('data/1_{}.png'.format(midi_file))

                # instruments = no_drums_mt.get_merged_pianoroll(mode='sum')

                # Keep only whole multiples of BEAT_PER_BATCH beats, then split
                # the pianoroll into (BEAT_PER_BATCH, 96, 128) samples.
                beats = floor((instrument_track.shape[0] / MIDI_RESOLUTION) / BEAT_PER_BATCH) * BEAT_PER_BATCH
                notes_for_beats = beats * MIDI_RESOLUTION

                print('beats: ', beats)
                samples_of_song = np.asarray(np.split(instrument_track[:notes_for_beats], beats))
                samples_of_song = samples_of_song.reshape(int(beats / BEAT_PER_BATCH), BEAT_PER_BATCH, 96, 128)

                print('Converted samples: {}'.format(samples_of_song.shape))
                samples = np.concatenate([samples_of_song, samples], axis=0)
                np.savez_compressed(SAMPLES_DIRECTORY, samples)

    except Exception as error:
        print('Conversion failed: {}'.format(error))

    finally:
        print('Done!')
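For orientation, the sample-shaping arithmetic above works out as follows: with midi_resolution = 96 and beats_per_sample = 1 (the values in settings.py), a pianoroll of 10000 time steps gives beats = floor(10000 / 96 / 1) * 1 = 104 and notes_for_beats = 104 * 96 = 9984, so the trailing partial segment is discarded before the split.

The per-element loops in erase_note_lenth can also be expressed with array operations. A vectorized reference sketch, not part of the commit; the name np_erase_note_length is made up here:

import numpy as np

def np_erase_note_length(pianoroll):
    # Zero every positive step that directly follows another positive step on
    # the same row, so only note onsets survive (same values as erase_note_lenth).
    if pianoroll.ndim != 2:
        raise ValueError('pianoroll should be two-dimensional')
    out = pianoroll.copy()
    sustained = (pianoroll[:, 1:] > 0) & (pianoroll[:, :-1] > 0)
    out[:, 1:][sustained] = 0
    return out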

samples_to_midi.py (new file, 26 lines)
@@ -0,0 +1,26 @@
GENERATED_BEAT_PATH = 'data/output/generated_bar.npz'
OUTPUT_PATH = 'data/output/generated_midi.mid'
OUTPUT_PATH_PIANOROLL = 'data/output/pianoroll.png'

import pypianoroll as roll
import matplotlib.pyplot as plt
import numpy as np
import os

instruments = np.load(GENERATED_BEAT_PATH)['arr_0'][0]

# Binarise the generated bar (relative to its minimum value) and scale the
# remaining cells to a 0/255 velocity range.
instruments = instruments.reshape(96, 128)
instruments = instruments > instruments.min() * 0.3
instruments = instruments * 255

# zeros_up = np.zeros((instruments.shape[0],24))
# zeros_down = np.zeros((instruments.shape[0], 20))
# instruments_full = np.concatenate([zeros_up,instruments], axis=1)
# instruments_full = np.concatenate([instruments_full,zeros_down], axis=1)

i = roll.Track(instruments, program=0)
return_midi = roll.Multitrack(tracks=[i], tempo=120.0, downbeat=[0, 96, 192, 288], beat_resolution=24)
roll.write(return_midi, OUTPUT_PATH)

plt.imshow(instruments.T, cmap='gray')
plt.savefig(OUTPUT_PATH_PIANOROLL)
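Note that thresholding against instruments.min() * 0.3 keeps essentially every cell when the prediction is non-negative, since the threshold then sits at or below the minimum. If the intent was to keep only the strongest activations, a common alternative (an assumption, not what this commit does) is to threshold against the maximum:

# hypothetical variant: keep cells above 30% of the strongest activation
instruments = instruments > instruments.max() * 0.3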

settings.py (new file, 4 lines)
@@ -0,0 +1,4 @@
midi_path = 'data/midi'
samples_path = 'data/samples'
midi_resolution = 96
beats_per_sample = 1

settings.pyc (new binary file, not shown)

train.py (new file, 38 lines)
@@ -0,0 +1,38 @@
SAMPLES_PATH = 'data/samples.npz'
MODEL_PATH = 'data/autoencoder_model.h5'
EPOCHS = 100

import tensorflow as tf
from tensorflow.keras import layers
from keras.layers import Input, Dense, Conv2D, Flatten
from keras.models import Model
import numpy as np
from sys import exit
import pickle

print('Reading samples from: {}'.format(SAMPLES_PATH))

train_samples = np.load(SAMPLES_PATH)['arr_0']
train_samples = train_samples.reshape(train_samples.shape[0], 1 * 96 * 128)

# input = Input(shape=(1,96,128))
# encoded = Conv2D(filters = 32, kernel_size = 1, activation='relu')(input)
# decoded = Conv2D(filters = 128, kernel_size = 1, activation='sigmoid')(encoded)
# autoencoder = Model(input, decoded)
#
# autoencoder.compile(optimizer='adadelta',
#                     loss='binary_crossentropy',
#                     metrics=['accuracy'])

# Fully connected autoencoder on the flattened (1*96*128,) samples.
input = Input(shape=(1 * 96 * 128,))
encoded = Dense(128)(input)
decoded = Dense(96 * 128)(encoded)
autoencoder = Model(input, decoded)

autoencoder.compile(optimizer='adadelta',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])

autoencoder.fit(train_samples, train_samples, epochs=EPOCHS, batch_size=150)

autoencoder.save_weights(MODEL_PATH)
print("Saved model to disk")
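Because the model is trained on flattened vectors, a quick reconstruction check has to flatten a sample the same way. A minimal sketch, not part of the commit, assuming the names defined in train.py above and run after training (or after load_weights):

sample = np.load(SAMPLES_PATH)['arr_0'][:1].reshape(1, 1 * 96 * 128)
reconstruction = autoencoder.predict(sample)          # shape (1, 96 * 128)
reconstruction = reconstruction.reshape(1, 96, 128)   # back to a one-bar pianoroll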