Simple LSTM Music Generator #1

Merged
s444337 merged 1 commit from LSTM_music21_midi_encoding into master 2019-06-19 15:53:17 +02:00
3 changed files with 67 additions and 43 deletions
Showing only changes of commit 33db5a7c6d

View File

@@ -48,18 +48,14 @@ output_path = sys.argv[2]
 # this dictionary is generated with model
 print('Loading... {}'.format(trained_model_path))
 model = pickle.load(open(trained_model_path, 'rb'))
-int_to_note = pickle.load(open('{}_dict'.format(trained_model_path), 'rb'))
-# TODO: 16 it should a variable by integrated with model seq_len
-# TODO: random.randint(0,50), the range should be a variable of lenght of vocab size
-seed = [random.randint(0,250) for x in range(16)]
+int_to_note, n_vocab, seq_len = pickle.load(open('{}_dict'.format(trained_model_path), 'rb'))
+seed = [random.randint(0, n_vocab - 1) for x in range(seq_len)]

 music = []
 print('Generating...')
 for i in trange(124):
-    #TODO: 16 it should a variable by integrated with model seq_len
-    predicted_vector = model.predict(np.array(seed).reshape(1,16,1))
+    predicted_vector = model.predict(np.array(seed).reshape(1, seq_len, 1))
     # using best fitted note
     # predicted_index = np.argmax(predicted_vector)
     # using probability distribution for choosing note
@@ -67,14 +63,14 @@ for i in trange(124):
     predicted_index = choose_by_prob(predicted_vector)
     music.append(int_to_note[predicted_index])
     seed.append(predicted_index)
-    #TODO: 16 it should a variable by integrated with model seq_len
-    seed = seed[1:1+16]
+    seed = seed[1:1+seq_len]

 print('Saving...')
 offset = 0
 output_notes = []
-for event in tqdm(music):
+for _event in tqdm(music):
+    event, note_len = _event.split(';')
     if (' ' in event) or event.isdigit():
         notes_in_chord = event.split(' ')
         notes = []
@@ -91,7 +87,7 @@ for event in tqdm(music):
         new_note.storedInstrument = instrument.Piano()
         output_notes.append(new_note)
-    offset += 0.5
+    offset += float(note_len)

 midi_stream = stream.Stream(output_notes)
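
Note: the generator samples the next index from the softmax output instead of always taking the argmax (the commented-out `np.argmax` line), which keeps the output from collapsing into a loop of the same few notes. `choose_by_prob` itself is not part of this diff; a minimal sketch of what such a helper could look like, assuming the model returns a `(1, n_vocab)` softmax vector:

```python
import numpy as np

def choose_by_prob(predicted_vector):
    # Flatten the (1, n_vocab) softmax output into a 1-D probability vector.
    probs = np.asarray(predicted_vector).flatten()
    # Renormalize: float32 rounding can leave the sum slightly off 1.0,
    # which np.random.choice rejects.
    probs = probs / probs.sum()
    # Draw one vocabulary index, weighted by the predicted probabilities.
    return int(np.random.choice(len(probs), p=probs))
```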

View File

@@ -8,8 +8,7 @@ that is prepared for model training.
 output_path - the output path where samples of data will be created
 Usage:
->>> ./midi.py <midi_folder_path> <output_path>
+>>> ./midi.py <midi_folder_path> <output_path> <sequence_length>
 '''

 import settings
@ -24,9 +23,22 @@ import pickle
from music21 import converter, instrument, note, chord, stream from music21 import converter, instrument, note, chord, stream
import music21 import music21
midi_folder_path = sys.argv[1] class MidiParseError(Exception):
output_path = sys.argv[2] """Error that is raised then midi file cannot be parsed"""
seq_len = int(sys.argv[3]) pass
def parse_argv(argv):
'''This function is parsing given arguments when running a midi script.
Returns a tuple consinting of midi_folder_path, output_path, seq_len'''
try:
midi_folder_path = argv[1]
output_path = argv[2]
seq_len = int(argv[3])
return midi_folder_path, output_path, seq_len
except IndexError:
raise AttributeError('You propably didnt pass parameters to run midi.py script.\
>>> ./midi.py <midi_folder_path> <output_path> <sequence_lenth>')
def to_sequence(midi_path, seq_len): def to_sequence(midi_path, seq_len):
''' This function is supposed to be used on one midi file in directory loop. ''' This function is supposed to be used on one midi file in directory loop.
@ -40,22 +52,24 @@ def to_sequence(midi_path, seq_len):
- midi_path: path to midi file - midi_path: path to midi file
- seq_len: lenght of sequance before prediction - seq_len: lenght of sequance before prediction
Returns: Tuple of train_X, train_y directories''' Returns: Tuple of train_X, train_y dictionaries consisinting of samples of song grouped by instruments
'''
seq_by_instrument = defaultdict( lambda : [] ) seq_by_instrument = defaultdict( lambda : [] )
try:
midi_file = music21.converter.parse(midi_path) midi_file = music21.converter.parse(midi_path)
except music21.midi.MidiException:
raise MidiParseError
stream = music21.instrument.partitionByInstrument(midi_file) stream = music21.instrument.partitionByInstrument(midi_file)
for part in stream: for part in stream:
for event in part: for event in part:
if part.partName != None: if part.partName != None:
# TODO: add note lenght as parameter
if isinstance(event, music21.note.Note): if isinstance(event, music21.note.Note):
# to_export_event = (str(event.pitch), event.quarterLength) to_export_event = '{};{}'.format(str(event.pitch), float(event.quarterLength))
to_export_event = str(event.pitch)
seq_by_instrument[part.partName].append(to_export_event) seq_by_instrument[part.partName].append(to_export_event)
elif isinstance(event, music21.chord.Chord): elif isinstance(event, music21.chord.Chord):
to_export_event = ' '.join(str(note) for note in event.pitches) to_export_event = '{};{}'.format(' '.join(str(note) for note in event.pitches), float(event.quarterLength))
# to_export_event = (' '.join(str(note) for note in event.pitches), event.quarterLength)
seq_by_instrument[part.partName].append(to_export_event) seq_by_instrument[part.partName].append(to_export_event)
X_train_by_instrument = defaultdict( lambda : [] ) X_train_by_instrument = defaultdict( lambda : [] )
@@ -65,39 +79,54 @@ def to_sequence(midi_path, seq_len):
         for i in range(len(sequence)-(seq_len)):
             X_train_by_instrument[instrument].append(np.array(sequence[i:i+seq_len]))  # <seq length
             y_train_by_instrument[instrument].append(np.array(sequence[i+seq_len]))
-    # TODO: Notes to integers
     return X_train_by_instrument, y_train_by_instrument

-def main():
-    print('Exporting...')
+def colect_samples(midi_folder_path, seq_len):
+    '''This function loops through the given directories and
+    collects samples from midi files.
+    Parameters: midi_folder_path - a path to a directory with midi files
+                seq_len - the length of a train_X sample that tells
+                how many notes are given to the LSTM to predict the next note.
+    Returns: Tuple of train_X, train_y dictionaries consisting
+             of samples of all songs in the directory grouped by instruments.
+    '''
+    print('Collecting samples...')
     train_X = defaultdict( lambda : [] )
     train_y = defaultdict( lambda : [] )
     for directory, subdirectories, files in os.walk(midi_folder_path):
         for midi_file in tqdm(files):
             midi_file_path = os.path.join(directory, midi_file)
+            # some midi files can be corrupted and cannot be parsed,
+            # so we just skip those files and go on to the next one.
             try:
                 _X_train, _y_train = to_sequence(midi_file_path, seq_len)
-            except music21.midi.MidiException:
+            except MidiParseError:
                 continue
             for (X_key, X_value), (y_key, y_value) in zip(_X_train.items(), _y_train.items()):
                 train_X[X_key].extend(np.array(X_value))
                 train_y[y_key].extend(np.array(y_value))
-    # this is for intrument separation
+    return train_X, train_y

+def save_samples(output_path, samples):
+    '''This function saves samples to npz packages, split by instrument.'''
     print('Saving...')
     if not os.path.exists(output_path):
         os.makedirs(output_path)
+    train_X, train_y = samples
     for (X_key, X_value), (y_key, y_value) in tqdm(zip(train_X.items(), train_y.items())):
         if X_key == y_key:
             np.savez_compressed('{}/{}.npz'.format(output_path, X_key), np.array(X_value), np.array(y_value))

+def main():
+    midi_folder_path, output_path, seq_len = parse_argv(sys.argv)
+    save_samples(output_path, colect_samples(midi_folder_path, seq_len))
     print('Done!')

 if __name__ == '__main__':

View File

@@ -1,15 +1,14 @@
 #!python3
 #!/usr/bin/env python3
-import tensorflow as tf
+import sys
+import pickle
 import settings
-#from tensorflow.keras import layers
+import numpy as np
 from keras.layers import Input, Dense, Conv2D, Flatten, LSTM, Dropout, TimeDistributed, RepeatVector, Activation, Bidirectional, Reshape
 from keras.models import Model, Sequential
 from keras.utils.np_utils import to_categorical
-import numpy as np
-import sys
-import pickle

 def load_data(samples_path):
     print('Loading... {}'.format(train_data_path))
@@ -25,7 +24,6 @@ def preprocess_samples(train_X, train_y):
     n_vocab = vocab.shape[0]
     note_to_int = dict((note, number) for number, note in enumerate(vocab))
     int_to_note = dict((number, note) for number, note in enumerate(vocab))
-
     _train_X = []
     _train_y = []
     for sample in train_X:
@@ -58,12 +56,13 @@ model.add(Dense(n_vocab))
 model.add(Activation('softmax'))
 model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

-# model training
+# train the model for the number of epochs given as a parameter
 print('Training...')
 model.fit(train_X, train_y, epochs=epochs, batch_size=64)

-# save trained model
+# save the model, plus the additional information about it
+# that is needed to generate music from it
 pickle.dump(model, open(save_model_path,'wb'))
-pickle.dump(int_to_note, open('{}_dict'.format(save_model_path),'wb'))
+pickle.dump((int_to_note, n_vocab, train_X.shape[1]), open('{}_dict'.format(save_model_path),'wb'))

 print('Done!')
 print("Model saved to: {}".format(save_model_path))