#!/usr/bin/env python3

''' This module generates a sample and creates a MIDI file.

Usage:

>>> ./generate.py [trained_model_path] [output_path]

'''

import settings
import sys
import random
import pickle

import numpy as np
import tensorflow as tf
import pypianoroll as roll
import matplotlib.pyplot as plt

from tqdm import trange, tqdm
from music21 import converter, instrument, note, chord, stream
from keras.layers import Input, Dense, Conv2D, Flatten, LSTM, Dropout, TimeDistributed, RepeatVector
from keras.models import Model, Sequential


def choose_by_prob(list_of_probs):
    ''' Takes a list of values and assumes that the bigger a value is,
    the more often its index should be returned.

    It was created to give more options to choose from than the argmax
    function, so there is more than one way the melody can develop.

    Returns the index of the chosen value from the given list.
    '''
    sum_prob = np.array(list_of_probs).sum()
    prob_normalized = [x/sum_prob for x in list_of_probs]
    cumsum = np.array(prob_normalized).cumsum()
    prob_cum = cumsum.tolist()
    random_x = random.random()
    for i, x in enumerate(prob_cum):
        if random_x < x:
            return i
    # fall back to the last index if floating-point rounding leaves
    # the final cumulative sum just below 1.0
    return len(prob_cum) - 1
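
# Illustrative example (not executed; a sketch only): for probabilities
# [0.1, 0.7, 0.2] the cumulative sums are [0.1, 0.8, 1.0], so index 1 is
# returned roughly 70% of the time:
#
#     counts = [0, 0, 0]
#     for _ in range(1000):
#         counts[choose_by_prob([0.1, 0.7, 0.2])] += 1
#     print(counts)  # roughly [100, 700, 200]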


trained_model_path = sys.argv[1]
output_path = sys.argv[2]

# load the model and the dictionary that translates index numbers back to notes;
# this dictionary is generated together with the model
print('Loading... {}'.format(trained_model_path))
model = pickle.load(open(trained_model_path, 'rb'))
int_to_note = pickle.load(open('{}_dict'.format(trained_model_path), 'rb'))

# TODO: 16 should be a variable tied to the model's seq_len
# TODO: the upper bound of random.randint should be a variable equal to the vocabulary size
seed = [random.randint(0, 250) for x in range(16)]
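
# Note (illustrative): the seed acts as a sliding window over the 16 most
# recent events; each prediction is appended and the oldest entry dropped,
# so the model always sees a fixed-length context.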

music = []

print('Generating...')
for i in trange(124):
    # TODO: 16 should be a variable tied to the model's seq_len
    predicted_vector = model.predict(np.array(seed).reshape(1, 16, 1))

    # using the best-fitting note:
    # predicted_index = np.argmax(predicted_vector)

    # using the probability distribution to choose the note,
    # to prevent the melody from looping
    predicted_index = choose_by_prob(predicted_vector)

    music.append(int_to_note[predicted_index])
    seed.append(predicted_index)
    # TODO: 16 should be a variable tied to the model's seq_len
    seed = seed[1:1+16]

print('Saving...')
offset = 0
output_notes = []
# translate the generated events back into music21 notes and chords
for event in tqdm(music):
    # events that contain spaces (or are digit-only strings) are treated as chords
    if (' ' in event) or event.isdigit():
        notes_in_chord = event.split(' ')
        notes = []
        for current_note in notes_in_chord:
            new_note = note.Note(current_note)
            new_note.storedInstrument = instrument.Piano()
            notes.append(new_note)
        new_chord = chord.Chord(notes)
        new_chord.offset = offset
        output_notes.append(new_chord)
    else:
        new_note = note.Note(event)
        new_note.offset = offset
        new_note.storedInstrument = instrument.Piano()
        output_notes.append(new_note)

    # advance the offset so the events do not stack on top of each other
    offset += 0.5

midi_stream = stream.Stream(output_notes)
midi_stream.write('midi', fp='{}.mid'.format(output_path))

print('Done!')