#!/usr/bin/env python3
import pickle
import sys

import matplotlib.pyplot as plt
import numpy as np

import midi
import settings

# Keras/TensorFlow imports kept from the original script (duplicates consolidated);
# they are not referenced directly here, since the model is loaded from a pickle below.
import tensorflow as tf
from keras.layers import Input, Dense, Conv2D, Flatten, LSTM, Dropout, TimeDistributed, RepeatVector
from keras.models import Model, Sequential
from tensorflow.keras import layers

trained_model_path = sys.argv[1]
output_path = sys.argv[2]
# threshold = float(sys.argv[3])

# Random seed: one window of 96 timesteps x 128 MIDI pitches.
generate_seed = np.random.rand(12288).reshape(1, 96, 128)

# Load the trained model and generate the piece 4 timesteps at a time.
model = pickle.load(open(trained_model_path, 'rb'))
generated_music = np.empty((0, 128))

for _ in range(100):
    # Predict the next 4 timesteps from the current 96-timestep window.
    generated_vector = model.predict(generate_seed).reshape(1, 4, 128)

    # Keep only the most likely pitch per timestep (one-hot rows).
    generated_notes = np.zeros((4, 128))
    for i, col in enumerate(generated_vector[0]):
        best_note = np.argmax(col)
        generated_notes[i][best_note] = 1

    # Slide the window: prepend the new notes and drop the oldest 4 timesteps.
    generate_seed = np.concatenate([generated_notes, generate_seed[0][:-4]]).reshape(1, 96, 128)
    generated_music = np.concatenate([generated_music, generated_notes])

generated_sample = generated_music

# Optional binarization of the generated piano roll (disabled, like the threshold argument above):
# generated_sample = generated_sample > threshold * generated_sample.max()

# Save to MIDI.
midi.to_midi(generated_sample, output_path='{}.mid'.format(output_path))

# Save the piano roll to PNG.
plt.imshow(generated_sample, cmap=plt.get_cmap('gray'))
plt.savefig('{}.png'.format(output_path))
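
# Example invocation (hypothetical file and path names, shown for illustration only),
# assuming this script is saved as generate.py and a pickled model exists at model.pkl:
#   ./generate.py model.pkl generated_output
# This would write generated_output.mid and generated_output.png next to the script.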