#!/usr/bin/env python3
"""Generate a MIDI file from a trained note-prediction model."""
import pickle
import random
import sys

import numpy as np
from tqdm import trange, tqdm
from music21 import instrument, note, chord, stream

if len(sys.argv) != 3:
    sys.exit('Usage: {} <trained_model_path> <output_path>'.format(sys.argv[0]))

trained_model_path = sys.argv[1]
output_path = sys.argv[2]

# Load the trained model and the index-to-note mapping pickled alongside it.
print('Loading... {}'.format(trained_model_path))
with open(trained_model_path, 'rb') as f:
    model = pickle.load(f)
with open('{}_dict'.format(trained_model_path), 'rb') as f:
    int_to_note = pickle.load(f)

# Start from a random seed sequence of 8 note indices.
seed = [random.randint(0, 50) for _ in range(8)]
music = []

print('Generating...')
for _ in trange(500):
    # Predict the next note index from the current 8-step window.
    predicted_vector = model.predict(np.array(seed).reshape(1, 8, 1))
    predicted_index = int(np.argmax(predicted_vector))
    music.append(int_to_note[predicted_index])
    # Slide the window forward by one step.
    seed.append(predicted_index)
    seed = seed[1:]

print('Saving...')
offset = 0
output_notes = []
for event in tqdm(music):
    # Space-separated (or purely numeric) events are chords; anything else is
    # a single note name.
    if (' ' in event) or event.isdigit():
        notes = []
        for current_note in event.split(' '):
            # Digit tokens are treated as MIDI pitch numbers; others as note names.
            pitch = int(current_note) if current_note.isdigit() else current_note
            new_note = note.Note(pitch)
            new_note.storedInstrument = instrument.Piano()
            notes.append(new_note)
        new_chord = chord.Chord(notes)
        new_chord.offset = offset
        output_notes.append(new_chord)
    else:
        new_note = note.Note(event)
        new_note.offset = offset
        new_note.storedInstrument = instrument.Piano()
        output_notes.append(new_note)
    # Advance each event by half a beat so notes do not stack on one offset.
    offset += 0.5

midi_stream = stream.Stream(output_notes)
midi_stream.write('midi', fp='{}.mid'.format(output_path))
print('Done!')
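
# Example usage (illustrative; the file names below are assumptions, standing in
# for whatever a prior training run produced -- a pickled model file plus a
# companion pickle with the '_dict' suffix holding the index-to-note mapping):
#
#   python generate.py trained.model my_song
#
# With those assumptions, the generated sequence is written to 'my_song.mid'.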