From fede93c36ed9aabff39528a8aa7e61e973ab83b8 Mon Sep 17 00:00:00 2001
From: Cezary Pukownik
Date: Sat, 1 Jun 2019 17:05:38 +0200
Subject: [PATCH] get it working on music21 and sequence-style encoding

---
 project/generate.py | 57 +++++++++++++++++++++++--------
 project/midi.py     | 82 +++++++++++++++++++--------------------------
 project/train.py    | 53 ++++++++++++++++++++++-------
 3 files changed, 118 insertions(+), 74 deletions(-)

diff --git a/project/generate.py b/project/generate.py
index 9a5023b..14e78ff 100644
--- a/project/generate.py
+++ b/project/generate.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python3
 
 import numpy as np
-import midi
 import tensorflow as tf
 import pypianoroll as roll
 from keras.layers import Input, Dense, Conv2D
@@ -11,29 +10,59 @@ from keras.layers import Input, Dense, Conv2D, Flatten, LSTM, Dropout, TimeDistr
 from keras.models import Model, Sequential
 import matplotlib.pyplot as plt
 import settings
+import random
 import pickle
+from tqdm import trange, tqdm
 import sys
+from music21 import converter, instrument, note, chord, stream
 
 trained_model_path = sys.argv[1]
 output_path = sys.argv[2]
-treshold = float(sys.argv[3])
 
 # load and predict
+print('Loading... {}'.format(trained_model_path))
 model = pickle.load(open(trained_model_path, 'rb'))
+int_to_note = pickle.load(open('{}_dict'.format(trained_model_path), 'rb'))
 
-music = np.empty((4,96,128))
-for x in range(4):
-    generate_seed = np.random.randint(0, 127, 12288).reshape(1,96,128)
-    music[x] = model.predict(generate_seed).reshape(96,128)
+seed = [random.randint(0,50) for x in range(8)]
 
-generated_sample = music.reshape(4*96,128)
+music = []
 
-# binarize generated music
-generated_sample = generated_sample > treshold * generated_sample.max()
-# generated_sample = np.clip(generated_sample,0,1) * 128
+print('Generating...')
+for i in trange(500):
+    predicted_vector = model.predict(np.array(seed).reshape(1,8,1))
+    predicted_index = np.argmax(predicted_vector)
 
-# save to midi
-generated_midi = midi.to_midi(generated_sample, output_path='{}.mid'.format(output_path), is_drum=True, program=0, )
+    music.append(int_to_note[predicted_index])
 
-#save plot for preview
-roll.plot(generated_midi, filename='{}.png'.format(output_path))
+    seed.append(predicted_index)
+    seed = seed[1:9]
+
+
+print('Saving...')
+offset = 0
+output_notes = []
+for event in tqdm(music):
+    if (' ' in event) or event.isdigit():
+        notes_in_chord = event.split(' ')
+        notes = []
+        for current_note in notes_in_chord:
+            new_note = note.Note(current_note)
+            new_note.storedInstrument = instrument.Piano()
+            notes.append(new_note)
+        new_chord = chord.Chord(notes)
+        new_chord.offset = offset
+        output_notes.append(new_chord)
+    else:
+        new_note = note.Note(event)
+        new_note.offset = offset
+        new_note.storedInstrument = instrument.Piano()
+        output_notes.append(new_note)
+
+    offset += 0.5
+
+midi_stream = stream.Stream(output_notes)
+
+midi_stream.write('midi', fp='{}.mid'.format(output_path))
+
+print('Done!')
diff --git a/project/midi.py b/project/midi.py
index 447bbc2..e0a97bd 100644
--- a/project/midi.py
+++ b/project/midi.py
@@ -9,74 +9,62 @@
 from math import floor
 import sys
 from collections import defaultdict
 import pickle
+from music21 import converter, instrument, note, chord, stream
+import music21
 
 midi_folder_path = sys.argv[1]
 output_path = sys.argv[2]
 
-def to_samples(multitrack, midi_res=settings.midi_resolution, how='by_group'):
+def to_sequence(midi_path):
+    seq_by_instrument = defaultdict( lambda : [] )
+    midi_file = music21.converter.parse(midi_path)
+    stream = music21.instrument.partitionByInstrument(midi_file)
+    for part in stream:
+        for event in part:
+            if part.partName != None:
+                # TODO: add note length as parameter
+                if isinstance(event, music21.note.Note):
+                    # to_export_event = (str(event.pitch), event.quarterLength)
+                    to_export_event = str(event.pitch)
+                    seq_by_instrument[part.partName].append(to_export_event)
+                elif isinstance(event, music21.chord.Chord):
+                    to_export_event = ' '.join(str(note) for note in event.pitches)
+                    # to_export_event = (' '.join(str(note) for note in event.pitches), event.quarterLength)
+                    seq_by_instrument[part.partName].append(to_export_event)
 
-    #how = 'by_group', 'by_instrument', 'merged',
+    X_train_by_instrument = defaultdict( lambda : [] )
+    y_train_by_instrument = defaultdict( lambda : [] )
 
-    # TODO: add transpositions of every sample to every possible key transposition
-    # np.roll(sample, pitch_interval, axis=1) for transposition
-    # np.roll(sample, time_steps, axis=0) for time shifting
+    for instrument, sequence in seq_by_instrument.items():
+        for i in range(len(sequence)-8) :
+            X_train_by_instrument[instrument].append(np.array(sequence[i: i + 8])) #
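Note (sketch, not part of the patch): the last hunk shown ends where to_sequence starts
collecting 8-event windows per instrument; the target construction and the note-to-integer
encoding are not visible in this excerpt. A minimal sketch of that windowing, assuming the
usual next-event target and an int_to_note dictionary like the one generate.py unpickles
(function and variable names here are illustrative, not the project's exact code):

    import numpy as np

    def make_training_pairs(events, window=8):
        # events: strings as produced by to_sequence, e.g. 'C4' for a note
        # or 'C4 E4 G4' for a chord.
        # Map each distinct event to an integer and keep the reverse map
        # for decoding predictions back to notes at generation time.
        vocab = sorted(set(events))
        note_to_int = {event: i for i, event in enumerate(vocab)}
        int_to_note = {i: event for event, i in note_to_int.items()}

        X, y = [], []
        for i in range(len(events) - window):
            # 8-event history -> the event that follows it
            X.append([note_to_int[e] for e in events[i:i + window]])
            y.append(note_to_int[events[i + window]])

        # (samples, window, 1) matches the reshape(1, 8, 1) that
        # generate.py applies to the seed before model.predict.
        return np.array(X).reshape(-1, window, 1), np.array(y), int_to_note

    X, y, int_to_note = make_training_pairs(['C4', 'E4', 'G4', 'C4 E4 G4'] * 5)

Under these assumptions, pickling int_to_note next to the model on the training side is
what lets generate.py reload it via '{}_dict'.format(trained_model_path) and translate
argmax indices back into note and chord strings.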