diff --git a/project/midi.py b/project/midi.py
index 04ca47f..7e03254 100644
--- a/project/midi.py
+++ b/project/midi.py
@@ -39,6 +39,7 @@ def to_midi(samples, output_path=settings.generated_midi_path, program=0, tempo=
     return_midi = roll.Multitrack(tracks=tracks, tempo=tempo, downbeat=[0, 96, 192, 288], beat_resolution=beat_resolution)
     roll.write(return_midi, settings.generated_midi_path)
 
+# todo: this function is running too slow.
 def delete_empty_samples(sample_pack):
     print('Deleting empty samples...')
     temp_sample_pack = sample_pack
@@ -64,6 +65,7 @@ def main():
             continue
         sample_pack = np.concatenate((midi_samples, sample_pack), axis=0)
 
+    # I commented out this line, because it was too slow
     # sample_pack = delete_empty_samples(sample_pack)
     np.savez_compressed(settings.samples_dir, sample_pack)
 
diff --git a/project/readme b/project/readme
new file mode 100644
index 0000000..b65f2d1
--- /dev/null
+++ b/project/readme
@@ -0,0 +1,16 @@
+## MUSIC GENERATION USING DEEP LEARNING ##
+## AUTHOR: CEZARY PUKOWNIK
+
+How to use:
+
+1. Use midi.py to export data from midi files
+
+	./midi.py export
+
+2. Use train.py to train a model (this can take a while)
+
+	./train.py
+
+3. Use generate.py to generate music from trained models
+
+	./generate.py
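
Note on the disabled delete_empty_samples call: the slowness could likely be avoided with a vectorized NumPy filter instead of a per-sample loop. The sketch below is a minimal illustration only, assuming sample_pack is a NumPy array of shape (n_samples, timesteps, pitches) and that an "empty" sample is one whose piano roll is all zeros; the actual array layout and emptiness criterion in this project may differ.

	import numpy as np

	# Minimal sketch: drop all-zero samples with a single boolean mask
	# rather than checking the pack sample by sample.
	def delete_empty_samples(sample_pack):
	    # True for every sample that contains at least one nonzero entry.
	    non_empty = sample_pack.reshape(len(sample_pack), -1).any(axis=1)
	    # Boolean indexing keeps only the non-empty samples in one operation.
	    return sample_pack[non_empty]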