├── __init__.py ├── src ├── __init__.py ├── melody.py ├── Polyphony.py ├── Tonality.py ├── Play.py ├── MusicalNote.py ├── lstm_seq2seq.py ├── CreateMusic.py └── ReadMusic.py ├── .DS_Store ├── merge_with_master.sh ├── README.md ├── test ├── test_tonality.py ├── test_major_scale.py └── test_notes.py ├── CODE_OF_CONDUCT.md └── notebook └── Learning LSTM.ipynb /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdeSantosSierra/chopyn/HEAD/.DS_Store -------------------------------------------------------------------------------- /merge_with_master.sh: -------------------------------------------------------------------------------- 1 | git checkout master 2 | git merge dev 3 | git push origin master 4 | git checkout dev 5 | git merge master 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # chopyn 2 | Music and Artificial Intelligence 3 | 4 | Chopyn is an Artificial Intelligence system whose main capability is to create music. 5 | Implemented in Python, Chopyn will be able to create music out of a given melody, design harmonies similar to a specific music 6 | style and who knows what else... 7 | 8 | Chopyn will be an open-source project, combining RNN, Deep Learning and the most cutting-edge AI technologies. 
9 | 10 | You may find some cherry-picking samples here: http://soundcloud.com/alberto-de-santos-sierra 11 | 12 | Copyright 2018 - This is an original project proposed and created by Alberto de Santos Sierra. 13 | -------------------------------------------------------------------------------- /src/melody.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from MusicalNote import * 5 | from Play import CSVtoMIDI 6 | 7 | import numpy as np 8 | import pandas as pd 9 | 10 | # Random Melody 11 | # A Melody is a sequence of notes 12 | 13 | 14 | class Melody (object): 15 | 16 | 17 | 18 | def convert_to_midi(self): 19 | 20 | start_ms_array = \ 21 | (np.cumsum([0]+[note_from_melody.get_duration() 22 | for note_from_melody in self.sequence_of_notes])) 23 | 24 | # Create dataframe with musical properties from sequence/melody 25 | # print([note_from_melody.get_play_props() for note_from_melody in self.sequence_of_notes]) 26 | # print('pppppppp') 27 | melody_dataframe = pd.DataFrame.from_records([note_from_melody.get_play_props() 28 | for note_from_melody in self.sequence_of_notes]) 29 | 30 | # print(melody_dataframe) 31 | 32 | # Add the time sequence 33 | melody_dataframe['start_ms'] = start_ms_array[:-1] 34 | 35 | # Rename columns as the MIDI has already specific names for the columns 36 | # print(melody_dataframe.columns) 37 | melody_dataframe.columns = ['dur_ms','velocity','pitch','part','start_ms'] 38 | 39 | return melody_dataframe 40 | 41 | class RandomMelody(Melody): 42 | 43 | def __init__(self): 44 | 45 | notes_props = ({'duration':np.random.randint(500,1000), 46 | 'intensity':np.random.randint(50), 47 | 'timbre':np.random.randint(10)}) 48 | 49 | notes_names = [Do,Re,Mi,Fa,Sol,La,Si] 50 | melody_length = 20 51 | random_sequence = np.random.choice(notes_names, melody_length, replace=True) 52 | 53 | self.sequence_of_notes = 
[iterator_note(**{'duration':np.random.randint(500,1000), 54 | 'intensity':70, 55 | 'timbre':1}) 56 | for iterator_note in random_sequence] 57 | 58 | 59 | class SequenceMelody(Melody): 60 | def __init__(self,sequence_of_notes): 61 | self.sequence_of_notes = sequence_of_notes 62 | 63 | 64 | 65 | 66 | if __name__ == '__main__': 67 | melody = RandomMelody() 68 | CSVtoMIDI(melody.convert_to_midi()) 69 | 70 | -------------------------------------------------------------------------------- /test/test_tonality.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | # import folders with the code 6 | # At the moment, there is not other way of importing 7 | sys.path.insert(0, '/Users/adesant3/Documents/Kindergarten/chopyn/chopyn/src/') 8 | 9 | from MusicalNote import * 10 | from Tonality import * 11 | from ReadMusic import * 12 | 13 | import numpy as np 14 | import pandas as pd 15 | 16 | from collections import Counter, OrderedDict 17 | 18 | 19 | name_folder = '../scores/' 20 | 21 | dict_scores = \ 22 | {'Do':'Chopin_Etude_Op_10_n_1.csv', 23 | 24 | #'Solb':'Chopin_Etude_Op_10_n_5.csv', 25 | 'Reb':'Debussy_Claire_de_Lune.csv', 26 | 'Mib':'Schubert_Piano_Trio_2nd_Movement.csv', 27 | #'Solb':'Chopin_Etude_6.csv', # Mib m 28 | #'Mib':'Chopin_Etude_Opus_10_No_12.csv', #Do m 29 | 'Mi':'Beethoven_Moonlight_Sonata_third_movement.csv', #Although it is Do# m 30 | #'Mi':'Chopin_Etude_Opus_10_No_3.csv', #Although it is Do# m 31 | 'Fa':'Schubert_S560_Schwanengesang_no7.csv', # Although it is Re m 32 | 'Sol':'Albeniz_Asturias.csv', # Although, it is Mi m 33 | 'Solb':'Schuber_Impromptu_D_899_No_3.csv', 34 | 'Si':'Chopin_Etude_Op25_No_6.csv' #Although it is Sol# m 35 | } 36 | 37 | def test_tonality_Do(): 38 | assert Read(name_folder+dict_scores['Do']).get_tonality() == 'Do' 39 | 40 | def test_tonality_Sol(): 41 | assert Read(name_folder+dict_scores['Sol']).get_tonality() == 'Sol' 42 | 
43 | def test_tonality_Solb(): 44 | assert Read(name_folder+dict_scores['Solb']).get_tonality() == 'Solb' 45 | 46 | def test_tonality_Fa(): 47 | assert Read(name_folder+dict_scores['Fa']).get_tonality() == 'Fa' 48 | 49 | def test_tonality_Reb(): 50 | assert Read(name_folder+dict_scores['Reb']).get_tonality() == 'Reb' 51 | 52 | def test_tonality_Mib_num1(): 53 | assert Read(name_folder+dict_scores['Mib']).get_tonality() == 'Mib' 54 | 55 | def test_tonality_Mib_num2(): 56 | assert Read(name_folder+'Chopin_Etude_6.csv').get_tonality() == 'Solb' 57 | 58 | def test_tonality_Mib_num3(): 59 | assert Read(name_folder+'Chopin_Etude_Opus_10_No_12.csv').get_tonality() == 'Mib' 60 | 61 | def test_tonality_Mi_num1(): 62 | assert Read(name_folder+dict_scores['Mi']).get_tonality() == 'Mi' 63 | 64 | def test_tonality_Mi_num2(): 65 | assert Read(name_folder+'Chopin_Etude_Opus_10_No_3.csv').get_tonality() == 'Mi' 66 | 67 | def test_tonality_Si(): 68 | assert Read(name_folder+dict_scores['Si']).get_tonality() == 'Si' -------------------------------------------------------------------------------- /src/Polyphony.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from MusicalNote import * 5 | from Play import CSVtoMIDI 6 | 7 | import numpy as np 8 | import pandas as pd 9 | 10 | # Random Melody 11 | # A Melody is a sequence of notes 12 | 13 | 14 | class Polyphony (object): 15 | 16 | def convert_to_midi(self): 17 | 18 | individual_notes = list() 19 | chord_id = list() 20 | 21 | duration_length = 150 22 | factor_duration = 2 23 | 24 | for id_iter_chord, iter_chord in enumerate(self.sequenceChord): 25 | for part_note_in_chord, iter_note_in_chord in enumerate(iter_chord): 26 | iter_note_in_chord.update_props({'timbre':part_note_in_chord+1}) 27 | individual_notes.append(iter_note_in_chord) 28 | # Store the id of the chord to know which notes are together 29 | 
chord_id.append((id_iter_chord+1)*factor_duration*duration_length) 30 | 31 | 32 | # Create dataframe with musical properties from sequence/melody 33 | # print([note_from_melody.get_play_props() for note_from_melody in self.sequence_of_notes]) 34 | # print('pppppppp') 35 | music_dataframe = pd.DataFrame.from_records([note_from_melody.get_play_props() 36 | for note_from_melody in individual_notes]) 37 | 38 | music_dataframe['start_ms'] = chord_id 39 | music_dataframe['duration'] = duration_length*factor_duration 40 | 41 | # Rename columns as the MIDI has already specific names for the columns 42 | music_dataframe.columns = ['dur_ms','velocity','pitch','part','start_ms'] 43 | 44 | # Merge together those repeated notes 45 | extended_music_dataframe_list = [] 46 | 47 | print('-------Fallan aqui los .loc') 48 | for _, df_gb in music_dataframe.groupby(['velocity','pitch','part']): 49 | df_gb.loc[:,'next_start_ms'] = ((df_gb['start_ms']+df_gb['dur_ms']) 50 | .shift(1) 51 | .fillna(0) 52 | ) 53 | df_gb.loc[:,'diff_start_ms'] = ((df_gb['start_ms']-df_gb['next_start_ms'])>0).astype(int) 54 | df_gb.loc[:,'cum_sum'] = np.cumsum(df_gb['diff_start_ms']) 55 | # print(df_gb[['dur_ms','start_ms','next_start_ms','diff_start_ms','grad','cum_sum']]) 56 | extended_music_dataframe_list.append(df_gb 57 | .groupby(['cum_sum','pitch','velocity','part']) 58 | .agg({'start_ms':min, 'dur_ms':sum}) 59 | .reset_index() 60 | [['dur_ms','velocity','pitch','part','start_ms']] 61 | ) 62 | 63 | print('-------Fallan aqui los .loc --- 2') 64 | #print(pd.concat(extended_music_dataframe_list).sort_values(['part','start_ms'])) 65 | 66 | return pd.concat(extended_music_dataframe_list).sort_values(['part','start_ms']) 67 | 68 | 69 | 70 | class SequenceChordPolyphony (Polyphony): 71 | 72 | def __init__(self, sequenceChord): 73 | self.sequenceChord = sequenceChord -------------------------------------------------------------------------------- /src/Tonality.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from MusicalNote import * 5 | from melody import * 6 | from Polyphony import * 7 | from Play import CSVtoMIDI 8 | 9 | import numpy as np 10 | 11 | # Tonality 12 | # Two types of tonalities: Major and Minor 13 | 14 | 15 | class Tonality (object): 16 | # The class tonality defines the structure of the music 17 | # (2) The relationships between grades and notes 18 | # (3) Which is the Tonica/Dominant/... function of every note 19 | 20 | def __init__(self, Tonic): 21 | # Tonic is the main note - This must be set as a parameter 22 | 23 | # Names of the sequences/grades I, II, III, IV, ... 24 | self.dict_grades_positions = {'I': 0, 'II': 1, 'III': 2, 'IV': 3, 'V': 4, 'VI': 5, 'VII': 6} 25 | 26 | # Which is the Tonic Note 27 | self.tonic = Tonic 28 | 29 | def get_tonic(self): 30 | return self.tonic 31 | 32 | 33 | class Major (Tonality): 34 | # (1) Different chords of every functional note 35 | # (2) How to create music from chords and functional notes (I, II, III, ...) 
36 | def __init__(self, Tonic): 37 | self.major_chords = ['I', 'IV', 'V'] 38 | self.minor_chords = ['II', 'III', 'VI'] 39 | self.dis_chords = ['VII'] 40 | 41 | super(self.__class__, self).__init__(Tonic) 42 | 43 | 44 | 45 | def create_music_from_grades_sequences(self, 46 | musical_sequence, 47 | number_of_notes_per_compass): 48 | 49 | melody_sequence = [] 50 | 51 | self.grades = self.get_tonic().get_major_scale() 52 | print([note.to_string() for note in self.grades]) 53 | 54 | for grade in musical_sequence: 55 | # (1) Iterate every single grade within musical_sequence 56 | # (2) Extract, for every note, the corresponding chord 57 | # (3) Play random music ONLY with those notes within the chord 58 | # (4) Concatenate all the notes 59 | # main_note_from_grade = (globals() 60 | # [self.grades[self.dict_grades_positions[grade]].__name__]()) 61 | 62 | main_note_from_grade = (self.grades[self.dict_grades_positions[grade]]) 63 | 64 | # (2) Extract the chord for every note 65 | if grade in self.major_chords: 66 | notes_from_chord = main_note_from_grade.get_major_chord() 67 | else: 68 | notes_from_chord = main_note_from_grade.get_minor_chord() 69 | 70 | 71 | 72 | 73 | # (3) and (4) Play random music ONLY with notes from the chord 74 | melody_sequence.extend(np.random.choice(notes_from_chord, 75 | number_of_notes_per_compass, 76 | replace=True)) 77 | #print(melody_sequence) 78 | 79 | # (5) First and Last note must be the Tonic 80 | melody_sequence[0] = self.get_tonic() 81 | melody_sequence[-1] = self.get_tonic() 82 | 83 | # (6) Add properties (duration, intensity, timbre) 84 | melody_sequence_to_play = [] 85 | for note in melody_sequence: 86 | note.update_props({'duration': np.random.choice([125, 250], 1, replace=True)[0], 87 | 'intensity': 70, 'timbre': 1}) 88 | melody_sequence_to_play.append(note) 89 | 90 | return melody_sequence_to_play 91 | 92 | 93 | if __name__ == '__main__': 94 | tonal = Tonality(Sol) 95 | 96 | do = Major(La(**{'alteration':'b'})) 97 | musical_sequence 
= ['I', 'V', 'VI', 'III', 'IV','I', 'IV', 'V', 'I'] 98 | number_of_notes_per_compass = 10 99 | melody_sequence = do.create_music_from_grades_sequences(musical_sequence, 100 | number_of_notes_per_compass) 101 | 102 | melody = SequenceMelody(melody_sequence) 103 | CSVtoMIDI(melody.convert_to_midi()) 104 | -------------------------------------------------------------------------------- /src/Play.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | class CSVtoMIDI: 5 | 6 | # Class to convert CSV into MIDI 7 | # Add DOC later 8 | 9 | 10 | def __init__(self,dataframe, name_file_midi = 'my_first_midi_file'): 11 | 12 | 13 | # Create the names for the csv and midi files 14 | # name_file_midi = 'my_first_midi_file' 15 | csv_file = '../../output/CSV/'+name_file_midi+'.csv' 16 | midi_file = '../../output/MIDI/'+name_file_midi+'.midi' 17 | 18 | # Store dataframe 19 | self.dataframe = dataframe 20 | 21 | # Create and convert the file 22 | self.create_csv(csv_file) 23 | self.convert_csv_to_midi(csv_file,midi_file) 24 | 25 | def init_dict (self,all_the_voices): 26 | # Init the dictionary which will collect all the lines from the MIDI file 27 | # all_the_voices contains all the instruments/voices of the composition 28 | # The idea is to create a dictionary with the header of every voice, which must include 29 | # Start_track and Program_c 30 | self.dic = {key: [str(key+1)+', 0, Start_track', 31 | # 0, Program_c, Channel, Instrument 32 | str(key+1)+', 0, Program_c, 1, 1'] for key in all_the_voices} 33 | 34 | 35 | def appendNote (self,row): 36 | # For every single note and voice/instrument, this method will include one row per note, 37 | # for the corresponding voice. Sometimes, voice/instrument is called part. 
38 | 39 | # Example of the result: 40 | # 3, 0, Note_on_c, 3, 62, 28 41 | # 3, 250, Note_off_c, 3, 62, 0 42 | # 3, 250, Note_on_c, 3, 53, 34 43 | # 3, 375, Note_off_c, 3, 53, 0 44 | # 3, 375, Note_on_c, 3, 62, 30 45 | # 3, 500, Note_off_c, 3, 62, 0 46 | 47 | # Note_on_c 48 | part = row['part'] 49 | self.dic[part].append(', '.join([str(row['part']+1), 50 | str(row['start_ms']), 51 | 'Note_on_c', 52 | str(row['part']+1), # Channel 53 | str(row['pitch']), 54 | str(row['velocity']) 55 | ])) 56 | 57 | # Note_off_c 58 | self.dic[part].append(', '.join([str(row['part']+1), 59 | (str(row['start_ms']+row['dur_ms'])), 60 | 'Note_off_c', 61 | str(row['part']+1), # Channel 62 | str(row['pitch']), 63 | '0' 64 | ])) 65 | 66 | 67 | def create_csv(self, csv_file): 68 | # Create CSV file 69 | 70 | csv = open(csv_file, 'w') 71 | self.init_dict(self.dataframe.part.unique()) 72 | 73 | # Number of voices/instruments should be given by datamart_to_save.part.max() 74 | header_list = \ 75 | ['0, 0, Header, 1, '+str(self.dataframe.part.max()+1)+', 480', 76 | # Not needed now 77 | # '1, 0, Start_track', 78 | # '1, 0, Title_t, "Close Encounters"', 79 | # '1, 0, Text_t, "Sample for MIDIcsv Distribution"', 80 | # '1, 0, Copyright_t, "This file is in the public domain"', 81 | # '1, 0, Time_signature, 4, 2, 24, 8', 82 | # '1, 0, Tempo, 500000', 83 | # '1, 0, End_track', 84 | ''] 85 | 86 | # Write the header into the CSV file 87 | csv.write('\n'.join(header_list)) 88 | 89 | # For every row in the dataframe, apply the method appendNote, which will store in self.dic 90 | # one line per note 91 | self.dataframe.apply(self.appendNote,axis=1) 92 | 93 | # Add the last line per voice: 94 | # Example: 95 | # 2, 83125, End_track 96 | 97 | for iterDic in self.dic.keys(): 98 | v = self.dic[iterDic][-1].split(',')[:2] 99 | v.append(' End_track\n') 100 | self.dic[iterDic].append(','.join(v)) 101 | 102 | # Save all the notes per voice/instrument in the CSV file. 
103 | # Notice, that every voice will contain: 104 | # Start_track 105 | # Notes 106 | # End_track 107 | # Example: 108 | [csv.write('\n'.join(self.dic[iterDict])) for iterDict in self.dic.keys()] 109 | 110 | # End of file 111 | csv.write('0, 0, End_of_file') 112 | 113 | # Close the file 114 | csv.close() 115 | 116 | def convert_csv_to_midi(self,csv_file,midi_file): 117 | # Convert CSV to MIDI 118 | # It is needed to use the csvmidi command 119 | import os 120 | os.system(' '.join(['csvmidi',csv_file,midi_file])) 121 | 122 | 123 | if __name__== "__main__": 124 | import pandas as pd 125 | 126 | datamart = pd.read_csv('../../data/music.csv') 127 | print(datamart.head()) 128 | CSVtoMIDI(datamart) 129 | print('Hi there, it works!') 130 | 131 | 132 | 133 | 134 | -------------------------------------------------------------------------------- /test/test_major_scale.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | # import folders with the code 6 | # At the moment, there is not other way of importing 7 | sys.path.insert(0, '/Users/adesant3/Documents/Kindergarten/chopyn/chopyn/src/') 8 | 9 | from MusicalNote import * 10 | 11 | import numpy as np 12 | import pandas as pd 13 | 14 | 15 | def test_get_major_scale_do(): 16 | assert ([nota.to_string() for nota in Do().get_major_scale()] == 17 | ['Do','Re','Mi','Fa','Sol','La','Si','Do']) 18 | 19 | def test_get_major_scale_do_sharp(): 20 | assert ([nota.to_string() for nota in Do(**{'alteration':'#'}).get_major_scale()] == 21 | ['Do#','Re#','Mi#','Fa#','Sol#','La#','Si#','Do#']) 22 | 23 | def test_get_major_scale_do_flat(): 24 | assert ([nota.to_string() for nota in Do(**{'alteration':'b'}).get_major_scale()] == 25 | ['Dob','Reb','Mib','Fab','Solb','Lab','Sib','Dob']) 26 | 27 | 28 | 29 | ############### 30 | ### RE 31 | ############### 32 | 33 | def test_get_major_scale_re(): 34 | assert ([nota.to_string() for nota in 
Re().get_major_scale()] == 35 | ['Re','Mi','Fa#','Sol','La','Si','Do#','Re']) 36 | 37 | def test_get_major_scale_re_sharp(): 38 | assert ([nota.to_string() for nota in Re(**{'alteration':'#'}).get_major_scale()] == 39 | ['Re#','Mi#','Fax','Sol#','La#','Si#','Dox','Re#']) 40 | 41 | def test_get_major_scale_re_flat(): 42 | assert ([nota.to_string() for nota in Re(**{'alteration':'b'}).get_major_scale()] == 43 | ['Reb','Mib','Fa','Solb','Lab','Sib','Do','Reb']) 44 | 45 | 46 | ############### 47 | ### MI 48 | ############### 49 | 50 | def test_get_major_scale_mi(): 51 | assert ([nota.to_string() for nota in Mi().get_major_scale()] == 52 | ['Mi','Fa#','Sol#','La','Si','Do#','Re#','Mi']) 53 | 54 | def test_get_major_scale_mi_sharp(): 55 | assert ([nota.to_string() for nota in Mi(**{'alteration':'#'}).get_major_scale()] == 56 | ['Mi#','Fax','Solx','La#','Si#','Dox','Rex','Mi#']) 57 | 58 | def test_get_major_scale_mi_flat(): 59 | assert ([nota.to_string() for nota in Mi(**{'alteration':'b'}).get_major_scale()] == 60 | ['Mib','Fa','Sol','Lab','Sib','Do','Re','Mib']) 61 | 62 | 63 | ############### 64 | ### Fa 65 | ############### 66 | 67 | def test_get_major_scale_fa(): 68 | assert ([nota.to_string() for nota in Fa().get_major_scale()] == 69 | ['Fa','Sol','La','Sib','Do','Re','Mi','Fa']) 70 | 71 | def test_get_major_scale_fa_sharp(): 72 | assert ([nota.to_string() for nota in Fa(**{'alteration':'#'}).get_major_scale()] == 73 | ['Fa#','Sol#','La#','Si','Do#','Re#','Mi#','Fa#']) 74 | 75 | def test_get_major_scale_fa_flat(): 76 | assert ([nota.to_string() for nota in Fa(**{'alteration':'b'}).get_major_scale()] == 77 | ['Fab','Solb','Lab','Sibb','Dob','Reb','Mib','Fab']) 78 | 79 | 80 | ############### 81 | ### Sol 82 | ############### 83 | 84 | def test_get_major_scale_sol(): 85 | assert ([nota.to_string() for nota in Sol().get_major_scale()] == 86 | ['Sol','La','Si','Do','Re','Mi','Fa#','Sol']) 87 | 88 | def test_get_major_scale_sol_sharp(): 89 | assert ([nota.to_string() for 
nota in Sol(**{'alteration':'#'}).get_major_scale()] == 90 | ['Sol#','La#','Si#','Do#','Re#','Mi#','Fax','Sol#']) 91 | 92 | def test_get_major_scale_sol_flat(): 93 | assert ([nota.to_string() for nota in Sol(**{'alteration':'b'}).get_major_scale()] == 94 | ['Solb','Lab','Sib','Dob','Reb','Mib','Fa','Solb']) 95 | 96 | 97 | ############### 98 | ### La 99 | ############### 100 | 101 | def test_get_major_scale_la(): 102 | assert ([nota.to_string() for nota in La().get_major_scale()] == 103 | ['La','Si','Do#','Re','Mi','Fa#','Sol#','La']) 104 | 105 | def test_get_major_scale_la_sharp(): 106 | assert ([nota.to_string() for nota in La(**{'alteration':'#'}).get_major_scale()] == 107 | ['La#','Si#','Dox','Re#','Mi#','Fax','Solx','La#']) 108 | 109 | def test_get_major_scale_la_flat(): 110 | assert ([nota.to_string() for nota in La(**{'alteration':'b'}).get_major_scale()] == 111 | ['Lab','Sib','Do','Reb','Mib','Fa','Sol','Lab']) 112 | 113 | 114 | ############### 115 | ### Si 116 | ############### 117 | 118 | def test_get_major_scale_si(): 119 | assert ([nota.to_string() for nota in Si().get_major_scale()] == 120 | ['Si','Do#','Re#','Mi','Fa#','Sol#','La#','Si']) 121 | 122 | def test_get_major_scale_si_sharp(): 123 | assert ([nota.to_string() for nota in Si(**{'alteration':'#'}).get_major_scale()] == 124 | ['Si#','Dox','Rex','Mi#','Fax','Solx','Lax','Si#']) 125 | 126 | def test_get_major_scale_si_flat(): 127 | assert ([nota.to_string() for nota in Si(**{'alteration':'b'}).get_major_scale()] == 128 | ['Sib','Do','Re','Mib','Fa','Sol','La','Sib']) -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## 1. Purpose 4 | 5 | A primary goal of Chopyn is to be inclusive to the largest number of contributors, with the most varied and diverse backgrounds possible. 
As such, we are committed to providing a friendly, safe and welcoming environment for all, regardless of gender, sexual orientation, ability, ethnicity, socioeconomic status, and religion (or lack thereof). 6 | 7 | This code of conduct outlines our expectations for all those who participate in our community, as well as the consequences for unacceptable behavior. 8 | 9 | We invite all those who participate in Chopyn to help us create safe and positive experiences for everyone. 10 | 11 | ## 2. Open Source Citizenship 12 | 13 | A supplemental goal of this Code of Conduct is to increase open source citizenship by encouraging participants to recognize and strengthen the relationships between our actions and their effects on our community. 14 | 15 | Communities mirror the societies in which they exist and positive action is essential to counteract the many forms of inequality and abuses of power that exist in society. 16 | 17 | If you see someone who is making an extra effort to ensure our community is welcoming, friendly, and encourages all participants to contribute to the fullest extent, we want to know. 18 | 19 | ## 3. Expected Behavior 20 | 21 | The following behaviors are expected and requested of all community members: 22 | 23 | * Participate in an authentic and active way. In doing so, you contribute to the health and longevity of this community. 24 | * Exercise consideration and respect in your speech and actions. 25 | * Attempt collaboration before conflict. 26 | * Refrain from demeaning, discriminatory, or harassing behavior and speech. 27 | * Be mindful of your surroundings and of your fellow participants. Alert community leaders if you notice a dangerous situation, someone in distress, or violations of this Code of Conduct, even if they seem inconsequential. 28 | * Remember that community event venues may be shared with members of the public; please be respectful to all patrons of these locations. 29 | 30 | ## 4. 
Unacceptable Behavior 31 | 32 | The following behaviors are considered harassment and are unacceptable within our community: 33 | 34 | * Violence, threats of violence or violent language directed against another person. 35 | * Sexist, racist, homophobic, transphobic, ableist or otherwise discriminatory jokes and language. 36 | * Posting or displaying sexually explicit or violent material. 37 | * Posting or threatening to post other people’s personally identifying information ("doxing"). 38 | * Personal insults, particularly those related to gender, sexual orientation, race, religion, or disability. 39 | * Inappropriate photography or recording. 40 | * Inappropriate physical contact. You should have someone’s consent before touching them. 41 | * Unwelcome sexual attention. This includes, sexualized comments or jokes; inappropriate touching, groping, and unwelcomed sexual advances. 42 | * Deliberate intimidation, stalking or following (online or in person). 43 | * Advocating for, or encouraging, any of the above behavior. 44 | * Sustained disruption of community events, including talks and presentations. 45 | 46 | ## 5. Consequences of Unacceptable Behavior 47 | 48 | Unacceptable behavior from any community member, including sponsors and those with decision-making authority, will not be tolerated. 49 | 50 | Anyone asked to stop unacceptable behavior is expected to comply immediately. 51 | 52 | If a community member engages in unacceptable behavior, the community organizers may take any action they deem appropriate, up to and including a temporary ban or permanent expulsion from the community without warning (and without refund in the case of a paid event). 53 | 54 | ## 6. Reporting Guidelines 55 | 56 | If you are subject to or witness unacceptable behavior, or have any other concerns, please notify a community organizer as soon as possible. a.desantos.sierra@gmail.com. 
57 | 58 | 59 | 60 | Additionally, community organizers are available to help community members engage with local law enforcement or to otherwise help those experiencing unacceptable behavior feel safe. In the context of in-person events, organizers will also provide escorts as desired by the person experiencing distress. 61 | 62 | ## 7. Addressing Grievances 63 | 64 | If you feel you have been falsely or unfairly accused of violating this Code of Conduct, you should notify Ade Santos Sierra with a concise description of your grievance. Your grievance will be handled in accordance with our existing governing policies. 65 | 66 | 67 | 68 | ## 8. Scope 69 | 70 | We expect all community participants (contributors, paid or otherwise; sponsors; and other guests) to abide by this Code of Conduct in all community venues–online and in-person–as well as in all one-on-one communications pertaining to community business. 71 | 72 | This code of conduct and its related procedures also applies to unacceptable behavior occurring outside the scope of community activities when such behavior has the potential to adversely affect the safety and well-being of community members. 73 | 74 | ## 9. Contact info 75 | 76 | a.desantos.sierra@gmail.com 77 | 78 | ## 10. License and attribution 79 | 80 | This Code of Conduct is distributed under a [Creative Commons Attribution-ShareAlike license](http://creativecommons.org/licenses/by-sa/3.0/). 81 | 82 | Portions of text derived from the [Django Code of Conduct](https://www.djangoproject.com/conduct/) and the [Geek Feminism Anti-Harassment Policy](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy). 
83 | 84 | Retrieved on November 22, 2016 from [http://citizencodeofconduct.org/](http://citizencodeofconduct.org/) 85 | -------------------------------------------------------------------------------- /test/test_notes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | # import folders with the code 6 | # At the moment, there is not other way of importing 7 | sys.path.insert(0, '/Users/adesant3/Documents/Kindergarten/chopyn/chopyn/src/') 8 | 9 | from MusicalNote import * 10 | 11 | import numpy as np 12 | import pandas as pd 13 | 14 | # content of test_sample.py 15 | def test_get_pitch(): 16 | random_pitch = np.random.randint(50) 17 | notes_props = {'pitch':random_pitch, 'duration':100, 'intensity': 200, 'timbre': 1} 18 | do = Do(**notes_props) 19 | assert (do.get_pitch() != random_pitch) 20 | 21 | def test_update_pitch_do(): 22 | random_pitch = np.random.randint(50) 23 | notes_props = {'pitch':random_pitch, 'duration':100, 'intensity': 200, 'timbre': 1} 24 | do = Do(**notes_props) 25 | assert do.get_pitch() == 60 26 | 27 | def test_update_pitch_re(): 28 | random_pitch = np.random.randint(50) 29 | notes_props = {'pitch':random_pitch, 'duration':100, 'intensity': 200, 'timbre': 1} 30 | re = Re(**notes_props) 31 | assert re.get_pitch() == 62 32 | 33 | def test_standard_octave(): 34 | assert Do().get_octave() == 4 35 | 36 | def test_update_octave(): 37 | octave = 6 38 | notes_props = {'duration':100, 'intensity': 200, 'timbre': 1, 'alteration':'b', 'octave':octave} 39 | assert Fa(**notes_props).get_octave() == octave 40 | 41 | def test_higher_octave(): 42 | notes_props = {'duration':100, 'intensity': 200, 'timbre': 1, 'alteration':'b', 'octave':6} 43 | re = Re(**notes_props) 44 | assert re.get_pitch() == 85 45 | 46 | def test_lower_octave(): 47 | notes_props = {'duration':100, 'intensity': 200, 'timbre': 1, 'alteration':'b', 'octave':2} 48 | re = 
########################################
#
# Tests for MusicalNote.get_note_from_interval
#
# Each test builds a base note (optionally altered with '#' or 'b'),
# asks for the note at a given interval and checks the spelled name,
# including double flats ('bb') and double sharps ('x').
#
########################################


def _check_interval(base_note, interval, expected_name):
    # Shared helper: the note reached from `base_note` through `interval`
    # must be spelled exactly `expected_name`.
    assert base_note.get_note_from_interval(interval).to_string() == expected_name


# --- Intervals from Do (natural) ---

def test_get_note_from_interval_do_3M():
    _check_interval(Do(), '3M', 'Mi')

def test_get_note_from_interval_do_3m():
    _check_interval(Do(), '3m', 'Mib')

def test_get_note_from_interval_do_2M():
    _check_interval(Do(), '2M', 'Re')

def test_get_note_from_interval_do_2m():
    _check_interval(Do(), '2m', 'Reb')

def test_get_note_from_interval_do_5P():
    _check_interval(Do(), '5P', 'Sol')


# --- Intervals from Do# ---

def test_get_note_from_interval_do_sharp_3M():
    _check_interval(Do(alteration='#'), '3M', 'Mi#')

def test_get_note_from_interval_do_sharp_3m():
    _check_interval(Do(alteration='#'), '3m', 'Mi')

def test_get_note_from_interval_do_sharp_2M():
    _check_interval(Do(alteration='#'), '2M', 'Re#')

def test_get_note_from_interval_do_sharp_2m():
    _check_interval(Do(alteration='#'), '2m', 'Re')

def test_get_note_from_interval_do_sharp_5P():
    _check_interval(Do(alteration='#'), '5P', 'Sol#')


# --- Intervals from Dob ---

def test_get_note_from_interval_do_flat_3M():
    _check_interval(Do(alteration='b'), '3M', 'Mib')

def test_get_note_from_interval_do_flat_3m():
    _check_interval(Do(alteration='b'), '3m', 'Mibb')

def test_get_note_from_interval_do_flat_2M():
    _check_interval(Do(alteration='b'), '2M', 'Reb')

def test_get_note_from_interval_do_flat_2m():
    _check_interval(Do(alteration='b'), '2m', 'Rebb')

def test_get_note_from_interval_do_flat_5P():
    _check_interval(Do(alteration='b'), '5P', 'Solb')


# --- Intervals from Re (natural) ---

def test_get_note_from_interval_re_3M():
    _check_interval(Re(), '3M', 'Fa#')

def test_get_note_from_interval_re_3m():
    _check_interval(Re(), '3m', 'Fa')

def test_get_note_from_interval_re_2M():
    _check_interval(Re(), '2M', 'Mi')

def test_get_note_from_interval_re_2m():
    _check_interval(Re(), '2m', 'Mib')

def test_get_note_from_interval_re_5P():
    _check_interval(Re(), '5P', 'La')


# --- Intervals from Re# ---

def test_get_note_from_interval_re_sharp_3M():
    _check_interval(Re(alteration='#'), '3M', 'Fax')

def test_get_note_from_interval_re_sharp_3m():
    _check_interval(Re(alteration='#'), '3m', 'Fa#')

def test_get_note_from_interval_re_sharp_2M():
    _check_interval(Re(alteration='#'), '2M', 'Mi#')

def test_get_note_from_interval_re_sharp_2m():
    _check_interval(Re(alteration='#'), '2m', 'Mi')

def test_get_note_from_interval_re_sharp_5P():
    _check_interval(Re(alteration='#'), '5P', 'La#')


# --- Intervals from Reb ---

def test_get_note_from_interval_re_flat_3M():
    _check_interval(Re(alteration='b'), '3M', 'Fa')

def test_get_note_from_interval_re_flat_3m():
    _check_interval(Re(alteration='b'), '3m', 'Fab')

def test_get_note_from_interval_re_flat_2M():
    _check_interval(Re(alteration='b'), '2M', 'Mib')

def test_get_note_from_interval_re_flat_2m():
    _check_interval(Re(alteration='b'), '2m', 'Mibb')

def test_get_note_from_interval_re_flat_5P():
    _check_interval(Re(alteration='b'), '5P', 'Lab')


# --- Intervals from Mi (natural) ---

def test_get_note_from_interval_mi_3M():
    _check_interval(Mi(), '3M', 'Sol#')

def test_get_note_from_interval_mi_3m():
    _check_interval(Mi(), '3m', 'Sol')

def test_get_note_from_interval_mi_2M():
    _check_interval(Mi(), '2M', 'Fa#')

def test_get_note_from_interval_mi_2m():
    _check_interval(Mi(), '2m', 'Fa')

def test_get_note_from_interval_mi_5P():
    _check_interval(Mi(), '5P', 'Si')


# --- Intervals from Mi# ---

def test_get_note_from_interval_mi_sharp_3M():
    _check_interval(Mi(alteration='#'), '3M', 'Solx')

def test_get_note_from_interval_mi_sharp_3m():
    _check_interval(Mi(alteration='#'), '3m', 'Sol#')

def test_get_note_from_interval_mi_sharp_2M():
    _check_interval(Mi(alteration='#'), '2M', 'Fax')

def test_get_note_from_interval_mi_sharp_2m():
    _check_interval(Mi(alteration='#'), '2m', 'Fa#')

def test_get_note_from_interval_mi_sharp_5P():
    _check_interval(Mi(alteration='#'), '5P', 'Si#')


# --- Intervals from Mib ---

def test_get_note_from_interval_mi_flat_3M():
    _check_interval(Mi(alteration='b'), '3M', 'Sol')

def test_get_note_from_interval_mi_flat_3m():
    _check_interval(Mi(alteration='b'), '3m', 'Solb')

def test_get_note_from_interval_mi_flat_2M():
    _check_interval(Mi(alteration='b'), '2M', 'Fa')

def test_get_note_from_interval_mi_flat_2m():
    _check_interval(Mi(alteration='b'), '2m', 'Fab')

def test_get_note_from_interval_mi_flat_5P():
    _check_interval(Mi(alteration='b'), '5P', 'Sib')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Musical note model (Do, Re, Mi, ...) built on MIDI pitch numbers."""

import numpy as np

# Config main properties of music.
# Module-level constants shared by the class and its subclasses (the
# original declared them ``global`` from inside the class body, which has
# the same net effect of binding them at module scope).
half_tone = 1
tone = 2
number_notes = 7
diatonic_scale = ['Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Si']
distance_major_scale = [tone, tone, half_tone, tone, tone, tone, half_tone]
# Semitone offset (as a string key) -> alteration symbol.
dic_alterations = {'-2': 'bb', '-1': 'b', '0': '', '1': '#', '2': 'x'}
# Inverse mapping: alteration symbol -> semitone offset (int).
# FIX: replaces the Python-2-only reverse lookup
# ``dic_alterations.keys()[dic_alterations.values().index(...)]``, which
# raises TypeError on Python 3 because dict views are not indexable.
alteration_offsets = dict((symbol, int(offset))
                          for offset, symbol in dic_alterations.items())


# Create Musical Notes (Do, Re, Mi, ...)
class MusicalNote(object):
    """A single note described by its playable properties.

    Recognised keyword properties:
      duration   -- note length in ms (long/short notes)
      pitch      -- MIDI pitch number (high/low pitch)
      intensity  -- loudness (MIDI velocity)
      timbre     -- instrument used to play the note
      alteration -- 'bb', 'b', '', '#' or 'x'; shifts pitch by -2..+2 semitones
      octave     -- octave number; default 4 (MIDI central octave)
    """

    def __init__(self, **notes_props):
        self.alteration = ''
        # Why 4? Because 4 is the default (central) octave.
        self.octave = 4

        # Kept as the raw dict because Melody reads it back via get_props().
        self.notes_props = notes_props

        if 'pitch' in notes_props:
            self.pitch = notes_props['pitch']

        if 'duration' in notes_props:
            self.duration = notes_props['duration']

        if 'intensity' in notes_props:
            self.intensity = notes_props['intensity']

        if 'timbre' in notes_props:
            self.timbre = notes_props['timbre']

        # Check whether there is an alteration among the note properties;
        # apply it as a semitone shift on the pitch.
        if 'alteration' in notes_props:
            self.alteration = notes_props['alteration']
            self.pitch = self.pitch + alteration_offsets[self.alteration]
            self.notes_props['pitch'] = self.pitch

        if 'octave' in notes_props:
            octave_increment = notes_props['octave'] - self.octave
            # Why 12? Because octaves are separated by 12 semitones.
            self.pitch = self.pitch + 12 * octave_increment
            # Change octave at the end so the default is used for the increment.
            self.octave = notes_props['octave']
            self.notes_props['pitch'] = self.pitch
            self.notes_props['octave'] = self.octave

    def get_pitch(self):
        return self.pitch

    def get_duration(self):
        return self.duration

    def get_octave(self):
        return self.octave

    def get_props(self):
        return self.notes_props

    def get_play_props(self):
        # Only those props useful to play music (feeds the MIDI writer).
        return dict((k, self.notes_props[k])
                    for k in ('pitch', 'duration', 'intensity', 'timbre')
                    if k in self.notes_props)

    def update_props(self, new_notes_props):
        # Overwrite any of the playable properties, keeping the dict and
        # the mirror attributes in sync.
        for key in ('pitch', 'duration', 'intensity', 'timbre'):
            if key in new_notes_props:
                self.notes_props[key] = new_notes_props[key]
                setattr(self, key, self.notes_props[key])

    def get_note_from_interval(self, interval):
        """Return a new note located `interval` above this note.

        `interval` is one of '2m', '2M', '3m', '3M', '5P'. The result is
        spelled on the correct scale degree, with the alteration ('bb'..'x')
        that makes the semitone count exact.
        """
        position_base_note = diatonic_scale.index(self.__class__.__name__)

        # Each interval expressed as the tone/half-tone steps it spans.
        dic_interval_definition = ({'3m': [tone, half_tone], '3M': [tone, tone],
                                    '5P': [tone, tone, half_tone, tone],
                                    '2M': [tone], '2m': [half_tone]})

        # Number of scale degrees spanned (Ex. '3m' -> 2 degrees up).
        length_interval = len(dic_interval_definition[interval])

        # Exact size of the interval in semitones (Ex. '3m' -> 3).
        sum_interval = np.sum(dic_interval_definition[interval])

        # Rotate the major-scale step pattern to start on the base note,
        # and rotate the note names to start on the target degree.
        rotated_distances = (distance_major_scale[position_base_note:]
                             + distance_major_scale[:position_base_note])
        target_index = (position_base_note + length_interval) % number_notes
        rotated_diatonic_scale = (diatonic_scale[target_index:]
                                  + diatonic_scale[:target_index])

        # Semitones between base note and target degree on the natural scale.
        sum_tones_interval = np.cumsum(rotated_distances)[length_interval % number_notes - 1]

        # Target degree name; the alteration makes up the semitone difference,
        # including the base note's own alteration as a correction.
        chord_note = rotated_diatonic_scale[0]
        correction = alteration_offsets[self.alteration]
        update_note_props = {
            'alteration': dic_alterations[str(sum_interval - sum_tones_interval + correction)]}
        return globals()[chord_note](**update_note_props)

    def to_string(self):
        # Combine the name of the note (Do, Re, ...) with its alteration, if any.
        return self.__class__.__name__ + self.alteration

    ###################
    # Chords
    ###################

    def get_major_chord(self):
        # Root + major third + perfect fifth.
        return [self, self.get_note_from_interval('3M'), self.get_note_from_interval('5P')]

    def get_minor_chord(self):
        # Root + minor third + perfect fifth.
        return [self, self.get_note_from_interval('3m'), self.get_note_from_interval('5P')]

    def get_dis_chord(self):
        # Diminished triad as raw pitches (stacked minor thirds).
        return self.pitch + np.cumsum([0, half_tone + tone, half_tone + tone])

    ###################
    # Scales
    ###################

    def get_major_scale(self):
        """Return the 8 notes of the major scale starting on this note.

        NOTE: the final tonic is spelled like the first but its pitch is not
        raised an octave (octave handling is left to the caller).
        """
        tonic_scale = [self]

        # Walk the whole-tone / half-tone pattern of the major scale.
        for idx, distance in enumerate(distance_major_scale):
            if distance == tone:
                tonic_scale.append(tonic_scale[idx].get_note_from_interval('2M'))
            else:
                tonic_scale.append(tonic_scale[idx].get_note_from_interval('2m'))
        return tonic_scale


# One subclass per diatonic degree. Each pins the MIDI pitch of its central
# (octave 4) natural note before delegating to MusicalNote.__init__.
# FIX: ``super(self.__class__, self)`` replaced by explicit class names —
# the self.__class__ form recurses infinitely if these are ever subclassed.

class Do(MusicalNote):
    # Create Musical Note - Do (central Do is MIDI pitch 60)
    def __init__(self, **notes_props):
        notes_props.update({'pitch': 60})
        super(Do, self).__init__(**notes_props)


class Re(MusicalNote):
    # Create Musical Note - Re (central Re is MIDI pitch 62)
    def __init__(self, **notes_props):
        notes_props.update({'pitch': 62})
        super(Re, self).__init__(**notes_props)


class Mi(MusicalNote):
    # Create Musical Note - Mi (central Mi is MIDI pitch 64)
    def __init__(self, **notes_props):
        notes_props.update({'pitch': 64})
        super(Mi, self).__init__(**notes_props)


class Fa(MusicalNote):
    # Create Musical Note - Fa (central Fa is MIDI pitch 65)
    def __init__(self, **notes_props):
        notes_props.update({'pitch': 65})
        super(Fa, self).__init__(**notes_props)


class Sol(MusicalNote):
    # Create Musical Note - Sol (central Sol is MIDI pitch 67)
    def __init__(self, **notes_props):
        notes_props.update({'pitch': 67})
        super(Sol, self).__init__(**notes_props)


class La(MusicalNote):
    # Create Musical Note - La (central La is MIDI pitch 69)
    def __init__(self, **notes_props):
        notes_props.update({'pitch': 69})
        super(La, self).__init__(**notes_props)


class Si(MusicalNote):
    # Create Musical Note - Si (central Si is MIDI pitch 71)
    def __init__(self, **notes_props):
        notes_props.update({'pitch': 71})
        super(Si, self).__init__(**notes_props)
Note that it is fairly unusual to 7 | do character-level machine translation, as word-level 8 | models are more common in this domain. 9 | 10 | # Summary of the algorithm 11 | 12 | - We start with input sequences from a domain (e.g. English sentences) 13 | and corresponding target sequences from another domain 14 | (e.g. French sentences). 15 | - An encoder LSTM turns input sequences to 2 state vectors 16 | (we keep the last LSTM state and discard the outputs). 17 | - A decoder LSTM is trained to turn the target sequences into 18 | the same sequence but offset by one timestep in the future, 19 | a training process called "teacher forcing" in this context. 20 | Is uses as initial state the state vectors from the encoder. 21 | Effectively, the decoder learns to generate `targets[t+1...]` 22 | given `targets[...t]`, conditioned on the input sequence. 23 | - In inference mode, when we want to decode unknown input sequences, we: 24 | - Encode the input sequence into state vectors 25 | - Start with a target sequence of size 1 26 | (just the start-of-sequence character) 27 | - Feed the state vectors and 1-char target sequence 28 | to the decoder to produce predictions for the next character 29 | - Sample the next character using these predictions 30 | (we simply use argmax). 31 | - Append the sampled character to the target sequence 32 | - Repeat until we generate the end-of-sequence character or we 33 | hit the character limit. 34 | 35 | # Data download 36 | 37 | English to French sentence pairs. 
38 | http://www.manythings.org/anki/fra-eng.zip 39 | 40 | Lots of neat sentence pairs datasets can be found at: 41 | http://www.manythings.org/anki/ 42 | 43 | # References 44 | 45 | - Sequence to Sequence Learning with Neural Networks 46 | https://arxiv.org/abs/1409.3215 47 | - Learning Phrase Representations using 48 | RNN Encoder-Decoder for Statistical Machine Translation 49 | https://arxiv.org/abs/1406.1078 50 | ''' 51 | from __future__ import print_function 52 | 53 | from keras.models import Model 54 | from keras.layers import Input, LSTM, Dense 55 | import numpy as np 56 | 57 | batch_size = 64 # Batch size for training. 58 | epochs = 100 # Number of epochs to train for. 59 | latent_dim = 256 # Latent dimensionality of the encoding space. 60 | num_samples = 10000 # Number of samples to train on. 61 | # Path to the data txt file on disk. 62 | data_path = 'fra.txt' 63 | 64 | # Vectorize the data. 65 | input_texts = [] 66 | target_texts = [] 67 | input_characters = set() 68 | target_characters = set() 69 | with open(data_path, 'r') as f: 70 | lines = f.read().split('\n') 71 | for line in lines[: min(num_samples, len(lines) - 1)]: 72 | input_text, target_text = line.split('\t') 73 | # We use "tab" as the "start sequence" character 74 | # for the targets, and "\n" as "end sequence" character. 
75 | target_text = '\t' + target_text + '\n' 76 | input_texts.append(input_text) 77 | target_texts.append(target_text) 78 | for char in input_text: 79 | if char not in input_characters: 80 | input_characters.add(char) 81 | for char in target_text: 82 | if char not in target_characters: 83 | target_characters.add(char) 84 | 85 | input_characters = sorted(list(input_characters)) 86 | target_characters = sorted(list(target_characters)) 87 | num_encoder_tokens = len(input_characters) 88 | num_decoder_tokens = len(target_characters) 89 | max_encoder_seq_length = max([len(txt) for txt in input_texts]) 90 | max_decoder_seq_length = max([len(txt) for txt in target_texts]) 91 | 92 | print('Number of samples:', len(input_texts)) 93 | print('Number of unique input tokens:', num_encoder_tokens) 94 | print('Number of unique output tokens:', num_decoder_tokens) 95 | print('Max sequence length for inputs:', max_encoder_seq_length) 96 | print('Max sequence length for outputs:', max_decoder_seq_length) 97 | 98 | input_token_index = dict( 99 | [(char, i) for i, char in enumerate(input_characters)]) 100 | target_token_index = dict( 101 | [(char, i) for i, char in enumerate(target_characters)]) 102 | 103 | encoder_input_data = np.zeros( 104 | (len(input_texts), max_encoder_seq_length, num_encoder_tokens), 105 | dtype='float32') 106 | decoder_input_data = np.zeros( 107 | (len(input_texts), max_decoder_seq_length, num_decoder_tokens), 108 | dtype='float32') 109 | decoder_target_data = np.zeros( 110 | (len(input_texts), max_decoder_seq_length, num_decoder_tokens), 111 | dtype='float32') 112 | 113 | for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)): 114 | for t, char in enumerate(input_text): 115 | encoder_input_data[i, t, input_token_index[char]] = 1. 116 | for t, char in enumerate(target_text): 117 | # decoder_target_data is ahead of decoder_input_data by one timestep 118 | decoder_input_data[i, t, target_token_index[char]] = 1. 
119 | if t > 0: 120 | # decoder_target_data will be ahead by one timestep 121 | # and will not include the start character. 122 | decoder_target_data[i, t - 1, target_token_index[char]] = 1. 123 | 124 | # Define an input sequence and process it. 125 | encoder_inputs = Input(shape=(None, num_encoder_tokens)) 126 | encoder = LSTM(latent_dim, return_state=True) 127 | encoder_outputs, state_h, state_c = encoder(encoder_inputs) 128 | # We discard `encoder_outputs` and only keep the states. 129 | encoder_states = [state_h, state_c] 130 | 131 | # Set up the decoder, using `encoder_states` as initial state. 132 | decoder_inputs = Input(shape=(None, num_decoder_tokens)) 133 | # We set up our decoder to return full output sequences, 134 | # and to return internal states as well. We don't use the 135 | # return states in the training model, but we will use them in inference. 136 | decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True) 137 | decoder_outputs, _, _ = decoder_lstm(decoder_inputs, 138 | initial_state=encoder_states) 139 | decoder_dense = Dense(num_decoder_tokens, activation='softmax') 140 | decoder_outputs = decoder_dense(decoder_outputs) 141 | 142 | # Define the model that will turn 143 | # `encoder_input_data` & `decoder_input_data` into `decoder_target_data` 144 | model = Model([encoder_inputs, decoder_inputs], decoder_outputs) 145 | 146 | # Run training 147 | model.compile(optimizer='rmsprop', loss='categorical_crossentropy') 148 | model.fit([encoder_input_data, decoder_input_data], decoder_target_data, 149 | batch_size=batch_size, 150 | epochs=epochs, 151 | validation_split=0.2) 152 | # Save model 153 | model.save('s2s.h5') 154 | 155 | # Next: inference mode (sampling). 156 | # Here's the drill: 157 | # 1) encode input and retrieve initial decoder state 158 | # 2) run one step of decoder with this initial state 159 | # and a "start of sequence" token as target. 
160 | # Output will be the next target token 161 | # 3) Repeat with the current target token and current states 162 | 163 | # Define sampling models 164 | encoder_model = Model(encoder_inputs, encoder_states) 165 | 166 | decoder_state_input_h = Input(shape=(latent_dim,)) 167 | decoder_state_input_c = Input(shape=(latent_dim,)) 168 | decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] 169 | decoder_outputs, state_h, state_c = decoder_lstm( 170 | decoder_inputs, initial_state=decoder_states_inputs) 171 | decoder_states = [state_h, state_c] 172 | decoder_outputs = decoder_dense(decoder_outputs) 173 | decoder_model = Model( 174 | [decoder_inputs] + decoder_states_inputs, 175 | [decoder_outputs] + decoder_states) 176 | 177 | # Reverse-lookup token index to decode sequences back to 178 | # something readable. 179 | reverse_input_char_index = dict( 180 | (i, char) for char, i in input_token_index.items()) 181 | reverse_target_char_index = dict( 182 | (i, char) for char, i in target_token_index.items()) 183 | 184 | 185 | def decode_sequence(input_seq): 186 | # Encode the input as state vectors. 187 | states_value = encoder_model.predict(input_seq) 188 | 189 | # Generate empty target sequence of length 1. 190 | target_seq = np.zeros((1, 1, num_decoder_tokens)) 191 | # Populate the first character of target sequence with the start character. 192 | target_seq[0, 0, target_token_index['\t']] = 1. 193 | 194 | # Sampling loop for a batch of sequences 195 | # (to simplify, here we assume a batch of size 1). 196 | stop_condition = False 197 | decoded_sentence = '' 198 | while not stop_condition: 199 | output_tokens, h, c = decoder_model.predict( 200 | [target_seq] + states_value) 201 | 202 | # Sample a token 203 | sampled_token_index = np.argmax(output_tokens[0, -1, :]) 204 | sampled_char = reverse_target_char_index[sampled_token_index] 205 | decoded_sentence += sampled_char 206 | 207 | # Exit condition: either hit max length 208 | # or find stop character. 
209 | if (sampled_char == '\n' or 210 | len(decoded_sentence) > max_decoder_seq_length): 211 | stop_condition = True 212 | 213 | # Update the target sequence (of length 1). 214 | target_seq = np.zeros((1, 1, num_decoder_tokens)) 215 | target_seq[0, 0, sampled_token_index] = 1. 216 | 217 | # Update states 218 | states_value = [h, c] 219 | 220 | return decoded_sentence 221 | 222 | 223 | for seq_index in range(100): 224 | # Take one sequence (part of the training set) 225 | # for trying out decoding. 226 | input_seq = encoder_input_data[seq_index: seq_index + 1] 227 | decoded_sentence = decode_sequence(input_seq) 228 | print('-') 229 | print('Input sentence:', input_texts[seq_index]) 230 | print('Decoded sentence:', decoded_sentence) -------------------------------------------------------------------------------- /src/CreateMusic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from __future__ import print_function 5 | 6 | from MusicalNote import * 7 | from melody import * 8 | from Polyphony import * 9 | from Play import CSVtoMIDI 10 | from Tonality import * 11 | from ReadMusic import * 12 | 13 | import numpy as np 14 | import pandas as pd 15 | 16 | from collections import Counter, OrderedDict 17 | import itertools 18 | 19 | import logging 20 | FORMAT = '%(asctime)-15s %(message)s' 21 | logging.basicConfig(format=FORMAT) 22 | logger = logging.getLogger(__name__) 23 | logger.setLevel('INFO') 24 | 25 | 26 | #LSTM Packages 27 | import tensorflow as tf 28 | from tensorflow.contrib import rnn 29 | import random 30 | import time 31 | import os 32 | 33 | class CreateMusicFromChords(object): 34 | 35 | def __init__(self, music_data, training_iters, n_input): 36 | 37 | self.training_iters = training_iters 38 | self.display_step = 1000 39 | self.n_input = n_input 40 | 41 | # Read musical data 42 | self.training_data = music_data['sorted_grades'] 43 | 44 | # Target log path 45 | 
path_logs = '../tmp' 46 | self.writer = tf.summary.FileWriter(path_logs) 47 | 48 | # Extract alphabet dictionary 49 | alphabet = np.unique(self.training_data) 50 | self.dictionary = dict(zip(alphabet,range(len(alphabet)))) 51 | 52 | def config_LSTM(self): 53 | # Parameters 54 | learning_rate = 0.001 55 | 56 | # number of units in RNN cell 57 | n_hidden = 1024 58 | vocab_size = len(self.dictionary) 59 | 60 | # tf Graph input 61 | self.x = tf.placeholder("float", [None, self.n_input, 1], name = 'x') 62 | self.y = tf.placeholder("float", [None, vocab_size]) 63 | 64 | 65 | # RNN output node weights and biases 66 | weights = { 67 | 'out': tf.Variable(tf.random_normal([n_hidden, vocab_size])) 68 | } 69 | biases = { 70 | 'out': tf.Variable(tf.random_normal([vocab_size])) 71 | } 72 | 73 | pred = self.RNN(self.x, weights, biases, n_hidden) 74 | 75 | # Loss and optimizer 76 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=self.y), name='cost') 77 | optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost) 78 | 79 | # Model evaluation 80 | correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(self.y,1)) 81 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 82 | 83 | # Initializing the variables 84 | self.init = tf.global_variables_initializer() 85 | self.saver = tf.train.Saver() 86 | 87 | return optimizer, accuracy, cost, pred 88 | 89 | def train(self, optimizer, accuracy, cost, pred, name_model): 90 | 91 | # Launch the graph 92 | with tf.Session() as session: 93 | session.run(self.init) 94 | self.saver.save(session, name_model) 95 | step = 0 96 | offset = random.randint(0,self.n_input+1) 97 | end_offset = self.n_input + 1 98 | acc_total = 0 99 | loss_total = 0 100 | 101 | vocab_size = len(self.dictionary) 102 | 103 | reverse_dictionary = dict(zip(self.dictionary.values(), 104 | self.dictionary.keys())) 105 | 106 | self.writer.add_graph(session.graph) 107 | 108 | while step < self.training_iters: 109 | # 
Generate a minibatch. Add some randomness on selection process. 110 | if offset > (len(self.training_data)-end_offset): 111 | offset = random.randint(0, self.n_input+1) 112 | 113 | symbols_in_keys = ([[self.dictionary[self.training_data[i]]] 114 | for i in range(offset, offset+self.n_input) ]) 115 | symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, self.n_input, 1]) 116 | 117 | symbols_out_onehot = np.zeros([vocab_size], dtype=float) 118 | symbols_out_onehot[self.dictionary[self.training_data[offset+self.n_input]]] = 1.0 119 | symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1]) 120 | 121 | _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \ 122 | feed_dict={self.x: symbols_in_keys, self.y: symbols_out_onehot}) 123 | 124 | #print('pred') 125 | #print(pred) 126 | loss_total += loss 127 | acc_total += acc 128 | if (step+1) % self.display_step == 0: 129 | print("Iter= " + str(step+1) + ", Average Loss= " + \ 130 | "{:.6f}".format(loss_total/self.display_step) + ", Average Accuracy= " + \ 131 | "{:.2f}%".format(100*acc_total/self.display_step)) 132 | acc_total = 0 133 | loss_total = 0 134 | symbols_in = [self.training_data[i] for i in range(offset, offset + self.n_input)] 135 | symbols_out = self.training_data[offset + self.n_input] 136 | symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())] 137 | print("%s - [%s] vs [%s]" % (symbols_in,symbols_out,symbols_out_pred)) 138 | self.saver.save(session, name_model, global_step=step+1) 139 | step += 1 140 | offset += (self.n_input+1) 141 | 142 | def train_and_predict(self, optimizer, accuracy, cost, pred, name_model, sequence_length, starting_sequence): 143 | 144 | # Launch the graph 145 | with tf.Session() as session: 146 | session.run(self.init) 147 | self.saver.save(session, name_model) 148 | step = 0 149 | offset = random.randint(0,self.n_input+1) 150 | end_offset = self.n_input + 1 151 | acc_total = 0 152 | loss_total = 0 153 | 154 | vocab_size = 
len(self.dictionary) 155 | 156 | reverse_dictionary = dict(zip(self.dictionary.values(), 157 | self.dictionary.keys())) 158 | 159 | self.writer.add_graph(session.graph) 160 | 161 | while step < self.training_iters: 162 | # Generate a minibatch. Add some randomness on selection process. 163 | if offset > (len(self.training_data)-end_offset): 164 | offset = random.randint(0, self.n_input+1) 165 | 166 | symbols_in_keys = ([[self.dictionary[self.training_data[i]]] 167 | for i in range(offset, offset+self.n_input) ]) 168 | symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, self.n_input, 1]) 169 | 170 | symbols_out_onehot = np.zeros([vocab_size], dtype=float) 171 | symbols_out_onehot[self.dictionary[self.training_data[offset+self.n_input]]] = 1.0 172 | symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1]) 173 | 174 | _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \ 175 | feed_dict={self.x: symbols_in_keys, self.y: symbols_out_onehot}) 176 | 177 | #print('pred') 178 | #print(pred) 179 | loss_total += loss 180 | acc_total += acc 181 | if (step+1) % self.display_step == 0: 182 | print("Iter= " + str(step+1) + ", Average Loss= " + \ 183 | "{:.6f}".format(loss_total/self.display_step) + ", Average Accuracy= " + \ 184 | "{:.2f}%".format(100*acc_total/self.display_step)) 185 | acc_total = 0 186 | loss_total = 0 187 | symbols_in = [self.training_data[i] for i in range(offset, offset + self.n_input)] 188 | symbols_out = self.training_data[offset + self.n_input] 189 | symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())] 190 | print("%s - [%s] vs [%s]" % (symbols_in,symbols_out,symbols_out_pred)) 191 | self.saver.save(session, name_model, global_step=step+1) 192 | step += 1 193 | offset += (self.n_input+1) 194 | 195 | symbols_in_keys = [self.dictionary[(iter_sequence)] for iter_sequence in starting_sequence] 196 | output_sequence = list() 197 | 198 | for i in range(sequence_length): 199 | keys = 
np.reshape(symbols_in_keys, [-1, self.n_input, 1]) 200 | onehot_pred = session.run(pred, feed_dict={self.x: keys}) 201 | onehot_pred_index = int(tf.argmax(onehot_pred, 1).eval()) 202 | print('symbols_out') 203 | print(onehot_pred_index) 204 | output_sequence.append(reverse_dictionary[onehot_pred_index]) 205 | symbols_in_keys = symbols_in_keys[1:] 206 | symbols_in_keys.append(onehot_pred_index) 207 | print('symbols_in_keys') 208 | print(symbols_in_keys) 209 | 210 | return output_sequence 211 | 212 | def load_and_predict (self, dir_name_model, model_metadata, starting_sequence, sequence_length): 213 | 214 | output_sequence = list() 215 | 216 | with tf.Session() as session: 217 | 218 | # Other attempts 219 | # tf.saved_model.loader.load(session,[name_model], '/tmp') 220 | 221 | #First let's load meta graph and restore weights 222 | saver = tf.train.import_meta_graph(model_metadata) 223 | 224 | # Initialize variables 225 | session.run(tf.global_variables_initializer()) 226 | saver.restore(session,tf.train.latest_checkpoint(dir_name_model)) 227 | 228 | graph = tf.get_default_graph() 229 | pred = graph.get_tensor_by_name("pred:0") 230 | x = graph.get_tensor_by_name("x:0") 231 | 232 | 233 | 234 | 235 | reverse_dictionary = dict(zip(self.dictionary.values(), 236 | self.dictionary.keys())) 237 | 238 | symbols_in_keys = [self.dictionary[(iter_sequence)] for iter_sequence in starting_sequence] 239 | 240 | for i in range(sequence_length): 241 | 242 | keys = np.reshape(np.array(symbols_in_keys), [-1, self.n_input, 1]) 243 | onehot_pred = session.run(pred, feed_dict={x: keys}) 244 | onehot_pred_index = int(tf.argmax(onehot_pred, 1).eval()) 245 | # print('symbols_out') 246 | # print(onehot_pred_index) 247 | output_sequence.append(reverse_dictionary[onehot_pred_index]) 248 | symbols_in_keys = symbols_in_keys[1:] 249 | symbols_in_keys.append(onehot_pred_index) 250 | # print('symbols_in_keys') 251 | # print(symbols_in_keys) 252 | 253 | return output_sequence 254 | 255 | 256 | 257 | 
def RNN(self, x, weights, biases, n_hidden):
    """Build the stacked-LSTM prediction graph for the chord model.

    Note: despite the original "2-layer" comment, three BasicLSTMCells are
    stacked here; behavior kept as-is.
    """
    # Flatten the batch to [?, n_input], then split into n_input time steps.
    x = tf.reshape(x, [-1, self.n_input])
    x = tf.split(x, self.n_input, 1)

    # Three stacked LSTM layers of n_hidden units each.
    rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden),
                                 rnn.BasicLSTMCell(n_hidden),
                                 rnn.BasicLSTMCell(n_hidden)])

    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)

    # Only the last time-step output feeds the projection layer.
    pred = tf.matmul(outputs[-1], weights['out']) + biases['out']
    # Expose the prediction under a stable name so load_and_predict can fetch it.
    tf.identity(pred, 'pred')
    return pred


class PlayMusicFromChords(object):
    """Driver: read one or more MIDI-derived CSV scores, (optionally) train the
    chord LSTM, generate a chord sequence and write it out as MIDI.
    """

    def __init__(self, name_file_midi,
                 n_input=20,
                 training_iters=100000,
                 sequence_length=500,
                 model_version_to_load=99000,
                 bool_train=False):

        # (A commented-out single-file variant of this pipeline was removed in
        # review; the multi-file loop below supersedes it.)

        chord_list = list()
        name_list = list()

        # name_file_midi is expected to be an iterable of CSV paths here.
        for iter_name_file_midi in name_file_midi:

            musical_piece = Read(iter_name_file_midi)

            print('La tonalidad es: '+musical_piece.get_tonality())

            logger.info('Calculate the tonality and apply it to the whole music piece')
            musical_piece.apply_tonality()

            logger.info('Extract the sequence of chords')
            chord_list.append(musical_piece.get_chord_df())

            logger.info('Extract the name of the file')
            name_list.append(iter_name_file_midi.split('/')[-1].split('.csv')[0])

        name_model = ('n_input_'+str(n_input)+'_chromatic_ordered'+'_iters_'+str(training_iters)+'_'
                      + '_'.join(name_list))
        dir_name_model = '../models/'+name_model

        if not os.path.exists(dir_name_model):
            os.makedirs(dir_name_model)

        logger.info('Create the Deep Learning object')
        music_creator = CreateMusicFromChords(pd.concat(chord_list,
                                                        ignore_index=True),
                                              training_iters=training_iters,
                                              n_input=n_input)

        if bool_train:
            logger.info('Config LSTM')
            optimizer, accuracy, cost, pred = music_creator.config_LSTM()

        logger.info('Estimate initial sequence to predict based on LSTM')
        # NOTE(review): after the loop, musical_piece is the LAST file read,
        # so the seed sequence always comes from the last score — confirm intended.
        grades_chords_values = musical_piece.get_chord_df()['sorted_grades']

        # Pick a random window of n_input chords as the generation seed.
        initial_point = random.randint(0, len(grades_chords_values)-n_input-1)
        initial_sequence_chords = list(grades_chords_values
                                       [initial_point:(initial_point+n_input)])

        if bool_train:
            logger.info('Train and save LSTM')
            music_creation = \
                music_creator.train(optimizer, accuracy, cost, pred,
                                    dir_name_model+'/'+name_model)

        logger.info('Create Music!!')
        music_creation = \
            music_creator.load_and_predict(dir_name_model,
                                           dir_name_model+'/'+name_model+'-'+str(model_version_to_load)+'.meta',
                                           initial_sequence_chords,
                                           sequence_length=sequence_length)

        logger.info('Convert grades to sequences')
        logger.info(musical_piece.get_tonality())
        chords_notes = (musical_piece
                        .convert_grades_sequence_to_notes(music_creation,
                                                          musical_piece.get_tonality()))

        logger.info('Convert it to MIDI')
        polyphony = SequenceChordPolyphony(chords_notes)
        CSVtoMIDI(polyphony.convert_to_midi(),
                  'polyphony_'+'_'.join(name_list))

        logger.info('Finished!!!')


class CreateMusicFromDataframe(object):
    """LSTM over a dense per-tick note dataframe (one column per note)."""

    def __init__(self, music_data, training_iters, n_input):

        self.training_iters = training_iters
        self.display_step = 100
        self.n_input = n_input

        # Training data is a dataframe: rows = time steps, columns = notes.
        self.training_data = music_data
        self.num_columns_training_data = self.training_data.shape[1]

        # TensorBoard log target.
        path_logs = '../tmp'
        self.writer = tf.summary.FileWriter(path_logs)

    def config_LSTM(self):
        # Hyper-parameters.
        learning_rate = 0.001
        n_hidden = 1024
        type_data = tf.float32

        # Graph inputs: both x and y are rows of the note dataframe.
        self.x = tf.placeholder(dtype=type_data,
                                shape=(None, self.num_columns_training_data),
                                name='x')
        self.y = tf.placeholder(dtype=type_data,
                                shape=(None, self.num_columns_training_data),
                                name='y')

        # Output projection, uniformly initialised in [0, 100).
        weights = tf.Variable(tf.random_uniform([n_hidden, self.num_columns_training_data],
                                                minval=0,
                                                maxval=100, dtype=type_data))
        biases = tf.Variable(tf.random_uniform([self.num_columns_training_data],
                                               minval=0,
                                               maxval=100, dtype=type_data))

        # (method continues in the next chunk)
| # message="This is weights: ", 449 | # summarize = 100) 450 | 451 | # biases = tf.Print(biases, [biases], 452 | # message="This is biases: ", 453 | # summarize = 100) 454 | 455 | pred = (self.RNN(self.x, weights, biases, n_hidden)) 456 | 457 | 458 | # Loss and optimizer 459 | # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, 460 | # labels=self.y), name='cost') 461 | # pred = tf.Print(pred, [pred], 462 | # message="This is pred: ", 463 | # summarize = 10) 464 | # pred = tf.Print(pred, [tf.shape(pred)], message="This is pred: ") 465 | 466 | # cost = tf.reduce_sum(tf.square(self.y - pred)) 467 | # print(cost) 468 | # cost = tf.reduce_all(tf.equal(tf.sign(self.y), 469 | # tf.sign(pred))) 470 | 471 | 472 | 473 | # Those that are zero, which value do they have? 474 | # complementary = (tf.constant(1.0)-self.y) 475 | # cost_zero = tf.multiply(complementary, pred) 476 | 477 | # cost_zero = tf.Print(cost_zero, [cost_zero], message="This is cost_zero: ") 478 | # # Those that are one, which value do they have? 
479 | # cost_ones = tf.multiply(self.y, pred) 480 | # cost_ones = tf.Print(cost_ones, [cost_ones], message="This is cost_ones: ") 481 | 482 | # cost = tf.reduce_sum(tf.add(cost_ones,cost_zero), name='cost') 483 | 484 | # cost = tf.reduce_sum(tf.losses.cosine_distance(self.y, pred, axis = 1)) 485 | 486 | selfy = self.y 487 | 488 | selfy = tf.Print(selfy, [selfy], 489 | message="This is selfy: ", 490 | summarize = 100) 491 | 492 | pred = tf.Print(pred, [pred], 493 | message="This is pred: ", 494 | summarize = 100) 495 | 496 | argmax = tf.cast(tf.argmax(selfy, 1), tf.float32) 497 | 498 | argmax = tf.Print(argmax, [argmax], 499 | message="This is biases: ", 500 | summarize = 100) 501 | 502 | # cost = tf.reduce_sum(tf.norm(self.y - pred), name='cost') 503 | cost = tf.reduce_sum(tf.norm(selfy - pred), name='cost') 504 | 505 | optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost) 506 | 507 | # Model evaluation 508 | # correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(self.y,1)) 509 | # correct_pred = tf.cast(pred, tf.int32) 510 | # correct_pred = tf.Print(correct_pred, [correct_pred], message="This is correct_pred: ") 511 | accuracy = cost 512 | 513 | # Initializing the variables 514 | self.init = tf.global_variables_initializer() 515 | self.saver = tf.train.Saver() 516 | 517 | return optimizer, accuracy, cost, pred 518 | 519 | def RNN(self, x, weights, biases, n_hidden): 520 | 521 | # reshape to [1, n_input] 522 | # -1 means to be inferred 523 | # tensor 't' is [[[1, 1, 1], 524 | # [2, 2, 2]], 525 | # [[3, 3, 3], 526 | # [4, 4, 4]], 527 | # [[5, 5, 5], 528 | # [6, 6, 6]]] 529 | # tensor 't' has shape [3, 2, 3] 530 | 531 | # -1 is inferred to be 2: 532 | # reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], 533 | # [4, 4, 4, 5, 5, 5, 6, 6, 6]] 534 | 535 | x = tf.reshape(x, [-1, self.num_columns_training_data]) 536 | 537 | # Generate a n_input-element sequence of inputs 538 | # (eg. 
[had] [a] [general] -> [20] [6] [33]) 539 | # 0 means per n_input (horizontal dimension) 540 | x = tf.split(x, self.n_input, 0) 541 | 542 | # # reshape to [1, n_input] 543 | # x = tf.reshape(x, [-1, self.n_input]) 544 | 545 | # # Generate a n_input-element sequence of inputs 546 | # # (eg. [had] [a] [general] -> [20] [6] [33]) 547 | # x = tf.split(x,self.n_input,1) 548 | 549 | # 2-layer LSTM, each layer has n_hidden units. 550 | # Average Accuracy= 95.20% at 50k iter 551 | rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden), 552 | rnn.BasicLSTMCell(n_hidden), 553 | rnn.BasicLSTMCell(n_hidden), 554 | rnn.BasicLSTMCell(n_hidden)]) 555 | 556 | # generate prediction 557 | outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32) 558 | 559 | # there are n_input outputs but 560 | # we only want the last output 561 | pred = tf.matmul(outputs[-1], weights) + biases 562 | tf.identity(pred, 'pred') 563 | return pred 564 | 565 | def train(self, optimizer, accuracy, cost, pred, name_model): 566 | 567 | # Launch the graph 568 | with tf.Session() as session: 569 | session.run(self.init) 570 | self.saver.save(session, name_model) 571 | step = 0 572 | offset = random.randint(0,self.n_input+1) 573 | end_offset = self.n_input + 1 574 | acc_total = 0 575 | loss_total = 0 576 | 577 | # vocab_size = len(self.dictionary) 578 | 579 | # reverse_dictionary = dict(zip(self.dictionary.values(), 580 | # self.dictionary.keys())) 581 | 582 | self.writer.add_graph(session.graph) 583 | 584 | while step < self.training_iters: 585 | # Generate a minibatch. Add some randomness on selection process. 
586 | if offset > (len(self.training_data)-end_offset): 587 | offset = random.randint(0, self.n_input+1) 588 | 589 | input_x = self.training_data.loc[offset:(offset+self.n_input-1),:] 590 | input_y = self.training_data.loc[(offset+self.n_input-1),:].to_frame().T 591 | 592 | _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \ 593 | feed_dict={self.x: input_x, self.y: input_y}) 594 | 595 | #print('pred') 596 | #print(pred) 597 | loss_total += loss 598 | acc_total += acc 599 | if (step+1) % self.display_step == 0: 600 | print("Iter= " + str(step+1) + ", Average Loss= " + \ 601 | "{:.6f}".format(loss_total/self.display_step) + ", Average Accuracy= " + \ 602 | "{:.2f}%".format(100*acc_total/self.display_step)) 603 | acc_total = 0 604 | loss_total = 0 605 | # symbols_in = [self.training_data[i] for i in range(offset, offset + self.n_input)] 606 | # symbols_out = self.training_data[offset + self.n_input] 607 | # symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())] 608 | # print("%s - [%s] vs [%s]" % (symbols_in,symbols_out,symbols_out_pred)) 609 | self.saver.save(session, name_model, global_step=step+1) 610 | step += 1 611 | offset += (self.n_input+1) 612 | 613 | def load_and_predict(self, dir_name_model, model_metadata, starting_sequence, sequence_length): 614 | 615 | output_sequence = list() 616 | 617 | with tf.Session() as session: 618 | 619 | # Other attempts 620 | # tf.saved_model.loader.load(session,[name_model], '/tmp') 621 | 622 | #First let's load meta graph and restore weights 623 | saver = tf.train.import_meta_graph(model_metadata) 624 | 625 | # Initialize variables 626 | session.run(tf.global_variables_initializer()) 627 | saver.restore(session,tf.train.latest_checkpoint(dir_name_model)) 628 | 629 | graph = tf.get_default_graph() 630 | pred = graph.get_tensor_by_name("pred:0") 631 | x = graph.get_tensor_by_name("x:0") 632 | 633 | for i in range(sequence_length): 634 | 635 | chord_prediction = session.run((pred), 
636 | feed_dict={x: starting_sequence}) 637 | 638 | # print(chord_prediction[0]) 639 | 640 | histogram = np.histogram(chord_prediction[0]) 641 | print(histogram[0]) 642 | print(np.sum(chord_prediction[0]>0.1)) 643 | # print(np.median(chord_prediction[0])) 644 | index_max_histogram = (np.argmax(histogram[0], axis = 0)) 645 | threshold = 0 #histogram[1][index_max_histogram+1] 646 | # print(threshold) 647 | 648 | output_sequence.append(chord_prediction[0]>threshold) 649 | 650 | # Update Starting Sequence 651 | # print(starting_sequence) 652 | starting_sequence.reset_index(inplace=True, drop=True) 653 | starting_sequence = starting_sequence.iloc[1:] 654 | # starting_sequence.loc[sequence_length] = duration*(chord_prediction[0] > (threshold)) 655 | starting_sequence.loc[sequence_length] = chord_prediction[0]>threshold 656 | starting_sequence.reset_index(inplace=True, drop=True) 657 | # print(np.histogram(chord_prediction[0])[1]) 658 | # print(sum(chord_prediction[0]>threshold)) 659 | 660 | return output_sequence 661 | 662 | 663 | class PlayMusicFromDataframe(object): 664 | 665 | def __init__(self, name_file_midi, 666 | n_input = 20, 667 | training_iters = 100000, 668 | sequence_length = 500, 669 | model_version_to_load = 99000, 670 | bool_train = False): 671 | 672 | 673 | musical_piece = Read(name_file_midi) 674 | 675 | print('La tonalidad es: '+musical_piece.get_tonality()) 676 | 677 | logger.info('Obtain the main dataframe of the musical piece') 678 | musical_dataframe = musical_piece.convert_tonality_to_music_dataframe() 679 | # musical_dataframe = (musical_dataframe>0).astype(int) 680 | 681 | 682 | name_model = 'n_input_'+str(n_input)+'_standard'+'_iters_'+str(training_iters)+'_'+name_file_midi[13:-4] 683 | dir_name_model = '../models/'+name_model 684 | 685 | if not os.path.exists(dir_name_model): 686 | os.makedirs(dir_name_model) 687 | 688 | 689 | music_creator = CreateMusicFromDataframe(musical_dataframe, 690 | training_iters = training_iters, 691 | n_input = 
n_input 692 | ) 693 | 694 | if bool_train: 695 | logger.info('Config LSTM') 696 | optimizer, accuracy, cost, pred = music_creator.config_LSTM() 697 | 698 | logger.info('Train') 699 | music_creation = \ 700 | music_creator.train(optimizer, accuracy, cost, pred, 701 | dir_name_model+'/'+name_model) 702 | 703 | 704 | logger.info('Create Music!!') 705 | offset = random.randint(0, musical_dataframe.shape[0]-(n_input+1)) 706 | initial_sequence_chords = musical_dataframe.iloc[offset:(offset+n_input)] 707 | 708 | music_creation = \ 709 | music_creator.load_and_predict(dir_name_model, 710 | dir_name_model+'/'+name_model+'-'+str(model_version_to_load)+'.meta', 711 | initial_sequence_chords, 712 | sequence_length = sequence_length 713 | ) 714 | 715 | logger.info('Convert grades to sequences') 716 | chords_notes = (musical_piece 717 | .convert_music_dataframe_to_notes(music_creation, 718 | musical_piece.get_tonality() 719 | ) 720 | ) 721 | 722 | logger.info('Convert it to MIDI') 723 | polyphony = SequenceChordPolyphony(chords_notes) 724 | CSVtoMIDI(polyphony 725 | .convert_to_midi(), 726 | 'dataframe_'+name_file_midi[13:-4] 727 | ) 728 | 729 | logger.info('Finished!!!') 730 | 731 | 732 | class CreateMusicFromChordSequences(object): 733 | 734 | # def __init__(self, music_data, training_iters, n_input): 735 | 736 | # self.training_iters = training_iters 737 | # self.display_step = 1000 738 | # self.n_input = n_input 739 | 740 | # # Read musical data 741 | # self.training_data = music_data['grades'] 742 | 743 | # # Target log path 744 | # path_logs = '../tmp' 745 | # self.writer = tf.summary.FileWriter(path_logs) 746 | 747 | # # Extract alphabet dictionary 748 | # alphabet = np.unique(self.training_data) 749 | # self.dictionary = dict(zip(alphabet,range(len(alphabet)))) 750 | 751 | def __init__(self, name_file_midi): 752 | save_path = '../checkpoint/' 753 | 754 | display_step = 300 755 | 756 | epochs = 13 757 | source_batch_size = 3 758 | target_batch_size = 1 759 | 760 | 
rnn_size = 128 761 | num_layers = 3 762 | 763 | encoding_embedding_size = 200 764 | decoding_embedding_size = 200 765 | 766 | learning_rate = 0.001 767 | keep_probability = 0.5 768 | 769 | musical_piece = Read(name_file_midi) 770 | musical_piece.apply_tonality() 771 | musical_piece.enrich_grades_with_duration() 772 | musical_piece.get_chord_df() 773 | 774 | training_data = musical_piece.get_chord_df()['enriched_grades'] 775 | 776 | self.get_batches(training_data, source_batch_size, target_batch_size) 777 | 778 | # Get dictionary with the mapping 779 | musical_notes_dictionary = musical_piece.get_notes_dictionary()+['',''] 780 | musical_map_dictionary = dict(zip(musical_notes_dictionary, 781 | range(1, len(musical_notes_dictionary)+1))) 782 | 783 | 784 | 785 | # (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = load_preprocess() 786 | # max_target_sentence_length = max([len(sentence) for sentence in source_int_text]) 787 | train_graph = tf.Graph() 788 | with train_graph.as_default(): 789 | input_data, targets, target_sequence_length, max_target_sequence_length = self.enc_dec_model_inputs() 790 | lr, keep_prob = self.hyperparam_inputs() 791 | 792 | train_logits, inference_logits = self.seq2seq_model(tf.reverse(input_data, [-1]), targets, 793 | keep_prob, 794 | batch_size, 795 | target_sequence_length, 796 | max_target_sequence_length, 797 | len(musical_notes_dictionary), 798 | len(musical_notes_dictionary), 799 | encoding_embedding_size, 800 | decoding_embedding_size, 801 | rnn_size, 802 | num_layers, 803 | musical_map_dictionary) 804 | 805 | training_logits = tf.identity(train_logits.rnn_output, name='logits') 806 | inference_logits = tf.identity(inference_logits.sample_id, name='predictions') 807 | 808 | # https://www.tensorflow.org/api_docs/python/tf/sequence_mask 809 | # - Returns a mask tensor representing the first N positions of each cell. 
810 | masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, 811 | dtype=tf.float32, name='masks') 812 | 813 | with tf.name_scope("optimization"): 814 | # Loss function - weighted softmax cross entropy 815 | cost = tf.contrib.seq2seq.sequence_loss( 816 | training_logits, 817 | targets, 818 | masks) 819 | 820 | # Optimizer 821 | optimizer = tf.train.AdamOptimizer(lr) 822 | 823 | # Gradient Clipping 824 | gradients = optimizer.compute_gradients(cost) 825 | capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] 826 | train_op = optimizer.apply_gradients(capped_gradients) 827 | 828 | def get_batches(self, training_data, 829 | source_batch_size, 830 | target_batch_size): 831 | 832 | tensor_source = list() 833 | tensor_target = list() 834 | 835 | for iter_tuples in range(0, training_data.shape[0]-source_batch_size): 836 | chord_sequence_items = list() 837 | 838 | for row_tuple in training_data[iter_tuples:(iter_tuples+source_batch_size)]: 839 | chord_sequence_items += row_tuple 840 | 841 | tensor_source.append(chord_sequence_items) 842 | tensor_target.append(training_data[iter_tuples+source_batch_size]) 843 | 844 | print(tensor_source) 845 | print(tensor_target) 846 | 847 | 848 | 849 | def hyperparam_inputs(self): 850 | lr_rate = tf.placeholder(tf.float32, name='lr_rate') 851 | keep_prob = tf.placeholder(tf.float32, name='keep_prob') 852 | return lr_rate, keep_prob 853 | 854 | 855 | def enc_dec_model_inputs(self): 856 | inputs = tf.placeholder(tf.int32, [None, None], name='input') 857 | targets = tf.placeholder(tf.int32, [None, None], name='targets') 858 | 859 | target_sequence_length = tf.placeholder(tf.int32, [None], name='target_sequence_length') 860 | max_target_len = tf.reduce_max(target_sequence_length) 861 | 862 | return inputs, targets, target_sequence_length, max_target_len 863 | 864 | 865 | def encoding_layer(self, rnn_inputs, rnn_size, num_layers, keep_prob, 866 | source_vocab_size, 
encoding_embedding_size): 867 | """ 868 | :return: tuple (RNN output, RNN state) 869 | """ 870 | embed = tf.contrib.layers.embed_sequence(rnn_inputs, 871 | vocab_size=source_vocab_size, 872 | embed_dim=encoding_embedding_size) 873 | 874 | stacked_cells = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(rnn_size), 875 | keep_prob) for _ in range(num_layers)]) 876 | 877 | outputs, state = tf.nn.dynamic_rnn(stacked_cells, 878 | embed, 879 | dtype=tf.float32) 880 | return outputs, state 881 | 882 | def decoding_layer_train(self, encoder_state, dec_cell, dec_embed_input, 883 | target_sequence_length, max_summary_length, 884 | output_layer, keep_prob): 885 | """ 886 | Create a training process in decoding layer 887 | :return: BasicDecoderOutput containing training logits and sample_id 888 | """ 889 | dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob) 890 | 891 | # for only input layer 892 | helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length) 893 | 894 | decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, encoder_state, output_layer) 895 | 896 | # unrolling the decoder layer 897 | outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True, maximum_iterations=max_summary_length) 898 | 899 | return outputs 900 | 901 | 902 | def decoding_layer_infer(self, encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, 903 | end_of_sequence_id, max_target_sequence_length, 904 | vocab_size, output_layer, batch_size, keep_prob): 905 | """ 906 | Create a inference process in decoding layer 907 | :return: BasicDecoderOutput containing inference logits and sample_id 908 | """ 909 | dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob) 910 | 911 | helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, tf.fill([batch_size], start_of_sequence_id), end_of_sequence_id) 912 | 913 | decoder = 
tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, encoder_state, output_layer) 914 | 915 | outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True, maximum_iterations=max_target_sequence_length) 916 | return outputs 917 | 918 | 919 | def decoding_layer(self, dec_input, encoder_state, 920 | target_sequence_length, max_target_sequence_length, 921 | rnn_size, 922 | num_layers, target_vocab_to_int, target_vocab_size, 923 | batch_size, keep_prob, decoding_embedding_size): 924 | """ 925 | Create decoding layer 926 | :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) 927 | """ 928 | target_vocab_size = len(target_vocab_to_int) 929 | dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size])) 930 | dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input) 931 | 932 | cells = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.LSTMCell(rnn_size) for _ in range(num_layers)]) 933 | 934 | with tf.variable_scope("decode"): 935 | output_layer = tf.layers.Dense(target_vocab_size) 936 | train_output = self.decoding_layer_train(encoder_state, 937 | cells, 938 | dec_embed_input, 939 | target_sequence_length, 940 | max_target_sequence_length, 941 | output_layer, 942 | keep_prob) 943 | 944 | with tf.variable_scope("decode", reuse=True): 945 | infer_output = self.decoding_layer_infer(encoder_state, 946 | cells, 947 | dec_embeddings, 948 | target_vocab_to_int[''], 949 | target_vocab_to_int[''], 950 | max_target_sequence_length, 951 | target_vocab_size, 952 | output_layer, 953 | batch_size, 954 | keep_prob) 955 | 956 | return (train_output, infer_output) 957 | 958 | def process_decoder_input(self, target_data, target_vocab_to_int, batch_size): 959 | """ 960 | Preprocess target data for encoding 961 | :return: Preprocessed target data 962 | """ 963 | # get '' id 964 | go_id = target_vocab_to_int[''] 965 | 966 | after_slice = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1]) 967 | 
after_concat = tf.concat( [tf.fill([batch_size, 1], go_id), after_slice], 1) 968 | 969 | return after_concat 970 | 971 | 972 | def seq2seq_model(self,input_data, target_data, keep_prob, batch_size, 973 | target_sequence_length, 974 | max_target_sentence_length, 975 | source_vocab_size, target_vocab_size, 976 | enc_embedding_size, dec_embedding_size, 977 | rnn_size, num_layers, target_vocab_to_int): 978 | """ 979 | Build the Sequence-to-Sequence model 980 | :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput) 981 | """ 982 | enc_outputs, enc_states = self.encoding_layer(input_data, 983 | rnn_size, num_layers, keep_prob, 984 | source_vocab_size, enc_embedding_size) 985 | 986 | dec_input = self.process_decoder_input(target_data, target_vocab_to_int, batch_size) 987 | 988 | train_output, infer_output = self.decoding_layer(dec_input,enc_states, target_sequence_length, 989 | max_target_sentence_length,rnn_size,num_layers,target_vocab_to_int, 990 | target_vocab_size,batch_size,keep_prob,dec_embedding_size) 991 | 992 | return train_output, infer_output 993 | 994 | 995 | 996 | 997 | 998 | 999 | if __name__ == '__main__': 1000 | 1001 | # python ../../Data\ Beers/src/midicsv-process.py Gymnopedie_No_1.midi > Gymnopedie_No_1.csv 1002 | name_file_midi = '../../scores/Schubert_S560_Schwanengesang_no7.csv' 1003 | name_file_midi = '../../scores/Brahms_symphony_2_2.csv' # Si M 1004 | name_file_midi = '../../scores/Albeniz_Asturias.csv' # Doesn't detect properly 1005 | name_file_midi = '../../scores/Chopin_Etude_Op_10_n_5.csv' 1006 | name_file_midi = '../../scores/Schuber_Impromptu_D_899_No_3.csv' 1007 | name_file_midi = '../../scores/Mozart_Rondo.csv' 1008 | name_file_midi = '../../scores/Mozart_Sonata_16.csv' 1009 | name_file_midi = '../../scores/Bach-Partita_No1_in_Bb_BWV825_7Gigue.csv' 1010 | name_file_midi = '../../scores/Brahms_symphony_2_1.csv' 1011 | name_file_midi = '../../scores/Bach_Cello_Suite_No_1.csv' 1012 | name_file_midi = 
'../../scores/Chopin_Etude_Op_10_n_1.csv' 1013 | name_file_midi = '../../scores/Gymnopedie_No_1.csv' 1014 | name_file_midi = '../../scores/Debussy_Claire_de_Lune.csv' 1015 | #name_file_midi = '../../scores/Beethoven_Moonlight_Sonata_third_movement.csv' 1016 | #name_file_midi = '../../scores/Schubert_Piano_Trio_2nd_Movement.csv' 1017 | 1018 | name_file_metallica = '../../scores/Metallica_-_Nothing_else_matters_-_Piano_solo_lite.csv' 1019 | name_file_bach = '../../scores/Bach_Cello_Suite_No_1.csv' 1020 | name_file_mozart = '../../scores/Mozart_Sonata_16.csv' 1021 | 1022 | 1023 | # First approach similar to a dictionary prediction 1024 | PlayMusicFromChords([name_file_mozart, name_file_metallica], 1025 | n_input = 20, 1026 | training_iters = 100000, 1027 | sequence_length = 1000, 1028 | model_version_to_load = 48000, 1029 | bool_train = False) 1030 | 1031 | # Second approach similar to a dataframe, where all possible values were present 1032 | # and the music is just a value in every column. Hence, it is called dataframe. 
1033 | # PlayMusicFromDataframe(name_file_metallica, 1034 | # n_input = 20, 1035 | # training_iters = 100000, 1036 | # sequence_length = 200, 1037 | # model_version_to_load = 100000, 1038 | # bool_train = False) 1039 | 1040 | 1041 | # CreateMusicFromChordSequences(name_file_metallica) 1042 | 1043 | # musical_piece = Read(name_file_midi) 1044 | # grades_chords = musical_piece.apply_tonality() 1045 | 1046 | # print(grades_chords.head(100).to_string()) 1047 | 1048 | # print(grades_chords.groupby('dur').size()) 1049 | # print(musical_piece 1050 | # .get_music_data() 1051 | # .groupby('dur_ticks') 1052 | # .size() 1053 | # ) 1054 | 1055 | # print(musical_piece.music_df.columns) 1056 | # print(musical_piece.music_df[[u'start_ticks', u'start_ms', u'dur_ticks', u'dur_ms']]) 1057 | # print(grades_chords) 1058 | 1059 | 1060 | -------------------------------------------------------------------------------- /src/ReadMusic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from MusicalNote import * 5 | from melody import * 6 | from Polyphony import * 7 | from Play import CSVtoMIDI 8 | from Tonality import * 9 | 10 | import numpy as np 11 | import pandas as pd 12 | 13 | from collections import Counter, OrderedDict 14 | import itertools 15 | 16 | import logging 17 | FORMAT = '%(asctime)-15s %(message)s' 18 | logging.basicConfig(format=FORMAT) 19 | logger = logging.getLogger(__name__) 20 | logger.setLevel('INFO') 21 | 22 | import sys 23 | 24 | import os 25 | import urllib2 26 | 27 | # import folders with the code 28 | # At the moment, there is not other way of importing 29 | 30 | # Importing files from c-rnn-gan to read music 31 | # sys.path.insert(0, '/Users/adesant3/Documents/Kindergarten/chopyn/c-rnn-gan') 32 | # from music_data_utils import * 33 | 34 | from pymongo import MongoClient 35 | 36 | 37 | 38 | class Score(object): 39 | 40 | pass 41 | 42 | 43 | class Read(Score): 44 | 45 | 
def __init__(self, name_file_midi): 46 | 47 | logger.info('INFO: %s', 'Creating class '+self.__class__.__name__) 48 | 49 | self.map_tonic_with_scale = \ 50 | ({'Do':['Do','Re','Mi','Fa','Sol','La','Si'], 51 | 'Re':['Re','Mi','Fa#','Sol','La','Si','Do#'], 52 | 'Reb':['Reb','Mib','Fa','Solb','Lab','Sib','Do'], 53 | 'Mi':['Mi','Fa#','Sol#','La','Si','Do#','Re#'], 54 | 'Mib':['Mib','Fa','Sol','Lab','Sib','Do','Re'], 55 | 'Fa':['Fa','Sol','La','Sib','Do','Re','Mi'], 56 | # Fa# is the same as Solb 57 | 'Sol':['Sol','La','Si','Do','Re','Mi','Fa#'], 58 | 'Solb':['Solb','Lab','Sib','Dob','Reb','Mib','Fa'], 59 | 'La':['La','Si','Do#','Re','Mi','Fa#','Sol#'], 60 | 'Lab':['Lab','Sib','Do','Reb','Mib','Fa','Sol'], 61 | 'Si':['Si','Do#','Re#','Mi','Fa#','Sol#','La#'], 62 | 'Sib':['Sib','Do','Re','Mib','Fa','Sol','La'], 63 | }) 64 | 65 | self.map_note_with_alias = \ 66 | {'Do#':'Reb', 'Reb':'Do#', 67 | 'Re#':'Mib', 'Mib':'Re#', 68 | 'Fa':'Mi#', 'Mi#':'Fa', 69 | 'Fa#':'Solb', 'Solb':'Fa#', 70 | 'Sol#':'Lab', 'Lab':'Sol#', 71 | 'La#':'Sib', 'Sib':'La#', 72 | 'Si#':'Do', 'Do':'Si#', 73 | } 74 | 75 | self.map_grades_with_scale_position = \ 76 | {'I':0, 77 | 'II':1, 78 | 'III':2, 79 | 'IV':3, 80 | 'V':4, 81 | 'VI':5, 82 | 'VII':6 83 | } 84 | 85 | 86 | self.grades = ['I','II','III','IV','V','VI','VII'] 87 | self.all_grades = ['I','I+','II','II+','III','IV','IV+','V','V+','VI','VI+','VII'] 88 | self.map_duration = [1,2,3,4,6,8,12,16,24,32] 89 | self.num_octaves = 8 90 | 91 | 92 | # Read midi file 93 | self.music_df = pd.read_csv(name_file_midi) 94 | # Calculate attribute name_note 95 | self.calculate_name_note() 96 | self.divide_music_with_most_granular_tick() 97 | 98 | def get_map_tonic_with_scale(self): 99 | return self.map_tonic_with_scale 100 | 101 | def get_map_note_with_alias(self): 102 | return self.map_note_with_alias 103 | 104 | def get_map_grades_with_scale_position(self): 105 | return self.map_grades_with_scale_position 106 | 107 | def get_music_data(self): 108 | return 
self.music_df 109 | 110 | def get_max_tick(self): 111 | return np.max(self.music_df['start_ticks']+ 112 | self.music_df['dur_ticks']+1 113 | ) 114 | 115 | def get_notes_dictionary(self): 116 | notes = list() 117 | for iter_map_duration in self.map_duration: 118 | for iter_num_octaves in range(1, self.num_octaves+1): 119 | for iter_all_grades in self.all_grades: 120 | if iter_all_grades[-1] == '+': 121 | notes.append(str(iter_map_duration)+'.'+str(iter_all_grades[:-1])+str(iter_num_octaves)+'+') 122 | else: 123 | notes.append(str(iter_map_duration)+'.'+str(iter_all_grades)+str(iter_num_octaves)) 124 | 125 | 126 | return notes 127 | 128 | def calculate_name_note(self): 129 | 130 | # Names can be F3#, A2#, then return F# and A# 131 | # Names can be F3, A2, then return F and A 132 | self.music_df['name_note'] = \ 133 | (self.music_df 134 | .apply(lambda x: 135 | _get_note_name_without_octave(x['fullNoteOctave']), axis=1) 136 | ) 137 | 138 | # Names can be F3#, A2#, then return F# and A# 139 | # Names can be F3, A2, then return F and A 140 | self.music_df['octave_name_note'] = \ 141 | (self.music_df 142 | .apply(lambda x: 143 | _get_note_name_with_octave(x['fullNoteOctave']), axis=1) 144 | ) 145 | 146 | 147 | 148 | def get_most_common_note(self): 149 | 150 | return (self 151 | .music_df 152 | # Group by name of the note (A, F#, ...) 
153 | .groupby(['name_note']) 154 | # Take the total length that note has been played 155 | .agg({'dur_ms':sum}) 156 | .rename(columns={'dur_ms': 'cum_duration'}) 157 | # Order in descending manner 158 | .sort_values(['cum_duration'],ascending=False) 159 | .reset_index() 160 | # Take the first element and the corresponding column 161 | .head(1)['name_note'][0] 162 | ) 163 | 164 | def get_tonality(self): 165 | tonalities = \ 166 | ({'Do':['Do','Re','Mi','Fa','Sol','La','Si'], 167 | 'Re':['Do#','Re','Mi','Fa#','Sol','La','Si'], 168 | #'Reb':['Do','Reb','Mib','Fa','Solb','Lab','Sib'], 169 | 'Reb':['Do','Do#','Re#','Fa','Fa#','Sol#','La#'], 170 | 'Mi':['Do#','Re#','Mi','Fa#','Sol#','La','Si'], 171 | #'Mib':['Do','Re','Mib','Fa','Sol','Lab','Sib'], 172 | 'Mib':['Do','Re','Re#','Fa','Sol','Sol#','La#'], 173 | 'Fa':['Do','Re','Mi','Fa','Sol','La','La#'], 174 | # Fa# is the same as Solb 175 | #'Fa#':['Do#','Re#','Fa','Fa#','Sol#','La#','Si'], 176 | 'Sol':['Do','Re','Mi','Fa#','Sol','La','Si'], 177 | #'Solb':['Dob','Reb','Mib','Fa','Solb','Lab','Sib'], 178 | 'Solb':['Si','Do#','Re#','Fa','Fa#','Sol#','La#'], 179 | 'La':['Do#','Re','Mi','Fa#','Sol#','La','Si'], 180 | #'Lab':['Do','Reb','Mib','Fa','Sol','Lab','Sib'], 181 | 'Lab':['Do','Do#','Re#','Fa','Sol','Sol#','La#'], 182 | 'Si':['Do#','Re#','Mi','Fa#','Sol#','La#','Si'], 183 | #'Sib':['Do','Re','Mib','Fa','Sol','La','Sib'], 184 | 'Sib':['Do','Re','Re#','Fa','Sol','La','La#'], 185 | }) 186 | 187 | # The music is stored in self.music_df 188 | note_histogram = \ 189 | (self.music_df 190 | # Group by name of the note 191 | .groupby(['name_note']) 192 | # Take the total amount of time that every note has been played 193 | # Do#, Re#, Fa#, La#, Fa, Sol# -> Reb 194 | .agg({'dur_ms':sum}) 195 | .rename(columns = {'dur_ms': 'cum_duration'}) 196 | # Order in descending manner 197 | .sort_values(['cum_duration'], ascending = False) 198 | .reset_index() 199 | ).set_index('name_note')['cum_duration'].to_dict() 200 | 201 | 202 | # 
Join of note_histogram to tonalities 203 | tonality_candidates = {} 204 | # (1) Join note_histogram to tonalties 205 | # (2) Calculate the total amount of time the intersection sounds 206 | # (3) Take the argmax 207 | for tonality_name, tonality_scale in tonalities.iteritems(): 208 | tonality_candidates[tonality_name] = np.sum([note_histogram.get(iter_tonality_scale, 0) 209 | for iter_tonality_scale in tonality_scale 210 | ]) 211 | 212 | return next(iter(OrderedDict(sorted(tonality_candidates.items(), key=lambda t: -t[1])))) 213 | 214 | def apply_tonality(self): 215 | 216 | map_tonic_with_scale = self.get_map_tonic_with_scale() 217 | map_note_with_alias = self.get_map_note_with_alias() 218 | 219 | 220 | 221 | # Return the aggregates of the chord and their sequence 222 | agg_criteria = 'octave_name_note' 223 | #agg_criteria = 'name_note' 224 | chord_df = self.aggregate_chord_from_tick(aggregation_criteria = agg_criteria) 225 | 226 | all_notes = list(np.unique(list(itertools.chain(*chord_df[agg_criteria])))) 227 | 228 | tonic = self.get_tonality() 229 | 230 | # Find the intersection between notes in the scale and notes in the piece of music 231 | tonic_scale_notes = map_tonic_with_scale[tonic] 232 | if agg_criteria == 'octave_name_note': 233 | # In the case, there are, for instance, values such as F4# or G5# 234 | common_notes = list(set(tonic_scale_notes) & set([iter_notas[:-1] for iter_notas in all_notes])) 235 | else: 236 | common_notes = list(set(tonic_scale_notes) & set(all_notes)) 237 | 238 | missing_notes_in_scale = list(set(tonic_scale_notes) - set(common_notes)) 239 | 240 | # Convert notes in music to the closest in tonic scale 241 | renamed_missing_notes = \ 242 | ([map_note_with_alias[renamed_notes] 243 | for renamed_notes in missing_notes_in_scale 244 | if renamed_notes in map_note_with_alias.keys()] 245 | ) 246 | 247 | # Apply the mapping transformation to the remaining notes 248 | if agg_criteria == 'octave_name_note': 249 | chord_df['chord'] = \ 250 | 
(chord_df[agg_criteria] 251 | .apply(lambda tuple_x: 252 | tuple([map_note_with_alias[renamed_notes[:-1]]+renamed_notes[-1] 253 | if renamed_notes[:-1] in renamed_missing_notes 254 | else renamed_notes 255 | for renamed_notes in tuple_x 256 | ]))) 257 | else: 258 | chord_df['chord'] = \ 259 | (chord_df[agg_criteria] 260 | .apply(lambda tuple_x: 261 | tuple([map_note_with_alias[renamed_notes] 262 | if renamed_notes in renamed_missing_notes 263 | else renamed_notes 264 | for renamed_notes in tuple_x 265 | ]))) 266 | 267 | # Convert chord into grades 268 | if agg_criteria == 'octave_name_note': 269 | chord_df['grades'] = \ 270 | (chord_df['chord'] 271 | .apply(lambda tuple_x: 272 | tuple([self.grades[tonic_scale_notes 273 | .index(chord_element[:-1])]+chord_element[-1] 274 | if chord_element[:-1] in tonic_scale_notes 275 | else self._apply_tonality_to_altered_notes(chord_element, tonic_scale_notes) 276 | # else 'X' 277 | for chord_element in tuple_x 278 | ]))) 279 | else: 280 | chord_df['grades'] = \ 281 | (chord_df['chord'] 282 | .apply(lambda tuple_x: 283 | tuple([self.grades[tonic_scale_notes 284 | .index(chord_element)] 285 | if chord_element in tonic_scale_notes 286 | else 'X' 287 | for chord_element in tuple_x 288 | ]))) 289 | 290 | self.chord_df = chord_df[['chord','grades','time']] 291 | 292 | def get_chord_df(self): 293 | self.sort_chord_df() 294 | return self.chord_df 295 | 296 | 297 | def sort_chord_df(self): 298 | # Sort grades within chord_df 299 | # For instance, (III3, VI3, VI2, I3, VI4) -> (VI2, I3, III3, VI3, VI4) 300 | 301 | 302 | self.chord_df['sorted_grades'] = \ 303 | (self.chord_df 304 | .apply(lambda row_df: self._sort_chord_df(row_df['grades']), axis = 1) 305 | ) 306 | 307 | def _sort_chord_df(self, tuple_row): 308 | return tuple(sorted(tuple_row, key=lambda x: (int(_clean_tuple_of_alterations(x)[-1]), 309 | self.map_grades_with_scale_position[_clean_tuple_of_alterations(x)[:-1]]))) 310 | 311 | def _apply_tonality_to_altered_notes(self, 
chord_element, tonic_scale_notes): 312 | 313 | # position_in_tonic_scale is an array of one element, i.e. [2] 314 | # If chord_element[:-1] is, for instance, Si, and the scale is Reb 315 | # then, position_in_tonic_scale will contain [6] 316 | 317 | # It attempts at finding which note within the scale is called in the same way 318 | position_in_tonic_scale = [position for position, scale_note in enumerate(tonic_scale_notes) 319 | # Sol and Sol# (compare if they coincide in more than one) 320 | # Avoid Si and Sol# which will give you one coincidence 321 | if scale_note[:2] == chord_element[:2]][0] 322 | 323 | # In case, there are flats, and the note is called similarly, 324 | # Then the grade must be the same than the corresponding note in the scale but with + 325 | # Sib (IV) -> Si natural -> (IV+) 326 | if tonic_scale_notes[position_in_tonic_scale][-1] == 'b': 327 | chord = self.grades[position_in_tonic_scale] 328 | 329 | # If the note is Si and Dob is within the scale (only happens in Solb) 330 | # Then they are the same note and should be corrected. 331 | if ('Dob' in tonic_scale_notes) & (chord_element[:-1] == 'Si'): 332 | chord = self.grades[tonic_scale_notes.index('Dob')] 333 | 334 | elif tonic_scale_notes[position_in_tonic_scale][-1] == '#': 335 | chord = self.grades[(position_in_tonic_scale-1) % 7] 336 | 337 | else: 338 | chord = self.grades[position_in_tonic_scale] 339 | 340 | return chord+chord_element[-1]+'+' 341 | 342 | def divide_music_with_most_granular_tick(self): 343 | 344 | # Obtain the histograms of ticks 345 | # Counter({80.0: 946, 0.0: 642, 40.0: 15, 240.0: 8, 60.0: 6, 20.0: 3, 480.0: 2, 120.0: 2, nan: 1, 160.0: 1, 300.0: 1, 360.0: 1}) 346 | hist_ticks = Counter(self.music_df['start_ticks'].diff()) 347 | # Take the minimum_tick different from 0 348 | # [ 0. 20. 40. 60. 80. 120. 160. 240. 300. 360. 480. nan] 349 | # In this case, it is 20. 
350 | minimum_tick = int([ticks for ticks in np.sort(hist_ticks.keys()) if ticks > 0][0]) 351 | 352 | self.minimum_tick = minimum_tick 353 | 354 | # Divide music with minimum_tick 355 | 356 | self.music_df['num_minimum_ticks'] = \ 357 | ((self.music_df['dur_ticks']+1) / minimum_tick) 358 | 359 | granular_music_list = list() 360 | 361 | for index, iter_note in self.music_df.iterrows(): 362 | for iter_num_minimum_ticks in range(int(iter_note['num_minimum_ticks'])): 363 | granular_music_list.append(([iter_note['start_ticks']+(iter_num_minimum_ticks)*minimum_tick, 364 | iter_note['pitch'],iter_note['velocity'], 365 | iter_note['part'],iter_note['fullNoteOctave'], 366 | iter_note['name_note'],iter_note['octave_name_note']] 367 | )) 368 | 369 | self.granular_music_df = (pd.DataFrame(granular_music_list, 370 | columns = ['start_ticks','pitch','velocity','part', 371 | 'fullNoteOctave','name_note','octave_name_note'])) 372 | 373 | def get_chord_from_tick(self): 374 | # Given a sequence of notes such as: 375 | # 0,0,79,85,90,F6#,96,1 376 | # 0,0,119,128,66,F4#,96,2 377 | # 0,0,119,128,61,C4#,96,2 378 | # 0,0,119,128,58,A3#,96,2 379 | # 0,0,119,128,54,F3#,96,2 380 | # 80,86,79,85,94,A6#,96,1 381 | # 160,172,79,85,85,C6#,96,1 382 | # 240,258,79,85,90,F6#,96,1 383 | # 240,258,119,128,66,F4#,96,2 384 | # 240,258,119,128,63,D4#,96,2 385 | # 240,258,119,128,59,B3,96,2 386 | # 240,258,119,128,54,F3#,96,2 387 | 388 | # This method will extract the chords for 0, 80, 160, 240 and so forth. 389 | 390 | return (self.granular_music_df 391 | .groupby('start_ticks') 392 | .agg({'fullNoteOctave':lambda x: Counter(x), 393 | # 'name_note':lambda x: tuple(dict(Counter(x)).keys())} 394 | 'name_note':lambda x: Counter(x), 395 | 'octave_name_note':lambda x: Counter(x), 396 | } 397 | ) 398 | .reset_index() 399 | ) 400 | # Do not forget the aggregated time per granular chord. 
401 | 402 | def aggregate_chord_from_tick(self, aggregation_criteria = 'name_note', dataframe = None): 403 | 404 | # The other option for aggregation_criteria is: 405 | # aggregation_criteria = 'fullNoteOctave' 406 | 407 | # Given a sequence of notes such as: 408 | # {u'F2#': 1, u'F1#': 1, u'F3#': 1} 409 | # {u'F2#': 1, u'F1#': 1, u'F3#': 1} 410 | # {u'F2#': 1, u'F1#': 1, u'F3#': 1} 411 | # {u'F2#': 1, u'F1#': 1, u'F3#': 1} 412 | # {u'F2#': 1, u'F1#': 1, u'F3#': 1} 413 | # {u'F2#': 1, u'F4#': 1, u'F3#': 1} 414 | # {u'F2#': 1, u'F4#': 2, u'F3#': 1} 415 | 416 | # The result must be 417 | # {u'F2#': 5, u'F1#': 5, u'F3#': 5} 418 | # {u'F2#': 2, u'F4#': 3, u'F3#': 2} 419 | 420 | # First of all calculate chord per ticks 421 | chord_per_ticks = self.get_chord_from_tick() 422 | 423 | # See the changes in chord per ticks 424 | changes_in_chords = chord_per_ticks[aggregation_criteria].diff() 425 | # Since the first element is Nan, force it to be {} 426 | # It is {} since we are working with dicts 427 | changes_in_chords[0] = {} 428 | 429 | # Store the final column into id_aggregation_criteria 430 | chord_per_ticks['id_aggregation_criteria'] = np.cumsum([len(element)>0 431 | for element in changes_in_chords]) 432 | 433 | # print(chord_per_ticks[0:100].to_string()) 434 | 435 | aggregated_chord_per_ticks = (chord_per_ticks 436 | .groupby(['id_aggregation_criteria', 437 | chord_per_ticks[aggregation_criteria].map(tuple)]) 438 | .agg({'start_ticks':['min','max'] 439 | } 440 | ) 441 | .reset_index() 442 | ) 443 | 444 | 445 | # Rename the columns 446 | aggregated_chord_per_ticks.columns = ['_'.join(col) for col in aggregated_chord_per_ticks.columns] 447 | 448 | # In order to avoid duration to be negative, start_ticks_min must be sorted. 449 | # The main reason is due to the fact that when aggregating per id_aggregation_criteria it could be possible 450 | # that many chords get some time advance in relation to real order. 
451 | aggregated_chord_per_ticks.sort_values(by = 'start_ticks_min', axis=0, inplace = True) 452 | 453 | # Set maximum value of the start_ticks_min 454 | aggregated_chord_per_ticks['new_duration'] = np.roll(aggregated_chord_per_ticks['start_ticks_min'],-1)-aggregated_chord_per_ticks['start_ticks_min'] 455 | aggregated_chord_per_ticks.loc[aggregated_chord_per_ticks.index[-1], 'new_duration'] = self.get_max_tick()-aggregated_chord_per_ticks.loc[aggregated_chord_per_ticks.index[-1], 'start_ticks_min'] 456 | 457 | aggregated_chord_per_ticks.columns = \ 458 | ['seq_id', aggregation_criteria, 'min_tick','max_tick', 'time'] 459 | 460 | return aggregated_chord_per_ticks 461 | 462 | def convert_grades_sequence_to_notes(self, grades_sequence, tonic): 463 | 464 | tonality_scale = self.get_map_tonic_with_scale()[tonic] 465 | grade_mapping = self.get_map_grades_with_scale_position() 466 | 467 | notes_sequence = list() 468 | 469 | for chord in grades_sequence: 470 | notes_chord = list() 471 | for note in chord: 472 | if note != 'X': 473 | # Extract octave 474 | if note[-1] != '+': 475 | octave = int(note[-1]) 476 | note_name = note[:-1] 477 | converted_note = tonality_scale[grade_mapping[note_name]] 478 | if converted_note[-1] == 'b' or converted_note[-1] == '#': 479 | alteration = converted_note[-1] 480 | name_note = converted_note[:-1] 481 | else: 482 | alteration = '' 483 | name_note = converted_note 484 | 485 | else: 486 | # Here, we have VI4+, II5+, ... 
487 | octave = int(note[-2]) 488 | note_name = note[:-2] 489 | converted_note = tonality_scale[grade_mapping[note_name]] 490 | 491 | if converted_note[-1] == 'b': 492 | alteration = '' 493 | name_note = converted_note[:-1] 494 | else: 495 | alteration = '#' 496 | name_note = converted_note 497 | 498 | print([octave, note_name, name_note]) 499 | else: 500 | # In case of X, then use tonic 501 | converted_note = tonic 502 | octave = 4 503 | if tonic[-1] == 'b' or tonic[-1] == '#': 504 | alteration = tonic[-1] 505 | name_note = tonic[:-1] 506 | else: 507 | alteration = '' 508 | name_note = tonic 509 | 510 | notes_props = {'duration':200, 'intensity':70, 'timbre':1, 511 | 'alteration':alteration, 'octave':octave} 512 | notes_chord.append(globals()[name_note](**notes_props)) 513 | 514 | notes_sequence.append(notes_chord) 515 | 516 | # notes_props = {'duration':200, 'intensity':70, 'timbre':1, 517 | # 'alteration':alteration, 'octave':octave} 518 | 519 | # notes_sequence.append(globals()[name_note](**notes_props)) 520 | 521 | return notes_sequence 522 | 523 | def enrich_grades_with_duration(self): 524 | 525 | # Make a histogram of the duration of each chord 526 | aggregated_time = (self.chord_df.groupby('time').size().reset_index(name='histogram')) 527 | aggregated_time.sort_values(by = 'histogram', ascending = False, inplace = True) 528 | # aggregated_time.reset_index(inplace = True) 529 | 530 | # Take the 8-top most common times 531 | top_most_common_time = 8 532 | 533 | # Take the minimum of the time within that 8-top most common times 534 | # Why? 
Because that minimum is suppose to be the minimum temporal division 535 | min_duration = np.min(aggregated_time.loc[:top_most_common_time, 'time']) 536 | 537 | self.chord_df['prop_duration'] = (self.chord_df['time']/min_duration).astype(int) 538 | 539 | self.chord_df['general_duration'] = self.chord_df['prop_duration'].apply(_apply_general_duration) 540 | 541 | self.chord_df['enriched_grades'] = \ 542 | (self.chord_df 543 | .apply(lambda row_df: tuple(str(row_df['general_duration'])+'.'+ chord_element for chord_element in row_df['grades']), 544 | axis = 1) 545 | ) 546 | 547 | self.chord_df = self.chord_df[['chord','grades','time','enriched_grades']] 548 | 549 | def convert_tonality_to_music_dataframe(self): 550 | 551 | grades_as_columns = list() 552 | 553 | # Calculate the names of all the columns depending on the num of octaves 554 | # ['I','I+', ...] -> ['I1','I1+', ..., 'V5','V5+'] 555 | for iter_num_octaves in range(1, self.num_octaves+1): 556 | for iter_grade in self.all_grades: 557 | if iter_grade[-1] != '+': 558 | grades_as_columns.extend([iter_grade+str(iter_num_octaves)]) 559 | else: 560 | grades_as_columns.extend([iter_grade[:-1]+str(iter_num_octaves)+'+']) 561 | 562 | # Create a dataframe with all the columns 563 | grades_dataframe = pd.DataFrame(columns = grades_as_columns) 564 | self.empty_grades_dataframe = grades_dataframe 565 | 566 | # Obtain tonality and grades sequence 567 | # grades_and_duration = self.apply_tonality() 568 | self.apply_tonality() 569 | 570 | # In every position, store the duration 571 | for iter_grades_dataframe in range(0,self.get_chord_df().shape[0]): 572 | grades_dataframe.loc[iter_grades_dataframe,self.get_chord_df()['grades'][iter_grades_dataframe]] = \ 573 | self.get_chord_df()['time'][iter_grades_dataframe] 574 | 575 | # Return dataframe with grades and duration 576 | return grades_dataframe.fillna(0) 577 | 578 | def convert_music_dataframe_to_notes(self, music_array, tonic): 579 | 580 | grades_as_columns = 
self.empty_grades_dataframe.columns 581 | tonality_scale = self.get_map_tonic_with_scale()[tonic] 582 | grade_mapping = self.get_map_grades_with_scale_position() 583 | 584 | notes_sequence = list() 585 | 586 | num_chords_music_array = len(music_array) 587 | 588 | for iter_music_array in music_array: 589 | # Take the positions of those notes that were played 590 | # [0,0,0,80,0,80] -> [3,5] 591 | notes_positions = iter_music_array.nonzero() 592 | # From former array, convert it to the corresponding grades 593 | # [3,5] -> ['V4', 'II5'] 594 | grades_chords = [grades_as_columns[iter_notes_position] for iter_notes_position in notes_positions][0] 595 | 596 | # List to store all the notes associated to the same chord 597 | notes_chord = list() 598 | 599 | # Iterate every chord 600 | for note_in_chord in grades_chords: 601 | if note_in_chord[-1]!='+': 602 | # If the note is not altered 603 | octave = int(note_in_chord[-1]) 604 | note_name = note_in_chord[:-1] 605 | converted_note = tonality_scale[grade_mapping[note_name]] 606 | if converted_note[-1] == 'b' or converted_note[-1] == '#': 607 | alteration = converted_note[-1] 608 | name_note = converted_note[:-1] 609 | else: 610 | alteration = '' 611 | name_note = converted_note 612 | 613 | else: 614 | # Here, we have VI4+, II5+, ... 
615 | octave = int(note_in_chord[-2]) 616 | note_name = note_in_chord[:-2] 617 | converted_note = tonality_scale[grade_mapping[note_name]] 618 | 619 | if converted_note[-1] == 'b': 620 | alteration = '' 621 | name_note = converted_note[:-1] 622 | else: 623 | alteration = '#' 624 | name_note = converted_note 625 | 626 | notes_props = {'duration':200, 'intensity':70, 'timbre':1, 627 | 'alteration':alteration, 'octave':octave} 628 | # Stores all the notes under a same chord 629 | notes_chord.append(globals()[name_note](**notes_props)) 630 | 631 | # Store all the chords under a sequence 632 | notes_sequence.append(notes_chord) 633 | 634 | return notes_sequence 635 | 636 | def download_midi_music(self): 637 | # Based on the work done by 638 | # Written by Olof Mogren, http://mogren.one/ 639 | 640 | ignore_patterns = ['xoom'] 641 | sources = {} 642 | sources['classical'] = {} 643 | sources['classical']['alkan'] = ['http://www.classicalmidi.co.uk/alkan.htm'] 644 | sources['classical']['aguado'] = ['http://www.classicalmidi.co.uk/aguadodion.htm'] 645 | sources['classical']['adam'] = ['http://www.classicalmidi.co.uk/adam.htm'] 646 | sources['classical']['albenizisaac'] = ['http://www.classicalmidi.co.uk/albeniz.htm'] 647 | sources['classical']['albenizmateo'] = ['http://www.classicalmidi.co.uk/albenizmateo.htm'] 648 | sources['classical']['albinoni'] = ['http://www.classicalmidi.co.uk/albinoni.htm'] 649 | sources['classical']['alford'] = ['http://www.classicalmidi.co.uk/alford.htm'] 650 | sources['classical']['anderson'] = ['http://www.classicalmidi.co.uk/anderson.htm'] 651 | sources['classical']['ansell'] = ['http://www.classicalmidi.co.uk/anselljohn.htm'] 652 | sources['classical']['arensky'] = ['http://www.classicalmidi.co.uk/arensky.htm'] 653 | sources['classical']['arriaga'] = ['http://www.classicalmidi.co.uk/arriag.htm'] 654 | sources['classical']['bach'] = ['http://www.midiworld.com/bach.htm','http://www.classicalmidi.co.uk/bach.htm'] 655 | 
sources['classical']['bartok'] = ['http://www.midiworld.com/bartok.htm','http://www.classicalmidi.co.uk/bartok.htm'] 656 | sources['classical']['barber'] = ['http://www.classicalmidi.co.uk/barber.htm'] 657 | sources['classical']['barbieri'] = ['http://www.classicalmidi.co.uk/barbie.htm'] 658 | sources['classical']['bax'] = ['http://www.classicalmidi.co.uk/bax.htm'] 659 | sources['classical']['beethoven'] = ['http://www.midiworld.com/beethoven.htm','http://www.classicalmidi.co.uk/beethoven.htm'] 660 | sources['classical']['bellini'] = ['http://www.classicalmidi.co.uk/bellini.htm'] 661 | sources['classical']['berlin'] = ['http://www.classicalmidi.co.uk/berlin.htm'] 662 | sources['classical']['berlioz'] = ['http://www.classicalmidi.co.uk/berlioz.htm'] 663 | sources['classical']['binge'] = ['http://www.classicalmidi.co.uk/binge.htm'] 664 | sources['classical']['bizet'] = ['http://www.classicalmidi.co.uk/bizet.htm'] 665 | sources['classical']['boccherini'] = ['http://www.classicalmidi.co.uk/bocc.htm'] 666 | sources['classical']['boellman'] = ['http://www.classicalmidi.co.uk/boell.htm'] 667 | sources['classical']['borodin'] = ['http://www.classicalmidi.co.uk/borodin.htm'] 668 | sources['classical']['boyce'] = ['http://www.classicalmidi.co.uk/boyce.htm'] 669 | sources['classical']['brahms'] = ['http://www.midiworld.com/brahms.htm','http://www.classicalmidi.co.uk/brahms.htm'] 670 | sources['classical']['breton'] = ['http://www.classicalmidi.co.uk/breton.htm'] 671 | sources['classical']['britten'] = ['http://www.classicalmidi.co.uk/britten.htm'] 672 | sources['classical']['bouwer'] = ['http://www.classicalmidi.co.uk/bouwer.htm'] 673 | sources['classical']['bruch'] = ['http://www.classicalmidi.co.uk/bruch.htm'] 674 | sources['classical']['bruckner'] = ['http://www.classicalmidi.co.uk/bruck.htm'] 675 | sources['classical']['bergmuller'] = ['http://www.classicalmidi.co.uk/bergmuller.htm'] 676 | sources['classical']['busoni'] = ['http://www.classicalmidi.co.uk/busoni.htm'] 677 
| sources['classical']['byrd'] = ['http://www.midiworld.com/byrd.htm','http://www.classicalmidi.co.uk/byrd.htm'] 678 | sources['classical']['carulli'] = ['http://www.classicalmidi.co.uk/carull.htm'] 679 | sources['classical']['chabrier'] = ['http://www.classicalmidi.co.uk/chabrier.htm'] 680 | sources['classical']['chaminade'] = ['http://www.classicalmidi.co.uk/chaminad.htm'] 681 | sources['classical']['chapi'] = ['http://www.classicalmidi.co.uk/chapie.htm'] 682 | sources['classical']['cherubini'] = ['http://www.classicalmidi.co.uk/cherub.htm'] 683 | sources['classical']['chopin'] = ['http://www.midiworld.com/chopin.htm','http://www.classicalmidi.co.uk/chopin.htm'] 684 | sources['classical']['clementi'] = ['http://www.classicalmidi.co.uk/clemen.htm'] 685 | sources['classical']['coates'] = ['http://www.classicalmidi.co.uk/coates.htm'] 686 | sources['classical']['copland'] = ['http://www.classicalmidi.co.uk/copland.htm'] 687 | sources['classical']['corelli'] = ['http://www.classicalmidi.co.uk/cor.htm'] 688 | sources['classical']['cramer'] = ['http://www.classicalmidi.co.uk/cramer.htm'] 689 | sources['classical']['curzon'] = ['http://www.classicalmidi.co.uk/cuzon.htm'] 690 | sources['classical']['czerny'] = ['http://www.classicalmidi.co.uk/czerny.htm'] 691 | sources['classical']['debussy'] = ['http://www.classicalmidi.co.uk/debussy.htm'] 692 | sources['classical']['delibes'] = ['http://www.classicalmidi.co.uk/del.htm'] 693 | sources['classical']['delius'] = ['http://www.classicalmidi.co.uk/delius.htm'] 694 | sources['classical']['dialoc'] = ['http://www.classicalmidi.co.uk/diaoc.htm'] 695 | sources['classical']['dupre'] = ['http://www.classicalmidi.co.uk/dupre.htm'] 696 | sources['classical']['dussek'] = ['http://www.classicalmidi.co.uk/dussek.htm'] 697 | # sources['classical']['dvorak'] = ['http://www.classicalmidi.co.uk/dvok.htm'] 698 | sources['classical']['elgar'] = ['http://www.classicalmidi.co.uk/elgar.htm'] 699 | sources['classical']['eshpai'] = 
['http://www.classicalmidi.co.uk/Eshpai.htm', 'http://www.classicalmidi.co.uk/Eshpai%20.htm'] 700 | sources['classical']['faure'] = ['http://www.classicalmidi.co.uk/faure.htm'] 701 | sources['classical']['field'] = ['http://www.classicalmidi.co.uk/field.htm'] 702 | sources['classical']['flotow'] = ['http://www.classicalmidi.co.uk/flotow.htm'] 703 | sources['classical']['foster'] = ['http://www.classicalmidi.co.uk/foster.htm'] 704 | sources['classical']['franck'] = ['http://www.classicalmidi.co.uk/franck.htm'] 705 | sources['classical']['fresc'] = ['http://www.classicalmidi.co.uk/fresc.htm'] 706 | sources['classical']['garoto'] = ['http://www.classicalmidi.co.uk/garoto.htm'] 707 | sources['classical']['german'] = ['http://www.classicalmidi.co.uk/german.htm'] 708 | sources['classical']['gershwin'] = ['http://www.classicalmidi.co.uk/gershwin.htm'] 709 | sources['classical']['gilbert'] = ['http://www.classicalmidi.co.uk/gilbert.htm'] 710 | # sources['classical']['ginast'] = ['http://www.classicalmidi.co.uk/ginast.htm'] 711 | sources['classical']['gott'] = ['http://www.classicalmidi.co.uk/gott.htm'] 712 | sources['classical']['gounod'] = ['http://www.classicalmidi.co.uk/gounod.htm'] 713 | sources['classical']['grain'] = ['http://www.classicalmidi.co.uk/grain.htm'] 714 | sources['classical']['grieg'] = ['http://www.classicalmidi.co.uk/grieg.htm'] 715 | sources['classical']['griff'] = ['http://www.classicalmidi.co.uk/griff.htm'] 716 | sources['classical']['haydn'] = ['http://www.midiworld.com/haydn.htm','http://www.classicalmidi.co.uk/haydn.htm'] 717 | sources['classical']['handel'] = ['http://www.midiworld.com/handel.htm','http://www.classicalmidi.co.uk/handel.htm'] 718 | sources['classical']['heller'] = ['http://www.classicalmidi.co.uk/heller.htm'] 719 | sources['classical']['herold'] = ['http://www.classicalmidi.co.uk/herold.htm'] 720 | sources['classical']['hiller'] = ['http://www.classicalmidi.co.uk/hiller.htm'] 721 | sources['classical']['holst'] = 
['http://www.classicalmidi.co.uk/holst.htm'] 722 | sources['classical']['hummel'] = ['http://www.midiworld.com/hummel.htm','http://www.classicalmidi.co.uk/hummel.htm'] 723 | sources['classical']['ibert'] = ['http://www.classicalmidi.co.uk/ibert.htm'] 724 | sources['classical']['ives'] = ['http://www.classicalmidi.co.uk/ives.htm'] 725 | sources['classical']['janacek'] = ['http://www.classicalmidi.co.uk/janacek.htm'] 726 | sources['classical']['joplin'] = ['http://www.classicalmidi.co.uk/joplin.htm'] 727 | sources['classical']['jstrauss'] = ['http://www.classicalmidi.co.uk/jstrauss.htm'] 728 | sources['classical']['karg'] = ['http://www.classicalmidi.co.uk/karl.htm'] 729 | sources['classical']['khach'] = ['http://www.classicalmidi.co.uk/khach.htm'] 730 | sources['classical']['kuhlau'] = ['http://www.classicalmidi.co.uk/kuhlau.htm'] 731 | sources['classical']['lalo'] = ['http://www.classicalmidi.co.uk/lalo.htm'] 732 | sources['classical']['lemire'] = ['http://www.classicalmidi.co.uk/lemire.htm'] 733 | sources['classical']['lenar'] = ['http://www.classicalmidi.co.uk/lenar.htm'] 734 | sources['classical']['liszt'] = ['http://www.midiworld.com/liszt.htm','http://www.classicalmidi.co.uk/liszt.htm'] 735 | sources['classical']['lobos'] = ['http://www.classicalmidi.co.uk/lobos.htm'] 736 | sources['classical']['lovland'] = ['http://www.classicalmidi.co.uk/lovland.htm'] 737 | sources['classical']['lyssen'] = ['http://www.classicalmidi.co.uk/lyssen.htm'] 738 | sources['classical']['maccunn'] = ['http://www.classicalmidi.co.uk/maccunn.htm'] 739 | sources['classical']['mahler'] = ['http://www.classicalmidi.co.uk/mahler.htm'] 740 | sources['classical']['maier'] = ['http://www.classicalmidi.co.uk/maier.htm'] 741 | sources['classical']['marcello'] = ['http://www.classicalmidi.co.uk/marcello.htm'] 742 | sources['classical']['martini'] = ['http://www.classicalmidi.co.uk/martini.htm'] 743 | sources['classical']['mehul'] = ['http://www.classicalmidi.co.uk/mehul.htm'] 744 | 
# Remaining composer index pages, grouped by hosting site.
_MIDIWORLD_URL = 'http://www.midiworld.com/'
_CLASSICALMIDI_URL = 'http://www.classicalmidi.co.uk/'

# (composer, list-of-index-pages) pairs. A list of tuples keeps the original
# insertion order, which a plain dict literal would not guarantee on Python 2.
_REMAINING_CLASSICAL_PAGES = [
    ('mendelssohn', [_MIDIWORLD_URL + 'mendelssohn.htm']),
    ('messager', [_CLASSICALMIDI_URL + 'messager.htm']),
    ('messia', [_CLASSICALMIDI_URL + 'messia.htm']),
    ('meyerbeer', [_CLASSICALMIDI_URL + 'meyerbeer.htm']),
    ('modest', [_CLASSICALMIDI_URL + 'modest.htm']),
    ('moszkowski', [_CLASSICALMIDI_URL + 'moszk.htm']),
    ('mozart', [_MIDIWORLD_URL + 'mozart.htm', _CLASSICALMIDI_URL + 'mozart.htm']),
    ('nikolaievich', [_CLASSICALMIDI_URL + 'scab.htm']),
    ('orff', [_CLASSICALMIDI_URL + 'orff.htm']),
    ('pachelbel', [_CLASSICALMIDI_URL + 'pach.htm']),
    ('paderewski', [_CLASSICALMIDI_URL + 'paderewski.htm']),
    ('pagg', [_CLASSICALMIDI_URL + 'pagg.htm']),
    ('palestrina', [_CLASSICALMIDI_URL + 'palestrina.htm']),
    ('paradisi', [_CLASSICALMIDI_URL + 'paradisi.htm']),
    ('poulenc', [_CLASSICALMIDI_URL + 'poulenc.htm']),
    ('pres', [_CLASSICALMIDI_URL + 'pres.htm']),
    ('prokif', [_CLASSICALMIDI_URL + 'prokif.htm']),
    ('puccini', [_CLASSICALMIDI_URL + 'puccini.htm']),
    ('rachmaninov', [_MIDIWORLD_URL + 'rachmaninov.htm', _CLASSICALMIDI_URL + 'rach.htm']),
    ('ravel', [_CLASSICALMIDI_URL + 'ravel1.htm']),
    # ('respig', [_CLASSICALMIDI_URL + 'respig.htm']),  # disabled in original
    ('rimsky', [_CLASSICALMIDI_URL + 'rimsky.htm']),
    ('rossini', [_CLASSICALMIDI_URL + 'rossini.htm']),
    ('strauss', [_CLASSICALMIDI_URL + 'rstrauss.htm']),
    ('sacrlatt', [_CLASSICALMIDI_URL + 'sacrlatt.htm']),
    ('saens', [_CLASSICALMIDI_URL + 'saens.htm']),
    ('sanz', [_CLASSICALMIDI_URL + 'sanz.htm']),
    ('satie', [_CLASSICALMIDI_URL + 'satie.htm']),
    ('scarlatti', [_MIDIWORLD_URL + 'scarlatti.htm', _CLASSICALMIDI_URL + 'scarlatt.htm']),
    ('schoberg', [_CLASSICALMIDI_URL + 'schoberg.htm']),
    ('schubert', [_CLASSICALMIDI_URL + 'schubert.htm']),
    ('schumann', [_MIDIWORLD_URL + 'schumann.htm', _CLASSICALMIDI_URL + 'schuman.htm']),
    ('scriabin', [_MIDIWORLD_URL + 'scriabin.htm']),
    # ('shostakovich', [_CLASSICALMIDI_URL + 'shost.htm']),  # disabled in original
    ('sibelius', [_CLASSICALMIDI_URL + 'sibelius.htm']),
    ('soler', [_CLASSICALMIDI_URL + 'soler.htm']),
    ('sor', [_CLASSICALMIDI_URL + 'sor.htm']),
    ('sousa', [_CLASSICALMIDI_URL + 'sousa.htm']),
    ('stravinsky', [_CLASSICALMIDI_URL + 'strav.htm']),
    ('sullivan', [_CLASSICALMIDI_URL + 'sull.htm']),
    ('susato', [_CLASSICALMIDI_URL + 'susato.htm']),
    ('taylor', [_CLASSICALMIDI_URL + 'taylor.htm']),
    ('tchaikovsky', [_MIDIWORLD_URL + 'tchaikovsky.htm', _CLASSICALMIDI_URL + 'tch.htm']),
    ('thomas', [_CLASSICALMIDI_URL + 'thomas.htm']),
    ('vaughan', [_CLASSICALMIDI_URL + 'vaughan.htm']),
    ('verdi', [_CLASSICALMIDI_URL + 'verdi.htm']),
    ('vivaldi', [_CLASSICALMIDI_URL + 'vivaldi.htm']),
    ('wagner', [_CLASSICALMIDI_URL + 'wagner.htm']),
    ('walton', [_CLASSICALMIDI_URL + 'walton.htm']),
    ('wyschnegradsky', [_CLASSICALMIDI_URL + 'Wyschnegradsky.htm']),
    ('yradier', [_CLASSICALMIDI_URL + 'yradier.htm']),
]

for _composer_name, _index_pages in _REMAINING_CLASSICAL_PAGES:
    sources['classical'][_composer_name] = _index_pages


# Per-genre/per-composer registry of downloaded file names, filled by the
# crawl loop below; datadir is the local mirror root.
midi_files = {}
datadir = '/Users/adesant3/Documents/Kindergarten/chopyn/data/'


import urlparse, urllib2, os, math, random, re, string, sys

# if os.path.exists(os.path.join(datadir, 'do-not-redownload.txt')):
#     print 'Already completely downloaded, delete do-not-redownload.txt to check for files to download.'
# ---------------------------------------------------------------------------
# Crawl every composer index page in `sources`, collect links to .mid files,
# and mirror them under <datadir>/<genre>/<composer>/.
# Relies on module-level names defined above: sources, midi_files, datadir,
# ignore_patterns, plus the urlparse/urllib2/os/re/string imports (Python 2).
# ---------------------------------------------------------------------------

# Characters allowed in a local file name; hoisted out of the loops since it
# never changes.
valid_chars = "-_.()%s%s" % (string.ascii_letters, string.digits)

for genre in sources:
    midi_files[genre] = {}
    for composer in sources[genre]:
        midi_files[genre][composer] = []
        print('---------------' + composer + '--------------')
        for url in sources[genre][composer]:
            try:
                print('**************' + url + '**************')
                response = urllib2.urlopen(url)
                data = response.read()

                # Make relative hrefs absolute so the .mid regex below can
                # match every link as a full http:// URL.
                urlparsed = urlparse.urlparse(url)
                data = re.sub('href="\/', 'href="http://' + urlparsed.hostname + '/', data, flags=re.IGNORECASE)
                data = re.sub('href="(?!http:)', 'href="http://' + urlparsed.hostname + urlparsed.path[:urlparsed.path.rfind('/')] + '/', data, flags=re.IGNORECASE)

                links = re.findall('"(http://[^"]+\.mid)"', data)
                for link in links:
                    skip_link = False
                    for pattern in ignore_patterns:
                        if pattern in link:
                            print('Not downloading links with {}'.format(pattern))
                            skip_link = True
                            break  # was `continue`: one match is enough to skip the link
                    if skip_link:
                        continue

                    # Sanitize the remote name into a safe local file name.
                    filename = link.split('/')[-1]
                    filename = ''.join(c for c in filename if c in valid_chars)
                    midi_files[genre][composer].append(filename)
                    localdir = os.path.join(datadir, genre, composer)
                    localpath = os.path.join(localdir, filename)
                    if os.path.exists(localpath):
                        print('File exists. Not redownloading: {}'.format(localpath))
                        continue
                    try:
                        response_midi = urllib2.urlopen(link)
                        try:
                            os.makedirs(localdir)
                        except OSError:
                            pass  # directory already exists
                        data_midi = response_midi.read()
                        if 'DOCTYPE html PUBLIC' in data_midi:
                            print('Seems to have been served an html page instead of a midi file. Continuing with next file.')
                        elif 'RIFF' in data_midi[0:9]:
                            print('Seems to have been served an RIFF file instead of a midi file. Continuing with next file.')
                        else:
                            # 'wb' (was 'w'): MIDI data is binary; text mode
                            # would corrupt it where newline translation applies.
                            with open(localpath, 'wb') as f:
                                f.write(data_midi)
                    except Exception:
                        # was bare `except:` — do not swallow KeyboardInterrupt/SystemExit
                        print('Failed to fetch {}'.format(link))
            except Exception:
                print('The composer ' + composer + ' is not available')

with open(os.path.join(datadir, 'do-not-redownload.txt'), 'w') as f:
    f.write('This directory is considered completely downloaded.')


# Anglo-Saxon pitch letter -> solfege name, shared by the two helpers below
# (previously duplicated inside each helper).
_NOTES_DICT = {'A': 'La', 'B': 'Si', 'C': 'Do', 'D': 'Re', 'E': 'Mi', 'F': 'Fa', 'G': 'Sol'}


def _get_note_name_without_octave(fullNoteOctave):
    """Return the solfege name of a note, dropping the octave digit.

    `fullNoteOctave` is letter + octave digit + optional alteration,
    e.g. 'F3#' -> 'Fa#', 'A2' -> 'La'.
    """
    if len(fullNoteOctave) == 3:
        # Three characters: letter + octave + alteration (e.g. F3#).
        return _NOTES_DICT[fullNoteOctave[0]] + fullNoteOctave[2]
    # Two characters: letter + octave (e.g. F3).
    return _NOTES_DICT[fullNoteOctave[0]]


def _get_note_name_with_octave(fullNoteOctave):
    """Return the solfege name of a note, keeping the octave digit.

    'F3#' -> 'Fa#3', 'A2' -> 'La2' (the alteration, when present, is
    placed before the octave).
    """
    if len(fullNoteOctave) == 3:
        return _NOTES_DICT[fullNoteOctave[0]] + fullNoteOctave[2] + fullNoteOctave[1]
    return _NOTES_DICT[fullNoteOctave[0]] + fullNoteOctave[1]


def _convert_note_into_grade(chord_tuple):
    """Convert a chord such as (F4#, C5#, F5, A4) into scale grades
    (e.g. II, III, I, IV) so music is normalized and harmonic structure
    may be learned.

    NOTE(review): unfinished stub — `self` is undefined in this
    module-level function, so calling it raises NameError. Confirm
    whether it was meant to be a method of the Read class.
    """
    tonality = self.get_tonality()
897 | # In this way, music is normalized and harmonic structure may be learned. 898 | 899 | tonality = self.get_tonality() 900 | 901 | def _apply_general_duration(prop_duration): 902 | # These are the only possibilities allowed in terms of time 903 | map_duration = [1,2,3,4,6,8,12,16,24,32] 904 | return map_duration[np.argmin(np.abs(np.array(map_duration)-prop_duration))] 905 | 906 | def _clean_tuple_of_alterations(tuple_element): 907 | return tuple_element.replace("+", "") 908 | 909 | 910 | if __name__ == "__main__": 911 | 912 | name_file_midi = '../../scores/Schubert_S560_Schwanengesang_no7.csv' 913 | name_file_midi = '../../scores/Brahms_symphony_2_2.csv' # Si M 914 | name_file_midi = '../../scores/Chopin_Etude_Op_10_n_5.csv' 915 | name_file_midi = '../../scores/Bach_Cello_Suite_No_1.csv' 916 | name_file_midi = '../../scores/Gymnopedie_No_1.csv' 917 | name_file_midi = '../../scores/Brahms_symphony_2_1.csv' # Very Slow 918 | name_file_midi = '../../scores/Bach-Partita_No1_in_Bb_BWV825_7Gigue.csv' 919 | name_file_midi = '../../scores/Schuber_Impromptu_D_899_No_3.csv' 920 | name_file_midi = '../../scores/Albeniz_Asturias.csv' 921 | name_file_midi = '../../scores/Chopin_Etude_Op_10_n_1.csv' 922 | name_file_midi = '../../scores/Debussy_Claire_de_Lune.csv' 923 | name_file_midi = '../../scores/Mozart_Sonata_16.csv' 924 | #name_file_midi = '../../scores/Beethoven_Moonlight_Sonata_third_movement.csv' 925 | #name_file_midi = '../../scores/Schubert_Piano_Trio_2nd_Movement.csv' 926 | 927 | musical_piece = Read(name_file_midi) 928 | 929 | #print(musical_piece.granular_music_df.groupby('start_ticks').size()) 930 | print('holaaaaaaa') 931 | musical_piece.apply_tonality() 932 | musical_piece.sort_chord_df() 933 | #musical_piece.enrich_grades_with_duration() 934 | #print(musical_piece.get_notes_dictionary()) 935 | #print(len(musical_piece.get_notes_dictionary())) 936 | # musical_piece.convert_tonality_to_music_dict() 937 | 938 | 939 | 940 | 941 | 942 | 
-------------------------------------------------------------------------------- /notebook/Learning LSTM.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "'''\n", 12 | "A Recurrent Neural Network (LSTM) implementation example using TensorFlow..\n", 13 | "Next word prediction after n_input words learned from text file.\n", 14 | "A story is automatically generated if the predicted word is fed back as input.\n", 15 | "Author: Rowel Atienza\n", 16 | "Project: https://github.com/roatienza/Deep-Learning-Experiments\n", 17 | "'''\n", 18 | "\n", 19 | "from __future__ import print_function\n", 20 | "\n", 21 | "import numpy as np\n", 22 | "import pandas as pd\n", 23 | "import tensorflow as tf\n", 24 | "from tensorflow.contrib import rnn\n", 25 | "import random\n", 26 | "import collections\n", 27 | "import time" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 2, 33 | "metadata": { 34 | "collapsed": true 35 | }, 36 | "outputs": [], 37 | "source": [ 38 | "# Read musical data\n", 39 | "music_chord = pd.read_csv('../tmp/Chopin_Etude_Op_10_n_1_grades_chords.csv')" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 3, 45 | "metadata": {}, 46 | "outputs": [ 47 | { 48 | "name": "stdout", 49 | "output_type": "stream", 50 | "text": [ 51 | "Loaded training data...\n" 52 | ] 53 | } 54 | ], 55 | "source": [ 56 | "start_time = time.time()\n", 57 | "def elapsed(sec):\n", 58 | " if sec<60:\n", 59 | " return str(sec) + \" sec\"\n", 60 | " elif sec<(60*60):\n", 61 | " return str(sec/60) + \" min\"\n", 62 | " else:\n", 63 | " return str(sec/(60*60)) + \" hr\"\n", 64 | "\n", 65 | "\n", 66 | "# Target log path\n", 67 | "logs_path = '../tmp'\n", 68 | "writer = tf.summary.FileWriter(logs_path)\n", 69 | "\n", 70 | "# training_data = 
read_data(training_file)\n", 71 | "\n", 72 | "\n", 73 | "training_data = music_chord['grades']\n", 74 | "print(\"Loaded training data...\")\n", 75 | "\n", 76 | "def build_dataset(words):\n", 77 | " # A very sad way of creating a dictionary\n", 78 | " count = np.unique(words)\n", 79 | " dictionary = dict()\n", 80 | " for word in count:\n", 81 | " dictionary[word] = len(dictionary)\n", 82 | " reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n", 83 | " return dictionary, reverse_dictionary" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": 4, 89 | "metadata": { 90 | "collapsed": true 91 | }, 92 | "outputs": [], 93 | "source": [ 94 | "dictionary, reverse_dictionary = build_dataset(training_data)\n", 95 | "vocab_size = len(dictionary)" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 5, 101 | "metadata": { 102 | "collapsed": true 103 | }, 104 | "outputs": [], 105 | "source": [ 106 | "# Parameters\n", 107 | "learning_rate = 0.001\n", 108 | "training_iters = 50000\n", 109 | "display_step = 1000\n", 110 | "n_input = 10\n", 111 | "\n", 112 | "# number of units in RNN cell\n", 113 | "n_hidden = 512\n", 114 | "\n", 115 | "# tf Graph input\n", 116 | "x = tf.placeholder(\"float\", [None, n_input, 1])\n", 117 | "y = tf.placeholder(\"float\", [None, vocab_size])\n", 118 | "\n", 119 | "# RNN output node weights and biases\n", 120 | "weights = {\n", 121 | " 'out': tf.Variable(tf.random_normal([n_hidden, vocab_size]))\n", 122 | "}\n", 123 | "biases = {\n", 124 | " 'out': tf.Variable(tf.random_normal([vocab_size]))\n", 125 | "}\n", 126 | "\n", 127 | "def RNN(x, weights, biases):\n", 128 | " \n", 129 | " # reshape to [1, n_input]\n", 130 | " x = tf.reshape(x, [-1, n_input])\n", 131 | " print(x)\n", 132 | "\n", 133 | " # Generate a n_input-element sequence of inputs\n", 134 | " # (eg. 
[had] [a] [general] -> [20] [6] [33])\n", 135 | " x = tf.split(x,n_input,1)\n", 136 | " print(x)\n", 137 | "\n", 138 | " # 2-layer LSTM, each layer has n_hidden units.\n", 139 | " # Average Accuracy= 95.20% at 50k iter\n", 140 | " rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden),rnn.BasicLSTMCell(n_hidden)])\n", 141 | " print(rnn_cell)\n", 142 | " \n", 143 | " # 1-layer LSTM with n_hidden units but with lower accuracy.\n", 144 | " # Average Accuracy= 90.60% 50k iter\n", 145 | " # Uncomment line below to test but comment out the 2-layer rnn.MultiRNNCell above\n", 146 | " # rnn_cell = rnn.BasicLSTMCell(n_hidden)\n", 147 | "\n", 148 | " # generate prediction\n", 149 | " outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)\n", 150 | " print(outputs)\n", 151 | "\n", 152 | " # there are n_input outputs but\n", 153 | " # we only want the last output\n", 154 | " return tf.matmul(outputs[-1], weights['out']) + biases['out']" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 6, 160 | "metadata": {}, 161 | "outputs": [ 162 | { 163 | "name": "stdout", 164 | "output_type": "stream", 165 | "text": [ 166 | "Tensor(\"Reshape:0\", shape=(?, 10), dtype=float32)\n", 167 | "[, , , , , , , , , ]\n", 168 | "\n", 169 | "[, , , , , , , , , ]\n" 170 | ] 171 | } 172 | ], 173 | "source": [ 174 | "pred = RNN(x, weights, biases)" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": 7, 180 | "metadata": {}, 181 | "outputs": [ 182 | { 183 | "name": "stdout", 184 | "output_type": "stream", 185 | "text": [ 186 | "WARNING:tensorflow:From :2: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n", 187 | "Instructions for updating:\n", 188 | "\n", 189 | "Future major versions of TensorFlow will allow gradients to flow\n", 190 | "into the labels input on backprop by default.\n", 191 | "\n", 192 | "See @{tf.nn.softmax_cross_entropy_with_logits_v2}.\n", 193 | 
"\n" 194 | ] 195 | } 196 | ], 197 | "source": [ 198 | "# Loss and optimizer\n", 199 | "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n", 200 | "optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)\n", 201 | "\n", 202 | "# Model evaluation\n", 203 | "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n", 204 | "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n", 205 | "\n", 206 | "# Initializing the variables\n", 207 | "init = tf.global_variables_initializer()" 208 | ] 209 | }, 210 | { 211 | "cell_type": "code", 212 | "execution_count": 8, 213 | "metadata": {}, 214 | "outputs": [ 215 | { 216 | "name": "stdout", 217 | "output_type": "stream", 218 | "text": [ 219 | "Iter= 1000, Average Loss= 7.526896, Average Accuracy= 0.20%\n", 220 | "[\"('X', 'V2', 'V1')\", \"('V2', 'IV4', 'V1')\", \"('VII3', 'V2', 'V1')\", \"('V2', 'II4', 'V1')\", \"('V2', 'X', 'V1')\", \"('IV3', 'V2', 'V1')\", \"('VII2', 'V2', 'V1')\", \"('V2', 'V1')\", \"('X', 'V2', 'V1')\", \"('III3', 'V2', 'V1')\"] - [('V2', 'V3', 'V1')] vs [('II5', 'V2', 'V1')]\n", 221 | "Iter= 2000, Average Loss= 5.678412, Average Accuracy= 1.60%\n", 222 | "[\"('III1', 'III2', 'III6')\", \"('III1', 'III2', 'VII5')\", \"('III1', 'X', 'III2')\", \"('III1', 'III2', 'VII6')\", \"('III3', 'III2', 'III7')\", \"('III3', 'III2', 'VII6')\", \"('X', 'III3', 'III2')\", \"('III3', 'III2', 'VII5')\", \"('III3', 'III2', 'III6')\", \"('III3', 'III2', 'VII5')\"] - [('X', 'III3', 'III2')] vs [('II6', 'V2', 'V1')]\n", 223 | "Iter= 3000, Average Loss= 5.295520, Average Accuracy= 2.70%\n", 224 | "[\"('IV6', 'VI1', 'VI2')\", \"('I6', 'VI1', 'VI2')\", \"('IV6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I7')\", \"('III7', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I7')\", \"('X', 'VI1', 'VI2')\", \"('I6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III6')\", \"('I6', 'VI1', 'VI2')\"] - [('VI1', 'VI2', 'X')] vs [('I5', 'V2', 'V1')]\n", 225 | "Iter= 4000, Average Loss= 5.307717, 
Average Accuracy= 3.90%\n", 226 | "[\"('II2', 'VI6', 'II1')\", \"('II2', 'II1', 'X')\", \"('II2', 'II1', 'I6')\", \"('II2', 'II1', 'II6')\", \"('II2', 'VI5', 'II1')\", \"('II2', 'II1', 'X')\", \"('II2', 'II1', 'I5')\", \"('II2', 'II1', 'II5')\", \"('II2', 'VI4', 'II1')\", \"('II2', 'II1', 'X')\"] - [('II2', 'II1', 'I4')] vs [('I2', 'I3', 'I5')]\n", 227 | "Iter= 5000, Average Loss= 5.041913, Average Accuracy= 4.70%\n", 228 | "[\"('III1', 'III2', 'VII4')\", \"('III1', 'III2', 'III5')\", \"('III1', 'X', 'III2')\", \"('III1', 'III2', 'III6')\", \"('III1', 'III2', 'VII5')\", \"('III1', 'III2', 'III6')\", \"('III1', 'X', 'III2')\", \"('III1', 'III2', 'III7')\", \"('III1', 'X', 'III2')\", \"('III1', 'III2', 'III6')\"] - [('III1', 'III2', 'VII5')] vs [('V5', 'V2', 'V1')]\n", 229 | "Iter= 6000, Average Loss= 5.117774, Average Accuracy= 4.30%\n", 230 | "[\"('III1', 'III2', 'II6')\", \"('III1', 'III2', 'VII5')\", \"('III1', 'III2', 'III5')\", \"('III1', 'III2', 'V5')\", \"('III1', 'III2', 'II5')\", \"('III1', 'III2', 'VII4')\", \"('III1', 'III2', 'III4')\", \"('VI4', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I5')\", \"('V5', 'VI1', 'VI2')\"] - [('III5', 'VI1', 'VI2')] vs [('III3', 'VI1', 'VI2')]\n", 231 | "Iter= 7000, Average Loss= 4.843269, Average Accuracy= 6.30%\n", 232 | "[\"('VI4', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III4')\", \"('VI1', 'VI2', 'VI3')\", \"('X', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'VI3')\", \"('III3', 'VI1', 'VI2')\", \"('VI1', 'VI2')\", \"('II2',)\", \"('II2', 'VI2')\", \"('II2', 'II3')\"] - [('II2', 'X')] vs [('IV6', 'V2', 'V1')]\n", 233 | "Iter= 8000, Average Loss= 4.974282, Average Accuracy= 6.90%\n", 234 | "[\"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'I4')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'I3', 'X')\", \"('IV2', 'IV1')\", \"('IV2', 'I3', 'IV1')\", \"('IV2', 'X', 'IV1')\"] - [('IV2', 'IV1', 'VI3')] vs [('X', 'VI1', 'VI2')]\n", 235 | "Iter= 9000, Average Loss= 4.628155, Average Accuracy= 10.30%\n", 236 | 
"[\"('III1', 'VII3', 'III2')\", \"('III1', 'III2', 'III4')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'III2', 'X')\", \"('III1', 'VII2', 'III2')\", \"('VI1', 'VI2')\", \"('III3', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'VI3')\", \"('X', 'VI1', 'VI2')\", \"('V4', 'VI1', 'VI2')\"] - [('VI1', 'VI2', 'III4')] vs [('IV2', 'IV1', 'I4')]\n", 237 | "Iter= 10000, Average Loss= 4.818898, Average Accuracy= 8.90%\n", 238 | "[\"('II3', 'X', 'X')\", \"('VII3', 'X', 'X')\", \"('X', 'II4', 'X')\", \"('X', 'III4', 'X')\", \"('X', 'II4', 'X')\", \"('VII4', 'X', 'X')\", \"('II5', 'X', 'X')\", \"('III5', 'X', 'X')\", \"('II5', 'X', 'X')\", \"('X', 'VII5', 'X')\"] - [('II6', 'X', 'X')] vs [('V2', 'II4', 'V1')]\n", 239 | "Iter= 11000, Average Loss= 4.873021, Average Accuracy= 8.10%\n", 240 | "[\"('I4', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III4')\", \"('I4', 'VI1', 'VI2')\", \"('X', 'VI1', 'VI2')\", \"('I3', 'VI1', 'VI2')\", \"('V2', 'V1')\", \"('I3', 'V2', 'V1')\", \"('V2', 'V3', 'V1')\", \"('V2', 'I4', 'V1')\", \"('V2', 'II4', 'V1')\"] - [('V2', 'I4', 'V1')] vs [('I5', 'V2', 'V1')]\n", 241 | "Iter= 12000, Average Loss= 4.766455, Average Accuracy= 10.90%\n", 242 | "[\"('II2', 'II1', 'I4')\", \"('II2', 'II1', 'II4')\", \"('II2', 'II1', 'VI3')\", \"('II2', 'X', 'II1')\", \"('II2', 'I3', 'II1')\", \"('V2', 'V1')\", \"('I3', 'V2', 'V1')\", \"('IV3', 'V2', 'V1')\", \"('V2', 'X', 'V1')\", \"('V2', 'II4', 'V1')\"] - [('V2', 'I4', 'V1')] vs [('V2', 'II4', 'V1')]\n", 243 | "Iter= 13000, Average Loss= 4.667726, Average Accuracy= 10.70%\n", 244 | "[\"('I2', 'I3', 'I5')\", \"('I2', 'I3', 'V5')\", \"('I2', 'I3', 'I6')\", \"('I2', 'I3', 'III6')\", \"('I2', 'I3', 'I6')\", \"('I2', 'I3', 'V6')\", \"('I2', 'I3', 'I7')\", \"('I2', 'I3', 'III7')\", \"('I2', 'I3', 'I7')\", \"('I2', 'I3', 'V6')\"] - [('I2', 'I3', 'I6')] vs [('I2', 'I3', 'I5')]\n", 245 | "Iter= 14000, Average Loss= 4.610355, Average Accuracy= 10.90%\n", 246 | "[\"('I2', 'V5')\", \"('I2', 'I5')\", \"('V2', 'X', 'V1')\", \"('VI5', 'V2', 'V1')\", 
\"('X', 'V2', 'V1')\", \"('I5', 'V2', 'V1')\", \"('X', 'V2', 'V1')\", \"('VI4', 'V2', 'V1')\", \"('V2', 'X', 'V1')\", \"('V2', 'I4', 'V1')\"] - [('II5', 'V2', 'V1')] vs [('V2', 'VII5', 'V1')]\n", 247 | "Iter= 15000, Average Loss= 4.244154, Average Accuracy= 16.60%\n", 248 | "[\"('IV2', 'VII3', 'IV1')\", \"('IV2', 'IV1', 'VI3')\", \"('IV2', 'VII2', 'IV1')\", \"('III1', 'III2')\", \"('III1', 'VII2', 'III2')\", \"('III1', 'III2', 'X')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'III2', 'III4')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'X', 'III2')\"] - [('III1', 'III2', 'VII4')] vs [('III1', 'III2', 'VII4')]\n", 249 | "Iter= 16000, Average Loss= 4.299348, Average Accuracy= 18.70%\n", 250 | "[\"('I4', 'VI1', 'VI2')\", \"('IV4', 'VI1', 'VI2')\", \"('I4', 'VI1', 'VI2')\", \"('IV4', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I5')\", \"('VI1', 'VI2', 'IV5')\", \"('VI1', 'VI2', 'I5')\", \"('VI1', 'VI2', 'IV5')\", \"('I6', 'VI1', 'VI2')\", \"('IV6', 'VI1', 'VI2')\"] - [('I6', 'VI1', 'VI2')] vs [('X', 'V2', 'V1')]\n", 251 | "Iter= 17000, Average Loss= 4.541798, Average Accuracy= 15.40%\n", 252 | "[\"('III1', 'III2', 'II6')\", \"('III1', 'III2', 'VII5')\", \"('III1', 'V6', 'III2')\", \"('III1', 'VI6', 'III2')\", \"('II2', 'II1', 'II7')\", \"('II2', 'VI6', 'II1')\", \"('II2', 'II1', 'X')\", \"('II2', 'II1', 'I6')\", \"('II2', 'II1', 'II6')\", \"('II2', 'VI5', 'II1')\"] - [('II2', 'II1', 'X')] vs [('II2', 'I5')]\n", 253 | "Iter= 18000, Average Loss= 4.455303, Average Accuracy= 16.40%\n", 254 | "[\"('III1', 'X', 'III2')\", \"('III1', 'III2', 'III5')\", \"('III1', 'III2', 'VII4')\", \"('III1', 'III2', 'III5')\", \"('III1', 'X', 'III2')\", \"('III1', 'III2', 'III4')\", \"('III1', 'VII3', 'III2')\", \"('II2', 'II1', 'IV4')\", \"('II2', 'VII3', 'II1')\", \"('II2', 'II1', 'V3')\"] - [('II2', 'II3', 'II1')] vs [('II2', 'VI4')]\n", 255 | "Iter= 19000, Average Loss= 4.394896, Average Accuracy= 18.40%\n", 256 | "[\"('III1', 'III2', 'II5')\", \"('III1', 'III2', 'VII4')\", \"('III1', 'III2', 
'III4')\", \"('VI4', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I5')\", \"('V5', 'VI1', 'VI2')\", \"('III5', 'VI1', 'VI2')\", \"('VI5', 'VI1', 'VI2')\", \"('I6', 'VI1', 'VI2')\", \"('V6', 'VI1', 'VI2')\"] - [('VI1', 'VI2', 'III6')] vs [('X', 'X', 'X')]\n", 257 | "Iter= 20000, Average Loss= 4.145183, Average Accuracy= 19.10%\n", 258 | "[\"('X', 'VI1', 'VI2')\", \"('VI5', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III6')\", \"('VI6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'X')\", \"('VI6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III6')\", \"('VI5', 'VI1', 'VI2')\", \"('X', 'VI1', 'VI2')\", \"('VI5', 'VI1', 'VI2')\"] - [('III5', 'VI1', 'VI2')] vs [('VII2', 'VII1', 'X')]\n", 259 | "Iter= 21000, Average Loss= 4.153001, Average Accuracy= 22.60%\n", 260 | "[\"('X', 'X', 'X')\", \"('X', 'X', 'I5')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'I4')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'I3', 'X')\"] - [('IV2', 'IV1')] vs [('X', 'X', 'VI3')]\n", 261 | "Iter= 22000, Average Loss= 3.563530, Average Accuracy= 30.30%\n", 262 | "[\"('IV2', 'IV1', 'I7')\", \"('IV2', 'IV1', 'X')\", \"('IV2', 'IV1', 'VII6')\", \"('IV2', 'VI6', 'IV1')\", \"('IV2', 'IV1', 'VII5')\", \"('IV2', 'IV1', 'X')\", \"('IV2', 'IV1', 'VII5')\", \"('IV2', 'VI5', 'IV1')\", \"('IV2', 'IV1', 'VII4')\", \"('IV2', 'IV1', 'X')\"] - [('IV2', 'IV1', 'VII4')] vs [('IV2', 'VI6', 'IV1')]\n", 263 | "Iter= 23000, Average Loss= 4.032217, Average Accuracy= 25.80%\n", 264 | "[\"('I2', 'I3', 'V6')\", \"('I2', 'I3', 'I6')\", \"('I2', 'I3', 'III6')\", \"('I2', 'I3', 'I6')\", \"('I2', 'I3', 'V5')\", \"('I2', 'I3', 'I5')\", \"('I2', 'I3', 'III5')\", \"('I2', 'I3', 'I5')\", \"('I2', 'I3', 'V4')\", \"('I2', 'I3', 'I4')\"] - [('I2', 'I3', 'III4')] vs [('IV2', 'IV1')]\n", 265 | "Iter= 24000, Average Loss= 3.872265, Average Accuracy= 27.60%\n", 266 | "[\"('I2', 'I3', 'V5')\", \"('I2', 'I3', 'I5')\", \"('I2', 'I3', 'III5')\", \"('I2', 'I3', 'I5')\", \"('I2', 'I3', 'V4')\", \"('I2', 'I3', 
'I4')\", \"('I2', 'I3', 'III4')\", \"('I2', 'I3', 'I4')\", \"('I2', 'I3', 'V3')\", \"('I2', 'I3')\"] - [('VI1', 'VI2')] vs [('I2', 'I3', 'III4')]\n" 267 | ] 268 | }, 269 | { 270 | "name": "stdout", 271 | "output_type": "stream", 272 | "text": [ 273 | "Iter= 25000, Average Loss= 3.795834, Average Accuracy= 30.40%\n", 274 | "[\"('X', 'X', 'I5')\", \"('X', 'VI4', 'X')\", \"('X', 'X', 'I4')\", \"('X', 'X', 'II4')\", \"('X', 'X', 'I4')\", \"('X', 'X', 'VI3')\", \"('X', 'I3', 'X')\", \"('V2', 'V1')\", \"('VII2', 'V2', 'V1')\", \"('V2', 'V3', 'V1')\"] - [('VII3', 'V2', 'V1')] vs [('IV2', 'IV1', 'I6')]\n", 275 | "Iter= 26000, Average Loss= 3.738593, Average Accuracy= 30.30%\n", 276 | "[\"('X', 'V2', 'V1')\", \"('V2', 'VII6', 'V1')\", \"('IV6', 'V2', 'V1')\", \"('II6', 'V2', 'V1')\", \"('V5', 'V2', 'V1')\", \"('V2', 'VII5', 'V1')\", \"('IV5', 'V2', 'V1')\", \"('II5', 'V2', 'V1')\", \"('V4', 'V2', 'V1')\", \"('VII4', 'V2', 'V1')\"] - [('V2', 'IV4', 'V1')] vs [('V2', 'X', 'V1')]\n", 277 | "Iter= 27000, Average Loss= 3.803202, Average Accuracy= 31.20%\n", 278 | "[\"('I2', 'I3', 'X')\", \"('I2', 'I3', 'II4')\", \"('I2', 'I3', 'IV4')\", \"('I2', 'I3', 'VII3')\", \"('I2', 'I3', 'X')\", \"('I2',)\", \"('I2', 'I3', 'II3')\", \"('I2', 'I3')\", \"('I2', 'V3')\", \"('I2', 'I4')\"] - [('I2', 'III4')] vs [('I2', 'X')]\n", 279 | "Iter= 28000, Average Loss= 4.047772, Average Accuracy= 28.80%\n", 280 | "[\"('IV2', 'VII3', 'IV1')\", \"('IV2', 'IV1', 'VI3')\", \"('IV2', 'VII2', 'IV1')\", \"('III1', 'III2')\", \"('III1', 'VII2', 'III2')\", \"('III1', 'III2', 'X')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'III2', 'III4')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'X', 'III2')\"] - [('III1', 'III2', 'VII4')] vs [('IV2', 'IV1', 'X')]\n", 281 | "Iter= 29000, Average Loss= 3.829003, Average Accuracy= 31.90%\n", 282 | "[\"('I2', 'I3', 'III5')\", \"('I2', 'I3', 'I5')\", \"('I2', 'I3', 'V4')\", \"('I2', 'I3', 'I4')\", \"('I2', 'I3', 'III4')\", \"('I2', 'I3', 'I4')\", \"('I2', 'I3', 'V3')\", 
\"('I2', 'I3')\", \"('VI1', 'VI2')\", \"('I3', 'VI1', 'VI2')\"] - [('IV3', 'VI1', 'VI2')] vs [('X', 'X', 'X')]\n", 283 | "Iter= 30000, Average Loss= 3.011989, Average Accuracy= 41.20%\n", 284 | "[\"('IV2', 'IV1', 'IV6')\", \"('IV2', 'IV1', 'I6')\", \"('IV2', 'VI6', 'IV1')\", \"('IV2', 'IV1', 'I7')\", \"('X', 'X', 'III7')\", \"('X', 'X', 'I7')\", \"('X', 'VI6', 'X')\", \"('X', 'X', 'I6')\", \"('X', 'X', 'III6')\", \"('X', 'X', 'I6')\"] - [('X', 'VI5', 'X')] vs [('VII1',)]\n", 285 | "Iter= 31000, Average Loss= 2.626084, Average Accuracy= 52.80%\n", 286 | "[\"('IV2', 'VI5', 'IV1')\", \"('IV2', 'IV1', 'III6')\", \"('IV2', 'IV1', 'I6')\", \"('VII2', 'VII1', 'II6')\", \"('VII2', 'VII1', 'VI5')\", \"('VII2', 'VII1', 'IV5')\", \"('VII2', 'VII1', 'VII4')\", \"('VII2', 'VII1', 'II5')\", \"('VI4', 'VII2', 'VII1')\", \"('VII2', 'VII1', 'IV4')\"] - [('VII3', 'VII2', 'VII1')] vs [('IV3', 'VII1')]\n", 287 | "Iter= 32000, Average Loss= 3.669053, Average Accuracy= 33.60%\n", 288 | "[\"('I2', 'VII4')\", \"('I2', 'V4')\", \"('I2', 'I5')\", \"('I2', 'III5')\", \"('I2', 'VII5')\", \"('I2', 'V5')\", \"('I2', 'I6')\", \"('I2', 'III6')\", \"('IV2', 'IV1', 'VII6')\", \"('IV2', 'IV1', 'III6')\"] - [('IV2', 'IV1', 'I6')] vs [('IV2', 'IV1', 'I6')]\n", 289 | "Iter= 33000, Average Loss= 2.987713, Average Accuracy= 44.10%\n", 290 | "[\"('VII2', 'VII1', 'X')\", \"('VII3', 'VII2', 'VII1')\", \"('VII2', 'VII1', 'X')\", \"('VII2', 'VII1', 'X')\", \"('VII2', 'VII1', 'X')\", \"('VII2', 'VII1')\", \"('X',)\", \"('X', 'X')\", \"('X', 'IV3')\", \"('X', 'X')\"] - [('X', 'II4')] vs [('VII2', 'VII1', 'X')]\n", 291 | "Iter= 34000, Average Loss= 2.996353, Average Accuracy= 45.50%\n", 292 | "[\"('II2', 'II3', 'VI5')\", \"('II2', 'II3', 'III5')\", \"('II2', 'II3', 'V5')\", \"('II2', 'II3', 'I5')\", \"('II2', 'II3', 'VI4')\", \"('II2', 'II3', 'III4')\", \"('II2', 'II3', 'V4')\", \"('II2', 'II3', 'I4')\", \"('II2', 'II3', 'VI3')\", \"('II2', 'II3', 'III3')\"] - [('II2', 'II3', 'X')] vs [('II2', 'II1', 'I4')]\n", 
293 | "Iter= 35000, Average Loss= 2.540326, Average Accuracy= 55.70%\n", 294 | "[\"('II4', 'VI1', 'VI2')\", \"('VII3', 'VI1', 'VI2')\", \"('II3', 'VI1', 'VI2')\", \"('X', 'X')\", \"('II3', 'X', 'X')\", \"('VII3', 'X', 'X')\", \"('X', 'II4', 'X')\", \"('X', 'III4', 'X')\", \"('X', 'II4', 'X')\", \"('VII4', 'X', 'X')\"] - [('II5', 'X', 'X')] vs [('IV3', 'V2', 'V1')]\n", 295 | "Iter= 36000, Average Loss= 2.737323, Average Accuracy= 51.10%\n", 296 | "[\"('I6', 'VI1', 'VI2')\", \"('IV6', 'VI1', 'VI2')\", \"('I6', 'VI1', 'VI2')\", \"('IV6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I7')\", \"('III7', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I7')\", \"('X', 'VI1', 'VI2')\", \"('I6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III6')\"] - [('I6', 'VI1', 'VI2')] vs [('I6', 'VI1', 'VI2')]\n", 297 | "Iter= 37000, Average Loss= 3.401993, Average Accuracy= 44.00%\n", 298 | "[\"('X', 'X', 'VII5')\", \"('III1', 'III2', 'II6')\", \"('III1', 'III2', 'VII5')\", \"('III1', 'V6', 'III2')\", \"('III1', 'VI6', 'III2')\", \"('II2', 'II1', 'II7')\", \"('II2', 'VI6', 'II1')\", \"('II2', 'II1', 'X')\", \"('II2', 'II1', 'I6')\", \"('II2', 'II1', 'II6')\"] - [('II2', 'VI5', 'II1')] vs [('II2', 'VI5', 'II1')]\n", 299 | "Iter= 38000, Average Loss= 3.142444, Average Accuracy= 42.30%\n", 300 | "[\"('V2', 'II4', 'V1')\", \"('V2', 'V3', 'V1')\", \"('VII3', 'V2', 'V1')\", \"('IV3', 'V2', 'V1')\", \"('II3', 'V2', 'V1')\", \"('V2', 'V1')\", \"('I2', 'I1')\", \"('I2', 'I1', 'V2')\", \"('I2', 'III3', 'I1')\", \"('I2', 'I1', 'V3')\"] - [('I2', 'I1', 'I4')] vs [('I2', 'I1', 'I4')]\n", 301 | "Iter= 39000, Average Loss= 3.270942, Average Accuracy= 40.90%\n", 302 | "[\"('I2', 'III5')\", \"('I2', 'I5')\", \"('I2', 'V5')\", \"('I2', 'I6')\", \"('I2', 'III6')\", \"('I2', 'I6')\", \"('I2', 'V5')\", \"('I2', 'I5')\", \"('V2', 'X', 'V1')\", \"('VI5', 'V2', 'V1')\"] - [('X', 'V2', 'V1')] vs [('X', 'V2', 'V1')]\n", 303 | "Iter= 40000, Average Loss= 3.017188, Average Accuracy= 44.00%\n", 304 | "[\"('IV2', 'VII3', 'IV1')\", \"('IV2', 
'IV1', 'VI3')\", \"('IV2', 'VII2', 'IV1')\", \"('III1', 'III2')\", \"('III1', 'VII2', 'III2')\", \"('III1', 'III2', 'X')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'III2', 'III4')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'X', 'III2')\"] - [('III1', 'III2', 'VII4')] vs [('IV2', 'VI6', 'IV1')]\n", 305 | "Iter= 41000, Average Loss= 2.976872, Average Accuracy= 45.60%\n", 306 | "[\"('IV4', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I5')\", \"('VI1', 'VI2', 'IV5')\", \"('VI1', 'VI2', 'I5')\", \"('VI1', 'VI2', 'IV5')\", \"('I6', 'VI1', 'VI2')\", \"('IV6', 'VI1', 'VI2')\", \"('I6', 'VI1', 'VI2')\", \"('IV6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I7')\"] - [('III7', 'VI1', 'VI2')] vs [('III7', 'VI1', 'VI2')]\n", 307 | "Iter= 42000, Average Loss= 2.712063, Average Accuracy= 47.80%\n", 308 | "[\"('VII2', 'V2', 'V1')\", \"('V2', 'V3', 'V1')\", \"('VII3', 'V2', 'V1')\", \"('V2', 'II4', 'V1')\", \"('VII3', 'V2', 'V1')\", \"('V4', 'V2', 'V1')\", \"('VII4', 'V2', 'V1')\", \"('X', 'X', 'II5')\", \"('X', 'X', 'VII4')\", \"('X', 'X', 'V5')\"] - [('X', 'X', 'VII5')] vs [('X', 'X', 'I6')]\n", 309 | "Iter= 43000, Average Loss= 2.789623, Average Accuracy= 46.80%\n", 310 | "[\"('III1', 'III2', 'VII4')\", \"('III1', 'III2', 'III5')\", \"('III1', 'X', 'III2')\", \"('III1', 'III2', 'III6')\", \"('III1', 'III2', 'VII5')\", \"('III1', 'III2', 'III6')\", \"('III1', 'X', 'III2')\", \"('III1', 'III2', 'III7')\", \"('III1', 'X', 'III2')\", \"('III1', 'III2', 'III6')\"] - [('III1', 'III2', 'VII5')] vs [('III1', 'X', 'III2')]\n", 311 | "Iter= 44000, Average Loss= 2.231489, Average Accuracy= 57.60%\n", 312 | "[\"('VII1', 'II6')\", \"('V6', 'III1', 'III2')\", \"('III1', 'III2', 'II6')\", \"('III1', 'III2', 'VII5')\", \"('III1', 'III2', 'III5')\", \"('III1', 'III2', 'V5')\", \"('III1', 'III2', 'II5')\", \"('III1', 'III2', 'VII4')\", \"('III1', 'III2', 'III4')\", \"('VI4', 'VI1', 'VI2')\"] - [('VI1', 'VI2', 'I5')] vs [('VI1', 'VI2', 'I5')]\n", 313 | "Iter= 45000, Average Loss= 3.152986, Average Accuracy= 
46.60%\n", 314 | "[\"('X', 'VI1', 'VI2')\", \"('VI5', 'VI1', 'VI2')\", \"('III5', 'VI1', 'VI2')\", \"('VI4', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'X')\", \"('VI4', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III4')\", \"('VI1', 'VI2', 'VI3')\", \"('X', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'VI3')\"] - [('III3', 'VI1', 'VI2')] vs [('VI1', 'VI2', 'III6')]\n", 315 | "Iter= 46000, Average Loss= 2.557790, Average Accuracy= 51.50%\n", 316 | "[\"('X', 'X', 'X')\", \"('X', 'X', 'I6')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'I5')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'X')\", \"('X', 'X', 'I4')\"] - [('X', 'X', 'X')] vs [('X', 'X', 'X')]\n", 317 | "Iter= 47000, Average Loss= 2.479185, Average Accuracy= 53.40%\n", 318 | "[\"('III1', 'III2', 'VII4')\", \"('III1', 'X', 'III2')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'III2', 'III4')\", \"('III1', 'VII3', 'III2')\", \"('III1', 'III2', 'X')\", \"('III1', 'VII2', 'III2')\", \"('VI1', 'VI2')\", \"('III3', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'VI3')\"] - [('X', 'VI1', 'VI2')] vs [('I2', 'I3', 'III6')]\n", 319 | "Iter= 48000, Average Loss= 2.321161, Average Accuracy= 55.10%\n", 320 | "[\"('VI1', 'VI2', 'III6')\", \"('IV6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III6')\", \"('VI1', 'VI2', 'I7')\", \"('III7', 'VI1', 'VI2')\", \"('VII2', 'VII1', 'IV7')\", \"('VII2', 'VII1', 'II7')\", \"('VII2', 'VII1', 'VII6')\", \"('VII2', 'VII1', 'II6')\", \"('VII2', 'VII1', 'IV6')\"] - [('VII2', 'VII1', 'II6')] vs [('I6', 'VI1', 'VI2')]\n" 321 | ] 322 | }, 323 | { 324 | "name": "stdout", 325 | "output_type": "stream", 326 | "text": [ 327 | "Iter= 49000, Average Loss= 2.186606, Average Accuracy= 58.10%\n", 328 | "[\"('III7', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I7')\", \"('X', 'VI1', 'VI2')\", \"('I6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III6')\", \"('I6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'X')\", \"('VI1', 'VI2', 'I5')\", \"('III5', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'I5')\"] - [('X', 'VI1', 'VI2')] vs [('X', 'VI1', 
'VI2')]\n", 329 | "Iter= 50000, Average Loss= 2.059569, Average Accuracy= 59.10%\n", 330 | "[\"('VII4', 'V2', 'V1')\", \"('X', 'X', 'II5')\", \"('X', 'X', 'VII4')\", \"('X', 'X', 'V5')\", \"('X', 'X', 'VII5')\", \"('III1', 'III2', 'II6')\", \"('III1', 'III2', 'VII5')\", \"('III1', 'V6', 'III2')\", \"('III1', 'VI6', 'III2')\", \"('II2', 'II1', 'II7')\"] - [('II2', 'VI6', 'II1')] vs [('II2', 'VI6', 'II1')]\n", 331 | "Optimization Finished!\n", 332 | "Elapsed time: 1.31881250415 hr\n", 333 | "Run on command line.\n", 334 | "\ttensorboard --logdir=../tmp\n", 335 | "Point your web browser to: http://localhost:6006/\n", 336 | "10 words: [\"('V2', 'II4', 'V1')\", \"('V2', 'V3', 'V1')\", \"('VII3', 'V2', 'V1')\", \"('IV3', 'V2', 'V1')\", \"('II3', 'V2', 'V1')\", \"('V2', 'V1')\", \"('I2', 'I1')\", \"('I2', 'I1', 'V2')\", \"('I2', 'III3', 'I1')\", \"('I2', 'I1', 'V3')\"]\n", 337 | "[\"('V2', 'II4', 'V1')\", \"('V2', 'V3', 'V1')\", \"('VII3', 'V2', 'V1')\", \"('IV3', 'V2', 'V1')\", \"('II3', 'V2', 'V1')\", \"('V2', 'V1')\", \"('I2', 'I1')\", \"('I2', 'I1', 'V2')\", \"('I2', 'III3', 'I1')\", \"('I2', 'I1', 'V3')\"]\n", 338 | "('I2', 'I1', 'I4')\n", 339 | "('I2', 'I1', 'V3')\n", 340 | "('I2', 'I1', 'III4')\n", 341 | "('I2', 'V4', 'I1')\n", 342 | "('I2', 'I1', 'I5')\n", 343 | "('I2', 'V4', 'I1')\n", 344 | "('I2', 'I1', 'III5')\n", 345 | "('I2', 'I1', 'V5')\n", 346 | "('I2', 'I1', 'I6')\n", 347 | "('I2', 'I1', 'V5')\n", 348 | "('I2', 'I1', 'III6')\n", 349 | "('I2', 'V6', 'I1')\n", 350 | "('I2', 'I1', 'I7')\n", 351 | "('I2', 'V4', 'I1')\n", 352 | "('V2', 'X', 'V1')\n", 353 | "('X', 'X', 'X')\n", 354 | "('IV2', 'IV1', 'I5')\n", 355 | "('IV2', 'IV1', 'III5')\n", 356 | "('IV2', 'IV1', 'I4')\n", 357 | "('IV2', 'IV1', 'III6')\n", 358 | "('IV2', 'IV1', 'I6')\n", 359 | "('X', 'X', 'X')\n", 360 | "('IV2', 'IV1', 'I5')\n", 361 | "('II3', 'V2', 'V1')\n", 362 | "('III3', 'III2', 'VII4')\n", 363 | "('X', 'X', 'VII4')\n", 364 | "('V4', 'V2', 'V1')\n", 365 | "('II3', 'V2', 'V1')\n", 366 | 
"('III3', 'III2', 'VII4')\n", 367 | "('I6', 'VI1', 'VI2')\n", 368 | "('I2', 'I3')\n", 369 | "('I2', 'I3')\n", 370 | "('I2', 'I1', 'V3')\n", 371 | "('I2', 'I1', 'I4')\n", 372 | "('I2', 'I1', 'III5')\n", 373 | "('I2', 'I1', 'VII4')\n", 374 | "('I2', 'V4', 'I1')\n", 375 | "('I2', 'I1', 'I4')\n", 376 | "('I2', 'V4', 'I1')\n", 377 | "('I2', 'I4')\n", 378 | "('I2', 'I3', 'I6')\n", 379 | "('I2', 'I3', 'I6')\n", 380 | "('I2', 'I3', 'V5')\n", 381 | "('I2', 'I3', 'X')\n", 382 | "('I2', 'I3', 'III6')\n", 383 | "('I2', 'I3', 'I4')\n", 384 | "('I2', 'I3', 'V4')\n", 385 | "('I2', 'I3', 'X')\n", 386 | "('I2', 'I3', 'III6')\n", 387 | "('I2', 'I3', 'I6')\n", 388 | "('I2', 'I3', 'V5')\n", 389 | "('I2', 'I3', 'X')\n", 390 | "('I2', 'I3', 'III6')\n", 391 | "('I2', 'I3', 'I6')\n", 392 | "('I2', 'I3', 'V4')\n", 393 | "('I2', 'I3', 'X')\n", 394 | "('I2', 'I3', 'III6')\n", 395 | "('I2', 'I3', 'I6')\n", 396 | "('I2', 'I3', 'V4')\n", 397 | "('I2', 'I3', 'X')\n", 398 | "('I2', 'I3', 'III6')\n", 399 | "('I2', 'I3', 'I6')\n", 400 | "('I2', 'I3', 'V4')\n", 401 | "('I2', 'I3', 'X')\n", 402 | "('I2', 'I3', 'III6')\n", 403 | "('I2', 'I3', 'I6')\n", 404 | "('I2', 'I3', 'V4')\n", 405 | "('I2', 'I3', 'X')\n", 406 | "('I2', 'I3', 'III6')\n", 407 | "('I2', 'I3', 'I6')\n", 408 | "('I2', 'I3', 'V4')\n", 409 | "('I2', 'I3', 'X')\n", 410 | "('I2', 'I3', 'III6')\n", 411 | "('I2', 'I3', 'I6')\n", 412 | "('I2', 'I3', 'V4')\n", 413 | "('I2', 'I3', 'X')\n", 414 | "('I2', 'I3', 'III6')\n", 415 | "('I2', 'I3', 'I6')\n", 416 | "('I2', 'I3', 'V4')\n", 417 | "('I2', 'I3', 'X')\n", 418 | "('I2', 'I3', 'III6')\n", 419 | "('I2', 'I3', 'I6')\n", 420 | "('I2', 'I3', 'V4')\n", 421 | "('I2', 'I3', 'X')\n", 422 | "('I2', 'I3', 'III6')\n", 423 | "('I2', 'I3', 'I6')\n", 424 | "('I2', 'I3', 'V4')\n", 425 | "('I2', 'I3', 'X')\n", 426 | "('I2', 'I3', 'III6')\n", 427 | "('I2', 'I3', 'I6')\n", 428 | "('I2', 'I3', 'V4')\n", 429 | "('I2', 'I3', 'X')\n", 430 | "('I2', 'I3', 'III6')\n", 431 | "('I2', 'I3', 'I6')\n", 432 | "('I2', 
'I3', 'V4')\n", 433 | "('I2', 'I3', 'X')\n", 434 | "('I2', 'I3', 'III6')\n", 435 | "('I2', 'I3', 'I6')\n", 436 | "('I2', 'I3', 'V4')\n", 437 | "('I2', 'I3', 'X')\n", 438 | "10 words: \"('VI1', 'VI2', 'III6')\", \"('IV6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III6')\", \"('VI1', 'VI2', 'I7')\", \"('III7', 'VI1', 'VI2')\", \"('VII2', 'VII1', 'IV7')\", \"('VII2', 'VII1', 'II7')\", \"('VII2', 'VII1', 'VII6')\", \"('VII2', 'VII1', 'II6')\", \"('VII2', 'VII1', 'IV6')\"\n", 439 | "(\"('VI1', 'VI2', 'III6')\", \"('IV6', 'VI1', 'VI2')\", \"('VI1', 'VI2', 'III6')\", \"('VI1', 'VI2', 'I7')\", \"('III7', 'VI1', 'VI2')\", \"('VII2', 'VII1', 'IV7')\", \"('VII2', 'VII1', 'II7')\", \"('VII2', 'VII1', 'VII6')\", \"('VII2', 'VII1', 'II6')\", \"('VII2', 'VII1', 'IV6')\")\n", 440 | "('V4', 'V2', 'V1')\n", 441 | "('VII2', 'VII1', 'VI3')\n", 442 | "('I6', 'VI1', 'VI2')\n", 443 | "('V2', 'X', 'V1')\n", 444 | "('I6', 'VI1', 'VI2')\n", 445 | "('V2', 'V3', 'V1')\n", 446 | "('VI1', 'VI2', 'I5')\n", 447 | "('V2', 'II4', 'V1')\n", 448 | "('V2', 'I4', 'V1')\n", 449 | "('V2', 'IV4', 'V1')\n", 450 | "('I5', 'V2', 'V1')\n", 451 | "('II5', 'V2', 'V1')\n", 452 | "('I6', 'VI1', 'VI2')\n", 453 | "('IV6', 'VI1', 'VI2')\n", 454 | "('I6', 'V2', 'V1')\n", 455 | "('II6', 'V2', 'V1')\n", 456 | "('VI1', 'VI2', 'I5')\n", 457 | "('V2', 'II4', 'V1')\n", 458 | "('V2', 'I4', 'V1')\n", 459 | "('V6', 'V2', 'V1')\n", 460 | "('X', 'V2', 'V1')\n", 461 | "('II5', 'V2', 'V1')\n", 462 | "('I5', 'V2', 'V1')\n", 463 | "('IV6', 'VI1', 'VI2')\n", 464 | "('I6', 'VI1', 'VI2')\n", 465 | "('II6', 'V2', 'V1')\n", 466 | "('I6', 'V2', 'V1')\n", 467 | "('III7', 'VI1', 'VI2')\n", 468 | "('VI1', 'VI2', 'I5')\n", 469 | "('V2', 'II4', 'V1')\n", 470 | "('V2', 'I4', 'V1')\n", 471 | "('V6', 'V2', 'V1')\n", 472 | "('V2', 'VII5', 'V1')\n", 473 | "('II6', 'V2', 'V1')\n", 474 | "('I5', 'V2', 'V1')\n", 475 | "('IV5', 'V2', 'V1')\n", 476 | "('X', 'V2', 'V1')\n", 477 | "('II6', 'V2', 'V1')\n", 478 | "('I6', 'V2', 'V1')\n", 479 | "('V5', 'V2', 
'V1')\n", 480 | "('X', 'V2', 'V1')\n", 481 | "('V2', 'II7', 'V1')\n", 482 | "('VI1', 'VI2', 'I5')\n", 483 | "('II6', 'V2', 'V1')\n", 484 | "('V2', 'VII5', 'V1')\n", 485 | "('II6', 'V2', 'V1')\n", 486 | "('V5', 'V2', 'V1')\n", 487 | "('IV5', 'V2', 'V1')\n", 488 | "('VII4', 'V2', 'V1')\n", 489 | "('V2', 'II4', 'V1')\n", 490 | "('VII2', 'V2', 'V1')\n", 491 | "('IV3', 'V2', 'V1')\n", 492 | "('VII2', 'V2', 'V1')\n", 493 | "('I6', 'VI1', 'VI2')\n", 494 | "('V6', 'VI1', 'VI2')\n", 495 | "('VI1', 'VI2', 'I5')\n", 496 | "('V2', 'V3', 'V1')\n", 497 | "('VI1', 'VI2', 'I5')\n", 498 | "('V4', 'V2', 'V1')\n", 499 | "('I5', 'V2', 'V1')\n", 500 | "('IV6', 'VI1', 'VI2')\n", 501 | "('I4', 'VI1', 'VI2')\n", 502 | "('V2', 'II4', 'V1')\n", 503 | "('I6', 'V2', 'V1')\n", 504 | "('V2', 'II4', 'V1')\n", 505 | "('VI1', 'VI2', 'I5')\n", 506 | "('V2', 'II4', 'V1')\n", 507 | "('V2', 'I4', 'V1')\n", 508 | "('V4', 'V2', 'V1')\n", 509 | "('V2', 'VII5', 'V1')\n", 510 | "('VI1', 'VI2', 'IV5')\n", 511 | "('I6', 'VI1', 'VI2')\n", 512 | "('IV6', 'VI1', 'VI2')\n", 513 | "('I6', 'VI1', 'VI2')\n", 514 | "('V2', 'V3', 'V1')\n", 515 | "('VI1', 'VI2', 'I5')\n", 516 | "('V2', 'II4', 'V1')\n", 517 | "('VI1', 'VI2', 'I5')\n", 518 | "('VI1', 'VI2', 'IV5')\n", 519 | "('I6', 'VI1', 'VI2')\n", 520 | "('IV6', 'VI1', 'VI2')\n", 521 | "('I6', 'VI1', 'VI2')\n", 522 | "('V2', 'II4', 'V1')\n", 523 | "('I6', 'V2', 'V1')\n", 524 | "('V2', 'II4', 'V1')\n", 525 | "('VI1', 'VI2', 'I5')\n", 526 | "('V2', 'II4', 'V1')\n", 527 | "('V2', 'I4', 'V1')\n", 528 | "('V4', 'V2', 'V1')\n", 529 | "('V2', 'VII5', 'V1')\n", 530 | "('VI1', 'VI2', 'IV5')\n", 531 | "('I6', 'VI1', 'VI2')\n", 532 | "('IV6', 'VI1', 'VI2')\n", 533 | "('I6', 'VI1', 'VI2')\n", 534 | "('V2', 'V3', 'V1')\n", 535 | "('VI1', 'VI2', 'I5')\n", 536 | "('V2', 'II4', 'V1')\n", 537 | "('VI1', 'VI2', 'I5')\n", 538 | "('VI1', 'VI2', 'IV5')\n", 539 | "('I6', 'VI1', 'VI2')\n" 540 | ] 541 | }, 542 | { 543 | "ename": "KeyboardInterrupt", 544 | "evalue": "", 545 | "output_type": 
"error", 546 | "traceback": [ 547 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 548 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", 549 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0mTrue\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0mprompt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"%s words: \"\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mn_input\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0mwords\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprompt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;32mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwords\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwords\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mn_input\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 550 | "\u001b[0;32m/usr/local/Cellar/ipython@5/5.6.0/libexec/vendor/lib/python2.7/site-packages/ipykernel/ipkernel.pyc\u001b[0m in \u001b[0;36m\u001b[0;34m(prompt)\u001b[0m\n\u001b[1;32m 174\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sys_eval_input\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbuiltin_mod\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 175\u001b[0m \u001b[0mbuiltin_mod\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraw_input\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraw_input\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 176\u001b[0;31m \u001b[0mbuiltin_mod\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minput\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0;32mlambda\u001b[0m \u001b[0mprompt\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m''\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0meval\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraw_input\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprompt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 177\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_save_getpass\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetpass\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 178\u001b[0m \u001b[0mgetpass\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetpass\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 551 | "\u001b[0;32m/usr/local/Cellar/ipython@5/5.6.0/libexec/vendor/lib/python2.7/site-packages/ipykernel/kernelbase.pyc\u001b[0m in \u001b[0;36mraw_input\u001b[0;34m(self, prompt)\u001b[0m\n\u001b[1;32m 702\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_ident\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 703\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_header\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 704\u001b[0;31m \u001b[0mpassword\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 705\u001b[0m )\n\u001b[1;32m 706\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", 552 | "\u001b[0;32m/usr/local/Cellar/ipython@5/5.6.0/libexec/vendor/lib/python2.7/site-packages/ipykernel/kernelbase.pyc\u001b[0m in \u001b[0;36m_input_request\u001b[0;34m(self, prompt, ident, parent, password)\u001b[0m\n\u001b[1;32m 732\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 733\u001b[0m \u001b[0;31m# re-raise 
KeyboardInterrupt, to truncate traceback\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 734\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 735\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 736\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 553 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 554 | ] 555 | } 556 | ], 557 | "source": [ 558 | "# Launch the graph\n", 559 | "with tf.Session() as session:\n", 560 | " session.run(init)\n", 561 | " step = 0\n", 562 | " offset = random.randint(0,n_input+1)\n", 563 | " end_offset = n_input + 1\n", 564 | " acc_total = 0\n", 565 | " loss_total = 0\n", 566 | "\n", 567 | " writer.add_graph(session.graph)\n", 568 | "\n", 569 | " while step < training_iters:\n", 570 | " # Generate a minibatch. Add some randomness on selection process.\n", 571 | " if offset > (len(training_data)-end_offset):\n", 572 | " offset = random.randint(0, n_input+1)\n", 573 | "\n", 574 | " symbols_in_keys = [ [dictionary[ str(training_data[i])]] for i in range(offset, offset+n_input) ]\n", 575 | " symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])\n", 576 | "\n", 577 | " symbols_out_onehot = np.zeros([vocab_size], dtype=float)\n", 578 | " symbols_out_onehot[dictionary[str(training_data[offset+n_input])]] = 1.0\n", 579 | " symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1])\n", 580 | "\n", 581 | " _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \\\n", 582 | " feed_dict={x: symbols_in_keys, y: symbols_out_onehot})\n", 583 | " loss_total += loss\n", 584 | " acc_total += acc\n", 585 | " if (step+1) % display_step == 0:\n", 586 | " print(\"Iter= \" + str(step+1) + \", Average Loss= \" + \\\n", 587 | " \"{:.6f}\".format(loss_total/display_step) + \", Average Accuracy= \" + \\\n", 588 | " 
\"{:.2f}%\".format(100*acc_total/display_step))\n", 589 | " acc_total = 0\n", 590 | " loss_total = 0\n", 591 | " symbols_in = [training_data[i] for i in range(offset, offset + n_input)]\n", 592 | " symbols_out = training_data[offset + n_input]\n", 593 | " symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())]\n", 594 | " print(\"%s - [%s] vs [%s]\" % (symbols_in,symbols_out,symbols_out_pred))\n", 595 | " step += 1\n", 596 | " offset += (n_input+1)\n", 597 | " print(\"Optimization Finished!\")\n", 598 | " print(\"Elapsed time: \", elapsed(time.time() - start_time))\n", 599 | " print(\"Run on command line.\")\n", 600 | " print(\"\\ttensorboard --logdir=%s\" % (logs_path))\n", 601 | " print(\"Point your web browser to: http://localhost:6006/\")\n", 602 | " while True:\n", 603 | " prompt = \"%s words: \" % n_input\n", 604 | " words = input(prompt)\n", 605 | " print(words)\n", 606 | " if len(words) != n_input:\n", 607 | " continue\n", 608 | " try:\n", 609 | " symbols_in_keys = [dictionary[str(words[i])] for i in range(len(words))]\n", 610 | " for i in range(100):\n", 611 | " keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])\n", 612 | " onehot_pred = session.run(pred, feed_dict={x: keys})\n", 613 | " onehot_pred_index = int(tf.argmax(onehot_pred, 1).eval())\n", 614 | " sentence = reverse_dictionary[onehot_pred_index]\n", 615 | " symbols_in_keys = symbols_in_keys[1:]\n", 616 | " symbols_in_keys.append(onehot_pred_index)\n", 617 | " print(sentence)\n", 618 | " except:\n", 619 | " print(\"Word not in dictionary\")\n", 620 | " tf.saved_model.simple_save(\n", 621 | " session, '../tmp', symbols_in_keys, symbols_out_onehot\n", 622 | " )" 623 | ] 624 | }, 625 | { 626 | "cell_type": "code", 627 | "execution_count": 31, 628 | "metadata": {}, 629 | "outputs": [ 630 | { 631 | "data": { 632 | "text/plain": [ 633 | "[,\n", 634 | " ]" 635 | ] 636 | }, 637 | "execution_count": 31, 638 | "metadata": {}, 639 | "output_type": "execute_result" 640 | } 641 
| ], 642 | "source": [ 643 | "[pred, cost]" 644 | ] 645 | }, 646 | { 647 | "cell_type": "code", 648 | "execution_count": null, 649 | "metadata": {}, 650 | "outputs": [], 651 | "source": [ 652 | "tf.saved_model.simple_save(session,'../tmp/model.ckpt',inputs=dictionary,outputs=dictionary)" 653 | ] 654 | }, 655 | { 656 | "cell_type": "code", 657 | "execution_count": null, 658 | "metadata": {}, 659 | "outputs": [], 660 | "source": [ 661 | "saver = tf.train.Saver()\n", 662 | "saver.save(session, \"/tmp/model.ckpt\")" 663 | ] 664 | }, 665 | { 666 | "cell_type": "code", 667 | "execution_count": null, 668 | "metadata": { 669 | "collapsed": true 670 | }, 671 | "outputs": [], 672 | "source": [] 673 | } 674 | ], 675 | "metadata": { 676 | "kernelspec": { 677 | "display_name": "Python 2", 678 | "language": "python", 679 | "name": "python2" 680 | }, 681 | "language_info": { 682 | "codemirror_mode": { 683 | "name": "ipython", 684 | "version": 2 685 | }, 686 | "file_extension": ".py", 687 | "mimetype": "text/x-python", 688 | "name": "python", 689 | "nbconvert_exporter": "python", 690 | "pygments_lexer": "ipython2", 691 | "version": "2.7.15" 692 | } 693 | }, 694 | "nbformat": 4, 695 | "nbformat_minor": 2 696 | } 697 | --------------------------------------------------------------------------------