├── Samples ├── README.md ├── Chords-Progressions-Transformer-Composition.mid ├── Chords-Progressions-Transformer-Composition (1).mid ├── Chords-Progressions-Transformer-Composition (2).mid ├── Chords-Progressions-Transformer-Composition (3).mid ├── Chords-Progressions-Transformer-Composition (4).mid └── Chords-Progressions-Transformer-Composition (5).mid ├── Models ├── Aux │ ├── losses_accuracies.pickle │ ├── training_acc_graph.png │ ├── training_loss_graph.png │ ├── validation_acc_graph.png │ ├── README.md │ └── validation_loss_graph.png ├── Small │ ├── training_acc_graph.png │ ├── losses_accuracies.pickle │ ├── training_loss_graph.png │ ├── validation_acc_graph.png │ ├── validation_loss_graph.png │ ├── Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png │ └── README.md ├── Melody │ ├── README.md │ ├── losses_accuracies.pickle │ └── Chords-Progressions-Transformer-Melody-Tokens-Embeddings-Plot.png ├── Small_2048 │ ├── training_acc_graph.png │ ├── losses_accuracies.pickle │ ├── training_loss_graph.png │ ├── validation_acc_graph.png │ ├── validation_loss_graph.png │ ├── Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png │ └── README.md ├── Small_2048_FP32 │ ├── training_acc_graph.png │ ├── losses_accuracies.pickle │ ├── training_loss_graph.png │ ├── validation_acc_graph.png │ ├── validation_loss_graph.png │ ├── Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png │ └── README.md ├── README.md └── Medium │ └── README.md ├── Seeds ├── README.md ├── Chords-Progressions-Transformer-MI-Seed-1.mid ├── Chords-Progressions-Transformer-MI-Seed-2.mid ├── Chords-Progressions-Transformer-MI-Seed-3.mid ├── Chords-Progressions-Transformer-MI-Seed-4.mid ├── Chords-Progressions-Transformer-MI-Seed-5.mid ├── Chords-Progressions-Transformer-MI-Seed-6.mid ├── Chords-Progressions-Transformer-Piano-Seed-1.mid ├── Chords-Progressions-Transformer-Piano-Seed-2.mid ├── Chords-Progressions-Transformer-Piano-Seed-3.mid ├── Chords-Progressions-Transformer-Piano-Seed-4.mid ├── Chords-Progressions-Transformer-Piano-Seed-5.mid └── Chords-Progressions-Transformer-Piano-Seed-6.mid ├── Artwork ├── Chords-Progressions-Transformer (1).jpg ├── Chords-Progressions-Transformer (2).jpg ├── Chords-Progressions-Transformer (3).jpg ├── Chords-Progressions-Transformer (4).jpg ├── Chords-Progressions-Transformer (5).jpg ├── Chords-Progressions-Transformer (6).jpg ├── Chords-Progressions-Transformer (7).jpg ├── Chords-Progressions-Transformer (8).jpg ├── Chords-Progressions-Transformer (9).jpg ├── Chords-Progressions-Transformer (10).jpg ├── Chords-Progressions-Transformer (11).jpg ├── Chords-Progressions-Transformer (12).jpg ├── Chords-Progressions-Transformer (13).jpg ├── Chords-Progressions-Transformer (14).jpg ├── Chords-Progressions-Transformer (15).jpg ├── Chords-Progressions-Transformer (16).jpg ├── Chords-Progressions-Transformer (17).jpg ├── Chords-Progressions-Transformer (18).jpg ├── Chords-Progressions-Transformer (19).jpg ├── Chords-Progressions-Transformer (20).jpg ├── Chords-Progressions-Transformer (21).jpg ├── Chords-Progressions-Transformer (22).jpg ├── Chords-Progressions-Transformer (23).jpg ├── Chords-Progressions-Transformer (24).jpg ├── Chords-Progressions-Transformer (25).jpg ├── Chords-Progressions-Transformer (26).jpg ├── Chords-Progressions-Transformer (27).jpg ├── Chords-Progressions-Transformer (28).jpg ├── Chords-Progressions-Transformer (29).jpg ├── Chords-Progressions-Transformer (30).jpg ├── Chords-Progressions-Transformer (31).jpg ├── Chords-Progressions-Transformer (32).jpg ├── 
Chords-Progressions-Transformer (33).jpg ├── Chords-Progressions-Transformer (34).jpg └── README.md ├── Training-Data ├── README.md ├── chords_progressions_transformer_augmented_training_dataset_maker.py ├── chords_progressions_transformer_training_dataset_maker.py ├── Chords_Progressions_Transformer_Augmented_Training_Dataset_Maker.ipynb └── Chords_Progressions_Transformer_Training_Dataset_Maker.ipynb ├── Training-Code ├── README.md ├── chords_progressions_transformer_maker.py ├── chords_progressions_transformer_aux_maker.py └── chords_progressions_transformer_melody_maker.py ├── README.md ├── LICENSE ├── chords_progressions_transformer_aux.py ├── chords_progressions_transformer_melody.py └── Chords_Progressions_Transformer_Aux.ipynb /Samples/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Output Samples 2 | 3 | *** 4 | 5 | ### Project Los Angeles 6 | ### Tegridy Code 2024 7 | -------------------------------------------------------------------------------- /Models/Aux/losses_accuracies.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Aux/losses_accuracies.pickle -------------------------------------------------------------------------------- /Models/Aux/training_acc_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Aux/training_acc_graph.png -------------------------------------------------------------------------------- /Models/Aux/training_loss_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Aux/training_loss_graph.png -------------------------------------------------------------------------------- /Models/Aux/validation_acc_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Aux/validation_acc_graph.png -------------------------------------------------------------------------------- /Models/Small/training_acc_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small/training_acc_graph.png -------------------------------------------------------------------------------- /Seeds/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Sample Seed MIDIs 2 | 3 | *** 4 | 5 | ### Project Los Angeles 6 | ### Tegridy Code 2024 7 | -------------------------------------------------------------------------------- /Models/Aux/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Aux Pre-Trained Model 2 | 3 | *** 4 | 5 | ### Project Los Angeles 6 | ### Tegridy Code 2024 7 | -------------------------------------------------------------------------------- /Models/Aux/validation_loss_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Aux/validation_loss_graph.png
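The `losses_accuracies.pickle` files in each model folder store the metrics behind the accompanying `*_acc_graph.png` and `*_loss_graph.png` images. Below is a minimal sketch for inspecting one such file locally with plain Python; the unpacking step assumes a simple four-list layout (training/validation losses and accuracies), which is only a guess and should be adjusted after printing the object.

```python
# Hypothetical inspection of a downloaded losses_accuracies.pickle file.
# The internal structure of the pickle is an assumption; print it first and adapt.
import pickle

import matplotlib.pyplot as plt

with open('losses_accuracies.pickle', 'rb') as f:
    metrics = pickle.load(f)

print(type(metrics))       # see what the file actually contains
print(str(metrics)[:200])  # peek at the beginning of the data

# If it unpacks into train/validation losses and accuracies, plot the losses:
if isinstance(metrics, (list, tuple)) and len(metrics) == 4:
    train_loss, train_acc, val_loss, val_acc = metrics
    plt.plot(train_loss, label='training loss')
    plt.plot(val_loss, label='validation loss')
    plt.legend()
    plt.savefig('loss_graphs.png')  # analogous to the *_loss_graph.png files above
```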
-------------------------------------------------------------------------------- /Models/Small/losses_accuracies.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small/losses_accuracies.pickle -------------------------------------------------------------------------------- /Models/Small/training_loss_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small/training_loss_graph.png -------------------------------------------------------------------------------- /Models/Small/validation_acc_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small/validation_acc_graph.png -------------------------------------------------------------------------------- /Models/Melody/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Melody Pre-Trained Model 2 | 3 | *** 4 | 5 | ### Project Los Angeles 6 | ### Tegridy Code 2024 7 | -------------------------------------------------------------------------------- /Models/Melody/losses_accuracies.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Melody/losses_accuracies.pickle -------------------------------------------------------------------------------- /Models/Small/validation_loss_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small/validation_loss_graph.png -------------------------------------------------------------------------------- /Models/Small_2048/training_acc_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048/training_acc_graph.png -------------------------------------------------------------------------------- /Models/Small_2048/losses_accuracies.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048/losses_accuracies.pickle -------------------------------------------------------------------------------- /Models/Small_2048/training_loss_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048/training_loss_graph.png -------------------------------------------------------------------------------- /Models/Small_2048/validation_acc_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048/validation_acc_graph.png -------------------------------------------------------------------------------- /Models/Small_2048/validation_loss_graph.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048/validation_loss_graph.png -------------------------------------------------------------------------------- /Models/Small_2048_FP32/training_acc_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048_FP32/training_acc_graph.png -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (1).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (1).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (2).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (2).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (3).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (3).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (4).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (4).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (5).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (5).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (6).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (6).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (7).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (7).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (8).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (8).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (9).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (9).jpg 
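Every binary asset in this listing (metric pickles, training graphs, seed and sample MIDIs, artwork) is mirrored at the raw.githubusercontent.com URL shown next to it, so individual files can be pulled without cloning the repository. A small standard-library sketch follows; the particular file and local filename are just examples.

```python
# Fetch one of the assets listed above; the chosen file is only an example.
import urllib.request

url = ('https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/'
       'main/Models/Small_2048_FP32/training_acc_graph.png')

urllib.request.urlretrieve(url, 'training_acc_graph.png')
print('Saved training_acc_graph.png')
```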
-------------------------------------------------------------------------------- /Models/Small_2048_FP32/losses_accuracies.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048_FP32/losses_accuracies.pickle -------------------------------------------------------------------------------- /Models/Small_2048_FP32/training_loss_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048_FP32/training_loss_graph.png -------------------------------------------------------------------------------- /Models/Small_2048_FP32/validation_acc_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048_FP32/validation_acc_graph.png -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (10).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (10).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (11).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (11).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (12).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (12).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (13).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (13).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (14).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (14).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (15).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (15).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (16).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (16).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer 
(17).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (17).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (18).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (18).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (19).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (19).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (20).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (20).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (21).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (21).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (22).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (22).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (23).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (23).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (24).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (24).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (25).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (25).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (26).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (26).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (27).jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (27).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (28).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (28).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (29).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (29).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (30).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (30).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (31).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (31).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (32).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (32).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (33).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (33).jpg -------------------------------------------------------------------------------- /Artwork/Chords-Progressions-Transformer (34).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Artwork/Chords-Progressions-Transformer (34).jpg -------------------------------------------------------------------------------- /Models/Small_2048_FP32/validation_loss_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048_FP32/validation_loss_graph.png -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-MI-Seed-1.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-MI-Seed-1.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-MI-Seed-2.mid: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-MI-Seed-2.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-MI-Seed-3.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-MI-Seed-3.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-MI-Seed-4.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-MI-Seed-4.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-MI-Seed-5.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-MI-Seed-5.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-MI-Seed-6.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-MI-Seed-6.mid -------------------------------------------------------------------------------- /Samples/Chords-Progressions-Transformer-Composition.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Samples/Chords-Progressions-Transformer-Composition.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-Piano-Seed-1.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-Piano-Seed-1.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-Piano-Seed-2.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-Piano-Seed-2.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-Piano-Seed-3.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-Piano-Seed-3.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-Piano-Seed-4.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-Piano-Seed-4.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-Piano-Seed-5.mid: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-Piano-Seed-5.mid -------------------------------------------------------------------------------- /Seeds/Chords-Progressions-Transformer-Piano-Seed-6.mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Seeds/Chords-Progressions-Transformer-Piano-Seed-6.mid -------------------------------------------------------------------------------- /Samples/Chords-Progressions-Transformer-Composition (1).mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Samples/Chords-Progressions-Transformer-Composition (1).mid -------------------------------------------------------------------------------- /Samples/Chords-Progressions-Transformer-Composition (2).mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Samples/Chords-Progressions-Transformer-Composition (2).mid -------------------------------------------------------------------------------- /Samples/Chords-Progressions-Transformer-Composition (3).mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Samples/Chords-Progressions-Transformer-Composition (3).mid -------------------------------------------------------------------------------- /Samples/Chords-Progressions-Transformer-Composition (4).mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Samples/Chords-Progressions-Transformer-Composition (4).mid -------------------------------------------------------------------------------- /Samples/Chords-Progressions-Transformer-Composition (5).mid: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Samples/Chords-Progressions-Transformer-Composition (5).mid -------------------------------------------------------------------------------- /Artwork/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Concept Artwork 2 | 3 | ## Images are a courtesy of Stable Diffusion XL 4 | 5 | *** 6 | 7 | ### Project Los Angeles 8 | ### Tegridy Code 2024 9 | -------------------------------------------------------------------------------- /Models/Small/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png -------------------------------------------------------------------------------- /Models/Small_2048/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png 
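The seed and sample MIDIs above can be opened with the same TMIDIX helpers that the dataset-maker script later in this listing relies on. A brief sketch, assuming tegridy-tools has been cloned next to the working directory (as the Colab code below does) and one of the sample MIDIs has been downloaded locally; the paths are illustrative only.

```python
# Quick look at one of the sample MIDIs using the TMIDIX calls from the
# dataset-maker script below; adjust the paths to your setup.
import sys

sys.path.append('tegridy-tools/tegridy-tools')  # cloned from github.com/asigalov61/tegridy-tools

import TMIDIX

raw_score = TMIDIX.midi2single_track_ms_score('Chords-Progressions-Transformer-Composition.mid')
escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0]

print('Total note events:', len(escore_notes))
print('First events:', escore_notes[:3])
```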
-------------------------------------------------------------------------------- /Models/Melody/Chords-Progressions-Transformer-Melody-Tokens-Embeddings-Plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Melody/Chords-Progressions-Transformer-Melody-Tokens-Embeddings-Plot.png -------------------------------------------------------------------------------- /Models/Small_2048_FP32/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asigalov61/Chords-Progressions-Transformer/main/Models/Small_2048_FP32/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png -------------------------------------------------------------------------------- /Models/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Pre-Trained Models 2 | 3 | *** 4 | 5 | ## Models are hosted on [Hugging Face](https://huggingface.co/asigalov61/Chords-Progressions-Transformer) 6 | 7 | *** 8 | 9 | ### Project Los Angeles 10 | ### Tegridy Code 2024 11 | -------------------------------------------------------------------------------- /Models/Medium/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Medium Pre-Trained Model 2 | 3 | *** 4 | 5 | ## Model was trained on the augmented [Monster MIDI Dataset Sample Search Results](https://huggingface.co/datasets/projectlosangeles/Monster-MIDI-Dataset/blob/main/Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip) 6 | 7 | *** 8 | 9 | ### Project Los Angeles 10 | ### Tegridy Code 2024 11 | -------------------------------------------------------------------------------- /Models/Small_2048_FP32/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Small (2048 embed, FP32 precision) Pre-Trained Model 2 | 3 | *** 4 | 5 | ## Model was trained on the full [Los Angeles MIDI Dataset](https://huggingface.co/datasets/projectlosangeles/Los-Angeles-MIDI-Dataset) 6 | 7 | ## Model was trained for 3.5 hours (1 full epoch) @ 64 batches on eight A100 GPUs 8 | 9 | *** 10 | 11 | ### Project Los Angeles 12 | ### Tegridy Code 2024 13 | -------------------------------------------------------------------------------- /Models/Small/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Small Pre-Trained Model 2 | 3 | *** 4 | 5 | ## Model was trained on [Monster MIDI Dataset Sample Search Results](https://huggingface.co/datasets/projectlosangeles/Monster-MIDI-Dataset/blob/main/Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip) 6 | 7 | ## Model was trained for 3 hours (3 full epochs) @ 28 batches on a single H100 GPU 8 | 9 | *** 10 | 11 | ### Project Los Angeles 12 | ### Tegridy Code 2024 13 | -------------------------------------------------------------------------------- /Models/Small_2048/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Small (2048 embed) Pre-Trained Model 2 | 3 | *** 4 | 5 | ## Model was trained on [Monster MIDI Dataset Sample Search
Results](https://huggingface.co/datasets/projectlosangeles/Monster-MIDI-Dataset/blob/main/Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip) 6 | 7 | ## Model was trained for 4 hours (4 full epochs) @ 28 batches on a single H100 GPU 8 | 9 | *** 10 | 11 | ### Project Los Angeles 12 | ### Tegridy Code 2024 13 | -------------------------------------------------------------------------------- /Training-Data/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Training Dataset Maker 2 | 3 | *** 4 | 5 | ## Original Version 6 | 7 | [![Open In Colab][colab-badge]][colab-notebook1] 8 | 9 | [colab-notebook1]: 10 | [colab-badge]: 11 | 12 | ### This is the version that was used to train the original small model 13 | 14 | *** 15 | 16 | ## Augmented Version 17 | 18 | [![Open In Colab][colab-badge]][colab-notebook2] 19 | 20 | [colab-notebook2]: 21 | [colab-badge]: 22 | 23 | ### This is the version that was used to train the medium model 24 | 25 | *** 26 | 27 | ## Recommended MIDI datasets: 28 | ### [Monster MIDI Dataset Sample Search Results](https://huggingface.co/datasets/projectlosangeles/Monster-MIDI-Dataset/blob/main/Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip) (included with code/colabs) 29 | 30 | *** 31 | 32 | ### Project Los Angeles 33 | ### Tegridy Code 2024 34 | -------------------------------------------------------------------------------- /Training-Code/README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer Training Code 2 | 3 | *** 4 | 5 | ## Original Version 6 | 7 | [![Open In Colab][colab-badge]][colab-notebook1] 8 | 9 | [colab-notebook1]: 10 | [colab-badge]: 11 | 12 | ### This is the version that was used to train all Chords Progressions Transformer models 13 | 14 | *** 15 | 16 | ## Aux Version 17 | 18 | [![Open In Colab][colab-badge]][colab-notebook2] 19 | 20 | [colab-notebook2]: 21 | [colab-badge]: 22 | 23 | ### This is the version that was used to create the Chords Progressions Transformer Aux model 24 | 25 | *** 26 | 27 | ## Melody Version 28 | 29 | [![Open In Colab][colab-badge]][colab-notebook3] 30 | 31 | [colab-notebook3]: 32 | [colab-badge]: 33 | 34 | ### This is the version that was used to create the Chords Progressions Transformer Melody model 35 | 36 | *** 37 | 38 | ## Recommended DL/ML cloud provider: [Lambda Labs](https://lambdalabs.com/) 39 | 40 | *** 41 | 42 | ### Project Los Angeles 43 | ### Tegridy Code 2024 44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Chords Progressions Transformer 2 | ## Chords-conditioned music transformer for chords progressions generation 3 | 4 | ![Chords-Progressions-Transformer](https://github.com/asigalov61/Chords-Progressions-Transformer/assets/56325539/ac182d42-7894-4d7a-9bd5-3f521bf983d4) 5 | 6 | *** 7 | 8 | ## Original Version 9 | 10 | [![Open In Colab][colab-badge]][colab-notebook1] 11 | 12 | [colab-notebook1]: 13 | [colab-badge]: 14 | 15 | ### Improvs, continuations and custom chords progressions generation 16 | 17 | *** 18 | 19 | ## Dual Models Version 20 | 21 | [![Open In Colab][colab-badge]][colab-notebook2] 22 | 23 | [colab-notebook2]: 24 | [colab-badge]: 25 | 26 | ### Dual model version for automatic unique chords progressions generation 27 | 28 | *** 29 | 30 | ## Melody Version 31 | 32 | [![Open In Colab][colab-badge]][colab-notebook3] 33
| 34 | [colab-notebook3]: 35 | [colab-badge]: 36 | 37 | ### Generate unique melody for any chords progressions 38 | 39 | *** 40 | 41 | ## Other chords-related projects 42 | 43 | ### [chords_recognizer](https://github.com/asigalov61/chord_recognizer) 44 | 45 | ### [magenta note-seq chords labeler](https://colab.research.google.com/github/asigalov61/tegridy-tools/blob/main/tegridy-tools/notebooks/Magenta_note_seq_chords_labeler.ipynb) 46 | 47 | ### [free-midi-chords](https://github.com/ldrolez/free-midi-chords) 48 | 49 | ### [chords2midi](https://github.com/Miserlou/chords2midi) 50 | 51 | ### [chorder](https://github.com/joshuachang2311/chorder) 52 | 53 | ### [pychorelib](https://github.com/asigalov61/tegridy-tools/tree/main/tegridy-tools/PyChoReLib) 54 | 55 | ### [pychord](https://github.com/yuma-m/pychord) 56 | 57 | ### [accomontage2](https://github.com/billyblu2000/AccoMontage2) 58 | 59 | ### [chord-melody-dataset](https://github.com/shiehn/chord-melody-dataset) 60 | 61 | ### [autoharmonizer](https://github.com/sander-wood/autoharmonizer) 62 | 63 | ### [chords-suggester](https://github.com/huanlui/chord-suggester) 64 | 65 | ### [jchord](https://github.com/jonathangjertsen/jchord) 66 | 67 | ### [chord-generation](https://github.com/sander-wood/chord_generation) 68 | 69 | ### [chord-composer](https://github.com/ynot4/chord-composer) 70 | 71 | ### [hooktheory](https://www.hooktheory.com/) 72 | 73 | *** 74 | 75 | ### Project Los Angeles 76 | ### Tegridy Code 2024 77 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Training-Data/chords_progressions_transformer_augmented_training_dataset_maker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Chords_Progressions_Transformer_Augmented_Training_Dataset_Maker.ipynb 3 | 4 | Automatically generated by Colaboratory. 
5 | 6 | Original file is located at 7 | https://colab.research.google.com/drive/1TKUF33Uxfsdi1DRNMbU5nl1e1HFitS8q 8 | 9 | # Chords Progressions Transformer Training Dataset Maker (ver. 1.0) 10 | 11 | *** 12 | 13 | Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools 14 | 15 | *** 16 | 17 | #### Project Los Angeles 18 | 19 | #### Tegridy Code 2024 20 | 21 | *** 22 | 23 | # (SETUP ENVIRONMENT) 24 | """ 25 | 26 | #@title Install all dependencies (run only once per session) 27 | 28 | !git clone --depth 1 https://github.com/asigalov61/tegridy-tools 29 | 30 | #@title Import all needed modules 31 | 32 | print('Loading core modules. Please wait...') 33 | 34 | import os 35 | import copy 36 | import math 37 | import statistics 38 | import random 39 | 40 | from joblib import Parallel, delayed, parallel_config 41 | 42 | from collections import Counter 43 | 44 | from tqdm import tqdm 45 | 46 | from google.colab import files 47 | 48 | print('Creating IO dirs...') 49 | 50 | if not os.path.exists('/content/Dataset'): 51 | os.makedirs('/content/Dataset') 52 | 53 | if not os.path.exists('/content/INTS'): 54 | os.makedirs('/content/INTS') 55 | 56 | print('Loading TMIDIX module...') 57 | os.chdir('/content/tegridy-tools/tegridy-tools') 58 | 59 | import TMIDIX 60 | 61 | print('Done!') 62 | 63 | os.chdir('/content/') 64 | print('Enjoy! :)') 65 | 66 | """# (DOWNLOAD MIDI DATASET)""" 67 | 68 | # Commented out IPython magic to ensure Python compatibility. 69 | # @title Download and unzip Monster MIDI Dataset Sample Search Results 70 | # %cd /content/Dataset/ 71 | !wget https://huggingface.co/datasets/projectlosangeles/Monster-MIDI-Dataset/resolve/main/Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip 72 | !unzip Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip 73 | !rm Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip 74 | # %cd /content/ 75 | 76 | """# (FILE LIST)""" 77 | 78 | #@title Save file list 79 | ########### 80 | 81 | print('=' * 70) 82 | print('Loading MIDI files...') 83 | print('This may take a while on a large dataset in particular.') 84 | 85 | dataset_addr = "/content/Dataset" 86 | 87 | filez = list() 88 | for (dirpath, dirnames, filenames) in os.walk(dataset_addr): 89 | for file in filenames: 90 | if file.endswith(('.mid', '.midi', '.kar')): 91 | filez.append(os.path.join(dirpath, file)) 92 | print('=' * 70) 93 | 94 | if filez == []: 95 | print('Could not find any MIDI files. 
Please check Dataset dir...') 96 | print('=' * 70) 97 | 98 | else: 99 | print('Randomizing file list...') 100 | random.shuffle(filez) 101 | print('=' * 70) 102 | 103 | TMIDIX.Tegridy_Any_Pickle_File_Writer(filez, '/content/filez') 104 | print('=' * 70) 105 | 106 | #@title Load file list 107 | 108 | print('=' * 70) 109 | filez = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/filez') 110 | print('Done!') 111 | print('=' * 70) 112 | 113 | """# (PROCESS)""" 114 | 115 | # @title Load TMIDIX MIDI Processor 116 | 117 | print('=' * 70) 118 | print('Loading TMIDIX MIDI Processor...') 119 | 120 | def TMIDIX_MIDI_Processor(midi_file): 121 | 122 | try: 123 | 124 | fn = os.path.basename(midi_file) 125 | 126 | #======================================================= 127 | # START PROCESSING 128 | 129 | #=============================================================================== 130 | # Raw single-track ms score 131 | 132 | raw_score = TMIDIX.midi2single_track_ms_score(midi_file) 133 | 134 | #=============================================================================== 135 | # Enhanced score notes 136 | 137 | escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0] 138 | 139 | no_drums_escore_notes = [e for e in escore_notes if e[6] < 80] 140 | 141 | if len(no_drums_escore_notes) > 0: 142 | 143 | #======================================================= 144 | # PRE-PROCESSING 145 | 146 | #=============================================================================== 147 | # Augmented enhanced score notes 148 | 149 | no_drums_escore_notes = TMIDIX.augment_enhanced_score_notes(no_drums_escore_notes) 150 | 151 | cscore = TMIDIX.chordify_score([1000, no_drums_escore_notes]) 152 | 153 | clean_cscore = [] 154 | 155 | for c in cscore: 156 | pitches = [] 157 | cho = [] 158 | for cc in c: 159 | if cc[4] not in pitches: 160 | cho.append(cc) 161 | pitches.append(cc[4]) 162 | 163 | clean_cscore.append(cho) 164 | 165 | #======================================================= 166 | # FINAL PROCESSING 167 | 168 | all_melody_chords = [] 169 | 170 | #======================================================= 171 | # Pitches augmentation 172 | 173 | for pa in range(-2, 2): 174 | 175 | melody_chords = [] 176 | 177 | #======================================================= 178 | # MAIN PROCESSING CYCLE 179 | #======================================================= 180 | 181 | pe = clean_cscore[0][0] 182 | 183 | first_chord = True 184 | 185 | for c in clean_cscore: 186 | 187 | # Chords 188 | 189 | c.sort(key=lambda x: x[4], reverse=True) 190 | 191 | tones_chord = sorted(set([(cc[4]+pa) % 12 for cc in c])) 192 | 193 | try: 194 | chord_token = TMIDIX.ALL_CHORDS_SORTED.index(tones_chord) 195 | except: 196 | checked_tones_chord = TMIDIX.check_and_fix_tones_chord(tones_chord) 197 | chord_token = TMIDIX.ALL_CHORDS_SORTED.index(checked_tones_chord) 198 | 199 | melody_chords.extend([chord_token+384]) 200 | 201 | if first_chord: 202 | melody_chords.extend([0]) 203 | first_chord = False 204 | 205 | for e in c: 206 | 207 | #======================================================= 208 | # Timings... 
209 | 210 | time = e[1]-pe[1] 211 | 212 | dur = e[2] 213 | 214 | if time != 0 and time % 2 != 0: 215 | time += 1 216 | if dur % 2 != 0: 217 | dur += 1 218 | 219 | delta_time = int(max(0, min(255, time)) / 2) 220 | 221 | # Durations 222 | 223 | dur = int(max(0, min(255, dur)) / 2) 224 | 225 | # Pitches 226 | 227 | ptc = max(1, min(127, e[4]+pa)) 228 | 229 | #======================================================= 230 | # FINAL NOTE SEQ 231 | 232 | # Writing final note asynchronously 233 | 234 | if delta_time != 0: 235 | melody_chords.extend([delta_time, dur+128, ptc+256]) 236 | else: 237 | melody_chords.extend([dur+128, ptc+256]) 238 | 239 | pe = e 240 | 241 | if len(melody_chords) > 8192: 242 | break 243 | 244 | #======================================================= 245 | 246 | all_melody_chords.append(melody_chords[:8193]) 247 | 248 | #======================================================= 249 | 250 | # TOTAL DICTIONARY SIZE 706+1=707 251 | #======================================================= 252 | 253 | return all_melody_chords 254 | 255 | else: 256 | return None 257 | 258 | except Exception as e: 259 | print('=' * 70) 260 | print('ERROR!!!') 261 | print('File name:', midi_file) 262 | print('Error:', e) 263 | print('=' * 70) 264 | return None 265 | 266 | print('Done!') 267 | print('=' * 70) 268 | 269 | #@title Process MIDIs with TMIDIX MIDI processor 270 | 271 | NUMBER_OF_PARALLEL_JOBS = 16 # Number of parallel jobs 272 | NUMBER_OF_FILES_PER_ITERATION = 16 # Number of files to queue for each parallel iteration 273 | SAVE_EVERY_NUMBER_OF_ITERATIONS = 160 # Save every 2560 files 274 | 275 | print('=' * 70) 276 | print('TMIDIX MIDI Processor') 277 | print('=' * 70) 278 | print('Starting up...') 279 | print('=' * 70) 280 | 281 | ########### 282 | 283 | melody_chords_f = [] 284 | 285 | files_count = 0 286 | good_files_count = 0 287 | 288 | print('Processing MIDI files. 
Please wait...') 289 | print('=' * 70) 290 | 291 | for i in tqdm(range(0, len(filez), NUMBER_OF_FILES_PER_ITERATION)): 292 | 293 | with parallel_config(backend='threading', n_jobs=NUMBER_OF_PARALLEL_JOBS, verbose = 0): 294 | output = Parallel(n_jobs=NUMBER_OF_PARALLEL_JOBS, verbose=0)(delayed(TMIDIX_MIDI_Processor)(f) for f in filez[i:i+NUMBER_OF_FILES_PER_ITERATION]) 295 | 296 | for o in output: 297 | 298 | if o is not None: 299 | melody_chords_f.append(o) 300 | 301 | # Saving every 2560 processed files 302 | if i % (SAVE_EVERY_NUMBER_OF_ITERATIONS * NUMBER_OF_FILES_PER_ITERATION) == 0 and i != 0: 303 | print('SAVING !!!') 304 | print('=' * 70) 305 | good_files_count += len(melody_chords_f) 306 | print('Saving processed files...') 307 | print('=' * 70) 308 | print('Data check:', min(melody_chords_f[0][0]), '===', max(melody_chords_f[0][0]), '===', len(list(set(melody_chords_f[0][0]))), '===', len(melody_chords_f[0][0])) 309 | print('=' * 70) 310 | print('Processed so far:', good_files_count, 'out of', i, '===', good_files_count / i, 'good files ratio') 311 | print('=' * 70) 312 | count = str(i) 313 | TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/INTS/CPT_INTs_'+count) 314 | melody_chords_f = [] 315 | print('=' * 70) 316 | 317 | print('SAVING !!!') 318 | print('=' * 70) 319 | good_files_count += len(melody_chords_f) 320 | print('Saving processed files...') 321 | print('=' * 70) 322 | print('Data check:', min(melody_chords_f[0][0]), '===', max(melody_chords_f[0][0]), '===', len(list(set(melody_chords_f[0][0]))), '===', len(melody_chords_f[0][0])) 323 | print('=' * 70) 324 | print('Processed so far:', good_files_count, 'out of', i, '===', good_files_count / i, 'good files ratio') 325 | print('=' * 70) 326 | count = str(i) 327 | TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/INTS/CPT_INTs_'+count) 328 | print('=' * 70) 329 | 330 | """# (TEST INTS)""" 331 | 332 | #@title Test INTs 333 | 334 | train_data1 = random.choice(melody_chords_f[0]) 335 | 336 | print('=' * 70) 337 | print('Seq len:', len(train_data1)) 338 | print('Sample INTs', train_data1[:15]) 339 | print('=' * 70) 340 | 341 | out = train_data1 342 | 343 | if len(out) != 0: 344 | 345 | song = out 346 | song_f = [] 347 | 348 | time = 0 349 | dur = 0 350 | vel = 90 351 | pitch = 0 352 | channel = 0 353 | 354 | patches = [0] * 16 355 | 356 | for ss in song: 357 | 358 | if 0 <= ss < 128: 359 | 360 | time += ss 361 | 362 | if 128 <= ss < 256: 363 | 364 | dur = (ss-128) 365 | 366 | if 256 <= ss < 384: 367 | 368 | pitch = (ss-256) 369 | 370 | song_f.append(['note', time, dur, channel, pitch, vel ]) 371 | 372 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 373 | output_signature = 'Chords Progressions Transformer', 374 | output_file_name = '/content/Chords-Progressions-Transformer-Composition', 375 | track_name='Project Los Angeles', 376 | list_of_MIDI_patches=patches, 377 | timings_multiplier=32 378 | ) 379 | print('=' * 70) 380 | 381 | """# (ZIP AND DOWNLOAD INTS)""" 382 | 383 | # Commented out IPython magic to ensure Python compatibility. 384 | #@title Zip and download training INTs 385 | 386 | print('=' * 70) 387 | 388 | try: 389 | os.remove('Chords_progressions_Transformer_INTs.zip') 390 | except OSError: 391 | pass 392 | 393 | print('Zipping... 
Please wait...') 394 | print('=' * 70) 395 | 396 | # %cd /content/INTS/ 397 | !zip Chords_progressions_Transformer_INTs.zip *.pickle 398 | # %cd /content/ 399 | 400 | print('=' * 70) 401 | print('Done!') 402 | print('=' * 70) 403 | 404 | print('Downloading final zip file...') 405 | print('=' * 70) 406 | 407 | files.download('/content/INTS/Chords_progressions_Transformer_INTs.zip') 408 | 409 | print('Done!') 410 | print('=' * 70) 411 | 412 | """# Congrats! You did it! :)""" -------------------------------------------------------------------------------- /Training-Data/chords_progressions_transformer_training_dataset_maker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Chords_Progressions_Transformer_Training_Dataset_Maker.ipynb 3 | 4 | Automatically generated by Colab. 5 | 6 | Original file is located at 7 | https://colab.research.google.com/github/asigalov61/Chords-Progressions-Transformer/blob/main/Training-Data/Chords_Progressions_Transformer_Training_Dataset_Maker.ipynb 8 | 9 | # Chords Progressions Transformer Training Dataset Maker (ver. 1.0) 10 | 11 | *** 12 | 13 | Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools 14 | 15 | *** 16 | 17 | #### Project Los Angeles 18 | 19 | #### Tegridy Code 2024 20 | 21 | *** 22 | 23 | # (SETUP ENVIRONMENT) 24 | """ 25 | 26 | #@title Install all dependencies (run only once per session) 27 | 28 | !git clone --depth 1 https://github.com/asigalov61/tegridy-tools 29 | 30 | #@title Import all needed modules 31 | 32 | print('Loading core modules. Please wait...') 33 | 34 | import os 35 | import copy 36 | import math 37 | import statistics 38 | import random 39 | 40 | from joblib import Parallel, delayed, parallel_config 41 | 42 | from collections import Counter 43 | 44 | from tqdm import tqdm 45 | 46 | from google.colab import files 47 | 48 | print('Creating IO dirs...') 49 | 50 | if not os.path.exists('/content/Dataset'): 51 | os.makedirs('/content/Dataset') 52 | 53 | if not os.path.exists('/content/INTS'): 54 | os.makedirs('/content/INTS') 55 | 56 | print('Loading TMIDIX module...') 57 | os.chdir('/content/tegridy-tools/tegridy-tools') 58 | 59 | import TMIDIX 60 | 61 | print('Done!') 62 | 63 | os.chdir('/content/') 64 | print('Enjoy! :)') 65 | 66 | """# (DOWNLOAD MIDI DATASET)""" 67 | 68 | # Commented out IPython magic to ensure Python compatibility. 69 | # @title Download and unzip Monster MIDI Dataset Sample Search Results 70 | # %cd /content/Dataset/ 71 | !wget https://huggingface.co/datasets/projectlosangeles/Monster-MIDI-Dataset/resolve/main/Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip 72 | !unzip Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip 73 | !rm Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip 74 | # %cd /content/ 75 | 76 | """# (FILE LIST)""" 77 | 78 | #@title Save file list 79 | ########### 80 | 81 | print('=' * 70) 82 | print('Loading MIDI files...') 83 | print('This may take a while on a large dataset in particular.') 84 | 85 | dataset_addr = "/content/Dataset" 86 | 87 | filez = list() 88 | for (dirpath, dirnames, filenames) in os.walk(dataset_addr): 89 | for file in filenames: 90 | if file.endswith(('.mid', '.midi', '.kar')): 91 | filez.append(os.path.join(dirpath, file)) 92 | print('=' * 70) 93 | 94 | if filez == []: 95 | print('Could not find any MIDI files. 
Please check Dataset dir...') 96 | print('=' * 70) 97 | 98 | else: 99 | print('Randomizing file list...') 100 | random.shuffle(filez) 101 | print('=' * 70) 102 | 103 | TMIDIX.Tegridy_Any_Pickle_File_Writer(filez, '/content/filez') 104 | print('=' * 70) 105 | 106 | #@title Load file list 107 | 108 | print('=' * 70) 109 | filez = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/filez') 110 | print('Done!') 111 | print('=' * 70) 112 | 113 | """# (PROCESS)""" 114 | 115 | # @title Load TMIDIX MIDI Processor 116 | apply_timings_compression_and_quantization = False # @param {type:"boolean"} 117 | 118 | print('=' * 70) 119 | print('Loading TMIDIX MIDI Processor...') 120 | 121 | def TMIDIX_MIDI_Processor(midi_file): 122 | 123 | try: 124 | 125 | fn = os.path.basename(midi_file) 126 | 127 | #======================================================= 128 | # START PROCESSING 129 | 130 | #=============================================================================== 131 | # Raw single-track ms score 132 | 133 | raw_score = TMIDIX.midi2single_track_ms_score(midi_file) 134 | 135 | #=============================================================================== 136 | # Enhanced score notes 137 | 138 | escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0] 139 | 140 | no_drums_escore_notes = [e for e in escore_notes if e[6] < 80] 141 | 142 | if len(no_drums_escore_notes) > 0: 143 | 144 | #======================================================= 145 | # PRE-PROCESSING 146 | 147 | #=============================================================================== 148 | # Augmented enhanced score notes 149 | 150 | if apply_timings_compression_and_quantization: 151 | no_drums_escore_notes = TMIDIX.augment_enhanced_score_notes(no_drums_escore_notes) 152 | 153 | else: 154 | no_drums_escore_notes = TMIDIX.augment_enhanced_score_notes(no_drums_escore_notes, 155 | timings_divider=32 156 | ) 157 | 158 | cscore = TMIDIX.chordify_score([1000, no_drums_escore_notes]) 159 | 160 | clean_cscore = [] 161 | 162 | for c in cscore: 163 | pitches = [] 164 | cho = [] 165 | for cc in c: 166 | if cc[4] not in pitches: 167 | cho.append(cc) 168 | pitches.append(cc[4]) 169 | 170 | clean_cscore.append(cho) 171 | 172 | #======================================================= 173 | # FINAL PROCESSING 174 | 175 | melody_chords = [] 176 | 177 | #======================================================= 178 | # MAIN PROCESSING CYCLE 179 | #======================================================= 180 | 181 | pe = clean_cscore[0][0] 182 | 183 | first_chord = True 184 | 185 | for c in clean_cscore: 186 | 187 | # Chords 188 | 189 | c.sort(key=lambda x: x[4], reverse=True) 190 | 191 | tones_chord = sorted(set([cc[4] % 12 for cc in c])) 192 | 193 | try: 194 | chord_token = TMIDIX.ALL_CHORDS_SORTED.index(tones_chord) 195 | except: 196 | checked_tones_chord = TMIDIX.check_and_fix_tones_chord(tones_chord) 197 | chord_token = TMIDIX.ALL_CHORDS_SORTED.index(checked_tones_chord) 198 | 199 | melody_chords.extend([chord_token+384]) 200 | 201 | if first_chord: 202 | melody_chords.extend([0]) 203 | first_chord = False 204 | 205 | for e in c: 206 | 207 | #======================================================= 208 | # Timings... 
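          # Added descriptive comment (not original code): both branches below map start-time
          # deltas and durations into the same 0..127 token range. With
          # apply_timings_compression_and_quantization enabled, values are rounded up to even,
          # clipped to 0..255 and halved (a coarser 2-step grid); with it disabled (the
          # default), the score was already scaled with timings_divider=32 above, so values
          # are simply clipped to 0..127 (durations to 1..127). The final token layout is the
          # same either way: 0..127 delta times, 128..255 durations (+128),
          # 256..383 pitches (+256), 384+ chord tokens (+384).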
209 | 210 | time = e[1]-pe[1] 211 | 212 | dur = e[2] 213 | 214 | if apply_timings_compression_and_quantization: 215 | 216 | if time != 0 and time % 2 != 0: 217 | time += 1 218 | if dur % 2 != 0: 219 | dur += 1 220 | 221 | delta_time = int(max(0, min(255, time)) / 2) 222 | 223 | dur = int(max(0, min(255, dur)) / 2) 224 | 225 | else: 226 | delta_time = max(0, min(127, time)) 227 | dur = max(1, min(127, dur)) 228 | 229 | # Pitches 230 | 231 | ptc = max(1, min(127, e[4])) 232 | 233 | #======================================================= 234 | # FINAL NOTE SEQ 235 | 236 | # Writing final note asynchronously 237 | 238 | if delta_time != 0: 239 | melody_chords.extend([delta_time, dur+128, ptc+256]) 240 | else: 241 | melody_chords.extend([dur+128, ptc+256]) 242 | 243 | pe = e 244 | 245 | if len(melody_chords) > 8192: 246 | break 247 | 248 | #======================================================= 249 | 250 | # TOTAL DICTIONARY SIZE 706+1=707 251 | #======================================================= 252 | 253 | return melody_chords[:8193] 254 | 255 | else: 256 | return None 257 | 258 | except Exception as e: 259 | print('=' * 70) 260 | print('ERROR!!!') 261 | print('File name:', midi_file) 262 | print('Error:', e) 263 | print('=' * 70) 264 | return None 265 | 266 | print('Done!') 267 | print('=' * 70) 268 | 269 | #@title Process MIDIs with TMIDIX MIDI processor 270 | 271 | NUMBER_OF_PARALLEL_JOBS = 16 # Number of parallel jobs 272 | NUMBER_OF_FILES_PER_ITERATION = 16 # Number of files to queue for each parallel iteration 273 | SAVE_EVERY_NUMBER_OF_ITERATIONS = 160 # Save every 2560 files 274 | 275 | print('=' * 70) 276 | print('TMIDIX MIDI Processor') 277 | print('=' * 70) 278 | print('Starting up...') 279 | print('=' * 70) 280 | 281 | ########### 282 | 283 | melody_chords_f = [] 284 | 285 | files_count = 0 286 | good_files_count = 0 287 | 288 | print('Processing MIDI files. 
Please wait...') 289 | print('=' * 70) 290 | 291 | for i in tqdm(range(0, len(filez), NUMBER_OF_FILES_PER_ITERATION)): 292 | 293 | with parallel_config(backend='threading', n_jobs=NUMBER_OF_PARALLEL_JOBS, verbose = 0): 294 | output = Parallel(n_jobs=NUMBER_OF_PARALLEL_JOBS, verbose=0)(delayed(TMIDIX_MIDI_Processor)(f) for f in filez[i:i+NUMBER_OF_FILES_PER_ITERATION]) 295 | 296 | for o in output: 297 | 298 | if o is not None: 299 | melody_chords_f.append(o) 300 | 301 | # Saving every 2560 processed files 302 | if i % (SAVE_EVERY_NUMBER_OF_ITERATIONS * NUMBER_OF_FILES_PER_ITERATION) == 0 and i != 0: 303 | print('SAVING !!!') 304 | print('=' * 70) 305 | good_files_count += len(melody_chords_f) 306 | print('Saving processed files...') 307 | print('=' * 70) 308 | print('Data check:', min(melody_chords_f[0]), '===', max(melody_chords_f[0]), '===', len(list(set(melody_chords_f[0]))), '===', len(melody_chords_f[0])) 309 | print('=' * 70) 310 | print('Processed so far:', good_files_count, 'out of', i, '===', good_files_count / i, 'good files ratio') 311 | print('=' * 70) 312 | count = str(i) 313 | TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/INTS/CPT_INTs_'+count) 314 | melody_chords_f = [] 315 | print('=' * 70) 316 | 317 | print('SAVING !!!') 318 | print('=' * 70) 319 | good_files_count += len(melody_chords_f) 320 | print('Saving processed files...') 321 | print('=' * 70) 322 | print('Data check:', min(melody_chords_f[0]), '===', max(melody_chords_f[0]), '===', len(list(set(melody_chords_f[0]))), '===', len(melody_chords_f[0])) 323 | print('=' * 70) 324 | print('Processed so far:', good_files_count, 'out of', i, '===', good_files_count / i, 'good files ratio') 325 | print('=' * 70) 326 | count = str(i) 327 | TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/INTS/CPT_INTs_'+count) 328 | print('=' * 70) 329 | 330 | """# (TEST INTS)""" 331 | 332 | #@title Test INTs 333 | 334 | train_data1 = random.choice(melody_chords_f) 335 | 336 | print('=' * 70) 337 | print('Seq len:', len(train_data1)) 338 | print('Sample INTs', train_data1[:15]) 339 | print('=' * 70) 340 | 341 | out = train_data1 342 | 343 | if len(out) != 0: 344 | 345 | song = out 346 | song_f = [] 347 | 348 | time = 0 349 | dur = 0 350 | vel = 90 351 | pitch = 0 352 | channel = 0 353 | 354 | patches = [0] * 16 355 | 356 | for ss in song: 357 | 358 | if 0 <= ss < 128: 359 | 360 | time += ss 361 | 362 | if 128 <= ss < 256: 363 | 364 | dur = (ss-128) 365 | 366 | if 256 <= ss < 384: 367 | 368 | pitch = (ss-256) 369 | 370 | song_f.append(['note', time, dur, channel, pitch, vel ]) 371 | 372 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 373 | output_signature = 'Chords Progressions Transformer', 374 | output_file_name = '/content/Chords-Progressions-Transformer-Composition', 375 | track_name='Project Los Angeles', 376 | list_of_MIDI_patches=patches, 377 | timings_multiplier=32 378 | ) 379 | print('=' * 70) 380 | 381 | """# (ZIP AND DOWNLOAD INTS)""" 382 | 383 | # Commented out IPython magic to ensure Python compatibility. 384 | #@title Zip and download training INTs 385 | 386 | print('=' * 70) 387 | 388 | try: 389 | os.remove('Chords_progressions_Transformer_INTs.zip') 390 | except OSError: 391 | pass 392 | 393 | print('Zipping... 
Please wait...') 394 | print('=' * 70) 395 | 396 | # %cd /content/INTS/ 397 | !zip Chords_progressions_Transformer_INTs.zip *.pickle 398 | # %cd /content/ 399 | 400 | print('=' * 70) 401 | print('Done!') 402 | print('=' * 70) 403 | 404 | print('Downloading final zip file...') 405 | print('=' * 70) 406 | 407 | files.download('/content/INTS/Chords_progressions_Transformer_INTs.zip') 408 | 409 | print('Done!') 410 | print('=' * 70) 411 | 412 | """# Congrats! You did it! :)""" -------------------------------------------------------------------------------- /chords_progressions_transformer_aux.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Chords_Progressions_Transformer_Aux.ipynb 3 | 4 | Automatically generated by Colab. 5 | 6 | Original file is located at 7 | https://colab.research.google.com/github/asigalov61/Chords-Progressions-Transformer/blob/main/Chords_Progressions_Transformer_Aux.ipynb 8 | 9 | # Chords Progressions Transformer Aux (ver. 1.0) 10 | 11 | *** 12 | 13 | Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools 14 | 15 | *** 16 | 17 | WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please excercise great humility, care, and respect. https://www.nscai.gov/ 18 | 19 | *** 20 | 21 | #### Project Los Angeles 22 | 23 | #### Tegridy Code 2024 24 | 25 | *** 26 | 27 | # (GPU CHECK) 28 | """ 29 | 30 | #@title NVIDIA GPU check 31 | !nvidia-smi 32 | 33 | """# (SETUP ENVIRONMENT)""" 34 | 35 | #@title Install dependencies 36 | !git clone --depth 1 https://github.com/asigalov61/Chords-Progressions-Transformer 37 | !pip install huggingface_hub 38 | !pip install einops 39 | !pip install torch-summary 40 | !apt install fluidsynth #Pip does not work for some reason. Only apt works 41 | 42 | # Commented out IPython magic to ensure Python compatibility. 43 | #@title Import modules 44 | 45 | print('=' * 70) 46 | print('Loading core Chords Progressions Transformer modules...') 47 | 48 | import os 49 | import copy 50 | import pickle 51 | import secrets 52 | import statistics 53 | from time import time 54 | import tqdm 55 | 56 | print('=' * 70) 57 | print('Loading main Chords Progressions Transformer modules...') 58 | import torch 59 | 60 | # %cd /content/Chords-Progressions-Transformer 61 | 62 | import TMIDIX 63 | 64 | from midi_to_colab_audio import midi_to_colab_audio 65 | 66 | from x_transformer_1_23_2 import * 67 | 68 | import random 69 | 70 | # %cd /content/ 71 | print('=' * 70) 72 | print('Loading aux Chords Progressions Transformer modules...') 73 | 74 | import matplotlib.pyplot as plt 75 | 76 | from torchsummary import summary 77 | from sklearn import metrics 78 | 79 | from IPython.display import Audio, display 80 | 81 | from huggingface_hub import hf_hub_download 82 | 83 | from google.colab import files 84 | 85 | print('=' * 70) 86 | print('Done!') 87 | print('Enjoy! 
:)') 88 | print('=' * 70) 89 | 90 | """# (LOAD MODEL)""" 91 | 92 | #@title Load Chords Progressions Transformer Pre-Trained Model 93 | 94 | #@markdown Choose model 95 | 96 | select_model_to_load = "187M-2048E-4L-16H-FP32-Fast-Small" # @param ["93M-1024E-8L-8H-Very-Fast-Small", "187M-2048E-4L-16H-Fast-Small", "187M-2048E-4L-16H-FP32-Fast-Small"] 97 | 98 | #@markdown Model precision option 99 | 100 | model_precision = "bfloat16" # @param ["bfloat16", "float16", "float32"] 101 | 102 | #@markdown bfloat16 == Half precision/faster speed (if supported, otherwise the model will default to float16) 103 | 104 | #@markdown float16 == Full precision/fast speed 105 | 106 | plot_tokens_embeddings = False # @param {type:"boolean"} 107 | 108 | print('=' * 70) 109 | print('Loading Chords Progressions Transformer Pre-Trained Model...') 110 | print('Please wait...') 111 | print('=' * 70) 112 | 113 | full_path_to_models_dir = "/content/Chords-Progressions-Transformer/Models" 114 | 115 | if select_model_to_load == '93M-1024E-8L-8H-Very-Fast-Small': 116 | 117 | dim = 1024 118 | depth = 8 119 | heads = 8 120 | 121 | model_checkpoint_file_name = 'Chords_Progressions_Transformer_Small_Trained_Model_9609_steps_1.0704_loss_0.6927_acc.pth' 122 | model_path = full_path_to_models_dir+'/Small/'+model_checkpoint_file_name 123 | if os.path.isfile(model_path): 124 | print('Model already exists...') 125 | 126 | else: 127 | hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer', 128 | filename=model_checkpoint_file_name, 129 | local_dir='/content/Chords-Progressions-Transformer/Models/Small', 130 | local_dir_use_symlinks=False) 131 | 132 | elif select_model_to_load == '187M-2048E-4L-16H-Fast-Small': 133 | 134 | dim = 2048 135 | depth = 4 136 | heads = 16 137 | 138 | model_checkpoint_file_name = 'Chords_Progressions_Transformer_Small_2048_Trained_Model_12947_steps_0.9316_loss_0.7386_acc.pth' 139 | model_path = full_path_to_models_dir+'/Small_2048/'+model_checkpoint_file_name 140 | if os.path.isfile(model_path): 141 | print('Model already exists...') 142 | 143 | else: 144 | hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer', 145 | filename=model_checkpoint_file_name, 146 | local_dir='/content/Chords-Progressions-Transformer/Models/Small_2048', 147 | local_dir_use_symlinks=False) 148 | 149 | elif select_model_to_load == '187M-2048E-4L-16H-FP32-Fast-Small': 150 | 151 | dim = 2048 152 | depth = 4 153 | heads = 16 154 | 155 | model_checkpoint_file_name = 'Chords_Progressions_Transformer_Small_2048_FP32_Trained_Model_6265_steps_0.9272_loss_0.7369_acc.pth' 156 | model_path = full_path_to_models_dir+'/Small_2048_FP32/'+model_checkpoint_file_name 157 | if os.path.isfile(model_path): 158 | print('Model already exists...') 159 | 160 | else: 161 | hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer', 162 | filename=model_checkpoint_file_name, 163 | local_dir='/content/Chords-Progressions-Transformer/Models/Small_2048_FP32', 164 | local_dir_use_symlinks=False) 165 | 166 | print('=' * 70) 167 | print('Instantiating model...') 168 | 169 | device_type = 'cuda' 170 | 171 | if model_precision == 'bfloat16' and torch.cuda.is_bf16_supported(): 172 | dtype = 'bfloat16' 173 | else: 174 | dtype = 'float16' 175 | 176 | if model_precision == 'float16': 177 | dtype = 'float16' 178 | 179 | if model_precision == 'float32': 180 | dtype = 'float32' 181 | 182 | ptdtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16, 'float32': torch.float32}[dtype] 183 | ctx = 
torch.amp.autocast(device_type=device_type, dtype=ptdtype) 184 | 185 | SEQ_LEN = 8192 # Models seq len 186 | PAD_IDX = 707 # Models pad index 187 | 188 | # instantiate the model 189 | 190 | model = TransformerWrapper( 191 | num_tokens = PAD_IDX+1, 192 | max_seq_len = SEQ_LEN, 193 | attn_layers = Decoder(dim = dim, depth = depth, heads = heads, attn_flash = True) 194 | ) 195 | 196 | model = AutoregressiveWrapper(model, ignore_index = PAD_IDX) 197 | 198 | model.cuda() 199 | print('=' * 70) 200 | 201 | print('Loading model checkpoint...') 202 | 203 | model.load_state_dict(torch.load(model_path)) 204 | print('=' * 70) 205 | 206 | model.eval() 207 | 208 | print('Done!') 209 | print('=' * 70) 210 | 211 | print('Model will use', dtype, 'precision...') 212 | print('=' * 70) 213 | 214 | # Model stats 215 | print('Model summary...') 216 | summary(model) 217 | 218 | # Plot Token Embeddings 219 | if plot_tokens_embeddings: 220 | tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist() 221 | 222 | cos_sim = metrics.pairwise_distances( 223 | tok_emb, metric='cosine' 224 | ) 225 | plt.figure(figsize=(7, 7)) 226 | plt.imshow(cos_sim, cmap="inferno", interpolation="nearest") 227 | im_ratio = cos_sim.shape[0] / cos_sim.shape[1] 228 | plt.colorbar(fraction=0.046 * im_ratio, pad=0.04) 229 | plt.xlabel("Position") 230 | plt.ylabel("Position") 231 | plt.tight_layout() 232 | plt.plot() 233 | plt.savefig("/content/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png", bbox_inches="tight") 234 | 235 | print('=' * 70) 236 | print('Loading Chords Progressions Transformer Aux Model...') 237 | print('=' * 70) 238 | 239 | aux_model_path = '/content/Chords-Progressions-Transformer/Models/Aux/Chords_Progressions_Transformer_Aux_Trained_Model_4374_steps_0.1185_loss_0.9673_acc.pth' 240 | 241 | if os.path.isfile(aux_model_path): 242 | print('Model already exists...') 243 | 244 | else: 245 | hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer', 246 | filename='Chords_Progressions_Transformer_Aux_Trained_Model_4374_steps_0.1185_loss_0.9673_acc.pth', 247 | local_dir='/content/Chords-Progressions-Transformer/Models/Aux', 248 | local_dir_use_symlinks=False) 249 | 250 | AUX_SEQ_LEN = 1024 # Models seq len 251 | AUX_PAD_IDX = 322 # Models pad index 252 | 253 | # instantiate the model 254 | 255 | aux_model = TransformerWrapper( 256 | num_tokens = AUX_PAD_IDX+1, 257 | max_seq_len = AUX_SEQ_LEN, 258 | attn_layers = Decoder(dim = 1024, depth = 4, heads = 16, attn_flash = True) 259 | ) 260 | 261 | aux_model = AutoregressiveWrapper(aux_model, ignore_index = AUX_PAD_IDX) 262 | 263 | aux_model.cuda() 264 | print('=' * 70) 265 | 266 | print('Loading model checkpoint...') 267 | 268 | aux_model.load_state_dict(torch.load(aux_model_path)) 269 | print('=' * 70) 270 | 271 | aux_model.eval() 272 | 273 | # Model stats 274 | print('Model summary...') 275 | summary(aux_model) 276 | 277 | print('Done!') 278 | print('=' * 70) 279 | 280 | """# (CHORDS PROGRESSIONS)""" 281 | 282 | # @title Generate chords progressions from custom MIDI chords 283 | 284 | #@markdown NOTE: You can stop the generation at any time to render partial results 285 | 286 | #@markdown Generation settings 287 | 288 | output_MIDI_patch_number = 0 # @param {type:"slider", min:0, max:127, step:1} 289 | number_of_chords_to_generate = 128 # @param {type:"slider", min:8, max:1020, step:1} 290 | max_number_of_notes_per_chord = 8 # @param {type:"slider", min:1, max:10, step:1} 291 | number_of_memory_tokens = 4096 # @param {type:"slider", min:32, max:8188, 
step:4} 292 | temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05} 293 | 294 | #@markdown Other settings 295 | 296 | render_MIDI_to_audio = True # @param {type:"boolean"} 297 | 298 | #=============================================================================== 299 | 300 | def generate_chords(chords, 301 | max_chords_limit = 8, 302 | num_memory_tokens = 4096, 303 | temperature=0.9 304 | ): 305 | 306 | chords = chords[-num_memory_tokens:] 307 | 308 | x = torch.tensor([chords] * 1, dtype=torch.long, device='cuda') 309 | 310 | o = 0 311 | 312 | ncount = 0 313 | 314 | while o < 384 and ncount < max_chords_limit: 315 | with ctx: 316 | out = model.generate(x, 317 | 1, 318 | temperature=temperature, 319 | return_prime=False, 320 | verbose=False) 321 | 322 | o = out.tolist()[0][0] 323 | 324 | if 256 <= o < 384: 325 | ncount += 1 326 | 327 | if o < 384: 328 | x = torch.cat((x, out), 1) 329 | 330 | return x.tolist()[0][len(chords):] 331 | 332 | #=============================================================================== 333 | 334 | print('=' * 70) 335 | print('Chords Progressions Transformer Aux Model Generator') 336 | print('=' * 70) 337 | 338 | torch.cuda.empty_cache() 339 | 340 | x = torch.tensor([[321]] * 1, dtype=torch.long, device='cuda') 341 | 342 | out = aux_model.generate(x, 343 | number_of_chords_to_generate, 344 | temperature=temperature, 345 | return_prime=False, 346 | verbose=True) 347 | 348 | chords = [c+384 for c in out.tolist()[0]] 349 | #=============================================================================== 350 | 351 | print('=' * 70) 352 | print('Chords Progressions Transformer Primary Model Generator') 353 | print('=' * 70) 354 | 355 | torch.cuda.empty_cache() 356 | 357 | output = [] 358 | 359 | idx = 0 360 | 361 | for c in tqdm.tqdm(chords): 362 | 363 | try: 364 | 365 | output.append(c) 366 | 367 | out = generate_chords(output, 368 | temperature=temperature, 369 | max_chords_limit=max_number_of_notes_per_chord, 370 | num_memory_tokens=number_of_memory_tokens 371 | ) 372 | output.extend(out) 373 | 374 | idx += 1 375 | 376 | except KeyboardInterrupt: 377 | print('=' * 70) 378 | print('Stopping generation...') 379 | break 380 | 381 | except Exception as e: 382 | print('=' * 70) 383 | print('Error:', e) 384 | break 385 | 386 | torch.cuda.empty_cache() 387 | 388 | #=============================================================================== 389 | print('=' * 70) 390 | 391 | out1 = output 392 | 393 | print('Sample INTs', out1[:12]) 394 | print('=' * 70) 395 | 396 | if len(out) != 0: 397 | 398 | song = out1 399 | song_f = [] 400 | 401 | time = 0 402 | dur = 0 403 | vel = 90 404 | pitch = 0 405 | channel = 0 406 | 407 | patches = [0] * 16 408 | patches[0] = output_MIDI_patch_number 409 | 410 | for ss in song: 411 | 412 | if 0 <= ss < 128: 413 | 414 | time += ss * 32 415 | 416 | if 128 <= ss < 256: 417 | 418 | dur = (ss-128) * 32 419 | 420 | if 256 <= ss < 384: 421 | 422 | pitch = (ss-256) 423 | 424 | vel = max(40, pitch) 425 | 426 | song_f.append(['note', time, dur, channel, pitch, vel, output_MIDI_patch_number]) 427 | 428 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 429 | output_signature = 'Chords Progressions Transformer', 430 | output_file_name = '/content/Chords-Progressions-Transformer-Composition', 431 | track_name='Project Los Angeles', 432 | list_of_MIDI_patches=patches 433 | ) 434 | 435 | 436 | 437 | print('=' * 70) 438 | print('Displaying resulting composition...') 439 | print('=' * 70) 440 | 441 | fname = 
'/content/Chords-Progressions-Transformer-Composition' 442 | 443 | if render_MIDI_to_audio: 444 | midi_audio = midi_to_colab_audio(fname + '.mid') 445 | display(Audio(midi_audio, rate=16000, normalize=False)) 446 | 447 | TMIDIX.plot_ms_SONG(song_f, plot_title=fname) 448 | 449 | """# Congrats! You did it! :)""" -------------------------------------------------------------------------------- /chords_progressions_transformer_melody.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Chords_Progressions_Transformer_Melody.ipynb 3 | 4 | Automatically generated by Colaboratory. 5 | 6 | Original file is located at 7 | https://colab.research.google.com/drive/13YJSFR0K6Wxa_mX5HCKADwEDns6X7HIS 8 | 9 | # Chords Progressions Transformer Melody (ver. 1.0) 10 | 11 | *** 12 | 13 | Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools 14 | 15 | *** 16 | 17 | WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please excercise great humility, care, and respect. https://www.nscai.gov/ 18 | 19 | *** 20 | 21 | #### Project Los Angeles 22 | 23 | #### Tegridy Code 2024 24 | 25 | *** 26 | 27 | # (GPU CHECK) 28 | """ 29 | 30 | #@title NVIDIA GPU check 31 | !nvidia-smi 32 | 33 | """# (SETUP ENVIRONMENT)""" 34 | 35 | #@title Install dependencies 36 | !git clone --depth 1 https://github.com/asigalov61/Chords-Progressions-Transformer 37 | !pip install huggingface_hub 38 | !pip install einops 39 | !pip install torch-summary 40 | !apt install fluidsynth #Pip does not work for some reason. Only apt works 41 | 42 | # Commented out IPython magic to ensure Python compatibility. 43 | #@title Import modules 44 | 45 | print('=' * 70) 46 | print('Loading core Chords Progressions Transformer modules...') 47 | 48 | import os 49 | import copy 50 | import pickle 51 | import secrets 52 | import statistics 53 | from time import time 54 | import tqdm 55 | 56 | print('=' * 70) 57 | print('Loading main Chords Progressions Transformer modules...') 58 | import torch 59 | 60 | # %cd /content/Chords-Progressions-Transformer 61 | 62 | import TMIDIX 63 | 64 | from midi_to_colab_audio import midi_to_colab_audio 65 | 66 | from x_transformer_1_23_2 import * 67 | 68 | import random 69 | 70 | # %cd /content/ 71 | print('=' * 70) 72 | print('Loading aux Chords Progressions Transformer modules...') 73 | 74 | import matplotlib.pyplot as plt 75 | 76 | from torchsummary import summary 77 | from sklearn import metrics 78 | 79 | from IPython.display import Audio, display 80 | 81 | from huggingface_hub import hf_hub_download 82 | 83 | from google.colab import files 84 | 85 | print('=' * 70) 86 | print('Done!') 87 | print('Enjoy! 
:)') 88 | print('=' * 70) 89 | 90 | """# (LOAD MODEL)""" 91 | 92 | #@title Load Chords Progressions Transformer Pre-Trained Model 93 | 94 | #@markdown Model precision option 95 | 96 | model_precision = "bfloat16" # @param ["bfloat16", "float16"] 97 | 98 | #@markdown bfloat16 == Half precision/faster speed (if supported, otherwise the model will default to float16) 99 | 100 | #@markdown float16 == Full precision/fast speed 101 | 102 | plot_tokens_embeddings = False # @param {type:"boolean"} 103 | 104 | print('=' * 70) 105 | print('Loading Chords Progressions Transformer Melody Pre-Trained Model...') 106 | print('Please wait...') 107 | print('=' * 70) 108 | 109 | full_path_to_models_dir = "/content/Chords-Progressions-Transformer/Models" 110 | 111 | dim = 1024 112 | depth = 4 113 | heads = 8 114 | 115 | model_checkpoint_file_name = 'Chords_Progressions_Transformer_Melody_Trained_Model_31061_steps_0.3114_loss_0.9002_acc.pth' 116 | model_path = full_path_to_models_dir+'/Melody/'+model_checkpoint_file_name 117 | if os.path.isfile(model_path): 118 | print('Model already exists...') 119 | 120 | else: 121 | hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer', 122 | filename=model_checkpoint_file_name, 123 | local_dir='/content/Chords-Progressions-Transformer/Models/Melody', 124 | local_dir_use_symlinks=False) 125 | 126 | 127 | print('=' * 70) 128 | print('Instantiating model...') 129 | 130 | device_type = 'cuda' 131 | 132 | if model_precision == 'bfloat16' and torch.cuda.is_bf16_supported(): 133 | dtype = 'bfloat16' 134 | else: 135 | dtype = 'float16' 136 | 137 | if model_precision == 'float16': 138 | dtype = 'float16' 139 | 140 | ptdtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype] 141 | ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype) 142 | 143 | SEQ_LEN = 4096 # Models seq len 144 | PAD_IDX = 449 # Models pad index 145 | 146 | # instantiate the model 147 | 148 | model = TransformerWrapper( 149 | num_tokens = PAD_IDX+1, 150 | max_seq_len = SEQ_LEN, 151 | attn_layers = Decoder(dim = dim, depth = depth, heads = heads, attn_flash = True) 152 | ) 153 | 154 | model = AutoregressiveWrapper(model, ignore_index = PAD_IDX, pad_value=PAD_IDX) 155 | 156 | model.cuda() 157 | print('=' * 70) 158 | 159 | print('Loading model checkpoint...') 160 | 161 | model.load_state_dict(torch.load(model_path)) 162 | print('=' * 70) 163 | 164 | model.eval() 165 | 166 | print('Done!') 167 | print('=' * 70) 168 | 169 | print('Model will use', dtype, 'precision...') 170 | print('=' * 70) 171 | 172 | # Model stats 173 | print('Model summary...') 174 | summary(model) 175 | 176 | # Plot Token Embeddings 177 | if plot_tokens_embeddings: 178 | tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist() 179 | 180 | cos_sim = metrics.pairwise_distances( 181 | tok_emb, metric='cosine' 182 | ) 183 | plt.figure(figsize=(7, 7)) 184 | plt.imshow(cos_sim, cmap="inferno", interpolation="nearest") 185 | im_ratio = cos_sim.shape[0] / cos_sim.shape[1] 186 | plt.colorbar(fraction=0.046 * im_ratio, pad=0.04) 187 | plt.xlabel("Position") 188 | plt.ylabel("Position") 189 | plt.tight_layout() 190 | plt.plot() 191 | plt.savefig("/content/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png", bbox_inches="tight") 192 | 193 | """# (GENERATE)""" 194 | 195 | #@title Load Seed MIDI 196 | 197 | #@markdown Press play button to to upload your own seed MIDI or to load one of the provided sample seed MIDIs from the dropdown list below 198 | 199 | select_seed_MIDI = "Upload your own custom MIDI" # 
@param ["Upload your own custom MIDI", "Chords-Progressions-Transformer-Piano-Seed-1", "Chords-Progressions-Transformer-Piano-Seed-2", "Chords-Progressions-Transformer-Piano-Seed-3", "Chords-Progressions-Transformer-Piano-Seed-4", "Chords-Progressions-Transformer-Piano-Seed-5", "Chords-Progressions-Transformer-Piano-Seed-6", "Chords-Progressions-Transformer-MI-Seed-1", "Chords-Progressions-Transformer-MI-Seed-2", "Chords-Progressions-Transformer-MI-Seed-3", "Chords-Progressions-Transformer-MI-Seed-4", "Chords-Progressions-Transformer-MI-Seed-5", "Chords-Progressions-Transformer-MI-Seed-6"] 200 | render_MIDI_to_audio = False # @param {type:"boolean"} 201 | 202 | print('=' * 70) 203 | print('Chords Progressions Transformer Seed MIDI Loader') 204 | print('=' * 70) 205 | 206 | f = '' 207 | 208 | if select_seed_MIDI != "Upload your own custom MIDI": 209 | print('Loading seed MIDI...') 210 | f = '/content/Chords-Progressions-Transformer/Seeds/'+select_seed_MIDI+'.mid' 211 | 212 | else: 213 | print('Upload your own custom MIDI...') 214 | print('=' * 70) 215 | uploaded_MIDI = files.upload() 216 | if list(uploaded_MIDI.keys()): 217 | f = list(uploaded_MIDI.keys())[0] 218 | 219 | if f != '': 220 | 221 | print('=' * 70) 222 | print('File:', f) 223 | print('=' * 70) 224 | 225 | #======================================================= 226 | # START PROCESSING 227 | 228 | raw_score = TMIDIX.midi2single_track_ms_score(open(f, 'rb').read()) 229 | 230 | raw_escore = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0] 231 | 232 | raw_escore = [e for e in raw_escore if e[3] != 9] 233 | 234 | escore = TMIDIX.augment_enhanced_score_notes(raw_escore) 235 | 236 | cscore = TMIDIX.chordify_score([1000, escore]) 237 | 238 | chords_tokens = [] 239 | cho_toks = [] 240 | 241 | for c in cscore: 242 | tones_chord = sorted(set([t[4] % 12 for t in c])) 243 | 244 | try: 245 | chord_token = TMIDIX.ALL_CHORDS_SORTED.index(tones_chord) 246 | except: 247 | chord_token = TMIDIX.ALL_CHORDS_SORTED.index(TMIDIX.check_and_fix_tones_chord(tones_chord)) 248 | 249 | cho_toks.append(chord_token+128) 250 | 251 | if cho_toks: 252 | if len(cho_toks) > 1: 253 | 254 | chords_tokens.append(cho_toks) 255 | cho_toks = [cho_toks[-1]] 256 | 257 | cho_toks = cho_toks + cho_toks 258 | 259 | chords_tokens.append(cho_toks) 260 | #======================================================= 261 | 262 | song = raw_escore 263 | song_f = [] 264 | 265 | time = 0 266 | dur = 0 267 | vel = 90 268 | pitch = 0 269 | channel = 0 270 | 271 | patches = [0] * 16 272 | 273 | channel = 0 274 | 275 | for ss in song: 276 | 277 | time = ss[1] 278 | 279 | dur = ss[2] 280 | 281 | pitch = ss[4] 282 | 283 | vel = ss[5] 284 | 285 | song_f.append(['note', time, dur, channel, pitch, vel, 0]) 286 | 287 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 288 | output_signature = 'Chords Progressions Transformer', 289 | output_file_name = '/content/Chords-Progressions-Transformer-Seed-Composition', 290 | track_name='Project Los Angeles', 291 | list_of_MIDI_patches=patches 292 | ) 293 | 294 | #======================================================= 295 | 296 | print('=' * 70) 297 | print('Composition stats:') 298 | print('Composition has', len(cscore), 'chords') 299 | print('Composition has', len(chords_tokens), 'chords tokens') 300 | print('=' * 70) 301 | 302 | print('Displaying resulting composition...') 303 | print('=' * 70) 304 | 305 | fname = '/content/Chords-Progressions-Transformer-Seed-Composition' 306 | 307 | if 
render_MIDI_to_audio: 308 | midi_audio = midi_to_colab_audio(fname + '.mid') 309 | display(Audio(midi_audio, rate=16000, normalize=False)) 310 | 311 | TMIDIX.plot_ms_SONG(song_f, plot_title=fname) 312 | 313 | else: 314 | print('=' * 70) 315 | 316 | # @title Generate chords progressions melody from custom MIDI chords 317 | 318 | #@markdown NOTE: You can stop the generation at any time to render partial results 319 | 320 | #@markdown Generation settings 321 | 322 | melody_MIDI_patch_number = 40 # @param {type:"slider", min:0, max:127, step:1} 323 | chords_MIDI_patch_number = 0 # @param {type:"slider", min:0, max:127, step:1} 324 | chords_duration = 32 # @param {type:"slider", min:4, max:128, step:4} 325 | number_of_chords_to_generate_melody_for = 128 # @param {type:"slider", min:8, max:4096, step:1} 326 | max_number_of_melody_notes_per_chord = 4 # @param {type:"slider", min:1, max:10, step:1} 327 | number_of_memory_tokens = 4096 # @param {type:"slider", min:32, max:8188, step:4} 328 | temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05} 329 | 330 | #@markdown Other settings 331 | 332 | render_MIDI_to_audio = True # @param {type:"boolean"} 333 | 334 | #=============================================================================== 335 | 336 | print('=' * 70) 337 | print('Chords Progressions Transformer Melody Model Generator') 338 | print('=' * 70) 339 | 340 | torch.cuda.empty_cache() 341 | 342 | output = [] 343 | 344 | for i in tqdm.tqdm(range(len(chords_tokens[:number_of_chords_to_generate_melody_for]))): 345 | try: 346 | 347 | output.extend(chords_tokens[i]) 348 | 349 | o = 0 350 | 351 | count = 0 352 | 353 | while o < 128 and count < max_number_of_melody_notes_per_chord: 354 | 355 | x = torch.LongTensor([[output]]).cuda() 356 | 357 | with ctx: 358 | out = model.generate(x[-number_of_memory_tokens:], 359 | 1, 360 | temperature=temperature, 361 | return_prime=False, 362 | verbose=False) 363 | 364 | o = out.tolist()[0][0] 365 | 366 | if o < 128: 367 | output.append(o) 368 | count += 1 369 | 370 | except KeyboardInterrupt: 371 | print('=' * 70) 372 | print('Stopping generation...') 373 | break 374 | 375 | except Exception as e: 376 | print('=' * 70) 377 | print('Error:', e) 378 | break 379 | 380 | torch.cuda.empty_cache() 381 | 382 | #=============================================================================== 383 | print('=' * 70) 384 | 385 | out1 = output 386 | 387 | print('Sample INTs', out1[:12]) 388 | print('=' * 70) 389 | 390 | patches = [0] * 16 391 | 392 | patches[3] = melody_MIDI_patch_number 393 | patches[0] = chords_MIDI_patch_number 394 | 395 | if len(output) != 0: 396 | 397 | song = output 398 | song_f = [] 399 | 400 | time = 0 401 | dur = 10 402 | vel = 90 403 | pitch = 0 404 | channel = 0 405 | 406 | song1 = [] 407 | ptc = [] 408 | cho = [] 409 | 410 | for s in song: 411 | if s < 128: 412 | ptc.append(s) 413 | else: 414 | if ptc: 415 | grp = [cho, ptc] 416 | song1.append(grp) 417 | cho = [] 418 | ptc = [] 419 | 420 | cho.append(s) 421 | 422 | for ss in song1: 423 | 424 | tones_chord = TMIDIX.ALL_CHORDS_SORTED[(ss[0][0]-128)] 425 | 426 | dur = chords_duration 427 | 428 | for t in tones_chord: 429 | song_f.append(['note', time * 16, dur * 16, 0, 60+t, vel, chords_MIDI_patch_number]) 430 | song_f.append(['note', time * 16, dur * 16, 0, 48+t, vel, chords_MIDI_patch_number]) 431 | 432 | ptc_count = len(ss[1]) 433 | ptc_time_dur = dur // ptc_count 434 | 435 | for p in ss[1]: 436 | song_f.append(['note', time * 16, ptc_time_dur * 16, 3, p, vel, 
melody_MIDI_patch_number]) 437 | time += ptc_time_dur 438 | 439 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 440 | output_signature = 'Chords Progressions Transformer', 441 | output_file_name = '/content/Chords-Progressions-Transformer-Composition', 442 | track_name='Project Los Angeles', 443 | list_of_MIDI_patches=patches 444 | ) 445 | 446 | 447 | 448 | print('=' * 70) 449 | print('Displaying resulting composition...') 450 | print('=' * 70) 451 | 452 | fname = '/content/Chords-Progressions-Transformer-Composition' 453 | 454 | if render_MIDI_to_audio: 455 | midi_audio = midi_to_colab_audio(fname + '.mid') 456 | display(Audio(midi_audio, rate=16000, normalize=False)) 457 | 458 | TMIDIX.plot_ms_SONG(song_f, plot_title=fname) 459 | 460 | """# Congrats! You did it! :)""" -------------------------------------------------------------------------------- /Training-Code/chords_progressions_transformer_maker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Chords_Progressions_Transformer_Maker.ipynb 3 | 4 | Automatically generated by Colaboratory. 5 | 6 | Original file is located at 7 | https://colab.research.google.com/drive/1-PEf2UKSrRP8BuQh6OvHvwceXM1eZpib 8 | 9 | # Chords Progressions Transformer Maker (ver. 1.0) 10 | 11 | *** 12 | 13 | Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools 14 | 15 | *** 16 | 17 | WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please excercise great humility, care, and respect. https://www.nscai.gov/ 18 | 19 | *** 20 | 21 | #### Project Los Angeles 22 | 23 | #### Tegridy Code 2024 24 | 25 | *** 26 | 27 | # GPU check 28 | """ 29 | 30 | !nvidia-smi 31 | 32 | """# Setup environment""" 33 | 34 | !git clone --depth 1 https://github.com/asigalov61/tegridy-tools 35 | 36 | !pip install einops 37 | !pip install torch-summary 38 | 39 | # Commented out IPython magic to ensure Python compatibility. 
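# Added orientation comment (summarizing the cells below, not original code): this trainer
# expects the pickled INT files produced by the Training Dataset Maker notebooks to be
# placed in /content/INTS. Every sequence is brought to exactly 8193 tokens
# (SEQ_LEN + 1, so each batch item yields a one-token-shifted input/target pair), and an
# x-transformer decoder (TransformerWrapper + AutoregressiveWrapper, dim=1024, depth=8,
# heads=8, PAD_IDX=707) is trained with float16 autocast and a GradScaler.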
40 | # Load modules and make data dir 41 | 42 | print('Loading modules...') 43 | 44 | import os 45 | import pickle 46 | import random 47 | import secrets 48 | import tqdm 49 | import math 50 | import torch 51 | import torch.optim as optim 52 | from torch.utils.data import DataLoader, Dataset 53 | 54 | import matplotlib.pyplot as plt 55 | 56 | from torchsummary import summary 57 | from sklearn import metrics 58 | 59 | # %cd /content/tegridy-tools/tegridy-tools/ 60 | 61 | import TMIDIX 62 | 63 | # %cd /content/tegridy-tools/tegridy-tools/X-Transformer 64 | 65 | from x_transformer_1_23_2 import * 66 | 67 | torch.set_float32_matmul_precision('high') 68 | torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul 69 | torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn 70 | 71 | # %cd /content/ 72 | 73 | if not os.path.exists('/content/INTS'): 74 | os.makedirs('/content/INTS') 75 | 76 | import random 77 | 78 | print('Done') 79 | 80 | print('Torch version:', torch.__version__) 81 | 82 | """# Load training data""" 83 | 84 | dataset_addr = "/content/INTS" 85 | 86 | #========================================================================== 87 | 88 | filez = list() 89 | for (dirpath, dirnames, filenames) in os.walk(dataset_addr): 90 | filez += [os.path.join(dirpath, file) for file in filenames] 91 | print('=' * 70) 92 | 93 | random.shuffle(filez) 94 | 95 | print('Loaded', len(filez), 'data files') 96 | print('=' * 70) 97 | 98 | """# Setup model""" 99 | 100 | # Setup model 101 | 102 | # constants 103 | 104 | NUM_DATA_FILES_TO_LOAD_PER_ITER = 160 105 | 106 | SEQ_LEN = 8192 # Models seq len 107 | PAD_IDX = 707 # Models pad index 108 | 109 | NUM_EPOCHS = 20 110 | 111 | BATCH_SIZE = 4 112 | GRADIENT_ACCUMULATE_EVERY = 4 113 | 114 | LEARNING_RATE = 1e-4 115 | 116 | VALIDATE_EVERY = 100 117 | SAVE_EVERY = 500 118 | GENERATE_EVERY = 250 119 | GENERATE_LENGTH = 512 120 | PRINT_STATS_EVERY = 20 121 | 122 | # helpers 123 | 124 | def cycle(loader): 125 | while True: 126 | for data in loader: 127 | yield data 128 | 129 | # instantiate the model 130 | 131 | model = TransformerWrapper( 132 | num_tokens = PAD_IDX+1, 133 | max_seq_len = SEQ_LEN, 134 | attn_layers = Decoder(dim = 1024, depth = 8, heads = 8, attn_flash = True) 135 | ) 136 | 137 | model = AutoregressiveWrapper(model, ignore_index = PAD_IDX) 138 | 139 | # model = torch.nn.DataParallel(model) 140 | 141 | model.cuda() 142 | 143 | print('Done!') 144 | 145 | summary(model) 146 | 147 | # Dataloader 148 | 149 | class MusicDataset(Dataset): 150 | def __init__(self, data, seq_len): 151 | super().__init__() 152 | self.data = data 153 | self.seq_len = seq_len 154 | 155 | def __getitem__(self, index): 156 | 157 | # consequtive sampling 158 | 159 | full_seq = torch.Tensor(self.data[index][:self.seq_len+1]).long() 160 | 161 | return full_seq.cuda() 162 | 163 | def __len__(self): 164 | return (len(self.data) // BATCH_SIZE) * BATCH_SIZE 165 | 166 | # precision/optimizer/scaler 167 | 168 | dtype = torch.float16 169 | 170 | ctx = torch.amp.autocast(device_type='cuda', dtype=dtype) 171 | 172 | optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE) 173 | 174 | scaler = torch.cuda.amp.GradScaler() 175 | 176 | CHUNKS_LENGTH = 8193 177 | 178 | train_data = [] 179 | 180 | chunks_counter = 0 181 | discarted_chunks_counter = 1 182 | 183 | for lfa in tqdm.tqdm(filez): 184 | 185 | train_d = pickle.load(open(lfa, 'rb')) 186 | random.shuffle(train_d) 187 | for t1 in train_d: 188 | for t in t1: 189 | 190 | if t: 191 | 192 | 
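                # Added descriptive comment (not original code): the checks below keep only
                # chunks whose maximum token is below PAD_IDX, and pad shorter chunks with
                # PAD_IDX up to CHUNKS_LENGTH. Padded positions do not contribute to the loss
                # because the AutoregressiveWrapper above was created with
                # ignore_index=PAD_IDX.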
#========================================================================= 193 | # collecting all possible chunks of chunks length 194 | 195 | if 0 <= max(t) < PAD_IDX: # final data integrity check 196 | if len(t) == CHUNKS_LENGTH: 197 | train_data.append(t) 198 | 199 | else: 200 | 201 | td = t + [PAD_IDX] * (CHUNKS_LENGTH-len(t)) # padding with pad index 202 | td = td[:CHUNKS_LENGTH] 203 | train_data.append(td) 204 | 205 | 206 | chunks_counter += 1 207 | 208 | else: 209 | print('Bad data!!!') 210 | print(t) 211 | print(max(t), min(t)) 212 | continue 213 | 214 | else: 215 | print('empty') 216 | 217 | #========================================================================== 218 | 219 | print('Done!') 220 | print('=' * 70) 221 | print('Total number of imput chunks:', chunks_counter) 222 | print('Total number of good chunks:', len(train_data)) 223 | print('All data is good:', len(max(train_data, key=len)) == len(min(train_data, key=len))) 224 | print('=' * 70) 225 | print('Final data randomization...') 226 | random.shuffle(train_data) 227 | print('Done!') 228 | print('=' * 70) 229 | 230 | """# Train""" 231 | 232 | # @title Train the model 233 | 234 | train_losses = [] 235 | val_losses = [] 236 | 237 | train_accs = [] 238 | val_accs = [] 239 | 240 | nsteps = 0 241 | 242 | for ep in range(NUM_EPOCHS): 243 | 244 | print('=' * 70) 245 | print('Epoch #', ep) 246 | print('=' * 70) 247 | 248 | random.shuffle(train_data) 249 | 250 | train_dataset = MusicDataset(train_data, SEQ_LEN) 251 | val_dataset = MusicDataset(train_data, SEQ_LEN) 252 | train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE)) 253 | val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE)) 254 | 255 | NUM_BATCHES = len(train_data) // BATCH_SIZE // GRADIENT_ACCUMULATE_EVERY 256 | 257 | for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='Training'): 258 | model.train() 259 | 260 | for __ in range(GRADIENT_ACCUMULATE_EVERY): 261 | with ctx: 262 | loss, acc = model(next(train_loader)) 263 | loss = loss / GRADIENT_ACCUMULATE_EVERY 264 | scaler.scale(loss).backward(torch.ones(loss.shape).cuda()) 265 | 266 | if i % PRINT_STATS_EVERY == 0: 267 | print(f'Training loss: {loss.mean().item() * GRADIENT_ACCUMULATE_EVERY}') 268 | print(f'Training acc: {acc.mean().item()}') 269 | 270 | train_losses.append(loss.mean().item() * GRADIENT_ACCUMULATE_EVERY) 271 | train_accs.append(acc.mean().item()) 272 | 273 | scaler.unscale_(optim) 274 | torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) 275 | scaler.step(optim) 276 | scaler.update() 277 | optim.zero_grad(set_to_none=True) 278 | 279 | nsteps += 1 280 | 281 | if i % VALIDATE_EVERY == 0: 282 | model.eval() 283 | with torch.no_grad(): 284 | with ctx: 285 | val_loss, val_acc = model(next(val_loader)) 286 | 287 | print(f'Validation loss: {val_loss.mean().item()}') 288 | print(f'Validation acc: {val_acc.mean().item()}') 289 | 290 | val_losses.append(val_loss.mean().item()) 291 | val_accs.append(val_acc.mean().item()) 292 | 293 | print('Plotting training loss graph...') 294 | 295 | tr_loss_list = train_losses 296 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 297 | plt.show() 298 | plt.close() 299 | print('Done!') 300 | 301 | print('Plotting training acc graph...') 302 | 303 | tr_loss_list = train_accs 304 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 305 | plt.show() 306 | plt.close() 307 | print('Done!') 308 | 309 | print('Plotting validation loss graph...') 310 | tr_loss_list = val_losses 311 | plt.plot([i for i in 
range(len(tr_loss_list))] ,tr_loss_list, 'b') 312 | plt.show() 313 | plt.close() 314 | print('Done!') 315 | 316 | print('Plotting validation acc graph...') 317 | tr_loss_list = val_accs 318 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 319 | plt.show() 320 | plt.close() 321 | print('Done!') 322 | 323 | if i % GENERATE_EVERY == 0: 324 | model.eval() 325 | 326 | inp = random.choice(val_dataset)[:512] 327 | 328 | print(inp) 329 | 330 | with ctx: 331 | 332 | sample = model.generate(inp[None, ...], GENERATE_LENGTH) 333 | 334 | print(sample) 335 | 336 | if i % SAVE_EVERY == 0: 337 | 338 | print('Saving model progress. Please wait...') 339 | print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth') 340 | 341 | fname = '/content/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth' 342 | 343 | torch.save(model.state_dict(), fname) 344 | 345 | data = [train_losses, train_accs, val_losses, val_accs] 346 | 347 | TMIDIX.Tegridy_Any_Pickle_File_Writer(data, '/content/losses_accs') 348 | 349 | print('Done!') 350 | 351 | #====================================================================================================== 352 | 353 | print('Saving model progress. Please wait...') 354 | print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth') 355 | 356 | fname = '/content/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth' 357 | 358 | torch.save(model.state_dict(), fname) 359 | 360 | print('Done!') 361 | 362 | data = [train_losses, train_accs, val_losses, val_accs] 363 | 364 | TMIDIX.Tegridy_Any_Pickle_File_Writer(data, '/content/losses_accuracies') 365 | 366 | # Save training loss graph 367 | 368 | plt.plot([i for i in range(len(train_losses))] ,train_losses, 'b') 369 | plt.savefig('/content/training_loss_graph.png') 370 | plt.close() 371 | print('Done!') 372 | 373 | # Save training acc graph 374 | 375 | plt.plot([i for i in range(len(train_accs))] ,train_accs, 'b') 376 | plt.savefig('/content/training_acc_graph.png') 377 | plt.close() 378 | print('Done!') 379 | 380 | # Save validation loss graph 381 | 382 | plt.plot([i for i in range(len(val_losses))] ,val_losses, 'b') 383 | plt.savefig('/content/validation_loss_graph.png') 384 | plt.close() 385 | print('Done!') 386 | 387 | # Save validation acc graph 388 | 389 | plt.plot([i for i in range(len(val_accs))] ,val_accs, 'b') 390 | plt.savefig('/content/validation_acc_graph.png') 391 | plt.close() 392 | print('Done!') 393 | 394 | """# Final Save""" 395 | 396 | print('Saving model progress. 
Please wait...') 397 | print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth') 398 | 399 | fname = '/content/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth' 400 | 401 | torch.save(model.state_dict(), fname) 402 | 403 | print('Done!') 404 | 405 | data = [train_losses, train_accs, val_losses, val_accs] 406 | 407 | TMIDIX.Tegridy_Any_Pickle_File_Writer(data, '/content/losses_accuracies') 408 | 409 | train_losses, train_accs, val_losses, val_accs = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/losses_accuracies') 410 | 411 | # Save training loss graph 412 | 413 | plt.plot([i for i in range(len(train_losses))] ,train_losses, 'b') 414 | plt.savefig('/content/training_loss_graph.png') 415 | plt.close() 416 | print('Done!') 417 | 418 | # Save training acc graph 419 | 420 | plt.plot([i for i in range(len(train_accs))] ,train_accs, 'b') 421 | plt.savefig('/content/training_acc_graph.png') 422 | plt.close() 423 | print('Done!') 424 | 425 | # Save validation loss graph 426 | 427 | plt.plot([i for i in range(len(val_losses))] ,val_losses, 'b') 428 | plt.savefig('/content/validation_loss_graph.png') 429 | plt.close() 430 | print('Done!') 431 | 432 | # Save validation acc graph 433 | 434 | plt.plot([i for i in range(len(val_accs))] ,val_accs, 'b') 435 | plt.savefig('/content/validation_acc_graph.png') 436 | plt.close() 437 | print('Done!') 438 | 439 | """# Eval""" 440 | 441 | model.load_state_dict(torch.load('/content/model_checkpoint_8134_steps_0.3745_loss_0.8736_acc.pth')) 442 | model.eval() 443 | 444 | model.eval() 445 | 446 | x = torch.tensor(random.choice(train_data)[:256], dtype=torch.long, device='cuda') 447 | x = torch.tensor([[0]] * 1, dtype=torch.long, device='cuda') 448 | 449 | # run generation 450 | 451 | with ctx: 452 | out = model.generate(x, 453 | 1024, 454 | temperature=0.9, 455 | return_prime=True, 456 | verbose=True) 457 | 458 | y = out.tolist() 459 | 460 | print('---------------') 461 | 462 | #@title Test INTs 463 | 464 | train_data1 = y[0] # batch number goes here 465 | 466 | print('Sample INTs', train_data1[:15]) 467 | 468 | out = train_data1 469 | 470 | if len(out) != 0: 471 | 472 | song = out 473 | song_f = [] 474 | 475 | time = 0 476 | dur = 0 477 | vel = 90 478 | pitch = 0 479 | channel = 0 480 | 481 | patches = [0] * 16 482 | 483 | for ss in song: 484 | 485 | if 0 <= ss < 128: 486 | 487 | time += ss 488 | 489 | if 128 <= ss < 256: 490 | 491 | dur = (ss-128) 492 | 493 | if 256 <= ss < 384: 494 | 495 | pitch = (ss-256) 496 | 497 | song_f.append(['note', time, dur, channel, pitch, vel ]) 498 | 499 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 500 | output_signature = 'Chords Progressions Transformer', 501 | output_file_name = '/content/Chords-Progressions-Transformer-Composition', 502 | track_name='Project Los Angeles', 503 | list_of_MIDI_patches=patches, 504 | timings_multiplier=32 505 | ) 506 | 507 | print('Done!') 508 | 509 | tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist() 510 | 511 | cos_sim = metrics.pairwise_distances( 512 | tok_emb, metric='cosine' 513 | ) 514 | plt.figure(figsize=(7, 7)) 515 | plt.imshow(cos_sim, cmap="inferno", interpolation="nearest") 516 | im_ratio = cos_sim.shape[0] / cos_sim.shape[1] 517 | plt.colorbar(fraction=0.046 * im_ratio, pad=0.04) 518 | plt.xlabel("Position") 519 | plt.ylabel("Position") 520 | 
plt.tight_layout() 521 | plt.plot() 522 | plt.savefig("/content/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png", bbox_inches="tight") 523 | 524 | """# Congrats! You did it! :)""" -------------------------------------------------------------------------------- /Training-Code/chords_progressions_transformer_aux_maker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Chords_Progressions_Transformer_Aux_Maker.ipynb 3 | 4 | Automatically generated by Colaboratory. 5 | 6 | Original file is located at 7 | https://colab.research.google.com/drive/1WOkuW3Kq4Xp2BNiaF7FNMqghuuz5mH8x 8 | 9 | # Chords Progressions Transformer Aux Maker (ver. 1.0) 10 | 11 | *** 12 | 13 | Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools 14 | 15 | *** 16 | 17 | WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please excercise great humility, care, and respect. https://www.nscai.gov/ 18 | 19 | *** 20 | 21 | #### Project Los Angeles 22 | 23 | #### Tegridy Code 2024 24 | 25 | *** 26 | 27 | # (SETUP ENVIRONMENT) 28 | """ 29 | 30 | # @title NVIDIA GPU Check 31 | !nvidia-smi 32 | 33 | #@title Install all dependencies (run only once per session) 34 | !git clone --depth 1 https://github.com/asigalov61/tegridy-tools 35 | !pip install einops 36 | !pip install torch-summary 37 | 38 | # Commented out IPython magic to ensure Python compatibility. 39 | #@title Import all needed modules 40 | 41 | print('Loading modules...') 42 | 43 | import os 44 | import pickle 45 | import secrets 46 | import statistics 47 | import tqdm 48 | import math 49 | import copy 50 | import torch 51 | import torch.optim as optim 52 | from torch.utils.data import DataLoader, Dataset 53 | 54 | import matplotlib.pyplot as plt 55 | 56 | from torchsummary import summary 57 | from sklearn import metrics 58 | 59 | # %cd /content/tegridy-tools/tegridy-tools/ 60 | 61 | import TMIDIX 62 | 63 | # %cd /content/tegridy-tools/tegridy-tools/X-Transformer 64 | 65 | from x_transformer_1_23_2 import * 66 | 67 | torch.set_float32_matmul_precision('high') 68 | torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul 69 | torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn 70 | 71 | # %cd /content/ 72 | 73 | if not os.path.exists('/content/Dataset'): 74 | os.makedirs('/content/Dataset') 75 | 76 | if not os.path.exists('/content/INTS'): 77 | os.makedirs('/content/INTS') 78 | 79 | import random 80 | 81 | from joblib import Parallel, delayed, parallel_config 82 | 83 | print('PyTorch version:', torch.__version__) 84 | print('Done') 85 | 86 | """# (DOWNLOAD AND UNZIP MIDI DATASET)""" 87 | 88 | # Commented out IPython magic to ensure Python compatibility. 
89 | # @title Download and unzip Beautiful Music Seeds MIDI Dataset 90 | # %cd /content/Dataset 91 | !wget https://github.com/asigalov61/Tegridy-MIDI-Dataset/raw/master/Beautiful-Music-Seeds-CC-BY-NC-SA.zip 92 | !unzip Beautiful-Music-Seeds-CC-BY-NC-SA.zip 93 | !rm Beautiful-Music-Seeds-CC-BY-NC-SA.zip 94 | # %cd /content/ 95 | 96 | """# (MIDI PROCESSOR)""" 97 | 98 | #@title Load TMIDIX MIDI Processor 99 | 100 | print('=' * 70) 101 | print('Loading TMIDIX MIDI Processor...') 102 | print('=' * 70) 103 | 104 | def TMIDIX_MIDI_Processor(midi_file): 105 | 106 | melody_chords = [] 107 | 108 | try: 109 | 110 | fn = os.path.basename(midi_file) 111 | 112 | #======================================================= 113 | # START PROCESSING 114 | 115 | raw_score = TMIDIX.midi2single_track_ms_score(midi_file) 116 | 117 | escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0] 118 | 119 | escore_notes = TMIDIX.augment_enhanced_score_notes(escore_notes) 120 | 121 | all_scores = [] 122 | 123 | for pa in range(-6, 6): 124 | 125 | escore_paug = copy.deepcopy(escore_notes) 126 | 127 | for e in escore_paug: 128 | e[4] += pa 129 | 130 | cscore = TMIDIX.chordify_score([1000, escore_paug]) 131 | 132 | chords = [321] 133 | 134 | for c in cscore: 135 | 136 | tones_chord = sorted(set([cc[4] % 12 for cc in c])) 137 | 138 | if tones_chord: 139 | 140 | try: 141 | chord_tok = TMIDIX.ALL_CHORDS_SORTED.index(tones_chord) 142 | chords.append(chord_tok) 143 | except: 144 | continue 145 | 146 | if chords: 147 | all_scores.append(chords) 148 | 149 | return all_scores 150 | 151 | except Exception as e: 152 | print('Error!') 153 | print('Exception', e) 154 | return None 155 | 156 | print('Done!') 157 | print('=' * 70) 158 | 159 | """# (FILE LIST)""" 160 | 161 | #@title Save file list 162 | ########### 163 | 164 | print('=' * 70) 165 | print('Loading MIDI files...') 166 | print('This may take a while on a large dataset in particular.') 167 | 168 | dataset_addr = "/content/Dataset" 169 | 170 | # os.chdir(dataset_addr) 171 | filez = list() 172 | for (dirpath, dirnames, filenames) in os.walk(dataset_addr): 173 | filez += [os.path.join(dirpath, file) for file in filenames] 174 | print('=' * 70) 175 | 176 | if not filez: 177 | print('Could not find any MIDI files. Please check Dataset dir...') 178 | print('=' * 70) 179 | 180 | else: 181 | print('Randomizing file list...') 182 | random.shuffle(filez) 183 | print('Done!') 184 | print('=' * 70) 185 | print('Total files:', len(filez)) 186 | print('=' * 70) 187 | 188 | """# (PROCESS MIDIs)""" 189 | 190 | #@title Process MIDIs with TMIDIX MIDI processor 191 | 192 | print('=' * 70) 193 | print('TMIDIX MIDI Processor') 194 | print('=' * 70) 195 | print('Starting up...') 196 | print('=' * 70) 197 | 198 | ########### 199 | 200 | melody_chords_f = [] 201 | 202 | print('Processing MIDI files. 
Please wait...') 203 | print('=' * 70) 204 | 205 | for i in tqdm.tqdm(range(0, len(filez), 16)): 206 | 207 | with parallel_config(backend='threading', n_jobs=16, verbose = 0): 208 | 209 | output = Parallel()(delayed(TMIDIX_MIDI_Processor)(f) for f in filez[i:i+16]) 210 | 211 | for o in output: 212 | 213 | if o is not None: 214 | melody_chords_f.append(o) 215 | 216 | print('Done!') 217 | print('=' * 70) 218 | 219 | """# (SAVE/LOAD PROCESSED MIDIs)""" 220 | 221 | # @title Save processed MIDIs 222 | TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/Processed_MIDIs') 223 | 224 | # @title Load processed MIDIs 225 | melody_chords_f = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/Processed_MIDIs') 226 | print('Done!') 227 | 228 | """# (TEST PROCESSED MIDIs)""" 229 | 230 | #@title Test Processed MIDIs 231 | 232 | train_data1 = random.choice(melody_chords_f)[6] 233 | 234 | #train_data1 = max(melody_chords_f, key = len) 235 | 236 | print('Sample data:', train_data1[:15]) 237 | 238 | out = train_data1 239 | 240 | patches = [0] * 16 241 | patches[3] = 40 242 | 243 | if len(out) != 0: 244 | 245 | song = out 246 | song_f = [] 247 | 248 | time = 0 249 | dur = 0 250 | vel = 90 251 | pitch = 0 252 | channel = 0 253 | 254 | for ss in song[1:]: 255 | 256 | chord = TMIDIX.ALL_CHORDS_SORTED[ss] 257 | 258 | time += 200 259 | dur = 200 260 | 261 | for c in chord: 262 | 263 | ptc1 = 48+c 264 | ptc2 = 60+c 265 | 266 | song_f.append(['note', time, dur, channel, ptc1, vel ]) 267 | song_f.append(['note', time, dur, channel, ptc2, vel ]) 268 | 269 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 270 | output_signature = 'Chords Progressions Transformer', 271 | output_file_name = '/content/Chords-Progressions-Transformer-Composition', 272 | track_name='Project Los Angeles', 273 | ) 274 | 275 | print('Done!') 276 | 277 | """# (PREP INTs)""" 278 | 279 | # @title Convert processed MIDIs to INTs 280 | SEQ_LEN = 1024 281 | PAD_IDX = 322 282 | 283 | print('=' * 70) 284 | 285 | train_data = [] 286 | 287 | for m in tqdm.tqdm(melody_chords_f): 288 | for dat in m: 289 | 290 | dat = dat[:SEQ_LEN+1] 291 | dat += [PAD_IDX] * ((SEQ_LEN+1) - len(dat)) 292 | 293 | train_data.append(dat) 294 | 295 | # Total dict size 644 296 | 297 | random.shuffle(train_data) 298 | 299 | print('Done!') 300 | print('=' * 70) 301 | print(len(train_data), max(train_data, key=len) == min(train_data, key=len)) 302 | print('=' * 70) 303 | print(len(max(train_data, key=len)), len(min(train_data, key=len))) 304 | print('=' * 70) 305 | print(train_data[0][:15]) 306 | print('=' * 70) 307 | 308 | """# (SAVE/LOAD INTs)""" 309 | 310 | # @title Save INTs 311 | TMIDIX.Tegridy_Any_Pickle_File_Writer(train_data, '/content/Training_INTs') 312 | 313 | # @title Load INTs 314 | train_data = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/Training_INTs') 315 | print('Done!') 316 | 317 | """# (PREP MODEL)""" 318 | 319 | # @title Setup and init the model 320 | 321 | # constants 322 | 323 | SEQ_LEN = 1024 # Models seq len 324 | PAD_IDX = 322 # Models pad index 325 | 326 | BATCH_SIZE = 32 327 | NUM_EPOCHS = 200 328 | GRADIENT_ACCUMULATE_EVERY = 1 329 | 330 | LEARNING_RATE = 1e-4 331 | 332 | VALIDATE_EVERY = 100 333 | SAVE_EVERY = 500 334 | GENERATE_EVERY = 100 335 | PRINT_STATS_EVERY = 20 336 | 337 | GENERATE_LENGTH = 32 338 | 339 | # helpers 340 | 341 | def cycle(loader): 342 | while True: 343 | for data in loader: 344 | yield data 345 | 346 | # instantiate the model 347 | 348 | model = TransformerWrapper( 349 | num_tokens = PAD_IDX+1, 350 | 
max_seq_len = SEQ_LEN, 351 | attn_layers = Decoder(dim = 1024, depth = 4, heads = 16, attn_flash = True) 352 | ) 353 | 354 | model = AutoregressiveWrapper(model, ignore_index=PAD_IDX) 355 | 356 | model.cuda() 357 | 358 | print('Done!') 359 | 360 | summary(model) 361 | 362 | # Dataloader 363 | 364 | class MusicDataset(Dataset): 365 | def __init__(self, data, seq_len): 366 | super().__init__() 367 | self.data = data 368 | self.seq_len = seq_len 369 | 370 | def __getitem__(self, index): 371 | 372 | full_seq = torch.Tensor(self.data[index][:self.seq_len+1]).long() 373 | 374 | return full_seq.cuda() 375 | 376 | def __len__(self): 377 | return (len(self.data) // BATCH_SIZE) * BATCH_SIZE 378 | 379 | # precision/optimizer/scaler 380 | 381 | dtype = torch.float16 382 | 383 | ctx = torch.amp.autocast(device_type='cuda', dtype=dtype, enabled=False) 384 | 385 | optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE) 386 | 387 | scaler = torch.cuda.amp.GradScaler(enabled=False) 388 | 389 | """# (TRAIN MODEL)""" 390 | 391 | # @title Train the model 392 | 393 | train_losses = [] 394 | val_losses = [] 395 | 396 | train_accs = [] 397 | val_accs = [] 398 | 399 | nsteps = 0 400 | 401 | for ep in range(NUM_EPOCHS): 402 | 403 | print('=' * 70) 404 | print('Epoch #', ep) 405 | print('=' * 70) 406 | 407 | random.shuffle(train_data) 408 | 409 | train_dataset = MusicDataset(train_data, SEQ_LEN) 410 | val_dataset = MusicDataset(train_data, SEQ_LEN) 411 | train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE)) 412 | val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE)) 413 | 414 | NUM_BATCHES = len(train_data) // BATCH_SIZE // GRADIENT_ACCUMULATE_EVERY 415 | 416 | for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='Training'): 417 | model.train() 418 | 419 | for __ in range(GRADIENT_ACCUMULATE_EVERY): 420 | with ctx: 421 | loss, acc = model(next(train_loader)) 422 | # loss = loss / GRADIENT_ACCUMULATE_EVERY 423 | scaler.scale(loss).backward(torch.ones(loss.shape).cuda()) 424 | 425 | if i % PRINT_STATS_EVERY == 0: 426 | print(f'Training loss: {loss.mean().item() * GRADIENT_ACCUMULATE_EVERY}') 427 | print(f'Training acc: {acc.mean().item()}') 428 | 429 | train_losses.append(loss.mean().item() * GRADIENT_ACCUMULATE_EVERY) 430 | train_accs.append(acc.mean().item()) 431 | 432 | scaler.unscale_(optim) 433 | torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) 434 | scaler.step(optim) 435 | scaler.update() 436 | optim.zero_grad(set_to_none=True) 437 | 438 | nsteps += 1 439 | 440 | if i % VALIDATE_EVERY == 0: 441 | model.eval() 442 | with torch.no_grad(): 443 | with ctx: 444 | val_loss, val_acc = model(next(val_loader)) 445 | 446 | print(f'Validation loss: {val_loss.mean().item()}') 447 | print(f'Validation acc: {val_acc.mean().item()}') 448 | 449 | val_losses.append(val_loss.mean().item()) 450 | val_accs.append(val_acc.mean().item()) 451 | 452 | print('Plotting training loss graph...') 453 | 454 | tr_loss_list = train_losses 455 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 456 | plt.show() 457 | plt.close() 458 | print('Done!') 459 | 460 | print('Plotting training acc graph...') 461 | 462 | tr_loss_list = train_accs 463 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 464 | plt.show() 465 | plt.close() 466 | print('Done!') 467 | 468 | print('Plotting validation loss graph...') 469 | tr_loss_list = val_losses 470 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 471 | plt.show() 472 | plt.close() 473 | 
print('Done!') 474 | 475 | print('Plotting validation acc graph...') 476 | tr_loss_list = val_accs 477 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 478 | plt.show() 479 | plt.close() 480 | print('Done!') 481 | 482 | if i % GENERATE_EVERY == 0: 483 | model.eval() 484 | 485 | inp = random.choice(val_dataset)[:512] 486 | 487 | print(inp) 488 | 489 | with ctx: 490 | 491 | sample = model.generate(inp[None, ...], GENERATE_LENGTH) 492 | 493 | print(sample) 494 | 495 | if i % SAVE_EVERY == 0: 496 | 497 | print('Saving model progress. Please wait...') 498 | print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth') 499 | 500 | fname = '/content/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth' 501 | 502 | torch.save(model.state_dict(), fname) 503 | 504 | data = [train_losses, train_accs, val_losses, val_accs] 505 | 506 | TMIDIX.Tegridy_Any_Pickle_File_Writer(data, '/content/losses_accs') 507 | 508 | print('Done!') 509 | 510 | #====================================================================================================== 511 | 512 | print('Saving model progress. Please wait...') 513 | print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth') 514 | 515 | fname = '/content/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth' 516 | 517 | torch.save(model.state_dict(), fname) 518 | 519 | print('Done!') 520 | 521 | data = [train_losses, train_accs, val_losses, val_accs] 522 | 523 | TMIDIX.Tegridy_Any_Pickle_File_Writer(data, '/content/losses_accuracies') 524 | 525 | # Save training loss graph 526 | 527 | plt.plot([i for i in range(len(train_losses))] ,train_losses, 'b') 528 | plt.savefig('/content/training_loss_graph.png') 529 | plt.close() 530 | print('Done!') 531 | 532 | # Save training acc graph 533 | 534 | plt.plot([i for i in range(len(train_accs))] ,train_accs, 'b') 535 | plt.savefig('/content/training_acc_graph.png') 536 | plt.close() 537 | print('Done!') 538 | 539 | # Save validation loss graph 540 | 541 | plt.plot([i for i in range(len(val_losses))] ,val_losses, 'b') 542 | plt.savefig('/content/validation_loss_graph.png') 543 | plt.close() 544 | print('Done!') 545 | 546 | # Save validation acc graph 547 | 548 | plt.plot([i for i in range(len(val_accs))] ,val_accs, 'b') 549 | plt.savefig('/content/validation_acc_graph.png') 550 | plt.close() 551 | print('Done!') 552 | 553 | """# (SAVE MODEL)""" 554 | 555 | # @title Manual save 556 | print('Saving model progress. 
Please wait...') 557 | print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth') 558 | 559 | fname = '/content/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth' 560 | 561 | torch.save(model.state_dict(), fname) 562 | 563 | print('Done!') 564 | 565 | data = [train_losses, train_accs, val_losses, val_accs] 566 | 567 | TMIDIX.Tegridy_Any_Pickle_File_Writer(data, '/content/losses_accuracies') 568 | 569 | # Save training loss graph 570 | 571 | plt.plot([i for i in range(len(train_losses))] ,train_losses, 'b') 572 | plt.savefig('/content/training_loss_graph.png') 573 | plt.close() 574 | print('Done!') 575 | 576 | # Save training acc graph 577 | 578 | plt.plot([i for i in range(len(train_accs))] ,train_accs, 'b') 579 | plt.savefig('/content/training_acc_graph.png') 580 | plt.close() 581 | print('Done!') 582 | 583 | # Save validation loss graph 584 | 585 | plt.plot([i for i in range(len(val_losses))] ,val_losses, 'b') 586 | plt.savefig('/content/validation_loss_graph.png') 587 | plt.close() 588 | print('Done!') 589 | 590 | # Save validation acc graph 591 | 592 | plt.plot([i for i in range(len(val_accs))] ,val_accs, 'b') 593 | plt.savefig('/content/validation_acc_graph.png') 594 | plt.close() 595 | print('Done!') 596 | 597 | """# (EVAL MODEL)""" 598 | 599 | # @title Eval model 600 | dtype = 'float16' 601 | device_type = 'cuda' 602 | ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype] 603 | ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype) 604 | 605 | model.eval() 606 | 607 | x = torch.tensor(random.choice(train_data)[:128], dtype=torch.long, device='cuda')[None, ...] 
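# NOTE: the next line overwrites the primed batch above with a single start token
# (321, the chord-sequence start token used by the MIDI processor), so generation
# below is unconditional; comment it out to continue the random training prime instead.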
608 | x = torch.tensor([[321]] * 1, dtype=torch.long, device='cuda') 609 | 610 | 611 | # run generation 612 | 613 | #with ctx: 614 | out = model.generate(x, 615 | 128, 616 | temperature=0.9, 617 | return_prime=False, 618 | verbose=True) 619 | 620 | y = out.tolist() 621 | 622 | print('---------------') 623 | print(y[0]) 624 | 625 | #@title Convert output INTs to MIDI 626 | 627 | train_data1 = y[0] # batch number goes here 628 | 629 | print('Sample INTs', train_data1[:15]) 630 | 631 | out = train_data1 632 | 633 | patches = [0] * 16 634 | patches[3] = 40 635 | 636 | if len(out) != 0: 637 | 638 | song = out 639 | song_f = [] 640 | 641 | time = 0 642 | dur = 0 643 | vel = 90 644 | pitch = 0 645 | channel = 0 646 | 647 | for ss in song: 648 | 649 | chord = TMIDIX.ALL_CHORDS_SORTED[ss] 650 | 651 | time += 200 652 | dur = 200 653 | 654 | for c in chord: 655 | 656 | ptc1 = 48+c 657 | ptc2 = 60+c 658 | 659 | song_f.append(['note', time, dur, channel, ptc1, vel ]) 660 | song_f.append(['note', time, dur, channel, ptc2, vel ]) 661 | 662 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 663 | output_signature = 'Chords Progressions Transformer', 664 | output_file_name = '/content/Chords-Progressions-Transformer-Composition', 665 | track_name='Project Los Angeles', 666 | ) 667 | 668 | print('Done!') 669 | 670 | """# (PLOT TOKENS EMBEDDINGS)""" 671 | 672 | # @title Plot model tokens embeddings 673 | tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist() 674 | 675 | cos_sim = metrics.pairwise_distances( 676 | tok_emb, metric='cosine' 677 | ) 678 | plt.figure(figsize=(7, 7)) 679 | plt.imshow(cos_sim, cmap="inferno", interpolation="nearest") 680 | im_ratio = cos_sim.shape[0] / cos_sim.shape[1] 681 | plt.colorbar(fraction=0.046 * im_ratio, pad=0.04) 682 | plt.xlabel("Position") 683 | plt.ylabel("Position") 684 | plt.tight_layout() 685 | plt.plot() 686 | plt.savefig("/content/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png", bbox_inches="tight") 687 | 688 | """# Congrats! You did it! :)""" -------------------------------------------------------------------------------- /Training-Code/chords_progressions_transformer_melody_maker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Chords_Progressions_Transformer_Melody_Maker.ipynb 3 | 4 | Automatically generated by Colaboratory. 5 | 6 | Original file is located at 7 | https://colab.research.google.com/drive/15TXQnSMyS4UFAWvigyV_f2Ow62cqBE4A 8 | 9 | # Chords Progressions Transformer Melody Maker (ver. 1.0) 10 | 11 | *** 12 | 13 | Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools 14 | 15 | *** 16 | 17 | WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please excercise great humility, care, and respect. https://www.nscai.gov/ 18 | 19 | *** 20 | 21 | #### Project Los Angeles 22 | 23 | #### Tegridy Code 2024 24 | 25 | *** 26 | 27 | # (SETUP ENVIRONMENT) 28 | """ 29 | 30 | #@title Install all dependencies (run only once per session) 31 | 32 | !git clone --depth 1 https://github.com/asigalov61/tegridy-tools 33 | !pip install torch 34 | !pip install einops 35 | !pip install torch-summary 36 | 37 | # Commented out IPython magic to ensure Python compatibility. 38 | #@title Import all needed modules 39 | 40 | print('Loading needed modules. 
Please wait...') 41 | 42 | import os 43 | import pickle 44 | from collections import Counter 45 | import secrets 46 | import tqdm 47 | import math 48 | import copy 49 | from joblib import Parallel, delayed, parallel_config 50 | 51 | import torch 52 | import torch.optim as optim 53 | from torch.utils.data import DataLoader, Dataset 54 | 55 | torch.set_float32_matmul_precision('high') 56 | torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul 57 | torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn 58 | 59 | import matplotlib.pyplot as plt 60 | 61 | from torchsummary import summary 62 | from sklearn import metrics 63 | 64 | print('Loading TMIDIX module...') 65 | 66 | # %cd /content/tegridy-tools/tegridy-tools/ 67 | 68 | import TMIDIX 69 | #import midi_to_colab_audio 70 | 71 | print('Loading X Transformer module...') 72 | 73 | # %cd /content/tegridy-tools/tegridy-tools/X-Transformer 74 | 75 | from x_transformer_1_23_2 import * 76 | import random 77 | 78 | # %cd /content/ 79 | 80 | print('Creating I/O dirs...') 81 | 82 | if not os.path.exists('/content/Dataset'): 83 | os.makedirs('/content/Dataset') 84 | 85 | if not os.path.exists('/content/DATA'): 86 | os.makedirs('/content/DATA') 87 | 88 | print('Done!') 89 | print('PyTorch version:', torch.__version__) 90 | print('Enjoy! :)') 91 | 92 | """# (DOWNLOAD AND UNZIP MIDI DATASET)""" 93 | 94 | # Commented out IPython magic to ensure Python compatibility. 95 | # @title Download and unzip POP909 Piano Violin MIDI dataset 96 | # %cd /content/Dataset 97 | !wget https://github.com/asigalov61/Tegridy-MIDI-Dataset/raw/master/Misc/POP909-Piano-Violin-CC-BY-NC-SA.zip 98 | !unzip POP909-Piano-Violin-CC-BY-NC-SA.zip 99 | !rm POP909-Piano-Violin-CC-BY-NC-SA.zip 100 | # %cd /content/ 101 | 102 | """# (LOAD MIDI PROCESSOR)""" 103 | 104 | #@title TMIDIX MIDI Processor 105 | 106 | print('=' * 70) 107 | print('Loading TMIDIX MIDI Processor...') 108 | print('=' * 70) 109 | 110 | def TMIDIX_MIDI_Processor(midi_file): 111 | 112 | melody_chords = [] 113 | 114 | try: 115 | 116 | fn = os.path.basename(midi_file) 117 | 118 | raw_score = TMIDIX.midi2single_track_ms_score(open(midi_file, 'rb').read()) 119 | 120 | raw_escore = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0] 121 | 122 | out_scores = [] 123 | 124 | for tv in range(-6, 6): 125 | 126 | escore = copy.deepcopy(raw_escore) 127 | 128 | for e in escore: 129 | e[1] = int(e[1] / 16) 130 | e[2] = int(e[2] / 16) 131 | e[4] += tv 132 | 133 | cscore = TMIDIX.chordify_score([1000, escore]) 134 | 135 | out_score = [] 136 | 137 | chords_tokens = [] 138 | chords_pitches = [] 139 | 140 | for c in cscore: 141 | 142 | chans = sorted(set([t[3] for t in c])) 143 | 144 | tones_chord = sorted(set([t[4] % 12 for t in c])) 145 | 146 | if 3 in chans and 0 in chans: 147 | try: 148 | chord_token = TMIDIX.ALL_CHORDS_SORTED.index(tones_chord) 149 | except: 150 | chord_token = TMIDIX.ALL_CHORDS_SORTED.index(TMIDIX.check_and_fix_tones_chord(tones_chord)) 151 | 152 | chords_tokens.append(chord_token+128) 153 | 154 | if chords_tokens: 155 | if len(chords_tokens) > 1: 156 | chords_pitches_group = chords_tokens + chords_pitches 157 | out_score.extend(chords_pitches_group) 158 | chords_tokens = [chords_tokens[-1]] 159 | chords_pitches = [] 160 | 161 | if 3 in chans: 162 | pitches = sorted(set([t[4] for t in c if t[3] == 3]))[-1] 163 | 164 | chords_pitches.append(pitches) 165 | 166 | out_scores.append(out_score) 167 | 168 | return out_scores 169 | 170 | except: 171 | return None 172 | 173 | 
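# Token layout produced by the processor above (for reference):
#   0..127  -> melody pitch tokens (highest channel-3 / violin pitch per chord slice)
#   128+    -> chord tokens (TMIDIX.ALL_CHORDS_SORTED index + 128)
#   449     -> padding token (PAD_IDX in the model setup below)
#
# Optional single-file sanity check -- a minimal sketch only; the path is
# hypothetical and assumes the POP909 dataset above has already been unzipped:
#
#   sample_scores = TMIDIX_MIDI_Processor('/content/Dataset/some_song.mid')
#   if sample_scores:
#       print(sample_scores[0][:16])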
print('Done!') 174 | print('=' * 70) 175 | 176 | """# (FILES LIST)""" 177 | 178 | #@title Save file list 179 | ########### 180 | 181 | print('=' * 70) 182 | print('Loading MIDI files...') 183 | print('This may take a while on a large dataset in particular.') 184 | 185 | dataset_addr = "/content/Dataset" 186 | 187 | # os.chdir(dataset_addr) 188 | filez = list() 189 | for (dirpath, dirnames, filenames) in os.walk(dataset_addr): 190 | filez += [os.path.join(dirpath, file) for file in filenames] 191 | print('=' * 70) 192 | 193 | if not filez: 194 | print('Could not find any MIDI files. Please check Dataset dir...') 195 | print('=' * 70) 196 | 197 | else: 198 | print('Randomizing file list...') 199 | random.shuffle(filez) 200 | print('Done!') 201 | print('=' * 70) 202 | print('Total files:', len(filez)) 203 | print('=' * 70) 204 | 205 | """# (PROCESS MIDIs)""" 206 | 207 | #@title Process MIDIs with TMIDIX MIDI processor 208 | 209 | print('=' * 70) 210 | print('TMIDIX MIDI Processor') 211 | print('=' * 70) 212 | print('Starting up...') 213 | print('=' * 70) 214 | 215 | ########### 216 | 217 | melody_chords_f = [] 218 | 219 | print('Processing MIDI files. Please wait...') 220 | print('=' * 70) 221 | 222 | for i in tqdm.tqdm(range(0, len(filez), 16)): 223 | 224 | with parallel_config(backend='threading', n_jobs=4, verbose = 0): 225 | 226 | output = Parallel()(delayed(TMIDIX_MIDI_Processor)(f) for f in filez[i:i+16]) 227 | 228 | for o in output: 229 | if o is not None: 230 | melody_chords_f.append(o) 231 | 232 | print('Done!') 233 | print('=' * 70) 234 | 235 | """# (SAVE/LOAD PROCESSED MIDIs)""" 236 | 237 | #@title Save processed MIDIs 238 | TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/DATA/Processed_MIDIs') 239 | 240 | # @title Load processed MIDIs 241 | melody_chords_f = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/DATA/Processed_MIDIs') 242 | print('Done!') 243 | 244 | """# (PREP INTs)""" 245 | 246 | # @title Convert processed MIDIs to INTs for training 247 | 248 | print('=' * 70) 249 | 250 | train_data = [] 251 | 252 | for m in tqdm.tqdm(melody_chords_f): 253 | 254 | for mm in m: 255 | 256 | dat = copy.deepcopy(mm) 257 | 258 | dat = dat[:4097] 259 | dat += [449] * (4097 - len(dat)) 260 | train_data.append(dat) 261 | 262 | random.shuffle(train_data) 263 | 264 | print('Done!') 265 | print('=' * 70) 266 | if len(max(train_data, key=len)) == len(min(train_data, key=len)): 267 | print('All data is good!') 268 | else: 269 | print('WARNING!!! 
BAD DATA!!!') 270 | print('=' * 70) 271 | 272 | lens = [] 273 | 274 | for t in train_data: 275 | lens.append(len(t)) 276 | 277 | print(max(lens), sum(lens) / len(lens), min(lens)) 278 | print('=' * 70) 279 | print('Done!') 280 | print('=' * 70) 281 | 282 | """# (SAVE/LOAD TRAINING INTs)""" 283 | 284 | # @title Save INTs 285 | TMIDIX.Tegridy_Any_Pickle_File_Writer(train_data, '/content/DATA/Training_INTs') 286 | 287 | # @title Load INTs 288 | train_data = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/DATA/Training_INTs') 289 | print('Done!') 290 | 291 | """# (TEST INTs BEFORE TRAINING)""" 292 | 293 | #@title Test INTs 294 | 295 | train_data1 = random.choice(train_data) 296 | 297 | #train_data1 = max(melody_chords_f, key = len) 298 | 299 | print('Sample INTs', train_data1[:15]) 300 | 301 | out = train_data1 302 | 303 | patches = [0] * 16 304 | patches[3] = 40 305 | 306 | if len(out) != 0: 307 | 308 | song = out 309 | song_f = [] 310 | 311 | time = 0 312 | dur = 10 313 | vel = 90 314 | pitch = 0 315 | channel = 0 316 | 317 | song1 = [] 318 | ptc = [] 319 | cho = [] 320 | 321 | for s in song: 322 | if s < 128: 323 | ptc.append(s) 324 | else: 325 | if ptc: 326 | grp = [cho, ptc] 327 | song1.append(grp) 328 | cho = [] 329 | ptc = [] 330 | 331 | cho.append(s) 332 | 333 | for ss in song1: 334 | 335 | tones_chord = TMIDIX.ALL_CHORDS_SORTED[(ss[0][0]-128)] 336 | 337 | dur = 64 338 | 339 | for t in tones_chord: 340 | song_f.append(['note', time * 16, dur * 16, 0, 60+t, vel ]) 341 | song_f.append(['note', time * 16, dur * 16, 0, 48+t, vel ]) 342 | 343 | ptc_count = len(ss[1]) 344 | ptc_time_dur = dur // ptc_count 345 | 346 | for p in ss[1]: 347 | song_f.append(['note', time * 16, ptc_time_dur * 16, 3, p, vel ]) 348 | time += ptc_time_dur 349 | 350 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 351 | output_signature = 'Chords Progressions Transformer', 352 | output_file_name = '/content/Chords-progressions-Transformer-Composition', 353 | track_name='Project Los Angeles', 354 | list_of_MIDI_patches=patches 355 | ) 356 | 357 | """# (INIT THE MODEL)""" 358 | 359 | # @title Setup and init the model 360 | 361 | # constants 362 | 363 | SEQ_LEN = 4096 # Models seq len 364 | PAD_IDX = 449 # Models pad index 365 | 366 | BATCH_SIZE = 16 367 | NUM_EPOCHS = 300 368 | GRADIENT_ACCUMULATE_EVERY = 1 369 | 370 | 371 | LEARNING_RATE = 1e-4 372 | 373 | VALIDATE_EVERY = 100 374 | SAVE_EVERY = 500 375 | GENERATE_EVERY = 100 376 | PRINT_STATS_EVERY = 20 377 | 378 | GENERATE_LENGTH = 32 379 | 380 | # helpers 381 | 382 | def cycle(loader): 383 | while True: 384 | for data in loader: 385 | yield data 386 | 387 | # instantiate the model 388 | 389 | model = TransformerWrapper( 390 | num_tokens = PAD_IDX+1, 391 | max_seq_len = SEQ_LEN, 392 | attn_layers = Decoder(dim = 1024, depth = 4, heads = 8, attn_flash = True) 393 | ) 394 | 395 | model = AutoregressiveWrapper(model, ignore_index=PAD_IDX, pad_value=PAD_IDX) 396 | 397 | model.cuda() 398 | 399 | print('Done!') 400 | 401 | summary(model) 402 | 403 | # Dataloader 404 | 405 | class MusicDataset(Dataset): 406 | def __init__(self, data, seq_len): 407 | super().__init__() 408 | self.data = data 409 | self.seq_len = seq_len 410 | 411 | def __getitem__(self, index): 412 | 413 | full_seq = torch.Tensor(self.data[index][:self.seq_len+1]).long() 414 | 415 | return full_seq.cuda() 416 | 417 | def __len__(self): 418 | return (len(self.data) // BATCH_SIZE) * BATCH_SIZE 419 | 420 | # precision/optimizer/scaler 421 | 422 | dtype = torch.float16 423 | 424 | ctx = 
torch.amp.autocast(device_type='cuda', dtype=dtype, enabled=True) 425 | 426 | optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE) 427 | 428 | scaler = torch.cuda.amp.GradScaler(enabled=True) 429 | 430 | """# (TRAIN)""" 431 | 432 | # @title Train the model 433 | 434 | train_losses = [] 435 | val_losses = [] 436 | 437 | train_accs = [] 438 | val_accs = [] 439 | 440 | nsteps = 0 441 | 442 | for ep in range(NUM_EPOCHS): 443 | 444 | print('=' * 70) 445 | print('Epoch #', ep) 446 | print('=' * 70) 447 | 448 | random.shuffle(train_data) 449 | 450 | train_dataset = MusicDataset(train_data, SEQ_LEN) 451 | val_dataset = MusicDataset(train_data, SEQ_LEN) 452 | train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE)) 453 | val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE)) 454 | 455 | NUM_BATCHES = len(train_data) // BATCH_SIZE // GRADIENT_ACCUMULATE_EVERY 456 | 457 | for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='Training'): 458 | model.train() 459 | 460 | for __ in range(GRADIENT_ACCUMULATE_EVERY): 461 | with ctx: 462 | loss, acc = model(next(train_loader)) 463 | #loss = loss / GRADIENT_ACCUMULATE_EVERY 464 | scaler.scale(loss).backward() 465 | 466 | if i % PRINT_STATS_EVERY == 0: 467 | print(f'Training loss: {loss.mean().item() * GRADIENT_ACCUMULATE_EVERY}') 468 | print(f'Training acc: {acc.mean().item()}') 469 | 470 | train_losses.append(loss.mean().item() * GRADIENT_ACCUMULATE_EVERY) 471 | train_accs.append(acc.mean().item()) 472 | 473 | scaler.unscale_(optim) 474 | torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5) 475 | scaler.step(optim) 476 | scaler.update() 477 | optim.zero_grad(set_to_none=True) 478 | 479 | nsteps += 1 480 | 481 | if i % VALIDATE_EVERY == 0: 482 | model.eval() 483 | with torch.no_grad(): 484 | with ctx: 485 | val_loss, val_acc = model(next(val_loader)) 486 | 487 | print(f'Validation loss: {val_loss.mean().item()}') 488 | print(f'Validation acc: {val_acc.mean().item()}') 489 | 490 | val_losses.append(val_loss.mean().item()) 491 | val_accs.append(val_acc.mean().item()) 492 | 493 | print('Plotting training loss graph...') 494 | 495 | tr_loss_list = train_losses 496 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 497 | plt.show() 498 | plt.close() 499 | print('Done!') 500 | 501 | print('Plotting training acc graph...') 502 | 503 | tr_loss_list = train_accs 504 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 505 | plt.show() 506 | plt.close() 507 | print('Done!') 508 | 509 | print('Plotting validation loss graph...') 510 | tr_loss_list = val_losses 511 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 512 | plt.show() 513 | plt.close() 514 | print('Done!') 515 | 516 | print('Plotting validation acc graph...') 517 | tr_loss_list = val_accs 518 | plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b') 519 | plt.show() 520 | plt.close() 521 | print('Done!') 522 | 523 | if i % GENERATE_EVERY == 0: 524 | model.eval() 525 | 526 | inp = random.choice(val_dataset)[:-1] 527 | 528 | print(inp) 529 | 530 | with ctx: 531 | 532 | sample = model.generate(inp[None, ...], GENERATE_LENGTH) 533 | 534 | print(sample) 535 | 536 | if i % SAVE_EVERY == 0: 537 | 538 | print('Saving model progress. 
Please wait...') 539 | print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth') 540 | 541 | fname = '/content/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth' 542 | 543 | torch.save(model.state_dict(), fname) 544 | 545 | data = [train_losses, train_accs, val_losses, val_accs] 546 | 547 | TMIDIX.Tegridy_Any_Pickle_File_Writer(data, '/content/losses_accs') 548 | 549 | print('Done!') 550 | 551 | #====================================================================================================== 552 | 553 | print('Saving model progress. Please wait...') 554 | print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth') 555 | 556 | fname = '/content/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth' 557 | 558 | torch.save(model.state_dict(), fname) 559 | 560 | print('Done!') 561 | 562 | data = [train_losses, train_accs, val_losses, val_accs] 563 | 564 | TMIDIX.Tegridy_Any_Pickle_File_Writer(data, '/content/losses_accuracies') 565 | 566 | # Save training loss graph 567 | 568 | plt.plot([i for i in range(len(train_losses))] ,train_losses, 'b') 569 | plt.savefig('/content/training_loss_graph.png') 570 | plt.close() 571 | print('Done!') 572 | 573 | # Save training acc graph 574 | 575 | plt.plot([i for i in range(len(train_accs))] ,train_accs, 'b') 576 | plt.savefig('/content/training_acc_graph.png') 577 | plt.close() 578 | print('Done!') 579 | 580 | # Save validation loss graph 581 | 582 | plt.plot([i for i in range(len(val_losses))] ,val_losses, 'b') 583 | plt.savefig('/content/validation_loss_graph.png') 584 | plt.close() 585 | print('Done!') 586 | 587 | # Save validation acc graph 588 | 589 | plt.plot([i for i in range(len(val_accs))] ,val_accs, 'b') 590 | plt.savefig('/content/validation_acc_graph.png') 591 | plt.close() 592 | print('Done!') 593 | 594 | """# EVAL""" 595 | 596 | # @title Eval the model 597 | 598 | model.eval() 599 | 600 | x = torch.tensor(train_data[0][:512], dtype=torch.long, device='cuda')[None, ...] 
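# The prompt above primes generation with the first 512 tokens of a training
# sequence; the commented-out line below would instead start from a single bare
# chord token (128).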
601 | #x = torch.tensor([[128]] * 1, dtype=torch.long, device='cuda') 602 | 603 | # run generation 604 | 605 | with ctx: 606 | out = model.generate(x, 607 | 512, 608 | temperature=0.9, 609 | return_prime=False, 610 | verbose=True) 611 | 612 | y = out.tolist() 613 | 614 | print('=' * 70) 615 | print(y[0][:15]) 616 | print('=' * 70) 617 | 618 | #@title Test model output 619 | 620 | train_data1 = y[0] 621 | 622 | print('Sample INTs', train_data1[:15]) 623 | 624 | out = train_data1 625 | 626 | patches = [0] * 16 627 | patches[3] = 40 628 | 629 | if len(out) != 0: 630 | 631 | song = out 632 | song_f = [] 633 | 634 | time = 0 635 | dur = 10 636 | vel = 90 637 | pitch = 0 638 | channel = 0 639 | 640 | song1 = [] 641 | ptc = [] 642 | cho = [] 643 | 644 | for s in song: 645 | if s < 128: 646 | ptc.append(s) 647 | else: 648 | if ptc: 649 | grp = [cho, ptc] 650 | song1.append(grp) 651 | cho = [] 652 | ptc = [] 653 | 654 | cho.append(s) 655 | 656 | for ss in song1: 657 | 658 | tones_chord = TMIDIX.ALL_CHORDS_SORTED[(ss[0][0]-128)] 659 | 660 | dur = 64 661 | 662 | for t in tones_chord: 663 | song_f.append(['note', time * 16, dur * 16, 0, 60+t, vel ]) 664 | song_f.append(['note', time * 16, dur * 16, 0, 48+t, vel ]) 665 | 666 | ptc_count = len(ss[1]) 667 | ptc_time_dur = dur // ptc_count 668 | 669 | for p in ss[1]: 670 | song_f.append(['note', time * 16, ptc_time_dur * 16, 3, p, vel ]) 671 | time += ptc_time_dur 672 | 673 | detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f, 674 | output_signature = 'Chords Progressions Transformer', 675 | output_file_name = '/content/Chords-Progressions-Transformer-Composition', 676 | track_name='Project Los Angeles', 677 | list_of_MIDI_patches=patches 678 | ) 679 | 680 | """# (TOKENS EMBEDDINGS)""" 681 | 682 | # @title Explore model tokens embeddings 683 | tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist() 684 | 685 | cos_sim = metrics.pairwise_distances( 686 | tok_emb, metric='cosine' 687 | ) 688 | plt.figure(figsize=(7, 7)) 689 | plt.imshow(cos_sim, cmap="inferno", interpolation="nearest") 690 | im_ratio = cos_sim.shape[0] / cos_sim.shape[1] 691 | plt.colorbar(fraction=0.046 * im_ratio, pad=0.04) 692 | plt.xlabel("Position") 693 | plt.ylabel("Position") 694 | plt.tight_layout() 695 | plt.plot() 696 | plt.savefig("/content/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png", bbox_inches="tight") 697 | 698 | """# Congrats! You did it! :)""" -------------------------------------------------------------------------------- /Chords_Progressions_Transformer_Aux.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "# Chords Progressions Transformer Aux (ver. 1.0)\n", 7 | "\n", 8 | "***\n", 9 | "\n", 10 | "Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools\n", 11 | "\n", 12 | "***\n", 13 | "\n", 14 | "WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please excercise great humility, care, and respect. 
https://www.nscai.gov/\n", 15 | "\n", 16 | "***\n", 17 | "\n", 18 | "#### Project Los Angeles\n", 19 | "\n", 20 | "#### Tegridy Code 2024\n", 21 | "\n", 22 | "***" 23 | ], 24 | "metadata": { 25 | "id": "gpy3qsulqHa5" 26 | } 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "source": [ 31 | "# (GPU CHECK)" 32 | ], 33 | "metadata": { 34 | "id": "W_So4w8fqPGL" 35 | } 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "metadata": { 41 | "id": "X3rABEpKCO02", 42 | "cellView": "form" 43 | }, 44 | "outputs": [], 45 | "source": [ 46 | "#@title NVIDIA GPU check\n", 47 | "!nvidia-smi" 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "source": [ 53 | "# (SETUP ENVIRONMENT)" 54 | ], 55 | "metadata": { 56 | "id": "C0XxnXGFqVyh" 57 | } 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "metadata": { 63 | "id": "vK40g6V_BTNj", 64 | "cellView": "form" 65 | }, 66 | "outputs": [], 67 | "source": [ 68 | "#@title Install dependencies\n", 69 | "!git clone --depth 1 https://github.com/asigalov61/Chords-Progressions-Transformer\n", 70 | "!pip install huggingface_hub\n", 71 | "!pip install einops\n", 72 | "!pip install torch-summary\n", 73 | "!apt install fluidsynth #Pip does not work for some reason. Only apt works" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": { 80 | "id": "DzCOZU_gBiQV", 81 | "cellView": "form" 82 | }, 83 | "outputs": [], 84 | "source": [ 85 | "#@title Import modules\n", 86 | "\n", 87 | "print('=' * 70)\n", 88 | "print('Loading core Chords Progressions Transformer modules...')\n", 89 | "\n", 90 | "import os\n", 91 | "import copy\n", 92 | "import pickle\n", 93 | "import secrets\n", 94 | "import statistics\n", 95 | "from time import time\n", 96 | "import tqdm\n", 97 | "\n", 98 | "print('=' * 70)\n", 99 | "print('Loading main Chords Progressions Transformer modules...')\n", 100 | "import torch\n", 101 | "\n", 102 | "%cd /content/Chords-Progressions-Transformer\n", 103 | "\n", 104 | "import TMIDIX\n", 105 | "\n", 106 | "from midi_to_colab_audio import midi_to_colab_audio\n", 107 | "\n", 108 | "from x_transformer_1_23_2 import *\n", 109 | "\n", 110 | "import random\n", 111 | "\n", 112 | "%cd /content/\n", 113 | "print('=' * 70)\n", 114 | "print('Loading aux Chords Progressions Transformer modules...')\n", 115 | "\n", 116 | "import matplotlib.pyplot as plt\n", 117 | "\n", 118 | "from torchsummary import summary\n", 119 | "from sklearn import metrics\n", 120 | "\n", 121 | "from IPython.display import Audio, display\n", 122 | "\n", 123 | "from huggingface_hub import hf_hub_download\n", 124 | "\n", 125 | "from google.colab import files\n", 126 | "\n", 127 | "print('=' * 70)\n", 128 | "print('Done!')\n", 129 | "print('Enjoy! 
:)')\n", 130 | "print('=' * 70)" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": { 136 | "id": "eI3aQtHzqSnp" 137 | }, 138 | "source": [ 139 | "# (LOAD MODEL)" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "source": [ 145 | "#@title Load Chords Progressions Transformer Pre-Trained Model\n", 146 | "\n", 147 | "#@markdown Choose model\n", 148 | "\n", 149 | "select_model_to_load = \"187M-2048E-4L-16H-FP32-Fast-Small\" # @param [\"93M-1024E-8L-8H-Very-Fast-Small\", \"187M-2048E-4L-16H-Fast-Small\", \"187M-2048E-4L-16H-FP32-Fast-Small\"]\n", 150 | "\n", 151 | "#@markdown Model precision option\n", 152 | "\n", 153 | "model_precision = \"bfloat16\" # @param [\"bfloat16\", \"float16\", \"float32\"]\n", 154 | "\n", 155 | "#@markdown bfloat16 == Half precision/faster speed (if supported, otherwise the model will default to float16)\n", 156 | "\n", 157 | "#@markdown float16 == Full precision/fast speed\n", 158 | "\n", 159 | "plot_tokens_embeddings = False # @param {type:\"boolean\"}\n", 160 | "\n", 161 | "print('=' * 70)\n", 162 | "print('Loading Chords Progressions Transformer Pre-Trained Model...')\n", 163 | "print('Please wait...')\n", 164 | "print('=' * 70)\n", 165 | "\n", 166 | "full_path_to_models_dir = \"/content/Chords-Progressions-Transformer/Models\"\n", 167 | "\n", 168 | "if select_model_to_load == '93M-1024E-8L-8H-Very-Fast-Small':\n", 169 | "\n", 170 | " dim = 1024\n", 171 | " depth = 8\n", 172 | " heads = 8\n", 173 | "\n", 174 | " model_checkpoint_file_name = 'Chords_Progressions_Transformer_Small_Trained_Model_9609_steps_1.0704_loss_0.6927_acc.pth'\n", 175 | " model_path = full_path_to_models_dir+'/Small/'+model_checkpoint_file_name\n", 176 | " if os.path.isfile(model_path):\n", 177 | " print('Model already exists...')\n", 178 | "\n", 179 | " else:\n", 180 | " hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer',\n", 181 | " filename=model_checkpoint_file_name,\n", 182 | " local_dir='/content/Chords-Progressions-Transformer/Models/Small',\n", 183 | " local_dir_use_symlinks=False)\n", 184 | "\n", 185 | "elif select_model_to_load == '187M-2048E-4L-16H-Fast-Small':\n", 186 | "\n", 187 | " dim = 2048\n", 188 | " depth = 4\n", 189 | " heads = 16\n", 190 | "\n", 191 | " model_checkpoint_file_name = 'Chords_Progressions_Transformer_Small_2048_Trained_Model_12947_steps_0.9316_loss_0.7386_acc.pth'\n", 192 | " model_path = full_path_to_models_dir+'/Small_2048/'+model_checkpoint_file_name\n", 193 | " if os.path.isfile(model_path):\n", 194 | " print('Model already exists...')\n", 195 | "\n", 196 | " else:\n", 197 | " hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer',\n", 198 | " filename=model_checkpoint_file_name,\n", 199 | " local_dir='/content/Chords-Progressions-Transformer/Models/Small_2048',\n", 200 | " local_dir_use_symlinks=False)\n", 201 | "\n", 202 | "elif select_model_to_load == '187M-2048E-4L-16H-FP32-Fast-Small':\n", 203 | "\n", 204 | " dim = 2048\n", 205 | " depth = 4\n", 206 | " heads = 16\n", 207 | "\n", 208 | " model_checkpoint_file_name = 'Chords_Progressions_Transformer_Small_2048_FP32_Trained_Model_6265_steps_0.9272_loss_0.7369_acc.pth'\n", 209 | " model_path = full_path_to_models_dir+'/Small_2048_FP32/'+model_checkpoint_file_name\n", 210 | " if os.path.isfile(model_path):\n", 211 | " print('Model already exists...')\n", 212 | "\n", 213 | " else:\n", 214 | " hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer',\n", 215 | " filename=model_checkpoint_file_name,\n", 216 | " 
local_dir='/content/Chords-Progressions-Transformer/Models/Small_2048_FP32',\n", 217 | " local_dir_use_symlinks=False)\n", 218 | "\n", 219 | "print('=' * 70)\n", 220 | "print('Instantiating model...')\n", 221 | "\n", 222 | "device_type = 'cuda'\n", 223 | "\n", 224 | "if model_precision == 'bfloat16' and torch.cuda.is_bf16_supported():\n", 225 | " dtype = 'bfloat16'\n", 226 | "else:\n", 227 | " dtype = 'float16'\n", 228 | "\n", 229 | "if model_precision == 'float16':\n", 230 | " dtype = 'float16'\n", 231 | "\n", 232 | "if model_precision == 'float32':\n", 233 | " dtype = 'float32'\n", 234 | "\n", 235 | "ptdtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16, 'float32': torch.float32}[dtype]\n", 236 | "ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)\n", 237 | "\n", 238 | "SEQ_LEN = 8192 # Models seq len\n", 239 | "PAD_IDX = 707 # Models pad index\n", 240 | "\n", 241 | "# instantiate the model\n", 242 | "\n", 243 | "model = TransformerWrapper(\n", 244 | " num_tokens = PAD_IDX+1,\n", 245 | " max_seq_len = SEQ_LEN,\n", 246 | " attn_layers = Decoder(dim = dim, depth = depth, heads = heads, attn_flash = True)\n", 247 | " )\n", 248 | "\n", 249 | "model = AutoregressiveWrapper(model, ignore_index = PAD_IDX)\n", 250 | "\n", 251 | "model.cuda()\n", 252 | "print('=' * 70)\n", 253 | "\n", 254 | "print('Loading model checkpoint...')\n", 255 | "\n", 256 | "model.load_state_dict(torch.load(model_path))\n", 257 | "print('=' * 70)\n", 258 | "\n", 259 | "model.eval()\n", 260 | "\n", 261 | "print('Done!')\n", 262 | "print('=' * 70)\n", 263 | "\n", 264 | "print('Model will use', dtype, 'precision...')\n", 265 | "print('=' * 70)\n", 266 | "\n", 267 | "# Model stats\n", 268 | "print('Model summary...')\n", 269 | "summary(model)\n", 270 | "\n", 271 | "# Plot Token Embeddings\n", 272 | "if plot_tokens_embeddings:\n", 273 | " tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist()\n", 274 | "\n", 275 | " cos_sim = metrics.pairwise_distances(\n", 276 | " tok_emb, metric='cosine'\n", 277 | " )\n", 278 | " plt.figure(figsize=(7, 7))\n", 279 | " plt.imshow(cos_sim, cmap=\"inferno\", interpolation=\"nearest\")\n", 280 | " im_ratio = cos_sim.shape[0] / cos_sim.shape[1]\n", 281 | " plt.colorbar(fraction=0.046 * im_ratio, pad=0.04)\n", 282 | " plt.xlabel(\"Position\")\n", 283 | " plt.ylabel(\"Position\")\n", 284 | " plt.tight_layout()\n", 285 | " plt.plot()\n", 286 | " plt.savefig(\"/content/Chords-Progressions-Transformer-Tokens-Embeddings-Plot.png\", bbox_inches=\"tight\")\n", 287 | "\n", 288 | "print('=' * 70)\n", 289 | "print('Loading Chords Progressions Transformer Aux Model...')\n", 290 | "print('=' * 70)\n", 291 | "\n", 292 | "aux_model_path = '/content/Chords-Progressions-Transformer/Models/Aux/Chords_Progressions_Transformer_Aux_Trained_Model_4374_steps_0.1185_loss_0.9673_acc.pth'\n", 293 | "\n", 294 | "if os.path.isfile(aux_model_path):\n", 295 | " print('Model already exists...')\n", 296 | "\n", 297 | "else:\n", 298 | " hf_hub_download(repo_id='asigalov61/Chords-Progressions-Transformer',\n", 299 | " filename='Chords_Progressions_Transformer_Aux_Trained_Model_4374_steps_0.1185_loss_0.9673_acc.pth',\n", 300 | " local_dir='/content/Chords-Progressions-Transformer/Models/Aux',\n", 301 | " local_dir_use_symlinks=False)\n", 302 | "\n", 303 | "AUX_SEQ_LEN = 1024 # Models seq len\n", 304 | "AUX_PAD_IDX = 322 # Models pad index\n", 305 | "\n", 306 | "# instantiate the model\n", 307 | "\n", 308 | "aux_model = TransformerWrapper(\n", 309 | " num_tokens = AUX_PAD_IDX+1,\n", 310 | " 
max_seq_len = AUX_SEQ_LEN,\n", 311 | " attn_layers = Decoder(dim = 1024, depth = 4, heads = 16, attn_flash = True)\n", 312 | " )\n", 313 | "\n", 314 | "aux_model = AutoregressiveWrapper(aux_model, ignore_index = AUX_PAD_IDX)\n", 315 | "\n", 316 | "aux_model.cuda()\n", 317 | "print('=' * 70)\n", 318 | "\n", 319 | "print('Loading model checkpoint...')\n", 320 | "\n", 321 | "aux_model.load_state_dict(torch.load(aux_model_path))\n", 322 | "print('=' * 70)\n", 323 | "\n", 324 | "aux_model.eval()\n", 325 | "\n", 326 | "# Model stats\n", 327 | "print('Model summary...')\n", 328 | "summary(aux_model)\n", 329 | "\n", 330 | "print('Done!')\n", 331 | "print('=' * 70)" 332 | ], 333 | "metadata": { 334 | "cellView": "form", 335 | "id": "V4s_G8yUL0cH" 336 | }, 337 | "execution_count": null, 338 | "outputs": [] 339 | }, 340 | { 341 | "cell_type": "markdown", 342 | "source": [ 343 | "# (CHORDS PROGRESSIONS)" 344 | ], 345 | "metadata": { 346 | "id": "SC4GfMfYA4z7" 347 | } 348 | }, 349 | { 350 | "cell_type": "code", 351 | "source": [ 352 | "# @title Generate chords progressions from custom MIDI chords\n", 353 | "\n", 354 | "#@markdown NOTE: You can stop the generation at any time to render partial results\n", 355 | "\n", 356 | "#@markdown Generation settings\n", 357 | "\n", 358 | "output_MIDI_patch_number = 0 # @param {type:\"slider\", min:0, max:127, step:1}\n", 359 | "number_of_chords_to_generate = 128 # @param {type:\"slider\", min:8, max:1020, step:1}\n", 360 | "max_number_of_notes_per_chord = 8 # @param {type:\"slider\", min:1, max:10, step:1}\n", 361 | "number_of_memory_tokens = 4096 # @param {type:\"slider\", min:32, max:8188, step:4}\n", 362 | "temperature = 0.9 # @param {type:\"slider\", min:0.1, max:1, step:0.05}\n", 363 | "\n", 364 | "#@markdown Other settings\n", 365 | "\n", 366 | "render_MIDI_to_audio = True # @param {type:\"boolean\"}\n", 367 | "\n", 368 | "#===============================================================================\n", 369 | "\n", 370 | "def generate_chords(chords,\n", 371 | " max_chords_limit = 8,\n", 372 | " num_memory_tokens = 4096,\n", 373 | " temperature=0.9\n", 374 | " ):\n", 375 | "\n", 376 | " chords = chords[-num_memory_tokens:]\n", 377 | "\n", 378 | " x = torch.tensor([chords] * 1, dtype=torch.long, device='cuda')\n", 379 | "\n", 380 | " o = 0\n", 381 | "\n", 382 | " ncount = 0\n", 383 | "\n", 384 | " while o < 384 and ncount < max_chords_limit:\n", 385 | " with ctx:\n", 386 | " out = model.generate(x,\n", 387 | " 1,\n", 388 | " temperature=temperature,\n", 389 | " return_prime=False,\n", 390 | " verbose=False)\n", 391 | "\n", 392 | " o = out.tolist()[0][0]\n", 393 | "\n", 394 | " if 256 <= o < 384:\n", 395 | " ncount += 1\n", 396 | "\n", 397 | " if o < 384:\n", 398 | " x = torch.cat((x, out), 1)\n", 399 | "\n", 400 | " return x.tolist()[0][len(chords):]\n", 401 | "\n", 402 | "#===============================================================================\n", 403 | "\n", 404 | "print('=' * 70)\n", 405 | "print('Chords Progressions Transformer Aux Model Generator')\n", 406 | "print('=' * 70)\n", 407 | "\n", 408 | "torch.cuda.empty_cache()\n", 409 | "\n", 410 | "x = torch.tensor([[321]] * 1, dtype=torch.long, device='cuda')\n", 411 | "\n", 412 | "out = aux_model.generate(x,\n", 413 | " number_of_chords_to_generate,\n", 414 | " temperature=temperature,\n", 415 | " return_prime=False,\n", 416 | " verbose=True)\n", 417 | "\n", 418 | "chords = [c+384 for c in out.tolist()[0]]\n", 419 | 
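"# NOTE: the aux model emits chord indices (0..320); adding 384 shifts them into the primary model's chord-token range (tokens >= 384, cf. the o < 384 checks in generate_chords above)\n",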
"#===============================================================================\n", 420 | "\n", 421 | "print('=' * 70)\n", 422 | "print('Chords Progressions Transformer Primary Model Generator')\n", 423 | "print('=' * 70)\n", 424 | "\n", 425 | "torch.cuda.empty_cache()\n", 426 | "\n", 427 | "output = []\n", 428 | "\n", 429 | "idx = 0\n", 430 | "\n", 431 | "for c in tqdm.tqdm(chords):\n", 432 | "\n", 433 | " try:\n", 434 | "\n", 435 | " output.append(c)\n", 436 | "\n", 437 | " out = generate_chords(output,\n", 438 | " temperature=temperature,\n", 439 | " max_chords_limit=max_number_of_notes_per_chord,\n", 440 | " num_memory_tokens=number_of_memory_tokens\n", 441 | " )\n", 442 | " output.extend(out)\n", 443 | "\n", 444 | " idx += 1\n", 445 | "\n", 446 | " except KeyboardInterrupt:\n", 447 | " print('=' * 70)\n", 448 | " print('Stopping generation...')\n", 449 | " break\n", 450 | "\n", 451 | " except Exception as e:\n", 452 | " print('=' * 70)\n", 453 | " print('Error:', e)\n", 454 | " break\n", 455 | "\n", 456 | "torch.cuda.empty_cache()\n", 457 | "\n", 458 | "#===============================================================================\n", 459 | "print('=' * 70)\n", 460 | "\n", 461 | "out1 = output\n", 462 | "\n", 463 | "print('Sample INTs', out1[:12])\n", 464 | "print('=' * 70)\n", 465 | "\n", 466 | "if len(out) != 0:\n", 467 | "\n", 468 | " song = out1\n", 469 | " song_f = []\n", 470 | "\n", 471 | " time = 0\n", 472 | " dur = 0\n", 473 | " vel = 90\n", 474 | " pitch = 0\n", 475 | " channel = 0\n", 476 | "\n", 477 | " patches = [0] * 16\n", 478 | " patches[0] = output_MIDI_patch_number\n", 479 | "\n", 480 | " for ss in song:\n", 481 | "\n", 482 | " if 0 <= ss < 128:\n", 483 | "\n", 484 | " time += ss * 32\n", 485 | "\n", 486 | " if 128 <= ss < 256:\n", 487 | "\n", 488 | " dur = (ss-128) * 32\n", 489 | "\n", 490 | " if 256 <= ss < 384:\n", 491 | "\n", 492 | " pitch = (ss-256)\n", 493 | "\n", 494 | " vel = max(40, pitch)\n", 495 | "\n", 496 | " song_f.append(['note', time, dur, channel, pitch, vel, output_MIDI_patch_number])\n", 497 | "\n", 498 | " detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,\n", 499 | " output_signature = 'Chords Progressions Transformer',\n", 500 | " output_file_name = '/content/Chords-Progressions-Transformer-Composition',\n", 501 | " track_name='Project Los Angeles',\n", 502 | " list_of_MIDI_patches=patches\n", 503 | " )\n", 504 | "\n", 505 | "\n", 506 | "\n", 507 | " print('=' * 70)\n", 508 | " print('Displaying resulting composition...')\n", 509 | " print('=' * 70)\n", 510 | "\n", 511 | " fname = '/content/Chords-Progressions-Transformer-Composition'\n", 512 | "\n", 513 | " if render_MIDI_to_audio:\n", 514 | " midi_audio = midi_to_colab_audio(fname + '.mid')\n", 515 | " display(Audio(midi_audio, rate=16000, normalize=False))\n", 516 | "\n", 517 | " TMIDIX.plot_ms_SONG(song_f, plot_title=fname)" 518 | ], 519 | "metadata": { 520 | "cellView": "form", 521 | "id": "GTpejF_T7JwT" 522 | }, 523 | "execution_count": null, 524 | "outputs": [] 525 | }, 526 | { 527 | "cell_type": "markdown", 528 | "source": [ 529 | "# Congrats! You did it! 
:)" 530 | ], 531 | "metadata": { 532 | "id": "eoWDEy6CwDr6" 533 | } 534 | } 535 | ], 536 | "metadata": { 537 | "accelerator": "GPU", 538 | "colab": { 539 | "private_outputs": true, 540 | "provenance": [], 541 | "gpuType": "T4", 542 | "gpuClass": "premium" 543 | }, 544 | "kernelspec": { 545 | "display_name": "Python 3", 546 | "name": "python3" 547 | }, 548 | "language_info": { 549 | "codemirror_mode": { 550 | "name": "ipython", 551 | "version": 3 552 | }, 553 | "file_extension": ".py", 554 | "mimetype": "text/x-python", 555 | "name": "python", 556 | "nbconvert_exporter": "python", 557 | "pygments_lexer": "ipython3", 558 | "version": "3.9.13" 559 | } 560 | }, 561 | "nbformat": 4, 562 | "nbformat_minor": 0 563 | } -------------------------------------------------------------------------------- /Training-Data/Chords_Progressions_Transformer_Augmented_Training_Dataset_Maker.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "gradient": { 7 | "editing": false, 8 | "id": "ac5a4cf0-d9d2-47b5-9633-b53f8d99a4d2", 9 | "kernelId": "" 10 | }, 11 | "id": "SiTIpPjArIyr" 12 | }, 13 | "source": [ 14 | "# Chords Progressions Transformer Training Dataset Maker (ver. 1.0)\n", 15 | "\n", 16 | "***\n", 17 | "\n", 18 | "Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools\n", 19 | "\n", 20 | "***\n", 21 | "\n", 22 | "#### Project Los Angeles\n", 23 | "\n", 24 | "#### Tegridy Code 2024\n", 25 | "\n", 26 | "***" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": { 32 | "gradient": { 33 | "editing": false, 34 | "id": "fa0a611c-1803-42ae-bdf6-a49b5a4e781b", 35 | "kernelId": "" 36 | }, 37 | "id": "gOd93yV0sGd2" 38 | }, 39 | "source": [ 40 | "# (SETUP ENVIRONMENT)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": { 47 | "cellView": "form", 48 | "gradient": { 49 | "editing": false, 50 | "id": "a1a45a91-d909-4fd4-b67a-5e16b971d179", 51 | "kernelId": "" 52 | }, 53 | "id": "fX12Yquyuihc", 54 | "scrolled": true 55 | }, 56 | "outputs": [], 57 | "source": [ 58 | "#@title Install all dependencies (run only once per session)\n", 59 | "\n", 60 | "!git clone --depth 1 https://github.com/asigalov61/tegridy-tools" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": { 67 | "cellView": "form", 68 | "gradient": { 69 | "editing": false, 70 | "id": "b8207b76-9514-4c07-95db-95a4742e52c5", 71 | "kernelId": "" 72 | }, 73 | "id": "z7n9vnKmug1J", 74 | "scrolled": true 75 | }, 76 | "outputs": [], 77 | "source": [ 78 | "#@title Import all needed modules\n", 79 | "\n", 80 | "print('Loading core modules. 
Please wait...')\n", 81 | "\n", 82 | "import os\n", 83 | "import copy\n", 84 | "import math\n", 85 | "import statistics\n", 86 | "import random\n", 87 | "\n", 88 | "from joblib import Parallel, delayed, parallel_config\n", 89 | "\n", 90 | "from collections import Counter\n", 91 | "\n", 92 | "from tqdm import tqdm\n", 93 | "\n", 94 | "from google.colab import files\n", 95 | "\n", 96 | "print('Creating IO dirs...')\n", 97 | "\n", 98 | "if not os.path.exists('/content/Dataset'):\n", 99 | " os.makedirs('/content/Dataset')\n", 100 | "\n", 101 | "if not os.path.exists('/content/INTS'):\n", 102 | " os.makedirs('/content/INTS')\n", 103 | "\n", 104 | "print('Loading TMIDIX module...')\n", 105 | "os.chdir('/content/tegridy-tools/tegridy-tools')\n", 106 | "\n", 107 | "import TMIDIX\n", 108 | "\n", 109 | "print('Done!')\n", 110 | "\n", 111 | "os.chdir('/content/')\n", 112 | "print('Enjoy! :)')" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "source": [ 118 | "# (DOWNLOAD MIDI DATASET)" 119 | ], 120 | "metadata": { 121 | "id": "GIaKRqIHYied" 122 | } 123 | }, 124 | { 125 | "cell_type": "code", 126 | "source": [ 127 | "# @title Download and unzip Monster MIDI Dataset Sample Search Results\n", 128 | "%cd /content/Dataset/\n", 129 | "!wget https://huggingface.co/datasets/projectlosangeles/Monster-MIDI-Dataset/resolve/main/Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip\n", 130 | "!unzip Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip\n", 131 | "!rm Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip\n", 132 | "%cd /content/" 133 | ], 134 | "metadata": { 135 | "id": "CsDFx18o71l9", 136 | "cellView": "form" 137 | }, 138 | "execution_count": null, 139 | "outputs": [] 140 | }, 141 | { 142 | "cell_type": "markdown", 143 | "metadata": { 144 | "id": "JwrqQeie08t0" 145 | }, 146 | "source": [ 147 | "# (FILE LIST)" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": null, 153 | "metadata": { 154 | "cellView": "form", 155 | "id": "DuVWtdDNcqKh", 156 | "scrolled": true 157 | }, 158 | "outputs": [], 159 | "source": [ 160 | "#@title Save file list\n", 161 | "###########\n", 162 | "\n", 163 | "print('=' * 70)\n", 164 | "print('Loading MIDI files...')\n", 165 | "print('This may take a while on a large dataset in particular.')\n", 166 | "\n", 167 | "dataset_addr = \"/content/Dataset\"\n", 168 | "\n", 169 | "filez = list()\n", 170 | "for (dirpath, dirnames, filenames) in os.walk(dataset_addr):\n", 171 | " for file in filenames:\n", 172 | " if file.endswith(('.mid', '.midi', '.kar')):\n", 173 | " filez.append(os.path.join(dirpath, file))\n", 174 | "print('=' * 70)\n", 175 | "\n", 176 | "if filez == []:\n", 177 | " print('Could not find any MIDI files. 
Please check Dataset dir...')\n", 178 | " print('=' * 70)\n", 179 | "\n", 180 | "else:\n", 181 | " print('Randomizing file list...')\n", 182 | " random.shuffle(filez)\n", 183 | " print('=' * 70)\n", 184 | "\n", 185 | " TMIDIX.Tegridy_Any_Pickle_File_Writer(filez, '/content/filez')\n", 186 | " print('=' * 70)" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": null, 192 | "metadata": { 193 | "cellView": "form", 194 | "id": "qI_adhjojrJ9", 195 | "scrolled": true 196 | }, 197 | "outputs": [], 198 | "source": [ 199 | "#@title Load file list\n", 200 | "\n", 201 | "print('=' * 70)\n", 202 | "filez = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/filez')\n", 203 | "print('Done!')\n", 204 | "print('=' * 70)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "metadata": { 210 | "id": "FLxHvO-wlwfU" 211 | }, 212 | "source": [ 213 | "# (PROCESS)" 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": null, 219 | "metadata": { 220 | "id": "NOMrDxSm7Iz8", 221 | "cellView": "form" 222 | }, 223 | "outputs": [], 224 | "source": [ 225 | "# @title Load TMIDIX MIDI Processor\n", 226 | "\n", 227 | "print('=' * 70)\n", 228 | "print('Loading TMIDIX MIDI Processor...')\n", 229 | "\n", 230 | "def TMIDIX_MIDI_Processor(midi_file):\n", 231 | "\n", 232 | " try:\n", 233 | "\n", 234 | " fn = os.path.basename(midi_file)\n", 235 | "\n", 236 | " #=======================================================\n", 237 | " # START PROCESSING\n", 238 | "\n", 239 | " #===============================================================================\n", 240 | " # Raw single-track ms score\n", 241 | "\n", 242 | " raw_score = TMIDIX.midi2single_track_ms_score(midi_file)\n", 243 | "\n", 244 | " #===============================================================================\n", 245 | " # Enhanced score notes\n", 246 | "\n", 247 | " escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0]\n", 248 | "\n", 249 | " no_drums_escore_notes = [e for e in escore_notes if e[6] < 80]\n", 250 | "\n", 251 | " if len(no_drums_escore_notes) > 0:\n", 252 | "\n", 253 | " #=======================================================\n", 254 | " # PRE-PROCESSING\n", 255 | "\n", 256 | " #===============================================================================\n", 257 | " # Augmented enhanced score notes\n", 258 | "\n", 259 | " no_drums_escore_notes = TMIDIX.augment_enhanced_score_notes(no_drums_escore_notes)\n", 260 | "\n", 261 | " cscore = TMIDIX.chordify_score([1000, no_drums_escore_notes])\n", 262 | "\n", 263 | " clean_cscore = []\n", 264 | "\n", 265 | " for c in cscore:\n", 266 | " pitches = []\n", 267 | " cho = []\n", 268 | " for cc in c:\n", 269 | " if cc[4] not in pitches:\n", 270 | " cho.append(cc)\n", 271 | " pitches.append(cc[4])\n", 272 | "\n", 273 | " clean_cscore.append(cho)\n", 274 | "\n", 275 | " #=======================================================\n", 276 | " # FINAL PROCESSING\n", 277 | "\n", 278 | " all_melody_chords = []\n", 279 | "\n", 280 | " #=======================================================\n", 281 | " # Pitches augmentation\n", 282 | "\n", 283 | " for pa in range(-2, 2):\n", 284 | "\n", 285 | " melody_chords = []\n", 286 | "\n", 287 | " #=======================================================\n", 288 | " # MAIN PROCESSING CYCLE\n", 289 | " #=======================================================\n", 290 | "\n", 291 | " pe = clean_cscore[0][0]\n", 292 | "\n", 293 | " first_chord = True\n", 294 | "\n", 295 | " for c in 
clean_cscore:\n", 296 | "\n", 297 | " # Chords\n", 298 | "\n", 299 | " c.sort(key=lambda x: x[4], reverse=True)\n", 300 | "\n", 301 | " tones_chord = sorted(set([(cc[4]+pa) % 12 for cc in c]))\n", 302 | "\n", 303 | " try:\n", 304 | " chord_token = TMIDIX.ALL_CHORDS_SORTED.index(tones_chord)\n", 305 | " except:\n", 306 | " checked_tones_chord = TMIDIX.check_and_fix_tones_chord(tones_chord)\n", 307 | " chord_token = TMIDIX.ALL_CHORDS_SORTED.index(checked_tones_chord)\n", 308 | "\n", 309 | " melody_chords.extend([chord_token+384])\n", 310 | "\n", 311 | " if first_chord:\n", 312 | " melody_chords.extend([0])\n", 313 | " first_chord = False\n", 314 | "\n", 315 | " for e in c:\n", 316 | "\n", 317 | " #=======================================================\n", 318 | " # Timings...\n", 319 | "\n", 320 | " time = e[1]-pe[1]\n", 321 | "\n", 322 | " dur = e[2]\n", 323 | "\n", 324 | " if time != 0 and time % 2 != 0:\n", 325 | " time += 1\n", 326 | " if dur % 2 != 0:\n", 327 | " dur += 1\n", 328 | "\n", 329 | " delta_time = int(max(0, min(255, time)) / 2)\n", 330 | "\n", 331 | " # Durations\n", 332 | "\n", 333 | " dur = int(max(0, min(255, dur)) / 2)\n", 334 | "\n", 335 | " # Pitches\n", 336 | "\n", 337 | " ptc = max(1, min(127, e[4]+pa))\n", 338 | "\n", 339 | " #=======================================================\n", 340 | " # FINAL NOTE SEQ\n", 341 | "\n", 342 | " # Writing final note asynchronously\n", 343 | "\n", 344 | " if delta_time != 0:\n", 345 | " melody_chords.extend([delta_time, dur+128, ptc+256])\n", 346 | " else:\n", 347 | " melody_chords.extend([dur+128, ptc+256])\n", 348 | "\n", 349 | " pe = e\n", 350 | "\n", 351 | " if len(melody_chords) > 8192:\n", 352 | " break\n", 353 | "\n", 354 | " #=======================================================\n", 355 | "\n", 356 | " all_melody_chords.append(melody_chords[:8193])\n", 357 | "\n", 358 | " #=======================================================\n", 359 | "\n", 360 | " # TOTAL DICTIONARY SIZE 706+1=707\n", 361 | " #=======================================================\n", 362 | "\n", 363 | " return all_melody_chords\n", 364 | "\n", 365 | " else:\n", 366 | " return None\n", 367 | "\n", 368 | " except Exception as e:\n", 369 | " print('=' * 70)\n", 370 | " print('ERROR!!!')\n", 371 | " print('File name:', midi_file)\n", 372 | " print('Error:', e)\n", 373 | " print('=' * 70)\n", 374 | " return None\n", 375 | "\n", 376 | "print('Done!')\n", 377 | "print('=' * 70)" 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": null, 383 | "metadata": { 384 | "scrolled": true, 385 | "cellView": "form", 386 | "id": "_ia0G3sy7Iz9" 387 | }, 388 | "outputs": [], 389 | "source": [ 390 | "#@title Process MIDIs with TMIDIX MIDI processor\n", 391 | "\n", 392 | "NUMBER_OF_PARALLEL_JOBS = 16 # Number of parallel jobs\n", 393 | "NUMBER_OF_FILES_PER_ITERATION = 16 # Number of files to queue for each parallel iteration\n", 394 | "SAVE_EVERY_NUMBER_OF_ITERATIONS = 160 # Save every 2560 files\n", 395 | "\n", 396 | "print('=' * 70)\n", 397 | "print('TMIDIX MIDI Processor')\n", 398 | "print('=' * 70)\n", 399 | "print('Starting up...')\n", 400 | "print('=' * 70)\n", 401 | "\n", 402 | "###########\n", 403 | "\n", 404 | "melody_chords_f = []\n", 405 | "\n", 406 | "files_count = 0\n", 407 | "good_files_count = 0\n", 408 | "\n", 409 | "print('Processing MIDI files. 
Please wait...')\n", 410 | "print('=' * 70)\n", 411 | "\n", 412 | "for i in tqdm(range(0, len(filez), NUMBER_OF_FILES_PER_ITERATION)):\n", 413 | "\n", 414 | " with parallel_config(backend='threading', n_jobs=NUMBER_OF_PARALLEL_JOBS, verbose = 0):\n", 415 | " output = Parallel(n_jobs=NUMBER_OF_PARALLEL_JOBS, verbose=0)(delayed(TMIDIX_MIDI_Processor)(f) for f in filez[i:i+NUMBER_OF_FILES_PER_ITERATION])\n", 416 | "\n", 417 | " for o in output:\n", 418 | "\n", 419 | " if o is not None:\n", 420 | " melody_chords_f.append(o)\n", 421 | "\n", 422 | " # Saving every 2560 processed files\n", 423 | " if i % (SAVE_EVERY_NUMBER_OF_ITERATIONS * NUMBER_OF_FILES_PER_ITERATION) == 0 and i != 0:\n", 424 | " print('SAVING !!!')\n", 425 | " print('=' * 70)\n", 426 | " good_files_count += len(melody_chords_f)\n", 427 | " print('Saving processed files...')\n", 428 | " print('=' * 70)\n", 429 | " print('Data check:', min(melody_chords_f[0][0]), '===', max(melody_chords_f[0][0]), '===', len(list(set(melody_chords_f[0][0]))), '===', len(melody_chords_f[0][0]))\n", 430 | " print('=' * 70)\n", 431 | " print('Processed so far:', good_files_count, 'out of', i, '===', good_files_count / i, 'good files ratio')\n", 432 | " print('=' * 70)\n", 433 | " count = str(i)\n", 434 | " TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/INTS/CPT_INTs_'+count)\n", 435 | " melody_chords_f = []\n", 436 | " print('=' * 70)\n", 437 | "\n", 438 | "print('SAVING !!!')\n", 439 | "print('=' * 70)\n", 440 | "good_files_count += len(melody_chords_f)\n", 441 | "print('Saving processed files...')\n", 442 | "print('=' * 70)\n", 443 | "print('Data check:', min(melody_chords_f[0][0]), '===', max(melody_chords_f[0][0]), '===', len(list(set(melody_chords_f[0][0]))), '===', len(melody_chords_f[0][0]))\n", 444 | "print('=' * 70)\n", 445 | "print('Processed so far:', good_files_count, 'out of', i, '===', good_files_count / i, 'good files ratio')\n", 446 | "print('=' * 70)\n", 447 | "count = str(i)\n", 448 | "TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/INTS/CPT_INTs_'+count)\n", 449 | "print('=' * 70)" 450 | ] 451 | }, 452 | { 453 | "cell_type": "markdown", 454 | "metadata": { 455 | "id": "-ye9rNzOHX90" 456 | }, 457 | "source": [ 458 | "# (TEST INTS)" 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": null, 464 | "metadata": { 465 | "scrolled": false, 466 | "id": "pNXByX_S7Iz-", 467 | "cellView": "form" 468 | }, 469 | "outputs": [], 470 | "source": [ 471 | "#@title Test INTs\n", 472 | "\n", 473 | "train_data1 = random.choice(melody_chords_f[0])\n", 474 | "\n", 475 | "print('=' * 70)\n", 476 | "print('Seq len:', len(train_data1))\n", 477 | "print('Sample INTs', train_data1[:15])\n", 478 | "print('=' * 70)\n", 479 | "\n", 480 | "out = train_data1\n", 481 | "\n", 482 | "if len(out) != 0:\n", 483 | "\n", 484 | " song = out\n", 485 | " song_f = []\n", 486 | "\n", 487 | " time = 0\n", 488 | " dur = 0\n", 489 | " vel = 90\n", 490 | " pitch = 0\n", 491 | " channel = 0\n", 492 | "\n", 493 | " patches = [0] * 16\n", 494 | "\n", 495 | " for ss in song:\n", 496 | "\n", 497 | " if 0 <= ss < 128:\n", 498 | "\n", 499 | " time += ss\n", 500 | "\n", 501 | " if 128 <= ss < 256:\n", 502 | "\n", 503 | " dur = (ss-128)\n", 504 | "\n", 505 | " if 256 <= ss < 384:\n", 506 | "\n", 507 | " pitch = (ss-256)\n", 508 | "\n", 509 | " song_f.append(['note', time, dur, channel, pitch, vel ])\n", 510 | "\n", 511 | "detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,\n", 512 | " output_signature = 'Chords 
Progressions Transformer',\n", 513 | " output_file_name = '/content/Chords-Progressions-Transformer-Composition',\n", 514 | " track_name='Project Los Angeles',\n", 515 | " list_of_MIDI_patches=patches,\n", 516 | " timings_multiplier=32\n", 517 | " )\n", 518 | "print('=' * 70)" 519 | ] 520 | }, 521 | { 522 | "cell_type": "markdown", 523 | "source": [ 524 | "# (ZIP AND DOWNLOAD INTS)" 525 | ], 526 | "metadata": { 527 | "id": "Ooe7lcOZbpz1" 528 | } 529 | }, 530 | { 531 | "cell_type": "code", 532 | "source": [ 533 | "#@title Zip and download training INTs\n", 534 | "\n", 535 | "print('=' * 70)\n", 536 | "\n", 537 | "try:\n", 538 | " os.remove('Chords_progressions_Transformer_INTs.zip')\n", 539 | "except OSError:\n", 540 | " pass\n", 541 | "\n", 542 | "print('Zipping... Please wait...')\n", 543 | "print('=' * 70)\n", 544 | "\n", 545 | "%cd /content/INTS/\n", 546 | "!zip Chords_progressions_Transformer_INTs.zip *.pickle\n", 547 | "%cd /content/\n", 548 | "\n", 549 | "print('=' * 70)\n", 550 | "print('Done!')\n", 551 | "print('=' * 70)\n", 552 | "\n", 553 | "print('Downloading final zip file...')\n", 554 | "print('=' * 70)\n", 555 | "\n", 556 | "files.download('/content/INTS/Chords_progressions_Transformer_INTs.zip')\n", 557 | "\n", 558 | "print('Done!')\n", 559 | "print('=' * 70)" 560 | ], 561 | "metadata": { 562 | "id": "f2Ric8In9ju0", 563 | "cellView": "form" 564 | }, 565 | "execution_count": null, 566 | "outputs": [] 567 | }, 568 | { 569 | "cell_type": "markdown", 570 | "metadata": { 571 | "id": "YzCMd94Tu_gz" 572 | }, 573 | "source": [ 574 | "# Congrats! You did it! :)" 575 | ] 576 | } 577 | ], 578 | "metadata": { 579 | "colab": { 580 | "private_outputs": true, 581 | "provenance": [], 582 | "machine_shape": "hm" 583 | }, 584 | "kernelspec": { 585 | "display_name": "Python 3", 586 | "name": "python3" 587 | }, 588 | "language_info": { 589 | "codemirror_mode": { 590 | "name": "ipython", 591 | "version": 3 592 | }, 593 | "file_extension": ".py", 594 | "mimetype": "text/x-python", 595 | "name": "python", 596 | "nbconvert_exporter": "python", 597 | "pygments_lexer": "ipython3", 598 | "version": "3.9.7" 599 | } 600 | }, 601 | "nbformat": 4, 602 | "nbformat_minor": 0 603 | } -------------------------------------------------------------------------------- /Training-Data/Chords_Progressions_Transformer_Training_Dataset_Maker.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "gradient": { 7 | "editing": false, 8 | "id": "ac5a4cf0-d9d2-47b5-9633-b53f8d99a4d2", 9 | "kernelId": "" 10 | }, 11 | "id": "SiTIpPjArIyr" 12 | }, 13 | "source": [ 14 | "# Chords Progressions Transformer Training Dataset Maker (ver. 
1.0)\n", 15 | "\n", 16 | "***\n", 17 | "\n", 18 | "Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools\n", 19 | "\n", 20 | "***\n", 21 | "\n", 22 | "#### Project Los Angeles\n", 23 | "\n", 24 | "#### Tegridy Code 2024\n", 25 | "\n", 26 | "***" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": { 32 | "gradient": { 33 | "editing": false, 34 | "id": "fa0a611c-1803-42ae-bdf6-a49b5a4e781b", 35 | "kernelId": "" 36 | }, 37 | "id": "gOd93yV0sGd2" 38 | }, 39 | "source": [ 40 | "# (SETUP ENVIRONMENT)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": { 47 | "cellView": "form", 48 | "gradient": { 49 | "editing": false, 50 | "id": "a1a45a91-d909-4fd4-b67a-5e16b971d179", 51 | "kernelId": "" 52 | }, 53 | "id": "fX12Yquyuihc", 54 | "scrolled": true 55 | }, 56 | "outputs": [], 57 | "source": [ 58 | "#@title Install all dependencies (run only once per session)\n", 59 | "\n", 60 | "!git clone --depth 1 https://github.com/asigalov61/tegridy-tools" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": { 67 | "cellView": "form", 68 | "gradient": { 69 | "editing": false, 70 | "id": "b8207b76-9514-4c07-95db-95a4742e52c5", 71 | "kernelId": "" 72 | }, 73 | "id": "z7n9vnKmug1J", 74 | "scrolled": true 75 | }, 76 | "outputs": [], 77 | "source": [ 78 | "#@title Import all needed modules\n", 79 | "\n", 80 | "print('Loading core modules. Please wait...')\n", 81 | "\n", 82 | "import os\n", 83 | "import copy\n", 84 | "import math\n", 85 | "import statistics\n", 86 | "import random\n", 87 | "\n", 88 | "from joblib import Parallel, delayed, parallel_config\n", 89 | "\n", 90 | "from collections import Counter\n", 91 | "\n", 92 | "from tqdm import tqdm\n", 93 | "\n", 94 | "from google.colab import files\n", 95 | "\n", 96 | "print('Creating IO dirs...')\n", 97 | "\n", 98 | "if not os.path.exists('/content/Dataset'):\n", 99 | " os.makedirs('/content/Dataset')\n", 100 | "\n", 101 | "if not os.path.exists('/content/INTS'):\n", 102 | " os.makedirs('/content/INTS')\n", 103 | "\n", 104 | "print('Loading TMIDIX module...')\n", 105 | "os.chdir('/content/tegridy-tools/tegridy-tools')\n", 106 | "\n", 107 | "import TMIDIX\n", 108 | "\n", 109 | "print('Done!')\n", 110 | "\n", 111 | "os.chdir('/content/')\n", 112 | "print('Enjoy! 
:)')" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "source": [ 118 | "# (DOWNLOAD MIDI DATASET)" 119 | ], 120 | "metadata": { 121 | "id": "GIaKRqIHYied" 122 | } 123 | }, 124 | { 125 | "cell_type": "code", 126 | "source": [ 127 | "# @title Download and unzip Monster MIDI Dataset Sample Search Results\n", 128 | "%cd /content/Dataset/\n", 129 | "!wget https://huggingface.co/datasets/projectlosangeles/Monster-MIDI-Dataset/resolve/main/Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip\n", 130 | "!unzip Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip\n", 131 | "!rm Monster_MIDI_Dataset_Search_Results_Ver_1_0_CC_BY_NC_SA.zip\n", 132 | "%cd /content/" 133 | ], 134 | "metadata": { 135 | "id": "CsDFx18o71l9", 136 | "cellView": "form" 137 | }, 138 | "execution_count": null, 139 | "outputs": [] 140 | }, 141 | { 142 | "cell_type": "markdown", 143 | "metadata": { 144 | "id": "JwrqQeie08t0" 145 | }, 146 | "source": [ 147 | "# (FILE LIST)" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": null, 153 | "metadata": { 154 | "cellView": "form", 155 | "id": "DuVWtdDNcqKh", 156 | "scrolled": true 157 | }, 158 | "outputs": [], 159 | "source": [ 160 | "#@title Save file list\n", 161 | "###########\n", 162 | "\n", 163 | "print('=' * 70)\n", 164 | "print('Loading MIDI files...')\n", 165 | "print('This may take a while on a large dataset in particular.')\n", 166 | "\n", 167 | "dataset_addr = \"/content/Dataset\"\n", 168 | "\n", 169 | "filez = list()\n", 170 | "for (dirpath, dirnames, filenames) in os.walk(dataset_addr):\n", 171 | " for file in filenames:\n", 172 | " if file.endswith(('.mid', '.midi', '.kar')):\n", 173 | " filez.append(os.path.join(dirpath, file))\n", 174 | "print('=' * 70)\n", 175 | "\n", 176 | "if filez == []:\n", 177 | " print('Could not find any MIDI files. 
Please check Dataset dir...')\n", 178 | " print('=' * 70)\n", 179 | "\n", 180 | "else:\n", 181 | " print('Randomizing file list...')\n", 182 | " random.shuffle(filez)\n", 183 | " print('=' * 70)\n", 184 | "\n", 185 | " TMIDIX.Tegridy_Any_Pickle_File_Writer(filez, '/content/filez')\n", 186 | " print('=' * 70)" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": null, 192 | "metadata": { 193 | "cellView": "form", 194 | "id": "qI_adhjojrJ9", 195 | "scrolled": true 196 | }, 197 | "outputs": [], 198 | "source": [ 199 | "#@title Load file list\n", 200 | "\n", 201 | "print('=' * 70)\n", 202 | "filez = TMIDIX.Tegridy_Any_Pickle_File_Reader('/content/filez')\n", 203 | "print('Done!')\n", 204 | "print('=' * 70)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "metadata": { 210 | "id": "FLxHvO-wlwfU" 211 | }, 212 | "source": [ 213 | "# (PROCESS)" 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": null, 219 | "metadata": { 220 | "id": "NOMrDxSm7Iz8", 221 | "cellView": "form" 222 | }, 223 | "outputs": [], 224 | "source": [ 225 | "# @title Load TMIDIX MIDI Processor\n", 226 | "apply_timings_compression_and_quantization = False # @param {type:\"boolean\"}\n", 227 | "\n", 228 | "print('=' * 70)\n", 229 | "print('Loading TMIDIX MIDI Processor...')\n", 230 | "\n", 231 | "def TMIDIX_MIDI_Processor(midi_file):\n", 232 | "\n", 233 | " try:\n", 234 | "\n", 235 | " fn = os.path.basename(midi_file)\n", 236 | "\n", 237 | " #=======================================================\n", 238 | " # START PROCESSING\n", 239 | "\n", 240 | " #===============================================================================\n", 241 | " # Raw single-track ms score\n", 242 | "\n", 243 | " raw_score = TMIDIX.midi2single_track_ms_score(midi_file)\n", 244 | "\n", 245 | " #===============================================================================\n", 246 | " # Enhanced score notes\n", 247 | "\n", 248 | " escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True)[0]\n", 249 | "\n", 250 | " no_drums_escore_notes = [e for e in escore_notes if e[6] < 80]\n", 251 | "\n", 252 | " if len(no_drums_escore_notes) > 0:\n", 253 | "\n", 254 | " #=======================================================\n", 255 | " # PRE-PROCESSING\n", 256 | "\n", 257 | " #===============================================================================\n", 258 | " # Augmented enhanced score notes\n", 259 | "\n", 260 | " if apply_timings_compression_and_quantization:\n", 261 | " no_drums_escore_notes = TMIDIX.augment_enhanced_score_notes(no_drums_escore_notes)\n", 262 | "\n", 263 | " else:\n", 264 | " no_drums_escore_notes = TMIDIX.augment_enhanced_score_notes(no_drums_escore_notes,\n", 265 | " timings_divider=32\n", 266 | " )\n", 267 | "\n", 268 | " cscore = TMIDIX.chordify_score([1000, no_drums_escore_notes])\n", 269 | "\n", 270 | " clean_cscore = []\n", 271 | "\n", 272 | " for c in cscore:\n", 273 | " pitches = []\n", 274 | " cho = []\n", 275 | " for cc in c:\n", 276 | " if cc[4] not in pitches:\n", 277 | " cho.append(cc)\n", 278 | " pitches.append(cc[4])\n", 279 | "\n", 280 | " clean_cscore.append(cho)\n", 281 | "\n", 282 | " #=======================================================\n", 283 | " # FINAL PROCESSING\n", 284 | "\n", 285 | " melody_chords = []\n", 286 | "\n", 287 | " #=======================================================\n", 288 | " # MAIN PROCESSING CYCLE\n", 289 | " #=======================================================\n", 290 | "\n", 291 | " 
pe = clean_cscore[0][0]\n", 292 | "\n", 293 | " first_chord = True\n", 294 | "\n", 295 | " for c in clean_cscore:\n", 296 | "\n", 297 | " # Chords\n", 298 | "\n", 299 | " c.sort(key=lambda x: x[4], reverse=True)\n", 300 | "\n", 301 | " tones_chord = sorted(set([cc[4] % 12 for cc in c]))\n", 302 | "\n", 303 | " try:\n", 304 | " chord_token = TMIDIX.ALL_CHORDS_SORTED.index(tones_chord)\n", 305 | " except:\n", 306 | " checked_tones_chord = TMIDIX.check_and_fix_tones_chord(tones_chord)\n", 307 | " chord_token = TMIDIX.ALL_CHORDS_SORTED.index(checked_tones_chord)\n", 308 | "\n", 309 | " melody_chords.extend([chord_token+384])\n", 310 | "\n", 311 | " if first_chord:\n", 312 | " melody_chords.extend([0])\n", 313 | " first_chord = False\n", 314 | "\n", 315 | " for e in c:\n", 316 | "\n", 317 | " #=======================================================\n", 318 | " # Timings...\n", 319 | "\n", 320 | " time = e[1]-pe[1]\n", 321 | "\n", 322 | " dur = e[2]\n", 323 | "\n", 324 | " if apply_timings_compression_and_quantization:\n", 325 | "\n", 326 | " if time != 0 and time % 2 != 0:\n", 327 | " time += 1\n", 328 | " if dur % 2 != 0:\n", 329 | " dur += 1\n", 330 | "\n", 331 | " delta_time = int(max(0, min(255, time)) / 2)\n", 332 | "\n", 333 | " dur = int(max(0, min(255, dur)) / 2)\n", 334 | "\n", 335 | " else:\n", 336 | " delta_time = max(0, min(127, time))\n", 337 | " dur = max(1, min(127, dur))\n", 338 | "\n", 339 | " # Pitches\n", 340 | "\n", 341 | " ptc = max(1, min(127, e[4]))\n", 342 | "\n", 343 | " #=======================================================\n", 344 | " # FINAL NOTE SEQ\n", 345 | "\n", 346 | " # Writing final note asynchronously\n", 347 | "\n", 348 | " if delta_time != 0:\n", 349 | " melody_chords.extend([delta_time, dur+128, ptc+256])\n", 350 | " else:\n", 351 | " melody_chords.extend([dur+128, ptc+256])\n", 352 | "\n", 353 | " pe = e\n", 354 | "\n", 355 | " if len(melody_chords) > 8192:\n", 356 | " break\n", 357 | "\n", 358 | " #=======================================================\n", 359 | "\n", 360 | " # TOTAL DICTIONARY SIZE 706+1=707\n", 361 | " #=======================================================\n", 362 | "\n", 363 | " return melody_chords[:8193]\n", 364 | "\n", 365 | " else:\n", 366 | " return None\n", 367 | "\n", 368 | " except Exception as e:\n", 369 | " print('=' * 70)\n", 370 | " print('ERROR!!!')\n", 371 | " print('File name:', midi_file)\n", 372 | " print('Error:', e)\n", 373 | " print('=' * 70)\n", 374 | " return None\n", 375 | "\n", 376 | "print('Done!')\n", 377 | "print('=' * 70)" 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": null, 383 | "metadata": { 384 | "scrolled": true, 385 | "cellView": "form", 386 | "id": "_ia0G3sy7Iz9" 387 | }, 388 | "outputs": [], 389 | "source": [ 390 | "#@title Process MIDIs with TMIDIX MIDI processor\n", 391 | "\n", 392 | "NUMBER_OF_PARALLEL_JOBS = 16 # Number of parallel jobs\n", 393 | "NUMBER_OF_FILES_PER_ITERATION = 16 # Number of files to queue for each parallel iteration\n", 394 | "SAVE_EVERY_NUMBER_OF_ITERATIONS = 160 # Save every 2560 files\n", 395 | "\n", 396 | "print('=' * 70)\n", 397 | "print('TMIDIX MIDI Processor')\n", 398 | "print('=' * 70)\n", 399 | "print('Starting up...')\n", 400 | "print('=' * 70)\n", 401 | "\n", 402 | "###########\n", 403 | "\n", 404 | "melody_chords_f = []\n", 405 | "\n", 406 | "files_count = 0\n", 407 | "good_files_count = 0\n", 408 | "\n", 409 | "print('Processing MIDI files. 
Please wait...')\n", 410 | "print('=' * 70)\n", 411 | "\n", 412 | "for i in tqdm(range(0, len(filez), NUMBER_OF_FILES_PER_ITERATION)):\n", 413 | "\n", 414 | " with parallel_config(backend='threading', n_jobs=NUMBER_OF_PARALLEL_JOBS, verbose = 0):\n", 415 | " output = Parallel(n_jobs=NUMBER_OF_PARALLEL_JOBS, verbose=0)(delayed(TMIDIX_MIDI_Processor)(f) for f in filez[i:i+NUMBER_OF_FILES_PER_ITERATION])\n", 416 | "\n", 417 | " for o in output:\n", 418 | "\n", 419 | " if o is not None:\n", 420 | " melody_chords_f.append(o)\n", 421 | "\n", 422 | " # Saving every 2560 processed files\n", 423 | " if i % (SAVE_EVERY_NUMBER_OF_ITERATIONS * NUMBER_OF_FILES_PER_ITERATION) == 0 and i != 0:\n", 424 | " print('SAVING !!!')\n", 425 | " print('=' * 70)\n", 426 | " good_files_count += len(melody_chords_f)\n", 427 | " print('Saving processed files...')\n", 428 | " print('=' * 70)\n", 429 | " print('Data check:', min(melody_chords_f[0]), '===', max(melody_chords_f[0]), '===', len(list(set(melody_chords_f[0]))), '===', len(melody_chords_f[0]))\n", 430 | " print('=' * 70)\n", 431 | " print('Processed so far:', good_files_count, 'out of', i, '===', good_files_count / i, 'good files ratio')\n", 432 | " print('=' * 70)\n", 433 | " count = str(i)\n", 434 | " TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/INTS/CPT_INTs_'+count)\n", 435 | " melody_chords_f = []\n", 436 | " print('=' * 70)\n", 437 | "\n", 438 | "print('SAVING !!!')\n", 439 | "print('=' * 70)\n", 440 | "good_files_count += len(melody_chords_f)\n", 441 | "print('Saving processed files...')\n", 442 | "print('=' * 70)\n", 443 | "print('Data check:', min(melody_chords_f[0]), '===', max(melody_chords_f[0]), '===', len(list(set(melody_chords_f[0]))), '===', len(melody_chords_f[0]))\n", 444 | "print('=' * 70)\n", 445 | "print('Processed so far:', good_files_count, 'out of', i, '===', good_files_count / i, 'good files ratio')\n", 446 | "print('=' * 70)\n", 447 | "count = str(i)\n", 448 | "TMIDIX.Tegridy_Any_Pickle_File_Writer(melody_chords_f, '/content/INTS/CPT_INTs_'+count)\n", 449 | "print('=' * 70)" 450 | ] 451 | }, 452 | { 453 | "cell_type": "markdown", 454 | "metadata": { 455 | "id": "-ye9rNzOHX90" 456 | }, 457 | "source": [ 458 | "# (TEST INTS)" 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": null, 464 | "metadata": { 465 | "scrolled": false, 466 | "id": "pNXByX_S7Iz-", 467 | "cellView": "form" 468 | }, 469 | "outputs": [], 470 | "source": [ 471 | "#@title Test INTs\n", 472 | "\n", 473 | "train_data1 = random.choice(melody_chords_f)\n", 474 | "\n", 475 | "print('=' * 70)\n", 476 | "print('Seq len:', len(train_data1))\n", 477 | "print('Sample INTs', train_data1[:15])\n", 478 | "print('=' * 70)\n", 479 | "\n", 480 | "out = train_data1\n", 481 | "\n", 482 | "if len(out) != 0:\n", 483 | "\n", 484 | " song = out\n", 485 | " song_f = []\n", 486 | "\n", 487 | " time = 0\n", 488 | " dur = 0\n", 489 | " vel = 90\n", 490 | " pitch = 0\n", 491 | " channel = 0\n", 492 | "\n", 493 | " patches = [0] * 16\n", 494 | "\n", 495 | " for ss in song:\n", 496 | "\n", 497 | " if 0 <= ss < 128:\n", 498 | "\n", 499 | " time += ss\n", 500 | "\n", 501 | " if 128 <= ss < 256:\n", 502 | "\n", 503 | " dur = (ss-128)\n", 504 | "\n", 505 | " if 256 <= ss < 384:\n", 506 | "\n", 507 | " pitch = (ss-256)\n", 508 | "\n", 509 | " song_f.append(['note', time, dur, channel, pitch, vel ])\n", 510 | "\n", 511 | "detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,\n", 512 | " output_signature = 'Chords Progressions Transformer',\n", 
513 | " output_file_name = '/content/Chords-Progressions-Transformer-Composition',\n", 514 | " track_name='Project Los Angeles',\n", 515 | " list_of_MIDI_patches=patches,\n", 516 | " timings_multiplier=32\n", 517 | " )\n", 518 | "print('=' * 70)" 519 | ] 520 | }, 521 | { 522 | "cell_type": "markdown", 523 | "source": [ 524 | "# (ZIP AND DOWNLOAD INTS)" 525 | ], 526 | "metadata": { 527 | "id": "Ooe7lcOZbpz1" 528 | } 529 | }, 530 | { 531 | "cell_type": "code", 532 | "source": [ 533 | "#@title Zip and download training INTs\n", 534 | "\n", 535 | "print('=' * 70)\n", 536 | "\n", 537 | "try:\n", 538 | " os.remove('Chords_progressions_Transformer_INTs.zip')\n", 539 | "except OSError:\n", 540 | " pass\n", 541 | "\n", 542 | "print('Zipping... Please wait...')\n", 543 | "print('=' * 70)\n", 544 | "\n", 545 | "%cd /content/INTS/\n", 546 | "!zip Chords_progressions_Transformer_INTs.zip *.pickle\n", 547 | "%cd /content/\n", 548 | "\n", 549 | "print('=' * 70)\n", 550 | "print('Done!')\n", 551 | "print('=' * 70)\n", 552 | "\n", 553 | "print('Downloading final zip file...')\n", 554 | "print('=' * 70)\n", 555 | "\n", 556 | "files.download('/content/INTS/Chords_progressions_Transformer_INTs.zip')\n", 557 | "\n", 558 | "print('Done!')\n", 559 | "print('=' * 70)" 560 | ], 561 | "metadata": { 562 | "id": "f2Ric8In9ju0", 563 | "cellView": "form" 564 | }, 565 | "execution_count": null, 566 | "outputs": [] 567 | }, 568 | { 569 | "cell_type": "markdown", 570 | "metadata": { 571 | "id": "YzCMd94Tu_gz" 572 | }, 573 | "source": [ 574 | "# Congrats! You did it! :)" 575 | ] 576 | } 577 | ], 578 | "metadata": { 579 | "colab": { 580 | "private_outputs": true, 581 | "provenance": [] 582 | }, 583 | "kernelspec": { 584 | "display_name": "Python 3", 585 | "name": "python3" 586 | }, 587 | "language_info": { 588 | "codemirror_mode": { 589 | "name": "ipython", 590 | "version": 3 591 | }, 592 | "file_extension": ".py", 593 | "mimetype": "text/x-python", 594 | "name": "python", 595 | "nbconvert_exporter": "python", 596 | "pygments_lexer": "ipython3", 597 | "version": "3.9.7" 598 | } 599 | }, 600 | "nbformat": 4, 601 | "nbformat_minor": 0 602 | } --------------------------------------------------------------------------------