├── .gitignore
├── LICENSE
├── README.md
├── osunn_structure.jpg
├── v6.2
├── 00_environment_buildup.ipynb
├── 01_osumap_loader.ipynb
├── 02_osurhythm_estimator.ipynb
├── 03_osurhythm_momentum_estimator.ipynb
├── 04_osurhythm_slider_classifier.ipynb
├── 05_newsong_importer.ipynb
├── 06_osurhythm_evaluator.ipynb
├── 07_osuflow_evaluator_from_rhythm.ipynb
├── README.md
├── flow_dataset.npz
├── install
├── install.bat
├── load_map.js
├── maplist creator.py
├── maplist.txt
├── momentum_minmax.npy
├── newcombo.js
├── osureader.py
├── package-lock.json
├── package.json
├── plthelper.py
├── requirements.txt
├── saved_rhythm_model
├── saved_rhythm_model_momentums
└── tfhelper.py
└── v7.0
├── 01_Training.ipynb
├── 02_Mapmaking.ipynb
├── Colab.ipynb
├── Colab_Training.ipynb
├── README.md
├── TimingAnlyz.exe
├── act_data_prep.py
├── act_final.py
├── act_flow_ds.py
├── act_gan.py
├── act_modding.py
├── act_newmap_prep.py
├── act_rhythm_calc.py
├── act_taiko_hitsounds.py
├── act_timing.py
├── act_train_rhythm.py
├── act_train_speed.py
├── assets
└── template.osu
├── audio_tools.py
├── bass.dll
├── gen_maplist.js
├── hitsound_tools.py
├── include
└── id3reader_p3.py
├── install
├── install.bat
├── load_map.js
├── losses.py
├── lost_losses.py
├── mania_Colab.ipynb
├── mania_Mapmaking.ipynb
├── mania_act_data_prep.py
├── mania_act_final.py
├── mania_act_rhythm_calc.py
├── mania_analyze.py
├── mania_audio_tools.py
├── mania_setup_colab.py
├── map_analyze.py
├── maplist.txt
├── maplist_maker
├── html
│ ├── Inter-Regular.osu.woff
│ ├── Inter-Regular.osu.woff2
│ ├── Torus-Regular.osu.otf
│ ├── font-face.css
│ ├── index.html
│ ├── main.css
│ └── main.js
├── osu-db-parser
│ ├── index.js
│ └── src
│ │ ├── OsuDB.js
│ │ ├── Reader.js
│ │ └── Struct.js
├── osuDBGetter.js
└── osuPathFinder.js
├── metadata.py
├── models
├── catch
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── cryo
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── default
│ ├── flow_dataset.npz
│ └── rhythm_model
├── flower
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── hard
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── inst
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── lowbpm
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── mania_highkey
│ ├── maps.txt
│ └── rhythm_model
├── mania_lowkey
│ ├── maps.txt
│ └── rhythm_model
├── mania_pattern
│ ├── mania_pattern_dataset.npz
│ └── maps.txt
├── normal
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── sota
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── taiko
│ ├── flow_dataset.npz
│ ├── hs_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── tvsize
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
└── vtuber
│ ├── flow_dataset.npz
│ ├── maps.txt
│ └── rhythm_model
├── newcombo.js
├── os_tools.py
├── package-lock.json
├── package.json
├── package_colab.json
├── plot_tools.py
├── requirements.txt
├── requirements_colab.txt
├── rhythm_loader.py
├── setup_colab.py
├── slider_tools.py
├── stream_tools.py
└── timing.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # ignore map files
2 | v*.*/mapdata/*.npz
3 |
4 | # intermediate results
5 | v*.*/evaluatedRhythm.json
6 | v*.*/mapthis.json
7 | v*.*/mapthis.npz
8 | v*.*/rhythm_data.npz
9 | v*.*/wavdata.json
10 | v*.*/wavfile.wav
11 | v*.*/temp_json_file.json
12 | v*.*/temp/*
13 | v*.*/timing.osu
14 | v*.*/audio.mp3
15 |
16 | # generated map
17 | v*.*/*.osu
18 |
19 | # model
20 | v*.*/flow_dataset.npz
21 | v*.*/saved_rhythm_model
22 | v*.*/hs_dataset.npz
23 | v*.*/mania_pattern_dataset.npz
24 | # saved_rhythm_model_momentums
25 | # saved_slider_model1
26 | # saved_slider_model2
27 | # momentum_minmax.npy
28 |
29 | # maps
30 | # *.osu
31 | # *.osz
32 | # *.osb
33 |
34 | # ffmpeg
35 | ffmpeg.exe
36 |
37 | # as the name suggests
38 | unused/
39 |
40 | # did not work
41 | v*.*/16_osurhythm_evaluator_lesser.ipynb
42 | v*.*/saved_rhythm_model_lesser
43 | v*.*/saved_rhythm_model_momentums_lesser
44 | v*.*/momentum_minmax_lesser.json
45 | js/
46 | docker/
47 | test/
48 |
49 | # generated files
50 | v*.*/node_modules/
51 | v*.*/.ipynb_checkpoints/
52 | **/__pycache__/
53 | v*.*/logs/
54 | *.sublime-project
55 | *.sublime-workspace
56 | .vscode/
57 |
58 | # local reference files
59 | full_maplist.txt
60 | osunn.pptx
61 | predictor_input.json
62 | predictor_output.json
63 | test/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # osumapper
2 |
3 | An automatic beatmap generator using Tensorflow / Deep Learning.
4 |
5 | Demo map 1 (low BPM): https://osu.ppy.sh/beatmapsets/1290030
6 |
7 | Demo map 2 (high BPM): https://osu.ppy.sh/beatmapsets/1290026
8 |
9 | ## Colaboratory
10 |
11 | https://colab.research.google.com/github/kotritrona/osumapper/blob/master/v7.0/Colab.ipynb
12 |
13 | For mania mode: [mania_Colab.ipynb](https://colab.research.google.com/github/kotritrona/osumapper/blob/master/v7.0/mania_Colab.ipynb)
14 |
15 | ## Complete guide for a newcomer in osu! mapping
16 |
17 | https://github.com/kotritrona/osumapper/wiki/Complete-guide:-creating-beatmap-using-osumapper
18 |
19 | ## Installation & Model Running
20 |
21 | - Refer to https://github.com/kotritrona/osumapper/tree/master/v6.2 for version 6.2
22 | - Refer to https://github.com/kotritrona/osumapper/tree/master/v7.0 for version 7.0
23 |
24 | ## Important tip for model training
25 |
26 | Don't train with every single map in your osu!. That's not how machine learning works!
27 |
28 | I would suggest you select only maps you think are well made, for instance a mapset that contains all 5.0 ~ 6.5☆ maps mapped by (insert mapper name).
29 |
30 | ## Maplist.txt creation:
31 | - I have made a maplist generator under `v7.0/` folder. Run `node gen_maplist.js` under the directory to start.
32 | - The other way to create a maplist.txt file for training the model is the "maplist creator.py" script (found in the v6.2 folder). Running it overwrites maplist.txt in that folder with the paths of all maps found under the songs folder you specify.
33 |
34 | ## Model Specification
35 | [Structure diagram](osunn_structure.jpg)
36 |
37 | - Rhythm model
38 | - CNN/LSTM + dense layers
39 | - input music FFTs (7 time_windows x 32 fft_size x 2 (magnitude, phase))
40 | - additional input timing (is_1/1, is_1/4, is_1/2, is_the_other_1/4, BPM, tick_length, slider_length)
41 | - output (is_note, is_circle, is_slider, is_spinner, is_sliding, is_spinning) for 1/-1 classification
42 | - Momentum model
43 | - Same structure as above
44 | - output (momentum, angular_momentum) as regression
45 | - momentum is distance over time. It should be proportional to circle size which I may implement later.
46 | - angular_momentum is angle over time. currently unused.
47 | - it's only used in v6.2
48 | - Slider model
49 | - was designed to classify slider lengths and shapes
50 | - currently unused
51 | - Flow model
52 | - uses GAN to generate the flow.
53 | - takes 10 notes as a group and train them each time
54 | - Generator: some dense layers, input (randomness x 50), output (cos_list x 20, sin_list x 20)
55 | - this output is then fed into a map generator to build a map corresponding to the angular values
56 | - map constructor output: (x_start, y_start, vector_out_x, vector_out_y, x_end, y_end) x 10
57 | - Discriminator: simpleRNN, some dense layers, input ↑, output (1,) ranging from 0 to 1
58 | - every big epoch(?), trains generator for 7 epochs and then discriminator 3 epochs
59 | - trains 6 ~ 25 big epochs each group. mostly 6 epochs unless the generated map is out of the mapping region (0:512, 0:384).
60 | - Beatmap Converter
61 | - uses node.js to convert map data between JSON and .osu formats
62 |
63 | ## Citing
64 |
65 | If you want to cite osumapper in a scholarly work, please cite the github page. I'm not going to write a paper for it.
--------------------------------------------------------------------------------
/osunn_structure.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/osunn_structure.jpg
--------------------------------------------------------------------------------
/v6.2/01_osumap_loader.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "deletable": true,
7 | "editable": true
8 | },
9 | "source": [
10 | "### osu!nn #1: Map Dataset Reader\n",
11 | "\n",
12 | "This notebook reads a file \"maplist.txt\", then reads the .osu files and the relevant music files to convert into some data.\n",
13 | "\n",
14 | "Data that feeds the Deep Neural Network.\n",
15 | "\n",
16 | "Last edit: 2019/4/22"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {
22 | "deletable": true,
23 | "editable": true
24 | },
25 | "source": [
26 | "First of all, we need to install FFmpeg and specify its path here. It is needed to convert the .mp3 files to .wavs which Python can read.\n",
27 | "\n",
28 | "It's also fine to use any other converter, such as LAME: just edit the 24th line of osureader.py (starting with \"subprocess.call\") for the converter's parameters.\n",
29 | "\n",
30 | "**Then, fill maplist.txt with the paths of .osu files you want to train with.** Otherwise it cannot find any of the maps because the maps are on my computer. The default model is trained with the Sota dataset including 44 maps of Sota Fujimori music.\n",
31 | "\n",
 32 |     "After that, run the cell below to convert the maps."
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "collapsed": false,
40 | "deletable": true,
41 | "editable": true
42 | },
43 | "outputs": [],
44 | "source": [
45 | "import os, re, time\n",
46 | "from osureader import * \n",
47 | "\n",
48 | "# set the ffmpeg path here!!\n",
49 | "# add \"r\" before the path string\n",
50 | "\n",
51 | "GLOBAL_VARS[\"ffmpeg_path\"] = r\"D:\\StudyData\\Tensorflow\\ffmpeg\\bin\\ffmpeg.exe\";\n",
52 | "\n",
53 | "# in linux, it is installed globally, so use this\n",
54 | "# GLOBAL_VARS[\"ffmpeg_path\"] = \"ffmpeg\";\n",
55 | "\n",
56 | "mapdata_path = \"mapdata/\";\n",
57 | "\n",
58 | "# check if it works\n",
59 | "test_process_path(GLOBAL_VARS[\"ffmpeg_path\"]);\n",
60 | "\n",
61 | "# check if nodejs works\n",
62 | "test_process_path(\"node\");\n",
63 | "\n",
64 | "# the divisor parameter\n",
65 | "divisor = 4;\n",
66 | "\n",
67 | "# make sure the mapdata folder exists\n",
68 | "if not os.path.isdir(mapdata_path):\n",
69 | " os.mkdir(mapdata_path);\n",
70 | "\n",
71 | "with open(\"maplist.txt\") as fp:\n",
72 | " fcont = fp.readlines();\n",
73 | "\n",
74 | "# The following part is something I used to filter maps with difficulty names\n",
75 | "results = [];\n",
76 | "# exclude_words = [\"Easy\", \"Normal\", \"Hard\", \"Taiko\", \"Salad\", \"Platter\", \"Overdose\", \"Rain\", \"4K\", \"5K\", \"6K\", \"7K\", \"8K\", \"9K\",\n",
77 | "# \"Kantan\", \"Futsuu\", \"Muzukashii\", \"Oni\", \"Field \"];\n",
78 | "for line in fcont:\n",
79 | "# if re.search(\"TV\", line):\n",
80 | "# apd = True;\n",
81 | "# for kw in exclude_words:\n",
82 | "# if kw.lower() in line.strip().lower():\n",
83 | "# apd = False;\n",
84 | "# break;\n",
85 | "# if apd:\n",
86 | "# results.append(line.strip());\n",
87 | " results.append(line);\n",
88 | "\n",
89 | "# Remove the originally existing npzs\n",
90 | "for file in os.listdir(mapdata_path):\n",
91 | " if file.endswith(\".npz\"):\n",
92 | " os.remove(os.path.join(mapdata_path, file));\n",
93 | "\n",
94 | "print(\"Number of filtered maps: {}\".format(len(results)));\n",
95 | "\n",
96 | "for k, mname in enumerate(results):\n",
97 | " try:\n",
98 | " start = time.time()\n",
99 | " read_and_save_osu_file(mname.strip(), filename=os.path.join(mapdata_path, str(k)), divisor=divisor);\n",
100 | " end = time.time()\n",
101 | " print(\"Map data #\" + str(k) + \" saved! time = \" + str(end - start) + \" secs\");\n",
102 | " except Exception as e:\n",
103 | " print(\"Error on #{}, path = {}, error = {}\".format(str(k), mname.strip(), e));\n",
104 | "\n",
105 | "# If some map causes bug please tell me!! https://discord.gg/npmSy7K"
106 | ]
107 | }
108 | ],
109 | "metadata": {
110 | "kernelspec": {
111 | "display_name": "Python 3",
112 | "language": "python",
113 | "name": "python3"
114 | },
115 | "language_info": {
116 | "codemirror_mode": {
117 | "name": "ipython",
118 | "version": 3
119 | },
120 | "file_extension": ".py",
121 | "mimetype": "text/x-python",
122 | "name": "python",
123 | "nbconvert_exporter": "python",
124 | "pygments_lexer": "ipython3",
125 | "version": "3.5.2"
126 | }
127 | },
128 | "nbformat": 4,
129 | "nbformat_minor": 2
130 | }
131 |
--------------------------------------------------------------------------------
/v6.2/05_newsong_importer.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "deletable": true,
7 | "editable": true
8 | },
9 | "source": [
10 | "### osu!nn #5: New Map Reader\n",
11 | "\n",
12 | "Reads the data from the music. This data will be used to create a whole map!\n",
13 | "\n",
14 | "Final edit: 2018/8/16"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {
20 | "deletable": true,
21 | "editable": true
22 | },
23 | "source": [
24 | "Before you read data from the music, it needs timing.\n",
25 | "\n",
26 | "Luckily there are some BPM analyzers on the web, and those are pretty accurate, so no need of Deep Learning for that!\n",
27 | "\n",
28 | "The analyzer I used (in '14) was MixMeister BPM analyzer - its problem is lack of output for offset. There should be something better than that now!\n",
29 | "\n",
30 | "And of course, it is still better to time it yourself; some music has multiple timing sections, and BPM analyzers don't seem to support that.\n",
31 | "\n",
32 | "After timing, save the empty .osu file, and fill in the file_path variable below.\n",
33 | "\n",
34 | "You should also adjust the map parameters; namely, slider velocity. The model doesn't care about the others."
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {
40 | "deletable": true,
41 | "editable": true
42 | },
43 | "source": [
44 | "Also, FFmpeg path needed here."
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 4,
50 | "metadata": {
51 | "collapsed": false,
52 | "deletable": true,
53 | "editable": true
54 | },
55 | "outputs": [],
56 | "source": [
57 | "import os, re, time\n",
58 | "from osureader import *\n",
59 | "\n",
60 | "# set the ffmpeg path here!!\n",
61 | "# add \"r\" before the path string\n",
62 | "\n",
63 | "GLOBAL_VARS[\"ffmpeg_path\"] = r\"D:\\StudyData\\Tensorflow\\ffmpeg\\bin\\ffmpeg.exe\";\n",
64 | "\n",
65 | "# linux\n",
66 | "# GLOBAL_VARS[\"ffmpeg_path\"] = \"ffmpeg\";\n",
67 | "\n",
68 | "# check if it works\n",
69 | "test_process_path(GLOBAL_VARS[\"ffmpeg_path\"]);\n",
70 | "\n",
71 | "divisor = 4;\n",
72 | "\n",
73 | "def read_new_map(file_path):\n",
74 | " start = time.time()\n",
75 | " read_and_save_osu_tester_file(file_path.strip(), filename=\"mapthis\", divisor=divisor);\n",
76 | " end = time.time()\n",
77 | " print(\"Map data saved! time = \" + str(end - start) + \" secs.\");"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": 5,
83 | "metadata": {
84 | "collapsed": false,
85 | "deletable": true,
86 | "editable": true
87 | },
88 | "outputs": [
89 | {
90 | "name": "stdout",
91 | "output_type": "stream",
92 | "text": [
93 | "Map data saved! time = 3.715170383453369 secs.\n"
94 | ]
95 | }
96 | ],
97 | "source": [
98 | "# input file here!\n",
99 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\whitegreat poem\\\\SeikoP - Shirotae no Uta ([CSGA]Ar3sgice) [(() = ())();].osu\";\n",
100 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\597684 Blue Reflection - Overdose\\\\Asano Hayato - OVERDOSE ([CSGA]Ar3sgice) [BR+].osu\"\n",
101 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\Albireo Lake\\\\Asano Hayato - Albireo Lake ([CSGA]Ar3sgice) [ORibt_].osu\"\n",
102 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\LetsLoveIkimashou\\\\NanamoriGoraku-bu - Let's Love~ de Ikimashou ([CSGA]Ar3sgice) [1234].osu\"\n",
103 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\My Maps\\\\ask for alms\\\\emroots - ask for alms ([CSGA]Ar3sgice) [,,,,,].osu\"\n",
104 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\My Maps\\\\cosminox192\\\\Erik McClure - Cosminox ([CSGA]Ar3sgice) [xxxxx].osu\"\n",
105 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\952035 Nakae Mitsuki - Alchemia\\\\Nakae Mitsuki - Alchemia (Shurelia) [Aristocrat].osu\"\n",
106 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\Need You Back\\\\ave;new feat. Sakura Saori - Need You Back ([CSGA]Ar3sgice) [history.setState()].osu\"\n",
107 | "file_path = \"D:\\\\osu!\\\\Songs\\\\Reboot Tactics\\\\sweet ARMS - Reboot Tactics ([CSGA]Ar3sgice) [Ctrl+Alt+Delete].osu\"\n",
108 | "\n",
109 | "# start here!!!\n",
110 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\Nhato - Hello World\\\\Nhato - Hello World ([CSGA]Ar3sgice) [Unaaaa].osu\"\n",
111 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\My Maps\\\\LookHome2\\\\emroots - Look to the Hometown ([CSGA]Ar3sgice) [233].osu\"\n",
112 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\Scarlet -gravitation-\\\\ave;new - Scarlet -gravitation- ([CSGA]Ar3sgice) [Scarlet Rainbow].osu\"\n",
113 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\shinri\\\\Yano Tatsuya - Shinri e no Kestui ~Lydie~ ([CSGA]Ar3sgice) [Divine Strength].osu\"\n",
114 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\Onaji Hoshi wo Miagete\\\\Yanagawa Kazuki - Onaji Hoshi wo Miagete ~Lydie~ ([CSGA]Ar3sgice) [Star8].osu\"\n",
115 | "\n",
116 | "# file_path = \"D:\\\\osu!\\\\Songs\\\\You're not a heroine, it's me\\\\Asano Hayato - You're not a heroine, it's me. ([CSGA]Ar3sgice) [Megumi].osu\"\n",
117 | "\n",
118 | "read_new_map(file_path);"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {
124 | "deletable": true,
125 | "editable": true
126 | },
127 | "source": [
128 | "That's it! We can now proceed to the next notebook."
129 | ]
130 | }
131 | ],
132 | "metadata": {
133 | "kernelspec": {
134 | "display_name": "Python 3",
135 | "language": "python",
136 | "name": "python3"
137 | },
138 | "language_info": {
139 | "codemirror_mode": {
140 | "name": "ipython",
141 | "version": 3
142 | },
143 | "file_extension": ".py",
144 | "mimetype": "text/x-python",
145 | "name": "python",
146 | "nbconvert_exporter": "python",
147 | "pygments_lexer": "ipython3",
148 | "version": "3.5.2"
149 | }
150 | },
151 | "nbformat": 4,
152 | "nbformat_minor": 2
153 | }
154 |
--------------------------------------------------------------------------------
/v6.2/06_osurhythm_evaluator.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "deletable": true,
7 | "editable": true
8 | },
9 | "source": [
10 | "### osu!nn #6: Rhythm Predictor\n",
11 | "\n",
12 | "Calculates a map's rhythm from the music and the timing.\n",
13 | "\n",
14 | "Synthesis of \"rhythmData\"\n",
15 | "* rhythmModel x 1\n",
16 | "* momentumModel x 1\n",
17 | "* timingData x 1\n",
18 | "* (Music) x 1\n",
19 | "\n",
20 | "Synthesis Time: ~2 seconds\n",
21 | "\n",
22 | "Final edit: 2018/8/16"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 1,
28 | "metadata": {
29 | "collapsed": true,
30 | "deletable": true,
31 | "editable": true
32 | },
33 | "outputs": [],
34 | "source": [
35 | "import tensorflow as tf\n",
36 | "from tensorflow import keras\n",
37 | "import pandas as pd\n",
38 | "import numpy as np\n",
39 | "import matplotlib.pyplot as plt\n",
40 | "import os, re"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {
46 | "deletable": true,
47 | "editable": true
48 | },
49 | "source": [
50 | "Some parameters here. \n",
51 | "\n",
52 | "\"note_density\" determines how many notes will be placed on the timeline, ranges from 0 to 1.\n",
53 | "\n",
54 | "\"slider_favor\" determines how the model favors sliders against circles, ranges from -1 to 1.\n",
55 | "\n",
56 | "\"dist_multiplier\" determines how the flow model has your distance multiplied. ranges from 0 to +∞. Of course +∞ is not advisable.\n",
57 | "\n",
58 | "\"divisor_favor\" determines how the model favors notes to be on X divisors starting from a beat (white, blue, red, blue), ranges from -1 to 1 each.\n",
59 | "\n",
60 | "Ranges not inclusive."
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 2,
66 | "metadata": {
67 | "collapsed": true,
68 | "deletable": true,
69 | "editable": true
70 | },
71 | "outputs": [],
72 | "source": [
73 | "# TODO parameter here!!\n",
74 | "dist_multiplier = 1;\n",
75 | "note_density = 0.36;\n",
76 | "slider_favor = 0;\n",
77 | "divisor = 4;\n",
78 | "divisor_favor = [0] * divisor;"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 3,
84 | "metadata": {
85 | "collapsed": false,
86 | "deletable": true,
87 | "editable": true
88 | },
89 | "outputs": [],
90 | "source": [
91 | "model = tf.keras.models.load_model(\n",
92 | " \"saved_rhythm_model\",\n",
93 | " custom_objects=None,\n",
94 | " compile=False\n",
95 | ");\n",
96 | "model.compile(loss='mse',\n",
97 | " optimizer=tf.optimizers.RMSprop(0.001),\n",
98 | " metrics=[keras.metrics.mae]);"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": 4,
104 | "metadata": {
105 | "collapsed": false,
106 | "deletable": true,
107 | "editable": true
108 | },
109 | "outputs": [],
110 | "source": [
111 | "# root = \"..\\\\osureader\\\\mapdata_test\";\n",
112 | "fn = \"mapthis.npz\";\n",
113 | "\n",
114 | "def read_npz(fn):\n",
115 | " with np.load(fn) as data:\n",
116 | " wav_data = data[\"wav\"];\n",
117 | " wav_data = np.swapaxes(wav_data, 2, 3);\n",
118 | " ticks = data[\"ticks\"];\n",
119 | " timestamps = data[\"timestamps\"];\n",
120 | " extra = data[\"extra\"];\n",
121 | " \n",
122 | " # Extra vars\n",
123 | " bpms = extra[0];\n",
124 | " slider_lengths = extra[1];\n",
125 | " ex1 = (60000 / bpms) / 500 - 1;\n",
126 | " ex2 = bpms / 120 - 1;\n",
127 | " ex3 = slider_lengths / 150 - 1;\n",
128 | " \n",
129 | " div_data = np.array([divisor_array(k) + [ex1[k], ex2[k], ex3[k]] for k in ticks]);\n",
130 | " return wav_data, div_data, ticks, timestamps;\n",
131 | "\n",
132 | "def divisor_array(k):\n",
133 | " d_range = list(range(0, divisor));\n",
134 | " return [int(k % divisor == d) for d in d_range];\n",
135 | "\n",
136 | "test_data, div_data, ticks, timestamps = read_npz(fn);"
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": 5,
142 | "metadata": {
143 | "collapsed": false,
144 | "deletable": true,
145 | "editable": true
146 | },
147 | "outputs": [
148 | {
149 | "name": "stdout",
150 | "output_type": "stream",
151 | "text": [
152 | "1116 notes predicted.\n"
153 | ]
154 | }
155 | ],
156 | "source": [
157 | "# Make time intervals from test data\n",
158 | "time_interval = 16;\n",
159 | "if test_data.shape[0]%time_interval > 0:\n",
160 | " test_data = test_data[:-(test_data.shape[0]%time_interval)];\n",
161 | " div_data = div_data[:-(div_data.shape[0]%time_interval)];\n",
162 | "test_data2 = np.reshape(test_data, (-1, time_interval, test_data.shape[1], test_data.shape[2], test_data.shape[3]))\n",
163 | "div_data2 = np.reshape(div_data, (-1, time_interval, div_data.shape[1]))\n",
164 | "\n",
165 | "test_predictions = model.predict([test_data2, div_data2]);\n",
166 | "preds = test_predictions.reshape(-1, test_predictions.shape[2]);\n",
167 | "\n",
168 | "# Favor sliders a little\n",
169 | "preds[:, 2] += slider_favor;\n",
170 | "divs = div_data2.reshape(-1, div_data2.shape[2]);\n",
171 | "margin = np.sum([divisor_favor[k] * divs[:, k] for k in range(0, divisor)]);\n",
172 | "\n",
173 | "preds[:, 0] += margin;\n",
174 | "\n",
175 | "# Predict is_obj using note_density\n",
176 | "obj_preds = preds[:, 0];\n",
177 | "target_count = np.round(note_density * obj_preds.shape[0]).astype(int);\n",
178 |     "borderline = np.sort(obj_preds)[obj_preds.shape[0] - target_count];\n",
179 | "is_obj_pred = np.expand_dims(np.where(preds[:, 0] > borderline, 1, 0), axis=1);\n",
180 | "\n",
181 | "obj_type_pred = np.sign(preds[:, 1:4] - np.tile(np.expand_dims(np.max(preds[:, 1:4], axis=1), 1), (1, 3))) + 1;\n",
182 | "others_pred = (1 + np.sign(preds[:, 4:test_predictions.shape[1]] + 0.5)) / 2;\n",
183 | "another_pred_result = np.concatenate([is_obj_pred, is_obj_pred * obj_type_pred, others_pred], axis=1);\n",
184 | "\n",
185 | "print(\"{} notes predicted.\".format(np.sum(is_obj_pred)));"
186 | ]
187 | },
188 | {
189 | "cell_type": "code",
190 | "execution_count": 6,
191 | "metadata": {
192 | "collapsed": false,
193 | "deletable": true,
194 | "editable": true
195 | },
196 | "outputs": [],
197 | "source": [
198 | "def load_momentum_minmax(fn):\n",
199 | " data = np.load(fn);\n",
200 | " return data;\n",
201 | "mommax, mommin = load_momentum_minmax(\"momentum_minmax.npy\");\n",
202 | "\n",
203 | "momentum_model = tf.keras.models.load_model(\n",
204 | " \"saved_rhythm_model_momentums\",\n",
205 | " custom_objects=None,\n",
206 | " compile=False\n",
207 | ");\n",
208 | "momentum_model.compile(loss='mse',\n",
209 | " optimizer=tf.optimizers.RMSprop(0.001),\n",
210 | " metrics=[keras.metrics.mae]);"
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "execution_count": 7,
216 | "metadata": {
217 | "collapsed": false,
218 | "deletable": true,
219 | "editable": true
220 | },
221 | "outputs": [],
222 | "source": [
223 | "momentum_predictions_output = momentum_model.predict([test_data2, div_data2]);\n",
224 | "momentum_predictions = (momentum_predictions_output.reshape(-1, 2) + 1) / 2 / 0.8 * (mommax - mommin) + mommin;"
225 | ]
226 | },
227 | {
228 | "cell_type": "markdown",
229 | "metadata": {
230 | "deletable": true,
231 | "editable": true
232 | },
233 | "source": [
234 | "Save the rhythm data and progress to #7."
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": 8,
240 | "metadata": {
241 | "collapsed": true,
242 | "deletable": true,
243 | "editable": true
244 | },
245 | "outputs": [],
246 | "source": [
247 | "np.savez_compressed(\"rhythm_data\", objs = is_obj_pred[:, 0], predictions = another_pred_result, timestamps = timestamps, ticks = ticks, momenta = momentum_predictions, sv = (div_data[:,6] + 1) * 150, dist_multiplier = dist_multiplier);"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": 9,
253 | "metadata": {
254 | "collapsed": false,
255 | "deletable": true,
256 | "editable": true
257 | },
258 | "outputs": [],
259 | "source": [
260 | "import json\n",
261 | "\n",
262 | "rhythm_json = {\n",
263 | " \"objs\": is_obj_pred[:, 0].tolist(), \n",
264 | " \"predictions\": another_pred_result.tolist(),\n",
265 | " \"timestamps\": timestamps.tolist(),\n",
266 | " \"ticks\": ticks.tolist(),\n",
267 | " \"momenta\": momentum_predictions.tolist(),\n",
268 | " \"sv\": ((div_data[:,6] + 1) * 150).tolist(),\n",
269 | " \"distMultiplier\": dist_multiplier\n",
270 | "};\n",
271 | "with open(\"evaluatedRhythm.json\", \"w\") as er:\n",
272 | " json.dump(rhythm_json, er);"
273 | ]
274 | }
275 | ],
276 | "metadata": {
277 | "kernelspec": {
278 | "display_name": "Python 3",
279 | "language": "python",
280 | "name": "python3"
281 | },
282 | "language_info": {
283 | "codemirror_mode": {
284 | "name": "ipython",
285 | "version": 3
286 | },
287 | "file_extension": ".py",
288 | "mimetype": "text/x-python",
289 | "name": "python",
290 | "nbconvert_exporter": "python",
291 | "pygments_lexer": "ipython3",
292 | "version": "3.5.2"
293 | }
294 | },
295 | "nbformat": 4,
296 | "nbformat_minor": 2
297 | }
298 |
--------------------------------------------------------------------------------
/v6.2/README.md:
--------------------------------------------------------------------------------
1 | # osumapper v6.2
2 |
3 | This version uses Tensorflow v2.0.0-beta1.
4 |
5 | v6.2 demo map: https://osu.ppy.sh/beatmapsets/834264
6 |
7 | ## Installation:
8 | - install Python (3.5.x or 3.6.x or 3.7.x) and Jupyter notebook
9 | - install [node.js](https://nodejs.org/)
10 | - install [ffmpeg](https://ffmpeg.org/download.html)
11 | - git clone or download this repository
12 | - cd into this folder
13 | - run `install.bat` if you are on Windows
14 | - run `./install` if on Linux
15 |
16 | ## Running:
17 | - run the notebooks 01, 02, 03 for training
18 | - run the notebooks 05, 06, 07 for creating map
19 | - notebook 04 is unused for now
20 |
--------------------------------------------------------------------------------
/v6.2/flow_dataset.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v6.2/flow_dataset.npz
--------------------------------------------------------------------------------
/v6.2/install:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | pip install -r requirements.txt
3 | npm i
--------------------------------------------------------------------------------
/v6.2/install.bat:
--------------------------------------------------------------------------------
1 | pip install -r requirements.txt
2 | npm i
--------------------------------------------------------------------------------
/v6.2/maplist creator.py:
--------------------------------------------------------------------------------
"""Generate maplist.txt from every .osu file found under an osu! songs folder.

The resulting maplist.txt (one beatmap path per line) is consumed by the
training notebooks in this folder.
"""
import glob
import os


def find_osu_files(songs_dir):
    """Return a list of every .osu file path under songs_dir, recursively."""
    # recursive=True makes '**' match nested beatmap-set folders.
    return glob.glob(os.path.join(songs_dir, '**', '*.osu'), recursive=True)


def write_maplist(paths, out_path='maplist.txt'):
    """Overwrite out_path with one beatmap path per line; return the count written.

    Paths are written as `path\\n` (not `\\n` + path as before) so the file no
    longer starts with a blank line, which the map loader would otherwise try
    to read as an empty beatmap path.
    """
    # 'with' guarantees the handle is closed even if a write fails.
    with open(out_path, 'w') as f:
        for p in paths:
            f.write(p + '\n')
    return len(paths)


def main():
    """Interactive entry point: prompt for the songs folder and write maplist.txt."""
    osupath = input('Please enter the path to your osu songs folder for training: ')
    verbose = input('Show verbose output? y/n: ').strip().lower() in ('y', 'yes')

    # Walk the tree once (the old script globbed it twice and discarded one result).
    paths = find_osu_files(osupath)
    if verbose:
        for p in paths:
            print(p)

    numwritten = write_maplist(paths)
    print('#######################################################################################')
    print('Wrote ' + str(numwritten) + ' map paths to maplist.txt')
    input('maplist.txt generated in the same directory as this script, press enter to exit')


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/v6.2/maplist.txt:
--------------------------------------------------------------------------------
1 | # .osu paths here!
--------------------------------------------------------------------------------
/v6.2/momentum_minmax.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v6.2/momentum_minmax.npy
--------------------------------------------------------------------------------
/v6.2/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ipynb",
3 | "version": "1.0.0",
4 | "lockfileVersion": 1,
5 | "requires": true,
6 | "dependencies": {
7 | "big-integer": {
8 | "version": "1.6.34",
9 | "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.34.tgz",
10 | "integrity": "sha512-+w6B0Uo0ZvTSzDkXjoBCTNK0oe+aVL+yPi7kwGZm8hd8+Nj1AFPoxoq1Bl/mEu/G/ivOkUc1LRqVR0XeWFUzuA=="
11 | },
12 | "complex.js": {
13 | "version": "2.0.11",
14 | "resolved": "https://registry.npmjs.org/complex.js/-/complex.js-2.0.11.tgz",
15 | "integrity": "sha512-6IArJLApNtdg1P1dFtn3dnyzoZBEF0MwMnrfF1exSBRpZYoy4yieMkpZhQDC0uwctw48vii0CFVyHfpgZ/DfGw=="
16 | },
17 | "fraction.js": {
18 | "version": "4.0.9",
19 | "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.0.9.tgz",
20 | "integrity": "sha512-qP1sNwdrcA+Vs5TTvGETuaaUmz4Tm48V6Jc+8Oh/gqvkb1d42s99w5kvSrZkIATp/mz3rV4CTef6xINkCofu+A=="
21 | },
22 | "polynomial": {
23 | "version": "1.4.3",
24 | "resolved": "https://registry.npmjs.org/polynomial/-/polynomial-1.4.3.tgz",
25 | "integrity": "sha512-Yf9er7dXiA5jTVaaJc9oGSFB41JW7wigbc1m/nUQ0bOzz0gaY0Ti3HSrvIc4K6vwJ6MsN4eja+8ytLno1z/y1A==",
26 | "requires": {
27 | "big-integer": "1.6.34",
28 | "complex.js": "2.0.11",
29 | "fraction.js": "4.0.9",
30 | "quaternion": "1.0.5"
31 | }
32 | },
33 | "quaternion": {
34 | "version": "1.0.5",
35 | "resolved": "https://registry.npmjs.org/quaternion/-/quaternion-1.0.5.tgz",
36 | "integrity": "sha512-StmkfFTHZ2CUA9b2qVCOaPZnuiR4W7YgRtrdCefICs4z+ny1qG3lpQ6UN33aC54h0r+i4D7sikvdg8Dv2DLGpw=="
37 | }
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/v6.2/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "osumapper",
3 | "version": "6.2.0",
4 | "description": "An automatic beatmap generator using Tensorflow / Deep Learning.",
5 | "main": "load_map.js",
6 | "dependencies": {
7 | "polynomial": "^1.4.3"
8 | },
9 | "devDependencies": {},
10 | "scripts": {
11 | "test": "echo \"Error: no test specified\" && exit 1"
12 | },
13 | "keywords": [],
14 | "repository": {
15 | "type": "git",
16 | "url": "git+https://github.com/kotritrona/osumapper.git"
17 | },
18 | "author": "kotritrona",
19 | "license": "Apache-2.0"
20 | }
--------------------------------------------------------------------------------
/v6.2/plthelper.py:
--------------------------------------------------------------------------------
1 | #define TRUE 0
2 | #define FALSE 1
3 |
4 | import matplotlib.pyplot as plt
5 | import matplotlib.lines as lines
6 | import matplotlib.transforms as mtransforms
7 | import matplotlib.text as mtext
8 |
9 |
class MyLine(lines.Line2D):
    """A Line2D subclass that draws its label as text near the line's last point."""

    def __init__(self, *args, **kwargs):
        # The text position gets updated whenever the line data is set.
        self.text = mtext.Text(0, 0, '')
        super().__init__(*args, **kwargs)
        # The label attribute is only available after Line2D.__init__ has run.
        self.text.set_text(self.get_label())

    def set_figure(self, figure):
        self.text.set_figure(figure)
        super().set_figure(figure)

    def set_axes(self, axes):
        self.text.set_axes(axes)
        super().set_axes(axes)

    def set_transform(self, transform):
        # Shift the label by a 2-pixel offset so it does not sit on the line.
        offset = mtransforms.Affine2D().translate(2, 2)
        self.text.set_transform(transform + offset)
        super().set_transform(transform)

    def set_data(self, x, y):
        # Anchor the label at the final data point, if any data exists.
        if len(x):
            self.text.set_position((x[-1], y[-1]))
        super().set_data(x, y)

    def draw(self, renderer):
        # Draw the line first, then the label on top of it.
        super().draw(renderer)
        self.text.draw(renderer)
44 |
def plot_history(history):
    """Plot training vs. validation loss curves from a Keras History object.

    Fix: the original wrapped the loss lists in np.array(), but this module
    never imports numpy, so calling it raised NameError. plt.plot accepts
    plain Python sequences directly, so the wrapper is simply removed.
    """
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Abs Error [Limitless]')
    plt.plot(history.epoch, history.history['loss'],
             label='Train Loss')
    plt.plot(history.epoch, history.history['val_loss'],
             label = 'Val loss')
    plt.legend()
    plt.show()
--------------------------------------------------------------------------------
/v6.2/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.16.2
2 | SoundFile==0.10.2
3 | matplotlib==3.0.3
4 | pandas==0.24.2
5 | tensorflow==2.0.0-beta1
6 | scikit-learn==0.19.1
7 | scipy==1.1.0
--------------------------------------------------------------------------------
/v6.2/saved_rhythm_model:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v6.2/saved_rhythm_model
--------------------------------------------------------------------------------
/v6.2/saved_rhythm_model_momentums:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v6.2/saved_rhythm_model_momentums
--------------------------------------------------------------------------------
/v6.2/tfhelper.py:
--------------------------------------------------------------------------------
1 | #define TRUE 0
2 | #define FALSE 1
3 |
4 | import tensorflow as tf
5 |
def stack_loss(tensor):
    """Penalty encouraging notes that are near each other to stack exactly.

    For every pair of notes closer than stack_limit pixels (but farther than
    precise_limit, i.e. not already stacked), a penalty proportional to how
    close they are is accumulated.
    """
    # Note positions as complex numbers, scaled to the osu! playfield (512 x 384).
    positions = tf.complex(tensor[:, :, 0] * 512, tensor[:, :, 1] * 384)
    stack_limit = 30
    precise_limit = 1
    penalties = []
    for note_index in range(tensor.shape[1]):
        # Distance from note `note_index` to every note in the group.
        anchor = tf.tile(tf.expand_dims(positions[:, note_index], axis=1), [1, tensor.shape[1]])
        dist = tf.abs(anchor - positions)
        # Mask pairs that are close (< stack_limit) yet not overlapping (> precise_limit).
        mask = tf.cast(tf.less(dist, stack_limit), tf.float32) * tf.cast(tf.greater(dist, precise_limit), tf.float32)
        penalties.append(tf.reduce_mean(mask * (stack_limit - dist) / stack_limit))
    return tf.reduce_sum(penalties)
19 |
# This polygon loss was an attempt to make the map less likely to overlap each other.
# The idea is: calculate the area of polygon formed from the note positions;
# If it is big, then it is good - they form a convex shape, no overlap.
# ... of course it totally doesn't work like that.
def polygon_loss(tensor):
    """Signed area (shoelace formula) of the polygon formed by note positions.

    Bug fix: the original computed (x_i + x_{i+1}) * (y_{i+1} - x_i),
    subtracting an x coordinate where the shoelace formula requires y_i.
    """
    xy_this = tensor[:, :, 0:2];
    # Positions rotated by one: note i is paired with note i+1 (wrapping around).
    xy_next = tf.concat([tensor[:, 1:, 0:2], tensor[:, 0:1, 0:2]], axis=1);
    # Shoelace term: (x_i + x_{i+1}) * (y_{i+1} - y_i)
    sa = (xy_this[:, :, 0] + xy_next[:, :, 0]) * (xy_next[:, :, 1] - xy_this[:, :, 1]);
    surface = tf.abs(tf.reduce_sum(sa, axis=1))/2;
    return surface;
--------------------------------------------------------------------------------
/v7.0/01_Training.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## osumapper #1: Map Dataset Reader\n",
8 | "\n",
9 | "Read the map list and convert maps to formats Python can use."
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "### IMPORTANT!! First fill \"maplist.txt\" with paths of .osu files you want to train with !!!\n",
17 | "\n",
18 | "You can use the maplist generator by running `node gen_maplist` under this directory.\n",
19 | "\n",
20 | "**Tip:** Don't train with your every single map. Find good maps (maps you like) with relatively similar difficulty.\n",
21 | "\n",
22 | "After that, run the grid below to convert the maps."
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": null,
28 | "metadata": {
29 | "scrolled": true
30 | },
31 | "outputs": [],
32 | "source": [
33 | "from act_data_prep import *\n",
34 | "\n",
35 | "# for osu!mania use this instead of above\n",
36 | "# from mania_act_data_prep import * \n",
37 | "\n",
38 | "step1_load_maps();\n",
39 | "\n",
40 | "# If any map causes bug please tell me!! https://discord.gg/npmSy7K"
41 | ]
42 | },
43 | {
44 | "cell_type": "markdown",
45 | "metadata": {},
46 | "source": [
47 | "## osumapper #2: rhythm model\n",
48 | "\n",
49 | "Train a rhythm model that decides where to place circles/sliders based on music.\n",
50 | "\n",
51 | "If you're using GPU and it reports a memory error, try setting batch_size parameter to a smaller value (that GPU can handle)."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "from act_train_rhythm import *;\n",
61 | "\n",
62 | "train_params_p2 = {\n",
63 | " \"divisor\" : 4,\n",
64 | " \"train_epochs\" : 16,\n",
65 | " \"train_batch_size\" : None, # Default is 32 or based on machine specs\n",
66 | " \"plot_history\" : True,\n",
67 | " \"too_many_maps_threshold\" : 200,\n",
68 | " \"train_epochs_many_maps\" : 6,\n",
69 | " \"data_split_count\" : 80\n",
70 | "};\n",
71 | "model_p2 = step2_build_model()"
72 | ]
73 | },
74 | {
75 | "cell_type": "markdown",
76 | "metadata": {},
77 | "source": [
78 | "Train the model and evaluate.<br>\n",
79 | "is_note_start accuracy should be about 0.8 to 0.9 based on my tests, others should be lower.\n",
80 | "\n",
81 | "**Note:** I changed the metrics from F1 to AUC in this version!! 0.5=guessing 1=perfect for AUC"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": null,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": [
90 | "model_p2 = step2_train_model(model_p2, train_params_p2)\n",
91 | "step2_evaluate(model_p2)"
92 | ]
93 | },
94 | {
95 | "cell_type": "markdown",
96 | "metadata": {},
97 | "source": [
98 | "Done! now save the model to the disk."
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {},
105 | "outputs": [],
106 | "source": [
107 | "step2_save(model_p2)"
108 | ]
109 | },
110 | {
111 | "cell_type": "markdown",
112 | "metadata": {},
113 | "source": [
114 | "## osumapper #3: flow dataset construction\n",
115 | "\n",
116 | "Construct a dataset for the map flow generator."
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": null,
122 | "metadata": {},
123 | "outputs": [],
124 | "source": [
125 | "from act_flow_ds import *;\n",
126 | "\n",
127 | "flow_dataset_params = step3_set_params(note_group_size=10, step_size=5);\n",
128 | "maps_flow = step3_read_maps_flow(flow_dataset_params);\n",
129 | "step3_save_flow_dataset(maps_flow);\n",
130 | "\n",
131 | "# hitsounds dataset, only for taiko maps\n",
132 | "# maps_hs_af, maps_hs = step3_read_maps_hs(flow_dataset_params);\n",
133 | "# step3_save_hs_dataset(maps_hs_af, maps_hs);\n",
134 | "\n",
135 | "# pattern dataset, only for mania (remove the flow part for mania)\n",
136 | "# data = step3_read_maps_pattern([]);\n",
137 | "# step3_save_pattern_dataset(data);"
138 | ]
139 | },
140 | {
141 | "cell_type": "markdown",
142 | "metadata": {},
143 | "source": [
144 | "That's it! The models are trained. Start making a new map with the other notebook."
145 | ]
146 | }
147 | ],
148 | "metadata": {
149 | "kernelspec": {
150 | "display_name": "Python 3",
151 | "language": "python",
152 | "name": "python3"
153 | },
154 | "language_info": {
155 | "codemirror_mode": {
156 | "name": "ipython",
157 | "version": 3
158 | },
159 | "file_extension": ".py",
160 | "mimetype": "text/x-python",
161 | "name": "python",
162 | "nbconvert_exporter": "python",
163 | "pygments_lexer": "ipython3",
164 | "version": "3.8.3"
165 | }
166 | },
167 | "nbformat": 4,
168 | "nbformat_minor": 2
169 | }
170 |
--------------------------------------------------------------------------------
/v7.0/02_Mapmaking.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## osumapper #4: New Map Reader\n"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "Set the input file string to a timed (having the right BPM/offset) .osu file.\n",
15 | "\n",
16 | "It converts the map/music to Python readable format."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "metadata": {},
23 | "outputs": [],
24 | "source": [
25 | "from act_newmap_prep import *\n",
26 | "\n",
27 | "# input file here! (don't remove the \"r\" before string)\n",
28 | "file_path = r'..\\..\\test_data\\test.osu'\n",
29 | "\n",
30 | "# Or use auto timing with music file only!!\n",
31 | "\n",
32 | "# from act_timing import *;\n",
33 | "# music_path = r\"..\\..\\test_data\\audio.mp3\"\n",
34 | "# file_path = get_timed_osu_file(music_path, game_mode=0);\n",
35 | "\n",
36 | "step4_read_new_map(file_path);"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "metadata": {},
42 | "source": [
43 | "## osumapper #5: Rhythm Predictor\n",
44 | "\n",
45 | "Calculates a map's rhythm based on the music and timing."
46 | ]
47 | },
48 | {
49 | "cell_type": "markdown",
50 | "metadata": {},
51 | "source": [
52 | "Parameters:\n",
53 | "\n",
54 | "\"note_density\" determines how many notes will be placed on the timeline, ranges from 0 to 1.<br>\n",
55 | "\"slider_favor\" determines how the model favors sliders against circles, ranges from -1 to 1.<br>\n",
56 | "\"dist_multiplier\" determines the distance snap. ranges from 0 to +∞. Of course 0/+∞ are not advisable.<br>\n",
57 | "\"divisor_favor\" determines how the model favors notes to be on X divisors starting from a beat (white, blue, red, blue), ranges from -1 to 1 each.<br>\n",
58 | "\"slider_max_ticks\" determines the max amount of time a slider can slide, ranges from 1 to +∞."
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "metadata": {},
65 | "outputs": [],
66 | "source": [
67 | "from act_rhythm_calc import *\n",
68 | "\n",
69 | "model = step5_load_model();\n",
70 | "npz = step5_load_npz();\n",
71 | "params = step5_set_params(dist_multiplier=1, note_density=0.35, slider_favor=0, divisor_favor=[0] * 4, slider_max_ticks=8);\n",
72 | "\n",
73 | "predictions = step5_predict_notes(model, npz, params);\n",
74 | "converted = step5_convert_sliders(predictions, params);"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "step5_save_predictions(converted);"
84 | ]
85 | },
86 | {
87 | "cell_type": "markdown",
88 | "metadata": {},
89 | "source": [
90 | "## osumapper #6: Map flow generator\n",
91 | "\n",
92 | "Generate the final map using a Generative Adversarial Network.\n",
93 | "\n",
94 | "Parameters:\n",
95 | "\n",
96 | "- note_distance_basis: the baseline for distance snap between notes\n",
97 | "- max_ticks_for_ds: max number of time ticks (each 1/4) that it uses the distance snap\n",
98 | "- next_from_slider_end: use slider end instead of slider head for calculating distance\n",
99 | "- box_loss_border, box_loss_value: it's like a barrier on the map edges that bounces off the circles\n",
100 | "- divisor, note_group_size: don't change unless you're using a special model built for it\n",
101 | "- good_epoch, max_epoch: controls the training time. less time makes it faster but risks less quality\n",
102 | "- g_\\*, c_\\*: hyperparameters used by GAN. No one knows how they work but they mysterically affect the result"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": null,
108 | "metadata": {
109 | "scrolled": true
110 | },
111 | "outputs": [],
112 | "source": [
113 | "from act_gan import *;\n",
114 | "\n",
115 | "gan_params = {\n",
116 | " \"divisor\" : 4,\n",
117 | " \"good_epoch\" : 12,\n",
118 | " \"max_epoch\" : 30,\n",
119 | " \"note_group_size\" : 10,\n",
120 | " \"g_epochs\" : 1,\n",
121 | " \"c_epochs\" : 1,\n",
122 | " \"g_batch\" : 50,\n",
123 | " \"g_input_size\" : 50,\n",
124 | " \"c_true_batch\" : 140,\n",
125 | " \"c_false_batch\" : 5,\n",
126 | " \"c_randfalse_batch\" : 5,\n",
127 | " \"note_distance_basis\" : 200,\n",
128 | " \"next_from_slider_end\" : False,\n",
129 | " \"max_ticks_for_ds\" : 1,\n",
130 | " \"box_loss_border\" : 0.1,\n",
131 | " \"box_loss_value\" : 0.4,\n",
132 | " \"box_loss_weight\" : 1\n",
133 | "};\n",
134 | "\n",
135 | "step6_set_gan_params(gan_params);\n",
136 | "osu_a, data = step6_run_all();"
137 | ]
138 | },
139 | {
140 | "cell_type": "markdown",
141 | "metadata": {},
142 | "source": [
143 | "### Since the generation will take a while...\n",
144 | "\n",
145 | "we can appreciate a nice picture of Cute Sophie!!\n",
146 | "\n",
147 | ""
148 | ]
149 | },
150 | {
151 | "cell_type": "markdown",
152 | "metadata": {},
153 | "source": [
154 | "Do a little modding to the map.\n",
155 | "\n",
156 | "Parameters:\n",
157 | "\n",
158 | "- stream_regularizer: fix bad streams. integer for modes (0,1,2,3,4) 0=inactive\n",
159 | "- slider_mirror: mirror slider ends if they go outside map area. (0,1) 0=inactive 1=active"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "from act_modding import *\n",
169 | "\n",
170 | "modding_params = {\n",
171 | " \"stream_regularizer\" : 1,\n",
172 | " \"slider_mirror\" : 1\n",
173 | "}\n",
174 | "\n",
175 | "osu_a, data = step7_modding(osu_a, data, modding_params);"
176 | ]
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "metadata": {},
181 | "source": [
182 | "Finally, save the data into an .osu file!"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "metadata": {},
189 | "outputs": [],
190 | "source": [
191 | "from act_final import *\n",
192 | "\n",
193 | "saved_osu_name = step8_save_osu_file(osu_a, data);\n",
194 | "\n",
195 | "# for taiko mode only (comment out the above line and use below)\n",
196 | "# from act_taiko_hitsounds import *\n",
197 | "# taiko_hitsounds_params = step8_taiko_hitsounds_set_params(divisor=4, metronome_count=4)\n",
198 | "# hitsounds = step8_apply_taiko_hitsounds(osu_a, data, params=taiko_hitsounds_params)\n",
199 | "# saved_osu_name = step8_save_osu_file(osu_a, data, hitsounds=hitsounds);\n",
200 | "\n",
201 | "# clean up the folder\n",
202 | "step8_clean_up();"
203 | ]
204 | },
205 | {
206 | "cell_type": "markdown",
207 | "metadata": {},
208 | "source": [
209 | "If it works alright, you should have a nice .osu file under the folder of these notebooks now!\n",
210 | "\n",
211 | "If it does not work, please tell me the problem so probably I could fix it!\n",
212 | "\n",
213 | "For bug reports and feedbacks either report it on github or use discord:<br>\n",
214 | "[https://discord.com/invite/npmSy7K](https://discord.com/invite/npmSy7K)"
215 | ]
216 | }
217 | ],
218 | "metadata": {
219 | "kernelspec": {
220 | "display_name": "Python 3",
221 | "language": "python",
222 | "name": "python3"
223 | },
224 | "language_info": {
225 | "codemirror_mode": {
226 | "name": "ipython",
227 | "version": 3
228 | },
229 | "file_extension": ".py",
230 | "mimetype": "text/x-python",
231 | "name": "python",
232 | "nbconvert_exporter": "python",
233 | "pygments_lexer": "ipython3",
234 | "version": "3.8.3"
235 | }
236 | },
237 | "nbformat": 4,
238 | "nbformat_minor": 2
239 | }
240 |
--------------------------------------------------------------------------------
/v7.0/Colab_Training.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## osumapper: create osu! map using Tensorflow and Colab\n",
8 | "\n",
9 | "### Model Training\n",
10 | "\n",
11 | "Github: https://github.com/kotritrona/osumapper"
12 | ]
13 | },
14 | {
15 | "cell_type": "markdown",
16 | "metadata": {
17 | "id": "EroyvoE7qr_P"
18 | },
19 | "source": [
20 | "### Step 0: Installation\n",
21 | "\n",
22 | "First of all, check the Notebook Settings under Edit tab.<br>\n",
23 | "Activate GPU to make the training faster.\n",
24 | "\n",
25 | "Then, clone the git repository and install dependencies."
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": null,
31 | "metadata": {
32 | "id": "3APpbRI8qrxm"
33 | },
34 | "outputs": [],
35 | "source": [
36 | "%cd /content/\n",
37 | "!git clone https://github.com/kotritrona/osumapper.git\n",
38 | "%cd osumapper/v7.0\n",
39 | "!apt install -y ffmpeg\n",
40 | "!apt install -y nodejs\n",
41 | "!cp requirements_colab.txt requirements.txt\n",
42 | "!cp package_colab.json package.json\n",
43 | "!pip install -r requirements.txt\n",
44 | "!npm install"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {
50 | "id": "76KQyHYgrFEy"
51 | },
52 | "source": [
53 | "### Step 1: Upload training maps\n",
54 | "\n",
55 | "Write the maplist.txt and run the first block of `01_Training.ipynb` (`act_data_prep.step1_load_maps()`) locally.<br>\n",
56 | "After that, make a folder `NPZ/` under your google drive, and upload the generated npz files under local `mapdata/` in there."
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {
63 | "id": "aFWVEtE2vtoT"
64 | },
65 | "outputs": [],
66 | "source": [
67 | "# Wait for the upload to finish"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {
73 | "id": "83RcU3yap-N_"
74 | },
75 | "source": [
76 | "Mount your google drive in Colaboratory.<br>\n",
77 | "It will ask you for an auth code.\n"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "id": "XF6WtFFupmyD"
85 | },
86 | "outputs": [],
87 | "source": [
88 | "from google.colab import drive\n",
89 | "drive.mount('/gdrive')"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {
95 | "id": "5OjRVBotq9A7"
96 | },
97 | "source": [
98 | "Copy .npz files to the training data folder."
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {
105 | "id": "Tx9X_LIZqGGi"
106 | },
107 | "outputs": [],
108 | "source": [
109 | "# One of mkdir or rm will pop an error. Ignore it.\n",
110 | "!mkdir mapdata/\n",
111 | "!rm mapdata/*.npz\n",
112 | "!cp /gdrive/'My Drive'/NPZ/*.npz mapdata/\n",
113 | "print(\"Copy complete!\")"
114 | ]
115 | },
116 | {
117 | "cell_type": "markdown",
118 | "metadata": {
119 | "id": "FNQZjKoer8Fy"
120 | },
121 | "source": [
122 | "## Step 2: rhythm model\n",
123 | "\n",
124 | "(after this point it's copypaste from `01_Training.ipynb` from the second block)\n",
125 | "\n",
126 | "Train a rhythm model that decides where to place circles/sliders based on music.\n",
127 | "\n",
128 | "If you're using GPU and it reports a memory error, try setting batch_size parameter to a smaller value (that GPU can handle)."
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "metadata": {
135 | "id": "svgt9Fs2r7iy"
136 | },
137 | "outputs": [],
138 | "source": [
139 | "from act_train_rhythm import *;\n",
140 | "\n",
141 | "train_params = {\n",
142 | " \"divisor\" : 4,\n",
143 | " \"train_epochs\" : 32,\n",
144 | " \"train_batch_size\" : None, # Default is 32 or based on machine specs\n",
145 | " \"plot_history\" : True,\n",
146 | " \"too_many_maps_threshold\" : 240,\n",
147 | " \"train_epochs_many_maps\" : 6,\n",
148 | " \"data_split_count\" : 80\n",
149 | "};\n",
150 | "model = step2_build_model()"
151 | ]
152 | },
153 | {
154 | "cell_type": "markdown",
155 | "metadata": {
156 | "id": "Qv88gsdasKYh"
157 | },
158 | "source": [
159 | "Train the model and evaluate.\n",
160 | "is_note_start accuracy should be about 0.8 to 0.9 based on my tests, others should be lower.\n",
161 | "\n",
162 | "**Note:** I changed the metrics from F1 to AUC in this version!! 0.5=guessing 1=perfect for AUC"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {
169 | "id": "y4IAxnpUqqy9"
170 | },
171 | "outputs": [],
172 | "source": [
173 | "model = step2_train_model(model, train_params)\n",
174 | "step2_evaluate(model)"
175 | ]
176 | },
177 | {
178 | "cell_type": "markdown",
179 | "metadata": {
180 | "id": "FNxOeuT2sOz5"
181 | },
182 | "source": [
183 | "Done! now save the model to the disk."
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "metadata": {
190 | "id": "LPnmz5twsPOJ"
191 | },
192 | "outputs": [],
193 | "source": [
194 | "from google.colab import files\n",
195 | "step2_save(model)\n",
196 | "\n",
197 | "files.download(\"saved_rhythm_model\")"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "metadata": {
203 | "id": "CfeE3AjbsVoi"
204 | },
205 | "source": [
206 | "## Step 3: flow dataset construction\n",
207 | "\n",
208 | "Construct a dataset for the map flow generator."
209 | ]
210 | },
211 | {
212 | "cell_type": "code",
213 | "execution_count": null,
214 | "metadata": {
215 | "id": "hxUhLFoEsWRx"
216 | },
217 | "outputs": [],
218 | "source": [
219 | "from act_flow_ds import *;\n",
220 | "\n",
221 | "flow_dataset_params = step3_set_params(note_group_size=10, step_size=5);\n",
222 | "maps_flow = step3_read_maps_flow(flow_dataset_params);\n",
223 | "step3_save_flow_dataset(maps_flow);\n",
224 | "files.download(\"flow_dataset.npz\")\n",
225 | "\n",
226 | "# hitsounds dataset, only for taiko maps\n",
227 | "# maps_hs_af, maps_hs = step3_read_maps_hs(flow_dataset_params);\n",
228 | "# step3_save_hs_dataset(maps_hs_af, maps_hs);\n",
229 | "# files.download(\"hs_dataset.npz\")\n",
230 | "\n",
231 | "# pattern dataset, only for mania (remove the flow part for mania)\n",
232 | "# data = step3_read_maps_pattern([]);\n",
233 | "# step3_save_pattern_dataset(data);\n",
234 | "# files.download(\"mania_pattern_dataset.npz\")"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {
240 | "id": "QsGdBZ-UtPVk"
241 | },
242 | "source": [
243 | "Replace the default model files to use it in Colab map creator."
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": null,
249 | "metadata": {
250 | "id": "pSI3WWi_tPqY"
251 | },
252 | "outputs": [],
253 | "source": [
254 | "!cp saved_rhythm_model models/default/rhythm_model\n",
255 | "!cp flow_dataset.npz models/default/flow_dataset.npz\n",
256 | "# !cp hs_dataset.npz models/default/hs_dataset.npz\n",
257 | "# !cp mania_pattern_dataset.npz models/default/mania_pattern_dataset.npz"
258 | ]
259 | },
260 | {
261 | "cell_type": "markdown",
262 | "metadata": {
263 | "id": "O_pA6cIusoXQ"
264 | },
265 | "source": [
266 | "That's it! The models are trained. Start making a new map with the other notebook.\n",
267 | "\n",
268 | "For bug reports and feedbacks either report it on github or use discord:<br>\n",
269 | "[https://discord.com/invite/npmSy7K](https://discord.com/invite/npmSy7K)"
270 | ]
271 | }
272 | ],
273 | "metadata": {
274 | "accelerator": "GPU",
275 | "colab": {
276 | "collapsed_sections": [],
277 | "name": "Colab Training.ipynb",
278 | "provenance": []
279 | },
280 | "kernelspec": {
281 | "display_name": "Python 3",
282 | "language": "python",
283 | "name": "python3"
284 | },
285 | "language_info": {
286 | "codemirror_mode": {
287 | "name": "ipython",
288 | "version": 3
289 | },
290 | "file_extension": ".py",
291 | "mimetype": "text/x-python",
292 | "name": "python",
293 | "nbconvert_exporter": "python",
294 | "pygments_lexer": "ipython3",
295 | "version": "3.8.3"
296 | }
297 | },
298 | "nbformat": 4,
299 | "nbformat_minor": 1
300 | }
301 |
--------------------------------------------------------------------------------
/v7.0/README.md:
--------------------------------------------------------------------------------
1 | # osumapper v7.0
2 |
3 | This version uses Tensorflow v2.3.1.
4 |
5 | v7.0 demo map 1 (low BPM): https://osu.ppy.sh/beatmapsets/1290030
6 |
7 | v7.0 demo map 2 (high BPM): https://osu.ppy.sh/beatmapsets/1290026
8 |
9 | ## Colaboratory
10 |
11 | https://colab.research.google.com/github/kotritrona/osumapper/blob/master/v7.0/Colab.ipynb
12 |
13 | For mania mode: [mania_Colab.ipynb](https://colab.research.google.com/github/kotritrona/osumapper/blob/master/v7.0/mania_Colab.ipynb)
14 |
15 | ## Complete guide for a newcomer in osu! mapping
16 |
17 | https://github.com/kotritrona/osumapper/wiki/Complete-guide:-creating-beatmap-using-osumapper
18 |
19 | ## Installation
20 |
21 | Windows
22 |
23 | - install [Anaconda3](https://www.anaconda.com/products/individual#windows)
24 | - install [node.js](https://nodejs.org/)
25 | - git clone or download this repository
26 | - use Anaconda Prompt and cd into this directory (osumapper/v7.0/)
27 | - run `install.bat`
28 |
29 | Linux (Ubuntu)
30 |
31 | - install Python 3.8
32 | - run `./install`
33 |
34 | Other Linux
35 |
36 | - install Python 3.8
37 | - Open `install` file with a text editor
38 | - change "apt" to the correct package manager
39 | - run `./install`
40 |
41 | ## Running
42 |
43 | - start Jupyter Notebook
44 | - run 01_Training.ipynb for training
45 | - run 02_Mapmaking.ipynb for map making
46 |
47 | ## Maplist Generator
48 |
49 | - Run `node gen_maplist.js` under the directory to use the maplist generator
50 |
51 | ## Training in Colaboratory
52 |
53 | - You have to generate .npz map data using the first code block of 01_Training.ipynb and upload them to Google Drive
54 | - After that, use https://colab.research.google.com/github/kotritrona/osumapper/blob/master/v7.0/Colab_Training.ipynb
55 |
56 | ## Difference from previous versions
57 |
58 | - Cleaned up code, removed much useless code
59 | - Moved code from notebook to python files and integrated pipeline together
60 | - Uses librosa to read audio file
61 | - Removed soundfile and pandas dependency
62 | - Added TimingAnalyz support to achieve full auto-mapping (great tool made by [statementreply](https://osu.ppy.sh/users/126198))
63 |
64 | ## Citing
65 |
66 | If you want to cite osumapper in a scholarly work, please cite the github page. I'm not going to write a paper for it.
--------------------------------------------------------------------------------
/v7.0/TimingAnlyz.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v7.0/TimingAnlyz.exe
--------------------------------------------------------------------------------
/v7.0/act_data_prep.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 1 action script
5 | #
6 |
7 | from audio_tools import *;
8 | from os_tools import *;
9 |
10 | import os, re, time;
11 |
# Output directory where one .npz file per converted map is written.
mapdata_path = "mapdata/";
# GLOBAL is expected to be injected into this module's namespace by the
# caller (e.g. a notebook) before import; when it is not defined, the
# NameError is deliberately swallowed and the default beat divisor 4 is used.
try:
    divisor = GLOBAL["divisor"];
except:
    divisor = 4;
17 |
def step1_load_maps():
    """Read maplist.txt and convert every listed .osu map to .npz training data.

    Side effects: creates mapdata/ when missing, deletes stale .npz files in
    it, then writes one numbered .npz per map via read_and_save_osu_file.
    Errors on individual maps are reported and skipped, not raised.
    """
    # fix the path..?
    fix_path()

    # Make sure node, the node modules, and ffmpeg are reachable before
    # doing any real work.
    test_process_path("node");
    if not os.path.isdir(mapdata_path):
        os.mkdir(mapdata_path);

    test_node_modules()

    test_process_path("ffmpeg", "-version");

    # Read the map list, skipping blank lines. (The original kept blank
    # lines as entries and relied on the per-map exception handler below
    # to report them as bogus errors.)
    with open("maplist.txt", encoding="utf8") as fp:
        results = [line.strip() for line in fp if line.strip()];

    # Remove stale map data left over from previous runs.
    for file in os.listdir(mapdata_path):
        if file.endswith(".npz"):
            os.remove(os.path.join(mapdata_path, file));

    print("Number of filtered maps: {}".format(len(results)));

    for k, mname in enumerate(results):
        try:
            start = time.time()
            read_and_save_osu_file(mname, filename=os.path.join(mapdata_path, str(k)), divisor=divisor);
            end = time.time()
            print("Map data #" + str(k) + " saved! time = " + str(end - start) + " secs");
        except Exception as e:
            print("Error on #{}, path = {}, error = {}".format(str(k), mname, e));
--------------------------------------------------------------------------------
/v7.0/act_final.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 8 .osu file JSON processing / output
5 | #
6 |
7 | import re, json, datetime;
8 | from os_tools import *;
9 |
def convert_to_osu_obj(obj_array, data, hitsounds=None):
    """
    Convert map data from python format to the JSON object format consumed
    by load_map.js. Circles become "type": 1 objects, sliders "type": 2
    objects carrying an extra sliderGenerator payload.
    """
    (objs, predictions, ticks, timestamps, is_slider, is_spinner,
     is_note_end, sv, slider_ticks, dist_multiplier, slider_types,
     slider_length_base) = data

    # Default to silent hitsounds when none are supplied.
    if hitsounds is None:
        hitsounds = [0] * len(obj_array)

    converted = []
    for i, obj in enumerate(obj_array):
        # Fields shared by circles and sliders; spinners are not handled yet.
        entry = {
            "x": int(obj[0]),
            "y": int(obj[1]),
            "type": 2 if is_slider[i] else 1,
            "time": int(timestamps[i]),
            "hitsounds": int(hitsounds[i]),
            "extHitsounds": "0:0:0"
        }
        if is_slider[i]:
            # Keys inserted before "index" to keep the serialized order
            # identical to the previous implementation.
            entry["sliderGenerator"] = {
                "type": int(slider_types[i]),
                "dOut": [float(obj[2]), float(obj[3])],
                "len": float(slider_length_base[i] * slider_ticks[i]),
                "ticks": int(slider_ticks[i]),
                "endpoint": [int(obj[4]), int(obj[5])]
            }
        entry["index"] = i
        converted.append(entry)
    return converted
50 |
def get_osu_file_name(metadata):
    """
    Construct the .osu file name from the metadata.

    Format: "Artist - Title (Creator) [Diffname].osu"; the artist prefix is
    dropped when empty, and characters outside the allowed set are stripped.
    """
    artist = metadata["artist"];
    prefix = artist + " - " if len(artist) > 0 else "";
    raw_name = prefix + metadata["title"] + " (" + metadata["creator"] + ") [" + metadata["diffname"] + "].osu";
    # Remove anything not safe for a file name.
    return re.sub("[^a-zA-Z0-9\(\)\[\] \.\,\!\~\`\{\}\-\_\=\+\&\^\@\#\$\%\;\']","", raw_name);
62 |
def step8_save_osu_file(osu_map, data, hitsounds=None):
    """
    Save trained map to disk, using filename generated from its metadata.

    Merges the generated objects into mapthis.json, then runs load_map.js
    ("c" = create) to produce the final .osu file.
    """
    osu_obj_array = convert_to_osu_obj(osu_map, data, hitsounds=hitsounds);

    with open("mapthis.json", encoding="utf-8") as map_json:
        map_dict = json.load(map_json);

    filename = get_osu_file_name(map_dict["meta"]);
    map_dict["obj"] = osu_obj_array;

    with open('mapthis.json', 'w', encoding="utf-8") as outfile:
        json.dump(map_dict, outfile, ensure_ascii=False);

    node_output = run_command(["node", "load_map.js", "c", "mapthis.json", filename]);
    # Anything beyond a single byte means node printed errors/diagnostics.
    if(len(node_output) > 1):
        print(node_output.decode("utf-8"));

    print("finished on: {}".format(datetime.datetime.now()));

    return filename;
85 |
def step8_clean_up():
    """
    Best-effort removal of the intermediate files produced by earlier steps.

    Missing (or otherwise unremovable) files are skipped silently — this is
    deliberate cleanup-if-present behavior, not an error path.
    """
    import os  # local import: module-level "os" otherwise comes via os_tools' star-import
    for item in ["mapthis.json", "audio.mp3", "timing.osu", "rhythm_data.npz", "mapthis.npz", "temp_json_file.json", "wavfile.wav", "temp/temp_json_file.json", "temp/wavfile.wav", "evaluatedRhythm.json"]:
        try:
            os.remove(item);
        except OSError:
            # Was a bare "except:" — only OS-level removal failures should be ignored.
            pass
--------------------------------------------------------------------------------
/v7.0/act_flow_ds.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 4 Save Flow Dataset
5 | #
6 |
7 | import numpy as np;
8 | import os;
9 |
10 | root = "mapdata/";
11 |
12 | divisor = 4;
13 |
def step3_set_params(note_group_size=10, step_size=5):
    """Bundle the flow-dataset windowing parameters into the tuple step3 expects."""
    return (note_group_size, step_size);
16 |
def read_map_npz_flow(file_path):
    """Load only the "flow" array from a saved map .npz archive."""
    with np.load(file_path) as archive:
        return archive["flow"];
21 |
22 | # TICK, TIME, TYPE, X, Y, IN_DX, IN_DY, OUT_DX, OUT_DY
def step3_read_maps_flow(params):
    """
    Builds the flow training set from every .npz map archive under `root`.

    Each map's flow data is cut into overlapping windows of `chunk_size` rows
    advancing by `step_size`. Columns are
    TICK, TIME, TYPE, X, Y, IN_DX, IN_DY, OUT_DX, OUT_DY, END_X, END_Y;
    only X, Y, OUT_DX, OUT_DY, END_X, END_Y are kept, with ticks reduced to
    beat position and coordinates normalized to the playfield size.
    """
    chunk_size, step_size = params;

    max_x = 512;
    max_y = 384;

    result = [];
    for file in os.listdir(root):
        if not file.endswith(".npz"):
            continue;
        flow_data = read_map_npz_flow(os.path.join(root, file));
        for i in range(0, (flow_data.shape[0] - chunk_size) // step_size):
            result.append(flow_data[i * step_size:i * step_size + chunk_size]);

    used_indices = [3, 4, 7, 8, 9, 10]
    if len(result) == 0:
        # No map produced a full chunk: the old code crashed here on the 3-D
        # slicing below. Return an empty, correctly-shaped array instead.
        return np.zeros((0, chunk_size, len(used_indices)));

    # normalize the TICK col and the coordinate cols (was converted to array twice before)
    result = np.array(result);
    result[:, :, 0] %= divisor;
    result[:, :, 3] /= max_x;
    result[:, :, 4] /= max_y;
    result[:, :, 9] /= max_x;
    result[:, :, 10] /= max_y;

    # only use X, Y, OUT_DX, OUT_DY, END_X, END_Y
    return result[:, :, used_indices];
51 |
def step3_save_flow_dataset(maps_flow):
    """Write the assembled flow dataset to "flow_dataset.npz"."""
    np.savez_compressed("flow_dataset", maps=maps_flow);
54 |
def read_map_npz_hs(file_path):
    """Load only the "hs" (hitsound) array from a saved map .npz archive."""
    with np.load(file_path) as archive:
        return archive["hs"];
59 |
def step3_read_maps_hs(params):
    """
    Collects hitsound data from every .npz map archive under `root`.

    Returns (avail_flags, hitsounds) with rows whose availability flag is 0
    (no notes in the group) filtered out.
    """
    flag_chunks = [];
    hs_chunks = [];
    for file in os.listdir(root):
        if not file.endswith(".npz"):
            continue;
        hs_data = read_map_npz_hs(os.path.join(root, file));
        flag_chunks.append(hs_data[:, 0]);   # column 0: availability bitmask
        hs_chunks.append(hs_data[:, 1:]);    # remaining columns: hitsound values

    af = np.concatenate(flag_chunks, axis=0)
    hs = np.concatenate(hs_chunks, axis=0)

    # Keep only groups that actually contain notes.
    return af[af != 0], hs[af != 0]
77 |
def step3_save_hs_dataset(hs_avail_flags, hs):
    """Write the assembled hitsound dataset to "hs_dataset.npz"."""
    np.savez_compressed("hs_dataset", avail_flags=hs_avail_flags, hs=hs);
80 |
def read_map_npz_pattern(file_path):
    """Load only the "pattern" (mania key pattern) array from a map .npz archive."""
    with np.load(file_path) as archive:
        return archive["pattern"];
85 |
def array_to_flags(arr):
    """Pack a sequence of per-bit values into a single integer bitmask, LSB first."""
    flags = 0
    for i, k in enumerate(arr):
        flags += k * 2 ** i
    return flags
88 |
def step3_read_maps_pattern(params):
    """
    Collects mania pattern data from every .npz map archive under `root`,
    bucketed by key count (1 to 18 keys).

    Returns a list of 18 entries (index = key_count - 1), each of the form
    [note_begin_flags, note_end_flags, hold_flags, pattern_note_begin,
    pattern_note_end], filtered down to rows that actually contain notes.
    """
    pattern_length = -1
    result_avail_note_begin = [[] for i in range(18)];
    result_avail_note_end = [[] for i in range(18)];
    result_avail_hold = [[] for i in range(18)];
    result_pattern_note_begin = [[] for i in range(18)];
    result_pattern_note_end = [[] for i in range(18)];
    for file in os.listdir(root):
        if file.endswith(".npz"):
            pattern_data = read_map_npz_pattern(os.path.join(root, file));

            try:
                # Axis-2 layout: [hold_flag, key_count "begin" cols, key_count "end" cols]
                key_count = (pattern_data.shape[2] - 1) // 2
                key_index = key_count - 1

                pattern_length = pattern_data.shape[1]

                avail_hold = pattern_data[:, :, 0]
                pattern_note_begin = pattern_data[:, :, 1:1+key_count]
                pattern_note_end = pattern_data[:, :, 1+key_count:1+key_count*2]

                # A tick has a note begin/end if any key column does.
                avail_note_begin = np.max(pattern_note_begin, axis=2)
                avail_note_end = np.max(pattern_note_end, axis=2)

                result_avail_note_begin[key_index].append(avail_note_begin)
                result_avail_note_end[key_index].append(avail_note_end)
                result_avail_hold[key_index].append(avail_hold)
                result_pattern_note_begin[key_index].append(pattern_note_begin)
                result_pattern_note_end[key_index].append(pattern_note_end)
            except Exception as e:
                # Was a bare "except:", which also swallowed KeyboardInterrupt
                # and hid the reason the file failed to parse.
                print("Error on file {}: {}".format(file, e))

    outdata = []

    # No readable maps at all: fall back to a sane default window length.
    if pattern_length == -1:
        pattern_length = 16

    for key_index in range(18):
        if len(result_avail_note_begin[key_index]) == 0:
            # No maps with this key count: emit empty, correctly-shaped arrays.
            outdata.append([np.array([]), np.array([]), np.array([]), np.zeros((0, pattern_length, 1 + key_index)), np.zeros((0, pattern_length, 1 + key_index))])
            continue
        anb = np.concatenate(result_avail_note_begin[key_index], axis=0)
        ane = np.concatenate(result_avail_note_end[key_index], axis=0)
        ah = np.concatenate(result_avail_hold[key_index], axis=0)
        pnb = np.concatenate(result_pattern_note_begin[key_index], axis=0)
        pne = np.concatenate(result_pattern_note_end[key_index], axis=0)

        begin_flag = np.max(anb, axis=1)
        end_flag = np.max(ane, axis=1)

        # Pack per-tick 0/1 key columns into integer bitmasks.
        anbf = np.array([array_to_flags(k) for k in anb])
        anef = np.array([array_to_flags(k) for k in ane])
        ahf = np.array([array_to_flags(k) for k in ah])

        outdata.append([anbf[begin_flag != 0], anef[end_flag != 0], ahf[begin_flag != 0], pnb[begin_flag != 0], pne[end_flag != 0]])

    return outdata
146 |
def step3_save_pattern_dataset(data):
    """
    Save per-key-count mania pattern data to "mania_pattern_dataset.npz".

    `data` is the 18-entry list from step3_read_maps_pattern; entries that are
    missing or malformed are stored as empty lists so the archive always
    contains all 18 key counts.
    """
    save_dict = {}
    for key_index in range(18):
        key_count = key_index + 1
        try:
            avail_note_begin, avail_note_end, avail_hold, pattern_note_begin, pattern_note_end = data[key_index]
        except (IndexError, ValueError, TypeError):
            # Was a bare "except:"; only catch what an absent/short/odd entry
            # can actually raise instead of masking every error.
            avail_note_begin, avail_note_end, avail_hold, pattern_note_begin, pattern_note_end = [[]] * 5
        save_dict["{}k_avail_note_begin".format(key_count)] = avail_note_begin
        save_dict["{}k_avail_note_end".format(key_count)] = avail_note_end
        save_dict["{}k_avail_hold".format(key_count)] = avail_hold
        save_dict["{}k_pattern_note_begin".format(key_count)] = pattern_note_begin
        save_dict["{}k_pattern_note_end".format(key_count)] = pattern_note_end
    np.savez_compressed("mania_pattern_dataset", **save_dict)
--------------------------------------------------------------------------------
/v7.0/act_modding.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 7 Modding
5 | #
6 |
7 | from stream_tools import stream_regularizer;
8 | from slider_tools import slider_mirror;
9 |
def step7_modding(obj_array, data, params):
    """
    Applies the post-processing ("modding") passes to the generated map:
    stream regularization followed by slider mirroring.

    Both mode switches are read from `params`; missing keys now default to 0
    (previously only "stream_regularizer" was defaulted, so a params dict
    without "slider_mirror" raised KeyError on the line below).
    """
    if "stream_regularizer" not in params:
        params["stream_regularizer"] = 0;
    if "slider_mirror" not in params:
        # Mode 0 presumably disables the pass, mirroring the switch above — TODO confirm in slider_tools.
        params["slider_mirror"] = 0;

    obj_array, data = stream_regularizer(obj_array, data, mode = params["stream_regularizer"]);
    obj_array, data = slider_mirror(obj_array, data, mode = params["slider_mirror"]);

    return obj_array, data;
--------------------------------------------------------------------------------
/v7.0/act_newmap_prep.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 5 action script
5 | #
6 |
7 | from audio_tools import *;
8 | from os_tools import *
9 |
10 | import os, re, time;
11 |
12 | mapdata_path = "mapdata/";
13 |
def step4_read_new_map(file_path, divisor = 4):
    """
    Converts a new .osu file into training-format data saved as "mapthis.npz".

    Verifies the external toolchain (PATH fix, node, ffmpeg, node modules)
    up front so failures surface before any slow processing starts.
    """
    # Environment checks first.
    fix_path()
    test_process_path("node");
    test_process_path("ffmpeg", "-version");
    test_node_modules()

    # NOTE(review): the elapsed time is measured but never reported here.
    t_begin = time.time()
    read_and_save_osu_tester_file(file_path.strip(), filename="mapthis", divisor=divisor);
    t_end = time.time()
--------------------------------------------------------------------------------
/v7.0/act_rhythm_calc.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Step 6 action script
5 | #
6 |
7 | import tensorflow as tf
8 | from tensorflow import keras
9 | import numpy as np
10 | import os, re, json
11 |
12 | divisor = 4;
13 |
def read_npz(fn):
    """
    Loads a preprocessed map archive (.npz) and builds the per-tick divisor
    feature matrix. Returns (wav_data, div_data, ticks, timestamps).
    """
    with np.load(fn) as data:
        wav_data = np.swapaxes(data["wav"], 2, 3);
        ticks = data["ticks"];
        timestamps = data["timestamps"];
        extra = data["extra"];

    # extra rows: 0 = BPM per tick, 1 = slider length per tick
    bpms = extra[0];
    slider_lengths = extra[1];
    # Extra features roughly normalized to [-1, 1] for the model.
    ex1 = (60000 / bpms) / 500 - 1;
    ex2 = bpms / 120 - 1;
    ex3 = slider_lengths / 150 - 1;

    # "i" indexes the tick globally; "k" is the tick count inside the
    # uninherited timing section (red line). These differ on maps with
    # multiple timing sections.
    div_data = np.array([divisor_array(k) + [ex1[i], ex2[i], ex3[i]] for i, k in enumerate(ticks)]);
    return wav_data, div_data, ticks, timestamps;
33 |
def divisor_array(k):
    """One-hot encode tick index k by its position within a beat (k mod divisor)."""
    return [1 if k % divisor == d else 0 for d in range(divisor)];
37 |
def step5_set_divisor(x = 4):
    """Set the module-wide beat divisor used by the rhythm predictor."""
    global divisor;
    divisor = x;
41 |
def step5_set_params(dist_multiplier=1, note_density=0.24, slider_favor=0, divisor_favor=[0] * divisor, slider_max_ticks=8):
    """Bundle the note-prediction parameters into the tuple later steps expect."""
    return (dist_multiplier, note_density, slider_favor, divisor_favor, slider_max_ticks);
44 |
def step5_load_model(model_file="saved_rhythm_model"):
    """
    Loads the rhythm model from disk and compiles it for prediction.
    Falls back to the bundled default model when no locally trained model exists.
    """
    if not os.path.isfile(model_file) and model_file == "saved_rhythm_model":
        print("Model not trained! Trying default model...")
        model_file = "models/default/rhythm_model"

    # Load without compiling, then compile with the same settings used in training.
    model = tf.keras.models.load_model(model_file, custom_objects=None, compile=False);
    model.compile(loss='mse',
                  optimizer=tf.optimizers.RMSprop(0.001),
                  metrics=[keras.metrics.mae]);
    return model;
60 |
def step5_load_npz():
    """Load the preprocessed data of the map being generated ("mapthis.npz")."""
    return read_npz("mapthis.npz");
65 |
def step5_predict_notes(model, npz, params):
    """
    Runs the rhythm model over the preprocessed audio data and converts the raw
    predictions into per-tick note flags.

    Returns (is_obj_pred, full_prediction_matrix, timestamps, ticks, div_data,
    dist_multiplier). Notes are selected by thresholding the "is object" score
    so that roughly note_density * tick_count notes survive.
    """
    # Get npz data
    test_data, div_data, ticks, timestamps = npz;

    dist_multiplier, note_density, slider_favor, divisor_favor, slider_max_ticks = params;

    # Trim so the tick count is a multiple of the model's time window.
    time_interval = 16;
    if test_data.shape[0]%time_interval > 0:
        test_data = test_data[:-(test_data.shape[0]%time_interval)];
        div_data = div_data[:-(div_data.shape[0]%time_interval)];
    test_data2 = np.reshape(test_data, (-1, time_interval, test_data.shape[1], test_data.shape[2], test_data.shape[3]))
    div_data2 = np.reshape(div_data, (-1, time_interval, div_data.shape[1]))

    test_predictions = model.predict([test_data2, div_data2]);
    preds = test_predictions.reshape(-1, test_predictions.shape[2]);

    # Favor sliders a little
    preds[:, 2] += slider_favor;
    divs = div_data2.reshape(-1, div_data2.shape[2]);
    # Per-tick margin from divisor_favor. axis=0 keeps one value per tick;
    # the old np.sum without an axis collapsed everything into one scalar,
    # applying the same margin to every tick regardless of its divisor.
    margin = np.sum([divisor_favor[k] * divs[:, k] for k in range(0, divisor)], axis=0);

    preds[:, 0] += margin;

    # Predict is_obj using note_density
    obj_preds = preds[:, 0];
    target_count = np.round(note_density * obj_preds.shape[0]).astype(int);
    # Fix: obj_preds.shape is a tuple — indexing needed shape[0]. Also clamp so
    # target_count == 0 cannot index one past the end of the sorted array.
    cutoff = int(np.clip(obj_preds.shape[0] - target_count, 0, obj_preds.shape[0] - 1));
    borderline = np.sort(obj_preds)[cutoff];
    is_obj_pred = np.expand_dims(np.where(preds[:, 0] > borderline, 1, 0), axis=1);

    # Column-wise winner among circle/slider/spinner, zeroed where no object.
    obj_type_pred = np.sign(preds[:, 1:4] - np.tile(np.expand_dims(np.max(preds[:, 1:4], axis=1), 1), (1, 3))) + 1;
    others_pred = (1 + np.sign(preds[:, 4:test_predictions.shape[1]] + 0.5)) / 2;
    another_pred_result = np.concatenate([is_obj_pred, is_obj_pred * obj_type_pred, others_pred], axis=1);

    print("{} notes predicted.".format(np.sum(is_obj_pred)));

    return is_obj_pred, another_pred_result, timestamps, ticks, div_data, dist_multiplier;
104 |
def step5_convert_sliders(data, params):
    """
    Converts notes flagged as sliders into actual sliders and filters out the
    ticks they absorb.

    A slider either extends to the next object's tick (consuming that tick) or,
    when the gap exceeds slider_max_ticks, gets capped at slider_max_ticks.
    Returns the filtered per-object arrays plus per-object slider tick counts.
    """
    unfiltered_is_obj_pred, unfiltered_predictions, unfiltered_timestamps, unfiltered_ticks, unfiltered_div_data, dist_multiplier = data;
    dist_multiplier, note_density, slider_favor, divisor_favor, slider_max_ticks = params;

    unfiltered_objs = unfiltered_is_obj_pred[:, 0];
    # Slider velocity per tick, recovered from the normalized div_data column.
    unfiltered_sv = (unfiltered_div_data[:,2 + divisor] + 1) * 150;

    # Keep ticks that hold an object, plus note-end ticks (prediction col 4).
    obj_indices = [i for i,k in enumerate(unfiltered_objs) if k == 1 or unfiltered_predictions[i, 4] == 1];

    first_step_objs = unfiltered_objs[obj_indices];
    first_step_predictions = unfiltered_predictions[obj_indices];
    first_step_ticks = unfiltered_ticks[obj_indices];
    first_step_timestamps = unfiltered_timestamps[obj_indices];
    first_step_sv = unfiltered_sv[obj_indices];

    first_step_is_slider = first_step_predictions[:, 2];
    first_step_is_spinner = first_step_predictions[:, 3];
    first_step_is_note_end = first_step_predictions[:, 4];

    # convert notes with is_slider flag to sliders
    # if there is next note, slide to next note
    # else, slide for [max] ticks

    # skip_this marks the entry consumed by the preceding slider's tail.
    skip_this = False;
    new_obj_indices = [];
    slider_ticks = [];
    for i in range(len(first_step_objs)):
        if skip_this or not first_step_objs[i]: # not first_step_objs = slider end
            first_step_is_slider[i] = 0;
            skip_this = False;
            continue;
        if first_step_is_slider[i]: # this one is a slider!!
            if i == first_step_objs.shape[0]-1: # Last Note.
                new_obj_indices.append(i);
                slider_ticks.append(slider_max_ticks);
                continue;
            if first_step_ticks[i+1] >= first_step_ticks[i] + slider_max_ticks + 1: # too long! end here
                new_obj_indices.append(i);
                slider_ticks.append(slider_max_ticks);
            else:
                skip_this = True; # skip the next note or slider end, and slide to that tick
                new_obj_indices.append(i);
                slider_ticks.append(max(1, first_step_ticks[i+1] - first_step_ticks[i]));
        else: # not a slider!
            new_obj_indices.append(i);
            slider_ticks.append(0);

    # Filter the removed objects out!
    objs = first_step_objs[new_obj_indices];
    predictions = first_step_predictions[new_obj_indices];
    ticks = first_step_ticks[new_obj_indices];
    timestamps = first_step_timestamps[new_obj_indices];
    is_slider = first_step_is_slider[new_obj_indices];
    is_spinner = first_step_is_spinner[new_obj_indices];
    is_note_end = first_step_is_note_end[new_obj_indices];
    sv = first_step_sv[new_obj_indices];
    slider_ticks = np.array(slider_ticks);

    return objs, predictions, ticks, timestamps, is_slider, is_spinner, is_note_end, sv, slider_ticks, dist_multiplier;
164 |
def step5_save_predictions(data):
    """Persist the rhythm-prediction tuple to "rhythm_data.npz" for the later steps."""
    objs, predictions, ticks, timestamps, is_slider, is_spinner, is_note_end, sv, slider_ticks, dist_multiplier = data;

    np.savez_compressed(
        "rhythm_data",
        objs=objs,
        predictions=predictions,
        ticks=ticks,
        timestamps=timestamps,
        is_slider=is_slider,
        is_spinner=is_spinner,
        is_note_end=is_note_end,
        sv=sv,
        slider_ticks=slider_ticks,
        dist_multiplier=dist_multiplier);
--------------------------------------------------------------------------------
/v7.0/act_taiko_hitsounds.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 8 Taiko hitsounds
5 | #
6 |
7 | from hitsound_tools import *
8 | import os
9 |
def step8_taiko_hitsounds_set_params(divisor=4, metronome_count=4):
    """Bundle the taiko hitsound parameters into the tuple step8 expects."""
    return (divisor, metronome_count)
12 |
def step8_apply_taiko_hitsounds(obj_array, data, hs_dataset="hs_dataset.npz", params=(4,4)):
    """
    Generates taiko hitsounds for the predicted notes from a hitsound dataset,
    then strips finishers (big drums) that would collide with the next tick.
    """
    _, _, ticks, _, _, _, _, _, _, _, _, _ = data
    divisor, metronome_count = params

    # Fallback for local version
    if not os.path.isfile(hs_dataset) and hs_dataset == "hs_dataset.npz":
        print("Hitsound dataset not found! Trying taiko model...")
        hs_dataset = "models/taiko/hs_dataset.npz"

    hs_avail_flags, hs_data = read_hitsound_dataset(hs_dataset)
    applied = apply_hitsounds(hs_avail_flags, hs_data, ticks, divisor=divisor, metronome_count=metronome_count)
    return fix_taiko_big_drum(ticks, applied)
--------------------------------------------------------------------------------
/v7.0/act_timing.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Timing
5 | #
6 |
7 | import numpy as np
8 | import re, os
9 | import include.id3reader_p3 as id3
10 | from shutil import copy
11 |
12 | from timing import *
13 | from metadata import *
14 |
def get_timed_osu_file(music_path, input_filename = "assets/template.osu", output_filename = "timing.osu", game_mode = 0, mania_key_count = None):
    """
    Fills the .osu template with metadata, detected timing and randomized
    difficulty settings, writes it to `output_filename`, and copies the music
    file to ./audio.mp3.

    Placeholder substitution now uses str.replace instead of re.sub: re.sub
    treats the replacement as a regex template, so an artist/title containing
    "\\" or "\\g" would raise re.error or corrupt the output.
    """
    with open(input_filename) as osu_file:
        osu_text = osu_file.read()

    # Pull artist/title from the ID3 tags, with sensible fallbacks.
    rdr = id3.Reader(music_path)
    artist = rdr.get_value("performer")
    if artist is None:
        artist = "unknown"
    title = rdr.get_value("title")
    if title is None:
        # Fall back to the file name without its extension.
        title = re.sub("\.[^\.]*$", "", os.path.basename(music_path))

    bpm, offset = get_timing(music_path)

    osu_text = osu_text.replace("{audio_filename}", "audio.mp3")
    osu_text = osu_text.replace("{game_mode}", str(game_mode))
    osu_text = osu_text.replace("{artist}", artist)
    osu_text = osu_text.replace("{title}", title)
    osu_text = osu_text.replace("{version}", get_difficulty_name())
    # Difficulty values are randomized within sane editor ranges.
    osu_text = osu_text.replace("{hp_drain}", "{}".format(np.random.randint(0, 101) / 10))
    if mania_key_count is None:
        osu_text = osu_text.replace("{circle_size}", "{}".format(np.random.randint(30, 51) / 10))
    else:
        # In mania, CircleSize stores the key count.
        osu_text = osu_text.replace("{circle_size}", "{}".format(mania_key_count))
    osu_text = osu_text.replace("{overall_difficulty}", "{}".format(np.random.randint(50, 91) / 10))
    osu_text = osu_text.replace("{approach_rate}", "{}".format(np.random.randint(70, 96) / 10))
    osu_text = osu_text.replace("{slider_velocity}", "{}".format(np.random.randint(12, 26) / 10))
    osu_text = osu_text.replace("{tickLength}", "{}".format(60000 / bpm))
    osu_text = osu_text.replace("{offset}", "{}".format(int(offset)))
    osu_text = osu_text.replace("{colors}", get_colors())
    osu_text = osu_text.replace("{hit_objects}", "")

    with open(output_filename, 'w', encoding="utf8") as osu_output:
        osu_output.write(osu_text)

    copy(music_path, "./audio.mp3")

    return output_filename
--------------------------------------------------------------------------------
/v7.0/assets/template.osu:
--------------------------------------------------------------------------------
1 | osu file format v14
2 |
3 | [General]
4 | AudioFilename: {audio_filename}
5 | AudioLeadIn: 0
6 | PreviewTime: -1
7 | Countdown: 0
8 | SampleSet: Soft
9 | StackLeniency: 0.5
10 | Mode: {game_mode}
11 | LetterboxInBreaks: 0
12 | WidescreenStoryboard: 1
13 |
14 | [Editor]
15 | DistanceSpacing: 1.0
16 | BeatDivisor: 4
17 | GridSize: 8
18 | TimelineZoom: 1
19 |
20 | [Metadata]
21 | Title:{title}
22 | TitleUnicode:{title}
23 | Artist:{artist}
24 | ArtistUnicode:{artist}
25 | Creator:osumapper
26 | Version:{version}
27 | Source:
28 | Tags:
29 |
30 | [Difficulty]
31 | HPDrainRate:{hp_drain}
32 | CircleSize:{circle_size}
33 | OverallDifficulty:{overall_difficulty}
34 | ApproachRate:{approach_rate}
35 | SliderMultiplier:{slider_velocity}
36 | SliderTickRate:1
37 |
38 | [Events]
39 | //Background and Video events
40 | //Break Periods
41 | //Storyboard Layer 0 (Background)
42 | //Storyboard Layer 1 (Fail)
43 | //Storyboard Layer 2 (Pass)
44 | //Storyboard Layer 3 (Foreground)
45 | //Storyboard Layer 4 (Overlay)
46 | //Storyboard Sound Samples
47 |
48 | [TimingPoints]
49 | {offset},{tickLength},4,2,1,100,1,0
50 |
51 |
52 | [Colours]
53 | {colors}
54 |
55 | [HitObjects]
56 | {hit_objects}
--------------------------------------------------------------------------------
/v7.0/bass.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v7.0/bass.dll
--------------------------------------------------------------------------------
/v7.0/gen_maplist.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | 'use strict';
4 |
5 | const osuPathFinder = require("./maplist_maker/osuPathFinder");
6 | const osuDBGetter = require("./maplist_maker/osuDBGetter");
7 | const express = require('express');
8 | const bodyParser = require('body-parser');
9 | const opn = require('opn');
10 | const fs = require('fs');
11 |
// Serves the maplist-maker UI and the osu! beatmap database over HTTP,
// then opens it in the default browser.
async function main() {
    const osuPaths = await osuPathFinder();
    const osuDB = osuDBGetter(osuPaths.db);

    const app = express();

    // Beatmap listing endpoint: dump the osu! database contents as JSON.
    app.use("/beatmaps", (req, res, next) => {
        res.set("Content-Type", "application/json");
        res.send(JSON.stringify({
            path: osuPaths.root,
            maps: osuDB.beatmaps
        }));
        res.end();
    });

    app.use(bodyParser.json({limit: '200mb'}));

    // Save endpoint: write the posted maplist contents to disk.
    app.use("/save", (req, res, next) => {
        try {
            fs.writeFileSync("maplist.txt", req.body.contents);
            res.send("saved");
        }
        catch(e) {
            res.send("fail");
        }
        res.end();
    });

    // Static UI assets.
    app.use("/", express.static('maplist_maker/html'));

    // Anything else is a 404.
    app.use((req, res, next) => {
        res.status(404);
        res.send("404");
        res.end();
    });

    app.listen(3424, () => {
        opn("http://127.0.0.1:3424/");
        console.log('Node server listening on port 3424!');
    });
}
main();
--------------------------------------------------------------------------------
/v7.0/hitsound_tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Hitsound helpers
5 | #
6 |
7 | from map_analyze import *
8 |
def get_metronome_count(map_json, tick):
    """
    Returns the metronome size (white line count) of the uninherited timing
    section active at `tick`; before the first section begins, the first
    section's value is used.
    """
    sections = map_json["timing"]["uts"];
    if tick < sections[0]["beginTime"]:
        return sections[0]["whiteLines"];
    # The active section is the latest one that has already started.
    for section in reversed(sections):
        if tick >= section["beginTime"]:
            return section["whiteLines"];
16 |
def get_circle_hitsounds(map_json, **kwargs):
    """
    Reads JSON map data and creates a list of hitsound groups.

    It only reads circle hitsounds because it's dedicated for taiko mode.
    osu mode hitsounds use another function in load_map.js.
    it will not work for osu mode because of custom hitsounds.

    Returns one np.array per metronome window:
    [avail_flags, hitsound_tick_0, ..., hitsound_tick_{4*divisor-1}].
    """
    length = kwargs.get("length", -1);
    divisor = kwargs.get("divisor", 4);
    tick_times = get_map_timing_array(map_json, length = length, divisor = divisor);

    objs = map_json["obj"];
    obj_times = [obj["time"] for obj in objs]

    hs_groups = []
    hs_group = []

    hs_avails = []
    hs_avail = []

    po = 0

    for i, tick in enumerate(tick_times):
        metronome = get_metronome_count(map_json, tick)
        # Start a fresh group at every metronome boundary.
        if i % (metronome * divisor) == 0:
            if len(hs_group) > 0:
                hs_groups.append(hs_group)
                hs_avails.append(hs_avail)
                hs_group = []
                hs_avail = []

        # Advance to the first object not clearly before this tick (±5ms tolerance).
        while obj_times[po] < tick - 5 and po < len(obj_times) - 1:
            po += 1
        if obj_times[po] >= tick - 5 and obj_times[po] <= tick + 5: # found note
            hs_group.append(objs[po]["hitsounds"])
            hs_avail.append(1)
        else:
            hs_group.append(0)
            hs_avail.append(0)

    # everything limit to 4 metronomes
    for i, hs_group in enumerate(hs_groups):
        hs_avail = hs_avails[i]
        target_len = 4 * divisor
        if len(hs_group) < target_len:
            # Compute the pad length BEFORE extending hs_group; the old code
            # recomputed it after extending, which always gave 0 and left
            # hs_avail shorter than hs_group.
            pad = target_len - len(hs_group)
            hs_groups[i] = hs_group + [0] * pad
            hs_avails[i] = hs_avail + [0] * pad
        elif len(hs_group) > target_len:
            hs_groups[i] = hs_group[:target_len]
            hs_avails[i] = hs_avail[:target_len]

    # convert hs_avail to flags (bit i set = note on tick i of the window)
    hs_avail_flags = [sum([k*2**i for i,k in enumerate(hs_avail)]) for hs_avail in hs_avails]

    return_data = [np.array([hs_avail_flags[i]] + hs_groups[i]) for i in range(len(hs_groups))]

    return return_data
84 |
def bitwise_contains(container, item):
    """True where every bit set in `item` is also set in `container` (elementwise)."""
    leftover = np.bitwise_and(item, np.bitwise_not(container))
    return leftover == 0
87 |
def read_hitsound_dataset(hs_dataset):
    """Load the hitsound dataset; returns (avail_flags, hs) arrays."""
    with np.load(hs_dataset) as archive:
        return archive["avail_flags"], archive["hs"]
93 |
def get_hitsound_groups(hs_avail_flags, hs_data, note_metronome_group):
    """
    note_metronome_group should be from a single metronome (16 ticks)
    np.array of integers 0-15
    Returns the dataset rows whose note pattern covers this group's pattern.
    """
    metronome_length = hs_data.shape[1]
    # Bitmask of which tick slots hold a note, LSB = tick 0.
    note_avail_flags = 0
    for slot in range(metronome_length):
        if slot in note_metronome_group:
            note_avail_flags += 2 ** slot

    return hs_data[bitwise_contains(hs_avail_flags, note_avail_flags)]
106 |
def get_random_hitsound_group(hs_avail_flags, hs_data, note_metronome_group, default_mask=2):
    """
    get a random group of hitsounds.
    if it cannot find possible group in hs_data, uses a random group of only whistles.
    """
    candidates = get_hitsound_groups(hs_avail_flags, hs_data, note_metronome_group)
    if len(candidates) == 0:
        # No matching pattern in the dataset: random hitsounds masked down to whistles.
        return np.bitwise_and(np.random.randint(0, 16, size=hs_data.shape[1]), default_mask)
    return candidates[np.random.randint(0, candidates.shape[0])]
117 |
def apply_hitsounds(hs_avail_flags, hs_data, ticks, divisor=4, metronome_count=4):
    """
    Walks every tick up to the last note, filling each metronome-sized window
    with a hitsound group sampled from the dataset, and returns the hitsound
    value for each tick in `ticks`.

    NOTE(review): assumes each dataset group (hs_data row) spans exactly
    divisor * metronome_count ticks so concatenated groups align with tick
    indices — confirm against the dataset builder.
    """
    max_tick = ticks[-1]
    hs_current = []
    hs_applied = []
    metronome_offset = 0

    for tick in range(max_tick+1):
        if tick in ticks:
            hs_current.append(metronome_offset)

        metronome_offset += 1

        if metronome_offset >= divisor * metronome_count or tick == max_tick:
            hs_group = get_random_hitsound_group(hs_avail_flags, hs_data, hs_current)
            hs_applied.append(hs_group)
            hs_current = []
            # Reset the window offset. Previously it was never reset, so after
            # the first window every tick flushed its own group and the
            # tick -> hitsound indexing below drifted out of alignment.
            metronome_offset = 0

    hs_full = np.concatenate(hs_applied, axis=0)
    hs_objs = hs_full[ticks]

    return hs_objs
139 |
def fix_taiko_big_drum(ticks, hitsounds):
    """
    Remove finishes when there is another note next tick.
    (A finish hitsound on a taiko note renders as a big drum, which is awkward
    right before another note.) Mutates and returns `hitsounds`.
    """
    for i, tick in enumerate(ticks):
        has_finish = hitsounds[i] & 4 == 4
        if has_finish and tick + 1 in ticks:
            hitsounds[i] -= 4

    return hitsounds
--------------------------------------------------------------------------------
/v7.0/install:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# osumapper v7.0 dependency installer (apt-based systems).
# Installs the system tools (ffmpeg for audio conversion, node/npm for the
# JS map tooling), then the Python and Node package dependencies.
apt install -y ffmpeg
apt install -y nodejs
apt install -y npm
pip install -r requirements.txt
npm i
--------------------------------------------------------------------------------
/v7.0/install.bat:
--------------------------------------------------------------------------------
@echo off
rem osumapper v7.0 dependency installer for Windows (expects pip, npm and conda on PATH).
pip install -r requirements.txt
rem "call" is required so control returns to this script after each batch tool exits.
call npm i
call conda install -y -c conda-forge ffmpeg
echo Install complete.
--------------------------------------------------------------------------------
/v7.0/losses.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 7 Loss Functions
5 | #
6 |
7 | import tensorflow as tf
8 | from tensorflow.python.keras.utils import losses_utils
9 | from tensorflow.python.keras.losses import LossFunctionWrapper
10 |
11 | # A regularizer to keep the map inside the box.
12 | # It's so the sliders and notes don't randomly fly out of the screen!
# A regularizer to keep the map inside the box.
# It's so the sliders and notes don't randomly fly out of the screen!
def inblock_loss(vg, border, value):
    # Quadratic penalty for coordinates below `border` or above 1 - border;
    # zero inside the allowed band. Reduced over the last two axes.
    below = tf.where(tf.less(vg, border), tf.square(value - vg), 0 * vg);
    above = tf.where(tf.greater(vg, 1 - border), tf.square(vg - (1 - value)), 0 * vg);
    per_note = tf.reduce_mean(below + above, axis=2);
    return tf.reduce_mean(per_note, axis=1);
17 |
18 | # Loss functions and mapping layer, to adapt to TF 2.0
class GenerativeCustomLoss(LossFunctionWrapper):
    """
    Loss for the generative model.

    The discriminator's output on generated samples is the score:
    loss = 1 - mean(classification), so the generator improves when its
    samples are classified as true.
    """
    def __init__(self,
                 reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                 name='generative_custom_loss'):

        # Inner function name kept: keras records it when serializing the loss.
        def loss_function_for_generative_model(y_true, y_pred):
            return 1 - tf.reduce_mean(y_pred, axis=1);

        super(GenerativeCustomLoss, self).__init__(loss_function_for_generative_model, name=name, reduction=reduction)
34 |
class BoxCustomLoss(LossFunctionWrapper):
    """
    Checks if note_start and note_end positions are within boundaries.
    If it gets close to the boundary then this loss function will produce positive value. Otherwise it is zero.
    """
    def __init__(self,
                 border,
                 value,
                 reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                 name='box_custom_loss'):
        # Default name was 'generative_custom_loss', copy-pasted from the class
        # above; a distinct name keeps logs and serialized configs unambiguous.

        self.loss_border = border
        self.loss_value = value

        def box_loss(y_true, y_pred):
            # Columns 0:2 are the note-start (x, y), 4:6 the note-end (x, y).
            map_part = y_pred;
            return inblock_loss(map_part[:, :, 0:2], self.loss_border, self.loss_value) + inblock_loss(map_part[:, :, 4:6], self.loss_border, self.loss_value)

        super(BoxCustomLoss, self).__init__(box_loss, name=name, reduction=reduction)
54 |
class AlwaysZeroCustomLoss(LossFunctionWrapper):
    """
    A loss that is constantly zero, for model outputs that must not contribute
    to training. (TF has no built-in equivalent.)
    """
    def __init__(self,
                 reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                 name='always_zero_custom_loss'):
        # Default name was 'generative_custom_loss', copy-pasted from the first
        # class in this file; a distinct name disambiguates logs and configs.

        def alw_zero(y_true, y_pred):
            return tf.convert_to_tensor(0, dtype=tf.float32);

        super(AlwaysZeroCustomLoss, self).__init__(alw_zero, name=name, reduction=reduction)
67 |
68 |
--------------------------------------------------------------------------------
/v7.0/lost_losses.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # redundant loss calculation
5 | #
6 |
7 | import tensorflow as tf
8 |
def stack_loss(tensor):
    """
    Penalty for note pairs that are close together but not perfectly stacked.

    Each note's (x, y) is mapped to a complex number scaled to the 512x384
    osu! playfield, so pairwise distances are |difference|. Pairs whose
    distance lies strictly between precise_limit (1 px, i.e. not already an
    exact stack) and stack_limit (30 px) add a penalty that grows linearly
    as they get closer.

    NOTE(review): assumes tensor has shape (batch, notes, >=2) with
    coordinates normalized to 0..1 -- confirm against callers.
    """
    complex_list = tf.complex(tensor[:, :, 0] * 512, tensor[:, :, 1] * 384);
    stack_limit = 30;
    precise_limit = 1;
    a = [];
    for k in range(tensor.shape[1]):
        # Broadcast note k against every note to get pairwise distances
        w = tf.tile(tf.expand_dims(complex_list[:, k], axis=1), [1, tensor.shape[1]]);
        r = tf.abs(w - complex_list);
        # Mask: 1.0 where precise_limit < r < stack_limit, else 0.0
        rless = tf.cast(tf.less(r, stack_limit), tf.float32) * tf.cast(tf.greater(r, precise_limit), tf.float32);
        # Linear falloff: maximal near distance 0, zero at stack_limit
        rmean = tf.reduce_mean(rless * (stack_limit - r) / stack_limit);
        a.append(rmean);
    b = tf.reduce_sum(a);
    return b;
22 |
# This polygon loss was an attempt to make the map less likely to overlap each other.
# The idea is: calculate the area of polygon formed from the note positions;
# If it is big, then it is good - they form a convex shape, no overlap.
# ... of course it totally doesn't work like that.
def polygon_loss(tensor):
    """
    Shoelace-formula area of the polygon through each batch row's note
    positions (columns 0:2 = x, y). Returns one non-negative area per row.
    """
    tensor_this = tensor[:, :, 0:2];
    # Next vertex, wrapping the last note back to the first
    tensor_next = tf.concat([tensor[:, 1:, 0:2], tensor[:, 0:1, 0:2]], axis=1);
    # Shoelace term (x_i + x_{i+1}) * (y_{i+1} - y_i).
    # Fixed: original used x_i (index 0) instead of y_i (index 1) in the
    # second factor, which does not compute the polygon area.
    sa = (tensor_this[:, :, 0] + tensor_next[:, :, 0]) * (tensor_next[:, :, 1] - tensor_this[:, :, 1]);
    surface = tf.abs(tf.reduce_sum(sa, axis=1))/2;
    return surface;
--------------------------------------------------------------------------------
/v7.0/mania_Mapmaking.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## osumapper #4: New Map Reader\n"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "Set the input file string to a timed (having the right BPM/offset) .osu file.\n",
15 | "\n",
16 | "It converts the map/music to Python readable format."
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "metadata": {},
23 | "outputs": [],
24 | "source": [
25 | "from act_newmap_prep import *\n",
26 | "\n",
27 | "# input file here! (don't remove the \"r\" before string)\n",
28 | "file_path = r'..\\..\\test_data\\test_mania.osu'\n",
29 | "file_path = r'D:\\osu!\\Songs\\beatmap-637381895878231005-03.Mystic Pendulum\\RURUTIA - Mystic Pendulum ([CSGA]Ar3sgice) [Mania 1K].osu'\n",
30 | "\n",
31 | "# Or use auto timing with music file only!!\n",
32 | "\n",
33 | "# from act_timing import *;\n",
34 | "# music_path = r\"..\\..\\test_data\\audio.mp3\"\n",
35 | "# file_path = get_timed_osu_file(music_path, game_mode=3, mania_key_count=9);\n",
36 | "\n",
37 | "step4_read_new_map(file_path);"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "## osumapper #5: Rhythm Predictor\n",
45 | "\n",
46 | "Calculates a map's rhythm based on the music and timing."
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 | "Parameters:\n",
54 | "\n",
55 |     "\"note_density\" determines how many notes will be placed on the timeline, ranges from 0 to 1.\n",
56 |     "\"hold_favor\" determines how the model favors holds against circles, ranges from -1 to 1.\n",
57 |     "\"divisor_favor\" determines how the model favors notes to be on X divisors starting from a beat (white, blue, red, blue), ranges from -1 to 1 each.\n",
58 |     "\"hold_max_ticks\" determines the max amount of time a hold can hold off, ranges from 1 to +∞.\n",
59 |     "\"hold_min_return\" determines the final granularity of the pattern dataset, ranges from 1 to +∞.\n",
60 | "\"rotate_mode\" determines how the patterns from the dataset gets rotated. modes (0,1,2,3,4)\n",
61 | "- 0 = no rotation\n",
62 | "- 1 = random\n",
63 | "- 2 = mirror\n",
64 | "- 3 = circulate\n",
65 | "- 4 = circulate + mirror"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "from mania_act_rhythm_calc import *\n",
75 | "\n",
76 | "model = step5_load_model();\n",
77 | "npz = step5_load_npz();\n",
78 | "params = step5_set_params(note_density=0.6, hold_favor=0.2, divisor_favor=[0] * divisor, hold_max_ticks=8, hold_min_return=1, rotate_mode=4);\n",
79 | "\n",
80 | "predictions = step5_predict_notes(model, npz, params);"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "notes_each_key = step5_build_pattern(predictions, params);"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {},
95 | "source": [
96 | "Do a little modding to the map.\n",
97 | "\n",
98 | "Parameters:\n",
99 | "\n",
100 |     "- key_fix: remove continuous notes on single key modes (0,1,2,3) 0=inactive 1=remove late note 2=remove early note 3=divert\n",
101 | " should be set to 0 for low key count"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "metadata": {},
108 | "outputs": [],
109 | "source": [
110 | "modding_params = {\n",
111 | " \"key_fix\" : 3\n",
112 | "}\n",
113 | "\n",
114 | "notes_each_key = mania_modding(notes_each_key, modding_params);\n",
115 | "notes, key_count = merge_objects_each_key(notes_each_key)"
116 | ]
117 | },
118 | {
119 | "cell_type": "markdown",
120 | "metadata": {},
121 | "source": [
122 | "Finally, save the data into an .osu file!"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | "from mania_act_final import *\n",
132 | "\n",
133 | "saved_osu_name = step8_save_osu_mania_file(notes, key_count);\n",
134 | "\n",
135 | "# clean up the folder\n",
136 | "step8_clean_up();"
137 | ]
138 | },
139 | {
140 | "cell_type": "markdown",
141 | "metadata": {},
142 | "source": [
143 | "If it works alright, you should have a nice .osu file under the folder of these notebooks now!\n",
144 | "\n",
145 | "If it does not work, please tell me the problem so probably I could fix it!\n",
146 | "\n",
147 |     "For bug reports and feedbacks either report it on github or use discord:\n",
148 | "[https://discord.com/invite/npmSy7K](https://discord.com/invite/npmSy7K)\n",
149 | "\n",
150 | ""
151 | ]
152 | }
153 | ],
154 | "metadata": {
155 | "kernelspec": {
156 | "display_name": "Python 3",
157 | "language": "python",
158 | "name": "python3"
159 | },
160 | "language_info": {
161 | "codemirror_mode": {
162 | "name": "ipython",
163 | "version": 3
164 | },
165 | "file_extension": ".py",
166 | "mimetype": "text/x-python",
167 | "name": "python",
168 | "nbconvert_exporter": "python",
169 | "pygments_lexer": "ipython3",
170 | "version": "3.8.3"
171 | }
172 | },
173 | "nbformat": 4,
174 | "nbformat_minor": 2
175 | }
176 |
--------------------------------------------------------------------------------
/v7.0/mania_act_data_prep.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 1 action script
5 | #
6 |
7 | from mania_audio_tools import *;
8 |
9 | import os, re, time;
10 |
mapdata_path = "mapdata/";

# Beat divisor used when quantizing map ticks. May be overridden through a
# globally injected GLOBAL dict (e.g. from a notebook); falls back to 4.
try:
    divisor = GLOBAL["divisor"];
except Exception:  # was a bare except; keep KeyboardInterrupt/SystemExit raisable
    divisor = 4;
16 |
def step1_load_maps():
    """
    Convert every .osu map listed in maplist.txt into an .npz training-data
    file under mapdata/, replacing any previously generated data.

    Blank lines and '#' comment lines in maplist.txt are skipped (the
    shipped maplist.txt starts with a '# .osu paths here' comment, which
    previously produced a spurious per-map error).
    """
    # Test paths and node
    test_process_path("node");
    if not os.path.isdir(mapdata_path):
        os.mkdir(mapdata_path);

    # Open maplist, keeping only real map paths
    with open("maplist.txt", encoding="utf8") as fp:
        results = [line.strip() for line in fp
                   if line.strip() and not line.strip().startswith("#")];

    # Remove previously generated map data
    for file in os.listdir(mapdata_path):
        if file.endswith(".npz"):
            os.remove(os.path.join(mapdata_path, file));

    print("Number of filtered maps: {}".format(len(results)));

    for k, mname in enumerate(results):
        try:
            start = time.time()
            read_and_save_osu_file(mname, filename=os.path.join(mapdata_path, str(k)), divisor=divisor);
            end = time.time()
            print("Map data #" + str(k) + " saved! time = " + str(end - start) + " secs");
        except Exception as e:
            print("Error on #{}, path = {}, error = {}".format(str(k), mname, e));
--------------------------------------------------------------------------------
/v7.0/mania_act_final.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Part 8 .osu file JSON processing / output
5 | #
6 |
7 | import re, json, datetime;
8 | from os_tools import *;
9 |
def convert_to_osu_mania_obj(notes, key_count):
    """
    Converts map data from python format to json format.

    notes: iterable of (begin_ms, end_ms, key_index, tick); begin == end
    means a circle, otherwise a hold note. Spinners are not handled.
    """
    # Column center X coordinates on the 512-wide osu! playfield
    column_x = [int(round((0.5 + k) / key_count * 512)) for k in range(key_count)];

    output = [];
    for index, (begin, end, key, tick) in enumerate(notes):
        is_circle = (begin == end);
        obj_dict = {
            "x": column_x[key],
            "y": 192,
            "type": 1 if is_circle else 128,
            "time": int(begin),
            "hitsounds": 0,
            "extHitsounds": "0:0:0",
        };
        if not is_circle:
            # hold notes additionally carry their release time
            obj_dict["holdEndTime"] = int(end);
        obj_dict["index"] = index;
        output.append(obj_dict);
    return output;
44 |
def get_osu_file_name(metadata):
    """
    Construct the .osu file name from the metadata.

    metadata: dict with "artist", "title", "creator", "diffname" keys.
    Characters outside osu!'s filename-safe set are stripped; an empty
    artist omits the "artist - " prefix.
    """
    artist = metadata["artist"];
    title = metadata["title"];
    creator = metadata["creator"];
    diffname = metadata["diffname"];
    outname = (artist+" - " if len(artist) > 0 else "") + title + " (" + creator + ") [" + diffname + "].osu";
    # raw string: the original non-raw pattern relied on invalid escape
    # sequences (\( etc.), which emit warnings on modern Python
    outname = re.sub(r"[^a-zA-Z0-9\(\)\[\] \.\,\!\~\`\{\}\-\_\=\+\&\^\@\#\$\%\;\']", "", outname);
    return outname;
56 |
def step8_save_osu_mania_file(notes, key_count):
    """
    Save trained map to disk, using filename generated from its metadata.

    Converts `notes` into osu! hit-object dicts, writes them into the
    "mapthis.json" intermediate file, then runs the node.js load_map.js
    script ("c" = compile mode) to produce the final .osu file.
    Returns the generated .osu filename.
    """
    osu_obj_array = convert_to_osu_mania_obj(notes, key_count);

    # Read the map skeleton produced in earlier steps and attach the objects
    with open("mapthis.json", encoding="utf-8") as map_json:
        map_dict = json.load(map_json);
        map_meta = map_dict["meta"];
        filename = get_osu_file_name(map_meta);
        map_dict["obj"] = osu_obj_array;

    with open('mapthis.json', 'w', encoding="utf-8") as outfile:
        json.dump(map_dict, outfile, ensure_ascii=False);

    # Any non-trivial output from node is printed for debugging
    c = run_command(["node", "load_map.js", "c", "mapthis.json", filename]);
    if(len(c) > 1):
        print(c.decode("utf-8"));

    print("finished on: {}".format(datetime.datetime.now()));

    return filename;
79 |
def step8_clean_up():
    """
    Best-effort removal of all intermediate files produced by the pipeline.
    Missing files are silently ignored.
    """
    # local import: this module never imports os explicitly and previously
    # relied on `from os_tools import *` happening to re-export it
    import os;
    for item in ["mapthis.json", "audio.mp3", "timing.osu", "rhythm_data.npz", "mapthis.npz", "temp_json_file.json", "wavfile.wav", "temp/temp_json_file.json", "temp/wavfile.wav", "evaluatedRhythm.json"]:
        try:
            os.remove(item);
        except OSError:  # file absent or not removable -- keep going
            pass
--------------------------------------------------------------------------------
/v7.0/mania_audio_tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # For osu! file reading and analysis
5 | #
6 |
7 | import librosa;
8 | import re, os, subprocess, json;
9 | import numpy as np;
10 | from os_tools import *;
11 | from mania_analyze import *;
12 |
# It will always fail. Soundfile doesn't support mp3
import warnings;
warnings.filterwarnings("ignore", message="PySoundFile failed. Trying audioread instead.");

# Run from this file's directory so relative paths (node scripts, temp files)
# resolve regardless of the caller's working directory.
workingdir = os.path.dirname(os.path.abspath(__file__));
os.chdir(workingdir);
19 |
def read_osu_file(path, convert=False, wav_name="wavfile.wav", json_name="temp_json_file.json"):
    """
    Read .osu file to get audio path and JSON formatted map data
    "convert" will also read the music file (despite the name it doesn't convert)

    Returns (map_dict, mp3_file); mp3_file is None unless convert=True.
    """
    file_dir = os.path.dirname(os.path.abspath(path));

    # ask node.js to convert the .osu file to .json format
    result = run_command(["node", "load_map.js", "jq", path, json_name]);
    if(len(result) > 1):
        print(result.decode("utf-8"));
        raise Exception("Map Convert Failure");

    with open(json_name, encoding="utf-8") as map_json:
        map_dict = json.load(map_json);

    # bug fix: mp3_file was undefined (NameError on return) when convert=False
    mp3_file = None;
    if convert:
        mp3_file = os.path.join(file_dir, map_dict["general"]["AudioFilename"]);
        # result = run_command([FFMPEG_PATH, "-y", "-i", mp3_file, wav_name]);
        # if(len(result) > 1):
        #     print(result.decode("utf-8"));
        #     raise Exception("FFMPEG Failure");

    # delete the temp json later
    # if json_name == "temp_json_file.json":
    #     os.remove(json_name);

    return map_dict, mp3_file;
48 |
def get_freqs(sig, fft_size):
    """
    Do Fourier Transform and split the positive-frequency half of the
    spectrum into magnitude and phase arrays.
    """
    spectrum = np.fft.fft(sig, fft_size)[0:fft_size // 2];
    magnitudes = np.abs(spectrum);
    phases = np.angle(spectrum);
    return magnitudes, phases;
58 |
def slice_wave_at(ms, sig, samplerate, size):
    """
    Cut a window of `size` samples centered on timestamp `ms` (milliseconds).
    The start is clamped to 0 near the beginning of the signal.
    """
    center = int((ms / 1000 * samplerate) // 1);
    start = max(0, center - size // 2);
    stop = center + size - size // 2;
    return sig[start:stop];
62 |
def lrmix(sig):
    """
    Average the left/right channels of stereo audio into mono.
    Unused in this version (audio is already loaded as mono).
    """
    left = sig[:, 0];
    right = sig[:, 1];
    return (left + right) / 2;
68 |
def get_wav_data_at(ms, sig, samplerate, fft_size=2048, freq_low=0, freq_high=-1):
    """
    FFT magnitude/phase of an fft_size-sample window around timestamp `ms`,
    cropped to the [freq_low, freq_high] Hz band.
    A freq_high of -1 means up to the Nyquist frequency (samplerate // 2).
    """
    if freq_high == -1:
        freq_high = samplerate // 2;

    window = slice_wave_at(ms, sig, samplerate, fft_size);

    # since osu! maps are usually not mapped to stereo wave, let's mix it to reduce 50% of data
    # waveslice_lr = lrmix(waveslice);

    # do a nice FFT
    magnitudes, phases = get_freqs(window, fft_size);

    # convert Hz bounds to FFT bin indices and crop both arrays
    low_bin = fft_size * freq_low // samplerate;
    high_bin = fft_size * freq_high // samplerate;
    return magnitudes[low_bin:high_bin], phases[low_bin:high_bin];
85 |
def read_wav_data(timestamps, wavfile, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size = 1024):
    """
    Read audio data based on timestamps.

    Snapint are percentages of difference between two timestamps.
    These are read to handle potential small offset differences between python and osu!.

    Resampling disabled for librosa because it is too slow.

    Returns FFT data standardized (mean 0 / std 1) across the timestamp axis,
    indexed as [snapint, timestamp, (magnitude|phase), frequency bin].
    """
    sig, samplerate = librosa.load(wavfile, sr=None, mono=True);
    data = list();

    # normalize sound wave
    # sig = sig / np.sqrt(np.mean(sig**2, axis=0));
    # sig = sig / np.max(np.max(np.abs(sig), axis=0));
    sig = sig / np.max(np.abs(sig));  # peak-normalize to [-1, 1]

    # calc a length array
    # interval[i] = gap between timestamp i and i+1, with the last gap repeated
    tmpts = np.array(timestamps);
    timestamp_interval = tmpts[1:] - tmpts[:-1];
    timestamp_interval = np.append(timestamp_interval, timestamp_interval[-1]);

    for sz in snapint:
        # Sample each timestamp shifted by sz * its interval.
        # NOTE(review): the clamp mixes units -- coord is in milliseconds while
        # len(sig) - fft_size is in samples; verify whether the upper bound
        # ever triggers as intended.
        data_r = np.array([get_wav_data_at(max(0, min(len(sig) - fft_size, coord + timestamp_interval[i] * sz)), sig, samplerate, fft_size=fft_size, freq_high=samplerate//4) for i, coord in enumerate(timestamps)]);
        data.append(data_r);


    raw_data = np.array(data);
    # Standardize each snapint channel across the timestamp axis (axis=1)
    norm_data = np.tile(np.expand_dims(np.mean(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1));
    std_data = np.tile(np.expand_dims(np.std(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1));
    return (raw_data - norm_data) / std_data;
117 |
def mania_transformed_lst_data(data):
    """
    Re-encode the note-type column (index 3) of each row into five flag
    columns, keeping the rest of the row intact.

    Input rows:  [TICK, TIME, NOTE, NOTE_TYPE, d4..d9]
    Output rows: [TICK, TIME, NOTE, IS_CIRCLE, IS_SLIDER, IS_SPINNER,
                  IS_NOTE_END, UNUSED, d4..d9]
    """
    # note_type -> (is_circle, is_slider, is_spinner, is_note_end, unused)
    flag_table = {
        1: (1, 0, 0, 1, 0),
        2: (0, 1, 0, 0, 0),
        3: (1, 1, 0, 1, 0),
        4: (0, 0, 0, 1, 0),
    };
    default_flags = (0, 0, 0, 0, 0);

    transformed_data = [];
    for d in data:
        flags = flag_table.get(d[3], default_flags);
        transformed_data.append([d[0], d[1], d[2], *flags, d[4], d[5], d[6], d[7], d[8], d[9]]);
    return transformed_data;
132 |
def read_and_save_osu_file(path, filename = "saved", divisor=4):
    """
    # Main function
    # Generated data shape:
    # - "lst" array, length MAPTICKS
    #   table of [TICK, TIME, NOTE, IS_CIRCLE, IS_SLIDER, IS_SPINNER, IS_NOTE_END, UNUSED, SLIDING, SPINNING, MOMENTUM, EX1, EX2, EX3],
    #                 0,    1,    2,         3,         4,          5,           6,      7,       8,        9,       10
    # - "wav" array, shape of [len(snapsize), MAPTICKS, 2, fft_size//4]
    # - "pattern" array, shape [num_groups, main_metronome * divisor, 2 * key_count + 1]
    #     [:, :, 0] pattern_avail_hold
    #     [:, :, 1:1+key_count] pattern_note_begin
    #     [:, :, 1+key_count:1+2*key_count] pattern_note_end
    #
    # MAPTICKS = (Total map time + 3000) / tickLength / (divisor = 4) - EMPTY_TICKS
    # EMPTY_TICKS = ticks where no note around in 5 secs
    """
    osu_dict, wav_file = read_osu_file(path, convert = True);
    data, pattern_data = get_map_notes_and_patterns(osu_dict, divisor=divisor);
    timestamps = [c[1] for c in data];  # TIME column of each lst row
    wav_data = read_wav_data(timestamps, wav_file, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size = 128);
    # in order to match first dimension
    wav_data = np.swapaxes(wav_data, 0, 1);

    # change the representation of note_type
    # a bit of copypaste code because I changed the data structure many times here
    transformed_data = mania_transformed_lst_data(data);

    np.savez_compressed(filename, lst = transformed_data, wav = wav_data, pattern = pattern_data);
161 |
def read_and_save_osu_tester_file(path, filename = "saved", json_name="mapthis.json", divisor=4):
    """
    Like read_and_save_osu_file, but for a timed (not yet mapped) .osu file:
    saves tick indices, timestamps, FFT data and per-tick extra data
    (60000 / tick_length, i.e. BPM, and slider length) to an .npz file.
    """
    osu_dict, wav_file = read_osu_file(path, convert = True, json_name=json_name);
    sig, samplerate = librosa.load(wav_file, sr=None, mono=True);
    file_len = (sig.shape[0] / samplerate * 1000 - 3000);  # music length in ms, minus a 3000 ms tail

    # ticks = ticks from each uninherited timing section
    ticks, timestamps, tick_lengths, slider_lengths = get_all_ticks_and_lengths_from_ts(osu_dict["timing"]["uts"], osu_dict["timing"]["ts"], file_len, divisor=divisor);

    # old version to determine ticks (all from start)
    # ticks = np.array([i for i,k in enumerate(timestamps)]);
    extra = np.array([60000 / tick_lengths, slider_lengths]);

    wav_data = read_wav_data(timestamps, wav_file, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size = 128);
    # in order to match first dimension
    wav_data = np.swapaxes(wav_data, 0, 1);

    np.savez_compressed(filename, ticks = ticks, timestamps = timestamps, wav = wav_data, extra = extra);
179 |
def read_and_return_osu_file(path, divisor=4):
    """
    Read a map and return (note data, FFT data, flow data) in memory
    instead of saving them to an .npz file.
    """
    osu_dict, wav_file = read_osu_file(path, convert = True);
    data, flow_data = get_map_notes(osu_dict, divisor=divisor);
    timestamps = [c[1] for c in data];  # TIME column of each note row
    wav_data = read_wav_data(timestamps, wav_file, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size = 128);
    return data, wav_data, flow_data;
186 |
187 | def test_process_path(path):
188 | """
189 | Use the version command to test if a dependency works
190 | """
191 | try:
192 | subprocess.call([path, "--version"]);
193 | return True;
194 | except:
195 | print("Cannot find executable on {}".format(path));
196 | return False;
197 |
--------------------------------------------------------------------------------
/v7.0/mania_setup_colab.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | #
4 | # Colab functions
5 | #
6 |
7 | import os
8 |
def colab_clean_up(input_file_name):
    """
    Best-effort removal of the uploaded input file and the pipeline's
    intermediate files. Missing files are ignored.
    """
    for item in [input_file_name, "mapthis.json", "audio.mp3", "timing.osu", "rhythm_data.npz", "mapthis.npz"]:
        try:
            os.remove(item);
        except OSError:  # was a bare except; only swallow filesystem errors
            pass
    print("intermediate files cleaned up!")
16 |
def load_pretrained_model(model_name):
    """
    Look up paths and default parameters for a pretrained mania model.

    Returns a dict with keys "rhythm_model", "pattern_dataset",
    "rhythm_param" ([note_density, hold_favor, divisor_favor,
    hold_max_ticks, hold_min_return, rotate_mode]) and "modding".
    Unknown names fall back to the "default" entry, whose paths are built
    from the requested name.
    """
    model_data = {
        "default" : {
            # only the default entry's paths depend on the requested name
            "rhythm_model" : "models/{}/rhythm_model".format(model_name),
            "pattern_dataset" : "models/{}/mania_pattern_dataset.npz".format(model_name),
            "rhythm_param" : [0.5, 0.2, [0, 0, 0, 0], 8, 1, 4],
            "modding" : {
                "key_fix" : 3
            }
        },
        "lowkey" : {
            # fixed paths; the original called .format() on them with no placeholders
            "rhythm_model" : "models/mania_lowkey/rhythm_model",
            "pattern_dataset" : "models/mania_pattern/mania_pattern_dataset.npz",
            "rhythm_param" : [0.65, 0.4, [0, 0, 0, 0], 8, 5, 4],
            "modding" : {
                "key_fix" : 0
            }
        },
        "highkey" : {
            "rhythm_model" : "models/mania_highkey/rhythm_model",
            "pattern_dataset" : "models/mania_pattern/mania_pattern_dataset.npz",
            "rhythm_param" : [0.45, 0.12, [0, 0, 0, 0], 8, 5, 4],
            "modding" : {
                "key_fix" : 3
            }
        }
    };
    if model_name not in model_data:
        return model_data["default"];
    return model_data[model_name];
--------------------------------------------------------------------------------
/v7.0/maplist.txt:
--------------------------------------------------------------------------------
1 | # .osu paths here
--------------------------------------------------------------------------------
/v7.0/maplist_maker/html/Inter-Regular.osu.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v7.0/maplist_maker/html/Inter-Regular.osu.woff
--------------------------------------------------------------------------------
/v7.0/maplist_maker/html/Inter-Regular.osu.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v7.0/maplist_maker/html/Inter-Regular.osu.woff2
--------------------------------------------------------------------------------
/v7.0/maplist_maker/html/Torus-Regular.osu.otf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kotritrona/osumapper/db1eeabccef4adf822551580731b9ec3d4caec68/v7.0/maplist_maker/html/Torus-Regular.osu.otf
--------------------------------------------------------------------------------
/v7.0/maplist_maker/html/font-face.css:
--------------------------------------------------------------------------------
1 | @font-face{
2 | font-family: Inter;
3 | font-style: normal;
4 | font-display: swap;
5 | src: url(Inter-Regular.osu.woff2) format("woff2"),
6 | url(Inter-Regular.osu.woff) format("woff");
7 | }
8 | @font-face{
9 | font-family: Torus;
10 | font-style: normal;
11 | font-display: swap;
12 | src: url(Torus-Regular.osu.otf);
13 | }
--------------------------------------------------------------------------------
/v7.0/maplist_maker/html/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |