├── .gitignore ├── Star_Rating_Rebirth.pdf ├── test.py ├── pyproject.toml ├── README.md ├── srcalc-script.py ├── .github └── workflows │ └── build.yml ├── other_params.py ├── osu_file_parser.py └── algorithm.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | Test 3 | -------------------------------------------------------------------------------- /Star_Rating_Rebirth.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxxy/Star-Rating-Rebirth/HEAD/Star_Rating_Rebirth.pdf -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import algorithm 3 | 4 | folder_path = 'Test' # Update this to the path of your Test folder 5 | 6 | w_0, w_1, p_1, w_2, p_0 = 0.4, 2.7, 1.5, 0.27, 1.0 7 | # Traverse the directory and process each .osu file 8 | for root, dirs, files in os.walk(folder_path): 9 | for file in files: 10 | if file.endswith('.osu'): 11 | file_path = os.path.join(root, file) 12 | result = algorithm.calculate(file_path, 'NM') 13 | print(file, "|", f'{result:.4f}') 14 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "star-rating-rebirth" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["U1d "] 6 | readme = "README.md" 7 | 8 | [tool.poetry.dependencies] 9 | python = ">=3.12,<3.14" 10 | pandas = "^2.2.3" 11 | 12 | 13 | [tool.poetry.group.dev.dependencies] 14 | matplotlib = "^3.10.0" 15 | jupyter = "^1.1.1" 16 | 17 | 18 | [tool.poetry.group.pyinstaller.dependencies] 19 | pyinstaller = "^6.11.1" 20 | 21 | [build-system] 22 | requires = ["poetry-core"] 23 | build-backend = "poetry.core.masonry.api" 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Steps to use this program: 2 | 3 | ### **Method 1: Download and run the executable file (`.exe`)** 4 | 5 | 1. Go to the [Releases](https://github.com/sunnyxxy/Star-Rating-Rebirth/releases) section of the repository; 6 | 2. Download the latest version of the `srcalc.exe` file; 7 | 3. Place the `srcalc.exe` file and your `.osu` files in the same folder; 8 | 4. Run `srcalc.exe`. 9 | 10 | 11 | ### **Method 2: Run the source code** 12 | 13 | 1. Make sure you have a python environment; 14 | 2. Create a Test folder next to the python files; 15 | 3. Throw .osu files in the Test folder; 16 | 4. Run test.py. 17 | -------------------------------------------------------------------------------- /srcalc-script.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | from enum import Enum 4 | from pathlib import Path 5 | 6 | import algorithm 7 | 8 | 9 | class Mod(Enum): 10 | NM = "NM" 11 | DT = "DT" 12 | HT = "HT" 13 | 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser(description="Calculate SR for osu! 
beatmaps.") 17 | parser.add_argument("folder_path", nargs='?', default=Path.cwd(), type=Path, help='Path to the folder containing .osu files.') 18 | parser.add_argument("--mod", "-M", type=str, choices=[mod.value for mod in Mod], default=Mod.NM.value, help='Mod to apply (NM, DT, HT).') 19 | parser.add_argument("--version", "-V", action="store_true", help="Show build version (build time) and exit.") 20 | args = parser.parse_args() 21 | 22 | def resource_path(relative_path: str) -> Path: 23 | base_path = Path(getattr(sys, '_MEIPASS', Path(__file__).parent)) 24 | return base_path / relative_path 25 | 26 | build_time_file = resource_path("build_time") 27 | if build_time_file.exists(): 28 | version_str = f" (algorithm version: {build_time_file.read_text(encoding="utf-8").strip()})" 29 | else: 30 | version_str = "" 31 | credit_str = f"Star-Rating-Rebirth by [Crz]sunnyxxy{version_str}" 32 | 33 | if args.version: 34 | print(credit_str) 35 | sys.exit(0) 36 | 37 | folder_path = args.folder_path 38 | if not folder_path.is_dir(): 39 | print(f"Error: {folder_path} is not a valid directory.") 40 | sys.exit(1) 41 | 42 | print(credit_str) 43 | 44 | mod = args.mod 45 | print(f"Dir: {folder_path}, Mod: {mod}\n") 46 | 47 | while True: 48 | for file in Path(folder_path).iterdir(): 49 | if file.suffix == ".osu": 50 | result = algorithm.calculate(file, mod) 51 | print(f"({mod}) {file.stem} | {result:.4f}") 52 | try: 53 | input("SR calculation completed. Press Enter to run again or 'Ctrl+C' to exit.") 54 | print() 55 | except KeyboardInterrupt: 56 | sys.exit(0) 57 | 58 | 59 | if __name__ == "__main__": 60 | main() 61 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build and Release Windows Executable 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | # 1. Grant write access to repository contents. 7 | permissions: 8 | contents: write 9 | 10 | jobs: 11 | build: 12 | runs-on: windows-latest 13 | steps: 14 | - name: Checkout code 15 | uses: actions/checkout@v4 16 | with: 17 | # 2. Ensure we have full history for tagging 18 | fetch-depth: 0 19 | 20 | - name: Set up Python 21 | uses: actions/setup-python@v5 22 | with: 23 | python-version: '3.12' 24 | 25 | - name: Install Poetry 26 | run: | 27 | (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python - 28 | echo "$env:APPDATA\Python\Scripts" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append 29 | 30 | - name: Configure Poetry 31 | run: poetry config virtualenvs.in-project true 32 | 33 | - name: Install project dependencies 34 | run: poetry install --only=main --only=pyinstaller --no-root 35 | 36 | - name: Generate build time file 37 | shell: pwsh 38 | run: | 39 | $buildTime = Get-Date -Format "yyyy/MM/dd" 40 | "$buildTime" | Out-File -FilePath "build_time" -Encoding UTF8 41 | echo "VERSION=$buildTime" >> $env:GITHUB_ENV 42 | working-directory: ${{ github.workspace }} 43 | 44 | - name: Build 45 | run: poetry run pyinstaller --onefile --add-data "build_time;." 
--name srcalc srcalc-script.py 46 | 47 | - name: Test the exe file 48 | shell: pwsh 49 | run: | 50 | $exePath = Join-Path $env:GITHUB_WORKSPACE "dist/srcalc.exe" 51 | Write-Host "Testing executable at $exePath" 52 | 53 | $output = & $exePath --version 54 | Write-Host "Exe output:" 55 | Write-Host $output 56 | 57 | if ($output -notmatch "Star-Rating-Rebirth by $VERSION") { 58 | Write-Error "Test failed: exe output did not contain the expected text." 59 | } 60 | 61 | - name: Create tag 62 | id: tag 63 | shell: pwsh 64 | run: | 65 | Write-Host "Tagging version: $VERSION" 66 | git config user.name "github-actions[bot]" 67 | git config user.email "github-actions[bot]@users.noreply.github.com" 68 | git tag $VERSION 69 | git push origin $VERSION 70 | 71 | - name: Upload artifact 72 | uses: actions/upload-artifact@v4 73 | with: 74 | name: executable 75 | path: dist/srcalc.exe 76 | 77 | - name: Release 78 | id: create_release 79 | uses: softprops/action-gh-release@v2 80 | with: 81 | tag_name: ${{ env.VERSION }} 82 | name: ${{ env.VERSION }} 83 | files: dist/srcalc.exe 84 | env: 85 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 86 | -------------------------------------------------------------------------------- /other_params.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import numpy as np 3 | 4 | def rao_quadratic_entropy_log(values, log_iterations=1): 5 | values = np.array(values) 6 | 7 | # Determine the unique categories and their counts 8 | unique, counts = np.unique(values, return_counts=True) 9 | p = counts / counts.sum() # relative frequencies 10 | distance_func = lambda x, y, log_it: functools.reduce(lambda acc, _: np.log1p(acc), range(log_it), abs(x-y)) 11 | 12 | # Compute the distance (dissimilarity) matrix for the unique values 13 | n = len(unique) 14 | dist_matrix = np.zeros((n, n)) 15 | for i in range(n): 16 | for j in range(n): 17 | dist_matrix[i, j] = distance_func(unique[i], unique[j], log_iterations) 18 | 19 | # Compute Rao's Quadratic Entropy: Q = sum_{i,j} p_i * p_j * d(i, j) 20 | Q = np.sum(np.outer(p, p) * dist_matrix) 21 | return Q 22 | 23 | def variety(note_seq, note_seq_by_column): # assume that note_seq already is sorted by head 24 | heads = [n[1] for n in note_seq] 25 | tails = [n[2] for n in note_seq] # -1 for rice is included 26 | tails.sort() 27 | head_gaps = [int(heads[i+1] - heads[i]) for i in range(len(heads)-1)] 28 | tail_gaps = [int(tails[i+1] - tails[i]) for i in range(len(tails)-1)] 29 | head_variety = rao_quadratic_entropy_log(head_gaps, log_iterations=1) 30 | tail_variety = rao_quadratic_entropy_log(tail_gaps, log_iterations=1) 31 | 32 | head_gaps = [] 33 | for k in range(len(note_seq_by_column)): 34 | heads = [n[1] for n in note_seq_by_column[k]] 35 | head_gaps = head_gaps + [int(heads[i+1] - heads[i]) for i in range(len(heads)-1)] 36 | col_variety = 2.5*rao_quadratic_entropy_log(head_gaps, log_iterations=2) 37 | 38 | return 0.5*head_variety + 0.11*tail_variety + 0.45*col_variety 39 | 40 | def spikiness(D_sorted, w_sorted): 41 | weighted_mean = (np.sum(D_sorted**5 * w_sorted) / np.sum(w_sorted))**(1 / 5) 42 | weighted_variance = (np.sum((D_sorted**8 - weighted_mean**8)**2 * w_sorted) / np.sum(w_sorted))**(1 / 8) 43 | 44 | return np.sqrt(weighted_variance) / weighted_mean 45 | 46 | def switch(note_seq, tail_seq, all_corners, Ks_arr, weights): 47 | heads = [n[1] for n in note_seq] 48 | idx_list = np.searchsorted(all_corners, heads, side='left') 49 | Ks_arr_at_note = Ks_arr[idx_list][:-1] 50 | 
weights_at_note = weights[idx_list][:-1] 51 | head_gaps = [int(heads[i+1] - heads[i]) for i in range(len(heads)-1)] 52 | head_gaps = np.array(head_gaps)/1000 53 | avgs = np.array([ 54 | np.mean(head_gaps[max(0, i-50) : min(i+50, len(head_gaps)-1) + 1]) 55 | for i in range(len(head_gaps)) 56 | ]) 57 | signature_head = np.sum(np.sqrt(head_gaps / avgs / head_gaps.size * weights_at_note) * Ks_arr_at_note**(1/4)) 58 | ref_signature_head = np.sqrt(np.sum(head_gaps / avgs * weights_at_note)) 59 | 60 | tails = [n[2] for n in tail_seq] 61 | idx_list = np.searchsorted(all_corners, tails, side='left') 62 | Ks_arr_at_note = Ks_arr[idx_list][:-1] 63 | weights_at_note = weights[idx_list][:-1] 64 | tail_gaps = [int(tails[i+1] - tails[i]) for i in range(len(tails)-1)] 65 | tail_gaps = np.array(tail_gaps)/1000 66 | signature_tail = 0 67 | ref_signature_tail = 0 68 | if len(tails)>0 and tails[-1] > tails[0]: 69 | avgs = np.array([ 70 | np.mean(tail_gaps[max(0, i-50) : min(i+50, len(tail_gaps)-1) + 1]) 71 | for i in range(len(tail_gaps)) 72 | ]) 73 | signature_tail = np.sum(np.sqrt(tail_gaps / avgs / tail_gaps.size * weights_at_note) * Ks_arr_at_note**(1/4)) 74 | ref_signature_tail = np.sqrt(np.sum(tail_gaps / avgs * weights_at_note)) 75 | switches = (signature_head*head_gaps.size + signature_tail*tail_gaps.size) / (ref_signature_head*head_gaps.size + ref_signature_tail*tail_gaps.size) 76 | return switches/2 + 0.5 -------------------------------------------------------------------------------- /osu_file_parser.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # ...Why does python 3.x change next to __next__...... 4 | 5 | 6 | def string_to_int(str): 7 | return int(float(str)) 8 | 9 | 10 | def collect_data(data, new_datum): 11 | data = data.append(new_datum) 12 | 13 | # Parser Class that can be used on other class. 14 | 15 | 16 | class parser: 17 | def __init__(self, file_path): 18 | # Need to find some way to escape \. 19 | # self.file_path = file_path.replace("\\", "\\\\") 20 | self.file_path = file_path 21 | self.od = -1 22 | self.column_count = -1 23 | self.columns = [] 24 | self.note_starts = [] 25 | self.note_ends = [] 26 | self.note_types = [] 27 | 28 | def process(self): 29 | with open(self.file_path, "r+", encoding='utf-8') as f: 30 | try: 31 | for line in f: 32 | self.read_metadata(f, line) 33 | 34 | temp_cc = self.read_column_count(f, line) 35 | if temp_cc != -1: 36 | self.column_count = temp_cc 37 | 38 | temp_od = self.read_overall_difficulty(f, line) 39 | if temp_od != -1: 40 | self.od = temp_od 41 | 42 | if self.column_count != -1: 43 | self.read_note(f, line, self.column_count) 44 | 45 | except StopIteration: 46 | pass 47 | 48 | # Read metadata from .osu file. 49 | def read_metadata(self, f, line): 50 | if "[Metadata]" in line: 51 | while "Source:" not in line: 52 | # print(line, end="") 53 | line = f.__next__() 54 | 55 | def read_overall_difficulty(self, f, line): 56 | od = -1 57 | if "OverallDifficulty:" in line: 58 | temp = line.strip() 59 | pos_of_point = temp.index(':') 60 | if (pos_of_point == None): 61 | od = float(temp[-1]) 62 | else: 63 | od = float(temp[pos_of_point+1:]) 64 | # line = f.__next__() 65 | return float(od) 66 | 67 | # Read mode: key count. 
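# Note (added for clarity): in osu!mania the CircleSize field stores the key count rather than a
# circle radius (e.g. "CircleSize:7" means a 7K chart); read_column_count below reads only the
# trailing character, treating '0' as 10K.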
68 | def read_column_count(self, f, line): 69 | column_count = -1 70 | if "CircleSize:" in line: 71 | temp = line.strip() 72 | column_count = temp[-1] 73 | if column_count=='0': 74 | column_count='10' 75 | # line = f.__next__() 76 | # print(line, end='') 77 | return string_to_int(column_count) 78 | 79 | def read_Timing_Points(self, f, line): 80 | if "[TimingPoints]" in line: 81 | line = f.__next__() 82 | params = object_line.split(",") 83 | offset = string_to_int(params[0]) 84 | # mpb = 60000 / bpm... 85 | mpb = string_to_int(params[1]) 86 | # meter: number of beats in a measure. . 87 | meter = string_to_int(params[2]) 88 | # Other parameters are not important for measuring difficulty. 89 | 90 | # Main function for parsing note data. 91 | # https://osu.ppy.sh/help/wiki/osu!_File_Formats/Osu_(file_format) 92 | 93 | def read_note(self, f, line, column_count): 94 | if "[HitObjects]" in line: 95 | line = f.__next__() 96 | while line != None: 97 | self.parse_hit_object(f, line, column_count) 98 | line = f.__next__() 99 | 100 | # Helper function for read_note(). 101 | # Store all note information in 4 arrays: column, type, start, end. 102 | # If note_end is 0, the note is a single note, otherwise a hold. 103 | def parse_hit_object(self, f, object_line, column_count): 104 | params = object_line.split(",") 105 | column = string_to_int((params[0])) 106 | column_width = int(512 / column_count) 107 | column = int(column / column_width) 108 | collect_data(self.columns, column) 109 | 110 | note_start = int(params[2]) 111 | collect_data(self.note_starts, note_start) 112 | 113 | # 1: single note 114 | # 128: Hold(LN) 115 | note_type = int(params[3]) 116 | collect_data(self.note_types, note_type) 117 | 118 | last_param_chunk = params[5].split(":") 119 | note_end = int(last_param_chunk[0]) 120 | collect_data(self.note_ends, note_end) 121 | 122 | def get_parsed_data(self): 123 | return [self.column_count, 124 | self.columns, 125 | self.note_starts, 126 | self.note_ends, 127 | self.note_types, 128 | self.od] 129 | -------------------------------------------------------------------------------- /algorithm.py: -------------------------------------------------------------------------------- 1 | import osu_file_parser as osu_parser 2 | from collections import defaultdict 3 | import numpy as np 4 | import heapq 5 | import pandas as pd 6 | import bisect 7 | import math 8 | 9 | # -----Start of Helper methods-------- 10 | 11 | def cumulative_sum(x, f): 12 | """ 13 | Given sorted positions x (length N) and function values f defined piecewise constant on [x[i], x[i+1]), 14 | return an array F of cumulative integrals such that F[0]=0 and for i>=1: 15 | F[i] = sum_{j=0}^{i-1} f[j]*(x[j+1]-x[j]) 16 | """ 17 | F = np.zeros(len(x)) 18 | for i in range(1, len(x)): 19 | F[i] = F[i-1] + f[i-1]*(x[i] - x[i-1]) 20 | return F 21 | 22 | def query_cumsum(q, x, F, f): 23 | """ 24 | Given cumulative data (x, F, f) as above, return the cumulative sum at an arbitrary point q. 25 | Here we assume that f is constant on each interval. 
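    Illustrative example (made-up values, not from an actual chart): with
    x = [0, 10, 20] and f = [2, 5], cumulative_sum gives F = [0, 20, 70], and
    query_cumsum(15, x, F, f) returns F[1] + f[1]*(15 - 10) = 20 + 25 = 45.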
26 | """ 27 | if q <= x[0]: 28 | return 0.0 29 | if q >= x[-1]: 30 | return F[-1] 31 | # Find index i such that x[i] <= q < x[i+1] 32 | i = np.searchsorted(x, q) - 1 33 | return F[i] + f[i]*(q - x[i]) 34 | 35 | def smooth_on_corners(x, f, window, scale=1.0, mode='sum'): 36 | """ 37 | Given positions x (a sorted 1D array) and function values f (piecewise constant on intervals defined by x), 38 | return an array g defined at x by applying a symmetric sliding window: 39 | if mode=='sum': g(s) = scale * ∫[s-window, s+window] f(t) dt 40 | if mode=='avg': g(s) = (∫[s-window, s+window] f(t) dt) / (length of window actually used) 41 | This is computed exactly using the cumulative–sum technique. 42 | """ 43 | F = cumulative_sum(x, f) 44 | g = np.empty_like(f) 45 | for i, s in enumerate(x): 46 | a = max(s - window, x[0]) 47 | b = min(s + window, x[-1]) 48 | val = query_cumsum(b, x, F, f) - query_cumsum(a, x, F, f) 49 | if mode == 'avg': 50 | g[i] = val / (b - a) if (b - a) > 0 else 0.0 51 | else: 52 | g[i] = scale * val 53 | return g 54 | 55 | def interp_values(new_x, old_x, old_vals): 56 | """Return new_vals at positions new_x using linear interpolation from old_x, old_vals.""" 57 | return np.interp(new_x, old_x, old_vals) 58 | 59 | def step_interp(new_x, old_x, old_vals): 60 | """ 61 | For each position in new_x, return the value of old_vals corresponding to the greatest old_x 62 | that is less than or equal to new_x. This implements a step–function (zero–order hold) 63 | interpolation. 64 | """ 65 | indices = np.searchsorted(old_x, new_x, side='right') - 1 66 | indices = np.clip(indices, 0, len(old_vals)-1) 67 | return old_vals[indices] 68 | 69 | def rescale_high(sr): 70 | if sr <= 9: 71 | return sr 72 | return 9 + (sr - 9) * (1 / 1.2) 73 | 74 | def find_next_note_in_column(note, times, note_seq_by_column): 75 | k, h, t = note 76 | idx = bisect.bisect_left(times, h) 77 | return note_seq_by_column[k][idx+1] if idx+1 < len(note_seq_by_column[k]) else (0, 10**9, 10**9) 78 | 79 | # -----End of Helper methods-------- 80 | 81 | def preprocess_file(file_path, mod): 82 | p_obj = osu_parser.parser(file_path) 83 | p_obj.process() 84 | p = p_obj.get_parsed_data() 85 | 86 | # Build note_seq as a list of tuples (column, head_time, tail_time) 87 | note_seq = [] 88 | for i in range(len(p[1])): 89 | k = p[1][i] 90 | h = p[2][i] 91 | # Only set tail_time when p[4]==128; otherwise use -1. 
92 | t = p[3][i] if p[4][i] == 128 else -1 93 | if mod == "DT": 94 | h = int(math.floor(h * 2/3)) 95 | t = int(math.floor(t * 2/3)) if t >= 0 else t 96 | elif mod == "HT": 97 | h = int(math.floor(h * 4/3)) 98 | t = int(math.floor(t * 4/3)) if t >= 0 else t 99 | note_seq.append((k, h, t)) 100 | 101 | # Hit leniency x 102 | x = 0.3 * ((64.5 - math.ceil(p[5] * 3)) / 500)**0.5 103 | x = min(x, 0.6*(x-0.09)+0.09) 104 | note_seq.sort(key=lambda tup: (tup[1], tup[0])) 105 | 106 | # Group notes by column 107 | note_dict = defaultdict(list) 108 | for tup in note_seq: 109 | note_dict[tup[0]].append(tup) 110 | note_seq_by_column = sorted(list(note_dict.values()), key=lambda lst: lst[0][0]) 111 | 112 | # Long notes (LN) are those with a tail (t>=0) 113 | LN_seq = [n for n in note_seq if n[2] >= 0] 114 | tail_seq = sorted(LN_seq, key=lambda tup: tup[2]) 115 | 116 | LN_dict = defaultdict(list) 117 | for tup in LN_seq: 118 | LN_dict[tup[0]].append(tup) 119 | LN_seq_by_column = sorted(list(LN_dict.values()), key=lambda lst: lst[0][0]) 120 | 121 | K = p[0] 122 | T = max( max(n[1] for n in note_seq), 123 | max(n[2] for n in note_seq)) + 1 124 | 125 | return x, K, T, note_seq, note_seq_by_column, LN_seq, tail_seq, LN_seq_by_column 126 | 127 | def get_corners(T, note_seq): 128 | corners_base = set() 129 | for (_, h, t) in note_seq: 130 | corners_base.add(h) 131 | if t >= 0: 132 | corners_base.add(t) 133 | for s in list(corners_base): 134 | corners_base.add(s + 501) 135 | corners_base.add(s - 499) 136 | corners_base.add(s + 1) # To resolve the Dirac-Delta additions exactly at notes 137 | corners_base.add(0) 138 | corners_base.add(T) 139 | corners_base = sorted(s for s in corners_base if 0 <= s <= T) 140 | 141 | # For Abar, unsmoothed values (KU and A) usually change at ±500 relative to note boundaries, hence ±1000 overall. 
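# (A "corner" here is a timestamp at which some piecewise-constant quantity may change value;
#  evaluating and integrating only at corners keeps the later sliding-window sums exact
#  without sampling the whole timeline on a dense grid.)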
142 | corners_A = set() 143 | for (_, h, t) in note_seq: 144 | corners_A.add(h) 145 | if t >= 0: 146 | corners_A.add(t) 147 | for s in list(corners_A): 148 | corners_A.add(s + 1000) 149 | corners_A.add(s - 1000) 150 | corners_A.add(0) 151 | corners_A.add(T) 152 | corners_A = sorted(s for s in corners_A if 0 <= s <= T) 153 | 154 | # Finally, take the union of all corners for final interpolation 155 | all_corners = sorted(set(corners_base) | set(corners_A)) 156 | all_corners = np.array(all_corners, dtype=float) 157 | base_corners = np.array(corners_base, dtype=float) 158 | A_corners = np.array(corners_A, dtype=float) 159 | return all_corners, base_corners, A_corners 160 | 161 | def get_key_usage(K, T, note_seq, base_corners): 162 | key_usage = {k: np.zeros(len(base_corners), dtype=bool) for k in range(K)} 163 | for (k, h, t) in note_seq: 164 | startTime = max(h - 150, 0) 165 | endTime = (h + 150) if t < 0 else min(t + 150, T-1) 166 | left_idx = np.searchsorted(base_corners, startTime, side='left') 167 | right_idx = np.searchsorted(base_corners, endTime, side='left') 168 | idx = np.arange(left_idx, right_idx) 169 | key_usage[k][idx] = True 170 | return key_usage 171 | 172 | def get_key_usage_400(K, T, note_seq, base_corners): 173 | key_usage_400 = {k: np.zeros(len(base_corners), dtype=float) for k in range(K)} 174 | for (k, h, t) in note_seq: 175 | startTime = max(h, 0) 176 | endTime = h if t < 0 else min(t, T-1) 177 | left400_idx = np.searchsorted(base_corners, startTime - 400, side='left') 178 | left_idx = np.searchsorted(base_corners, startTime, side='left') 179 | right_idx = np.searchsorted(base_corners, endTime, side='left') 180 | right400_idx = np.searchsorted(base_corners, endTime + 400, side='left') 181 | idx = np.arange(left_idx, right_idx) 182 | key_usage_400[k][idx] += 3.75 + np.minimum(endTime - startTime, 1500)/150 183 | idx = np.arange(left400_idx, left_idx) 184 | key_usage_400[k][idx] += 3.75 - 3.75/400**2*(base_corners[idx] - np.array(startTime))**2 185 | idx = np.arange(right_idx, right400_idx) 186 | key_usage_400[k][idx] += 3.75 - 3.75/400**2*np.abs(base_corners[idx] - np.array(endTime))**2 187 | return key_usage_400 188 | 189 | def compute_anchor(K, key_usage_400, base_corners): 190 | anchor = np.zeros(len(base_corners)) 191 | for idx in range(len(base_corners)): 192 | # Collect the counts for each group at this base corner 193 | counts = np.array([key_usage_400[k][idx] for k in range(K)]) 194 | counts[::-1].sort() # e.g. 8, 5, 2, 2, 0 195 | nonzero_counts = counts[counts != 0] 196 | if nonzero_counts.size > 1: 197 | walk = np.sum(nonzero_counts[:-1]*(1-4*(0.5-nonzero_counts[1:]/nonzero_counts[:-1])**2)) 198 | max_walk = np.sum(nonzero_counts[:-1]) 199 | anchor[idx] = walk/max_walk 200 | else: 201 | anchor[idx] = 0 202 | anchor = 1 + np.minimum(anchor-0.18, 5*(anchor-0.22)**3) 203 | return anchor 204 | 205 | def LN_bodies_count_sparse_representation(LN_seq, T): 206 | diff = {} # dictionary: index -> change in LN_bodies (before transformation) 207 | 208 | for (k, h, t) in LN_seq: 209 | t0 = min(h + 60, t) 210 | t1 = min(h + 120, t) 211 | diff[t0] = diff.get(t0, 0) + 1.3 212 | diff[t1] = diff.get(t1, 0) + (-1.3 + 1) # net change at t1: -1.3 from first part, then +1 213 | diff[t] = diff.get(t, 0) - 1 214 | 215 | # The breakpoints are the times where changes occur. 216 | points = sorted(set([0, T] + list(diff.keys()))) 217 | 218 | # Build piecewise constant values (after transformation) and a cumulative sum. 
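# (The transformation below, v = min(curr, 2.5 + 0.5*curr), softens heavy LN stacking: while the
#  running total is under 5 it passes through unchanged; above 5, each extra unit adds only 0.5.)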
219 | values = [] 220 | cumsum = [0] # cumulative sum at the breakpoints 221 | curr = 0.0 222 | 223 | for i in range(len(points) - 1): 224 | t = points[i] 225 | # If there is a change at t, update the running value. 226 | if t in diff: 227 | curr += diff[t] 228 | 229 | v = min(curr, 2.5 + 0.5 * curr) 230 | values.append(v) 231 | # Compute cumulative sum on the interval [points[i], points[i+1]) 232 | seg_length = points[i+1] - points[i] 233 | cumsum.append(cumsum[-1] + seg_length * v) 234 | return points, cumsum, values 235 | 236 | def LN_sum(a, b, LN_rep): 237 | points, cumsum, values = LN_rep 238 | # Locate the segments that contain a and b. 239 | i = bisect.bisect_right(points, a) - 1 240 | j = bisect.bisect_right(points, b) - 1 241 | 242 | total = 0.0 243 | if i == j: 244 | # Both a and b lie in the same segment. 245 | total = (b - a) * values[i] 246 | else: 247 | # First segment: from a to the end of the i-th segment. 248 | total += (points[i+1] - a) * values[i] 249 | # Full segments between i+1 and j-1. 250 | total += cumsum[j] - cumsum[i+1] 251 | # Last segment: from start of segment j to b. 252 | total += (b - points[j]) * values[j] 253 | return total 254 | 255 | def compute_Jbar(K, T, x, note_seq_by_column, base_corners): 256 | J_ks = {k: np.zeros(len(base_corners)) for k in range(K)} 257 | delta_ks = {k: np.full(len(base_corners), 1e9) for k in range(K)} 258 | jack_nerfer = lambda delta: 1 - 7e-5 * (0.15 + abs(delta - 0.08))**(-4) 259 | for k in range(K): 260 | notes = note_seq_by_column[k] 261 | for i in range(len(notes) - 1): 262 | start = notes[i][1] 263 | end = notes[i+1][1] 264 | # Find indices in base_corners that lie in [start, end) 265 | left_idx = np.searchsorted(base_corners, start, side='left') 266 | right_idx = np.searchsorted(base_corners, end, side='left') 267 | idx = np.arange(left_idx, right_idx) 268 | if len(idx) == 0: 269 | continue 270 | delta = 0.001 * (end - start) 271 | val = (delta**(-1)) * (delta + 0.11 * x**(1/4))**(-1) 272 | J_val = val * jack_nerfer(delta) 273 | J_ks[k][idx] = J_val 274 | delta_ks[k][idx] = delta 275 | 276 | # Now smooth each column's J_ks 277 | Jbar_ks = {} 278 | for k in range(K): 279 | Jbar_ks[k] = smooth_on_corners(base_corners, J_ks[k], window=500, scale=0.001, mode='sum') 280 | 281 | # Aggregate across columns using weighted average 282 | Jbar = np.empty(len(base_corners)) 283 | for i, s in enumerate(base_corners): 284 | vals = [Jbar_ks[k][i] for k in range(K)] 285 | weights = [1 / delta_ks[k][i] for k in range(K)] 286 | num = sum((max(v, 0) ** 5) * w for v, w in zip(vals, weights)) 287 | den = sum(weights) 288 | Jbar[i] = num / max(1e-9, den) 289 | Jbar[i] = Jbar[i]**(1/5) 290 | 291 | return delta_ks, Jbar 292 | 293 | def compute_Xbar(K, T, x, note_seq_by_column, active_columns, base_corners): 294 | cross_matrix = [ 295 | [-1], 296 | [0.075, 0.075], 297 | [0.125, 0.05, 0.125], 298 | [0.125, 0.125, 0.125, 0.125], 299 | [0.175, 0.25, 0.05, 0.25, 0.175], 300 | [0.175, 0.25, 0.175, 0.175, 0.25, 0.175], 301 | [0.225, 0.35, 0.25, 0.05, 0.25, 0.35, 0.225], 302 | [0.225, 0.35, 0.25, 0.225, 0.225, 0.25, 0.35, 0.225], 303 | [0.275, 0.45, 0.35, 0.25, 0.05, 0.25, 0.35, 0.45, 0.275], 304 | [0.275, 0.45, 0.35, 0.25, 0.275, 0.275, 0.25, 0.35, 0.45, 0.275], 305 | [0.325, 0.55, 0.45, 0.35, 0.25, 0.05, 0.25, 0.35, 0.45, 0.55, 0.325] 306 | ] 307 | X_ks = {k: np.zeros(len(base_corners)) for k in range(K+1)} 308 | 309 | fast_cross = {k: np.zeros(len(base_corners)) for k in range(K+1)} 310 | cross_coeff = cross_matrix[K] 311 | for k in range(K+1): 312 
| if k == 0: 313 | notes_in_pair = note_seq_by_column[0] 314 | elif k == K: 315 | notes_in_pair = note_seq_by_column[K-1] 316 | else: 317 | notes_in_pair = list(heapq.merge(note_seq_by_column[k-1], note_seq_by_column[k], key=lambda tup: tup[1])) 318 | for i in range(1, len(notes_in_pair)): 319 | start = notes_in_pair[i-1][1] 320 | end = notes_in_pair[i][1] 321 | idx_start = np.searchsorted(base_corners, start, side='left') 322 | idx_end = np.searchsorted(base_corners, end, side='left') 323 | idx = np.arange(idx_start, idx_end) 324 | if len(idx) == 0: 325 | continue 326 | delta = 0.001 * (notes_in_pair[i][1] - notes_in_pair[i-1][1]) 327 | val = 0.16 * max(x, delta)**(-2) 328 | if ((k - 1) not in active_columns[idx_start] and (k - 1) not in active_columns[idx_end]) or (k not in active_columns[idx_start] and k not in active_columns[idx_end]): 329 | val*=(1-cross_coeff[k]) 330 | X_ks[k][idx] = val 331 | fast_cross[k][idx] = max(0, 0.4*max(delta, 0.06, 0.75*x)**(-2) - 80) 332 | X_base = np.zeros(len(base_corners)) 333 | for i in range(len(base_corners)): 334 | X_base[i] = sum(X_ks[k][i] * cross_coeff[k] for k in range(K+1)) + sum(np.sqrt(fast_cross[k][i]*cross_coeff[k]*fast_cross[k+1][i]*cross_coeff[k+1]) for k in range(0, K)) 335 | 336 | Xbar = smooth_on_corners(base_corners, X_base, window=500, scale=0.001, mode='sum') 337 | return Xbar 338 | 339 | def compute_Pbar(K, T, x, note_seq, LN_rep, anchor, base_corners): 340 | stream_booster = lambda delta: 1 + 1.7e-7 * ((7.5 / delta) - 160) * ((7.5 / delta) - 360)**2 if 160 < (7.5 / delta) < 360 else 1 341 | 342 | P_step = np.zeros(len(base_corners)) 343 | for i in range(len(note_seq) - 1): 344 | h_l = note_seq[i][1] 345 | h_r = note_seq[i+1][1] 346 | delta_time = h_r - h_l 347 | if delta_time < 1e-9: 348 | # Dirac delta case: when notes occur at the same time. 349 | # Add the spike exactly at the note head in the base grid. 350 | spike = 1000 * (0.02 * (4 / x - 24))**(1/4) 351 | left_idx = np.searchsorted(base_corners, h_l, side='left') 352 | right_idx = np.searchsorted(base_corners, h_l, side='right') 353 | idx = np.arange(left_idx, right_idx) 354 | if len(idx) > 0: 355 | P_step[idx] += spike 356 | # Continue so that we add a spike for each additional simultaneous note. 
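# (For instance, a 3-note chord produces two consecutive zero-gap pairs in note_seq,
#  so two Dirac spikes are added at that timestamp.)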
357 | continue 358 | # For the regular case where delta_time > 0, identify the base grid indices in [h_l, h_r) 359 | left_idx = np.searchsorted(base_corners, h_l, side='left') 360 | right_idx = np.searchsorted(base_corners, h_r, side='left') 361 | idx = np.arange(left_idx, right_idx) 362 | if len(idx) == 0: 363 | continue 364 | delta = 0.001 * delta_time 365 | v = 1 + 6 * 0.001 * LN_sum(h_l, h_r, LN_rep) 366 | b_val = stream_booster(delta) 367 | if delta < 2 * x / 3: 368 | inc = delta**(-1) * (0.08 * x**(-1) * (1 - 24 * x**(-1) * (delta - x/2)**2))**(1/4) * max(b_val, v) 369 | else: 370 | inc = delta**(-1) * (0.08 * x**(-1) * (1 - 24 * x**(-1) * (x/6)**2))**(1/4) * max(b_val, v) 371 | P_step[idx] += np.minimum(inc * anchor[idx], np.maximum(inc, inc*2-10)) 372 | 373 | Pbar = smooth_on_corners(base_corners, P_step, window=500, scale=0.001, mode='sum') 374 | return Pbar 375 | 376 | def compute_Abar(K, T, x, note_seq_by_column, active_columns, delta_ks, A_corners, base_corners): 377 | dks = {k: np.zeros(len(base_corners)) for k in range(K-1)} 378 | for i in range(len(base_corners)): 379 | cols = active_columns[i] 380 | for j in range(len(cols) - 1): 381 | k0 = cols[j] 382 | k1 = cols[j+1] 383 | # Use the delta_ks computed before on base_corners 384 | dks[k0][i] = abs(delta_ks[k0][i] - delta_ks[k1][i]) + 0.4*max(0, max(delta_ks[k0][i], delta_ks[k1][i]) - 0.11) 385 | 386 | A_step = np.ones(len(A_corners)) 387 | 388 | for i, s in enumerate(A_corners): 389 | idx = np.searchsorted(base_corners, s) 390 | if idx >= len(base_corners): 391 | idx = len(base_corners) - 1 392 | cols = active_columns[idx] 393 | for j in range(len(cols) - 1): 394 | k0 = cols[j] 395 | k1 = cols[j+1] 396 | d_val = dks[k0][idx] 397 | if d_val < 0.02: 398 | A_step[i] *= min(0.75 + 0.5 * max(delta_ks[k0][idx], delta_ks[k1][idx]), 1) 399 | elif d_val < 0.07: 400 | A_step[i] *= min(0.65 + 5*d_val + 0.5 * max(delta_ks[k0][idx], delta_ks[k1][idx]), 1) 401 | # Otherwise leave A_step[i] unchanged. 402 | 403 | Abar = smooth_on_corners(A_corners, A_step, window=250, mode='avg') 404 | return Abar 405 | 406 | def compute_Rbar(K, T, x, note_seq_by_column, tail_seq, base_corners): 407 | I_arr = np.zeros(len(base_corners)) 408 | R_step = np.zeros(len(base_corners)) 409 | 410 | times_by_column = {i: [note[1] for note in column] 411 | for i, column in enumerate(note_seq_by_column)} 412 | 413 | # Release Index 414 | I_list = [] 415 | for i in range(len(tail_seq)): 416 | k, h_i, t_i = tail_seq[i] 417 | _, h_j, _ = find_next_note_in_column((k, h_i, t_i), times_by_column[k], note_seq_by_column) 418 | I_h = 0.001 * abs(t_i - h_i - 80) / x 419 | I_t = 0.001 * abs(h_j - t_i - 80) / x 420 | I_list.append(2 / (2 + math.exp(-5*(I_h-0.75)) + math.exp(-5*(I_t-0.75)))) 421 | 422 | # For each interval between successive tail times, assign I and R. 
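# (I_list[i] lies in (0, 1): it tends toward 1 when both I_h and I_t are well above 0.75,
#  and toward 0 when both are near zero, i.e. when the hold length and the tail-to-next-head
#  gap both sit close to 80 ms.)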
423 | for i in range(len(tail_seq)-1): 424 | t_start = tail_seq[i][2] 425 | t_end = tail_seq[i+1][2] 426 | left_idx = np.searchsorted(base_corners, t_start, side='left') 427 | right_idx = np.searchsorted(base_corners, t_end, side='left') 428 | idx = np.arange(left_idx, right_idx) 429 | if len(idx) == 0: 430 | continue 431 | I_arr[idx] = 1 + I_list[i] 432 | delta_r = 0.001 * (tail_seq[i+1][2] - tail_seq[i][2]) 433 | R_step[idx] = 0.08 * (delta_r)**(-0.5) * x**(-1) * (1 + 0.8*(I_list[i] + I_list[i+1])) 434 | Rbar = smooth_on_corners(base_corners, R_step, window=500, scale=0.001, mode='sum') 435 | return Rbar 436 | 437 | def compute_C_and_Ks(K, T, note_seq, key_usage, base_corners): 438 | # C(s): count of notes within 500 ms 439 | note_hit_times = sorted(n[1] for n in note_seq) 440 | C_step = np.zeros(len(base_corners)) 441 | for i, s in enumerate(base_corners): 442 | low = s - 500 443 | high = s + 500 444 | # Use binary search on note_hit_times: 445 | cnt = bisect.bisect_left(note_hit_times, high) - bisect.bisect_left(note_hit_times, low) 446 | C_step[i] = cnt 447 | 448 | # Ks: local key usage count (minimum 1) 449 | Ks_step = np.array([max(sum(1 for k in range(K) if key_usage[k][i]), 1) for i in range(len(base_corners))]) 450 | return C_step, Ks_step 451 | 452 | def calculate(file_path, mod): 453 | # === Basic Setup and Parsing === 454 | x, K, T, note_seq, note_seq_by_column, LN_seq, tail_seq, LN_seq_by_column = preprocess_file(file_path, mod) 455 | 456 | all_corners, base_corners, A_corners = get_corners(T, note_seq) 457 | 458 | # For each column, store a boolean of its usage (whether non-empty within 150 ms) over time. Example: key_usage[k][idx]. 459 | key_usage = get_key_usage(K, T, note_seq, base_corners) 460 | # At each time in base_corners, build a list of columns that are active: 461 | active_columns = [ [k for k in range(K) if key_usage[k][i]] for i in range(len(base_corners)) ] 462 | 463 | key_usage_400 = get_key_usage_400(K, T, note_seq, base_corners) 464 | 465 | anchor = compute_anchor(K, key_usage_400, base_corners) 466 | 467 | delta_ks, Jbar = compute_Jbar(K, T, x, note_seq_by_column, base_corners) 468 | Jbar = interp_values(all_corners, base_corners, Jbar) 469 | 470 | Xbar = compute_Xbar(K, T, x, note_seq_by_column, active_columns, base_corners) 471 | Xbar = interp_values(all_corners, base_corners, Xbar) 472 | 473 | # Build the sparse representation of cumulative LN bodies. 
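# (LN_rep is the (points, cumsum, values) triple built by LN_bodies_count_sparse_representation;
#  compute_Pbar later integrates it over each inter-note gap via LN_sum.)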
474 | LN_rep = LN_bodies_count_sparse_representation(LN_seq, T) 475 | 476 | Pbar = compute_Pbar(K, T, x, note_seq, LN_rep, anchor, base_corners) 477 | Pbar = interp_values(all_corners, base_corners, Pbar) 478 | 479 | Abar = compute_Abar(K, T, x, note_seq_by_column, active_columns, delta_ks, A_corners, base_corners) 480 | Abar = interp_values(all_corners, A_corners, Abar) 481 | 482 | Rbar = compute_Rbar(K, T, x, note_seq_by_column, tail_seq, base_corners) 483 | Rbar = interp_values(all_corners, base_corners, Rbar) 484 | 485 | C_step, Ks_step = compute_C_and_Ks(K, T, note_seq, key_usage, base_corners) 486 | C_arr = step_interp(all_corners, base_corners, C_step) 487 | Ks_arr = step_interp(all_corners, base_corners, Ks_step) 488 | 489 | # === Final Computations === 490 | # Compute Difficulty D on all_corners: 491 | S_all = ((0.4 * (Abar**(3/ Ks_arr) * np.minimum(Jbar, 8+0.85*Jbar))**1.5) + 492 | ((1-0.4) * (Abar**(2/3) * (0.8*Pbar + Rbar*35/(C_arr+8)))**1.5))**(2/3) 493 | T_all = (Abar**(3/ Ks_arr) * Xbar) / (Xbar + S_all + 1) 494 | D_all = 2.7 * (S_all**0.5) * (T_all**1.5) + S_all * 0.27 495 | 496 | df_corners = pd.DataFrame({ 497 | 'time': all_corners, 498 | 'Jbar': Jbar, 499 | 'Xbar': Xbar, 500 | 'Pbar': Pbar, 501 | 'Abar': Abar, 502 | 'Rbar': Rbar, 503 | 'C': C_arr, 504 | 'Ks': Ks_arr, 505 | 'D': D_all 506 | }) 507 | 508 | # Compute the gaps between consecutive times in a vectorised way. 509 | # For interior points, the effective gap is the average of the left and right gap. 510 | gaps = np.empty_like(all_corners, dtype=float) 511 | gaps[0] = (all_corners[1] - all_corners[0]) / 2.0 512 | gaps[-1] = (all_corners[-1] - all_corners[-2]) / 2.0 513 | gaps[1:-1] = (all_corners[2:] - all_corners[:-2]) / 2.0 514 | 515 | # The effective weight for each corner is the product of its density and its gap. 516 | effective_weights = C_arr * gaps 517 | df_sorted = df_corners.sort_values('D') 518 | D_sorted = df_sorted['D'].values 519 | sorted_indices = df_sorted.index.to_numpy() 520 | w_sorted = effective_weights[sorted_indices] 521 | 522 | # Compute the cumulative sum of the effective weights. 523 | cum_weights = np.cumsum(w_sorted) 524 | total_weight = cum_weights[-1] 525 | norm_cum_weights = cum_weights / total_weight 526 | 527 | target_percentiles = np.array([0.945, 0.935, 0.925, 0.915, 0.845, 0.835, 0.825, 0.815]) 528 | 529 | indices = np.searchsorted(norm_cum_weights, target_percentiles, side='left') 530 | 531 | percentile_93 = np.mean(D_sorted[indices[:4]]) 532 | percentile_83 = np.mean(D_sorted[indices[4:8]]) 533 | 534 | weighted_mean = (np.sum(D_sorted**5 * w_sorted) / np.sum(w_sorted))**(1 / 5) 535 | 536 | # Final SR calculation 537 | SR = (0.88 * percentile_93) * 0.25 + (0.94 * percentile_83) * 0.2 + weighted_mean * 0.55 538 | SR = SR**(1.0) / (8**1.0) * 8 539 | 540 | total_notes = len(note_seq) + 0.5*sum(np.minimum((t-h), 1000)/200 for (k, h, t) in LN_seq) 541 | SR *= total_notes / (total_notes + 60) 542 | 543 | SR = rescale_high(SR) 544 | SR *= 0.975 545 | 546 | return SR 547 | --------------------------------------------------------------------------------