├── test2
│   ├── judgetool.pyc
│   └── test2.py
├── assets
│   └── test5_output.png
├── test0.py
├── README.md
├── test1.py
└── test5.py
/test2/judgetool.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/paolordls/cs-145-scripts/HEAD/test2/judgetool.pyc
--------------------------------------------------------------------------------
/assets/test5_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/paolordls/cs-145-scripts/HEAD/assets/test5_output.png
--------------------------------------------------------------------------------
/test0.py:
--------------------------------------------------------------------------------
1 | # Must be placed in same folder as task0.py and cs145lib
2 |
3 | import argparse
4 | import subprocess
5 | from math import log2
6 | import os
7 | import re
8 | import time
9 |
10 | DEFAULT_NUM_TESTS = 100
11 | BITS_REGEX = r"Exactly (\d+) bits were written by the sender\."
12 | SENT_REGEX = r"\[Sender stderr\] The data to be sent is \'(.*)\'"
13 | RECEIVED_REGEX = r"The string returned by the receiver is \'(.*)\'"
14 |
15 | if __name__ == '__main__':
16 | parser = argparse.ArgumentParser(prog="test0.py", description="Tests your Programming Task 0 (PT 0) solution.")
17 |
18 | parser.add_argument("-s", "--seed", default=int(0xC0DEBABE), help="Random seed for test program (integer; default: decimal equivalent of 0xC0DEBABE).", type=int)
19 | parser.add_argument("-n", "--num-tests", default=100, help="Number of tests to run (integer; default: 100).", type=int)
20 |
21 | args = parser.parse_args()
22 |
23 | seed_arg, num_tests_arg = args.seed, args.num_tests
24 |
25 | if not os.path.exists("task0.py"):
26 | print(f'ERROR\n\
27 | \tAre you in the right folder?')
28 | exit()
29 |
30 | outputs = []
31 | errors = []
32 | times = []
33 | # Loop through seeds seed_arg to seed_arg + num_tests_arg - 1
34 | for i in range(num_tests_arg):
35 | # Construct the command with the current seed
36 | command = f"python3 -m cs145lib.task0.make_sentence --seed {i + seed_arg} | \
37 | python3 -m cs145lib.task0.test --seed {i + seed_arg} \
38 | python3 task0.py"
39 |
40 | # Time the process
41 | start_time = time.time()
42 |
43 | # Run the command and capture the output
44 | process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
45 |
46 | # Read the output and errors
47 | output, error = process.communicate()
48 |
49 | # Calculate elapsed time
50 | times.append(time.time() - start_time)
51 |
52 | # Store output and error
53 | outputs.append(output)
54 | errors.append(error)
55 |
56 | # Parse each output
57 | total_bits = 0
58 | test_errors = 0
59 | time_errors = 0
60 | for i, (output, error, t) in enumerate(zip(outputs, errors, times)):
61 | bits = int(re.search(BITS_REGEX, output).group(1))
62 |
63 | # Parse error
64 | received_str = re.search(RECEIVED_REGEX, output).group(1)
65 | sent_str = re.search(SENT_REGEX, error).group(1)
66 | if received_str != sent_str or t >= 10:
67 | print(f'ERROR FOR SEED {i + seed_arg}:\n\
68 | \tBits: {bits}\n\
69 | \tTime: {t} s\n\
70 | \tSent: {sent_str}\n\
71 | \tReceived: {received_str}')
72 | test_errors += 1
73 |
74 | total_bits += bits
75 |
76 | if t >= 10:
77 | time_errors += 1
78 |
79 | # Display results
80 | print(f'TESTS\n\
81 | \tSeed: {seed_arg}\n\
82 | \tTests performed: {len(outputs)}\n\
83 | \tTests timed out: {time_errors}\n\
84 | \tFailed tests: {test_errors}')
85 |
86 | # End early if all tests failed - otherwise the scoring below can hit a log2(0) math domain error
87 | if test_errors == len(outputs):
88 | exit()
89 |
90 | # Compute score
91 | avg = total_bits / len(outputs)
92 | x = log2(total_bits if num_tests_arg == DEFAULT_NUM_TESTS else avg * DEFAULT_NUM_TESTS)
93 | if time_errors >= 3:
94 | score = 0
95 | elif x > 18.8:
96 | score = 0
97 | elif x < 12.8:
98 | score = 100
99 | else:
100 | score = 154 - 5 * x
101 |
102 | # 1% of test cases failed = -1.5
103 | score -= (test_errors / (num_tests_arg * 0.01)) * 1.5
104 |
105 | if score < 0:
106 | score = 0
107 |
108 | print(f'PERFORMANCE\n\
109 | \tTotal bits: {total_bits}\n\
110 | \tAverage bits per message: {avg}\n\
111 | \tX: {x}\n\
112 | \tScore: {score}')
113 |
114 | if num_tests_arg != 100:
115 | print(f'NOTE\n\
116 | \tNumber of tests is not 100.\n\
117 | \tScore here might not accurately reflect correctness of solution:\n\
118 | \t- Average bits per message was used to determine X.\n\
119 | \t- Penalties were scaled according to the number of tests run.')
120 |
--------------------------------------------------------------------------------
/test2/test2.py:
--------------------------------------------------------------------------------
1 | from judgetool import Pass, Fail, judge
2 |
3 | import argparse
4 | import math
5 | import os
6 | import subprocess
7 | import time
8 |
9 |
10 | if __name__ == "__main__":
11 | parser = argparse.ArgumentParser(
12 | prog="test2.py", description="Tests your Programming Task 2 (PT 2) solution."
13 | )
14 |
15 | parser.add_argument(
16 | "-s",
17 | "--seed",
18 | default=int(0xC0DEBABE),
19 | help="Random seed for test program (integer; default: decimal equivalent of 0xC0DEBABE).",
20 | type=int,
21 | )
22 | parser.add_argument(
23 | "-n",
24 | "--num-tests",
25 | default=50,
26 | help="Number of tests to run (integer; default: 50).",
27 | type=int,
28 | )
29 |
30 | args = parser.parse_args()
31 |
32 | seed_arg, num_tests_arg = args.seed, args.num_tests
33 |
34 | if not os.path.exists("task2.py"):
35 | print(
36 | f"ERROR\n\
37 | \tAre you in the right folder?"
38 | )
39 | exit()
40 |
41 | print()
42 | print(f" Running {num_tests_arg} test{'' if num_tests_arg == 1 else 's'}...")
43 | if num_tests_arg > 5:
44 | print(" This may take a while...")
45 |
46 | outputs = []
47 | errors = []
48 | times = []
49 | results = []
50 | # Loop through seeds seed_arg to seed_arg + num_tests_arg - 1
51 | for i in range(num_tests_arg):
52 | # Construct the command with the current seed
53 | command = f"python3 -m cs145lib.task2.gen --seed {i + seed_arg} | \
54 | python3 -m cs145lib.task2.test --quiet \
55 | python3 task2.py"
56 |
57 | # Time the process
58 | start_time = time.time()
59 |
60 | # Run the command and capture the output
61 | process = subprocess.Popen(
62 | command,
63 | shell=True,
64 | stdout=subprocess.PIPE,
65 | stderr=subprocess.PIPE,
66 | text=True,
67 | )
68 |
69 | # Read the output and errors
70 | output, error = process.communicate()
71 |
72 | # Calculate elapsed time
73 | times.append(time.time() - start_time)
74 |
75 | # Store output and error
76 | outputs.append(output)
77 | errors.append(error)
78 |
79 | results.append(judge())
80 |
81 | frames_used = 0
82 | bytes_used = 0
83 | failures = 0
84 | timeouts = 0
85 |
86 | for i, (result, t) in enumerate(zip(results, times)):
87 | if t > 20:
88 | timeouts += 1
89 | else:
90 | match result:
91 | case Pass(f, b):
92 | frames_used += f
93 | bytes_used += b
94 | case Fail(logs):
95 | print(f"\n Error logs for test #{i+1}:")
96 | for log in logs:
97 | print(f" {log}")
98 | failures += 1
99 |
100 | ave_time = round(sum(times) / len(times), 3)
101 | ave_frames_used = frames_used / num_tests_arg
102 | ave_bytes_used = bytes_used / num_tests_arg
103 |
104 | bad_news = "All tests failed..."
105 |
106 | if 5 * ave_frames_used + ave_bytes_used < 10**-6: # zero
107 | x = bad_news
108 | else:
109 | x = math.log2(5 * ave_frames_used + ave_bytes_used)
110 |
111 | print(
112 | f"""
113 | Tests ran\t\t\t{num_tests_arg}
114 | Starting seed\t\t{seed_arg}
115 |
116 | Average time\t\t{ave_time} s
117 | Average frames used\t\t{ave_frames_used}
118 | Average bytes used\t\t{ave_bytes_used}
119 | X\t\t\t\t{x}
120 | Failures\t\t\t{failures}
121 | Timeouts\t\t\t{timeouts}"""
122 | )
123 |
124 | if x != bad_news:
125 | if x > 16.6:
126 | score = 0
127 | elif x >= 15.5:
128 | score = 60 - (10 / 1.1) * (x - 15.5)
129 | elif x >= 14.6:
130 | score = 70 - (10 / 0.9) * (x - 14.6)
131 | elif x >= 14:
132 | score = 80 - (10 / 0.6) * (x - 14)
133 | elif x >= 13.55:
134 | score = 90 - (10 / 0.45) * (x - 13.55)
135 | elif x >= 13.41:
136 | score = 98 - (8 / 0.14) * (x - 13.41)
137 | else:
138 | score = 100
139 |
140 | score -= 25 * failures
141 | score = max(0, score)
142 |
143 | if x == bad_news or timeouts >= 3:
144 | score = 0
145 |
146 | print(
147 | f"""
148 | Score\t\t\t{score}{' (yay!)' if score == 100 else ''}
149 | """
150 | )
151 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # cs-145-scripts
2 |
3 | Scripts for testing CS 145 2324B Programming Tasks.
4 |
5 | ## PT 5 usage instructions
6 |
7 | You need at least **Python 3.12** to run this.
8 |
9 | 1) Download `test5.py` and place it in your `task5attachments` folder.
10 | 2) Make sure you're currently inside the `task5attachments` folder. To test your solution, run `python3 test5.py`. The output will look like this:
11 |
12 |
13 |
14 | The PT 5 tester carries out two things:
15 |
16 | - It first creates 30 test files using `cs145lib.task5.gen`. A test file takes around 2-3 seconds to make; this step is done separately so that future runs of the tester (**using the same seed**) do not need to repeat this step. Running the tester for the first time may have an overhead of 60-90 seconds!
17 | - It then runs your solution on each of those test files until it reaches the time limit or it encounters a failed test case.
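As a rough sketch of the first step above (this mirrors the generate-or-reuse logic in `test5.py` further below; the helper name `ensure_test_file` is just for illustration, not part of the tester):

```python
import os
import subprocess


def ensure_test_file(seed: int) -> str:
    """Generate tests/{seed}.txt with cs145lib.task5.gen only if it doesn't exist yet."""
    # Assumes the tests/ folder already exists (test5.py creates it on startup).
    filename = f"tests/{seed}.txt"
    if not os.path.isfile(filename):
        # Same command test5.py runs; each file takes roughly 2-3 seconds to make.
        subprocess.run(
            f"python3 -m cs145lib.task5.gen -s {seed} > {filename}",
            shell=True,
        )
    return filename
```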
18 |
19 | You may customize the following things:
20 |
21 | - The starting seed, via the `-s` flag; the tester will generate files `tests/{seed}.txt` through `tests/{seed+29}.txt` if they don't already exist.
22 | - The time limit, via the `-tl` flag; the `elapsed` field in the resulting `output.json` file is consulted for keeping track of the total elapsed time.
23 | - The file to test, via the `-f` flag.
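For instance, `python3 test5.py -s 42 -tl 90 -f task5_draft.py` would generate (or reuse) `tests/42.txt` through `tests/71.txt`, give the run a total time budget of 90 seconds, and test a file named `task5_draft.py` (the seed and filename here are purely illustrative).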
24 |
25 | If one of your tests fails, you may get an error about port unavailability for subsequent uses of `cs145lib.task5.test`. One possible fix for this is as follows:
26 |
27 | - Find the offending process ID via `lsof -n -i | grep ":{port_number}"`.
28 | - Kill the process via `kill -9 {process_ID}`.
29 |
30 |
31 |
32 |
33 | ## PT 3 usage instructions (archived)
34 |
35 | First things first: you need at least **Python 3.12** to run this.
36 |
37 | 1) Download the files inside the `test3` folder and place them in your `task3attachments` folder.
38 | 2) Install the Python `tabulate` package by running `pip install -r requirements.txt` or `pip install tabulate`.
39 | 3) Make sure you're currently inside the `task3attachments` folder. To test your solution, run `python3 test3.py`. The output will look like this:
40 |
41 |
42 |
43 | In particular:
44 |
45 | - You'll see a **table** with the results for each kind of topology, so you can easily see which topologies you need to improve or fix your solution on.
46 | - You'll see a list of **overall statistics**, just like with the testers for previous PTs.
47 |
48 | You can customize the starting seed via the `-s` flag, as always. Unlike previous PTs, however, there are **two** ways of configuring how many tests you want to run this time:
49 |
50 | - If you want to run the same number of tests $`t`$ for each topology, pass that number to the `-n` flag. This will run a total of $`5t`$ tests.
51 | - If you want to specify a specific number of tests for each topology, pass **five nonnegative integers** to the `-d` flag. If the numbers you passed are $`t_1, t_2, \dots, t_5`$, this will run a total of $`t_1 + t_2 + t_3 + t_4 + t_5`$ tests.
52 |
53 | By default, the tester will run five tests for each topology, which is how our solutions will actually be tested.
54 |
55 | You may only specify **at most one way** to customize the number of test cases; using both the `-n` and `-d` flags won't work.
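For instance, `python3 test3.py -n 10` would run 50 tests (10 per topology), while `python3 test3.py -d 5 0 5 0 5` would run 15 tests and skip the second and fourth topologies entirely (the numbers here are just for illustration).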
56 |
57 | As mentioned in the specs, the statistics (frame/byte counts) for a particular topology count only if all tests under that topology pass, and your score will be multiplied by the fraction of topologies you handle correctly (shown in the output's `Multiplier` field).
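For example, if all tests pass for four of the five topologies, the `Multiplier` field would be 0.8, and only those four topologies' frame/byte counts would contribute to the statistics.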
58 |
59 | Good luck! We can make it through this sem! `^_^`
60 |
61 |
62 |
63 |
64 | ## PT 0-2 usage instructions (archived)
65 |
66 | **IMPORTANT: test2.py requires Python 3.12.2 to run.**
67 |
68 | ### What is it
69 |
70 |
71 | Runs tests on your `taskN.py`, measuring time-outs and output errors and displaying problematic test cases. It then calculates your solution's performance: total bits, average bits per message, the x-value (formula given in each PT doc), and your score out of 100.
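As a concrete (hypothetical) example for PT 0: with the default 100 tests, `test0.py` computes `X = log2(total bits)`, so a run totaling 8192 bits gives `X = 13` and a base score of `154 - 5 * 13 = 89` before any failure penalties (see the scoring block near the end of `test0.py`).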
72 |
73 | The **number of tests** run and the **random seed** for each test program may be customized. Defaults are 100 tests (50 for PT 2) and a random seed of `0xC0DEBABE`.
74 |
75 | ### How to use
76 | 1. For PT number `n` (`n = 0, 1`), download `test[n].py` and place it in your `task[n]attachments` folder.
77 | - For PT2, download `test2.py` and `judgetool.pyc` and place in your `task2attachments` folder.
78 | 2. **Important:** In your terminal, navigate to `task[n]attachments/` and run `python3 test[n].py -h` to see the available options for running the script.
79 | - To check if your PT2 solution is valid, run a single test via `python3 test2.py -n 1`. Running the full 50 tests can take a while, and is recommended only for checking your score.
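For example, `python3 test0.py -n 10 -s 42` would run ten PT 0 tests starting from seed 42 (the values here are just for illustration; run with `-h` for the full list of flags).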
80 |
81 |
82 |
83 | ## Extra
84 | Contributions are welcome! Raise an issue or a PR and I'll get to it ASAP.
85 |
86 | ## Contributors
87 | jproads, daryll-ko, Ulyzses
88 |
89 | *Released with permission from Sir Kevin and Sir Jem.*
90 |
--------------------------------------------------------------------------------
/test1.py:
--------------------------------------------------------------------------------
1 | # Must be placed in same folder as task1.py and cs145lib
2 |
3 | import argparse
4 | import subprocess
5 | from math import log2
6 | import os
7 | import re
8 | import time
9 |
10 | DEFAULT_NUM_TESTS = 100
11 | BITS_REGEX = r"Exactly (\d+) bits were written by the sender\."
12 | SENT_REGEX = r"\[Sender stderr\] The data to be sent is \'(.*)\'"
13 | RECEIVED_REGEX = r"The string returned by the receiver is \'(.*)\'"
14 |
15 | if __name__ == '__main__':
16 | parser = argparse.ArgumentParser(prog="test1.py", description="Tests your Programming Task 1 (PT 1) solution.")
17 |
18 | parser.add_argument("-s", "--seed", default=int(0xC0DEBABE), help="Random seed for test program (integer; default: decimal equivalent of 0xC0DEBABE).", type=int)
19 | parser.add_argument("-n", "--num-tests", default=100, help="Number of tests to run (integer; default: 100).", type=int)
20 |
21 | args = parser.parse_args()
22 |
23 | seed_arg, num_tests_arg = args.seed, args.num_tests
24 |
25 | if not os.path.exists("task1.py"):
26 | print(f'ERROR\n\
27 | \tAre you in the right folder?')
28 | exit()
29 |
30 | outputs = []
31 | errors = []
32 | times = []
33 | # Loop through seeds seed_arg to seed_arg + num_tests_arg - 1
34 | for i in range(num_tests_arg):
35 | # Construct the command with the current seed
36 | command = f"python3 -m cs145lib.task1.make_sentence --seed {i + seed_arg} | \
37 | python3 -m cs145lib.task1.test --seed {i + seed_arg} \
38 | python3 task1.py"
39 |
40 | # Time the process
41 | start_time = time.time()
42 |
43 | # Run the command and capture the output
44 | process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
45 |
46 | # Read the output and errors
47 | output, error = process.communicate()
48 |
49 | # Calculate elapsed time
50 | times.append(time.time() - start_time)
51 |
52 | # Store output and error
53 | outputs.append(output)
54 | errors.append(error)
55 |
56 | # Parse each output
57 | total_bits = 0
58 | test_errors = 0
59 | time_errors = 0
60 | for i, (output, error, t) in enumerate(zip(outputs, errors, times)):
61 | bits = int(re.search(BITS_REGEX, output).group(1))
62 | total_bits += bits
63 |
64 | # Parse error
65 | try:
66 | received_str = re.search(RECEIVED_REGEX, output).group(1)
67 | except AttributeError:
68 | print(f'ERROR FOR SEED {i + seed_arg}:\n\
69 | \tBits: {bits}\n\
70 | \tTime: {t} s\n\
71 | \tNo string returned by the receiver.\n\
72 | \tReceived output:\n{output}\n\
73 | \tReceived error:\n{error}')
74 | test_errors += 1
75 | continue
76 |
77 | try:
78 | sent_str = re.search(SENT_REGEX, error).group(1)
79 | except AttributeError:
80 | print(f'ERROR FOR SEED {i + seed_arg}:\n\
81 | \tBits: {bits}\n\
82 | \tTime: {t} s\n\
83 | \tNo string sent by the sender.\n\
84 | \tReceived output:\n{output}\n\
85 | \tReceived error:\n{error}')
86 | test_errors += 1
87 | continue
88 |
89 | if received_str != sent_str or t >= 10:
90 | print(f'ERROR FOR SEED {i + seed_arg}:\n\
91 | \tBits: {bits}\n\
92 | \tTime: {t} s\n\
93 | \tSent: {sent_str}\n\
94 | \tReceived: {received_str}')
95 | test_errors += 1
96 |
97 | if t >= 10:
98 | time_errors += 1
99 |
100 | # Display results
101 | print(f'TESTS\n\
102 | \tSeed: {seed_arg}\n\
103 | \tTests performed: {len(outputs)}\n\
104 | \tTests timed out: {time_errors}\n\
105 | \tFailed tests: {test_errors}')
106 |
107 | # End early if all tests failed - otherwise the scoring below can hit a log2(0) math domain error
108 | if test_errors == len(outputs):
109 | exit()
110 |
111 | # Compute score
112 | avg = total_bits / len(outputs)
113 | x = log2(total_bits if num_tests_arg == DEFAULT_NUM_TESTS else avg * DEFAULT_NUM_TESTS)
114 | if time_errors >= 3:
115 | score = 0
116 | elif x > 21.99:
117 | score = 0
118 | elif x < 16.99:
119 | score = 100
120 | else:
121 | score = 214 - 7 * x
122 |
123 | # 1% of test cases failed = -1.5
124 | score -= (test_errors / (num_tests_arg * 0.01)) * 1.5
125 |
126 | if score < 0:
127 | score = 0
128 |
129 | print(f'PERFORMANCE\n\
130 | \tTotal bits: {total_bits}\n\
131 | \tAverage bits per message: {avg}\n\
132 | \tX: {x}\n\
133 | \tScore: {score}')
134 |
135 | if num_tests_arg != 100:
136 | print(f'NOTE\n\
137 | \tNumber of tests is not 100.\n\
138 | \tScore here might not accurately reflect correctness of solution:\n\
139 | \t- Average bits per message was used to determine X.\n\
140 | \t- Penalties were scaled according to the number of tests run.')
141 |
--------------------------------------------------------------------------------
/test5.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import math
4 | import os
5 | import signal
6 | import subprocess
7 |
8 | from contextlib import contextmanager
9 | from dataclasses import dataclass
10 |
11 |
12 | @dataclass
13 | class Pass:
14 | messages_used: int
15 | bytes_used: int
16 | time_taken: float
17 |
18 |
19 | @dataclass
20 | class Fail:
21 | logs: list[str]
22 |
23 |
24 | def clamp(x: float, l: float, h: float) -> float:
25 | if x < l:
26 | return l
27 | elif x > h:
28 | return h
29 | else:
30 | return x
31 |
32 |
33 | def approx(a: float, b: float, tol: float = 10**-6) -> bool:
34 | return abs(a - b) < tol
35 |
36 |
37 | @contextmanager
38 | def time_limit(seconds):
39 | def signal_handler(signum, frame):
40 | raise Exception("timeout")
41 |
42 | signal.signal(signal.SIGALRM, signal_handler)
43 | signal.alarm(seconds)
44 | try:
45 | yield
46 | finally:
47 | signal.alarm(0)
48 |
49 |
50 | if __name__ == "__main__":
51 | parser = argparse.ArgumentParser(
52 | prog="test5.py", description="Tests your Programming Task 5 (PT 5) solution."
53 | )
54 | parser.add_argument(
55 | "-f",
56 | "--file",
57 | default="task5.py",
58 | help="name of program to test (string; default: task5.py)",
59 | type=str,
60 | )
61 | parser.add_argument(
62 | "-s",
63 | "--seed",
64 | default=int(0xC0DEBABE),
65 | help="random seed for test program (integer; default: decimal equivalent of 0xC0DEBABE)",
66 | type=int,
67 | )
68 | parser.add_argument(
69 | "-tl",
70 | "--time-limit",
71 | default=60,
72 | help="time limit (in seconds) to give your program (integer; default: 60)",
73 | type=int,
74 | )
75 |
76 | args = parser.parse_args()
77 |
78 | if not os.path.isdir("tests"):
79 | os.mkdir("tests")
80 |
81 | reused_tests: int = 0
82 |
83 | print()
84 | print("\tGenerating tests...")
85 | for i in range(30):
86 | filename = f"tests/{args.seed + i}.txt"
87 | if not os.path.isfile(filename):
88 | command = f"python3 -m cs145lib.task5.gen -s {args.seed + i} > {filename}"
89 | process = subprocess.Popen(
90 | command,
91 | shell=True,
92 | stdout=subprocess.PIPE,
93 | stderr=subprocess.PIPE,
94 | text=True,
95 | )
96 | output, error = process.communicate()
97 | else:
98 | reused_tests += 1
99 | print(f"\tDone generating tests! (reused {reused_tests} tests)")
100 | print()
101 |
102 | results: list[Pass | Fail] = []
103 | total_elapsed: float = 0
104 |
105 | for i in range(30):
106 | filename = f"tests/{args.seed + i}.txt"
107 | command = f"cat {filename} | \
108 | python3 -m cs145lib.task5.test python3 {args.file}"
109 |
110 | process = subprocess.Popen(
111 | command,
112 | shell=True,
113 | stdout=subprocess.PIPE,
114 | stderr=subprocess.PIPE,
115 | text=True,
116 | )
117 |
118 | try:
119 | with time_limit(45):
120 | output, error = process.communicate()
121 | except Exception:  # raised by time_limit() when the 45-second alarm fires
122 | results.append(Fail(["-- time limit exceeded"]))
123 | break
124 |
125 | try:
126 | with open("output.json", "r") as f:
127 | data = json.loads(f.read())
128 |
129 | m = sum(data["total_send_cts_from"].values())
130 | b = sum(data["total_send_lns_from"].values())
131 | t = data["elapsed"]
132 | passed = data["correct"]
133 |
134 | if total_elapsed + t > args.time_limit:
135 | break
136 |
137 | if passed:
138 | results.append(Pass(m, b, t))
139 | total_elapsed += t
140 | else:
141 | results.append(Fail(["-- wrong answer given"]))
142 | break
143 | except Exception as e:
144 | results.append(Fail([repr(e)]))
145 |
146 | if len(results) > 0:
147 | print("\tTest #\t\tSeed\t\tResult\t\tMessages\tBytes\t\tTime")
148 | print()
149 |
150 | failed: bool = False
151 | mt: int = 0
152 | bt: int = 0
153 |
154 | for i, result in enumerate(results, start=1):
155 | match result:
156 | case Pass(m, b, t):
157 | print(
158 | f"\t{i}\t\t{str(args.seed+i-1).ljust(10)}\tPass\t\t{m}\t\t{b}\t\t{t}"
159 | )
160 | mt += m
161 | bt += b
162 | case Fail(l):
163 | print(f"\t{i}\t\t{str(args.seed+i-1).ljust(10)}\tFail\t\t-\t\t-\t\t-")
164 | print()
165 | for log in l:
166 | print(f"\t\t{log}")
167 | print()
168 | failed = True
169 | print()
170 |
171 | t = len(results)
172 | tp = clamp((t - 2) / 28, 0, 1)
173 |
174 | if mt > 0 and bt > 0:
175 | x = math.log2(5 * (mt / t) + (bt / t))
176 | xp = clamp((12.5 - x) / 3.4, 0, 1)
177 | else:
178 | x = -1
179 | xp = -1
180 |
181 | print(f"\tt\t{t}")
182 | print(f"\tx\t{x if x > -1 else '-'}")
183 | print()
184 | print(f"\tt'\t{tp}")
185 | print(f"\tx'\t{xp if xp > -1 else '-'}")
186 | print()
187 |
188 | if failed:
189 | score = 0
190 | else:
191 | if approx(tp, 0) or approx(xp, 0):
192 | score = 0
193 | elif approx(tp, 1) and approx(xp, 1):
194 | score = 100
195 | else:
196 | score = 49 + 50 * math.sqrt(tp * xp)
197 |
198 | print(f"\tScore\t{score}{' (yay!)' if score == 100 else ''}")
199 | print()
200 |
--------------------------------------------------------------------------------