├── .gitignore ├── example.config ├── readme.md └── supersubmiterator.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.tsv 2 | *.results 3 | *.input 4 | *.properties 5 | *.success 6 | *.question 7 | *.RData 8 | *.Rhistory -------------------------------------------------------------------------------- /example.config: -------------------------------------------------------------------------------- 1 | { 2 | "liveHIT":"no", 3 | "title":"a title to show to turkers", 4 | "description":"a description to show to turkers", 5 | "experimentURL":"https://www.stanford.edu/~you/path/to/experiment.html", 6 | "keywords":"language research stanford fun cognitive science university explanations", 7 | "USonly?":"yes", 8 | "minPercentPreviousHITsApproved":"95", 9 | "minNumPreviousHITsApproved": "1000", 10 | "frameheight":"650", 11 | "reward":"0.00", 12 | "numberofassignments":"10", 13 | "assignmentsperhit": "9", 14 | "assignmentduration":"1800", 15 | "hitlifetime":"2592000", 16 | "autoapprovaldelay":"60000", 17 | "doesHaveQualification": "none", 18 | "doesNotHaveQualification": "none" 19 | } 20 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Supersubmiterator 2 | 3 | A tool for managing external HITS on Amazon Mechanical Turk. This tool makes it easy to post external HITS and to download the data after participants completed the experiment. Supersubmiterator also supports automatic batching, i.e., splitting one HIT with a large number of assignments into several hits with fewer assignments per HIT. 4 | 5 | The behavior is similar to the [original Submiterator tool](https://github.com/erindb/Submiterator) by Dan Lassiter and Erin Bennett. However, this is a pure Python implementation which does not require the Mechanical Turk CLI tools (which are no longer supported). 6 | 7 | ## Setup 8 | 9 | _(**Note**: supersubmiterator is written in Python 3. If your default Python command is python2, make sure to run `pip3` instead of `pip` and change the first line of `supersubmiterator.py` to `#!/usr/bin/env python3`.)_ 10 | 11 | 12 | 1. If you do not already have an MTurk requester account, follow [these instructions](https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMechanicalTurkGettingStartedGuide/SetUp.html) to sign up as a requester and create an access key/secret pair on Amazon Mechanical Turk. 13 | 14 | 15 | 2. Install the `boto3` and the `xmltodict` packages: 16 | 17 | ``` 18 | pip install boto3 19 | pip install xmltodict 20 | ``` 21 | 22 | 3. Install your MTurk credentials (access key and secret), following the [boto3 23 | directions](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html#configuration). 24 | There are multiple ways to set these up, but the two easiest are: 25 | 26 | 2. Put them in a file called `~/.aws/credentials`, formatted like 27 | 28 | ``` 29 | [default] 30 | aws_access_key_id = YOUR_ACCESS_KEY 31 | aws_secret_access_key = YOUR_SECRET_KEY 32 | ``` 33 | 34 | 1. Add environment variables in your Bash (or other shell) profile (e.g., 35 | `~/.bash_profile` on most Macs). Add the following lines: 36 | 37 | ``` 38 | export MTURK_ACCESS_KEY= 39 | export MTURK_SECRET= 40 | ``` 41 | 42 | Supersubmiterator will use the environment variables if they are available 43 | by default. Otherwise, it will fall back to `boto3` to find the 44 | credentials. For details on how this works, see the [`boto3` 45 | documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#guide-configuration) 46 | 47 | ## How to use supersubmiterator mturk tools 48 | 49 | To post the HIT, first setup the config file. 50 | 51 | Give this config file a unique label as its name: `[LABEL].config`. 52 | 53 | { 54 | "liveHIT":"no", 55 | "title":"a title to show to turkers", 56 | "description":"a description to show to turkers", 57 | "experimentURL":"https://www.stanford.edu/~you/path/to/experiment.html", 58 | "keywords":"language research stanford fun cognitive science university explanations", 59 | "USonly?":"yes", 60 | "minPercentPreviousHITsApproved":"95", 61 | "minNumPreviousHITsApproved":"1000", 62 | "frameheight":"650", 63 | "reward":"0.00", 64 | "numberofassignments":"10", 65 | "assignmentsperhit": "9", 66 | "assignmentduration":"1800", 67 | "hitlifetime":"2592000", 68 | "autoapprovaldelay":"60000", 69 | "doesHaveQualification":" or none", 70 | "doesNotHaveQualification": " or none" 71 | } 72 | 73 | The tool supports the following options: 74 | 75 | 76 | | Option | Value | Description | 77 | | --- | --- | --- | 78 | | liveHIT | "yes" or "no" | If set to "no", the HIT is posted to the sandbox (useful for debugging). | 79 | | title | _string_ | A title that is shown to MTurk workers. | 80 | | description | _string_ | A description that is shown to MTurk workers. | 81 | | experimentURL | _string_ |A public **HTTPS** URL where your experiment is located. | 82 | | keywords | _string_ | A list of keywords that is shown to MTurk workers. | 83 | | USOnly? | "yes" or "no" | If set to "yes", only MTurk workers with a US IP address can see and accept your HIT.| 84 | | minPercentPreviousHITsApproved | _integer_ or "none" |If set to an integer _x_ between 0 and 100, only participants with at least _x_% previous HITs approved can see and accept your hit. | 85 | | minNumPreviousHITsApproved | _integer_ or "none" |If set to an integer _x_ , only participants with at least _x_ previous HITs approved can see and accept your hit. | 86 | | frameheight | _integer_ | The height (in pixel) of the iframe in which your experiment is displayed. Set this to at least the height of the largest trial in your experiment. | 87 | |reward | _float_ | The reward (in USD) that MTurk workers get for completing your HIT. | 88 | | numberofassignments | _integer_ | The total number of assignments (i.e., the total number of participants) for your experiment. | 89 | | assignmentsperhit (optional) | _integer_ | The number of assignmets per HIT. If this is set to a lower number than _numberofassignments_, the tool will automatically create multiple HITs with at most _assignmentsperhit_ assignments per HIT.| 90 | | assignmentduration | _integer_ | Maximum time (in seconds) for MTurk workers to complete your experiment. | 91 | | hitlifetime | _integer_ | Lifetime (in seconds) of your HIT. After this period expires, MTurk workers can no longer see and accept your HIT.| 92 | | autoapprovaldelay | _integer_ | Time (in seconds) after which completed assigments are automatically approved. | 93 | | doesHaveQualification (optional) | _string_ | If set to a qualification ID, only MTurk workers with this qualification can see and accept your HIT.| 94 | | doesNotHaveQualification (optional) | _string_ | If set to a qualification ID, only MTurk workers without this qualification can see and accept your HIT. | 95 | 96 | 97 | Once you have setup the config file, run the following command in the terminal: 98 | 99 | python supersubmiterator.py posthit [LABEL] 100 | 101 | And then when you want to get the results: 102 | 103 | python supersubmiterator.py getresults [LABEL] 104 | 105 | This will create a long-form table of your data (several `[LABEL]-*.csv` files). 106 | 107 | If you want to assign workers to a qualification (workers should be in `[LABEL]-workerids.csv`): 108 | 109 | python supersubmiterator.py assignqualification -qualification_id [LABEL] 110 | 111 | ## How to make this even cooler 112 | 113 | N.B. this will only work on unix. 114 | 115 | If you want, you can make `supersubmiterator` a system-wide command, so you can just type (for example): 116 | 117 | supersubmiterator posthit [LABEL] 118 | supersubmiterator getresults [LABEL] 119 | 120 | To do this, save the Submiterator repo somewhere where it won't move, copy-paste and run the following command: 121 | 122 | chmod u+x supersubmiterator.py 123 | 124 | Then make a directory called "bin" in your home folder and make sym-links to the Submiterator file: 125 | 126 | cd ~ 127 | mkdir bin 128 | cd bin 129 | ln -s [PATH_TO_SUBMITERATOR_DIRECTORY]/supersubmiterator.py supersubmiterator 130 | 131 | Then open up or create the file `.bash_profile` or `.bashrc` in your home directory and add the following line: 132 | 133 | PATH=$PATH:~/bin 134 | 135 | Then once you open up a new terminal, you should be able to use the `supersubmiterator` command as above. 136 | 137 | ## Assigning qualifications 138 | 139 | You can also use supersubmiterator to assign qualifications to workers: 140 | 141 | ```supersubmiterator assignqualification -qualification_id [MTURK_QUALIFICATION_ID] [LABEL]``` 142 | 143 | This will assign the qualifiaction `[MTURK_QUALIFICATION_ID]` to all workers in `[LABEL]-workerids.csv`. 144 | 145 | ## Bonus payments 146 | 147 | To pay workers a bonus, create a file `[LABEL]-bonus.csv` in the same directory as the `[LABEL].config` file, using the following format: 148 | 149 | ``` 150 | workerid,assignmentid,bonus 151 | XYZAUMQ231,ABCCAHTNV34VFG76320,3.00 152 | XYZPXP2345,DEFCAY5L23TKNC75GTVIP,0.50 153 | ``` 154 | 155 | Then run 156 | 157 | ```supersubmiterator paybonus [LABEL]``` 158 | 159 | This will pay the worker with the ID `XYZAUMQ231` a bonus of $3.00, and the worker with the ID `XYZPXP2345` a bonus of $0.50. 160 | 161 | 162 | 163 | -------------------------------------------------------------------------------- /supersubmiterator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import json, argparse, os, csv 4 | import boto3 5 | import xmltodict 6 | 7 | 8 | MTURK_SANDBOX_URL = "https://mturk-requester-sandbox.us-east-1.amazonaws.com" 9 | try: 10 | MTURK_ACCESS_KEY = os.environ["MTURK_ACCESS_KEY"] 11 | except: 12 | MTURK_ACCESS_KEY = None 13 | 14 | try: 15 | MTURK_SECRET = os.environ["MTURK_SECRET"] 16 | except: 17 | MTURK_SECRET = None 18 | 19 | def main(): 20 | parser = argparse.ArgumentParser(description='Interface with MTurk.') 21 | parser.add_argument("subcommand", choices=['posthit', 'getresults', 'assignqualification', 'paybonus'], 22 | type=str, action="store", 23 | help="choose a specific subcommand.") 24 | parser.add_argument("nameofexperimentfiles", metavar="label", type=str, nargs="+", 25 | help="you must have at least one label that corresponds to the " + 26 | "experiment you want to work with. each experiment has a unique label. " + 27 | "this will be the beginning of the name of the config file (everything " + 28 | "before the dot). [label].config.") 29 | parser.add_argument("-qualification_id", metavar="qualificationid", type=str, 30 | default = None) 31 | 32 | args = parser.parse_args() 33 | 34 | subcommand = args.subcommand 35 | labels = args.nameofexperimentfiles 36 | 37 | for label in labels: 38 | if subcommand == "posthit": 39 | live_hit, hit_configs = parse_config(label) 40 | post_hit(label, hit_configs, live_hit) 41 | elif subcommand == "getresults": 42 | live_hit, _ = parse_config(label) 43 | results, results_types = get_results(label, live_hit) 44 | if len(results["trials"]) > 0: 45 | write_results(label, results, results_types) 46 | elif subcommand == "assignqualification": 47 | live_hit, _ = parse_config(label) 48 | assign_qualification(label, live_hit, args.qualification_id) 49 | elif subcommand == "paybonus": 50 | live_hit, _ = parse_config(label) 51 | pay_bonus(label, live_hit) 52 | 53 | 54 | def mturk_client(live_hit=True): 55 | if live_hit: 56 | mturk = boto3.client('mturk', 57 | aws_access_key_id = MTURK_ACCESS_KEY, 58 | aws_secret_access_key = MTURK_SECRET, 59 | region_name='us-east-1' 60 | ) 61 | else: 62 | mturk = boto3.client('mturk', 63 | aws_access_key_id = MTURK_ACCESS_KEY, 64 | aws_secret_access_key = MTURK_SECRET, 65 | region_name='us-east-1', 66 | endpoint_url=MTURK_SANDBOX_URL 67 | ) 68 | return mturk 69 | 70 | def preview_url(hit_id, live_hit=True): 71 | if live_hit: 72 | return "https://worker.mturk.com/mturk/preview?groupId=" + hit_id 73 | else: 74 | return "https://workersandbox.mturk.com/mturk/preview?groupId=" + hit_id 75 | 76 | def post_hit(experiment_label, hit_configs, live_hit=True): 77 | hit_id_filename = experiment_label + ".hits" 78 | 79 | mturk = mturk_client(live_hit = live_hit) 80 | with open(hit_id_filename, "a") as hit_id_file: 81 | print("-" * 80) 82 | for hit_config in hit_configs: 83 | new_hit = mturk.create_hit(**hit_config) 84 | print("Succesfully created hit with {} assignments!".format(new_hit['HIT']["MaxAssignments"]) ) 85 | print("Preview: {}".format(preview_url(new_hit['HIT']['HITGroupId'], live_hit=live_hit))) 86 | print("-" * 80) 87 | print(new_hit['HIT']['HITId'], new_hit['HIT']["MaxAssignments"], file=hit_id_file) 88 | 89 | def parse_answer(json_str): 90 | try: 91 | return json.loads(json_str) 92 | except json.decoder.JSONDecodeError: 93 | return json_str 94 | 95 | def add_workerid(workerid, answer_name, answer_obj): 96 | if isinstance(answer_obj, dict): 97 | answer_obj["workerid"] = workerid 98 | elif isinstance(answer_obj, list): 99 | if len(answer_obj) > 0: 100 | if isinstance(answer_obj[0], dict): 101 | for x in answer_obj: 102 | x["workerid"] = workerid 103 | else: 104 | new_answer_obj = [{answer_name: x, "workerid": workerid} for x in answer_obj] 105 | answer_obj = new_answer_obj 106 | 107 | return answer_obj 108 | 109 | def get_results(experiment_label, live_hit=True): 110 | hit_id_filename = experiment_label + ".hits" 111 | mturk = mturk_client(live_hit = live_hit) 112 | print("Retrieving results...") 113 | print("-" * 80) 114 | results = {"trials": [], "assignments": []} 115 | result_types = {"trials": "list", "assignments": "list"} 116 | with open(hit_id_filename, "r") as hit_id_file: 117 | for hit_id in hit_id_file: 118 | hit_id, assignments = hit_id.strip().split() 119 | worker_results = mturk.list_assignments_for_hit(HITId=hit_id, MaxResults=100) 120 | print("Completed assignments for HIT \"{}\": {}/{}".format(hit_id, worker_results['NumResults'], assignments)) 121 | print("-" * 80) 122 | if worker_results['NumResults'] > 0: 123 | for a in worker_results['Assignments']: 124 | xml_doc = xmltodict.parse(a['Answer']) 125 | additional_trial_cols = {} 126 | worker_id = a["WorkerId"] 127 | assignment_id = a["AssignmentId"] 128 | for answer_field in xml_doc['QuestionFormAnswers']['Answer']: 129 | field_name = answer_field['QuestionIdentifier'] 130 | if field_name == "trials": 131 | trials = parse_answer(answer_field['FreeText']) 132 | else: 133 | answer_obj = parse_answer(answer_field['FreeText']) 134 | if field_name not in result_types: 135 | if isinstance(answer_obj, list): 136 | result_types[field_name] = "list" 137 | results[field_name] = [] 138 | elif isinstance(answer_obj, dict): 139 | result_types[field_name] = "dict" 140 | results[field_name] = [] 141 | else: 142 | result_types[field_name] = "value" 143 | 144 | if result_types[field_name] == "list": 145 | l = add_workerid(worker_id, field_name, answer_obj) 146 | results[field_name].extend(l) 147 | elif result_types[field_name] == "dict": 148 | d = add_workerid(worker_id, field_name, answer_obj) 149 | results[field_name].append(d) 150 | elif result_types[field_name] == "value": 151 | additional_trial_cols["Answer." + field_name] = answer_obj 152 | 153 | d = add_workerid(worker_id, "assignments", {"assignmentid": assignment_id}) 154 | results["assignments"].append(d) 155 | trials = add_workerid(worker_id, "trials", trials) 156 | for t in trials: 157 | for col in additional_trial_cols: 158 | t[col] = additional_trial_cols[col] 159 | results["trials"].extend(trials) 160 | 161 | return results, result_types 162 | 163 | def anonymize(results, results_types): 164 | filtered_results = {} 165 | for field in results_types: 166 | if results_types[field] != "value": 167 | filtered_results[field] = [] 168 | for row in results[field]: 169 | if not isinstance(row, dict): 170 | print("WARNING: Removed invalid data point.") 171 | continue 172 | filtered_results[field].append(row) 173 | 174 | anon_workerids = {} 175 | c = 0 176 | for field in results_types: 177 | if results_types[field] != "value": 178 | for row in filtered_results[field]: 179 | if row["workerid"] not in anon_workerids: 180 | anon_workerids[row["workerid"]] = c 181 | c += 1 182 | row["workerid"] = anon_workerids[row["workerid"]] 183 | 184 | return filtered_results, anon_workerids 185 | 186 | def write_results(label, results, results_types): 187 | results, anon_workerids = anonymize(results, results_types) 188 | for field in results_types: 189 | if results_types[field] != "value" and len(results[field]) > 0: 190 | out_file_name = label + "-" + field + ".csv" 191 | with open(out_file_name, "w") as out_file: 192 | print("Writing results to {} ...".format(out_file_name)) 193 | fieldnames = set().union(*[set(x.keys()) for x in results[field]]) 194 | writer = csv.DictWriter(out_file, fieldnames=fieldnames) 195 | writer.writeheader() 196 | writer.writerows(results[field]) 197 | 198 | out_file_name = label + "-workerids.csv" 199 | with open(out_file_name, "w") as out_file: 200 | writer = csv.DictWriter(out_file, fieldnames=["workerid", "anon_workerid"]) 201 | writer.writeheader() 202 | for workerid, anon_workerid in anon_workerids.items(): 203 | writer.writerow({"workerid": workerid, "anon_workerid": anon_workerid}) 204 | print("-" * 80) 205 | 206 | 207 | def assign_qualification(label, live_hit, qualificationid): 208 | f = open(label + '-workerids.csv') 209 | content = f.read() 210 | content = content.split('\n') 211 | 212 | workerids = [] 213 | for x in range(1, len(content)-1): 214 | info = content[x].split(',') 215 | workerids.append(info[0]) 216 | 217 | print("Number of workerids: ", len(workerids)) 218 | 219 | mturk = mturk_client(live_hit) 220 | 221 | for workerid in workerids: 222 | response = mturk.associate_qualification_with_worker( 223 | QualificationTypeId=qualificationid, 224 | WorkerId=workerid, 225 | IntegerValue=100, 226 | SendNotification=False 227 | ) 228 | print(response) 229 | 230 | def pay_bonus(label, live_hit): 231 | 232 | mturk = mturk_client(live_hit) 233 | bonus_total = 0 234 | n_boni = 0 235 | new_rows = [] 236 | with open(label + '-bonus.csv') as f: 237 | reader = csv.DictReader(f) 238 | for row in reader: 239 | if float(row["bonus"]) > 0: 240 | response = mturk.send_bonus( 241 | WorkerId=row["workerid"], 242 | BonusAmount=row["bonus"], 243 | AssignmentId=row["assignmentid"], 244 | Reason='Participation in experiment.' 245 | ) 246 | if ("ResponseMetadata" not in response or 247 | "HTTPStatusCode" not in response["ResponseMetadata"] or 248 | response["ResponseMetadata"]["HTTPStatusCode"] != 200 249 | ): 250 | print("ERROR: Bonus payment to worker {} failed!", row["workerid"]) 251 | new_rows.append(row) 252 | continue 253 | n_boni += 1 254 | 255 | if "bonus_paid" not in row: 256 | row["bonus_paid"] = 0 257 | row["bonus_paid"] = float(row["bonus_paid"]) + float(row["bonus"]) 258 | bonus_total = bonus_total + float(row["bonus"]) 259 | row["bonus"] = 0 260 | new_rows.append(row) 261 | 262 | if len(new_rows) < 1: 263 | return 264 | 265 | with open(label + "-bonus.csv", 'w') as f: 266 | writer = csv.DictWriter(f, new_rows[0].keys()) 267 | writer.writeheader() 268 | writer.writerows(new_rows) 269 | 270 | print("-" * 80) 271 | print("Paid boni to {} workers, totalling ${}".format(n_boni, bonus_total)) 272 | print("-" * 80) 273 | 274 | 275 | 276 | def parse_config(experiment_label, output_dir=""): 277 | config_filename = experiment_label + ".config" 278 | 279 | # load config file 280 | with open(config_filename, "r") as config_file: 281 | config = json.load(config_file) 282 | 283 | is_live_hit = config["liveHIT"] == "yes" 284 | 285 | hit_options = dict() 286 | # prepare question XML 287 | hit_options["Question"] = """ 288 | 289 | {} 290 | {} 291 | 292 | """.format(config["experimentURL"] , config["frameheight"]) 293 | 294 | # set other properties 295 | hit_options["Title"] = config["title"] 296 | hit_options["Description"] = config["description"] 297 | hit_options["Keywords"] = config["description"] 298 | hit_options["Reward"] = config["reward"] 299 | hit_options["LifetimeInSeconds"] = int(config["hitlifetime"]) 300 | hit_options["AssignmentDurationInSeconds"] = int(config["assignmentduration"]) 301 | hit_options["AutoApprovalDelayInSeconds"] = int(config["autoapprovaldelay"]) 302 | hit_options["QualificationRequirements"] = [] 303 | 304 | 305 | if config["USonly?"].lower() in ["y", "true", "yes", "t", "1"]: 306 | hit_options["QualificationRequirements"].append({ 307 | "QualificationTypeId": "00000000000000000071", 308 | "Comparator": "EqualTo", 309 | "LocaleValues": [ 310 | {"Country": "US"} 311 | ], 312 | "ActionsGuarded": "DiscoverPreviewAndAccept" 313 | }) 314 | 315 | if config["minPercentPreviousHITsApproved"] != "none": 316 | hit_options["QualificationRequirements"].append({ 317 | "QualificationTypeId": "000000000000000000L0", 318 | "Comparator": "GreaterThanOrEqualTo", 319 | "IntegerValues": [ 320 | int(config["minPercentPreviousHITsApproved"]) 321 | ], 322 | "ActionsGuarded": "DiscoverPreviewAndAccept" 323 | }) 324 | 325 | if "minNumPreviousHITsApproved" in config and config["minNumPreviousHITsApproved"] != "none": 326 | hit_options["QualificationRequirements"].append({ 327 | "QualificationTypeId": "00000000000000000040", 328 | "Comparator": "GreaterThanOrEqualTo", 329 | "IntegerValues": [ 330 | int(config["minNumPreviousHITsApproved"]) 331 | ], 332 | "ActionsGuarded": "DiscoverPreviewAndAccept" 333 | }) 334 | 335 | if "doesNotHaveQualification" in config and config["doesNotHaveQualification"] != "none": 336 | hit_options["QualificationRequirements"].append({ 337 | "QualificationTypeId": config["doesNotHaveQualification"], 338 | "Comparator": "DoesNotExist", 339 | "ActionsGuarded": "DiscoverPreviewAndAccept" 340 | }) 341 | 342 | if "doesHaveQualification" in config and config["doesHaveQualification"] != "none": 343 | qualification_ids = (config["doesHaveQualification"] 344 | if "," not in config["doesHaveQualification"] 345 | else config["doesHaveQualification"].split(",")) 346 | hit_options["QualificationRequirements"].append({ 347 | "QualificationTypeId": qualification_ids, 348 | "Comparator": "Exists", 349 | "ActionsGuarded": "DiscoverPreviewAndAccept" 350 | }) 351 | 352 | max_assignments = int(config["numberofassignments"]) 353 | if "assignmentsperhit" in config: 354 | assignments_per_hit = int(config["assignmentsperhit"]) 355 | else: 356 | assignments_per_hit = max_assignments 357 | 358 | hit_assignments = [assignments_per_hit] * int(max_assignments / assignments_per_hit) 359 | if max_assignments % assignments_per_hit > 0: 360 | hit_assignments.append(max_assignments % assignments_per_hit) 361 | 362 | 363 | hit_options_list = [] 364 | 365 | # create an options dictionary for each batch 366 | for assignments in hit_assignments: 367 | options = dict(hit_options) 368 | options["MaxAssignments"] = assignments 369 | hit_options_list.append(options) 370 | 371 | return is_live_hit, hit_options_list 372 | 373 | if __name__ == '__main__': 374 | main() 375 | --------------------------------------------------------------------------------