├── 6_genre_clean_training_data.txt ├── 6_genre_eval_data.txt ├── LICENSE ├── README.md ├── main.py ├── requirements.txt ├── streamlit_app.py ├── streamlit_story_gen.py └── train_6_genre_checkpoint.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Pranav Sai Vadrevu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GPT2 genre-based story generator 2 | 3 | 4 | Generates stories based on genres and user-inputted prompts. 5 | 6 | If you want to generate some stories, you can test it out [here](https://huggingface.co/pranavpsv/gpt2-genre-story-generator). 7 | If you want to use the above link, the input prompt has to be in the format: 8 | 9 | **\ \ Small input prompt...** 10 | 11 | Supported genres: superhero, sci_fi, horror, action, thriller, drama 12 | 13 | Alternatively, an experimental Streamlit web app is hosted [here](https://share.streamlit.io/pranavpsv/genre-based-story-generator). 14 | 15 | Created by fine-tuning GPT2 on genre-based stories. 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from transformers import pipeline 2 | from fastapi import FastAPI 3 | 4 | app = FastAPI() 5 | 6 | generate_story = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator") 7 | from pydantic import BaseModel 8 | 9 | class Item(BaseModel): 10 | prompt: str 11 | length: int 12 | 13 | @app.post("/generate") 14 | def generate(item: Item): 15 | prompt_text = item.prompt 16 | max_length = item.length 17 | return generate_story(prompt_text, max_length=max_length) 18 | 19 | 20 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | altair==4.1.0 2 | appnope==0.1.2 3 | argon2-cffi==20.1.0 4 | astor==0.8.1 5 | async-generator==1.10 6 | attrs==20.3.0 7 | backcall==0.2.0 8 | base58==2.1.0 9 | bleach==3.2.2 10 | blinker==1.4 11 | cachetools==4.2.0 12 | certifi==2020.12.5 13 | cffi==1.14.4 14 | chardet==4.0.0 15 | click==7.1.2 16 | decorator==4.4.2 17 | defusedxml==0.6.0 18 | entrypoints==0.3 19 | filelock==3.0.12 20 | gitdb==4.0.5 21 | GitPython==3.1.12 22 | idna==2.10 23 | ipykernel==5.4.3 24 | ipython==7.19.0 25 | ipython-genutils==0.2.0 26 | ipywidgets==7.6.3 27 | jedi==0.18.0 28 | Jinja2==2.11.2 29 | joblib==1.0.0 30 | jsonschema==3.2.0 31 | jupyter-client==6.1.11 32 | jupyter-core==4.7.0 33 | jupyterlab-pygments==0.1.2 34 | jupyterlab-widgets==1.0.0 35 | MarkupSafe==1.1.1 36 | mistune==0.8.4 37 | nbclient==0.5.1 38 | nbconvert==6.0.7 39 | nbformat==5.1.2 40 | nest-asyncio==1.4.3 41 | notebook==6.2.0 42 | numpy==1.19.5 43 | packaging==20.8 44 | pandas==1.2.1 45 | pandocfilters==1.4.3 46 | parso==0.8.1 47 | pexpect==4.8.0 48 | pickleshare==0.7.5 49 | Pillow==8.1.0 50 | prometheus-client==0.9.0 51 | prompt-toolkit==3.0.11 52 | protobuf==3.14.0 53 | ptyprocess==0.7.0 54 | pycparser==2.20 55 | pydeck==0.5.0 56 | Pygments==2.7.4 57 | pyparsing==2.4.7 58 | pyrsistent==0.17.3 59 | python-dateutil==2.8.1 60 | pytz==2020.5 61 | pyzmq==21.0.1 62 | regex==2020.11.13 63 | requests==2.25.1 64 | sacremoses==0.0.43 65 | Send2Trash==1.5.0 66 | six==1.15.0 67 | smmap==3.0.4 68 | streamlit==0.74.1 69 | terminado==0.9.2 70 | testpath==0.4.4 71 | tokenizers==0.9.4 72 | toml==0.10.2 73 | toolz==0.11.1 74 | torch==1.7.1 75 | tornado==6.1 76 | tqdm==4.56.0 77 | traitlets==5.0.5 78 | transformers==4.0.1 79 | typing-extensions==3.7.4.3 80 | tzlocal==2.1 81 | urllib3==1.26.2 82 | validators==0.18.2 83 | wcwidth==0.2.5 84 | webencodings==0.5.1 85 | widgetsnbextension==3.5.1 86 | -------------------------------------------------------------------------------- /streamlit_app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import random 3 | import time 4 | import re 5 | #import requests 6 | import time 7 | from transformers import pipeline 8 | 9 | 10 | # Adding cache with output mutation for "Continue Generated Story" Feature 11 | @st.cache(allow_output_mutation=True) 12 | def Content(): 13 | return ["", 0] 14 | 15 | content = Content() 16 | 17 | # Render this content 18 | st.title('Story Generator') 19 | st.subheader('Generate Stories based on genres') 20 | option = st.selectbox("Which genre?", ("Superhero", "sci_fi", "Horror", "Thriller", "Action", "Drama")) 21 | 22 | suggested_prompt = None 23 | 24 | suggested_prompt_subtitle = "Select one of these Starting Prompts" 25 | if option == "Horror": 26 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "A Demon hunter", "A Ghost", "Luigi enters a haunted resort", "An apparition", "A monster", "In the mist,","A zombie", "A scary", "A spooky",)) 27 | elif option == "Superhero": 28 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "Batman", "Spider-Man", "Superman", "Gotham city is under attack", "Darkseid", "Thanos", 29 | "The Justice League", "The Avengers")) 30 | elif option == "sci_fi": 31 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "Aliens", "In the future, ", "A satellite", "After discovering time travel,")) 32 | elif option == "Thriller": 33 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "A detective must investigate the", "In the dark shadows, ", "A serial killer")) 34 | elif option == "Action": 35 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "Special Agent Bart is hunting down", "After a robbery gone wrong,", "A treasure hunter", "An undercover cop poses as a gangster")) 36 | 37 | 38 | title_input = st.text_input("Alternatively, Enter a Title") 39 | text_input = st.text_input("Enter a Small Starting Prompt") 40 | 41 | text_input = suggested_prompt if (suggested_prompt and (text_input == "") and (suggested_prompt != 0)) else text_input 42 | text_input = f'"{title_input}" is a(n) {option.lower()} film about' if (title_input != "") else text_input 43 | story_length = st.number_input("Enter length of generated text (50 - 100):", min_value=50, max_value=100, key="1") 44 | 45 | generate_button = st.button("Generate Story") 46 | generate_more = st.button("Continue Generated Story") 47 | model_name = "pranavpsv/gpt2-genre-story-generator" 48 | 49 | @st.cache(allow_output_mutation=True) 50 | def get_model(): 51 | story_generator = pipeline("text-generation", model_name) 52 | return story_generator 53 | 54 | 55 | story_generator = get_model() 56 | def generate_story(input_prompt, story_length): 57 | """ 58 | generate_story(input_prompt, story_length) generates a story with story_length number of tokens from an input prompt 59 | """ 60 | global story 61 | i = 1 62 | with st.spinner("Story Generating..."): 63 | contents = story_generator(f" <{option.lower()}> {input_prompt}", max_length=story_length)[0]["generated_text"] 64 | print(contents) 65 | 66 | st.success("Story Generated!") 67 | 68 | # Postprocess generated story 69 | content = contents.split(">")[2] 70 | story = re.sub('\[.*?\]','', content) 71 | st.write("Generated Story:") 72 | st.write(story) 73 | 74 | # Write story to frontend 75 | return story, story_length 76 | 77 | if generate_button: 78 | content[0], content[1] = generate_story(text_input, story_length) 79 | if generate_more: 80 | content[1] += 50 81 | content[0], content[1] = generate_story(content[0], content[1]) 82 | -------------------------------------------------------------------------------- /streamlit_story_gen.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import re 3 | import requests 4 | import time 5 | from multiprocessing.dummy import Pool 6 | 7 | pool = Pool(4) 8 | 9 | # Adding cache with output mutation for "Continue Generated Story" Feature 10 | @st.cache(allow_output_mutation=True) 11 | def Content(): 12 | return ["", 0] 13 | 14 | content = Content() 15 | 16 | # Render this content 17 | st.title('Story Generator') 18 | st.subheader('Generate Stories based on genres') 19 | option = st.selectbox("Which genre?", ("Action", "Superhero", "Drama", "sci_fi", "Thriller", "Horror")) 20 | text_input = st.text_input("Enter a Small Starting Prompt") 21 | title_input = st.text_input("Alternatively, Enter a Title") 22 | 23 | suggested_prompt = None 24 | 25 | suggested_prompt_subtitle = "Alternatively, Select one of these Starting Prompts" 26 | if option == "Horror": 27 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "A Demon hunter", "A Ghost", "An apparition", "A monster", "In the mist,","A zombie", "A scary", "A spooky")) 28 | elif option == "Superhero": 29 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "Batman", "Spider-Man", "Superman", "Gotham city is under attack", "Darkseid", "Thanos", 30 | "The Justice League", "The Avengers")) 31 | elif option == "sci_fi": 32 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "Aliens", "In the future, ", "A satellite", "After discovering time travel,")) 33 | elif option == "Thriller": 34 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "A detective must investigate the", "In the dark shadows, ", "A serial killer")) 35 | elif option == "Action": 36 | suggested_prompt = st.selectbox(suggested_prompt_subtitle, ("", "Special Agent Bart is hunting down", "After a robbery gone wrong,", "A treasure hunter", "An undercover cop poses as a gangster")) 37 | 38 | text_input = suggested_prompt if (suggested_prompt and (text_input == "") and (suggested_prompt != 0)) else text_input 39 | text_input = f'"{title_input}" is a film about' if (title_input != "") else text_input 40 | story_length = st.number_input("Enter length of generated text (50 - 100):", min_value=50, max_value=100, key="1") 41 | 42 | generate_button = st.button("Generate Story") 43 | generate_more = st.button("Continue Generated Story") 44 | 45 | 46 | def generate_story(input_prompt, story_length): 47 | """ 48 | generate_story(input_prompt, story_length) generates a story with story_length number of tokens from an input prompt 49 | """ 50 | 51 | with st.spinner("Story Generating..."): 52 | my_bar = st.progress(0) 53 | r = pool.apply_async(requests.post, ["http://127.0.0.1:8000/generate"], {"json": {"prompt": f" <{option.lower()}> {input_prompt}", "length": story_length}}) 54 | 55 | # Display progress bar while generating 56 | for percent_complete in range(100): 57 | time.sleep(0.05) 58 | my_bar.progress(percent_complete + 1) 59 | 60 | st.success("Story Generated!") 61 | contents = [text["generated_text"] for text in eval(r.get().content)] 62 | 63 | # Postprocess generated story 64 | content = contents[0].split(">")[2] 65 | content = re.sub('\[.*?\]','', content) 66 | 67 | # Write story to frontend 68 | st.write(content) 69 | return content, story_length 70 | 71 | if generate_button: 72 | content[0], content[1] = generate_story(text_input, story_length) 73 | if generate_more: 74 | content[1] += 50 75 | content[0], content[1] = generate_story(content[0], content[1]) 76 | -------------------------------------------------------------------------------- /train_6_genre_checkpoint.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import math 3 | import os 4 | from dataclasses import dataclass, field 5 | from typing import Optional 6 | 7 | from transformers import ( 8 | CONFIG_MAPPING, 9 | MODEL_WITH_LM_HEAD_MAPPING, 10 | AutoConfig, 11 | AutoModelWithLMHead, 12 | AutoTokenizer, 13 | DataCollatorForLanguageModeling, 14 | LineByLineTextDataset, 15 | PreTrainedTokenizer, 16 | TextDataset, 17 | Trainer, 18 | TrainingArguments, 19 | set_seed, 20 | ) 21 | 22 | 23 | logger = logging.getLogger(__name__) 24 | 25 | 26 | MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) 27 | MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) 28 | 29 | 30 | @dataclass 31 | class ModelArguments: 32 | """ 33 | Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. 34 | """ 35 | 36 | model_name_or_path: Optional[str] = field( 37 | default=None, 38 | metadata={ 39 | "help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch." 40 | }, 41 | ) 42 | model_type: Optional[str] = field( 43 | default=None, 44 | metadata={ 45 | "help": "If training from scratch, pass a model type from the list: " 46 | + ", ".join(MODEL_TYPES) 47 | }, 48 | ) 49 | config_name: Optional[str] = field( 50 | default=None, 51 | metadata={ 52 | "help": "Pretrained config name or path if not the same as model_name" 53 | }, 54 | ) 55 | tokenizer_name: Optional[str] = field( 56 | default=None, 57 | metadata={ 58 | "help": "Pretrained tokenizer name or path if not the same as model_name" 59 | }, 60 | ) 61 | cache_dir: Optional[str] = field( 62 | default=None, 63 | metadata={ 64 | "help": "Where do you want to store the pretrained models downloaded from s3" 65 | }, 66 | ) 67 | 68 | 69 | @dataclass 70 | class DataTrainingArguments: 71 | """ 72 | Arguments pertaining to what data we are going to input our model for training and eval. 73 | """ 74 | 75 | train_data_file: Optional[str] = field( 76 | default=None, metadata={"help": "The input training data file (a text file)."} 77 | ) 78 | eval_data_file: Optional[str] = field( 79 | default=None, 80 | metadata={ 81 | "help": "An optional input evaluation data file to evaluate the perplexity on (a text file)." 82 | }, 83 | ) 84 | line_by_line: bool = field( 85 | default=False, 86 | metadata={ 87 | "help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences." 88 | }, 89 | ) 90 | 91 | mlm: bool = field( 92 | default=False, 93 | metadata={ 94 | "help": "Train with masked-language modeling loss instead of language modeling." 95 | }, 96 | ) 97 | mlm_probability: float = field( 98 | default=0.15, 99 | metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}, 100 | ) 101 | 102 | block_size: int = field( 103 | default=-1, 104 | metadata={ 105 | "help": "Optional input sequence length after tokenization." 106 | "The training dataset will be truncated in block of this size for training." 107 | "Default to the model max input length for single sentence inputs (take into account special tokens)." 108 | }, 109 | ) 110 | overwrite_cache: bool = field( 111 | default=False, 112 | metadata={"help": "Overwrite the cached training and evaluation sets"}, 113 | ) 114 | 115 | 116 | def get_dataset( 117 | args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate=False 118 | ): 119 | file_path = args.eval_data_file if evaluate else args.train_data_file 120 | if args.line_by_line: 121 | return LineByLineTextDataset( 122 | tokenizer=tokenizer, file_path=file_path, block_size=args.block_size 123 | ) 124 | else: 125 | return TextDataset( 126 | tokenizer=tokenizer, 127 | file_path=file_path, 128 | block_size=args.block_size, 129 | overwrite_cache=args.overwrite_cache, 130 | ) 131 | 132 | 133 | def main(): 134 | # See all possible arguments in src/transformers/training_args.py 135 | # or by passing the --help flag to this script. 136 | # We now keep distinct sets of args, for a cleaner separation of concerns. 137 | 138 | model_args = ModelArguments( 139 | model_name_or_path="gpt2", model_type="gpt2", cache_dir="/u0/psvadrev/.cache" 140 | ) 141 | data_args = DataTrainingArguments( 142 | train_data_file="6_genre_clean_training_data.txt", 143 | eval_data_file="6_genre_eval_data.txt", 144 | line_by_line=True, 145 | mlm=False, 146 | block_size=512, 147 | overwrite_cache=True, 148 | ) 149 | training_args = TrainingArguments( 150 | output_dir="story_generator_6_genre_eval", 151 | overwrite_output_dir=True, 152 | do_train=True, 153 | do_eval=True, 154 | do_predict=False, 155 | evaluate_during_training=False, 156 | logging_steps=500, 157 | per_device_train_batch_size=4, 158 | num_train_epochs=10, 159 | save_total_limit=1, 160 | save_steps=1000000, 161 | ) 162 | 163 | if data_args.eval_data_file is None and training_args.do_eval: 164 | raise ValueError( 165 | "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file " 166 | "or remove the --do_eval argument." 167 | ) 168 | 169 | if ( 170 | os.path.exists(training_args.output_dir) 171 | and os.listdir(training_args.output_dir) 172 | and training_args.do_train 173 | and not training_args.overwrite_output_dir 174 | ): 175 | raise ValueError( 176 | f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." 177 | ) 178 | 179 | # Setup logging 180 | logging.basicConfig( 181 | format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", 182 | datefmt="%m/%d/%Y %H:%M:%S", 183 | level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, 184 | ) 185 | logger.warning( 186 | "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", 187 | training_args.local_rank, 188 | training_args.device, 189 | training_args.n_gpu, 190 | bool(training_args.local_rank != -1), 191 | training_args.fp16, 192 | ) 193 | logger.info("Training/evaluation parameters %s", training_args) 194 | 195 | # Set seed 196 | set_seed(training_args.seed) 197 | 198 | # Load pretrained model and tokenizer 199 | # 200 | # Distributed training: 201 | # The .from_pretrained methods guarantee that only one local process can concurrently 202 | # download model & vocab. 203 | 204 | if model_args.config_name: 205 | config = AutoConfig.from_pretrained( 206 | model_args.config_name, cache_dir=model_args.cache_dir 207 | ) 208 | elif model_args.model_name_or_path: 209 | config = AutoConfig.from_pretrained( 210 | model_args.model_name_or_path, cache_dir=model_args.cache_dir 211 | ) 212 | else: 213 | config = CONFIG_MAPPING[model_args.model_type]() 214 | logger.warning("You are instantiating a new config instance from scratch.") 215 | 216 | if model_args.tokenizer_name: 217 | tokenizer = AutoTokenizer.from_pretrained( 218 | model_args.tokenizer_name, cache_dir=model_args.cache_dir 219 | ) 220 | elif model_args.model_name_or_path: 221 | tokenizer = AutoTokenizer.from_pretrained( 222 | model_args.model_name_or_path, cache_dir=model_args.cache_dir 223 | ) 224 | else: 225 | raise ValueError( 226 | "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it," 227 | "and load it from here, using --tokenizer_name" 228 | ) 229 | 230 | if model_args.model_name_or_path: 231 | model = AutoModelWithLMHead.from_pretrained( 232 | model_args.model_name_or_path, 233 | from_tf=bool(".ckpt" in model_args.model_name_or_path), 234 | config=config, 235 | cache_dir=model_args.cache_dir, 236 | ) 237 | else: 238 | logger.info("Training new model from scratch") 239 | model = AutoModelWithLMHead.from_config(config) 240 | 241 | special_tokens_dict = { 242 | "bos_token": "", 243 | "eos_token": "", 244 | "pad_token": "", 245 | "additional_special_tokens": [ 246 | "", 247 | "", 248 | "", 249 | "", 250 | "", 251 | "", 252 | ], 253 | } 254 | num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) 255 | model.resize_token_embeddings(len(tokenizer)) 256 | 257 | if data_args.block_size <= 0: 258 | data_args.block_size = tokenizer.max_len 259 | # Our input block size will be the max possible for the model 260 | else: 261 | data_args.block_size = min(data_args.block_size, tokenizer.max_len) 262 | 263 | # Get datasets 264 | 265 | train_dataset = ( 266 | get_dataset(data_args, tokenizer=tokenizer) if training_args.do_train else None 267 | ) 268 | eval_dataset = ( 269 | get_dataset(data_args, tokenizer=tokenizer, evaluate=True) 270 | if training_args.do_eval 271 | else None 272 | ) 273 | data_collator = DataCollatorForLanguageModeling( 274 | tokenizer=tokenizer, 275 | mlm=data_args.mlm, 276 | mlm_probability=data_args.mlm_probability, 277 | ) 278 | 279 | # Initialize our Trainer 280 | trainer = Trainer( 281 | model=model, 282 | args=training_args, 283 | data_collator=data_collator, 284 | train_dataset=train_dataset, 285 | eval_dataset=eval_dataset, 286 | prediction_loss_only=True, 287 | ) 288 | 289 | # Training 290 | if training_args.do_train: 291 | model_path = ( 292 | model_args.model_name_or_path 293 | if model_args.model_name_or_path is not None 294 | and os.path.isdir(model_args.model_name_or_path) 295 | else None 296 | ) 297 | trainer.train(model_path=model_path) 298 | trainer.save_model() 299 | # For convenience, we also re-save the tokenizer to the same directory, 300 | # so that you can share your model easily on huggingface.co/models =) 301 | if trainer.is_world_master(): 302 | tokenizer.save_pretrained(training_args.output_dir) 303 | 304 | # Evaluation 305 | results = {} 306 | if training_args.do_eval: 307 | logger.info("*** Evaluate ***") 308 | 309 | eval_output = trainer.evaluate() 310 | 311 | perplexity = math.exp(eval_output["eval_loss"]) 312 | result = {"perplexity": perplexity} 313 | 314 | output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt") 315 | if trainer.is_world_master(): 316 | with open(output_eval_file, "w") as writer: 317 | logger.info("***** Eval results *****") 318 | for key in sorted(result.keys()): 319 | logger.info(" %s = %s", key, str(result[key])) 320 | writer.write("%s = %s\n" % (key, str(result[key]))) 321 | 322 | results.update(result) 323 | 324 | return results 325 | 326 | 327 | if __name__ == "__main__": 328 | main() 329 | --------------------------------------------------------------------------------