├── .gitignore ├── requirements.txt ├── GenerateVideoFromText.py ├── SubtitleUtil.py ├── GenerateVideoFromGPT.py ├── VideoUtil.py ├── README.md ├── ImageUtil.py ├── config.example.json ├── TextGenUtil.py ├── GenerateVideo.py ├── VoiceUtil.py └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | storyprompt.txt 2 | gptprompt.txt 3 | config.json 4 | output/ 5 | music/ 6 | __pycache__/ -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | gender_guesser==0.4.0 2 | nltk==3.8.1 3 | openai==0.23.1 4 | Pillow==9.4.0 5 | pyttsx3==2.90 6 | pywin32==305 7 | requests==2.28.1 8 | textblob==0.17.1 9 | -------------------------------------------------------------------------------- /GenerateVideoFromText.py: -------------------------------------------------------------------------------- 1 | from textblob import TextBlob 2 | import nltk 3 | import json 4 | import requests 5 | from pathlib import Path 6 | from datetime import datetime 7 | from time import sleep 8 | import re 9 | import os 10 | import subprocess 11 | import win32api 12 | 13 | import ImageUtil 14 | import VoiceUtil 15 | import VideoUtil 16 | import GenerateVideo 17 | 18 | text=Path('storyprompt.txt').read_text(encoding="utf-8") 19 | config = GenerateVideo.LoadConfig(text) 20 | GenerateVideo.Generate(text,config) 21 | 22 | 23 | -------------------------------------------------------------------------------- /SubtitleUtil.py: -------------------------------------------------------------------------------- 1 | import mutagen 2 | 3 | 4 | separator = " --> " 5 | 6 | def convertToSrtTimestamp(seconds): 7 | hours, remainder = divmod(seconds, 3600) 8 | minutes, seconds = divmod(remainder, 60) 9 | milliseconds = round((seconds % 1) * 1000) 10 | return f"{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d}:{milliseconds:03d}" 11 | 12 | def updateSubtitle(subtitle, path, text, ind): 13 | subtitletxt = subtitle[0] + str(ind) + "\n\n" 14 | duration = mutagen.File(path).info.length 15 | currentlength = subtitle[1] + duration 16 | subtitletxt = subtitletxt + convertToSrtTimestamp(subtitle[1]) + separator + convertToSrtTimestamp(currentlength) + "\n\n" 17 | subtitletxt = subtitletxt + text + "\n\n" 18 | return [subtitletxt, currentlength] 19 | -------------------------------------------------------------------------------- /GenerateVideoFromGPT.py: -------------------------------------------------------------------------------- 1 | from textblob import TextBlob 2 | import nltk 3 | import json 4 | import requests 5 | from pathlib import Path 6 | from datetime import datetime 7 | from time import sleep 8 | import re 9 | import os 10 | import subprocess 11 | import win32api 12 | 13 | import ImageUtil 14 | import VoiceUtil 15 | import TextGenUtil 16 | import VideoUtil 17 | import GenerateVideo 18 | 19 | prompt = Path('gptprompt.txt').read_text(encoding="utf-8") 20 | config = GenerateVideo.LoadConfig(prompt) 21 | if config["paragraphs"]: 22 | prompt=config["paragraphs"][0] 23 | else: 24 | prompt="" 25 | if "text_prompt_prefix" in config.keys(): 26 | prompt=config["text_prompt_prefix"] + prompt 27 | if "text_prompt_suffix" in config.keys(): 28 | prompt=prompt + config["text_prompt_suffix"] 29 | print("generating text with prompt: " + prompt) 30 | text = TextGenUtil.generateText(prompt, config) 31 | config["paragraphs"]=text.split('\n') 32 | config["text_prompt"]=prompt 33 | GenerateVideo.Generate(text,config) 34 | 35 | -------------------------------------------------------------------------------- /VideoUtil.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os, random 3 | 4 | def combine_videos(path, imagefiles, audiofiles, config): 5 | 6 | folder_path = "." + path 7 | output_filename = path + "\\" + "final.mp4" 8 | count = len(imagefiles) 9 | concat_command= "[0][1][2][3][4][5]concat=n=" + str(count) + ":v=1:a=1[vv][a];[vv]format=yuv420p[v]" 10 | ffmpeg_command = ["ffmpeg"] + generate_source_list(imagefiles, audiofiles) + ["-filter_complex"] + [concat_command] + ["-map", "[v]", "-map", "[a]", output_filename] 11 | print (" ".join(ffmpeg_command)) 12 | # Run the FFmpeg command 13 | subprocess.run(ffmpeg_command, check=True, shell=True) 14 | 15 | def generate_source_list(imagefiles, audiofiles): 16 | image_index=0 17 | sourceList = [] 18 | for i in imagefiles: 19 | sourceList.append("-i") 20 | sourceList.append(i) 21 | sourceList.append("-i") 22 | sourceList.append(audiofiles[image_index]) 23 | image_index=image_index + 1 24 | image_index=image_index+1 25 | return sourceList 26 | 27 | def retrieve_music(config): 28 | path = "music\\" + config["music_genre"] + "\\" + random.choice(os.listdir("music\\" + config["music_genre"])) 29 | return ["-i", path] 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI Video StoryGen 2 | Uses GPT, TTS, and Stable Diffusion (AUTOMATIC1111) to automatically generate videos 3 | ## Features 4 | - Can either parse existing text or generate new text using GPT API 5 | - Uses the AUTOMATIC1111 API to generate images based on the text of the story 6 | - Adds subtitles to images 7 | - Currently supports pyttsx3 or ElevenLabs AI to generate audio 8 | - Will automatically detect speaker (if text is in transcript format) 9 | - Combines generated audio and images using FFMPEG to create a video 10 | ## Installation and Running 11 | - Make sure Automatic1111 API is enabled and that its running 12 | - copy config.example.json, rename it to config.json and add your keys 13 | - Run GenerateVideoFromGPT.py if you want to generate using a GPT prompt in gptprompt.txt 14 | - Run GenerateVideoFromText.py if you want to generate using existing text 15 | 16 | ## Using Styles 17 | you can define styles in config.json. Styles will override the default settings in config.json if you use them. In order to use them put all the styles you want to use on the first line between "!", seperated by "," 18 | 19 | Here is an example output: 20 | [![example](https://img.youtube.com/vi/lm_peWz93a4/maxresdefault.jpg)](https://www.youtube.com/watch?v=lm_peWz93a4) 21 | 22 | This is still in development. I'm still not sure how I want to implement the UI. 23 | -------------------------------------------------------------------------------- /ImageUtil.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from PIL import Image 3 | import io 4 | import base64 5 | 6 | 7 | upscale=False 8 | def generate_image(para, ind, path, config): 9 | payload = { 10 | "prompt": para + config["prompt"], 11 | "negative_prompt": config["negative_prompt"], 12 | "steps": config["steps"], 13 | "width": config["image_width"], 14 | "height": config["image_height"], 15 | "sampler_index": config["sampler_index"] 16 | } 17 | r = requests.post(url=f'http://127.0.0.1:7860/sdapi/v1/txt2img', json=payload).json() 18 | for i in r['images']: 19 | image = Image.open(io.BytesIO(base64.b64decode(i.split(",",1)[0]))) 20 | imagepath=path + "\\" + "image_" + str(ind) + '.png' 21 | if config["upscale_enabled"]: 22 | image=upscale_image(image,config) 23 | image.save(imagepath) 24 | return imagepath 25 | 26 | def upscale_image(image, config): 27 | with io.BytesIO() as buffer: 28 | image.save(buffer, format='PNG') 29 | payload = { 30 | "upscaling_resize": config["upscaling_resize"], 31 | "upscaler_1": config["upscaler_1"], 32 | "image": base64.b64encode(buffer.getvalue()).decode('utf-8') 33 | } 34 | r = requests.post(url=f'http://127.0.0.1:7860/sdapi/v1/extra-single-image', json=payload).json() 35 | return Image.open(io.BytesIO(base64.b64decode(r["image"].split(",",1)[0]))) 36 | -------------------------------------------------------------------------------- /config.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "prompt": ", RAW Photo, sharp, detailed, 256k film still from a color movie made in 1980, good lighting, good photography, sharp focus, movie still, film grain", 3 | "negative_prompt": "blurry, frame, topless", 4 | "image_width": 704, 5 | "image_height": 512, 6 | "sampler_index": "DPM++ SDE Karras", 7 | "steps": 60, 8 | "open_ai_key": "", 9 | "chatgpt_api": "True", 10 | "elevenlabs_key": "", 11 | "voice_type": "default", 12 | "music_genre": "folk", 13 | "music_volume": "0.5", 14 | "voice_gender": 15 | { 16 | "domi":"female" 17 | }, 18 | "voice_exclude": ["sam", "worf", "picard"], 19 | "character_mappings": 20 | { 21 | "Josh":["host"] 22 | }, 23 | "upscale_enabled": "True", 24 | "upscaler_1" : "Lanczos", 25 | "upscaling_resize": 2, 26 | "text_prompt_prefix":"", 27 | "text_prompt_suffix":"", 28 | "styles": 29 | { 30 | "antiques":{ 31 | "music_genre": "myuu_dark_ambient", 32 | "text_prompt_prefix":"write a transcript of an episode of a show called \"Antiques and appraisals\".", 33 | "character_mappings": 34 | { 35 | "antoni":["host"] 36 | } 37 | }, 38 | "star":{ 39 | "text_prompt_prefix":"write a transcript of an episode of Star Trek the Next Generation", 40 | "prompt": ", RAW Photo, sharp, detailed, 256k sci-fi film still from a color movie made in 1990, good lighting, good photography, sharp focus, movie still, film grain, startrekthenextgeneration", 41 | "negative_prompt": "blurry, frame, topless, cartoon", 42 | "voice_exclude": ["sam"], 43 | "character_mappings": 44 | { 45 | "antoni":["narrator"], 46 | "worf":["worf"], 47 | "picard":["picard"], 48 | "data":["data"], 49 | "geordi":["geordi"], 50 | "riker":["riker"] 51 | 52 | } 53 | } 54 | } 55 | } -------------------------------------------------------------------------------- /TextGenUtil.py: -------------------------------------------------------------------------------- 1 | import openai 2 | 3 | import requests 4 | import json 5 | 6 | def chatGPT(prompt): 7 | headers = { 8 | 'Authorization': 'Bearer ' + openai.api_key, 9 | "content-type": "application/json" 10 | } 11 | data = { 12 | "model": "gpt-3.5-turbo", 13 | "messages": [{"role": "user", "content": prompt}] 14 | } 15 | url = "https://api.openai.com/v1/chat/completions" 16 | response = requests.post(url, headers=headers, json=data).json() 17 | return response["choices"][0]["message"]["content"] 18 | 19 | def gpt3_completion(prompt, engine='text-davinci-003', temp=1.1, top_p=1.0, tokens=4000, freq_pen=0.0, pres_pen=0.0, stop=['asdfasdf']): 20 | max_retry = 5 21 | retry = 0 22 | while True: 23 | try: 24 | response = openai.Completion.create( 25 | engine=engine, # use this for standard models 26 | #model=engine, # use this for finetuned model 27 | prompt=prompt, 28 | temperature=temp, 29 | max_tokens=tokens, 30 | top_p=top_p, 31 | frequency_penalty=freq_pen, 32 | presence_penalty=pres_pen, 33 | stop=stop) 34 | text = response['choices'][0]['text'].strip() 35 | #save_gpt3_log(prompt, text) 36 | return text 37 | except Exception as oops: 38 | retry += 1 39 | if retry >= max_retry: 40 | return None 41 | print('Error communicating with OpenAI:', oops) 42 | sleep(1) 43 | 44 | def generateText(prompt, config): 45 | openai.api_key = config["open_ai_key"] 46 | if config["chatgpt_api"]: 47 | return chatGPT(prompt) 48 | else: 49 | return gpt3_completion(prompt) 50 | 51 | -------------------------------------------------------------------------------- /GenerateVideo.py: -------------------------------------------------------------------------------- 1 | from textblob import TextBlob 2 | import nltk 3 | import json 4 | import requests 5 | from pathlib import Path 6 | from datetime import datetime 7 | from time import sleep 8 | import re 9 | import os 10 | 11 | import ImageUtil 12 | import VoiceUtil 13 | import VideoUtil 14 | import SubtitleUtil 15 | 16 | def LoadConfig(text): 17 | with open('config.json', 'r') as f: 18 | config = json.load(f) 19 | paragraphs=text.split('\n') 20 | if paragraphs[0].startswith("!") and paragraphs[0].strip().endswith("!"): 21 | match = re.search(r'(?<=!)(.*?)(?=!)', paragraphs[0]) 22 | styles= match.group(1).split(",") 23 | 24 | for s in styles: 25 | if s.lower() in config["styles"].keys(): 26 | for k in config["styles"][s].keys(): 27 | config[k]=config["styles"][s][k] 28 | del paragraphs[0] 29 | config["paragraphs"]=paragraphs 30 | return config 31 | 32 | def BackupText(text, config): 33 | path="output\\" + text[:10].replace(":","_").replace(" ","_").replace("\n","_") +"_" + datetime.now().strftime("%m_%d_%Y_%H_%M_%S") 34 | os.makedirs(path) 35 | filename = "Gen_" + datetime.now().strftime("%m_%d_%Y%H_%M_%S")+ "_.txt" 36 | with open(path + "\\" + filename, 'w', encoding='utf-8') as outfile: 37 | if "text_prompt" in config.keys(): 38 | outfile.write(config["text_prompt"] + "\n") 39 | outfile.write(text + "\n") 40 | return path 41 | 42 | def Generate(text, config): 43 | VoiceUtil.setup(config) 44 | ind=0 45 | imageFiles=[] 46 | audioFiles=[] 47 | path=BackupText(text, config) 48 | paragraphs = config["paragraphs"] 49 | subtitle=["",0] 50 | for para in paragraphs: 51 | if para.strip(): 52 | print(para + "\n") 53 | imagepath=ImageUtil.generate_image(para, ind, path, config) 54 | imageFiles.append(imagepath) 55 | voicepath=VoiceUtil.create_dialogue(para, path, ind, config) 56 | audioFiles.append(voicepath) 57 | subtitle= SubtitleUtil.updateSubtitle(subtitle, voicepath, para, ind) 58 | ind=ind+1 59 | with open(path + "\\" + "final.srt", 'w', encoding='utf-8') as outfile: 60 | outfile.write(subtitle[0]) 61 | VideoUtil.combine_videos(path, imageFiles, audioFiles, config) 62 | -------------------------------------------------------------------------------- /VoiceUtil.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | import random 4 | import pyttsx3 5 | import gender_guesser.detector as gender 6 | import re 7 | 8 | apiKey = "" 9 | availableMaleVoices ={} 10 | availableFemaleVoices = {} 11 | voiceGender = {"domi":"female"} 12 | voices = {} 13 | narratorName = "narrator" 14 | 15 | def setup(config): 16 | apiKey=config["elevenlabs_key"] 17 | voice_type=config["voice_type"] 18 | if config["voice_type"]=="ElevenLabs": 19 | api_url = "https://api.elevenlabs.io/v1/voices" 20 | headers = { 21 | 'xi-api-key': apiKey 22 | } 23 | response = requests.get(api_url, headers=headers).json() 24 | for item in response["voices"]: 25 | if item["name"].lower() in config["voice_exclude"]: 26 | continue 27 | elif item["name"].lower() in config["character_mappings"].keys(): 28 | for m in config["character_mappings"][item["name"].lower()]: 29 | voices[m]=item["voice_id"] 30 | elif item["name"].lower() in voiceGender.keys(): 31 | if voiceGender[item["name"].lower()] == "female": 32 | availableFemaleVoices[item["name"].lower()]=item["voice_id"] 33 | else: 34 | availableMaleVoices[item["name"].lower()]=item["voice_id"] 35 | else: 36 | d = gender.Detector(case_sensitive=False) 37 | if detect_gender(item["name"],d): 38 | availableFemaleVoices[item["name"].lower()]=item["voice_id"] 39 | else: 40 | availableMaleVoices[item["name"].lower()]=item["voice_id"] 41 | else: 42 | return 43 | 44 | def detect_gender(text, detector): 45 | genderCheck = detector.get_gender(text.lower().split(" ")[0]) 46 | if "female" in genderCheck: 47 | return True 48 | return False 49 | 50 | def create_dialogue(text, path, ind, config): 51 | curVoice="None" 52 | line=text.split(":",maxsplit=1) 53 | if(len(line)>1): 54 | dialogue=line[1] 55 | if config["voice_type"]=="ElevenLabs": 56 | #Don't voice bracketed text 57 | pattern = r"[\(\[].*?[\)\]]" 58 | dialogue= re.sub(pattern, '', dialogue) 59 | if line[0].lower() in voices: 60 | curvoice=voices[line[0].lower()] 61 | else: 62 | d = gender.Detector(case_sensitive=False) 63 | if detect_gender(line[0],d): 64 | if len(availableFemaleVoices.keys())>0: 65 | key = random.choice(list(availableFemaleVoices.keys())) 66 | curvoice=voices[line[0].lower()]= availableFemaleVoices[key] 67 | availableFemaleVoices.pop(key) 68 | else: 69 | curvoice=voices[line[0].lower()]=random.choice(list(voices.values())) 70 | else: 71 | if len(availableMaleVoices.keys())>0: 72 | key = random.choice(list(availableMaleVoices.keys())) 73 | curvoice=voices[line[0].lower()]= availableMaleVoices[key] 74 | availableMaleVoices.pop(key) 75 | else: 76 | curvoice=voices[line[0].lower()]=random.choice(list(voices.values())) 77 | return generate_voice_ElevenAI(path, ind, dialogue, curvoice, config["elevenlabs_key"]) 78 | else: 79 | return generate_voice_pyttsx3(path, ind, dialogue) 80 | 81 | else: 82 | #Need default if line has no speaker 83 | if config["voice_type"]=="ElevenLabs": 84 | if narratorName.lower() in voices.keys(): 85 | return generate_voice_ElevenAI(path, ind, text, voices[narratorName.lower()], config["elevenlabs_key"]) 86 | else: 87 | key = random.choice(list(availableMaleVoices.keys())) 88 | curvoice=voices[narratorName.lower()]=availableMaleVoices[key] 89 | availableMaleVoices.pop(key) 90 | return generate_voice_ElevenAI(path, ind, text, curvoice, config["elevenlabs_key"]) 91 | else: 92 | return generate_voice_pyttsx3(path, ind, text) 93 | 94 | def generate_voice_pyttsx3(path, ind, dialogue): 95 | engine = pyttsx3.init() 96 | voice_path=path+ "\\audio_" + str(ind) + ".mp3" 97 | engine.save_to_file(dialogue, voice_path) 98 | engine.runAndWait() 99 | engine.stop() 100 | return voice_path 101 | 102 | def generate_voice_ElevenAI(path, ind, dialogue, voice_id, apiKey): 103 | url = 'https://api.elevenlabs.io/v1/text-to-speech/' + voice_id + '/stream' 104 | headers = { 105 | 'accept': 'audio/mpeg', 106 | 'xi-api-key': apiKey, 107 | 'Content-Type': 'application/json' 108 | } 109 | data = { 110 | "text": dialogue, 111 | "voice_settings": { 112 | "stability": 0, 113 | "similarity_boost": 0 114 | } 115 | } 116 | 117 | response = requests.post(url, headers=headers, json=data) 118 | if response.status_code == 200: 119 | voice_path=path + "\\audio_" + str(ind) + ".mp3" 120 | with open(voice_path, 'wb') as f: 121 | f.write(response.content) 122 | return voice_path 123 | else: 124 | print('Error:', response.status_code, response.content.decode()) 125 | return None 126 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Creative Commons Legal Code 2 | 3 | CC0 1.0 Universal 4 | 5 | CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE 6 | LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN 7 | ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS 8 | INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES 9 | REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS 10 | PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM 11 | THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED 12 | HEREUNDER. 13 | 14 | Statement of Purpose 15 | 16 | The laws of most jurisdictions throughout the world automatically confer 17 | exclusive Copyright and Related Rights (defined below) upon the creator 18 | and subsequent owner(s) (each and all, an "owner") of an original work of 19 | authorship and/or a database (each, a "Work"). 20 | 21 | Certain owners wish to permanently relinquish those rights to a Work for 22 | the purpose of contributing to a commons of creative, cultural and 23 | scientific works ("Commons") that the public can reliably and without fear 24 | of later claims of infringement build upon, modify, incorporate in other 25 | works, reuse and redistribute as freely as possible in any form whatsoever 26 | and for any purposes, including without limitation commercial purposes. 27 | These owners may contribute to the Commons to promote the ideal of a free 28 | culture and the further production of creative, cultural and scientific 29 | works, or to gain reputation or greater distribution for their Work in 30 | part through the use and efforts of others. 31 | 32 | For these and/or other purposes and motivations, and without any 33 | expectation of additional consideration or compensation, the person 34 | associating CC0 with a Work (the "Affirmer"), to the extent that he or she 35 | is an owner of Copyright and Related Rights in the Work, voluntarily 36 | elects to apply CC0 to the Work and publicly distribute the Work under its 37 | terms, with knowledge of his or her Copyright and Related Rights in the 38 | Work and the meaning and intended legal effect of CC0 on those rights. 39 | 40 | 1. Copyright and Related Rights. A Work made available under CC0 may be 41 | protected by copyright and related or neighboring rights ("Copyright and 42 | Related Rights"). Copyright and Related Rights include, but are not 43 | limited to, the following: 44 | 45 | i. the right to reproduce, adapt, distribute, perform, display, 46 | communicate, and translate a Work; 47 | ii. moral rights retained by the original author(s) and/or performer(s); 48 | iii. publicity and privacy rights pertaining to a person's image or 49 | likeness depicted in a Work; 50 | iv. rights protecting against unfair competition in regards to a Work, 51 | subject to the limitations in paragraph 4(a), below; 52 | v. rights protecting the extraction, dissemination, use and reuse of data 53 | in a Work; 54 | vi. database rights (such as those arising under Directive 96/9/EC of the 55 | European Parliament and of the Council of 11 March 1996 on the legal 56 | protection of databases, and under any national implementation 57 | thereof, including any amended or successor version of such 58 | directive); and 59 | vii. other similar, equivalent or corresponding rights throughout the 60 | world based on applicable law or treaty, and any national 61 | implementations thereof. 62 | 63 | 2. Waiver. To the greatest extent permitted by, but not in contravention 64 | of, applicable law, Affirmer hereby overtly, fully, permanently, 65 | irrevocably and unconditionally waives, abandons, and surrenders all of 66 | Affirmer's Copyright and Related Rights and associated claims and causes 67 | of action, whether now known or unknown (including existing as well as 68 | future claims and causes of action), in the Work (i) in all territories 69 | worldwide, (ii) for the maximum duration provided by applicable law or 70 | treaty (including future time extensions), (iii) in any current or future 71 | medium and for any number of copies, and (iv) for any purpose whatsoever, 72 | including without limitation commercial, advertising or promotional 73 | purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each 74 | member of the public at large and to the detriment of Affirmer's heirs and 75 | successors, fully intending that such Waiver shall not be subject to 76 | revocation, rescission, cancellation, termination, or any other legal or 77 | equitable action to disrupt the quiet enjoyment of the Work by the public 78 | as contemplated by Affirmer's express Statement of Purpose. 79 | 80 | 3. Public License Fallback. Should any part of the Waiver for any reason 81 | be judged legally invalid or ineffective under applicable law, then the 82 | Waiver shall be preserved to the maximum extent permitted taking into 83 | account Affirmer's express Statement of Purpose. In addition, to the 84 | extent the Waiver is so judged Affirmer hereby grants to each affected 85 | person a royalty-free, non transferable, non sublicensable, non exclusive, 86 | irrevocable and unconditional license to exercise Affirmer's Copyright and 87 | Related Rights in the Work (i) in all territories worldwide, (ii) for the 88 | maximum duration provided by applicable law or treaty (including future 89 | time extensions), (iii) in any current or future medium and for any number 90 | of copies, and (iv) for any purpose whatsoever, including without 91 | limitation commercial, advertising or promotional purposes (the 92 | "License"). The License shall be deemed effective as of the date CC0 was 93 | applied by Affirmer to the Work. Should any part of the License for any 94 | reason be judged legally invalid or ineffective under applicable law, such 95 | partial invalidity or ineffectiveness shall not invalidate the remainder 96 | of the License, and in such case Affirmer hereby affirms that he or she 97 | will not (i) exercise any of his or her remaining Copyright and Related 98 | Rights in the Work or (ii) assert any associated claims and causes of 99 | action with respect to the Work, in either case contrary to Affirmer's 100 | express Statement of Purpose. 101 | 102 | 4. Limitations and Disclaimers. 103 | 104 | a. No trademark or patent rights held by Affirmer are waived, abandoned, 105 | surrendered, licensed or otherwise affected by this document. 106 | b. Affirmer offers the Work as-is and makes no representations or 107 | warranties of any kind concerning the Work, express, implied, 108 | statutory or otherwise, including without limitation warranties of 109 | title, merchantability, fitness for a particular purpose, non 110 | infringement, or the absence of latent or other defects, accuracy, or 111 | the present or absence of errors, whether or not discoverable, all to 112 | the greatest extent permissible under applicable law. 113 | c. Affirmer disclaims responsibility for clearing rights of other persons 114 | that may apply to the Work or any use thereof, including without 115 | limitation any person's Copyright and Related Rights in the Work. 116 | Further, Affirmer disclaims responsibility for obtaining any necessary 117 | consents, permissions or other rights required for any use of the 118 | Work. 119 | d. Affirmer understands and acknowledges that Creative Commons is not a 120 | party to this document and has no duty or obligation with respect to 121 | this CC0 or use of the Work. 122 | --------------------------------------------------------------------------------