├── requirements.txt ├── .isort.cfg ├── requirements-dev.txt ├── .gitignore ├── .flake8 ├── pyproject.toml ├── .editorconfig ├── .pre-commit-config.yaml ├── sample_cli.py ├── video_creator_main.py ├── tester.py ├── cli_interface.py └── README.md /requirements.txt: -------------------------------------------------------------------------------- 1 | sentence-mixing==1.1.3 2 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | known_third_party = sentence_mixing -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | pre-commit==2.4.0 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .downloads/ 2 | **/__pycache__/ 3 | .env/ 4 | config.json 5 | out.wav 6 | out.mp4 7 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | 2 | [flake8] 3 | ignore = E203, E266, E501, W503, F403, F401 4 | max-line-length = 79 5 | max-complexity = 18 6 | select = B,C,E,F,W,T4,B9 7 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 79 3 | include = '\.pyi?$' 4 | exclude = ''' 5 | /( 6 | \.git 7 | | \.env 8 | )/ 9 | ''' 10 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # https://editorconfig.org/ 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 4 8 | insert_final_newline = true 9 | trim_trailing_whitespace = true 10 | end_of_line = lf 11 | charset = utf-8 12 | 13 | # Docstrings and comments use max_line_length = 88 14 | [*.py] 15 | max_line_length = 88 16 | 17 | # Use 2 spaces for the HTML files 18 | [*.html] 19 | indent_size = 2 20 | 21 | # The JSON files contain newlines inconsistently 22 | [*.json] 23 | indent_size = 4 24 | insert_final_newline = unset 25 | 26 | # YAML Files 27 | [*.{yml,yaml}] 28 | indent_size = 2 29 | 30 | [*.txt] 31 | max_line_length = 79 32 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | fail_fast: true 2 | repos: 3 | # Format code to pattern (Black) 4 | - repo: https://github.com/psf/black 5 | rev: 19.10b0 6 | hooks: 7 | - id: black 8 | language_version: python3 9 | # Checking if large files are present 10 | - repo: https://github.com/pre-commit/pre-commit-hooks 11 | rev: v2.5.0 12 | hooks: 13 | - id: check-added-large-files 14 | # Checking code compliance to pep8 (Flake 8, McCabe) 15 | - repo: https://gitlab.com/pycqa/flake8 16 | rev: 3.7.9 17 | hooks: 18 | - id: flake8 19 | # Sorting imports (isort) 20 | - repo: https://github.com/asottile/seed-isort-config 21 | rev: v2.1.0 22 | hooks: 23 | - id: seed-isort-config 24 | - repo: https://github.com/pre-commit/mirrors-isort 25 | rev: v4.3.21 26 | hooks: 27 | - id: isort 
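# To enable these hooks locally, install the development requirements and register
# the git hook (standard pre-commit usage):
#   pip install -r requirements-dev.txt
#   pre-commit install
# `pre-commit run --all-files` then checks the whole repository on demand.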
-------------------------------------------------------------------------------- /sample_cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import sentence_mixing.logic.parameters as params 4 | import sentence_mixing.sentence_mixer as sm 5 | 6 | DESCRIPTION = "CLI Interface to build a sentence from a video" 7 | 8 | SEED_HELP = f"change the seed used in phonem association's score attribution (default: {params.DEFAULT_SEED})" 9 | TARGET_SENTENCE_HELP = "a sentence you want to hear from the video" 10 | CONFIG_PATH_HELP = "path to the json config file" 11 | VIDEO_URL_HELP = "a YouTube url of the wanted video" 12 | 13 | if __name__ == "__main__": 14 | parser = argparse.ArgumentParser(description=DESCRIPTION) 15 | parser.add_argument( 16 | "-s", "--seed", default=params.DEFAULT_SEED, help=SEED_HELP, 17 | ) 18 | parser.add_argument( 19 | "sentence", 20 | metavar="TARGET_SENTENCE", 21 | action="store", 22 | help=TARGET_SENTENCE_HELP, 23 | ) 24 | parser.add_argument( 25 | "config_path", 26 | metavar="CONFIG_PATH", 27 | action="store", 28 | help=CONFIG_PATH_HELP, 29 | ) 30 | parser.add_argument( 31 | "video_urls", 32 | metavar="VIDEO_URL", 33 | nargs="+", 34 | action="store", 35 | help=VIDEO_URL_HELP, 36 | ) 37 | 38 | args = parser.parse_args() 39 | 40 | sm.prepare_sm_config_file(args.config_path) 41 | videos = sm.get_videos(args.video_urls) 42 | print(sm.process_sm(args.sentence, videos, args.seed)[0]) 43 | -------------------------------------------------------------------------------- /video_creator_main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import concurrent.futures 5 | 6 | import sentence_mixing.sentence_mixer as sm 7 | from sentence_mixing.video_creator.download import dl_video 8 | from sentence_mixing.video_creator.video import create_video_file 9 | 10 | from cli_interface import loop_interface 11 | 12 | VIDEO_OUT = "out.mp4" 13 | 14 | 15 | def main(audio_command, config_path, skip_first, urls, seed=0): 16 | sm.prepare_sm_config_file(config_path) 17 | 18 | with concurrent.futures.ThreadPoolExecutor() as executor: 19 | futures_vids = executor.map(dl_video, urls) 20 | futures_vids_audio = executor.map(sm.get_videos, [urls], [seed]) 21 | 22 | total_timestamps, total_text, videos = loop_interface( 23 | audio_command, futures_vids_audio 24 | ) 25 | 26 | paths = list(futures_vids) 27 | for v, p in zip(videos, paths): 28 | n = len(v._base_path) 29 | assert p[:n] == v._base_path 30 | v.extension = p[n + 1 :] 31 | 32 | create_video_file(total_timestamps, VIDEO_OUT) 33 | 34 | return total_text 35 | 36 | 37 | DEFAULT_AUDIO_COMMAND = 'tycat "{}"' 38 | 39 | DESCRIPTION = "CLI Interface to create sentence mixing videos." 
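# Note: DEFAULT_AUDIO_COMMAND is a shell template; "{}" is substituted with the path
# of the generated preview audio file before the command is run through os.system()
# (see cli_interface.loop_interface). Any audio player command taking a file path
# can be supplied instead via the -c/--audio-command option.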
40 | 41 | AUDIO_COMMAND_HELP = f"a command to launch a playback of an audio file passed as a format parameter (default: {DEFAULT_AUDIO_COMMAND})" 42 | CONFIG_PATH_HELP = "path to the json config file" 43 | VIDEO_URL_HELP = "a YouTube url of the wanted video" 44 | SKIP_ANALYSIS_HELP = "tell the generator to skip the analysis (default: false)" 45 | 46 | if __name__ == "__main__": 47 | 48 | parser = argparse.ArgumentParser(description=DESCRIPTION) 49 | parser.add_argument( 50 | "-c", 51 | "--audio-command", 52 | default=DEFAULT_AUDIO_COMMAND, 53 | help=AUDIO_COMMAND_HELP, 54 | ) 55 | parser.add_argument( 56 | "config_path", 57 | metavar="CONFIG_PATH", 58 | action="store", 59 | help=CONFIG_PATH_HELP, 60 | ) 61 | parser.add_argument( 62 | "video_urls", 63 | metavar="VIDEO_URL", 64 | nargs="+", 65 | action="store", 66 | help=VIDEO_URL_HELP, 67 | ) 68 | parser.add_argument( 69 | "-s", 70 | "--skip", 71 | dest="skip_first_analysis", 72 | action="store_true", 73 | default=False, 74 | help=SKIP_ANALYSIS_HELP, 75 | ) 76 | 77 | args = parser.parse_args() 78 | 79 | print( 80 | main( 81 | args.audio_command, 82 | args.config_path, 83 | args.skip_first_analysis, 84 | args.video_urls, 85 | ) 86 | ) 87 | -------------------------------------------------------------------------------- /tester.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | import sentence_mixing.logic.parameters as params 5 | import sentence_mixing.logic.video_processing 6 | import sentence_mixing.sentence_mixer as sm 7 | from sentence_mixing.logic.display import combo_displayer 8 | from sentence_mixing.serialize import load, save 9 | 10 | import cli_interface 11 | 12 | 13 | def preprocess_and_align(video_urls): 14 | """ 15 | Build all the model objects for several video urls by downloading videos and analysing the 16 | videos using Montreal aligner. 
17 | 18 | Argument: 19 | video_urls - list containing youtube videos url 20 | 21 | Returns a list containing all the video objects 22 | """ 23 | 24 | # Creates basic Video objects 25 | videos = sentence_mixing.logic.video_processing._create_videos(video_urls) 26 | 27 | # Enriches Video objects with SubtitleLine objects 28 | sentence_mixing.logic.video_processing._create_subs(videos) 29 | 30 | # Enriches Video objects with AudioWords and AudioPhonem objects 31 | for video in videos: 32 | for i, subtitle in enumerate(video.subtitles): 33 | textgrid_path = os.path.join( 34 | out_dir, video.get_hashed_basename() + str(i) + ".TextGrid" 35 | ) 36 | sentence_mixing.logic.video_processing._parse_align_result( 37 | textgrid_path, subtitle 38 | ) 39 | 40 | return videos 41 | 42 | 43 | DESCRIPTION = "CLI Interface to build a sentence from a video" 44 | 45 | SEED_HELP = f"change the seed used in phonem association's score attribution (default: {params.DEFAULT_SEED})" 46 | TARGET_SENTENCE_HELP = "a sentence you want to hear from the video" 47 | CONFIG_PATH_HELP = "path to the json config file" 48 | VIDEO_URL_HELP = "a YouTube url of the wanted video" 49 | 50 | out_dir = None 51 | 52 | if __name__ == "__main__": 53 | parser = argparse.ArgumentParser() 54 | parser.add_argument( 55 | "-s", "--seed", default=params.DEFAULT_SEED, help=SEED_HELP, 56 | ) 57 | parser.add_argument( 58 | "out_dir", action="store", help="textgrids directory", 59 | ) 60 | parser.add_argument( 61 | "sentence", 62 | metavar="TARGET_SENTENCE", 63 | action="store", 64 | help=TARGET_SENTENCE_HELP, 65 | ) 66 | parser.add_argument( 67 | "config_path", 68 | metavar="CONFIG_PATH", 69 | action="store", 70 | help=CONFIG_PATH_HELP, 71 | ) 72 | parser.add_argument( 73 | "video_urls", 74 | metavar="VIDEO_URL", 75 | nargs="+", 76 | action="store", 77 | help=VIDEO_URL_HELP, 78 | ) 79 | 80 | args = parser.parse_args() 81 | out_dir = args.out_dir 82 | 83 | sm.prepare_sm_config_file(args.config_path) 84 | videos = sm.get_videos(args.video_urls) 85 | combos = sm.process_sm(args.sentence, videos, args.seed) 86 | 87 | print(combo_displayer(combos[0])) 88 | sentence_mixing.video_creator.audio.concat_wav( 89 | cli_interface.AUDIO_FILE_PATH, combos[0].get_audio_phonems() 90 | ) 91 | -------------------------------------------------------------------------------- /cli_interface.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import sentence_mixing.sentence_mixer as sm 4 | from sentence_mixing.logic.display import combo_displayer 5 | from sentence_mixing.model.exceptions import PhonemError, TokenAmbiguityError 6 | from sentence_mixing.serialize import load, save 7 | from sentence_mixing.video_creator.audio import concat_wav 8 | 9 | # Allows command history 10 | try: 11 | READLINE_IMPORTED = True 12 | import readline 13 | readline.set_auto_history(False) 14 | except: 15 | # If python have been compiled without readline 16 | READLINE_IMPORTED = False 17 | 18 | AUDIO_FILE_PATH = "out.wav" 19 | 20 | 21 | def clear_screen(): 22 | os.system("cls" if os.name == "nt" else "clear") 23 | 24 | 25 | def get_sentence(text): 26 | if text is not None: 27 | print("Previous sentences:\n", text) 28 | sentence = input("Enter a sentence: ") 29 | 30 | if READLINE_IMPORTED: 31 | readline.add_history(sentence) 32 | 33 | return sentence 34 | 35 | 36 | def loop_interface(audio_command, video_futures): 37 | total_timestamps = [] 38 | total_text = "" 39 | timestamps_buffer = [] 40 | timestamps_buffer_sentence = [] 41 | 42 | 
sentence = get_sentence(None) 43 | videos = None 44 | 45 | while sentence != "": 46 | timestamps = [] 47 | combo = None 48 | available_combos = [] 49 | 50 | edit = False 51 | store = False 52 | valid = False 53 | load_audio_index = None 54 | i = 0 55 | while not valid and not edit: 56 | 57 | # Stores previous audio in buffer 58 | if store: 59 | timestamps_buffer.append(timestamps) 60 | timestamps_buffer_sentence.append(sentence) 61 | store = False 62 | 63 | if load_audio_index is not None: 64 | timestamps = timestamps_buffer[load_audio_index] 65 | load_audio_index = None 66 | else: 67 | if len(available_combos) == 0: 68 | bad_sentence = True 69 | while bad_sentence: 70 | try: 71 | if videos is None: 72 | print("downloading...") 73 | videos = list(video_futures)[0] 74 | available_combos = sm.process_sm(sentence, videos) 75 | bad_sentence = False 76 | except KeyError as e: 77 | print(e, "not recognized") 78 | sentence = get_sentence(total_text) 79 | except PhonemError as e: 80 | print( 81 | e, 82 | "Try to change your sentence or add more videos.", 83 | ) 84 | sentence = get_sentence(total_text) 85 | except TokenAmbiguityError as e: 86 | print( 87 | e, "Please change this word", 88 | ) 89 | sentence = get_sentence(total_text) 90 | combo = available_combos.pop(0) 91 | timestamps = combo.get_audio_phonems() 92 | 93 | print(combo_displayer(combo)) 94 | concat_wav(AUDIO_FILE_PATH, timestamps) 95 | 96 | os.system(audio_command.format(AUDIO_FILE_PATH)) 97 | 98 | if timestamps_buffer_sentence: 99 | print("Stashed audios:") 100 | for i, stashed_sentence in enumerate( 101 | timestamps_buffer_sentence 102 | ): 103 | print(i, ".", stashed_sentence) 104 | print("") 105 | 106 | line = input( 107 | "Enter 'y' to validate, 'e' to edit the sentence, 's' to store this audio in the buffer, 'l' + index for loading previously stored audio, otherwise just press enter: " 108 | ) 109 | valid = line == "y" 110 | edit = line == "e" 111 | store = line == "s" 112 | 113 | if line.startswith("l "): 114 | index = line.split(" ")[1] 115 | if index.isdigit(): 116 | index = int(index) 117 | if -1 < index < len(timestamps_buffer): 118 | load_audio_index = index 119 | print(load_audio_index) 120 | 121 | i += 1 122 | clear_screen() 123 | if not edit: 124 | total_timestamps.extend(timestamps) 125 | total_text += "\n" + sentence 126 | 127 | save(total_timestamps, total_text, name="video.json") 128 | sentence = get_sentence(total_text) 129 | clear_screen() 130 | return total_timestamps, total_text, videos 131 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Sentence Mixing generator - command line interface 2 | 3 | This program uses the [Sentence Mixing library](http://github.com/pop123123123/sentence-mixing) to **generate** sentence mixing styled **Youtube Poops**. 4 | 5 | You can see plenty of examples of application results [here](https://www.youtube.com/channel/UCwuz-2Hjya-RSD_7rdg3OJg) (French language) 6 | 7 | You can get it via the release tab or install it manually. 8 | 9 | ## Installation 10 | 11 | Here is a [tutorial video](https://www.youtube.com/watch?v=JL6l4rNN3tU) showing both installation and usage of the project ([French version](https://www.youtube.com/watch?v=emhjAkYXr-8)). 12 | 13 | ### Release 14 | 15 | 1. Choose a [release version](https://github.com/pop123123123/CLI_sentence_mixing/releases) 16 | 2. 
2. Extract the release archive
17 | 
18 | #### Language support
19 | 
20 | The default language of the releases is French. You can, however, use any other language available in ```aligner/SM-Dictionaries```.
21 | 
22 | All you have to do is download the appropriate [MFA pretrained model](https://montreal-forced-aligner.readthedocs.io/en/v1.0/pretrained_models.html#pretrained-acoustic-models) and update ```config.json``` in the release's root directory.
23 | 
24 | ##### Example (English):
25 | 1. Download the [English acoustic model](https://github.com/MontrealCorpusTools/mfa-models/releases/download/acoustic-archive-v1.0/english.zip) and put it in ```aligner/pretrained_models```
26 | 2. In ```config.json```, change:
27 |    1. the ```lang``` value to ```en```
28 |    2. the ```trained_model``` value to ```aligner/pretrained_models/english.zip```
29 |    3. the ```dict_path``` value to ```aligner/SM-Dictionaries/en/english.dict```
30 |    4. the ```dict_consonant_vowel_path``` value to ```aligner/SM-Dictionaries/en/en_consonant_vowel.dict```
31 | 
32 | (For the Windows release, replace the ```/``` characters with ```\\```.)
33 | 
34 | ### From source
35 | 
36 | 1. Install the requirements with ```pip install -r requirements.txt```
37 | 2. Set up the external dependencies by following the tutorial on the [SM library GitHub page](https://github.com/pop123123123/sentence-mixing)
38 | 
39 | #### Sanity check
40 | 
41 | Use the file ```sample_cli.py```, a minimal example for the Sentence Mixing library.
42 | If no error appears, the Sentence Mixing library and the external dependencies are correctly installed.
43 | 
44 | Example:
45 | ```python sample_cli.py "Bonjour" ../config.json https://www.youtube.com/watch?v=4czmCgJryUM```
46 | 
47 | ## Program usage
48 | 
49 | Here is a [tutorial video](https://www.youtube.com/watch?v=JL6l4rNN3tU) showing both installation and usage of the project ([French version](https://www.youtube.com/watch?v=emhjAkYXr-8)).
50 | The following sections explain how to run and interact with the CLI YouTube Poop generation program.
51 | 
52 | ### Running the script
53 | 
54 | The file to run depends on your installation:
55 | * **Linux** release: ```SentenceMixerCLI```
56 | * **Windows** release: ```SentenceMixerCLI.exe```
57 | * **From source:** ```video_creator_main.py```
58 | 
59 | Run the executable with your JSON configuration file as the first argument, followed by as many YouTube URLs as you want.
60 | 
61 | Examples:
62 | * **Linux:** ```./SentenceMixerCLI config.json 'https://www.youtube.com/watch?v=udmDOaHN9no' 'https://www.youtube.com/watch?v=7CTH59GCNko'```
63 | * **Windows:** ```.\SentenceMixerCLI.exe config.json 'https://www.youtube.com/watch?v=udmDOaHN9no' 'https://www.youtube.com/watch?v=7CTH59GCNko'```
64 | * **From source:** ```python video_creator_main.py config.json 'https://www.youtube.com/watch?v=udmDOaHN9no' 'https://www.youtube.com/watch?v=7CTH59GCNko'```
65 | 
66 | On Windows, the program might be blocked by your antivirus; be aware of this possibility.
67 | 
68 | #### Audio feedback
69 | 
70 | By default, the ```video_creator_main.py``` script uses the ```tycat``` command, available in the [Terminology](https://github.com/borisfaure/terminology) terminal emulator, to play audio feedback while building the video.
71 | 
72 | If you want to use another player, pass its command with the ```-c``` argument and put ```{}``` where the file name should go, as in the examples below.
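For instance, assuming ```ffmpeg``` is installed, its bundled ```ffplay``` player makes a lightweight feedback command (```-autoexit``` closes the player when playback ends, ```-nodisp``` hides the video window):
```python video_creator_main.py -c "ffplay -autoexit -nodisp {}" config.json 'https://www.youtube.com/watch?v=udmDOaHN9no'```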
73 | 
74 | Another example, with VLC:
75 | ```python video_creator_main.py -c "vlc --play-and-exit --no-repeat --no-loop {}" config.json 'https://www.youtube.com/watch?v=udmDOaHN9no'```
76 | 
77 | ##### For the Windows release
78 | 
79 | You will run into a problem if the path of the command you want to use contains a space.
80 | 
81 | Here is a simple trick to work around it. Let's say we want to use ```vlc``` as the audio feedback command.
82 | Create ```feedback.bat``` with the following content:
83 | ```bat
84 | "C:\Program Files\VideoLAN\VLC\vlc.exe" --play-and-exit --no-repeat --no-loop %*
85 | ```
86 | (The path to vlc may differ depending on your setup.)
87 | 
88 | Then, call the program with the following ```-c``` parameter:
89 | ```.\SentenceMixerCLI.exe config.json 'https://www.youtube.com/watch?v=VTaoMHjnRbM' -c ".\feedback.bat {}"```
90 | 
91 | 
92 | ### Interactive process
93 | 
94 | The audio sequence creation process works as follows:
95 | 1. Enter the chunk of the sentence you want to hear.
96 | 2. You will hear audio feedback for the chosen combo. At this point you can press:
97 |    * ```ENTER``` if you are not satisfied with the current combo for this chunk and want to try another one
98 |    * ```e``` to change the written chunk
99 |    * ```s``` to store the current combo in a buffer, so you can browse other combos without fear of losing this one
100 |    * ```l + number``` to load a previously stored combo
101 |    * ```y``` to accept the current combo and move on to the next chunk
102 | 3. Repeat from step 1 until you enter an empty chunk.
103 | 4. The full video is formed by joining all the selected combos.
104 | 
105 | ### Tips
106 | 
107 | * Punctuation symbols count as blank phonemes. They can be very useful for adding pauses to your sentences.
108 | * Do not hesitate to pass a lot of YouTube URLs as arguments. The more video material you provide, the more choice you will have to form the sentences you want.
109 | * An already downloaded video will not be downloaded again, so do not hesitate to use long videos to boost accuracy.
110 | * Advance little by little. There is no big difference in accuracy between long and short chunks, so it is generally preferable to write short chunks (about one word per chunk) to have more control over how each word or syllable sounds. For example, it is probably better to split the sentence *"Salut les gens"* into three chunks "*Salut*", "*les*", "*gens*" than to use one big chunk "*Salut les gens*".
111 | * For some long words, it can **sometimes** be useful to split them into small chunks. For example, you can split the word *Compagnie* into three chunks *"Con"*, "*Pas*", *"Nie"*.
112 | * If a word you want to hear can be heard directly in one of the source videos (same word or a homophone), don't split it into chunks; it will be taken directly from the source video. For example, if the word "Camp" is present in the source subtitles, the words "Camp", "Quand" and "Quant" will automatically be associated with the original "Camp".
113 | * Related to the previous point, try to use as many words as possible that appear in the original videos. The result will be much more accurate and understandable.
114 | 
115 | ### Restrictions and workarounds
116 | 
117 | #### Videos without subtitles
118 | 
119 | Only subtitled YouTube videos are usable. Automatically generated YouTube subtitles work as well.
120 | If you really want to use a video without subtitles, you will have to write the subtitles yourself.
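If you are unsure whether a video has usable subtitles, you can check before adding it to the corpus. For example, youtube-dl (which the project already relies on for downloads) can list the available subtitle tracks, including auto-generated ones:
```youtube-dl --list-subs 'https://www.youtube.com/watch?v=udmDOaHN9no'```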
121 | 
122 | #### Phoneme not found
123 | 
124 | If your source video corpus is very short or contains very few words, it is possible that some phonemes are never pronounced in the whole video material.
125 | In that case, you will not be able to use these particular phonemes.
126 | The problem can easily be solved by adding video material to the corpus, or by avoiding the missing phonemes.
127 | 
128 | #### Unrecognized word
129 | 
130 | You can only enter words that already exist in the dictionary. If you want to hear a word that is not in it, you have two solutions:
131 | 1. Add the word to the dictionary
132 | 2. Split the word into smaller words that are registered in the dictionary
133 | 
134 | For example, the word "Bolos" can be either:
135 | 1. Added to the dictionary by adding the line "BOLOS b o l o s"
136 | 2. Decomposed into three valid words: "Beau" "l'eau" "se"
137 | 
138 | #### Ambiguity intolerance
139 | 
140 | Unfortunately, the system does not tolerate ambiguous words for the moment.
141 | For example, the word "Est" can be pronounced:
142 | 1. With the phoneme "e" (to be)
143 | 2. With the phonemes "E s t" (east)
144 | 
145 | Thus, it is currently not possible to write down the word "Est". Until a disambiguation feature is available, just **replace the word with a homophone**.
146 | 
147 | For the word "Est", if you want to hear the phoneme "e", you can use the word "Et"; if you want to hear the phonemes "e s t", you can use the sequence of words "S" "te" (the phoneme coverage is not perfect in this case, however).
148 | 
149 | #### Unknown encoding idna
150 | 
151 | This error is particularly serious and shows up randomly when using an outdated version of youtube-dl.
152 | If you have installed the application from source, you can simply update youtube-dl via pip.
153 | There is no solution for release versions at the moment; the only workaround is to install the program from source.
154 | We are looking for a solution to this problem.
155 | 
156 | #### Unsupported languages
157 | 
158 | Please refer to [this section](https://github.com/pop123123123/sentence-mixing/blob/master/README.md#add-an-unsupported-language) of the Sentence Mixing library documentation.
159 | 
160 | ## Building the project
161 | 
162 | You can build your own release of the program in order to use it without installing the dependencies.
163 | Everything related to this feature is explained in the ```builder``` folder.
164 | --------------------------------------------------------------------------------