├── pages
│   ├── __init__.py
│   ├── about.py
│   ├── terms.py
│   ├── privacy_policy.py
│   └── home.py
├── robots.txt
├── assets
│   ├── favicon.ico
│   ├── buyMeACoffee.png
│   ├── footer.css
│   ├── audio.py
│   ├── message_correction.py
│   ├── chat_request.py
│   └── app.css
├── README.md
├── requirements.txt
├── callbacks
│   ├── tooltips.py
│   ├── placeholder_text.py
│   ├── translate.py
│   ├── conversation_settings.py
│   └── display_components.py
├── Dockerfile
├── sitemap.xml
├── footer.py
├── .gitignore
└── app.py

/pages/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/robots.txt:
--------------------------------------------------------------------------------
1 | User-agent: *
2 | Sitemap: https://practicealanguage.xyz/sitemap.xml
3 | 
--------------------------------------------------------------------------------
/assets/favicon.ico:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/Currie32/practice-a-language/HEAD/assets/favicon.ico
--------------------------------------------------------------------------------
/assets/buyMeACoffee.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/Currie32/practice-a-language/HEAD/assets/buyMeACoffee.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # practice-a-language
2 | 
3 | Practice speaking a language in different settings at [https://practicealanguage.xyz](https://practicealanguage.xyz)
4 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | dash==2.12.1
2 | dash-bootstrap-components==1.4.2
3 | dash-daq==0.5.0
4 | dash-selectable==0.0.1
5 | deep-translator==1.11.4
6 | Flask==2.2.5
7 | gTTS==2.3.2
8 | langdetect==1.0.9
9 | openai==1.12.0
10 | orjson==3.9.5
11 | pydub==0.25.1
12 | tenacity==8.2.2
13 | 
--------------------------------------------------------------------------------
/callbacks/tooltips.py:
--------------------------------------------------------------------------------
1 | from dash import Input, Output, callback
2 | 
3 | 
4 | @callback(
5 |     Output("tooltip-translate-language-known", "children"),
6 |     Input("language-known", "value"),
7 |     Input("language-learn", "value"),
8 | )
9 | def tooltip_translate_language_known_text(
10 |     language_known: str, language_learn: str
11 | ) -> str:
12 |     """
13 |     The tooltip text for the translate-language-known icon.
14 | 
15 |     Params:
16 |         language_known: The language that the user speaks.
17 |         language_learn: The language that the user wants to learn.
18 | 
19 |     Returns:
20 |         The text for the tooltip.
21 |     """
22 | 
23 |     return f"If you type your response in {language_known}, it will automatically be translated to {language_learn}."
24 | 
--------------------------------------------------------------------------------
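For reference, the callback above simply interpolates the two dropdown values. A hypothetical call and its output:

    tooltip_translate_language_known_text("English", "French")
    # -> "If you type your response in English, it will automatically
    #     be translated to French."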
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use the official Python base image
2 | FROM python:3.11-slim
3 | 
4 | # Install ffmpeg
5 | RUN apt-get update && apt-get install -y ffmpeg
6 | 
7 | # Set the working directory in the container
8 | WORKDIR /app
9 | 
10 | # Expose port 8080 for running the website
11 | EXPOSE 8080
12 | 
13 | # Copy the required files
14 | COPY app.py footer.py requirements.txt robots.txt sitemap.xml ./
15 | COPY assets assets
16 | COPY callbacks callbacks
17 | COPY pages pages
18 | 
19 | # Create a virtual environment and activate it
20 | RUN python -m venv venv
21 | ENV PATH="/app/venv/bin:$PATH"
22 | 
23 | # Install dependencies
24 | RUN pip3 install --no-cache-dir -r requirements.txt
25 | 
26 | # Specify the command to run the app
27 | CMD ["python3", "-m", "flask", "run", "--host=0.0.0.0", "--port=8080"]
28 | 
--------------------------------------------------------------------------------
/callbacks/placeholder_text.py:
--------------------------------------------------------------------------------
1 | from dash import Input, Output, callback
2 | 
3 | 
4 | @callback(
5 |     Output("user-response-text", "placeholder"),
6 |     Input("language-known", "value"),
7 |     Input("language-learn", "value"),
8 | )
9 | def user_input_placeholder(language_known: str, language_learn: str) -> str:
10 |     """
11 |     Set the placeholder text for the user response Input field.
12 | 
13 |     Params:
14 |         language_known: The language that the user speaks.
15 |         language_learn: The language that the user wants to learn.
16 | 
17 |     Returns:
18 |         Placeholder text for the user response Input field.
19 |     """
20 | 
21 |     if language_known and language_learn:
22 |         return f"Type your response in {language_learn} or {language_known}"
23 |     else:
24 |         return "Type your response"
25 | 
--------------------------------------------------------------------------------
/assets/footer.css:
--------------------------------------------------------------------------------
1 | #buy-me-a-coffee-logo {
2 |     max-width: 100px;
3 |     margin-top: -3px;
4 | }
5 | #email {
6 |     text-align: center;
7 | }
8 | #footer {
9 |     border-top: 1px solid #cccccc;
10 |     display: flex;
11 |     font-size: 13px;
12 |     justify-content: center;
13 |     margin: 20px auto 5px;
14 |     width: 100%;
15 | }
16 | #footer a {
17 |     margin: 23px 15px 10px;
18 | }
19 | #footer p {
20 |     margin: 25px 15px 10px;
21 | }
22 | .footer-pipe {
23 |     color: #cccccc;
24 |     margin-top: 25px;
25 | }
26 | @media (max-width: 800px) {
27 |     #buy-me-a-coffee-logo {
28 |         margin-top: 0px;
29 |     }
30 |     #footer {
31 |         display: block;
32 |         padding: 20px;
33 |     }
34 |     #footer a, #footer p {
35 |         display: block;
36 |         margin: 0px auto 10px;
37 |     }
38 |     .footer-pipe {
39 |         display: none;
40 |     }
41 | }
--------------------------------------------------------------------------------
/sitemap.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
3 |   <url>
4 |     <loc>https://practicealanguage.xyz/</loc>
5 |     <lastmod>2024-07-29</lastmod>
6 |     <changefreq>monthly</changefreq>
7 |     <priority>1.0</priority>
8 |   </url>
9 |   <url>
10 |     <loc>https://practicealanguage.xyz/about</loc>
11 |     <lastmod>2024-02-20</lastmod>
12 |     <changefreq>yearly</changefreq>
13 |     <priority>0.5</priority>
14 |   </url>
15 |   <url>
16 |     <loc>https://practicealanguage.xyz/terms</loc>
17 |     <lastmod>2023-10-14</lastmod>
18 |     <changefreq>yearly</changefreq>
19 |     <priority>0.1</priority>
20 |   </url>
21 |   <url>
22 |     <loc>https://practicealanguage.xyz/privacy_policy</loc>
23 |     <lastmod>2023-10-14</lastmod>
24 |     <changefreq>yearly</changefreq>
25 |     <priority>0.1</priority>
26 |   </url>
27 | </urlset>
28 | 
--------------------------------------------------------------------------------
/footer.py:
--------------------------------------------------------------------------------
1 | from dash import dcc, html
2 | 
3 | 
4 | footer = html.Div(id='footer', children=[
5 |     html.P("Practice a Language. All rights reserved."),
6 |     html.Div("|", className="footer-pipe"),
7 |     dcc.Link("About", href="/about"),
8 |     html.Div("|", className="footer-pipe"),
9 |     html.A("We're open source!", target="_blank", href="https://github.com/Currie32/practice-a-language"),
10 |     html.Div("|", className="footer-pipe"),
11 |     html.A(
12 |         html.Img(src='assets/buyMeACoffee.png', alt='Link to Currie32 Buy me a Coffee page.', id="buy-me-a-coffee-logo"),
13 |         target="_blank",
14 |         href="https://www.buymeacoffee.com/Currie32",
15 |     ),
16 |     html.Div("|", className="footer-pipe"),
17 |     html.P("david.currie32@gmail.com"),
18 |     html.Div("|", className="footer-pipe"),
19 |     dcc.Link("Terms", href="/terms"),
20 |     html.Div("|", className="footer-pipe"),
21 |     dcc.Link("Privacy Policy", href="/privacy_policy"),
22 | ])
23 | 
--------------------------------------------------------------------------------
/assets/audio.py:
--------------------------------------------------------------------------------
1 | import base64
2 | 
3 | from gtts import gTTS
4 | from pydub import AudioSegment
5 | 
6 | 
7 | def get_audio_file(text: str, language: str, playback_speed: float) -> str:
8 |     """
9 |     Create an mp3 file that contains the audio for a message, spoken in the
10 |     desired language's accent, and return it as a base64 data URI.
11 | 
12 |     Params:
13 |         text: The text for the audio
14 |         language: The language for the accent of the audio
15 |         playback_speed: The percentage increase in speed over normal playback
16 | 
17 |     Returns:
18 |         A base64-encoded data URI for the mp3 audio
19 |     """
20 | 
21 |     # Perform text-to-speech conversion
22 |     tts = gTTS(text, lang=language)
23 |     audio_path = "temp_audio.mp3"
24 |     tts.save(audio_path)
25 | 
26 |     # Create a new audio segment with adjusted speed
27 |     audio = AudioSegment.from_file(audio_path)
28 |     playback_speed = 1 + (playback_speed / 100)
29 |     adjusted_audio = audio.speedup(playback_speed=playback_speed)
30 | 
31 |     # Save the adjusted audio to a new file
32 |     adjusted_audio_file = "adjusted_audio.mp3"
33 |     adjusted_audio.export(adjusted_audio_file, format="mp3")
34 | 
35 |     with open(adjusted_audio_file, "rb") as audio_file:
36 |         audio_data = audio_file.read()
37 |     audio_base64 = base64.b64encode(audio_data).decode("utf-8")
38 |     audio_src = f"data:audio/mpeg;base64,{audio_base64}"
39 | 
40 |     return audio_src
41 | 
--------------------------------------------------------------------------------
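A minimal usage sketch for get_audio_file (hypothetical inputs; assumes gTTS, pydub, and ffmpeg are installed). With the site's default slider value of 11, the function computes a playback speed of 1 + 11/100 = 1.11x:

    from assets.audio import get_audio_file

    # "fr" is the gTTS abbreviation for French.
    audio_src = get_audio_file("Bonjour, comment allez-vous ?", language="fr", playback_speed=11)
    # audio_src is a base64 data URI ("data:audio/mpeg;base64,..."), ready to
    # be used as the src of an html.Audio element.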
/callbacks/translate.py:
--------------------------------------------------------------------------------
1 | from dash import Input, Output, State, callback
2 | from deep_translator import GoogleTranslator
3 | from gtts import lang
4 | 
5 | LANGUAGES_DICT = {name: abbreviation for abbreviation, name in lang.tts_langs().items()}
6 | 
7 | 
8 | @callback(
9 |     Output("translation", "children"),
10 |     Input("conversation", "selectedValue"),
11 |     State("language-known", "value"),
12 |     State("language-learn", "value"),
13 | )
14 | def translate_highlighted_text(
15 |     text_to_translate: str, language_known: str, language_learn: str
16 | ) -> str:
17 |     """
18 |     Translate any highlighted text from the language the user wants to learn
19 |     to the language the user knows.
20 | 
21 |     Params:
22 |         text_to_translate: the highlighted text that will be translated.
23 |         language_known: The language that the user speaks.
24 |         language_learn: The language that the user wants to learn.
25 | 
26 |     Returns:
27 |         A translation of the highlighted text.
28 |     """
29 | 
30 |     translation = ""
31 |     if text_to_translate:
32 |         language_learn_abbreviation = LANGUAGES_DICT[language_learn]
33 |         language_known_abbreviation = LANGUAGES_DICT[language_known]
34 |         translator = GoogleTranslator(
35 |             source=language_learn_abbreviation, target=language_known_abbreviation
36 |         )
37 |         translation = translator.translate(text_to_translate)
38 |         translation = f"Translation: {translation}"
39 | 
40 |     return translation
41 | 
--------------------------------------------------------------------------------
/assets/message_correction.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Optional
3 | 
4 | from openai import OpenAI
5 | from tenacity import retry, stop_after_attempt, wait_random_exponential
6 | 
7 | client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))
8 | 
9 | 
10 | def get_corrected_message(message: str, language_learn: str) -> Optional[str]:
11 |     """
12 |     Correct any mistakes in the user's message in the language they are learning.
13 | 
14 |     Params:
15 |         message: The message from the user.
16 |         language_learn: The language that the user wants to learn.
17 | 
18 |     Returns:
19 |         The corrected message, or None if no correction was needed.
20 |     """
21 | 
22 |     message_corrected = _chat_completion_request(message, language_learn)
23 |     if message_corrected != message:
24 |         return message_corrected
25 | 
26 | 
27 | @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
28 | def _chat_completion_request(message: str, language_learn: str) -> str:
29 |     """
30 |     Request a corrected version of the user's message from one of OpenAI's chat models.
31 | 
32 |     Params:
33 |         message: The user's message to correct.
34 |         language_learn: The language that the user wants to learn.
35 | 
36 |     Returns:
37 |         The corrected message from OpenAI's model.
38 |     """
39 | 
40 |     try:
41 |         content = f"You are an excellent {language_learn} teacher. Correct this sentence for any mistakes:\n{message}"
42 |         completion = client.chat.completions.create(
43 |             model="gpt-3.5-turbo", messages=[{"role": "system", "content": content}]
44 |         )
45 |         return completion.choices[0].message.content
46 |     except Exception as e:
47 |         return str(e)
48 | 
--------------------------------------------------------------------------------
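A hedged usage sketch for get_corrected_message (hypothetical sentence and output; requires the OPENAI_KEY environment variable to be set):

    from assets.message_correction import get_corrected_message

    corrected = get_corrected_message("Ich habe gestern ins Kino gegangen.", "German")
    # e.g. "Ich bin gestern ins Kino gegangen."
    # Returns None when the model sends the sentence back unchanged.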
/pages/about.py:
--------------------------------------------------------------------------------
1 | from dash import html, register_page
2 | 
3 | 
4 | register_page(__name__, path="/about")
5 | 
6 | meta_tags = [
7 |     {
8 |         "name": "description",
9 |         "content": "Practice A Language - Learn and practice languages through conversations.",
10 |     },
11 | ]
12 | 
13 | layout = html.Div(
14 |     id="content",
15 |     children=[
16 |         html.H1("About Practice a Language"),
17 |         html.P(
18 |             "Welcome to Practice A Language, a website to help you practice a language by having conversations. This website started from a desire to make it easier to learn a language before going on trips abroad. I became annoyed with the over-repetition of apps like Duolingo and losing track of how many times I translated “Juan come manzanas”."
19 |         ),
20 |         html.H2("Learn what you want faster"),
21 |         html.P(
22 |             "Unlike other tools that force you to learn according to their lesson plans, you can practice the conversation topics and phrases that you want, whenever you want. This control should help you to be ready for your next trip abroad much faster."
23 |         ),
24 |         html.H2("Practice at your level"),
25 |         html.P(
26 |             "You chat in either the language you’re learning or your native language. This allows experienced speakers to practice their vocabulary and grammar, while beginners can write in their native language and it will automatically be translated into the language they are learning."
27 |         ),
28 |         html.H2("Practice writing and speaking"),
29 |         html.P(
30 |             "You have the choice to practice your new language by either writing your response or recording your voice. If you record your voice, it will be transcribed so that you can see what was understood. If you want to make a change, then you can edit the text or rerecord yourself."
31 |         ),
32 |         html.H2("Learn from your mistakes"),
33 |         html.P(
34 |             "When speaking or writing in your new language, your responses are always analyzed for mistakes and will be automatically corrected. This quick feedback will help you to learn more from each conversation."
35 |         ),
36 |     ],
37 | )
38 | 
--------------------------------------------------------------------------------
/callbacks/conversation_settings.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 | 
3 | from dash import Input, Output, State, callback, callback_context
4 | 
5 | 
6 | @callback(
7 |     Output("button-start-conversation", "disabled"),
8 |     Input("language-known", "value"),
9 |     Input("language-learn", "value"),
10 |     Input("conversation-setting", "value"),
11 |     State("conversation-setting-custom", "value"),
12 | )
13 | def start_conversation_button_disabled(
14 |     language_known: str,
15 |     language_learn: str,
16 |     conversation_setting: str,
17 |     conversation_setting_custom: str,
18 | ) -> bool:
19 |     """
20 |     Whether to disable the start conversation button based on the values in the required fields.
21 | 
22 |     Params:
23 |         language_known: The language that the user speaks.
24 |         language_learn: The language that the user wants to learn.
25 |         conversation_setting: A conversation setting provided from the dropdown menu.
26 |         conversation_setting_custom: A custom conversation setting provided by the user.
27 | 
28 |     Returns:
29 |         True if the conversation button should be disabled, otherwise False.
30 |     """
31 |     has_two_languages = (language_known is not None) & (language_learn is not None)
32 |     has_different_languages = language_known != language_learn
33 | 
34 |     has_setting = (
35 |         (conversation_setting == "other") & (conversation_setting_custom is not None)
36 |     ) | ((conversation_setting != "other") & (conversation_setting is not None))
37 | 
38 |     return not (has_two_languages & has_different_languages & has_setting)
39 | 
40 | 
41 | @callback(
42 |     Output("conversation-setting", "value"),
43 |     Output("conversation-setting-custom", "value"),
44 |     Input("conversation-setting", "value"),
45 |     Input("conversation-setting-custom", "value"),
46 | )
47 | def update_conversation_setting_values(
48 |     conversation_setting: str,
49 |     conversation_setting_custom: str,
50 | ) -> Tuple[str, str]:
51 |     """
52 |     Update the value of a conversation setting based on the new value
53 |     for the other setting.
54 | 
55 |     Params:
56 |         conversation_setting: A conversation setting provided from the dropdown menu.
57 |         conversation_setting_custom: A custom conversation setting provided by the user.
58 | 
59 |     Returns:
60 |         The updated values for conversation_setting and conversation_setting_custom.
61 |     """
62 | 
63 |     # Determine which input triggered the callback
64 |     triggered_input_id = callback_context.triggered[0]["prop_id"].split(".")[0]
65 | 
66 |     # Reset conversation_setting_custom when conversation_setting changes to something other than 'other'
67 |     if triggered_input_id == "conversation-setting":
68 |         if conversation_setting != "other":
69 |             conversation_setting_custom = ""
70 | 
71 |     # If a value is provided for conversation_setting_custom, change conversation_setting to 'other'
72 |     elif triggered_input_id == "conversation-setting-custom":
73 |         if conversation_setting_custom:
74 |             conversation_setting = "other"
75 | 
76 |     return conversation_setting, conversation_setting_custom
77 | 
--------------------------------------------------------------------------------
/pages/terms.py:
--------------------------------------------------------------------------------
1 | from dash import html, register_page
2 | 
3 | 
4 | register_page(__name__, path="/terms")
5 | 
6 | layout = html.Div(
7 |     id="content",
8 |     children=[
9 |         html.H3("TERMS AND CONDITIONS"),
10 |         html.P(
11 |             'These terms and conditions (the "Terms and Conditions") govern the use of www.practicealanguage.xyz (the "Site"). This Site is owned and operated by David Currie Software Development Ltd. This Site helps its users practice a language by writing and speaking.'
12 |         ),
13 |         html.P(
14 |             "By using this Site, you indicate that you have read and understand these Terms and Conditions and agree to abide by them at all times."
15 |         ),
16 |         html.H4("Intellectual Property"),
17 |         html.P(
18 |             "All content published and made available on our Site is the property of David Currie Software Development Ltd. and the Site's creators. This includes, but is not limited to images, text, logos, documents, and anything that contributes to the composition of our Site."
19 |         ),
20 |         html.H4("Links to Other Websites"),
21 |         html.P(
22 |             "Our Site contains links to third party websites or services that we do not own or control. We are not responsible for the content, policies, or practices of any third party website or service linked to on our Site. It is your responsibility to read the terms and conditions and privacy policies of these third party websites before using these sites."
23 |         ),
24 |         html.H4("Limitation of Liability"),
25 |         html.P(
26 |             "David Currie Software Development Ltd. and our directors, employees, and affiliates will not be liable for any actions, claims, losses, damages, liabilities and expenses including legal fees from your use of the Site."
27 |         ),
28 |         html.H4("Indemnity"),
29 |         html.P(
30 |             "Except where prohibited by law, by using this Site you indemnify and hold harmless David Currie Software Development Ltd. and our directors, employees, and affiliates from any actions, claims, losses, damages, liabilities, and expenses including legal fees arising out of your use of our Site or your violation of these Terms and Conditions."
31 |         ),
32 |         html.H4("Applicable Law"),
33 |         html.P(
34 |             "These Terms and Conditions are governed by the laws of the Province of British Columbia."
35 |         ),
36 |         html.H4("Severability"),
37 |         html.P(
38 |             "If at any time any of the provisions set forth in these Terms and Conditions are found to be inconsistent or invalid under applicable laws, those provisions will be deemed void and will be removed from these Terms and Conditions. All other provisions will not be affected by the removal and the rest of these Terms and Conditions will still be considered valid."
39 | ), 40 | html.H4("Changes"), 41 | html.P( 42 | "These Terms and Conditions may be amended from time to time in order to maintain compliance with the law and to reflect any changes to the way we operate our Site and the way we expect users to behave on our Site." 43 | ), 44 | html.H4("Contact Details"), 45 | html.P( 46 | "Please contact us if you have any questions or concerns at: david.currie32@gmail.com" 47 | ), 48 | ], 49 | ) 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 | 
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 | 
119 | # SageMath parsed files
120 | *.sage.py
121 | 
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 | 
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 | 
135 | # Rope project settings
136 | .ropeproject
137 | 
138 | # mkdocs documentation
139 | /site
140 | 
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 | 
146 | # Pyre type checker
147 | .pyre/
148 | 
149 | # pytype static type analyzer
150 | .pytype/
151 | 
152 | # Cython debug symbols
153 | cython_debug/
154 | 
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 | 
162 | # Custom
163 | .DS_Store
164 | *.ipynb
165 | ads.txt
166 | temp_audio.mp3
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import base64
2 | 
3 | import dash_bootstrap_components as dbc
4 | from dash import Dash, html, page_container
5 | from flask import Flask, request, send_from_directory
6 | 
7 | from footer import footer
8 | 
9 | server = Flask(__name__)
10 | app = Dash(
11 |     __name__,
12 |     use_pages=True,
13 |     pages_folder="pages",
14 |     external_stylesheets=[dbc.icons.BOOTSTRAP, dbc.themes.BOOTSTRAP],
15 |     server=server,
16 | )
17 | app.config.suppress_callback_exceptions = True
18 | 
19 | 
20 | @server.route("/robots.txt")
21 | def serve_robots():
22 |     return send_from_directory(".", "robots.txt", mimetype="text/plain")
23 | 
24 | 
25 | @server.route("/sitemap.xml")
26 | def serve_sitemap():
27 |     return send_from_directory(".", "sitemap.xml", mimetype="application/xml")
28 | 
29 | 
30 | app.index_string = """
31 | <!DOCTYPE html>
32 | <html>
33 |     <head>
34 |         {%metas%}
35 |         <title>{%title%}</title>
36 |         {%favicon%}
37 |         {%css%}
38 |     </head>
39 |     <body>
40 |         {%app_entry%}
41 |         <footer>
42 |             {%config%}
43 |             {%scripts%}
44 |             {%renderer%}
45 |         </footer>
46 |     </body>
47 | </html>
48 | """
49 | 
50 | app.layout = html.Div(
51 |     [
52 |         html.Div(
53 |             className="container",
54 |             children=[
55 |                 html.Div(
56 |                     id="header",
57 |                     children=[
58 |                         html.H1(id="title", children="Practice a Language"),
59 |                     ],
60 |                 ),
61 |                 page_container,
62 |                 footer,
63 |             ],
64 |         )
65 |     ],
66 | )
67 | 
68 | 
69 | @server.route("/save_audio_recording", methods=["POST"])
70 | def save_audio_recording():
71 |     """
72 |     Save the audio that the user has recorded so that it can be sent
73 |     to OpenAI's Whisper-1 API.
74 |     """
75 |     try:
76 |         data = request.get_json()
77 |         audio_data = data["audio_data"]
78 |         # Decode the Base64 audio data
79 |         audio_bytes = base64.b64decode(audio_data)
80 | 
81 |         # Save the audio recording
82 |         with open("audio_recording.wav", "wb") as audio_file:
83 |             audio_file.write(audio_bytes)
84 | 
85 |         return "Audio data received successfully", 200
86 | 
87 |     except Exception:
88 |         return "An error occurred", 500
89 | 
90 | 
91 | if __name__ == "__main__":
92 |     app.run_server(debug=True, port=8080)
93 | 
--------------------------------------------------------------------------------
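For context, a sketch of what the browser-side recorder is expected to POST to this endpoint (hypothetical file name and host; the "audio_data" key matches the handler above):

    import base64
    import requests

    with open("sample.wav", "rb") as f:
        payload = {"audio_data": base64.b64encode(f.read()).decode("utf-8")}
    requests.post("http://localhost:8080/save_audio_recording", json=payload)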
/assets/chat_request.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import time
4 | from typing import Dict, List, Tuple
5 | 
6 | from dash import Input, Output, callback, no_update
7 | from openai import OpenAI
8 | from tenacity import retry, stop_after_attempt, wait_random_exponential
9 | 
10 | client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))
11 | 
12 | 
13 | @callback(
14 |     Output("user-response-text", "value", allow_duplicate=True),
15 |     Output("loading", "style", allow_duplicate=True),
16 |     Output("check-for-audio-file", "data", allow_duplicate=True),
17 |     Input("check-for-audio-file", "data"),
18 |     prevent_initial_call=True,
19 | )
20 | def convert_audio_recording_to_text(check_for_audio_file: bool) -> Tuple[str, Dict[str, str], bool]:
21 |     """
22 |     Convert the audio recording from the user into text using OpenAI's
23 |     Whisper-1 model.
24 | 
25 |     Params:
26 |         check_for_audio_file: Whether to check for the audio recording file.
27 | 
28 |     Returns:
29 |         The text of the user's audio recording.
30 |         The style of the loading icons.
31 |         Stop checking for the user's audio recording.
32 |     """
33 | 
34 |     audio_recording = "audio_recording.wav"
35 | 
36 |     while check_for_audio_file:
37 |         if os.path.exists(audio_recording):
38 |             audio_file = open(audio_recording, "rb")
39 |             os.remove(audio_recording)
40 |             transcript = client.audio.transcriptions.create(
41 |                 model="whisper-1", file=audio_file
42 |             )
43 |             message_user = transcript.to_dict()["text"]
44 | 
45 |             return message_user, {"display": "none"}, False
46 | 
47 |         # Wait 0.1 seconds before looking for the audio file again
48 |         time.sleep(0.1)
49 | 
50 |     return no_update
51 | 
52 | 
53 | def get_assistant_message(messages: List[Dict[str, str]]) -> str:
54 |     """
55 |     Get and process the assistant's (OpenAI's model) message to continue the conversation.
56 | 
57 |     Params:
58 |         messages: The conversation history between the user and the chat model.
59 | 
60 |     Returns:
61 |         The message from the assistant.
62 |     """
63 | 
64 |     chat_response = _chat_completion_request(messages)
65 |     message_assistant = chat_response.choices[0].message.content
66 | 
67 |     # Remove space before "!" or "?"
68 |     message_assistant = re.sub(r"\s+([!?])", r"\1", message_assistant)
69 | 
70 |     return message_assistant
71 | 
72 | 
73 | @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
74 | def _chat_completion_request(messages: List[Dict[str, str]]) -> Dict:
75 |     """
76 |     Request a response to the user's statement from one of OpenAI's chat models.
77 | 
78 |     Params:
79 |         messages: The conversation history between the user and the chat model.
80 | 
81 |     Returns:
82 |         A response from OpenAI's model to the user's statement.
83 |     """
84 | 
85 |     try:
86 |         completion = client.chat.completions.create(
87 |             model="gpt-4o-mini", temperature=1.5, max_tokens=50, messages=messages
88 |         )
89 |         return completion
90 |     except Exception as e:
91 |         return e
92 | 
93 | 
94 | def system_content(
95 |     conversation_setting: str,
96 |     language_learn: str,
97 |     language_known: str,
98 | ) -> str:
99 |     """
100 |     Write the content message for the system as part of calling OpenAI's chat completion API.
101 |     This provides OpenAI's model with some context about the conversation.
102 | 
103 |     Params:
104 |         conversation_setting: The setting of the conversation between the user and OpenAI's model.
105 |         language_learn: The language that the user wants to learn.
106 |         language_known: The language that the user speaks.
107 | 
108 |     Returns:
109 |         The content message for the system.
110 |     """
111 | 
112 |     content = f"Act as an excellent {language_learn} teacher who is helping me to practice {language_learn}. \
113 |         Start a conversation about {conversation_setting} in {language_learn}. \
114 |         Provide one statement in {language_learn}, then wait for my response. \
115 |         Do not write in {language_known}. \
116 |         Always finish your response with a question. \
117 |         Example response: Bonjour, qu'est-ce que je peux vous servir aujourd'hui?"
118 | 
119 |     content = re.sub(r"\s+", " ", content)
120 | 
121 |     return content
122 | 
--------------------------------------------------------------------------------
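A hedged end-to-end sketch of how these helpers open a conversation (hypothetical topic and languages; the model's reply will vary):

    content = system_content("ordering at a cafe", "French", "English")
    messages = [{"role": "system", "content": content}]
    first_message = get_assistant_message(messages)
    # e.g. "Bonjour ! Qu'est-ce que je peux vous servir aujourd'hui ?"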
/assets/app.css:
--------------------------------------------------------------------------------
1 | html {
2 |     margin: 0px;
3 | }
4 | body {
5 |     font-family: Georgia, 'Times New Roman', Times, serif;
6 | }
7 | #button-play-audio {
8 |     cursor: pointer;
9 | }
10 | .button-play-audio-wrapper {
11 |     clear: both;
12 |     font-size: 22px;
13 |     margin-left: 10px;
14 | }
15 | #button-record-audio {
16 |     margin-bottom: 5px;
17 | }
18 | #button-start-conversation {
19 |     background-color: #0061F3;
20 |     color: white;
21 | }
22 | #button-start-conversation:hover {
23 |     background-color: #003d9b;
24 | }
25 | .container {
26 |     margin: 0px;
27 |     min-width: 100%;
28 |     padding: 0px;
29 | }
30 | #content {
31 |     margin: auto;
32 |     max-width: 600px;
33 |     min-height: 470px;
34 |     padding: 0px 20px 10px;
35 | }
36 | #conversation {
37 |     display: block;
38 |     min-height: fit-content;
39 |     min-width: 100%;
40 | }
41 | #conversation-id {
42 |     height: 10px;
43 |     min-width: 100%;
44 | }
45 | .conversation-setting-custom-input {
46 |     padding: 10px 0px;
47 | }
48 | #conversation-setting-custom::placeholder {
49 |     color: #aaa;
50 | }
51 | .conversation-setting-menu {
52 |     padding: 10px 0px 0px;
53 | }
54 | .conversation-setting-wrapper {
55 |     min-width: 100%;
56 | }
57 | #header {
58 |     background-color: #003d9b;
59 |     color: white;
60 |     margin-bottom: 40px;
61 |     padding: 10px 10px;
62 |     text-align: center;
63 | }
64 | #help-highlight-for-translation {
65 |     color: #aaa;
66 |     font-size: 13px;
67 |     font-style: italic;
68 |     margin: 10px 0px -15px;
69 |     width: fit-content;
70 | }
71 | #intro {
72 |     color: #333;
73 |     font-size: 20px;
74 |     font-style: italic;
75 |     margin: 0px 20px 20px;
76 |     text-align: center;
77 | }
78 | .languages {
79 |     display: flex;
80 |     min-width: 100%;
81 | }
82 | #language-menu-known {
83 |     min-width: 50%;
84 |     padding-right: 10px;
85 | }
86 | #language-menu-learn {
87 |     min-width: 50%;
88 |     padding-left: 10px;
89 | }
90 | #loading {
91 |     align-items: center;
92 |     justify-content: center;
93 |     margin: 20px auto;
94 | }
95 | .loading-icon {
96 |     margin: 10px;
97 | }
98 | .message-ai-wrapper {
99 |     align-items: center;
100 |     display: flex;
101 |     margin: 15px 0px;
102 |     width: 100%;
103 | }
104 | .message-ai {
105 |     border: 1px solid #87b7ff;
106 |     border-radius: 0px 4px 4px 4px;
107 |     clear: both;
108 |     color: black;
109 |     float: left;
110 |     max-width: 75%;
111 |     padding: 5px 10px;
112 |     text-decoration: underline;
113 |     text-decoration-color: #87b7ff;
114 |     text-decoration-style: dotted;
115 |     width: fit-content;
116 | }
117 | .message-user {
118 |     background-color: #87b7ff;
119 |     border-radius: 4px 0px 4px 4px;
120 |     clear: both;
121 |     padding: 5px 10px;
122 |     text-decoration: underline;
123 |     text-decoration-color: #000;
124 |     text-decoration-style: dotted;
125 |     width: fit-content;
126 | }
127 | .message-user-wrapper {
128 |     display: flex;
129 |     justify-content: flex-end;
130 |     margin: 15px 0px;
131 |     width: 100%;
132 | }
133 | #audio-settings-text, #audio-speed-text {
134 |     color: #aaa;
135 |     font-size: 14px;
136 |     font-style: italic;
137 |     margin-right: 10px;
138 |     padding-top: 2px;
139 | }
140 | #audio-settings {
141 |     display: flex;
142 | }
143 | #toggle-play-audio-div {
144 |     display: flex;
145 |     margin: 0px 120px 10px 0px;
146 | }
147 | #slider-audio-speed-div {
148 |     display: flex;
149 |     margin: 0px 0px 10px;
150 | }
151 | #audio-speed {
152 |     margin-left: 10px;
153 |     margin-top: 5px;
154 |     width: 50px;
155 | }
156 | #translation {
157 |     clear: both;
158 |     display: block;
159 |     font-style: italic;
160 |     width: 100%;
161 | }
162 | #title {
163 |     margin: 0px;
164 | }
165 | #user-response {
166 |     display: flex;
167 |     margin: 0px 0px 30px;
168 |     width: 100%;
169 | }
170 | #user-response-buttons {
171 |     width: 42px;
172 | }
173 | #user-response-text {
174 |     height: 80px;
175 |     margin-right: 10px;
176 | }
177 | #user-response-text.form-control {
178 |     border: 1px solid rgba(7, 76, 179, 0.9);
179 | }
180 | #user-response-text::placeholder {
181 |     color: #aaa;
182 | }
183 | 
184 | @media screen and (max-width: 600px) {
185 |     #audio-settings {
186 |         display: block;
187 |     }
188 |     #intro {
189 |         font-size: 18px;
190 |     }
191 |     #toggle-play-audio-div {
192 |         margin: 0px;
193 |     }
194 | }
195 | 
--------------------------------------------------------------------------------
/callbacks/display_components.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Tuple
2 | 
3 | from dash import Input, Output, State, callback, callback_context, html
4 | 
5 | 
6 | @callback(
7 |     Output("help-highlight-for-translation", "style"),
8 |     Output("user-response-helper-icons", "style"),
9 |     Input("conversation", "children"),
10 | )
11 | def display_conversation_helpers(
12 |     conversation: List,
13 | ) -> Tuple[Dict[str, str], Dict[str, str]]:
14 |     """
15 |     Show helper text and icons if there is a conversation, otherwise keep them hidden.
16 | 
17 |     Params:
18 |         conversation: The conversation between the user and OpenAI's GPT.
19 | 
20 |     Returns:
21 |         The style value for the highlight-to-translate text.
22 |         The style value for the user-response-helper-icons div.
23 |     """
24 | 
25 |     if conversation:
26 |         return (
27 |             {"display": "block"},
28 |             {
29 |                 "display": "flex",
30 |                 "margin": "20px 0px 0px",
31 |                 "justify-content": "space-between",
32 |             },
33 |         )
34 | 
35 |     return {"display": "none"}, {"display": "none"}
36 | 
37 | 
38 | @callback(Output("user-response", "style"), Input("conversation", "children"))
39 | def display_user_input(conversation: List) -> Dict[str, str]:
40 |     """
41 |     Display the user response Input field if there is a conversation, otherwise hide it.
42 | 
43 |     Params:
44 |         conversation: The conversation between the user and OpenAI's GPT.
45 | 
46 |     Returns:
47 |         The display value for the user response Input field.
48 |     """
49 | 
50 |     if conversation:
51 |         return {"display": "flex"}
52 | 
53 |     return {"display": "none"}
54 | 
55 | 
56 | @callback(
57 |     Output("button-record-audio", "children"),
58 |     Output("check-for-audio-file", "data", allow_duplicate=True),
59 |     Input("button-record-audio", "n_clicks"),
60 |     prevent_initial_call=True,
61 | )
62 | def is_user_recording_audio(button_record_audio_n_clicks: int) -> Tuple[html.I, bool]:
63 |     """
64 |     Change the icon for the audio recording button based on if
65 |     a recording is taking place or not. Also, check for the audio
66 |     recording after it has been completed.
67 | 
68 |     Params:
69 |         button_record_audio_n_clicks: Number of times the button to record the user's audio has been clicked.
70 | 
71 |     Returns:
72 |         The icon of the button.
73 |         Whether to check for a file of the user's audio recording.
74 |     """
75 | 
76 |     # Recording taking place
77 |     if button_record_audio_n_clicks % 2 == 1:
78 |         return html.I(className="bi bi-headphones"), False
79 | 
80 |     # Not recording right now
81 |     else:
82 |         return html.I(className="bi bi-mic-fill"), True
83 | 
84 | 
85 | @callback(
86 |     Output("loading", "style", allow_duplicate=True),
87 |     Input("button-start-conversation", "n_clicks"),
88 |     Input("button-submit-response-text", "n_clicks"),
89 |     Input("user-response-text", "n_submit"),
90 |     Input("button-record-audio", "n_clicks"),
91 |     State("user-response-text", "value"),
92 |     prevent_initial_call="initial_duplicate",
93 | )
94 | def loading_visible(
95 |     button_start_conversation_n_clicks: int,
96 |     button_submit_text_n_clicks: int,
97 |     user_response_text_n_submits: int,
98 |     user_response_audio_n_clicks: int,
99 |     user_response_text: str,
100 | ) -> Dict[str, str]:
101 |     """
102 |     Whether to make the loading icons visible.
103 | 
104 |     Params:
105 |         button_start_conversation_n_clicks: Number of times the start conversation button was clicked.
106 |         button_submit_text_n_clicks: Number of times the button to submit the user's text response was clicked.
107 |         user_response_text_n_submits: Number of times the user's text response was submitted (by clicking enter/return).
108 |         user_response_audio_n_clicks: Number of times the button to record the user's audio was clicked.
109 |         user_response_text: The text of the user_response field when it was submitted.
110 | 
111 |     Returns:
112 |         The display status for the loading icons.
113 |     """
114 | 
115 |     # Determine which input triggered the callback
116 |     triggered_input_id = callback_context.triggered[0]["prop_id"].split(".")[0]
117 | 
118 |     if triggered_input_id == "button-start-conversation":
119 |         if button_start_conversation_n_clicks:
120 |             return {"display": "flex"}
121 | 
122 |     if triggered_input_id == "button-submit-response-text":
123 |         if button_submit_text_n_clicks:
124 |             return {"display": "flex"}
125 | 
126 |     elif triggered_input_id == "user-response-text":
127 |         if user_response_text_n_submits is not None and user_response_text:
128 |             return {"display": "flex"}
129 | 
130 |     elif triggered_input_id == "button-record-audio":
131 |         if user_response_audio_n_clicks:
132 |             return {"display": "flex"}
133 | 
134 |     return {"display": "none"}
135 | 
--------------------------------------------------------------------------------
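A note on is_user_recording_audio: the button's state is derived from click-count parity, as in this sketch (hypothetical click counts):

    icon, check_for_file = is_user_recording_audio(1)
    # odd click count: recording in progress; headphones icon, no polling yet
    icon, check_for_file = is_user_recording_audio(2)
    # even click count: recording stopped; mic icon, start polling for audio_recording.wav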
/pages/privacy_policy.py:
--------------------------------------------------------------------------------
1 | from dash import html, register_page
2 | 
3 | 
4 | register_page(__name__, path="/privacy_policy")
5 | 
6 | layout = html.Div(
7 |     id="content",
8 |     children=[
9 |         html.H3("Practice a Language Privacy Policy"),
10 |         html.P("Type of website: Practice a language by speaking and writing"),
11 |         html.P("Effective date: November 14th, 2023"),
12 |         html.P(
13 |             'www.practicealanguage.xyz (the "Site") is owned and operated by David Currie Software Development Ltd. David Currie Software Development Ltd. is the data controller and can be contacted at: david.currie32@gmail.com'
14 |         ),
15 |         html.H4("Purpose"),
16 |         html.P(
17 |             'The purpose of this privacy policy (this "Privacy Policy") is to inform users of our Site of the following:'
18 |         ),
19 |         html.P("1. The personal data we will collect;"),
20 |         html.P("2. Use of collected data;"),
21 |         html.P("3. Who has access to the data collected;"),
22 |         html.P("4. The rights of Site users; and"),
23 |         html.P("5. The Site's cookie policy."),
24 |         html.P(
25 |             "This Privacy Policy applies in addition to the terms and conditions of our Site."
26 |         ),
27 |         html.H4("GDPR"),
28 |         html.P(
29 |             'For users in the European Union, we adhere to the Regulation (EU) 2016/679 of the European Parliament and of the Council of 27 April 2016, known as the General Data Protection Regulation (the "GDPR"). For users in the United Kingdom, we adhere to the GDPR as enshrined in the Data Protection Act 2018.'
30 |         ),
31 |         html.H4("Consent"),
32 |         html.P("By using our Site, users agree that they consent to:"),
33 |         html.P("1. The conditions set out in this Privacy Policy."),
34 |         html.P(
35 |             "When the legal basis for us processing your personal data is that you have provided your consent to that processing, you may withdraw your consent at any time. If you withdraw your consent, it will not make processing which we completed before you withdrew your consent unlawful."
36 |         ),
37 |         html.P(
38 |             "You can withdraw your consent by emailing us at david.currie32@gmail.com."
39 |         ),
40 |         html.H4("Legal Basis for Processing"),
41 |         html.P(
42 |             "We collect and process personal data about users in the EU only when we have a legal basis for doing so under Article 6 of the GDPR."
43 |         ),
44 |         html.P(
45 |             "We rely on the following legal basis to collect and process the personal data of users in the EU:"
46 |         ),
47 |         html.P(
48 |             "1. Users have provided their consent to the processing of their data for one or more specific purposes."
49 |         ),
50 |         html.H4("Personal Data We Collect"),
51 |         html.P(
52 |             "We only collect data that helps us achieve the purpose set out in this Privacy Policy. We will not collect any additional data beyond the data listed below without notifying you first."
53 |         ),
54 |         html.H4("Data Collected Automatically"),
55 |         html.P(
56 |             "When you visit and use our Site, we may automatically collect and store the following information:"
57 |         ),
58 |         html.P("1. IP address;"),
59 |         html.P("2. Location;"),
60 |         html.P("3. Hardware and software details; and"),
61 |         html.P("4. Clicked links."),
62 |         html.H4("Data Collected in a Non-Automatic Way"),
63 |         html.P(
64 |             "We may also collect the following data when you perform certain functions on our Site:"
65 |         ),
66 |         html.P("1. Choose a conversation setting for practicing."),
67 |         html.P("This data may be collected using the following methods:"),
68 |         html.P("1. With an API call, then stored in our database."),
69 |         html.H4("How We Use Personal Data"),
70 |         html.P(
71 |             "Data collected on our Site will only be used for the purposes specified in this Privacy Policy or indicated on the relevant pages of our Site. We will not use your data beyond what we disclose in this Privacy Policy."
72 |         ),
73 |         html.P("The data we collect automatically is used for the following purposes:"),
74 |         html.P("1. Providing more relevant ads using Google Adsense."),
75 |         html.P(
76 |             "The data we collect when the user performs certain functions may be used for the following purposes:"
77 |         ),
78 |         html.P("1. To add more default conversation settings to the Site."),
79 |         html.H4("Who We Share Personal Data With"),
80 |         html.H5("Employees"),
81 |         html.P(
82 |             "We may disclose user data to any member of our organization who reasonably needs access to user data to achieve the purposes set out in this Privacy Policy."
83 |         ),
84 |         html.H5("Third Parties"),
85 |         html.P("We may share user data with the following third parties:"),
86 |         html.P("1. Google Adsense."),
87 |         html.P("We may share the following user data with third parties:"),
88 |         html.P(
89 |             "1. User IP addresses, browsing histories, website preferences, device location and device preferences."
90 |         ),
91 |         html.P("We may share user data with third parties for the following purposes:"),
92 |         html.P("1. Targeted advertising."),
93 |         html.P(
94 |             "Third parties will not be able to access user data beyond what is reasonably necessary to achieve the given purpose."
95 |         ),
96 |         html.H5("Other Disclosures"),
97 |         html.P(
98 |             "We will not sell or share your data with other third parties, except in the following cases:"
99 |         ),
100 |         html.P("1. If the law requires it;"),
101 |         html.P("2. If it is required for any legal proceeding;"),
102 |         html.P("3. To prove or protect our legal rights; and"),
103 |         html.P(
104 |             "4. To buyers or potential buyers of this company in the event that we seek to sell the company."
105 |         ),
106 |         html.P(
107 |             "If you follow hyperlinks from our Site to another site, please note that we are not responsible for and have no control over their privacy policies and practices."
108 |         ),
109 |         html.H4("How Long We Store Personal Data"),
110 |         html.P(
111 |             "User data will be stored until the purpose the data was collected for has been achieved."
112 |         ),
113 |         html.P(
114 |             "You will be notified if your data is kept for longer than this period."
115 |         ),
116 |         html.H4("How We Protect Your Personal Data"),
117 |         html.P(
118 |             "The company will use products developed and provided by Google to store personal data."
119 |         ),
120 |         html.P(
121 |             "While we take all reasonable precautions to ensure that user data is secure and that users are protected, there always remains the risk of harm. The Internet as a whole can be insecure at times and therefore we are unable to guarantee the security of user data beyond what is reasonably practical."
122 |         ),
123 |         html.H4("Your Rights as a User"),
124 |         html.P("Under the GDPR, you have the following rights:"),
125 |         html.P("1. Right to be informed;"),
126 |         html.P("2. Right of access;"),
127 |         html.P("3. Right to rectification;"),
128 |         html.P("4. Right to erasure;"),
129 |         html.P("5. Right to restrict processing;"),
130 |         html.P("6. Right to data portability; and"),
131 |         html.P("7. Right to object."),
132 |         html.H4("Children"),
133 |         html.P(
134 |             "We do not knowingly collect or use personal data from children under 16 years of age. If we learn that we have collected personal data from a child under 16 years of age, the personal data will be deleted as soon as possible. If a child under 16 years of age has provided us with personal data their parent or guardian may contact the company."
135 |         ),
136 |         html.H4("How to Access, Modify, Delete, or Challenge the Data Collected"),
137 |         html.P(
138 |             "If you would like to know if we have collected your personal data, how we have used your personal data, if we have disclosed your personal data and to whom we disclosed your personal data, if you would like your data to be deleted or modified in any way, or if you would like to exercise any of your other rights under the GDPR, please contact us at: david.currie32@gmail.com"
139 |         ),
140 |         html.H4("How to Opt-Out of Data Collection, Use or Disclosure"),
141 |         html.P(
142 |             "In addition to the method(s) described in the How to Access, Modify, Delete, or Challenge the Data Collected section, we provide the following specific opt-out methods for the forms of collection, use, or disclosure of your personal data:"
143 |         ),
144 |         html.P(
145 |             "1. All collected data. You can opt out by indicating that you do not consent to your data being collected."
146 |         ),
147 |         html.H4("Cookie Policy"),
148 |         html.P(
149 |             "A cookie is a small file, stored on a user's hard drive by a website. Its purpose is to collect data relating to the user's browsing habits. You can choose to be notified each time a cookie is transmitted. You can also choose to disable cookies entirely in your internet browser, but this may decrease the quality of your user experience."
150 |         ),
151 |         html.P("We use the following types of cookies on our Site:"),
152 |         html.H5("1. Third-Party Cookies"),
153 |         html.P(
154 |             "Third-party cookies are created by a website other than ours. We may use third-party cookies to achieve the following purposes:"
155 |         ),
156 |         html.P(
157 |             "1. Monitor user preferences to tailor advertisements around their interests."
158 |         ),
159 |         html.H4("Modifications"),
160 |         html.P(
161 |             'This Privacy Policy may be amended from time to time in order to maintain compliance with the law and to reflect any changes to our data collection process. When we amend this Privacy Policy we will update the "Effective Date" at the top of this Privacy Policy. We recommend that our users periodically review our Privacy Policy to ensure that they are notified of any updates.
If necessary, we may notify users by email of changes to this Privacy Policy.' 162 | ), 163 | html.H4("Complaints"), 164 | html.P( 165 | "If you have any complaints about how we process your personal data, please contact us through the contact methods listed in the Contact Information section so that we can, where possible, resolve the issue. If you feel we have not addressed your concern in a satisfactory manner you may contact a supervisory authority. You also have the right to directly make a complaint to a supervisory authority. You can lodge a complaint with a supervisory authority by contacting the Information Commissioner's Office in the UK, Data Protection Commission in Ireland." 166 | ), 167 | html.H4("Contact Information"), 168 | html.P( 169 | "If you have any questions, concerns, or complaints, you can contact us at: david.currie32@gmail.com" 170 | ), 171 | ], 172 | ) 173 | -------------------------------------------------------------------------------- /pages/home.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Tuple 2 | 3 | import dash_bootstrap_components as dbc 4 | import dash_daq as daq 5 | from dash import ( 6 | Input, 7 | Output, 8 | State, 9 | callback, 10 | callback_context, 11 | clientside_callback, 12 | dcc, 13 | html, 14 | no_update, 15 | register_page, 16 | ) 17 | from dash_selectable import DashSelectable 18 | from deep_translator import GoogleTranslator 19 | from gtts import lang 20 | from langdetect import detect 21 | from langdetect.lang_detect_exception import LangDetectException 22 | 23 | from assets.audio import get_audio_file 24 | from assets.chat_request import ( 25 | convert_audio_recording_to_text, 26 | get_assistant_message, 27 | system_content, 28 | ) 29 | from assets.message_correction import get_corrected_message 30 | from callbacks.conversation_settings import ( 31 | start_conversation_button_disabled, 32 | update_conversation_setting_values, 33 | ) 34 | from callbacks.display_components import ( 35 | display_conversation_helpers, 36 | display_user_input, 37 | is_user_recording_audio, 38 | loading_visible, 39 | ) 40 | from callbacks.placeholder_text import user_input_placeholder 41 | from callbacks.tooltips import tooltip_translate_language_known_text 42 | from callbacks.translate import translate_highlighted_text 43 | 44 | 45 | register_page(__name__, path="") 46 | MESSAGES = [] 47 | LANGUAGES_DICT = {name: abbreviation for abbreviation, name in lang.tts_langs().items()} 48 | LANGUAGES = sorted(LANGUAGES_DICT) # Get just the names of the languages 49 | 50 | 51 | layout = html.Div( 52 | children=[ 53 | # Content section 54 | html.Div( 55 | id="content", 56 | children=[ 57 | html.Div( 58 | id="intro", 59 | children=[ 60 | html.P( 61 | children="Practice a language by having conversations about the topic of your choice." 
62 |                             ),
63 |                         ],
64 |                     ),
65 |                     # Language selection section
66 |                     html.Div(
67 |                         className="languages",
68 |                         children=[
69 |                             # Known language dropdown
70 |                             html.Div(
71 |                                 id="language-menu-known",
72 |                                 children=[
73 |                                     dcc.Dropdown(
74 |                                         LANGUAGES,
75 |                                         placeholder="I speak",
76 |                                         id="language-known",
77 |                                         clearable=False,
78 |                                     ),
79 |                                 ],
80 |                             ),
81 |                             # Learn language dropdown
82 |                             html.Div(
83 |                                 id="language-menu-learn",
84 |                                 children=[
85 |                                     dcc.Dropdown(
86 |                                         LANGUAGES,
87 |                                         placeholder="I want to learn",
88 |                                         id="language-learn",
89 |                                         clearable=False,
90 |                                     ),
91 |                                 ],
92 |                             ),
93 |                         ],
94 |                     ),
95 |                     # Conversation setting section
96 |                     html.Div(
97 |                         className="conversation-setting-wrapper",
98 |                         children=[
99 |                             # Conversation setting dropdown
100 |                             html.Div(
101 |                                 className="conversation-setting-menu",
102 |                                 children=[
103 |                                     dcc.Dropdown(
104 |                                         [
105 |                                             "a book you read",
106 |                                             "a movie you watched",
107 |                                             "a recent holiday",
108 |                                             "a restaurant you went to",
109 |                                             "asking for directions",
110 |                                             "booking a hotel",
111 |                                             "buying a bus ticket",
112 |                                             "buying groceries",
113 |                                             "cooking a meal",
114 |                                             "favourite foods",
115 |                                             "going to a concert",
116 |                                             "going to a movie",
117 |                                             "going to a restaurant",
118 |                                             "going to a show",
119 |                                             "hobbies",
120 |                                             "making a dinner reservation",
121 |                                             "meeting someone for the first time",
122 |                                             "music",
123 |                                             "ordering at a cafe",
124 |                                             "ordering at a restaurant",
125 |                                             "pets",
126 |                                             "planning a trip",
127 |                                             "renting a car",
128 |                                             "shopping in a store",
129 |                                             "weekend plans",
130 |                                             "other",
131 |                                         ],
132 |                                         placeholder="Choose a topic",
133 |                                         id="conversation-setting",
134 |                                     ),
135 |                                 ],
136 |                             ),
137 |                             # Custom conversation setting input
138 |                             html.Div(
139 |                                 className="conversation-setting-custom-input",
140 |                                 children=[
141 |                                     dbc.Input(
142 |                                         id="conversation-setting-custom",
143 |                                         placeholder="Or type a custom topic for a conversation",
144 |                                         type="text",
145 |                                     ),
146 |                                 ],
147 |                             ),
148 |                         ],
149 |                     ),
150 |                     # Toggle to play audio of new messages
151 |                     html.Div(
152 |                         id="audio-settings",
153 |                         children=[
154 |                             html.Div(
155 |                                 id="toggle-play-audio-div",
156 |                                 children=[
157 |                                     html.P(
158 |                                         "Play audio of new message",
159 |                                         id="audio-settings-text",
160 |                                     ),
161 |                                     daq.ToggleSwitch(
162 |                                         id="toggle-play-audio", value=True, color="#322CA1"
163 |                                     ),
164 |                                 ],
165 |                             ),
166 |                             html.Div(
167 |                                 id="slider-audio-speed-div",
168 |                                 children=[
169 |                                     html.P("Audio speed", id="audio-speed-text"),
170 |                                     daq.Slider(
171 |                                         id="audio-speed",
172 |                                         min=1,
173 |                                         max=21,
174 |                                         value=11,
175 |                                         step=10,
176 |                                         size=100,
177 |                                         color="#322CA1",
178 |                                     ),
179 |                                 ],
180 |                             ),
181 |                         ],
182 |                     ),
183 |                     # Button to start a conversation
184 |                     dbc.Button(
185 |                         "Start a new conversation",
186 |                         id="button-start-conversation",
187 |                         n_clicks=0,
188 |                         disabled=True,
189 |                     ),
190 |                     # Conversation section
191 |                     html.Div(
192 |                         id="conversation-div",
193 |                         children=[
194 |                             # Helper text to highlight for translation
195 |                             html.P(
196 |                                 "Highlight text to see the translation.",
197 |                                 id="help-highlight-for-translation",
198 |                                 style={"display": "none"},
199 |                             ),
200 |                             # Show translated text that is highlighted
201 |                             DashSelectable(id="conversation"),
202 |                             html.Div(id="translation"),
203 |                             # Icons to show when loading a new message
204 |                             html.Div(
205 |                                 id="loading",
206 |                                 children=[
207 |                                     dbc.Spinner(
208 |                                         color="#85b5ff",
209 |                                         type="grow",
210 |                                         size="sm",
211 |                                         spinner_class_name="loading-icon",
212 |                                     ),
213 |                                     dbc.Spinner(
214 |                                         color="#85b5ff",
215 |                                         type="grow",
216 |                                         size="sm",
217 |                                         spinner_class_name="loading-icon",
218 |                                     ),
219 |                                     dbc.Spinner(
220 |                                         color="#85b5ff",
221 |                                         type="grow",
222 |                                         size="sm",
223 |                                         spinner_class_name="loading-icon",
224 |                                     ),
225 |                                 ],
226 |                             ),
227 |                             # Helper icons and tooltip about writing and recording user response
228 |                             html.Div(
229 |                                 id="user-response-helper-icons",
230 |                                 children=[
231 |                                     html.Div(
232 |                                         children=[
233 |                                             html.I(
234 |                                                 className="bi bi-question-circle",
235 |                                                 id="help-translate-language-known",
236 |                                             ),
237 |                                             dbc.Tooltip(
238 |                                                 id="tooltip-translate-language-known",
239 |                                                 target="help-translate-language-known",
240 |                                             ),
241 |                                         ]
242 |                                     ),
243 |                                     html.Div(
244 |                                         children=[
245 |                                             html.I(
246 |                                                 className="bi bi-question-circle",
247 |                                                 id="help-change-microphone-setting",
248 |                                             ),
249 |                                             dbc.Tooltip(
250 |                                                 id="tooltip-change-microphone-setting",
251 |                                                 target="help-change-microphone-setting",
252 |                                                 children="If you are unable to record audio, you might need to change your device's microphone settings.",
253 |                                             ),
254 |                                         ]
255 |                                     ),
256 |                                 ],
257 |                             ),
258 |                             # User response section
259 |                             html.Div(
260 |                                 id="user-response",
261 |                                 children=[
262 |                                     dbc.Textarea(id="user-response-text"),
263 |                                     html.Div(
264 |                                         id="user-response-buttons",
265 |                                         children=[
266 |                                             dbc.Button(
267 |                                                 html.I(className="bi bi-mic-fill"),
268 |                                                 id="button-record-audio",
269 |                                                 n_clicks=0,
270 |                                             ),
271 |                                             dbc.Button(
272 |                                                 html.I(className="bi bi-arrow-return-left"),
273 |                                                 id="button-submit-response-text",
274 |                                                 n_clicks=0,
275 |                                             ),
276 |                                         ],
277 |                                     ),
278 |                                 ],
279 |                                 style={"display": "none"},
280 |                             ),
281 |                             # Boolean for when to look for the user's audio recording
282 |                             dcc.Store(id="check-for-audio-file", data=False),
283 |                             # Store for messages
284 |                             dcc.Store(id="messages-store", data=[]),
285 |                         ],
286 |                     ),
287 |                 ],
288 |             ),
289 |         ]
290 | )
291 | 
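# For context, the messages-store above holds an OpenAI-style message list.
# A hypothetical snapshot after the opening exchange:
#
#     [
#         {"role": "system", "content": "Act as an excellent French teacher ..."},
#         {"role": "assistant", "content": "Bonjour ! Qu'est-ce que je peux vous servir ?"},
#         {"role": "user", "content": "Un café, s'il vous plaît."},
#     ]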
292 | 
293 | @callback(
294 |     Output("conversation", "children", allow_duplicate=True),
295 |     Output("loading", "style", allow_duplicate=True),
296 |     Output("messages-store", "data", allow_duplicate=True),
297 |     Input("button-start-conversation", "n_clicks"),
298 |     State("language-known", "value"),
299 |     State("language-learn", "value"),
300 |     State("conversation-setting", "value"),
301 |     State("conversation-setting-custom", "value"),
302 |     prevent_initial_call=True,
303 | )
304 | def start_conversation(
305 |     button_start_conversation_n_clicks: int,
306 |     language_known: str,
307 |     language_learn: str,
308 |     conversation_setting: str,
309 |     conversation_setting_custom: str,
310 | ) -> Tuple[List[html.Div], Dict[str, str], List[Dict[str, str]]]:
311 |     """
312 |     Start the practice conversation by providing information about
313 |     the language the user wants to practice and the setting for the conversation.
314 | 
315 |     Params:
316 |         button_start_conversation_n_clicks: Number of times the start conversation button was clicked.
317 |         language_known: The language that the user speaks.
318 |         language_learn: The language that the user wants to learn.
319 |         conversation_setting: A conversation setting provided from the dropdown menu.
320 |         conversation_setting_custom: A custom conversation setting provided by the user.
321 | 
322 |     Returns:
323 |         A history of the conversation.
324 |         The display value for the loading icons, and the new messages store.
325 |     """
326 | 
327 |     # Replace conversation_setting with conversation_setting_custom if it has a value
328 |     if conversation_setting_custom:
329 |         conversation_setting = conversation_setting_custom
330 | 
331 |     if button_start_conversation_n_clicks:
332 |         messages = []
333 |         messages.append(
334 |             {
335 |                 "role": "system",
336 |                 # Provide content about the conversation for the system (OpenAI's GPT)
337 |                 "content": system_content(
338 |                     conversation_setting,
339 |                     language_learn,
340 |                     language_known,
341 |                 ),
342 |             }
343 |         )
344 | 
345 |         # Get the first message in the conversation from OpenAI's GPT
346 |         message_assistant = get_assistant_message(messages)
347 |         # message_assistant = 'Guten morgen, wie kann ich ihnen helfen!' # <- Testing message
348 | 
349 |         messages.append({"role": "assistant", "content": message_assistant})
350 | 
351 |         # Create a list to store the conversation history
352 |         conversation = [
353 |             html.Div(
354 |                 className="message-ai-wrapper",
355 |                 children=[
356 |                     html.Div(
357 |                         className="message-ai",
358 |                         id="message-1",
359 |                         children=[message_assistant],
360 |                     ),
361 |                     html.Div(
362 |                         html.I(className="bi bi-play-circle", id="button-play-audio"),
363 |                         id="button-message-1",
364 |                         className="button-play-audio-wrapper",
365 |                     ),
366 |                     # For initial audio play
367 |                     html.Audio(id="audio-player-0", autoPlay=True),
368 |                     # Need two audio elements to always provide playback after conversation has been created
369 |                     html.Audio(id="audio-player-1", autoPlay=True),
370 |                     html.Audio(id="audio-player-2", autoPlay=True),
371 |                 ],
372 |             )
373 |         ]
374 | 
375 |         return conversation, {"display": "none"}, messages
376 | 
377 | 
325 |     """
326 | 
327 |     # Use the custom conversation setting if one was provided
328 |     if conversation_setting_custom:
329 |         conversation_setting = conversation_setting_custom
330 | 
331 |     if button_start_conversation_n_clicks:
332 |         messages = []
333 |         messages.append(
334 |             {
335 |                 "role": "system",
336 |                 # Provide content about the conversation for the system (OpenAI's GPT)
337 |                 "content": system_content(
338 |                     conversation_setting,
339 |                     language_learn,
340 |                     language_known,
341 |                 ),
342 |             }
343 |         )
344 | 
345 |         # Get the first message in the conversation from OpenAI's GPT
346 |         message_assistant = get_assistant_message(messages)
347 |         # message_assistant = 'Guten morgen, wie kann ich ihnen helfen!'  # <- Testing message
348 | 
349 |         messages.append({"role": "assistant", "content": message_assistant})
350 | 
351 |         # Create a list to store the conversation history
352 |         conversation = [
353 |             html.Div(
354 |                 className="message-ai-wrapper",
355 |                 children=[
356 |                     html.Div(
357 |                         className="message-ai",
358 |                         id="message-1",
359 |                         children=[message_assistant],
360 |                     ),
361 |                     html.Div(
362 |                         html.I(className="bi bi-play-circle", id="button-play-audio"),
363 |                         id="button-message-1",
364 |                         className="button-play-audio-wrapper",
365 |                     ),
366 |                     # For initial audio play
367 |                     html.Audio(id="audio-player-0", autoPlay=True),
368 |                     # Two audio elements are needed to always provide playback after the conversation is created
369 |                     html.Audio(id="audio-player-1", autoPlay=True),
370 |                     html.Audio(id="audio-player-2", autoPlay=True),
371 |                 ],
372 |             )
373 |         ]
374 | 
375 |         return conversation, {"display": "none"}, messages
376 | 
377 | 
378 | @callback(
379 |     Output("conversation", "children", allow_duplicate=True),
380 |     Output("user-response-text", "value", allow_duplicate=True),
381 |     Output("loading", "style", allow_duplicate=True),
382 |     Output("messages-store", "data", allow_duplicate=True),
383 |     Input("user-response-text", "n_submit"),
384 |     Input("button-submit-response-text", "n_clicks"),
385 |     State("user-response-text", "value"),
386 |     State("conversation", "children"),
387 |     State("language-known", "value"),
388 |     State("language-learn", "value"),
389 |     State("messages-store", "data"),
390 |     prevent_initial_call="initial_duplicate",
391 | )
392 | def continue_conversation_text(
393 |     user_response_n_submits: int,
394 |     button_submit_n_clicks: int,
395 |     message_user: str,
396 |     conversation: List,
397 |     language_known: str,
398 |     language_learn: str,
399 |     messages_store: List[Dict[str, str]],
400 | ) -> Tuple[List, str, Dict[str, str], List[Dict[str, str]]]:
401 |     """
402 |     Continue the conversation by adding the user's response, then calling OpenAI
403 |     for its response.
404 | 
405 |     Params:
406 |         user_response_n_submits: Number of times the user response was submitted.
407 |         button_submit_n_clicks: Number of times the button to submit the user's response was clicked.
408 |         message_user: The text of the user_response field when it was submitted.
409 |         conversation: The conversation between the user and OpenAI's GPT.
410 |         language_known: The language that the user speaks.
411 |         language_learn: The language that the user wants to learn.
412 |         messages_store: Store of messages.
413 | 
414 |     Returns:
415 |         The conversation with the new messages from the user and OpenAI's GPT.
416 |         An empty string to clear the user response Input field.
417 |         The new display value to hide the loading icons, and the updated messages store.
418 |     """
419 | 
420 |     if (
421 |         user_response_n_submits is not None or button_submit_n_clicks is not None
422 |     ) and message_user:
423 |         try:
424 |             language_detected = detect(message_user)
425 |             if language_detected == LANGUAGES_DICT[language_known]:
426 |                 translator = GoogleTranslator(
427 |                     source=LANGUAGES_DICT[language_known],
428 |                     target=LANGUAGES_DICT[language_learn],
429 |                 )
430 |                 message_user = translator.translate(message_user)
431 |             else:
432 |                 message_user = get_corrected_message(message_user, language_learn)
433 |         except LangDetectException:
434 |             pass
435 | 
436 |         messages = messages_store.copy()
437 |         messages.append({"role": "user", "content": message_user})
438 |         message_new = format_new_message("user", len(messages), message_user)
439 |         conversation = conversation + message_new
440 | 
441 |         messages_to_send = [messages[0]] + messages[1:][-5:]  # System prompt + the 5 most recent messages
442 | 
443 |         message_assistant = get_assistant_message(messages_to_send)
444 |         # message_assistant = 'Natürlich!'  # <- Testing message
445 |         messages.append({"role": "assistant", "content": message_assistant})
446 |         message_new = format_new_message("ai", len(messages), message_assistant)
447 |         conversation = conversation + message_new
448 | 
449 |         return conversation, "", {"display": "none"}, messages
450 | 
451 |     return no_update
452 | 
453 | 
454 | def format_new_message(
455 |     who: str, messages_count: int, message: str, message_corrected: str = ""
456 | ) -> List[html.Div]:
457 |     """
458 |     Format a new message so that it is ready to be added to the conversation.
459 | 
460 |     Params:
461 |         who: Whether the message was from the ai or user. Only valid values are "ai" and "user".
462 |         messages_count: The number of messages in the conversation.
463 |         message: The new message to be added to the conversation.
464 |         message_corrected: A corrected version of the user's message, if one was generated.
465 |     Returns:
466 |         The new message that has been formatted so that it can be viewed on the website.
467 |     """
468 | 
469 |     return [
470 |         html.Div(
471 |             className=f"message-{who}-wrapper",
472 |             children=[
473 |                 html.Div(
474 |                     className=f"message-{who}",
475 |                     id=f"message-{messages_count - 1}",
476 |                     children=[message],
477 |                 ),
478 |                 html.Div(
479 |                     className=f"message-{who}-corrected",
480 |                     id=f"message-{messages_count - 1}-corrected",
481 |                     children=[message_corrected],
482 |                 ),
483 |                 html.Div(
484 |                     html.I(className="bi bi-play-circle", id="button-play-audio"),
485 |                     id=f"button-message-{messages_count - 1}",
486 |                     className="button-play-audio-wrapper",
487 |                 ),
488 |                 # Two audio elements are needed to always provide playback
489 |                 html.Audio(id="audio-player-1", autoPlay=True),
490 |                 html.Audio(id="audio-player-2", autoPlay=True),
491 |             ],
492 |         )
493 |     ]
494 | 
495 | 
496 | @callback(
497 |     Output("audio-player-1", "src"),
498 |     Input("conversation", "children"),
499 |     State("toggle-play-audio", "value"),
500 |     State("audio-speed", "value"),
501 |     State("language-learn", "value"),
502 | )
503 | def play_newest_message(
504 |     conversation: List, toggle_audio: bool, audio_speed: int, language_learn: str
505 | ) -> str:
506 |     """
507 |     Play the newest message in the conversation.
508 | 
509 |     Params:
510 |         conversation: Contains all of the data about the conversation.
511 |         toggle_audio: Whether to play the audio of the newest message.
512 |         audio_speed: The speed of the audio.
513 |         language_learn: The language that the user wants to learn.
514 | 
515 |     Returns:
516 |         A path to the mp3 file for the newest message.
517 |     """
518 | 
519 |     if conversation and toggle_audio:
520 |         newest_message = conversation[-1]["props"]["children"][0]["props"]["children"][
521 |             0
522 |         ]
523 |         language_learn_abbreviation = LANGUAGES_DICT[language_learn]
524 | 
525 |         return get_audio_file(newest_message, language_learn_abbreviation, audio_speed)
526 | 
527 |     return no_update
528 | 
529 | 
530 | # Loop through the messages to determine which one should have its audio played.
531 | # Use 100 as a safe upper limit; using len(MESSAGES) didn't work.
532 | for i in range(100):
533 | 
534 |     @callback(
535 |         Output("audio-player-1", "src", allow_duplicate=True),
536 |         Output("audio-player-2", "src", allow_duplicate=True),
537 |         Input(f"button-message-{i+1}", "n_clicks"),
538 |         State("conversation", "children"),
539 |         State("toggle-play-audio", "value"),
540 |         State("audio-speed", "value"),
541 |         State("language-learn", "value"),
542 |         prevent_initial_call="initial_duplicate",
543 |     )
544 |     def play_audio_of_clicked_message(
545 |         button_message_n_clicks: int,
546 |         conversation: List,
547 |         toggle_audio: bool,
548 |         audio_speed: int,
549 |         language_learn: str,
550 |     ) -> Tuple[str, str]:
551 |         """
552 |         Play the audio of the message that had its play-audio button clicked.
553 | 
554 |         Params:
555 |             button_message_n_clicks: The number of times the play-audio button was clicked.
556 |             conversation: The conversation between the user and OpenAI's GPT.
557 |             toggle_audio: Whether to play the audio of the new message.
558 |             audio_speed: The speed of the audio.
559 |             language_learn: The language that the user wants to learn.
560 | 
561 |         Returns:
562 |             A path to the message's audio that is to be played.
563 |         """
564 | 
565 |         if button_message_n_clicks and toggle_audio:
566 |             triggered_input_id = callback_context.triggered[0]["prop_id"].split(".")[0]
567 |             message_number_clicked = triggered_input_id.split("-")[-1]
568 | 
569 |             if message_number_clicked:
570 |                 message_number_clicked = int(message_number_clicked)
571 |                 message_clicked = conversation[message_number_clicked - 1]["props"][
572 |                     "children"
573 |                 ][0]["props"]["children"][0]
574 |                 language_learn_abbreviation = LANGUAGES_DICT[language_learn]
575 | 
576 |                 # Rotate between audio elements so that the audio is always played
577 |                 if button_message_n_clicks % 2 == 0:
578 |                     return (
579 |                         get_audio_file(
580 |                             message_clicked, language_learn_abbreviation, audio_speed
581 |                         ),
582 |                         "",
583 |                     )
584 |                 else:
585 |                     return "", get_audio_file(
586 |                         message_clicked, language_learn_abbreviation, audio_speed
587 |                     )
588 | 
589 |         return ("", "")
590 | 
591 | 
592 | # A clientside callback to start recording the user's audio when they click on
593 | # "button-record-audio". This needs to be a clientside callback in order to access
594 | # the user's microphone: Dash callbacks run on the server and cannot access the
595 | # microphone once the app has been deployed to Google Cloud Run.
596 | clientside_callback(
597 |     """
598 |     function (n_clicks) {
599 |         if (n_clicks % 2 === 1) {
600 |             if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
601 |                 navigator.mediaDevices.getUserMedia({ audio: true }).then(function(stream) {
602 |                     var audioContext = new (window.AudioContext || window.webkitAudioContext)();
603 |                     window.mediaRecorder = new MediaRecorder(stream);
604 |                     window.audioChunks = [];
605 | 
606 |                     window.mediaRecorder.ondataavailable = function(e) {
607 |                         if (e.data.size > 0) {
608 |                             window.audioChunks.push(e.data);
609 |                         }
610 |                     };
611 |                     window.mediaRecorder.start();
612 |                 })
613 |             }
614 |         }
615 |         return ""
616 |     }
617 |     """,
618 |     Output("user-response-text", "value", allow_duplicate=True),
619 |     Input("button-record-audio", "n_clicks"),
620 |     prevent_initial_call=True,
621 | )
622 | 
623 | 
624 | # A clientside callback to stop the recording of the user's audio when they click on
625 | # "button-record-audio".
626 | clientside_callback(
627 |     """
628 |     function (n_clicks) {
629 |         if (n_clicks % 2 === 0) {
630 |             window.mediaRecorder.onstop = function() {
631 |                 var audioBlob = new Blob(window.audioChunks, { type: 'audio/wav' });
632 |                 var reader = new FileReader();
633 |                 reader.onload = function(event){
634 |                     var base64data = event.target.result.split(',')[1];
635 |                     fetch('/save_audio_recording', {
636 |                         method: 'POST',
637 |                         headers: {
638 |                             'Content-Type': 'application/json',
639 |                         },
640 |                         body: JSON.stringify({ audio_data: base64data }),
641 |                     });
642 |                 };
643 |                 reader.readAsDataURL(audioBlob);
644 |             };
645 |             window.mediaRecorder.stop();
646 |         }
647 |     }
648 |     """,
649 |     Output("user-response-text", "children"),
650 |     Input("button-record-audio", "n_clicks"),
651 |     prevent_initial_call=True,
652 | )
653 | 
--------------------------------------------------------------------------------
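Note: the stop-recording callback above POSTs the base64-encoded recording as JSON to a /save_audio_recording endpoint, whose server-side handler is not shown in this excerpt. As a rough sketch of the receiving side of that contract — assuming only the JSON shape sent by the clientside callback, and using a hypothetical output file name (audio_recording.wav) — a compatible Flask route could look like:

import base64

from flask import Flask, jsonify, request

server = Flask(__name__)


@server.route("/save_audio_recording", methods=["POST"])
def save_audio_recording():
    # The clientside callback sends {"audio_data": "<base64 string>"} as JSON.
    audio_data = (request.get_json() or {}).get("audio_data")
    if not audio_data:
        return jsonify({"error": "missing audio_data"}), 400

    # Decode the payload and write it to disk, where the server-side audio
    # code could pick it up for transcription (hypothetical file name).
    with open("audio_recording.wav", "wb") as f:
        f.write(base64.b64decode(audio_data))

    return jsonify({"status": "saved"}), 200

In the Dash app itself, a route like this would be registered on the app's underlying Flask instance (app.server) rather than on a standalone Flask object.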