├── ocrTranslate
├── __init__.py
├── gui
│ ├── __init__.py
│ ├── bindingEntry.py
│ ├── auto_resize_text_box.py
│ ├── AnimatedGif.py
│ ├── AnimatedGifButton.py
│ ├── tabviewChats.py
│ ├── auto_complete_combobox.py
│ └── complex_tk_gui.py
├── services
│ ├── __init__.py
│ ├── webspeechdemo
│ │ ├── mic.gif
│ │ ├── mic-slash.gif
│ │ ├── mic-animate.gif
│ │ └── webspeechdemo.html
│ ├── rapid_ocr.py
│ ├── capture_2_text.py
│ ├── tesseract.py
│ ├── multi_translators.py
│ ├── baidu.py
│ ├── deepL.py
│ ├── edge_gpt.py
│ ├── Get-Win10OcrTextFromImage.ps1
│ ├── speach_to_text_multi_services.py
│ ├── chatGPT_free3.py
│ ├── speach_to_text_web_google.py
│ ├── google_api.py
│ └── google_free.py
├── assets
│ ├── icon.ico
│ ├── icon2.ico
│ ├── point.jpg
│ ├── home_dark.png
│ ├── loading.png
│ ├── reverse.png
│ ├── home_light.png
│ ├── test_image.png
│ ├── chatai_black.png
│ ├── chatai_white.png
│ ├── reverse_black.png
│ ├── settings_dark.png
│ ├── settings_light.png
│ ├── microphone_white.png
│ ├── microphone_active.png
│ ├── open_side_menu_dark.png
│ ├── send_message_black.png
│ ├── send_message_white.png
│ ├── microphone_white_org.png
│ └── open_side_menu_white.png
│   ├── utils.py
│   ├── config_files.py
│   └── assets.py
├── requirements.txt
├── documentation_images
│   ├── Showrun.gif
│   ├── GUI_preview.png
│   └── GUI_preview_example.png
├── LICENSE.md
├── .gitignore
├── README.md
└── main.py
/ocrTranslate/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ocrTranslate/gui/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ocrTranslate/services/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/requirements.txt
--------------------------------------------------------------------------------
/ocrTranslate/assets/icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/icon.ico
--------------------------------------------------------------------------------
/ocrTranslate/assets/icon2.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/icon2.ico
--------------------------------------------------------------------------------
/ocrTranslate/assets/point.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/point.jpg
--------------------------------------------------------------------------------
/documentation_images/Showrun.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/documentation_images/Showrun.gif
--------------------------------------------------------------------------------
/ocrTranslate/assets/home_dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/home_dark.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/loading.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/loading.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/reverse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/reverse.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/home_light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/home_light.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/test_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/test_image.png
--------------------------------------------------------------------------------
/documentation_images/GUI_preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/documentation_images/GUI_preview.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/chatai_black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/chatai_black.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/chatai_white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/chatai_white.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/reverse_black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/reverse_black.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/settings_dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/settings_dark.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/settings_light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/settings_light.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/microphone_white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/microphone_white.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/microphone_active.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/microphone_active.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/open_side_menu_dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/open_side_menu_dark.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/send_message_black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/send_message_black.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/send_message_white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/send_message_white.png
--------------------------------------------------------------------------------
/ocrTranslate/services/webspeechdemo/mic.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/services/webspeechdemo/mic.gif
--------------------------------------------------------------------------------
/documentation_images/GUI_preview_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/documentation_images/GUI_preview_example.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/microphone_white_org.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/microphone_white_org.png
--------------------------------------------------------------------------------
/ocrTranslate/assets/open_side_menu_white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/assets/open_side_menu_white.png
--------------------------------------------------------------------------------
/ocrTranslate/services/webspeechdemo/mic-slash.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/services/webspeechdemo/mic-slash.gif
--------------------------------------------------------------------------------
/ocrTranslate/services/webspeechdemo/mic-animate.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azornes/ocrTranslator/HEAD/ocrTranslate/services/webspeechdemo/mic-animate.gif
--------------------------------------------------------------------------------
/ocrTranslate/services/rapid_ocr.py:
--------------------------------------------------------------------------------
1 | from rapidocr_openvino import RapidOCR
2 |
3 |
class RapidOcr:
    """OCR backend wrapping rapidocr_openvino's RapidOCR engine."""

    def __init__(self) -> None:
        # The engine (and its models) is loaded eagerly at construction.
        self.rapid_ocr = RapidOCR()

    def run_ocr(self, path: str) -> str:
        """Run OCR on the image at *path* and return the recognized text.

        Each detected line is echoed to stdout; embedded newlines are
        dropped and the lines are joined with single spaces (keeping a
        trailing space). Returns "text not found" when nothing is detected.
        """
        detections, _elapsed = self.rapid_ocr(path)
        if detections is None:
            return "text not found"
        pieces = []
        for detection in detections:
            print(detection[1])
            pieces.append(detection[1].replace("\n", "") + " ")
        return "".join(pieces)
19 |
20 |
21 | # rapid_ocr = RapidOcrr()
22 | # test = rapid_ocr.ocr_by_rapid("C:\\Programowanie\\Projekty\\Python\\HelpApps\\ocrTranslator3\\ocrTranslate\\temp\\temp.png")
23 | # print(test)
--------------------------------------------------------------------------------
/ocrTranslate/services/capture_2_text.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 |
4 | from ocrTranslate.assets import Assets as assets
5 | from ocrTranslate.utils import parse_html_link
6 |
7 |
class Capture2Text:
    """Wrapper around the external Capture2Text_CLI.exe OCR tool."""

    def __init__(self, path_to_Capture2Text_CLI_exe="", ) -> None:
        """Remember the CLI path if it exists; otherwise mark the service inactive."""
        # Always define the attribute so run_ocr() never hits AttributeError.
        self.path_to_Capture2Text_CLI_exe = ""
        if os.path.exists(path_to_Capture2Text_CLI_exe):
            self.path_to_Capture2Text_CLI_exe = parse_html_link(path_to_Capture2Text_CLI_exe)
            self.is_active = True
        else:
            self.is_active = False

    def run_ocr(self) -> str:
        """OCR the temp screenshot (assets.path_to_tmp) via Capture2Text_CLI.

        Returns the decoded CLI output, or an error message when the CLI path
        was invalid at construction time (previously this raised
        AttributeError because the path attribute was never set).
        """
        if not self.is_active:
            return "Path to Capture2Text_CLI.exe is invalid"
        command = (
            self.path_to_Capture2Text_CLI_exe
            + ' --image {path_to_tmp} '.format(path_to_tmp=assets.path_to_tmp)
            + '--output-format ${capture}'
        )
        # check_output returns bytes; decode so the declared -> str holds.
        return subprocess.check_output(command).decode("utf-8", errors="replace")
--------------------------------------------------------------------------------
/ocrTranslate/services/tesseract.py:
--------------------------------------------------------------------------------
1 | import os
2 | from ocrTranslate.utils import parse_html_link
3 | import pytesseract
4 |
5 |
class Tesseract:
    """OCR backend backed by a local tesseract.exe via pytesseract."""

    def __init__(self, path_to_tesseract_exe="", ) -> None:
        """Activate the backend only when the given executable path exists."""
        if not os.path.exists(path_to_tesseract_exe):
            self.is_active = False
            return
        self.path_to_tesseract_exe = parse_html_link(path_to_tesseract_exe)
        # Point pytesseract at the configured binary (module-global setting).
        pytesseract.pytesseract.tesseract_cmd = self.path_to_tesseract_exe
        self.is_active = True

    def run_ocr(self, path_image: str) -> str:
        """Return the text pytesseract extracts from *path_image* (also printed)."""
        if not self.is_active:
            return "Path to tesseract.exe is invalid"
        text = pytesseract.image_to_string(path_image)
        print(text)
        return text
--------------------------------------------------------------------------------
/ocrTranslate/services/multi_translators.py:
--------------------------------------------------------------------------------
1 | from requests import ReadTimeout
2 | import translators as ts
3 | from ocrTranslate.utils import format_words
4 | from ocrTranslate.langs import convert_language
5 |
6 |
class MultiTranslators:
    """Thin facade over the `translators` package (bing, google, ...)."""

    def __init__(self) -> None:
        # Log which translation services the installed package exposes.
        print(ts.translators_pool)
        # ts.preaccelerate()  # optional warm-up, intentionally disabled

    def run_translate(self, word, language_from="auto", language_to="English", translator="bing"):
        """Translate *word* (str or list of str) via *translator*.

        Timeouts and unsupported-language errors are mapped to readable
        messages instead of propagating to the caller.
        """
        src_lang, dst_lang = convert_language(language_from, language_to)
        query = format_words(word)
        try:
            return ts.translate_text(query_text=query, translator=translator, from_language=src_lang, to_language=dst_lang, timeout=30)
        except ReadTimeout:
            return "Timeout, service is busy, try again later"
        except ts.server.TranslatorError:
            return "Bad language, choose another language"
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Azornes
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/ocrTranslate/services/baidu.py:
--------------------------------------------------------------------------------
1 | from secrets import compare_digest
2 | from aip import AipOcr
3 |
4 |
class Baidu:
    """OCR backend using Baidu's AipOcr cloud service."""

    def __init__(self, appid="", apikey="", secretkey="") -> None:
        """Store credentials and create the client when all three are non-empty."""
        self.baidu_client = None
        # compare_digest kept from the original; a plain `!= ""` would do,
        # but the constant-time comparison is harmless here.
        if not compare_digest(appid, "") and not compare_digest(apikey, "") and not compare_digest(secretkey, ""):
            self.appid = appid
            self.apikey = apikey
            self.secretkey = secretkey
            self.is_active = True
            self.renew_chatbot_session_password()
        else:
            self.is_active = False

    def renew_chatbot_session_password(self):
        """(Re)create the AipOcr client; deactivate the backend on failure.

        NOTE(review): the name looks copy-pasted from a chatbot service; it is
        kept because external callers may rely on it.
        """
        try:
            self.baidu_client = AipOcr(self.appid, self.apikey, self.secretkey)
        except Exception as e:
            self.is_active = False
            print(e)
            print("appid or apikey or secretkey are invalid")

    def run_ocr(self, img) -> str:
        """Run basic OCR on *img* (image bytes) and return the joined text.

        Returns "" when no client is configured — previously this crashed
        with AttributeError on the None client.
        """
        if self.baidu_client is None:
            return ""
        result = self.baidu_client.basicGeneral(img)
        return self.get_result_text(result)

    def get_result_text(self, json):
        """Join all recognized words from a Baidu response, one per CRLF line."""
        # Plain `in` instead of the __contains__ dunder call; join instead of
        # repeated string concatenation.
        if 'words_result_num' in json and json['words_result_num'] > 0:
            return ''.join(
                json['words_result'][i]['words'] + '\r\n'
                for i in range(json['words_result_num'])
            )
        return ''
--------------------------------------------------------------------------------
/ocrTranslate/utils.py:
--------------------------------------------------------------------------------
def list_to_string(s):
    """Concatenate the elements of *s*, each followed by a single space.

    Preserves the original contract exactly: a non-empty input yields a
    trailing space; an empty input yields "". Uses str.join instead of the
    original quadratic `+=` accumulation (still raises TypeError for
    non-string elements, as before).
    """
    return "".join(ele + " " for ele in s)
9 |
10 |
def parse_html_link(link):
    """Normalize a path-like string to single-backslash (Windows) separators.

    Fixes the original logic, which replaced only the first slash style it
    saw ('//' *or* '/', never both, leaving remaining '/' untouched) and
    whose '\\\\' -> '\\' collapse branch was unreachable because it sat
    behind an `elif` on the always-weaker '\\' membership test.
    """
    # Convert every forward slash, then collapse runs of backslashes.
    link = link.replace('/', '\\')
    while '\\\\' in link:
        link = link.replace('\\\\', '\\')
    return link
21 |
22 |
def print_error_msg(msg, where):
    """Print *msg* and its origin *where* to stdout in red (ANSI escapes)."""
    red, reset = "\033[91m", "\033[0m"
    print(f"\n{red}Error name: {msg}\nException occurs in: {where}{reset}")
25 |
26 |
def format_words(words_list):
    """Return the non-blank words of *words_list* joined by newlines.

    A plain string is returned stripped of surrounding whitespace; a list
    yields its stripped, non-empty items separated by '\n' (no trailing
    newline); any other input yields "".
    """
    if isinstance(words_list, str):
        return words_list.strip()
    if isinstance(words_list, list):
        return "\n".join(w.strip() for w in words_list if w.strip())
    return ""
40 |
41 |
def format_words2(word):
    """Legacy variant of format_words: join non-empty items, '\n'-terminated.

    Unlike format_words, items are not stripped, the result keeps its
    trailing newline, and strings pass through completely untouched.
    """
    if type(word) is str:
        return word
    return "".join(item + "\n" for item in word if item != "")
--------------------------------------------------------------------------------
/ocrTranslate/services/deepL.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import deepl
4 | from ocrTranslate.langs import _langs, convert_language
5 | from ocrTranslate.utils import format_words
6 |
7 |
class DeepL:
    """Translation backend wrapping the deepl-cli (and legacy deepl-translate) libraries."""

    def __init__(self) -> None:
        self.email = None
        try:
            # Languages are placeholders; run_translate overwrites them per call.
            self.deepl_init = deepl.DeepLCLI("auto", "en")
        except Exception as e:
            # Keep the object constructible even when deepl-cli is unusable.
            self.deepl_init = None
            print(e)

    # deepl-translate library
    def run_translate_old(self, word, language_from: str = "English", language_to: str = " Polish"):
        """Translate via the legacy deepl-translate API.

        *word* may be a str (passed through) or an iterable of str (joined
        with trailing newlines, empty items skipped).
        """
        if type(word) != str:
            words = "".join(i + "\n" for i in word if i != "")
        else:
            words = word
        return deepl.translate(source_language=language_from, target_language=language_to, text=words)

    # deepl-cli library
    def run_translate(self, word, language_from: str = "English", language_to: str = "Polish"):
        """Translate synchronously via deepl-cli.

        The original created a fresh event loop on every call and never
        closed it (a resource leak); the loop is now torn down in `finally`.
        """
        self.deepl_init.from_lang, self.deepl_init.to_lang = convert_language(language_from, language_to)
        words = format_words(word)
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            return self.deepl_init.translate(words)
        finally:
            asyncio.set_event_loop(None)
            loop.close()

    async def run_translate_async(self, word, language_from: str = "English", language_to: str = "Polish"):
        """Async translation via deepl-cli; runs on the caller's event loop."""
        self.deepl_init.from_lang, self.deepl_init.to_lang = convert_language(language_from, language_to)
        words = format_words(word)
        return await self.deepl_init.translate_async(words)
--------------------------------------------------------------------------------
/ocrTranslate/gui/bindingEntry.py:
--------------------------------------------------------------------------------
1 | import customtkinter as ctk
2 | import keyboard
3 |
4 |
class BindingEntry(ctk.CTkEntry):
    """A read-only entry widget that records a hotkey combination.

    While keys are held, the pressed combination is rendered as e.g.
    "Control+Shift+s" in the entry; on release the combination is
    registered as a global hotkey via the `keyboard` package.

    NOTE(review): the three bind() event strings below are empty ("") —
    they look like Tk event sequences (e.g. "<KeyPress>", "<KeyRelease>",
    "<Button-1>") whose angle-bracketed content was stripped when this file
    was exported. As written, bind("") cannot fire; restore the original
    sequences from version control.
    """
    def __init__(self, root, focus_on=None, hotkey_function=None):
        # focus_on: widget to give focus back to once a combination is typed.
        # hotkey_function: callable registered as the global hotkey handler.
        ctk.CTkEntry.__init__(self, root, placeholder_text="")
        self.root = root
        self.focus_on = focus_on
        self.hotkey_function = hotkey_function
        self.key_combination = []  # keysyms currently held down, in press order
        self.bind("", self.on_key_press)
        self.bind("", self.on_key_release)
        self.bind("", self.on_mouse_press)

    def on_key_press(self, event):
        """Add the pressed key to the combination and display it in the entry."""
        # Strip the left/right suffix from modifier keysyms (Shift_L -> Shift).
        separator = "_"
        if separator in event.keysym:
            first_part, second_part = event.keysym.split(separator, 1)
            new_string = separator.join([first_part])
        else:
            new_string = event.keysym
        if new_string not in self.key_combination:
            self.key_combination.append(new_string)

        # Temporarily writable so the text can be replaced, then locked again.
        self.configure(state="normal", border_color="orange")
        self.delete(0, 'end')
        self.insert(0, "+".join(self.key_combination))
        self.configure(state="readonly")

    def on_key_release(self, event):
        """Finish the combination: restore focus and register the global hotkey."""
        self.configure(border_color="grey")
        self.focus_on.focus_set()
        self.key_combination = []
        if self.hotkey_function is not None and self.get() != "":
            try:
                keyboard.clear_all_hotkeys()
                keyboard.add_hotkey(self.get(), self.hotkey_function, args=('From global keystroke',))
            except AttributeError:
                # NOTE(review): presumably covers keyboard versions where
                # clear_all_hotkeys is absent/renamed — confirm.
                pass

    def on_mouse_press(self, event):
        """Clicking the entry arms it (readonly, orange border) for key capture."""
        self.configure(state="readonly", border_color="orange")
44 |
--------------------------------------------------------------------------------
/ocrTranslate/gui/auto_resize_text_box.py:
--------------------------------------------------------------------------------
1 | import customtkinter as ctk
2 |
class ModifiedMixin:
    """Mixin for a Tk text widget that fires beenModified() once per edit.

    Tk only raises its modified event again after the widget's modified
    flag is reset; this mixin resets the flag after every event and guards
    against the recursive event that the reset itself triggers.

    NOTE(review): the bind('<>') sequence below is empty — almost certainly
    '<<Modified>>' with its bracketed name stripped during export; restore
    it from version control.
    """
    def _init(self):
        # Call from the subclass constructor: installs the flag and binding.
        self.clearModifiedFlag()
        self.bind('<>', self._beenModified)

    def _beenModified(self, event=None):
        # Ignore the event caused by our own flag reset below.
        if self._resetting_modified_flag: return
        self.clearModifiedFlag()
        self.beenModified(event)

    def beenModified(self, event=None):
        # Hook for subclasses; called once per real modification.
        pass

    def clearModifiedFlag(self):
        # Reset Tk's modified flag on the inner text widget; the guard flag
        # suppresses the <<Modified>> event this Tcl call triggers.
        self._resetting_modified_flag = True

        try:
            self._textbox.tk.call(self._textbox._w, 'edit', 'modified', 0)

        finally:
            self._resetting_modified_flag = False
24 |
25 |
class AutoResizeTextBox(ModifiedMixin, ctk.CTkTextbox):
    """A CTkTextbox that grows with its content (capped) and submits on Enter.

    NOTE(review): the three bind("") event strings below are empty — likely
    Tk sequences such as "<Return>"/"<Shift-Return>" stripped during export;
    restore them from version control.
    """
    def __init__(self, root, button = None):
        # button: widget whose .invoke() is triggered on plain Enter.
        ctk.CTkTextbox.__init__(self, root, height=50, undo=True, autoseparators=True, border_width=2)
        # Initialize the ModifiedMixin.
        self._init()
        self.root = root
        self.button = button
        self.bind("", self.shift_enter)
        self.bind("", self.shift_enter)
        self.bind("", lambda e: "break")

    def beenModified(self, event=None):
        # Called once per real edit (see ModifiedMixin): resize to fit.
        self.change_size_textbox(event)

    def change_size_textbox(self, event):
        """Grow/shrink the widget to fit the displayed lines, capped at 105 px."""
        # 'displaylines' counts wrapped (visual) lines, not logical lines.
        cursor_index = self._textbox.count('1.0', 'end', 'displaylines')[0]
        new_height = (cursor_index * 15) + 15  # ~15 px per line plus padding
        # print(cursor_index)
        # print(self.textbox_chat_frame.cget('height'))
        if new_height != self.cget('height') and self.cget('height') <= 105:
            if new_height >= 105:
                self.configure(height=105)
            else:
                self.configure(height=new_height)

    def shift_enter(self, event=None):
        """Handle Return: plain Enter submits via *button*; Shift+Enter does not.

        NOTE(review): event.state == 9 / == 8 are taken as Shift-held vs plain
        modifier masks — confirm these values on the target platform.
        """
        if event is not None and event.keysym == 'Return' and event.state == 9:
            #self.change_size_textbox(event)
            pass
        elif event is not None and event.keysym == 'Return' and event.state == 8:
            if self.get("0.0", "end").strip():
                self.button.invoke()
58 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | .idea
132 |
133 | GoogleFreeSpeach
134 | GoogleFreeVision
135 | na wierzchu.ahk
136 | RunScreenshot2.bat
137 | configs
138 | test
139 | temp/
140 | .chatgpt_cache.json
141 | /ocrTranslate/translatesubs/
142 | /komendy_do_budowania.txt
143 |
--------------------------------------------------------------------------------
/ocrTranslate/config_files.py:
--------------------------------------------------------------------------------
1 | from configparser import RawConfigParser
2 |
3 | from ocrTranslate.services.baidu import Baidu
4 | from ocrTranslate.services.capture_2_text import Capture2Text
5 | from ocrTranslate.services.edge_gpt import EdgeGPTFree
6 | from ocrTranslate.services.google_free import GoogleFree
7 | from ocrTranslate.services.google_api import GoogleAPI
8 | from ocrTranslate.services.chatGPT_free3 import ChatGPTFree
9 | from ocrTranslate.services.deepL import DeepL
10 | from ocrTranslate.services.multi_translators import MultiTranslators
11 | from ocrTranslate.assets import Assets as assets
12 | from ocrTranslate.services.rapid_ocr import RapidOcr
13 |
14 | from ocrTranslate.services.tesseract import Tesseract
15 |
# --- Module-level service singletons -----------------------------------
# Each OCR / translation backend is constructed exactly once at import
# time; the rest of the app imports these instances from this module.

google_free = GoogleFree()
google_api = GoogleAPI(path_service_account_creds=assets.path_service_account_creds)

config = RawConfigParser()

# Legacy config.ini wiring, kept for reference:
# config.read(assets.path_config_ini)
#
# capture2Text = Capture2Text(path_to_Capture2Text_CLI_exe=config['Capture2Text']["path_to_Capture2Text_CLI_exe"])
# # chatGpt = ChatGPT(config['ChatGPT']["ApiKey"], "api")
# # chatGpt = ChatGPTFree(config['ChatGPT']["session_token"])
# chatGpt = ChatGPTFree(config['ChatGPT']["email"], config['ChatGPT']["password"])
# baidu_client = AipOcr(config['Baidu']["AppId"], config['Baidu']["ApiKey"], config['Baidu']["SecretKey"])

config.read(assets.path_settings_gui)

# Each backend falls back to an unconfigured (inactive) instance when its
# key is missing from settings.ini.
try:
    capture2Text = Capture2Text(path_to_Capture2Text_CLI_exe=config["settings"]['entry_capture2text_path_to_capture2text_cli_exe'])
except KeyError:
    capture2Text = Capture2Text()

try:
    tesseract = Tesseract(path_to_tesseract_exe=config["settings"]['entry_tesseract_path_to_tesseract_exe'])
except KeyError:
    tesseract = Tesseract()


try:
    chatGpt = ChatGPTFree(access_token=config["settings"]['entry_chatgpt_access_token'])
except KeyError:
    chatGpt = ChatGPTFree()

# try:
#     chatGpt = ChatGPTFree(email=config["settings"]['entry_chatgpt_email'],
#                           password=config["settings"]['entry_chatgpt_password'],
#                           session_token=config["settings"]['entry_chatgpt_session_token'],
#                           access_token=config["settings"]['entry_chatgpt_access_token']
#                           )
# except KeyError:
#     chatGpt = ChatGPTFree()

# NOTE(review): this try body reads no config key, so the KeyError fallback
# below can never trigger — presumably left over from the pattern above.
try:
    edgeGpt = EdgeGPTFree(cookie_path=assets.path_cookies_edge_gpt)
except KeyError:
    edgeGpt = EdgeGPTFree()


try:
    baidu = Baidu(appid=config["settings"]['entry_baidu_appid'], apikey=config["settings"]['entry_baidu_apikey'], secretkey=config["settings"]['entry_baidu_secretkey'])
except KeyError:
    baidu = Baidu()


# Backends that need no configuration.
deepL = DeepL()
multi_translators = MultiTranslators()
rapid_ocr = RapidOcr()
71 |
--------------------------------------------------------------------------------
/ocrTranslate/assets.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 |
def resolve_path(path):
    """Resolve *path* to an absolute path against the application root.

    Inside a PyInstaller bundle (sys.frozen set), files live in the unpack
    directory sys._MEIPASS; otherwise paths are taken relative to the
    current working directory.
    """
    base = sys._MEIPASS if getattr(sys, "frozen", False) else os.getcwd()
    return os.path.abspath(os.path.join(base, path))
11 |
12 |
# Ensure the runtime directories exist. exist_ok=True removes the
# check-then-create race of the original os.path.exists() guards.
for _runtime_dir in ("ocrTranslate/temp", "ocrTranslate/configs"):
    os.makedirs(resolve_path(_runtime_dir), exist_ok=True)
18 |
19 |
class Assets:
    """Central registry of filesystem paths used across the application.

    All paths are resolved once, at import time, via resolve_path().
    """

    # Window / tray icons.
    path_to_icon = resolve_path("ocrTranslate/assets/icon.ico")
    path_to_icon2 = resolve_path("ocrTranslate/assets/icon2.ico")
    reverse_icon = resolve_path("ocrTranslate/assets/reverse.png")

    # Scratch files for screen captures and OCR input.
    path_to_tmp = resolve_path("ocrTranslate/temp/temp.bmp")
    path_to_tmp2 = resolve_path("ocrTranslate/temp/temp.png")
    path_to_tmp3 = resolve_path("ocrTranslate/temp/temp.gif")
    path_to_tmp4 = resolve_path("ocrTranslate/temp/screencapture.png")

    path_to_test_image = resolve_path("ocrTranslate/assets/test_image.png")
    path_to_point_image = resolve_path("ocrTranslate/assets/point.jpg")

    # Config / credential files.
    path_config_ini = resolve_path("ocrTranslate/configs/config.ini")
    # Fixed: the original used bare backslashes ("\s", "\w"), which are
    # invalid escape sequences and Windows-only; os.path normalizes '/'.
    path_web_speech_demo = resolve_path("ocrTranslate/services/webspeechdemo/webspeechdemo.html")
    path_service_account_creds = resolve_path("ocrTranslate/configs/service_account_creds.json")
    path_cookies_edge_gpt = resolve_path("ocrTranslate/configs/cookies.json")
    path_settings_gui = resolve_path("ocrTranslate/configs/settings.ini")

    path_to_win_ocr = resolve_path("ocrTranslate/services/Get-Win10OcrTextFromImage.ps1")

    # GUI images.
    path_to_home_dark = resolve_path("ocrTranslate/assets/home_dark.png")
    path_to_home_light = resolve_path("ocrTranslate/assets/home_light.png")
    path_to_settings_dark = resolve_path("ocrTranslate/assets/settings_dark.png")
    path_to_settings_light = resolve_path("ocrTranslate/assets/settings_light.png")
    # NOTE(review): loading.gif does not appear in the assets directory
    # listing — verify this file actually exists.
    path_to_loading_gif = resolve_path("ocrTranslate/assets/loading.gif")
    path_to_loading_png = resolve_path("ocrTranslate/assets/loading.png")
    path_to_microphone_active_png = resolve_path("ocrTranslate/assets/microphone_active.png")
    path_to_chatai_black = resolve_path("ocrTranslate/assets/chatai_black.png")
    path_to_chatai_white = resolve_path("ocrTranslate/assets/chatai_white.png")
    path_to_microphone_white = resolve_path("ocrTranslate/assets/microphone_white.png")
    path_to_send_message_black = resolve_path("ocrTranslate/assets/send_message_black.png")
    path_to_send_message_white = resolve_path("ocrTranslate/assets/send_message_white.png")
    path_to_open_side_menu_dark = resolve_path("ocrTranslate/assets/open_side_menu_dark.png")
    path_to_open_side_menu_white = resolve_path("ocrTranslate/assets/open_side_menu_white.png")
--------------------------------------------------------------------------------
/ocrTranslate/gui/AnimatedGif.py:
--------------------------------------------------------------------------------
1 | """ AnimatedGIF - a class to show an animated gif without blocking the tkinter mainloop()
2 |
3 | Copyright (c) 2016 Ole Jakob Skjelten
4 | Released under the terms of the MIT license (https://opensource.org/licenses/MIT) as described in LICENSE.md
5 |
6 | """
7 | import sys
8 | import time
9 | from ocrTranslate.assets import Assets as assets
10 |
11 | try:
12 | import Tkinter as tk # for Python2
13 | except ImportError:
14 | import tkinter as tk # for Python3
15 |
16 | import customtkinter as ctk
17 |
18 | from PIL import Image
19 |
20 |
class AnimatedGif(ctk.CTkLabel):
    """
    Label that plays an animated image without blocking the tkinter mainloop().

    Two driving modes are offered:
      * start()/stop() schedule frames with Tk's after() on the main loop;
      * start_thread()/stop_thread() run the frame loop in a worker thread.
    """

    def __init__(self, root, gif_file, delay=0.04, size=(40, 40), hide=True):
        """
        :param root: parent widget (also used to schedule after() callbacks)
        :param gif_file: filename (and path) of the animated image
        :param delay: delay between frames in seconds (float)
        :param size: (width, height) the frames are rendered at
        :param hide: when True the label is grid_forget()-hidden while stopped
        """
        ctk.CTkLabel.__init__(self, root, text="")
        self.root = root
        self.hide = hide
        self.gif_file = gif_file
        self.delay = delay  # try low floats, like 0.04 (depends on the gif in question)
        self.stop_animation = False  # exit-request flag shared by both driving modes
        self.size = size
        self.grid_saved = self.grid_info()  # remembered so the label can be re-shown in place
        self._num = 0  # index of the frame that was shown last

    def grid_save(self):
        """Remember the current grid placement so grid_restore() can re-apply it."""
        self.grid_saved = self.grid_info()

    def grid_restore(self):
        """Re-apply the placement captured by grid_save()."""
        self.grid(**self.grid_saved)

    def start(self):
        """Starts non-threaded version that we need to manually update()."""
        self.start_time = time.time()  # starting timer (not read back here; kept for callers)
        self.stop_animation = False
        if self.hide:
            self.grid_restore()
        self._animate()

    def stop(self):
        """Stops the after() loop that runs the animation."""
        self.stop_animation = True
        if self.hide:
            self.grid_forget()

    def _animate(self):
        """Show the next frame and re-schedule itself until stopped."""
        # BUGFIX: previously this opened the hard-coded assets.path_to_loading_png,
        # silently ignoring the gif_file passed to __init__ (the sibling
        # AnimatedGifButton._animate already used self.gif_file).
        with Image.open(self.gif_file) as im:
            try:
                im.seek(self._num + 1)
                self.gif = ctk.CTkImage(light_image=im, dark_image=im, size=self.size)
                self.configure(image=self.gif)
                self._num += 1
            except EOFError:
                self._num = -1  # past the last frame: restart from the beginning
            if not self.stop_animation:  # if the stop flag is set, we don't repeat
                self.root.after(int(self.delay * 1000), self._animate)

    def start_thread(self):
        """Starts the thread that runs the animation (threaded approach)."""
        from threading import Thread  # imported lazily; only needed in this mode
        # BUGFIX: Thread(...).start() returns None, so the old code stored None in
        # _animation_thread; keep the Thread object so it can be joined/inspected.
        self._animation_thread = Thread(target=self._animate_thread)
        self._animation_thread.start()

    def stop_thread(self):
        """Asks the threaded animation loop to exit."""
        self.stop_animation = True

    def _animate_thread(self):
        """Frame loop for the threaded mode; runs until stop_animation is set."""
        while self.stop_animation is False:  # runs in its own thread, so mainloop() is not blocked
            try:
                time.sleep(self.delay)
                self.gif = tk.PhotoImage(file=self.gif_file, format='gif -index {}'.format(self._num))  # looping through the frames
                self.configure(image=self.gif)
                self._num += 1
            except tk.TclError:  # frame index past the end: start over from zero
                self._num = 0
            except RuntimeError:  # interpreter is shutting down
                sys.exit()
97 |
--------------------------------------------------------------------------------
/ocrTranslate/services/edge_gpt.py:
--------------------------------------------------------------------------------
1 | import os
2 | import uuid
3 |
4 | import asyncio
5 | from enum import Enum
6 |
7 | from EdgeGPT import Chatbot
8 | from ocrTranslate.assets import Assets as assets
9 |
10 | from secrets import compare_digest
11 |
12 | from ocrTranslate.utils import format_words, format_words2
13 |
class ConversationStyle(Enum):
    """Named Bing Chat conversation styles with their option strings.

    NOTE(review): this enum appears unused in this module -- the ask()/
    ask_stream() calls below pass plain strings ("precise", "balanced")
    instead; confirm before relying on it or removing it.
    """
    creative = "h3imaginative,clgalileo,gencontentv3"
    balanced = "galileo"
    precise = "h3precise,clgalileo"
18 |
class EdgeGPTFree:
    """Thin wrapper around EdgeGPT's Chatbot (Bing Chat).

    The instance is only usable when a cookies.json file exists at the given
    cookie_path; otherwise every call returns/yields self.error_message.
    """

    def __init__(self, cookie_path="") -> None:
        """
        :param cookie_path: path to the cookies.json exported from bing.com
        """
        self.edge_gpt_free = None
        self.error_message = "Not initialized. Please go to https://bing.com/ and retrieve the cookies.json file, then paste it into the config folder."
        if not os.path.exists(cookie_path):
            self.is_active = False
            return
        self.cookie_path = cookie_path
        self.is_active = True
        self.error_message = "Your cookies are expired. Please go to https://bing.com/ and retrieve the cookies.json file, then paste it into the config folder."
        self.renew_chatbot_cookies()

    def renew_chatbot_cookies(self):
        """Re-create the Chatbot from the stored cookie file; deactivate on failure."""
        try:
            self.edge_gpt_free = Chatbot(cookie_path=self.cookie_path)
        except Exception as exc:
            self.is_active = False
            print(exc)
            print(self.error_message)

    async def run_translate_async(self, word, language_to="English", prompt=""):
        """Async generator yielding translation deltas for *word*."""
        if not self.is_active:
            yield self.error_message
            return
        words = format_words(word)
        if prompt == "":
            full_prompt = "Please, translate the following text into {languageInText} yourself, without using a web translation service and without any explanation. Write only translated text. This is text to translate: {wordInText}".format(wordInText=words, languageInText=language_to)
        else:
            full_prompt = prompt + " " + words

        seen = ""
        async for data in self.edge_gpt_free.ask_stream(prompt=full_prompt, conversation_style="precise", wss_link="wss://sydney.bing.com/sydney/ChatHub"):
            print(data[0])
            print(data[1])
            if data[0]:  # final record: stop streaming
                break
            yield data[1][len(seen):]
            seen = data[1]

    async def run_chat_ai(self, prompt=""):
        """Send *prompt* as a chat message; return the complete answer."""
        print("run_chat_ai")
        if not self.is_active:
            return self.error_message
        return await self.edge_gpt_free.ask(prompt=prompt, conversation_style="balanced", wss_link="wss://sydney.bing.com/sydney/ChatHub")

    async def run_chat_ai_async(self, prompt=""):
        """Async generator yielding chat-answer deltas for *prompt*."""
        if not self.is_active:
            yield self.error_message
            return
        seen = ""
        async for data in self.edge_gpt_free.ask_stream(prompt=prompt, conversation_style="balanced", wss_link="wss://sydney.bing.com/sydney/ChatHub"):
            if not data[0]:
                yield data[1][len(seen):]
                seen = data[1]
84 |
85 |
def test_edge_gpt():
    """Manual smoke test: exercises EdgeGPTFree against a local cookies.json."""
    try:
        # FIX: raw string -- the original literal relied on invalid escape
        # sequences ("\P", "\H", ...) that only survive through CPython's
        # lenient handling of unknown escapes (a DeprecationWarning today).
        edgeGpt = EdgeGPTFree(cookie_path=r"C:\Programowanie\Projekty\Python\HelpApps\ocrTranslator3\ocrTranslate\configs\cookies.json")
    except KeyError:
        edgeGpt = EdgeGPTFree()

    async def display_chat_ChatGPT(word):
        # print each streamed delta as it arrives
        async for response in edgeGpt.run_chat_ai_async(word):
            print(response)

    asyncio.run(display_chat_ChatGPT("can you remember the first time you saw a computer?"))

    print(asyncio.run(edgeGpt.run_chat_ai("can you remember the first time you saw a computer?")))
    print(asyncio.run(edgeGpt.run_chat_ai("what is your favorite color?")))
100 |
101 | #test_edge_gpt()
--------------------------------------------------------------------------------
/ocrTranslate/services/Get-Win10OcrTextFromImage.ps1:
--------------------------------------------------------------------------------
# https://github.com/HumanEquivalentUnit/PowerShell-Misc

using namespace Windows.Storage
using namespace Windows.Graphics.Imaging

<#
.Synopsis
Runs Windows 10 OCR on an image.
.DESCRIPTION
Takes a path to an image file, with some text on it.
Runs Windows 10 OCR against the image.
Returns an [OcrResult], hopefully with a .Text property containing the text
.EXAMPLE
$result = .\Get-Win10OcrTextFromImage.ps1 -Path 'c:\test.bmp'
$result.Text
#>
[CmdletBinding()]
Param
(
    # Path to an image file
    [Parameter(Mandatory=$true,
               ValueFromPipeline=$true,
               ValueFromPipelineByPropertyName=$true,
               Position=0,
               HelpMessage='Path to an image file, to run OCR on')]
    [ValidateNotNullOrEmpty()]
    $Path
)

Begin {
    # Add the WinRT assembly, and load the appropriate WinRT types
    Add-Type -AssemblyName System.Runtime.WindowsRuntime

    # Referencing each type with "ContentType = WindowsRuntime" forces the
    # WinRT projection for it to load before it is used below.
    $null = [Windows.Storage.StorageFile, Windows.Storage, ContentType = WindowsRuntime]
    $null = [Windows.Media.Ocr.OcrEngine, Windows.Foundation, ContentType = WindowsRuntime]
    $null = [Windows.Globalization.Language, Windows.Foundation, ContentType = WindowsRuntime]
    $null = [Windows.Foundation.IAsyncOperation`1, Windows.Foundation, ContentType = WindowsRuntime]
    $null = [Windows.Graphics.Imaging.SoftwareBitmap, Windows.Foundation, ContentType = WindowsRuntime]
    $null = [Windows.Storage.Streams.RandomAccessStream, Windows.Storage.Streams, ContentType = WindowsRuntime]


    # [Windows.Media.Ocr.OcrEngine]::AvailableRecognizerLanguages
    $ocrEngine = [Windows.Media.Ocr.OcrEngine]::TryCreateFromUserProfileLanguages()
    # $language = [Windows.Globalization.Language]("pl")
    # $ocrEngine = [Windows.Media.Ocr.OcrEngine]::TryCreateFromLanguage($language)


    # PowerShell doesn't have built-in support for Async operations,
    # but all the WinRT methods are Async.
    # This function wraps a way to call those methods, and wait for their results.
    # (Reflection: pick the generic GetAwaiter overload whose first parameter
    # is an IAsyncOperation`1.)
    $getAwaiterBaseMethod = [WindowsRuntimeSystemExtensions].GetMember('GetAwaiter').
                                Where({
                                        $PSItem.GetParameters()[0].ParameterType.Name -eq 'IAsyncOperation`1'
                                    }, 'First')[0]

    # Synchronously await $AsyncTask and return its result, typed as $ResultType.
    Function Await {
        param($AsyncTask, $ResultType)

        $getAwaiterBaseMethod.
            MakeGenericMethod($ResultType).
            Invoke($null, @($AsyncTask)).
            GetResult()
    }
}

Process
{
    foreach ($p in $Path)
    {

        # From MSDN, the necessary steps to load an image are:
        # Call the OpenAsync method of the StorageFile object to get a random access stream containing the image data.
        # Call the static method BitmapDecoder.CreateAsync to get an instance of the BitmapDecoder class for the specified stream.
        # Call GetSoftwareBitmapAsync to get a SoftwareBitmap object containing the image.
        #
        # https://docs.microsoft.com/en-us/windows/uwp/audio-video-camera/imaging#save-a-softwarebitmap-to-a-file-with-bitmapencoder

        # .Net method needs a full path, or at least might not have the same relative path root as PowerShell
        $p = $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($p)

        $params = @{
            AsyncTask = [StorageFile]::GetFileFromPathAsync($p)
            ResultType = [StorageFile]
        }
        $storageFile = Await @params


        $params = @{
            AsyncTask = $storageFile.OpenAsync([FileAccessMode]::Read)
            ResultType = [Streams.IRandomAccessStream]
        }
        $fileStream = Await @params


        $params = @{
            AsyncTask = [BitmapDecoder]::CreateAsync($fileStream)
            ResultType = [BitmapDecoder]
        }
        $bitmapDecoder = Await @params


        $params = @{
            AsyncTask = $bitmapDecoder.GetSoftwareBitmapAsync()
            ResultType = [SoftwareBitmap]
        }
        $softwareBitmap = Await @params

        # Run the OCR
        # (the resulting [OcrResult] is this script's output for each path)
        Await $ocrEngine.RecognizeAsync($softwareBitmap) ([Windows.Media.Ocr.OcrResult])

    }
}

--------------------------------------------------------------------------------
/ocrTranslate/gui/AnimatedGifButton.py:
--------------------------------------------------------------------------------
1 | """ AnimatedGIF - a class to show an animated gif without blocking the tkinter mainloop()
2 |
3 | Copyright (c) 2016 Ole Jakob Skjelten
4 | Released under the terms of the MIT license (https://opensource.org/licenses/MIT) as described in LICENSE.md
5 |
6 | """
7 | import sys
8 | import time
9 | from ocrTranslate.assets import Assets as assets
10 |
11 | try:
12 | import Tkinter as tk # for Python2
13 | except ImportError:
14 | import tkinter as tk # for Python3
15 |
16 | import customtkinter as ctk
17 | from PIL import Image
18 |
19 |
class AnimatedGifButton(ctk.CTkButton):
    """
    Button that toggles between a static icon and an animated GIF without
    blocking the tkinter mainloop().

    Clicking the button (or calling start_button()) flips between the running
    animation and the static stop_icon.  Two driving modes are offered:
      * start()/stop() schedule frames with Tk's after() on the main loop;
      * start_thread()/stop_thread() run the frame loop in a worker thread.
    """

    def __init__(self, root, gif_file, stop_icon, delay=0.04, size=(40, 40), hide=True):
        """
        :param root: parent widget (also used to schedule after() callbacks)
        :param gif_file: filename (and path) of the animated gif
        :param stop_icon: filename of the static image shown while stopped
        :param delay: delay between frames in seconds (float)
        :param size: (width, height) the frames are rendered at
        :param hide: when True the button is grid_forget()-hidden while stopped
        """
        self.stop_icon = ctk.CTkImage(Image.open(stop_icon), size=size)
        ctk.CTkButton.__init__(self, root, text="", fg_color="transparent", border_width=0, anchor="left", command=self.start_button, image=self.stop_icon, width=size[0], height=size[1])
        self.root = root
        self.hide = hide
        self.gif_file = gif_file
        self.delay = delay  # try low floats, like 0.04 (depends on the gif in question)
        self.stop_animation = False  # exit-request flag shared by both driving modes
        self.started_animation = False  # True while the toggle is in the "running" state
        self.size = size
        self.grid_saved = self.grid_info()  # remembered so the button can be re-shown in place
        self._num = 0  # index of the frame that was shown last

    def grid_save(self):
        """Remember the current grid placement so grid_restore() can re-apply it."""
        self.grid_saved = self.grid_info()

    def grid_restore(self):
        """Re-apply the placement captured by grid_save()."""
        self.grid(**self.grid_saved)

    def start_button(self, started_animation=None):
        """Toggle the animation.

        :param started_animation: when None, simply flip the current state;
            otherwise force the state towards (not started_animation), i.e.
            the caller passes the state it has just left.
        """
        if started_animation is not None:
            started_forced_animation = not started_animation
            if self.started_animation and started_forced_animation:
                self.started_animation = False
                self.stop()
            elif not self.started_animation and not started_forced_animation:
                self.started_animation = True
                self.start()
        else:
            if self.started_animation:
                self.started_animation = False
                self.stop()
            else:
                self.started_animation = True
                self.start()

    def start(self):
        """Starts non-threaded version that we need to manually update()."""
        self.start_time = time.time()  # starting timer (not read back here; kept for callers)
        self.stop_animation = False
        if self.hide:
            self.grid_restore()
        self._animate()

    def stop(self):
        """Stops the after() loop that runs the animation."""
        self.stop_animation = True
        if self.hide:
            self.grid_forget()

    def _animate(self):
        """Show the next gif frame and re-schedule itself until stopped."""
        with Image.open(self.gif_file) as im:
            try:
                im.seek(self._num + 1)
                self.gif = ctk.CTkImage(light_image=im, dark_image=im, size=self.size)
                self.configure(image=self.gif)
                self._num += 1
            except EOFError:
                self._num = -1  # past the last frame: restart from the beginning
            if not self.stop_animation:  # if the stop flag is set, we don't repeat
                self.root.after(int(self.delay * 1000), self._animate)
            else:
                self.configure(image=self.stop_icon)  # back to the static icon

    def start_thread(self):
        """Starts the thread that runs the animation (threaded approach)."""
        from threading import Thread  # imported lazily; only needed in this mode
        # BUGFIX: Thread(...).start() returns None, so the old code stored None in
        # _animation_thread; keep the Thread object so it can be joined/inspected.
        self._animation_thread = Thread(target=self._animate_thread)
        self._animation_thread.start()

    def stop_thread(self):
        """Asks the threaded animation loop to exit."""
        self.stop_animation = True

    def _animate_thread(self):
        """Frame loop for the threaded mode; runs until stop_animation is set."""
        while self.stop_animation is False:  # runs in its own thread, so mainloop() is not blocked
            try:
                time.sleep(self.delay)
                self.gif = tk.PhotoImage(file=self.gif_file, format='gif -index {}'.format(self._num))  # looping through the frames
                self.configure(image=self.gif)
                self._num += 1
            except tk.TclError:  # frame index past the end: start over from zero
                self._num = 0
            except RuntimeError:  # interpreter is shutting down
                sys.exit()
107 |
--------------------------------------------------------------------------------
/ocrTranslate/services/speach_to_text_multi_services.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import threading
3 | import time
4 | from typing import AsyncGenerator
5 | import speech_recognition as sr
6 | from speech_recognition import WaitTimeoutError
7 | import multiprocessing.pool
8 | import functools
9 |
10 | from ocrTranslate.langs import convert_language_sst
11 |
12 |
def timeout(max_timeout):
    """Decorator that aborts a call after *max_timeout* seconds.

    The wrapped function runs in a single-worker ThreadPool; if it does not
    finish within max_timeout seconds, multiprocessing.TimeoutError is raised
    in the caller.  NOTE: the worker thread itself is not killed -- it keeps
    running to completion in the background.

    :param max_timeout: timeout in seconds (float)
    """

    def timeout_decorator(item):
        """Wrap the original function."""

        @functools.wraps(item)
        def func_wrapper(*args, **kwargs):
            """Run *item* in a worker thread and wait at most max_timeout."""
            pool = multiprocessing.pool.ThreadPool(processes=1)
            try:
                async_result = pool.apply_async(item, args, kwargs)
                # get() raises multiprocessing.TimeoutError when it expires
                return async_result.get(max_timeout)
            finally:
                # FIX: the pool was never closed, leaking a worker thread on
                # every decorated call
                pool.close()

        return func_wrapper

    return timeout_decorator
30 |
31 |
class VoiceRecognizerMulti:
    """Microphone speech-to-text via the speech_recognition package.

    One recognizer instance serves several start buttons; each button is
    paired (by list position) with a textbox that receives the recognized
    text.  Listening and recognition run on background threads so the GUI
    stays responsive.
    """

    def __init__(self, start_button=None, text_box=None, combobox_sst_language=None, language='pl-pl', name_service="google"):
        """
        :param start_button: list of AnimatedGifButton widgets that toggle listening
        :param text_box: list of textboxes, parallel to start_button
        :param combobox_sst_language: combobox holding the recognition language
        :param language: initial language tag passed to the backend, e.g. 'pl-pl'
        :param name_service: backend suffix, e.g. "google" -> Recognizer.recognize_google
        """
        self.language = language
        self.stop = False  # True while a listening session should keep running
        self.combobox_sst_language = combobox_sst_language
        self.buttons_sst_list = start_button
        self.sst_frame_textbox_list = text_box
        self.name_service = name_service

        for button in self.buttons_sst_list:
            button.configure(command=lambda b=button: self.on_start_button_click(b))

        # placeholder so .is_alive() can be queried before the first real run
        self.thread = threading.Thread(target=lambda: None)

    def change_language(self):
        """Refresh self.language from the language combobox."""
        self.language = convert_language_sst(self.combobox_sst_language.get())
        print(self.language)

    def on_start_button_click(self, button = None):
        """Start a listening session for *button*, or request a stop if one runs."""
        self.change_language()
        text_box = None
        if button is not None:
            # find the textbox paired (by index) with the clicked button
            for iteration, button2 in enumerate(self.buttons_sst_list):
                if button == button2:
                    text_box = self.sst_frame_textbox_list[iteration]
                    break
        print("klik")
        if not self.thread.is_alive():
            self.stop = True
            self.thread = threading.Thread(target=self.recognize_voice_wrapper, args=(button, text_box))
            self.thread.start()
        else:
            # a session is already running: ask its listen loop to finish
            self.stop = False

    @timeout(20.0)
    def listen_voice2(self, source):
        """Capture one utterance from *source*; aborted after 20 s (see @timeout)."""
        r = sr.Recognizer()
        r.pause_threshold = 0.4
        r.non_speaking_duration = 0.39
        r.phrase_threshold = 0.3
        audio = r.listen(source, 1)
        return audio

    def listen_voice(self):
        """Generator yielding AudioData chunks while self.stop is True.

        FIX: the original re-imported speech_recognition locally and shadowed
        the module name with the Microphone instance; it also carried an
        AsyncGenerator return annotation on what is a plain (sync) generator.
        """
        microphone = sr.Microphone()
        print(self.stop)
        with microphone as source:
            while self.stop:
                try:
                    audio = self.listen_voice2(source)
                except WaitTimeoutError:
                    continue
                except Exception as e:
                    print("Error: " + str(e))
                    continue
                yield audio
        print("koniec")

    def recognize_voice(self, r, response, textbox):
        """Send *response* audio to the configured backend and insert the text."""
        try:
            # FIX: dispatch with getattr instead of eval() -- eval on a
            # formatted string is fragile and unsafe if name_service is ever
            # user-controlled.
            method = getattr(r, 'recognize_{}'.format(self.name_service.lower()))
            query = method(response, language=self.language)
            print(f"{query}")
            textbox.insert("0.0 lineend", " " + query)
        except Exception as e:
            print("Error: " + str(e))
            query = "Nie rozpoznano"
            print(f"{query}")

    def recognize_voice_wrapper(self, button, textbox):
        """Worker: animate *button*, stream audio and recognize each chunk."""
        print("start animacja")
        button.start_button(started_animation=self.stop)
        for button2 in self.buttons_sst_list:
            if button != button2:
                button2.configure(state="disabled")  # one session at a time

        if textbox.get("0.0", "0.0 lineend") != "":
            textbox.insert("0.0", "\n")
        r = sr.Recognizer()
        for response in self.listen_voice():
            print("koniec2")
            # recognize in parallel so listening is not blocked by the service call
            threading.Thread(target=self.recognize_voice, args=(r, response, textbox, )).start()

        print("koniec3")
        for button2 in self.buttons_sst_list:
            button2.configure(state="normal")
        print("koniec animacja")
        button.start_button(started_animation=self.stop)


    def run(self):
        # NOTE(review): recognize_voice_wrapper requires (button, textbox), so
        # this call would raise TypeError when the thread starts -- looks like
        # stale/dead code; kept unchanged for interface compatibility.
        self.thread = threading.Thread(target=self.recognize_voice_wrapper)
        self.thread.start()

    def start(self, name_service):
        """Switch the recognizer backend and (re)bind the button commands."""
        self.name_service = name_service
        for button in self.buttons_sst_list:
            button.configure(command=lambda b=button: self.on_start_button_click(b))
134 |
135 | # if __name__ == "__main__":
136 | # VoiceRecognizer().run()
137 |
--------------------------------------------------------------------------------
/ocrTranslate/services/chatGPT_free3.py:
--------------------------------------------------------------------------------
1 | import uuid
2 |
3 | from revChatGPT.V1 import Chatbot as ChatbotFree
4 | from secrets import compare_digest
5 |
6 | from ocrTranslate.utils import format_words, format_words2
7 | import json
8 |
class ChatGPTFree:
    """Wrapper around revChatGPT's Chatbot with three authentication modes.

    Credentials are tried in order: email+password, session token, access
    token.  When none is supplied (or login fails) the instance stays
    inactive and every call returns/yields self.error_message instead.
    """

    def __init__(self, email="", password="", session_token="", access_token="") -> None:
        """
        :param email: OpenAI account email (used together with *password*)
        :param password: OpenAI account password
        :param session_token: alternative auth: session token
        :param access_token: alternative auth: access token
        """
        self.chat_gpt_free = None
        self.error_message = "Not initialized, go to Settings and enter your access token"
        if not compare_digest(email, "") and not compare_digest(password, ""):
            self.email = email
            self.password = password
            self.error_message = "Your email or password is invalid"
            self.is_active = True
            self.renew_chatbot_session_password()
        elif not compare_digest(session_token, ""):
            self.session_token = session_token
            self.error_message = "Your session token is invalid"
            self.is_active = True
            self.renew_chatbot_session_token()
        elif not compare_digest(access_token, ""):
            self.access_token = access_token
            self.error_message = "Your access token is expired, please log in at https://chat.openai.com/ and next get the access token from the https://chat.openai.com/api/auth/session"
            self.is_active = True
            self.renew_chatbot_access_token()
        else:
            self.is_active = False

    def _renew_chatbot(self, config, failure_note):
        """Build a fresh Chatbot from *config*; on failure deactivate and log.

        Shared by the three renew_chatbot_* methods below (FIX: they used to
        carry three copies of the same try/except plus pointless
        "{x}".format(x=...) identity round-trips).
        """
        try:
            self.chat_gpt_free = ChatbotFree(config=config)
        except Exception as e:
            self.is_active = False
            print(e)
            print(failure_note)

    def renew_chatbot_session_password(self):
        """(Re)login with email + password."""
        self._renew_chatbot({"email": self.email, "password": self.password}, "password or email are invalid")

    def renew_chatbot_session_token(self):
        """(Re)login with a session token."""
        self._renew_chatbot({"session_token": self.session_token}, "session token has been out of date, please renew token")

    def renew_chatbot_access_token(self):
        """(Re)login with an access token."""
        self._renew_chatbot({"access_token": self.access_token}, "access token has been out of date, please renew token")

    async def run_translate_async(self, word, language_to="English"):
        """Async generator yielding translation deltas for *word*."""
        if self.is_active:
            words = format_words(word)
            prev_text = ""
            for data in self.chat_gpt_free.ask("Just translate the following sentence into {languageInText}, without any explanation and write only the translated sentence:\n{wordInText}".format(wordInText=words, languageInText=language_to)):
                response = data["message"][len(prev_text):]
                prev_text = data["message"]
                yield response
        else:
            yield self.error_message

    def _stream_ask(self, prompt):
        """Ask on a fresh conversation, echoing deltas to stdout; return the full answer.

        Shared by run_translate() and run_chat_ai() (FIX: deduplicated the
        identical streaming loops).
        """
        response = ""
        prev_text = ""
        for data in self.chat_gpt_free.ask(prompt=prompt, conversation_id=str(uuid.uuid4())):
            response = data["message"]
            message = data["message"][len(prev_text):]
            print(message, end="", flush=True)
            prev_text = data["message"]
        print("")
        return response

    def run_translate(self, word, language_to="English", prompt=""):
        """Translate *word* into *language_to* and return the full answer.

        :param prompt: optional custom instruction used instead of the default
            translate instruction; the text to translate is appended to it
        """
        print("translate_by_chat_gpt")
        if self.is_active:
            words = format_words(word)
            if prompt == "":
                prompt = "Just translate the following sentence into {languageInText}, without any explanation and write only the translated sentence: {wordInText}".format(wordInText=words, languageInText=language_to)
            else:
                prompt = prompt + " " + words
            return self._stream_ask(prompt)
        else:
            return self.error_message

    def run_chat_ai(self, prompt=""):
        """Send *prompt* as a chat message and return the full answer."""
        print("run_chat_ai")
        if self.is_active:
            return self._stream_ask(prompt)
        else:
            return self.error_message

    async def run_chat_ai_async(self, prompt=""):
        """Async generator yielding chat-answer deltas for *prompt*."""
        if self.is_active:
            prev_text = ""
            try:
                # NOTE: continues the bot's current conversation (no fresh id)
                for data in self.chat_gpt_free.ask(prompt=prompt):
                    response = data["message"][len(prev_text):]
                    prev_text = data["message"]
                    yield response
            except Exception as e:
                print(e)
                yield self.error_message
            print(self.chat_gpt_free.conversation_id)
        else:
            yield self.error_message
--------------------------------------------------------------------------------
/ocrTranslate/services/speach_to_text_web_google.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 | from selenium import webdriver
4 | from selenium.common import JavascriptException, NoSuchWindowException
5 | from selenium.webdriver.common.by import By
6 | from ocrTranslate.langs import convert_language_sst
7 | from ocrTranslate.assets import Assets as assets
8 |
class SpeechRecognitionGUI:
    """Speech-to-text backed by Chrome's Web Speech API.

    Drives the bundled webspeechdemo.html page through Selenium; the page's
    JavaScript does the actual recognition, and this class polls the result
    spans and mirrors them into the active textbox.
    """

    def __init__(self, start_button=None, change_language_button=None, text_box=None, combobox_sst_language=None):
        """
        :param start_button: list of AnimatedGifButton widgets that toggle recognition
        :param change_language_button: unused; kept for interface compatibility
        :param text_box: list of textboxes, parallel to start_button
        :param combobox_sst_language: combobox holding the recognition language
        """
        self.options = webdriver.ChromeOptions()
        # auto-grant the microphone permission prompt
        self.options.add_argument("use-fake-ui-for-media-stream")
        self.driver = None
        self.initialize_driver()
        self.combobox_sst_language = combobox_sst_language
        self.buttons_sst_list = start_button
        self.sst_frame_textbox_list = text_box
        self.stop = True

        for button in self.buttons_sst_list:
            button.configure(command=lambda b=button: self.on_start_button_click(b))

        # placeholder so .is_alive() can be queried before the first real run
        self.thread = threading.Thread(target=lambda: None)

    def is_browser_alive(self, driver):
        """Return True when *driver* refers to an open browser window."""
        try:
            driver.current_url  # raises when the window is gone (or driver is None)
            return True
        except AttributeError:
            return False
        except NoSuchWindowException:
            return False

    def initialize_driver(self):
        """(Re)open the Chrome window with the demo page if it is not alive."""
        if not self.is_browser_alive(self.driver):
            # FIX: Selenium 4 removed the chrome_options= keyword; options= is
            # the supported spelling (also accepted since Selenium 3.8).
            self.driver = webdriver.Chrome(options=self.options)
            self.driver.get(assets.path_web_speech_demo)
            self.driver.minimize_window()

    def change_language(self, option):
        """Rebuild the page's dialect <select> so *option* is its only entry."""
        self.initialize_driver()
        script = f"for (var i = select_dialect.options.length - 1; i >= 0; i--) {{select_dialect.remove(i);}} select_dialect.options.add(new Option('{option}', '{convert_language_sst(language_sst=option)}'));"
        self.driver.execute_script(script)

    def on_start_button_click(self, button = None):
        """Toggle recognition on the page and start the polling thread."""
        text_box = None
        if button is not None:
            # find the textbox paired (by index) with the clicked button
            for iteration, button2 in enumerate(self.buttons_sst_list):
                if button == button2:
                    text_box = self.sst_frame_textbox_list[iteration]
                    break
        self.initialize_driver()
        self.change_language(self.combobox_sst_language.get())
        try:
            self.driver.execute_script("startButton();")
        except JavascriptException:
            pass
        time.sleep(0.1)  # give the page a moment to flip its `recognizing` flag
        if not self.thread.is_alive():
            self.thread = threading.Thread(target=self.update_recognized_text, args=(button, text_box))
            self.thread.start()

    def update_recognized_text(self, button, textbox):
        """Worker: poll the page while it is recognizing and mirror the text."""
        recognizing = self.driver.execute_script("return recognizing;")
        button.start_button(started_animation=recognizing)
        for button2 in self.buttons_sst_list:
            if button != button2:
                button2.configure(state="disabled")  # one session at a time
        if recognizing:
            textbox.insert("0.0", "\n")
        while recognizing:
            recognizing = self.driver.execute_script("return recognizing;")
            # final + interim spans together form the current best transcript
            text_output_final = self.driver.find_element(By.ID, "final_span")
            recognized_text = text_output_final.text
            text_output_interim = self.driver.find_element(By.ID, "interim_span")
            recognized_text = recognized_text + text_output_interim.text
            # replace the first line of the textbox with the fresh transcript
            textbox.delete("0.0", "0.0 lineend")
            textbox.insert("0.0 lineend", recognized_text)
            time.sleep(0.1)
        for button2 in self.buttons_sst_list:
            button2.configure(state="normal")
        button.start_button(started_animation=recognizing)

    def start(self):
        """Ensure the browser is up and (re)bind the button commands."""
        self.initialize_driver()
        for button in self.buttons_sst_list:
            button.configure(command=lambda b=button: self.on_start_button_click(b))
121 |
122 | # if __name__ == "__main__":
123 | # SpeechRecognitionGUI()
124 |
--------------------------------------------------------------------------------
/ocrTranslate/services/google_api.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | import platform
4 | import subprocess
5 |
6 | import wx.adv
7 | from PIL.Image import Image
8 |
9 | from google.cloud import texttospeech
10 | from google.cloud.translate import TranslationServiceClient as TranslateClient
11 | from google.cloud.vision import ImageAnnotatorClient
12 | from google.oauth2 import service_account
13 |
14 |
class GoogleAPI:
    """Thin wrapper around Google Cloud Translate, Vision (OCR) and Text-to-Speech.

    Clients are only constructed when a service-account credentials file is
    found; otherwise the instance is flagged inactive via ``is_active`` and no
    client attributes are created.
    """

    def __init__(self, path_service_account_creds="", ) -> None:
        """Build the API clients from a service-account JSON file.

        :param path_service_account_creds: path to the service-account
            credentials JSON; if it does not exist, ``is_active`` is set to
            False and no clients are created.
        """
        if os.path.exists(path_service_account_creds):
            self.credentials = service_account.Credentials.from_service_account_file(path_service_account_creds)
            self.translate_client = TranslateClient(credentials=self.credentials)
            self.vision_client = ImageAnnotatorClient(credentials=self.credentials)
            self.text_to_speech_client = texttospeech.TextToSpeechClient(credentials=self.credentials)
            self.is_active = True
        else:
            self.is_active = False

    def text_to_wav(self, voice_name: str, text: str) -> None:
        """Synthesize *text* with the given voice and save it as ``<lang>.wav``.

        Example: ``text_to_wav("pl-PL-Wavenet-B", "some text to speak")``.

        :param voice_name: full Cloud TTS voice name, e.g. "pl-PL-Wavenet-B";
            the language code ("pl-PL") is derived from its first two segments.
        :param text: text to synthesize.
        """
        language_code = "-".join(voice_name.split("-")[:2])
        text_input = texttospeech.SynthesisInput(text=text)
        voice_params = texttospeech.VoiceSelectionParams(language_code=language_code, name=voice_name)
        audio_config = texttospeech.AudioConfig(audio_encoding=texttospeech.AudioEncoding.LINEAR16)

        # Reuse the client built in __init__ instead of constructing a new
        # TextToSpeechClient on every call.
        response = self.text_to_speech_client.synthesize_speech(input=text_input, voice=voice_params, audio_config=audio_config)

        filename = f"{language_code}.wav"
        with open(filename, "wb") as out:
            out.write(response.audio_content)
        print(f'Generated speech saved to "{filename}"')

    def run_ocr(self, image: "Image") -> str:
        """Detect text from PIL.Image data using Google Cloud Vision.

        :param image: source PIL image.
        :return: the full detected text, or "" when nothing was found.
        """
        # Serialize the image to an in-memory PNG byte stream.
        bytes_io = io.BytesIO()
        image.save(bytes_io, 'png')
        bytes_io.seek(0)
        content = bytes_io.read()
        bytes_io.close()

        res = self.vision_client.text_detection({
            'content': content, })

        # The first annotation aggregates the whole detected text block;
        # subsequent entries are per-word.
        annotations = res.text_annotations
        if len(annotations) > 0:
            text = annotations[0].description
        else:
            text = ""
        return "{}".format(text)

    def run_translate(self, text: str, target_language: str = 'en', source_language: str = 'ja') -> str:
        """Translate *text* (currently a no-op placeholder).

        NOTE(review): translation is not implemented — the input is returned
        unchanged. A reference implementation with ``translate_v2`` is kept
        below for future work::

            from google.cloud import translate_v2 as translate2
            translate_client2 = translate2.Client(credentials=credentials)
            result = translate_client2.translate(text, target_language=target_language)
            # result["input"], result["translatedText"], result["detectedSourceLanguage"]

        :param text: text to translate.
        :param target_language: ISO code of the target language.
        :param source_language: ISO code of the source language.
        :return: the (untranslated) input text.
        """
        return text

    def speech(self, text: str, langage_code: str = 'ja-JP') -> None:
        """Speak *text* via Google Cloud Text-to-Speech voice synthesis.

        :param text: text to speak.
        :param langage_code: BCP-47 language code (parameter name kept
            misspelled for backward compatibility with existing callers).
        """
        temp_file = 'tmp.mp3'
        # Use the current client API (same style as text_to_wav) instead of
        # the removed texttospeech.types / texttospeech.enums namespaces.
        synthesis_input = texttospeech.SynthesisInput(text=text)
        voice = texttospeech.VoiceSelectionParams(language_code=langage_code, ssml_gender=texttospeech.SsmlVoiceGender.FEMALE, )
        audio_config = texttospeech.AudioConfig(audio_encoding=texttospeech.AudioEncoding.MP3)
        response = self.text_to_speech_client.synthesize_speech(input=synthesis_input, voice=voice, audio_config=audio_config)
        with open(temp_file, 'wb') as f:
            f.write(response.audio_content)

        # Play sound
        system = platform.system()
        if system == 'Windows':
            # Relies on the external `cmdmp3` player being on PATH.
            cmd = 'cmdmp3 {}'.format(temp_file)
            subprocess.call(cmd)
        else:
            wx.adv.Sound.PlaySound(temp_file, flags=wx.adv.SOUND_SYNC)

        # Windows has a problem in making temp files
        # ref: https://github.com/bravoserver/bravo/issues/111
        try:
            os.unlink(temp_file)
        except FileNotFoundError:
            pass
137 |
--------------------------------------------------------------------------------
/ocrTranslate/gui/tabviewChats.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import sys
3 | import time
4 | from threading import Thread
5 |
6 | from ocrTranslate.assets import Assets as assets
7 |
8 | import customtkinter
9 |
10 | from ocrTranslate.config_files import chatGpt, edgeGpt
11 | from ocrTranslate.gui.AnimatedGif import AnimatedGif
12 | from ocrTranslate.gui.auto_resize_text_box import AutoResizeTextBox
13 |
14 |
class TabviewChats(customtkinter.CTkTabview):
    """Tabbed chat panel with one tab per AI service (ChatGPT, Bing, ...).

    For each service named in *ai_services* the constructor builds a chat
    history textbox, an auto-resizing input box, a send button and a loading
    spinner, exposed as attributes named ``textbox_<service>_frame``,
    ``textbox_<service>_send_frame``, ``button_send_message_<service>`` and
    ``loading_icon_chat_<service>`` (service name lower-cased, spaces
    replaced by underscores).
    """

    def __init__(self, root, ai_services, send_message_icon, column=0):
        """
        :param root: parent widget.
        :param ai_services: iterable of tuples; index 0 is the display name
            used for the tab title and the generated attribute names.
        :param send_message_icon: image shown on the send buttons.
        :param column: grid column of this tab view inside *root*.
        """
        customtkinter.CTkTabview.__init__(self, root, width=25)
        self.root = root
        self.ai_services = ai_services
        self.send_message_icon = send_message_icon
        self.column = column

        # Pre-declare the attributes that the setattr() calls below fill in,
        # so static readers know they exist.
        self.textbox_chatgpt_frame = None
        self.textbox_chatgpt_send_frame = None
        self.button_send_message_chatgpt = None

        self.textbox_bing_frame = None
        self.textbox_bing_send_frame = None
        self.button_send_message_bing = None

        self.textbox_Bard_frame = None
        self.textbox_Bard_send_frame = None
        self.button_send_message_Bard = None

        method_name = 'tabview_chat_ai'
        self.grid(row=0, column=self.column, padx=(0, 20), pady=(5, 5), sticky="nsew")
        for ai_service in ai_services:
            self.add(str(ai_service[0]))
            self.tab(str(ai_service[0])).grid_columnconfigure(0, weight=1)  # configure grid of individual tabs
            self.tab(str(ai_service[0])).grid_rowconfigure(0, weight=1)  # configure grid of individual tabs
        setattr(self, method_name, self)
        for ai_service in ai_services:
            # Chat history area with per-speaker text styles.
            textbox_ai_frame = customtkinter.CTkTextbox(self.tab(ai_service[0]), undo=True, autoseparators=True)
            textbox_ai_frame.grid(row=0, column=0, padx=(0, 0), pady=(0, 0), sticky="nsew")
            textbox_ai_frame.tag_config("user_name", justify='left', foreground='white', font=customtkinter.CTkFont(size=14, weight="bold"))
            textbox_ai_frame.tag_config("user_message", justify='left', foreground='white')
            textbox_ai_frame.tag_config("chatbot_name", justify='left', foreground='lightblue', font=customtkinter.CTkFont(size=14, weight="bold"))
            textbox_ai_frame.tag_config("chatbot_message", justify='left', foreground='lightblue')

            # Message composer with a send button and a loading spinner that
            # occupies the same grid cell (shown only while waiting).
            textbox_ai_send_frame = AutoResizeTextBox(textbox_ai_frame)
            textbox_ai_send_frame.grid(row=1, column=0, padx=(15, 0), pady=(0, 0), sticky="nsew")
            button_send_message_ai = customtkinter.CTkButton(master=textbox_ai_send_frame, text="", fg_color="transparent", bg_color="transparent", border_width=0, width=26, anchor="left", height=26, text_color=("gray10", "#DCE4EE"), image=self.send_message_icon)
            button_send_message_ai.grid(row=0, column=1, padx=(14, 7), pady=(4, 0), sticky="e")
            textbox_ai_send_frame.button = button_send_message_ai

            loading_icon_chat = AnimatedGif(textbox_ai_send_frame, assets.path_to_loading_gif, 0.04, size=(50, 50))
            loading_icon_chat.grid(row=0, column=1, padx=(14, 7), pady=(4, 0), sticky="e")
            loading_icon_chat.grid_save()

            # Expose the per-service widgets as predictable attribute names.
            method_name = 'textbox_{}_frame'.format(ai_service[0].lower().replace(" ", "_"))
            setattr(self, method_name, textbox_ai_frame)

            method_name = 'textbox_{}_send_frame'.format(ai_service[0].lower().replace(" ", "_"))
            setattr(self, method_name, textbox_ai_send_frame)

            method_name = 'button_send_message_{}'.format(ai_service[0].lower().replace(" ", "_"))
            setattr(self, method_name, button_send_message_ai)

            method_name = 'loading_icon_chat_{}'.format(ai_service[0].lower().replace(" ", "_"))
            setattr(self, method_name, loading_icon_chat)

        self.button_send_message_chatgpt.configure(command=lambda arg1=self.textbox_chatgpt_send_frame, arg2=self.textbox_chatgpt_frame, arg3="ChatGPT": self.send_message_button(arg1, arg2, arg3))
        self.button_send_message_bing.configure(command=lambda arg1=self.textbox_bing_send_frame, arg2=self.textbox_bing_frame, arg3="Bing": self.send_message_button(arg1, arg2, arg3))

    async def display_chat_ChatGPT(self, word, widget):
        """Stream the ChatGPT answer for *word* into *widget*."""
        async for response in chatGpt.run_chat_ai_async(word):
            widget.insert('end', response, 'chatbot_message')
        widget.insert('end', "\n\n")

    async def display_chat_Bing(self, word, widget):
        """Stream the Bing (EdgeGPT) answer for *word* into *widget*."""
        async for response in edgeGpt.run_chat_ai_async(word):
            widget.insert('end', response, 'chatbot_message')
        widget.insert('end', "\n\n")

    def send_message_button(self, textbox_ai_send_frame, textbox_ai_frame, name_service):
        """Handle a send click: run send_message_ai on a worker thread so
        the GUI stays responsive while the service answers."""
        thread = Thread(target=self.send_message_ai, args=(textbox_ai_send_frame, textbox_ai_frame, name_service,))
        thread.start()

    def send_message_ai(self, textbox_ai_send_frame, textbox_ai_frame, name_service):
        """Send the composed message to *name_service* and stream the reply.

        Disables the service's send button and shows its loading spinner for
        the duration of the request.

        :param textbox_ai_send_frame: the composer textbox to read/clear.
        :param textbox_ai_frame: the chat history textbox to append to.
        :param name_service: service display name, e.g. "ChatGPT" or "Bing".
        """
        service_key = name_service.lower()
        # getattr() instead of eval(): same dynamic lookup without executing
        # arbitrary expression strings.
        loading_icon = getattr(self, 'loading_icon_chat_{}'.format(service_key))
        loading_icon.start()
        send_button = getattr(self, 'button_send_message_{}'.format(service_key))
        send_button.configure(state="disabled")
        message = textbox_ai_send_frame.get(0.0, 'end')
        textbox_ai_send_frame.delete(0.0, 'end')
        textbox_ai_frame.insert('end', f"You:\n", 'user_name')
        textbox_ai_frame.insert('end', f"{message}\n", 'user_message')
        textbox_ai_frame.insert('end', f"{name_service}:\n", 'chatbot_name')
        display_chat = getattr(self, 'display_chat_{}'.format(name_service))
        asyncio.run(display_chat(message, textbox_ai_frame))
        loading_icon.stop()
        send_button.configure(state="normal")

    def get_key(self, valu):
        """Return the attribute name whose value's str() equals *valu*.

        NOTE: comparison is done on ``str(value)``, so distinct objects with
        identical string representations are indistinguishable.

        :param valu: string representation to look up.
        :return: the matching attribute name, or "key doesn't exist".
        """
        for key, value in self.__dict__.items():
            if valu == str(value):
                return key
        return "key doesn't exist"
116 |
--------------------------------------------------------------------------------
/ocrTranslate/gui/auto_complete_combobox.py:
--------------------------------------------------------------------------------
1 | """
2 | Authors: Mitja Martini and Russell Adams
3 | License: "Licensed same as original by Mitja Martini or public domain, whichever is less restrictive"
4 | Source: https://mail.python.org/pipermail/tkinter-discuss/2012-January/003041.html
5 |
6 | Edited by RedFantom for ttk and Python 2 and 3 cross-compatibility and binding
7 | Edited by Juliette Monsel to include Tcl code to navigate the dropdown by Pawel Salawa
8 | (https://wiki.tcl-lang.org/page/ttk%3A%3Acombobox, copyright 2011)
9 | Edited by Azornes to working with customtkinter library (https://github.com/TomSchimansky/CustomTkinter)
10 | """
11 |
12 | import customtkinter as tk
13 |
14 | tk_umlauts = ['odiaeresis', 'adiaeresis', 'udiaeresis', 'Odiaeresis', 'Adiaeresis', 'Udiaeresis', 'ssharp']
15 |
16 |
class AutocompleteCombobox(tk.CTkComboBox):
    """:class:`ttk.Combobox` widget that features autocompletion."""

    def __init__(self, master=None, completevalues=None, **kwargs):
        """
        Create an AutocompleteCombobox.

        :param master: master widget
        :type master: widget
        :param completevalues: autocompletion values
        :type completevalues: list
        :param kwargs: keyword arguments passed to the :class:`ttk.Combobox` initializer
        """
        tk.CTkComboBox.__init__(self, master, values=completevalues, **kwargs)
        self._completion_list = completevalues
        if isinstance(completevalues, list):
            self.set_completion_list(completevalues)
        self._hits = []
        self._hit_index = 0
        self.position = 0
        # navigate on keypress in the dropdown:
        # code taken from https://wiki.tcl-lang.org/page/ttk%3A%3Acombobox by Pawel Salawa, copyright 2011
        self.tk.eval("""
proc ComboListKeyPressed {w key} {
        if {[string length $key] > 1 && [string tolower $key] != $key} {
                return
        }

        set cb [winfo parent [winfo toplevel $w]]
        set text [string map [list {[} {\[} {]} {\]}] $key]
        if {[string equal $text ""]} {
                return
        }

        set values [$cb cget -values]
        set x [lsearch -glob -nocase $values $text*]
        if {$x < 0} {
                return
        }

        set current [$w curselection]
        if {$current == $x && [string match -nocase $text* [lindex $values [expr {$x+1}]]]} {
                incr x
        }

        $w selection clear 0 end
        $w selection set $x
        $w activate $x
        $w see $x
}

set popdown [ttk::combobox::PopdownWindow %s]
bind $popdown.f.l <KeyPress> [list ComboListKeyPressed %%W %%K]
""" % (self._entry))
        # NOTE: the <KeyPress> sequence above was missing in the previous
        # revision, which made the dropdown key navigation bind a no-op.

    def set_completion_list(self, completion_list):
        """
        Use the completion list as drop down selection menu, arrows move through menu.

        :param completion_list: completion values
        :type completion_list: list
        """
        self._completion_list = sorted(completion_list, key=str.lower)  # Work with a sorted list
        self.configure(values=completion_list)
        self._hits = []
        self._hit_index = 0
        self.position = 0
        # Bind to KeyRelease so autocompletion runs after the entry text has
        # been updated (the event sequence was lost in a previous revision).
        self.bind('<KeyRelease>', self.handle_keyrelease)
        self['values'] = self._completion_list  # Setup our popup menu

    def set_completion_list_without_bind(self, completion_list):
        """
        Use the completion list as drop down selection menu, arrows move through menu.

        Same as :meth:`set_completion_list` but without (re)binding the
        KeyRelease handler — used when reconfiguring an existing widget.

        :param completion_list: completion values
        :type completion_list: list
        """
        self._completion_list = sorted(completion_list, key=str.lower)  # Work with a sorted list
        self.configure(values=completion_list)
        self._hits = []
        self._hit_index = 0
        self.position = 0
        self['values'] = self._completion_list  # Setup our popup menu

    def autocomplete(self, delta=0):
        """
        Autocomplete the Combobox.

        :param delta: 0, 1 or -1: how to cycle through possible hits
        :type delta: int
        """
        if delta:  # need to delete selection otherwise we would fix the current position
            self._entry.delete(self.position, tk.END)
        else:  # set position to end so selection starts where textentry ended
            self.position = len(self.get())
        # collect hits
        _hits = []
        for element in self._completion_list:
            if element.lower().startswith(self.get().lower()):  # Match case insensitively
                _hits.append(element)
        # if we have a new hit list, keep this in mind
        if _hits != self._hits:
            self._hit_index = 0
            self._hits = _hits
        # only allow cycling if we are in a known hit list
        if _hits == self._hits and self._hits:
            self._hit_index = (self._hit_index + delta) % len(self._hits)
        # now finally perform the auto completion
        if self._hits:
            self._entry.delete(0, tk.END)
            self._entry.insert(0, self._hits[self._hit_index])
            self._entry.select_range(self.position, tk.END)

    def handle_keyrelease(self, event):
        """
        Event handler for the keyrelease event on this widget.

        :param event: Tkinter event
        """
        if event.keysym == "BackSpace":
            self._entry.delete(self._entry.index(tk.INSERT), tk.END)
            self.position = self._entry.index(tk.END)
        if event.keysym == "Left":
            if self.position < self._entry.index(tk.END):  # delete the selection
                self._entry.delete(self.position, tk.END)
            else:
                self.position -= 1  # delete one character
                self._entry.delete(self.position, tk.END)
        if event.keysym == "Right":
            self.position = self._entry.index(tk.END)  # go to end (no selection)
        if event.keysym == "Return":
            self.handle_return(None)
            return
        if len(event.keysym) == 1:
            self.autocomplete()
        # No need for up/down, we'll jump to the popup
        # list at the position of the autocompletion

    def handle_return(self, event):
        """
        Function to bind to the Enter/Return key so if Enter is pressed the selection is cleared
        """
        self._entry.icursor(tk.END)
        self.selection_clear()

    def config(self, **kwargs):
        """Alias for configure"""
        self.configure(**kwargs)

    def configure(self, **kwargs):
        """Configure widget specific keyword arguments in addition to :class:`ttk.Combobox` keyword arguments."""
        if "completevalues" in kwargs:
            self.set_completion_list_without_bind(kwargs.pop("completevalues"))
        return tk.CTkComboBox.configure(self, **kwargs)

    def cget(self, key):
        """Return value for widget specific keyword arguments"""
        if key == "completevalues":
            return self._completion_list
        return tk.CTkComboBox.cget(self, key)

    def keys(self):
        """Return a list of all resource names of this widget."""
        keys = tk.CTkComboBox.keys(self)
        keys.append("completevalues")
        return keys

    def __setitem__(self, key, value):
        self.configure(**{key: value})

    def __getitem__(self, item):
        return self.cget(item)
192 |
--------------------------------------------------------------------------------
/ocrTranslate/services/google_free.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import time
4 | import logging
5 | import base64
6 | import requests
7 |
8 | from selenium import webdriver
9 | from selenium.webdriver.firefox.options import Options
10 | from pyshadow.main import Shadow
11 |
12 | from ocrTranslate.assets import Assets as assets
13 | from ocrTranslate.utils import list_to_string, format_words
14 | from ocrTranslate.langs import _langs
15 |
16 |
class GoogleFree:
    """OCR and translation via Google's free public endpoints.

    OCR goes through the Cloud Vision drag-and-drop demo proxy (which
    occasionally requires a captcha, handled via a spawned browser);
    translation uses the unofficial ``clients5.google.com`` endpoint.
    """

    def __init__(self, ) -> None:
        # JS snippet returning every attribute of a DOM element as a dict;
        # used below to inspect (shadow-DOM) elements during captcha handling.
        self.script_string: str = "var items = {}; for (index = 0; index < arguments[0].attributes.length; ++index) { items[arguments[0].attributes[index].name] = arguments[0].attributes[index].value }; return items;"
        with open(assets.path_to_test_image, 'rb') as img:
            print("Initializing ocr_google_free...")
            # self.ocr_google_free(img)

    def print_web_element(self, web_element_info):
        """Dump a selenium WebElement's attributes, text, size and location
        to stdout (debug helper)."""
        driver = web_element_info.parent

        attributes_element_dict = driver.execute_script(self.script_string, web_element_info)
        element_text = web_element_info.text
        element_size = web_element_info.size
        element_location = web_element_info.location

        print("█▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀█")
        print(web_element_info)
        print("attributes: " + str(attributes_element_dict))
        print('element.text: {0}'.format(element_text))
        print('size: {0}'.format(element_size))
        print('location: {0}'.format(element_location))
        print("█▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄█")

    def show_captcha(self, driver):
        """Open the Vision demo page in *driver*, upload a probe image and
        wait until the user solves the captcha (or the site rate-limits us).

        :param driver: a selenium WebDriver pointed at a visible browser.
        """
        print("Spawning browser for google captcha...")
        print("Scroll down on site to resolve captcha")
        file_path = "https://cloud.google.com/vision/docs/drag-and-drop"
        driver.get(file_path)

        shadow = Shadow(driver)
        shadow.set_explicit_wait(5, 1)  # delay

        # The demo lives in an iframe nested inside shadow DOM.
        iframe = shadow.find_element("#gc-wrapper > main > devsite-content > article > div.devsite-article-body.clearfix > p:nth-child(4) > cloudx-demo > iframe")  # Iframe #documents object

        driver.switch_to.frame(iframe)  # switching to #documents
        shadow = Shadow(driver)

        shadowDrop = shadow.find_element("div:nth-child(3) > div > label")
        shadowDrop = shadow.get_next_sibling_element(shadowDrop)

        # Make the hidden file input visible so send_keys() can reach it.
        driver.execute_script("arguments[0].setAttribute(arguments[1], arguments[2]);", shadowDrop, "style", "display:inline-block!important")

        # Upload a probe image to trigger the captcha flow.
        shadowDrop.send_keys(assets.path_to_point_image)

        # Walk up/over to the element whose attributes change once the
        # captcha has been solved.
        shadowDrop = shadow.get_parent_element(shadowDrop)
        shadowDrop = shadow.get_parent_element(shadowDrop)
        shadowDrop = shadow.get_next_sibling_element(shadowDrop)
        shadowDrop = shadow.get_next_sibling_element(shadowDrop)

        # Poll until the element's attribute set changes (captcha solved)
        # or the script call starts failing (rate limited / page gone).
        style_element1 = driver.execute_script(self.script_string, shadowDrop)
        while True:
            try:
                style_element = driver.execute_script(self.script_string, shadowDrop)
            except Exception:
                print("Too many requests, despawning browser...")
                break
            if style_element != style_element1:
                print("Success resolve captcha, despawning browser...")
                break
            time.sleep(1)
        driver.close()

    def refresh_session(self):
        """Spawn a visible Chrome window so the user can solve the Vision
        demo captcha, refreshing the free-proxy session.

        NOTE: intentionally NOT headless — the user must see the browser
        to solve the captcha.
        """
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"')

        driver = webdriver.Chrome(options=chrome_options)

        self.show_captcha(driver)

    def run_translate(self, word, language_to: str = "English") -> str:
        """Translate *word* via the unofficial clients5.google.com endpoint.

        :param word: text to translate (source language auto-detected).
        :param language_to: target language display name, mapped to its ISO
            code through the ``_langs`` table.
        :return: the translated text.
        """
        _language_to = list(_langs.keys())[list(_langs.values()).index(language_to)]
        words = format_words(word)
        url = "https://clients5.google.com/translate_a/t?client=dict-chrome-ex&sl=auto&tl={language_to}&q=".format(language_to=_language_to) + words
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}

        request_result = requests.get(url, headers=headers).json()

        list_of_words_translated = []
        # NOTE(review): response shape assumed to be [[translated, src], ...]
        # from the current endpoint behaviour — confirm if Google changes it.
        line = request_result[0][0].strip()
        list_of_words_translated.append(line)

        result = list_to_string(list_of_words_translated)
        return result

    def run_ocr(self, img) -> str:
        """Run OCR on *img* through the Cloud Vision demo proxy.

        :param img: opened binary file/stream containing the image.
        :return: detected text, or "text not found" on failure. A non-200
            response also triggers :meth:`refresh_session` to re-solve the
            captcha.
        """
        image = base64.b64encode(img.read()).decode('utf-8')

        # NOTE(review): the demo proxy currently accepts this token prefix;
        # confirm against the live endpoint if requests start failing.
        token = "03AG"

        url = "https://cxl-services.appspot.com/proxy"
        querystring = {"url": "https://vision.googleapis.com/v1/images:annotate", "token": token}

        payload = "{\"requests\":[{\"image\":{\"content\":\"" + image + "\"},\"features\":[{\"type\":\"LANDMARK_DETECTION\",\"maxResults\":50},{\"type\":\"FACE_DETECTION\",\"maxResults\":50},{\"type\":\"OBJECT_LOCALIZATION\",\"maxResults\":50},{\"type\":\"LOGO_DETECTION\",\"maxResults\":50},{\"type\":\"LABEL_DETECTION\",\"maxResults\":50},{\"type\":\"DOCUMENT_TEXT_DETECTION\",\"maxResults\":50},{\"type\":\"SAFE_SEARCH_DETECTION\",\"maxResults\":50},{\"type\":\"IMAGE_PROPERTIES\",\"maxResults\":50},{\"type\":\"CROP_HINTS\",\"maxResults\":50}],\"imageContext\":{\"cropHintsParams\":{\"aspectRatios\":[0.8,1,1.2]}}}]}"

        headers = {'sec-ch-ua': "\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"96\", \"Google Chrome\";v=\"96\"", 'dnt': "1", 'sec-ch-ua-mobile': "?0", 'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36", 'sec-ch-ua-platform': "\"Windows\"", 'content-type': "text/plain;charset=UTF-8", 'accept': "*/*", 'sec-fetch-site': "cross-site", 'sec-fetch-mode': "cors", 'sec-fetch-dest': "empty", 'cache-control': "no-cache"}

        response = requests.request("POST", url, data=payload, headers=headers, params=querystring)

        if response.status_code == 200:
            # Keep response_json defined even if .json() raises, so the
            # fallback below degrades to "text not found" instead of a
            # NameError.
            response_json = None
            try:
                response_json = response.json()
            except Exception:
                logging.exception("An exception was thrown!")

            try:
                text = response_json.get("responses")[0].get("fullTextAnnotation").get("text")
            except Exception:
                logging.exception("An exception was thrown!")
                text = "text not found"
            print("OCR text by Google Free: ")
            print(text)
        else:
            print("Code Error: " + str(response.status_code) + " Reason Error: " + str(response.reason) + "\nError occurs in ocr_google_free")
            text = "text not found"
            self.refresh_session()

        return text

    def ocr_google_free_get_token(self):
        """Fetch the recaptcha anchor page and try to scrape a token.

        NOTE(review): the regex below has an empty lookbehind and matches
        empty strings — the original pattern was likely corrupted; restore
        the real token-extraction pattern before relying on this method.
        """
        url = "https://www.google.com/recaptcha/api2/anchor"

        headers = {'sec-ch-ua': "\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"96\", \"Google Chrome\";v=\"96\"", 'sec-ch-ua-mobile': "?0", 'sec-ch-ua-platform': "\"Windows\"", 'upgrade-insecure-requests': "1", 'dnt': "1", 'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36", 'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", 'sec-fetch-site': "cross-site", 'sec-fetch-mode': "navigate", 'sec-fetch-dest': "iframe", 'cache-control': "no-cache"}

        response = requests.request("GET", url, headers=headers)
        print(response.text)

        m = re.findall(r'(?<=)', response.text)
        print(m[0])
186 |
--------------------------------------------------------------------------------
/ocrTranslate/services/webspeechdemo/webspeechdemo.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Web Speech API Demo
4 |
97 |
98 |
Click on the microphone icon and begin speaking.
99 |
Speak now.
100 |
No speech was detected. You may need to adjust your
101 |
102 | microphone settings.
103 |
104 | No microphone was found. Ensure that a microphone is installed and that
105 |
106 | microphone settings are configured correctly.
107 |
Click the "Allow" button above to enable your microphone.
108 |
Permission to use microphone was denied.
109 |
Permission to use microphone is blocked. To change,
110 | go to chrome://settings/contentExceptions#media-stream
111 |
Web Speech API is not supported by this browser.
112 | Upgrade to Chrome
113 | version 25 or later.
114 |
115 |
116 |
118 |
119 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
OCR Translator
3 | Convert captured images into text and then translate that text.
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | ---
16 | With this app, you can select your preferred OCR and translation services. After clicking on START or using the keyboard shortcut Alt+Win+T, the program will launch and you can choose the area of the screen to scan for text using OCR. If you have selected a translation service, the text will then be automatically translated.
17 |
18 | preview:
19 |
20 | https://user-images.githubusercontent.com/20650591/233107070-f9a14ed8-5c77-4947-8fa5-8d1c86d4a04f.mp4
21 |
22 | # 🔥 Features
23 |
24 | - Desktop application with a user-friendly graphical user interface (GUI) provided by customtkinter.
25 | - Ability to select preferred OCR and translation services.
26 | - Option to run the program using either the START button or the keyboard shortcut (Alt+Win+T or bound from options).
27 | - Capability to choose the area of the screen to scan for text using OCR and save the position (for example, when watching a movie and the subtitles always appear in one spot, so you don't have to select the text area again).
28 | - Automatic translation of the captured text if a translation service has been selected.
29 | - Ability to capture subtitles from movies or games by selecting the corresponding area of the screen and displaying the translated text next to them.
30 | - Chat with chatGPT or edgeGPT.
31 | - Ability to translate from the clipboard or manually entered text (similar to a typical translation app).
32 | - Save all selected options and settings to a file and load them when the program is launched.
33 |
34 | ## Desktop App
35 | Download the desktop app [here](https://github.com/Azornes/ocrTranslator/releases)
36 | Tested only on Windows 10.
37 |
38 | ---
39 | ## Dependency
40 | 1. [Python 3.9](https://www.python.org/downloads/release/python-390/). (If you want run from source)
41 | 2. (optional) [Capture2Text](https://sourceforge.net/projects/capture2text/).
42 | 3. (optional) [Tesseract](https://github.com/UB-Mannheim/tesseract/wiki).
4. (optional) Google API: generate a [service_account_creds.json](https://developers.google.com/workspace/guides/create-credentials) credentials file, then put it into the `ocrTranslate/configs` directory.
44 |
45 |
46 |
47 | ### 5. (optional) ChatGPT
48 | > #### [Source](https://github.com/acheong08/ChatGPT)
49 |
50 |
51 | ## Configuration
52 | 1. Create account on [OpenAI's ChatGPT](https://chat.openai.com/)
53 | 2. Save your email and password
54 | ### Authentication method: (Choose 1 and paste to app settings)
55 |
56 | #### - Email/Password
57 | > _Currently broken for free users. Do `export PUID="..."` if you have a plus account. The PUID is a cookie named `_puid`_
58 | > Not supported for Google/Microsoft accounts.
59 |
60 | #### - Access token
61 | https://chat.openai.com/api/auth/session
62 |
63 |
64 |
65 |
66 |
67 |
68 | ### 6. (optional) EdgeGPT
69 | > #### [Source](https://github.com/acheong08/ChatGPT)
70 |
71 |
72 |
73 |
74 |
75 | #### Checking access (Required)
76 |
77 |
78 |
79 | - Install the latest version of Microsoft Edge
80 | - Alternatively, you can use any browser and set the user-agent to look like you're using Edge (e.g., `Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.51`). You can do this easily with an extension like "User-Agent Switcher and Manager" for [Chrome](https://chrome.google.com/webstore/detail/user-agent-switcher-and-m/bhchdcejhohfmigjafbampogmaanbfkg) and [Firefox](https://addons.mozilla.org/en-US/firefox/addon/user-agent-string-switcher/).
81 | - Open [bing.com/chat](https://bing.com/chat)
82 | - If you see a chat feature, you are good to go
83 |
84 |
85 |
86 |
87 |
88 |
89 | #### Getting authentication (Required)
90 |
91 |
92 |
93 | - Install the cookie editor extension for [Chrome](https://chrome.google.com/webstore/detail/cookie-editor/hlkenndednhfkekhgcdicdfddnkalmdm) or [Firefox](https://addons.mozilla.org/en-US/firefox/addon/cookie-editor/)
94 | - Go to `bing.com`
95 | - Open the extension
96 | - Click "Export" on the bottom right, then "Export as JSON" (This saves your cookies to clipboard)
97 | - Paste your cookies into a file `cookies.json`
98 | - Paste your file `cookies.json` to `ocrTranslate/configs/`
99 |
100 |
101 |
102 |
103 |
104 | ---
105 |
106 | # 📊 Tables with information
107 |
108 |
109 |
110 |
111 | ### Supported OCR Services
112 |
113 |
114 | | ID | OCR | Internet/Local | Status |
115 | |-----|-------------------------------------------------------------------------------------------------------|----------------|--------|
116 | | 1 | [Google Vision Api](https://cloud.google.com/vision/docs/ocr) | Internet | stable |
117 | | 2 | [Google Vision Free Demo](https://cloud.google.com/vision/docs/drag-and-drop) | Internet | stable |
118 | | 3 | [Baidu Api](https://intl.cloud.baidu.com/product/ocr.html) | Internet | stable |
119 | | 4 | [Windows OCR](https://learn.microsoft.com/en-us/uwp/api/windows.media.ocr.ocrengine?view=winrt-22621) | Local | stable |
120 | | 5 | [Capture2Text](https://capture2text.sourceforge.net/) | Local | stable |
121 | | 6 | [Tesseract](https://tesseract-ocr.github.io/tessdoc/) | Local | stable |
122 | | 7 | [RapidOCR](https://github.com/RapidAI/RapidOCR) | Local | stable |
123 |
124 |
125 |
126 |
127 |
128 |
129 | ### Supported Translation Services
130 | > #### [Source](https://github.com/uliontse/translators)
131 |
132 |
133 | | ID | Translator | Number of Supported Languages | Advantage | Service | Status |
134 | |-----|-----------------------------------------------------------------------------------|-------------------------------|---------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|---------------------------------|
135 | | 1 | [Niutrans](https://niutrans.com/trans) | 302 | support the most languages in the world | [Northeastern University](http://english.neu.edu.cn/) / [Niutrans](https://github.com/NiuTrans), China | / |
136 | | 2 | [Alibaba](https://translate.alibaba.com) | 221 | support most languages, support professional field | [Alibaba](https://damo.alibaba.com/about?lang=en), China | stable |
137 | | 3 | [Baidu](https://fanyi.baidu.com) | 201 | support most languages, support professional field, support Classical Chinese | [Baidu](https://ir.baidu.com/company-overview), China | stable |
138 | | 4 | [Iciba](https://www.iciba.com/fy) | 187 | support the most languages in the world | [Kingsoft](https://www.wps.com/about-us/) / [Xiaomi](https://www.mi.com/us/about/), China | stable |
139 | | 5 | [MyMemory](https://mymemory.translated.net) | 151 | support the most languages in the world, good at Creole English, Creole French | [Translated](https://translatedlabs.com/welcome), Italy | stable |
140 | | 6 | [Iflytek](https://fanyi.xfyun.cn/console/trans/text) | 140 | support the most languages in the world | [Iflytek](https://www.iflytek.com/en/about-us.html), China | / |
141 | | 7 | [Google](https://translate.google.com) | 134 | support more languages in the world | [Google](https://about.google/), America | stable(offline in China inland) |
142 | | 8 | [VolcEngine](https://translate.volcengine.com) | 122 | support more languages in the world, support professional field | [ByteDance](https://www.bytedance.com/en/), China | / |
143 | | 9 | [Lingvanex](https://lingvanex.com/demo) | 112 | support translation of different regions but the same language | [Lingvanex](https://lingvanex.com/about-us/), Cyprus | stable |
144 | | 10 | [Bing](https://www.bing.com/Translator) | 110 | support more languages in the world | [Microsoft](https://www.microsoft.com/en-us/about), America | stable |
145 | | 11 | [Yandex](https://translate.yandex.com) | 102 | support more languages in the world, support word to emoji | [Yandex](https://yandex.com/company/), Russia | / |
146 | | 12 | [Itranslate](https://itranslate.com/webapp) | 101 | support translation of different regions but the same language, such as en-US, en-UK, en-AU | [Itranslate](https://itranslate.com/about), Austria | stable |
147 | | 13 | [Sogou](https://fanyi.sogou.com) | 61 | support more languages in the world | [Tencent](https://www.tencent.com/en-us/about.html), China | stable |
148 | | 14 | [ModernMt](https://www.modernmt.com/translate) | 56 | open-source, support more languages in the world | [Modernmt](https://github.com/modernmt) / [Translated](https://translatedlabs.com/welcome), Italy | stable |
149 | | 15 | [SysTran](https://www.systran.net/translate/) | 52 | support more languages in the world | [SysTran](https://www.systran.net/about/), France | stable |
150 | | 16 | [Apertium](https://www.apertium.org/) | 45 | open-source | [Apertium](https://github.com/apertium) | stable |
151 | | 17 | [Reverso](https://www.reverso.net/text-translation) | 42 | popular on Mac and Iphone | [Reverso](https://www.corporate-translation.reverso.com/about-us), France | stable |
152 | | 18 | [CloudYi](https://www.cloudtranslation.com/#/translate) | 28 | support main languages | [Xiamen University](http://nlp.xmu.edu.cn/) / [CloudTranslation](https://www.cloudtranslation.com/#/about), China | stable |
153 | | 19 | [Deepl](https://www.deepl.com/translator) | 27 | high quality to translate but response slowly | [Deepl](https://jobs.deepl.com/l/en), Germany | stable |
154 | | 20 | [QQTranSmart](https://transmart.qq.com) | 22 | support main languages | [Tencent](https://www.tencent.com/en-us/about.html), China | stable |
155 | | 21 | [TranslateCom](https://www.translate.com/machine-translation) | 21 | good at English translation | [TranslateCom](https://www.translate.com/about-us), America | stable |
156 | | 22 | [Tilde](https://translate.tilde.com/) | 21 | good at lv, de, fr translation | [Tilde](https://tilde.com/about), Latvia | / |
157 | | 23 | [QQFanyi](https://fanyi.qq.com) | 17 | support main languages | [Tencent](https://www.tencent.com/en-us/about.html), China | stable |
158 | | 24 | [Argos](https://translate.argosopentech.com) | 17 | open-source | [Argos](https://github.com/argosopentech) / [Libre](https://github.com/LibreTranslate), America | stable |
159 | | 25 | [TranslateMe](https://translateme.network/) | 16 | good at English translation | [TranslateMe](https://translateme.network/our-team/) / [Neosus](https://neosus.net/about/), Lithuania | stable |
160 | | 26 | [Youdao](https://ai.youdao.com/product-fanyi-text.s) | 15 | support main languages, high quality | [Netease](https://ir.netease.com/company-overview/corporate-profile), China | stable |
161 | | 27 | [Papago](https://papago.naver.com) | 15 | good at Korean translation | [Naver](https://www.navercorp.com/en/naver/company), South Korea | stable |
| 28  | [Mirai](https://miraitranslate.com/trial/)                                        | 15                            | good at Japanese translation                                                                | [MiraiTranslate](https://miraitranslate.com/en/company/), Japan                                                   | /                               |
163 | | 29 | [Iflyrec](https://fanyi.iflyrec.com) | 12 | good at Chinese translation | [Iflytek](https://www.iflytek.com/en/about-us.html), China | stable |
164 | | 30 | [Yeekit](https://www.yeekit.com/site/translate) | 10 | support main languages | [CTC](https://www.ctpc.com.cn/cms/enAboutUs.htm), China | stable |
165 | | 31 | [LanguageWire](https://www.languagewire.com/en/technology/languagewire-translate) | 8 | good at English translation | [LanguageWire](https://www.languagewire.com/about-us), Denmark | stable |
166 | | 32 | [Caiyun](https://fanyi.caiyunapp.com) | 7 | high quality to translate but response slowly, support professional field | [ColorfulClouds](http://caiyunapp.com/jobs/), China | stable |
167 | | 33 | [Elia](https://elia.eus/translator) | 6 | good at Basque translation | [Elhuyar](https://www.elhuyar.eus/eu/nor-gara), Spain | stable |
168 | | 34 | [Judic](https://judic.io/en/translate) | 4 | good at European translation | [CrossLang](https://crosslang.com/about-us/), Belgium | stable |
169 | | 35 | [Mglip](http://fy.mglip.com/pc) | 3 | good at Mongolia translation | [Inner Mongolia University](https://www.imu.edu.cn/yw/Home.htm), China | stable |
170 | | 36 | [Utibet](http://mt.utibet.edu.cn/mt) | 2 | good at Tibet translation | [Tibet University](http://www.utibet.edu.cn/), China | stable |
171 |
172 |
173 |
174 |
175 | ---
176 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import ctypes
3 | import logging
4 | import threading
5 |
6 | import tkinter
7 |
8 | import configparser
9 | import win32gui
10 | from BlurWindow.blurWindow import blur
11 |
12 | from ocrTranslate.assets import Assets as assets
13 | from ocrTranslate.config_files import google_api, google_free, chatGpt, deepL, multi_translators, capture2Text, tesseract, baidu, rapid_ocr, config, edgeGpt
14 | from ocrTranslate.gui.complex_tk_gui import ComplexTkGui, result_boxes
15 |
16 | import os
17 | import time
18 | import subprocess
19 |
20 | from win32api import GetSystemMetrics
21 | from threading import Thread
22 | from queue import Queue
23 | import keyboard
24 |
25 | from PIL import ImageEnhance
26 | from PIL import ImageGrab
27 | from mss import mss
28 |
# Build the main window first: tkinter variables and Toplevels created below
# all require a root Tk instance to exist.
root = ComplexTkGui()

# Register a distinct AppUserModelID so Windows gives the app its own taskbar
# icon/grouping instead of lumping it under python.exe.
myappid = 'Azornes.ocrTranslator'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)

# get current path (directory containing this script)
path = os.path.dirname(os.path.abspath(__file__))
36 |
37 |
38 | # subprocess.call(["C:\Program Files\AutoHotkey\AutoHotkey.exe", "na wierzchu.ahk"])
39 |
40 |
class MyCapture:
    """Full-screen region selector and OCR dispatcher.

    Shows a frozen screenshot of the whole virtual desktop in a borderless,
    topmost Toplevel so the user can drag a rectangle to OCR.  The selected
    region is re-grabbed with ``ImageGrab`` and written to the temporary image
    files consumed by the OCR back-ends.  When the "last OCR area" segmented
    button reads "Saved", the rectangle stored in the settings file is reused
    and no selector overlay is shown at all.
    """

    def __init__(self):
        # Variables X/Y hold the press position, X2/Y2 the release position
        # of the left mouse button (i.e. the selection rectangle corners).
        self.X = tkinter.IntVar(value=0)
        self.Y = tkinter.IntVar(value=0)
        self.X2 = tkinter.IntVar(value=0)
        self.Y2 = tkinter.IntVar(value=0)
        if root.seg_button_last_ocr_area.get() == "Off" or root.seg_button_last_ocr_area.get() == "On":
            self.sel = False
            # Virtual-screen size: GetSystemMetrics(78/79) are
            # SM_CXVIRTUALSCREEN / SM_CYVIRTUALSCREEN — the bounding box of
            # all monitors, not just the primary one.
            self.screen_width = GetSystemMetrics(78)
            self.screen_height = GetSystemMetrics(79)
            print("screen width and height: " + str(self.screen_width) + "x" + str(self.screen_height))
            # Create the highest level container
            self.top = tkinter.Toplevel(root, width=self.screen_width, height=self.screen_height)
            # The maximization and minimization buttons are not displayed
            self.top.overrideredirect(True)
            self.canvas = tkinter.Canvas(self.top, bg='white', width=self.screen_width, height=self.screen_height)

            # Show the pre-captured full-screen screenshot so the user selects
            # on a frozen image rather than the live desktop.
            self.image = tkinter.PhotoImage(file=assets.path_to_tmp4)
            self.canvas.create_image(self.screen_width // 2, self.screen_height // 2, image=self.image)
            self.canvas.pack()

            # Record the position where the left mouse button is pressed.
            def onLeftButtonDown(event):
                self.X.set(event.x)
                self.Y.set(event.y)
                # start screenshot
                self.sel = True

            # FIX: the bound event sequence was an empty string (the original
            # '<...>' sequences were lost), so the handler never fired.
            self.canvas.bind('<Button-1>', onLeftButtonDown)

            # Dragging with the left button: draw the selection rectangle.
            def onLeftButtonMove(event):
                global lastDraw, r, c
                try:
                    # Delete the previously drawn items, otherwise moving the
                    # mouse leaves a trail of stale rectangles/lines.
                    self.canvas.delete(lastDraw)
                    self.canvas.delete(r)
                    self.canvas.delete(c)
                except Exception:  # FIX: was a bare except (also swallowed SystemExit/KeyboardInterrupt)
                    pass
                # Redraw the crosshair at the current pointer position.
                r = self.canvas.create_line(0, event.y, self.screen_width, event.y, fill='white')
                c = self.canvas.create_line(event.x, 0, event.x, self.screen_height, fill='white')
                if not self.sel:
                    pass
                else:
                    lastDraw = self.canvas.create_rectangle(self.X.get(), self.Y.get(), event.x, event.y, outline='orange')

            # FIX: restored '<B1-Motion>' (was an empty sequence string).
            self.canvas.bind('<B1-Motion>', onLeftButtonMove)

            def onMouseMove(event):  # Mouse movement without clicking: draw crosshair lines.
                global r, c
                try:
                    # Delete the previous crosshair before drawing a new one.
                    self.canvas.delete(r)
                    self.canvas.delete(c)
                except Exception:
                    pass
                r = self.canvas.create_line(0, event.y, self.screen_width, event.y, fill='white')
                c = self.canvas.create_line(event.x, 0, event.x, self.screen_height, fill='white')

            # FIX: restored '<Motion>' (was an empty sequence string).
            self.canvas.bind('<Motion>', onMouseMove)

            def onEscPressed(event):
                self.top.destroy()

            # FIX: restored '<Escape>'.  NOTE(review): key events only reach
            # the canvas while it has keyboard focus — confirm that
            # focus_force() below is sufficient.
            self.canvas.bind('<Escape>', onEscPressed)

            # On release: store the second corner and grab the selected region.
            def onLeftButtonUp(event):
                self.sel = False
                self.X2.set(event.x)
                self.Y2.set(event.y)
                try:
                    self.canvas.delete(lastDraw)
                except Exception:
                    pass

                # Normalise the corners so a drag in any direction (e.g. from
                # lower-right to upper-left) yields a valid box.
                left, right = sorted([self.X.get(), event.x])
                top, bottom = sorted([self.Y.get(), event.y])

                # all_screens=True so selections on secondary monitors work.
                pic = ImageGrab.grab((left + 1, top + 1, right, bottom), all_screens=True)

                self.top.destroy()  # Close the top-level container.
                if pic:
                    pic.save(assets.path_to_tmp)
                    pic.save(assets.path_to_tmp2)

                if root.seg_button_last_ocr_area.get() == "On":
                    # Persist the rectangle so the "Saved" mode can reuse it.
                    config.read(assets.path_settings_gui)
                    dict_settings = {"last_ocr_area_x": str(self.X.get()), "last_ocr_area_y": str(self.Y.get()), "last_ocr_area_x2": str(self.X2.get()), "last_ocr_area_y2": str(self.Y2.get())}
                    if not config.has_section("settings"):
                        config.add_section("settings")
                    for key, value in dict_settings.items():
                        config.set("settings", key, value)
                    with open(assets.path_settings_gui, "w") as config_file:
                        config.write(config_file)
                    root.seg_button_last_ocr_area.set("Saved")

            # FIX: restored '<ButtonRelease-1>' (was an empty sequence string).
            self.canvas.bind('<ButtonRelease-1>', onLeftButtonUp)

            self.canvas.pack(fill=tkinter.BOTH, expand=tkinter.YES)
            self.top.attributes("-topmost", True)
            self.top.focus_force()
        else:
            # "Saved" mode: reuse the rectangle stored in the settings file
            # and grab it immediately, without showing the selector overlay.
            config.read(assets.path_settings_gui)
            self.X = tkinter.IntVar(value=int(config.get("settings", "last_ocr_area_x")))
            self.Y = tkinter.IntVar(value=int(config.get("settings", "last_ocr_area_y")))
            self.X2 = tkinter.IntVar(value=int(config.get("settings", "last_ocr_area_x2")))
            self.Y2 = tkinter.IntVar(value=int(config.get("settings", "last_ocr_area_y2")))

            left, right = sorted([self.X.get(), self.X2.get()])
            top, bottom = sorted([self.Y.get(), self.Y2.get()])

            pic = ImageGrab.grab((left + 1, top + 1, right, bottom), all_screens=True)
            if pic:
                pic.save(assets.path_to_tmp)
                pic.save(assets.path_to_tmp2)

    def get_text_from_ocr(self):
        """Run every enabled OCR back-end on the captured region in parallel
        threads, combine their texts into the main textbox (and optionally the
        clipboard), then translate/display the result on a background thread.
        """
        # Each worker takes one throwaway positional argument (the thread
        # helper below always passes a placeholder) and returns the text.
        def OCRGoogle(test):
            with open(assets.path_to_tmp, 'rb') as img:
                # ImageEnhance re-exports PIL.Image; this is PIL.Image.open().
                ocr_result = google_api.run_ocr(ImageEnhance.Image.open(img))
            return ocr_result

        def OCRBaiduu(test):
            with open(assets.path_to_tmp2, 'rb') as img:
                ocr_result = baidu.run_ocr(img.read())
            return ocr_result

        def OCRCapture2Text(test):
            ocr_result = capture2Text.run_ocr()
            return ocr_result.decode('UTF-8')

        def OCRGoogleFree(test):
            with open(assets.path_to_tmp, 'rb') as img:
                ocr_result = google_free.run_ocr(img)
            return ocr_result

        def OCRWindows(test):
            # Windows.Media.Ocr via the bundled PowerShell script.
            command = "{path_to_win_ocr} -Path '{path_to_tmp}' | Select-Object -ExpandProperty Text".format(path_to_tmp=assets.path_to_tmp2, path_to_win_ocr=assets.path_to_win_ocr)
            ocr_result = subprocess.check_output(["powershell.exe", command])
            try:
                # cp852 is an OEM console code page — NOTE(review): PowerShell
                # output encoding may differ on non-Polish locales; confirm.
                result_text = ocr_result.decode("cp852").strip()
            except UnicodeDecodeError:
                result_text = "Error decoding"
            return result_text

        def OCRTesseract(test):
            ocr_result = tesseract.run_ocr(assets.path_to_tmp2)
            return ocr_result

        def OCRRapid(test):
            ocr_result = rapid_ocr.run_ocr(assets.path_to_tmp2)
            return ocr_result

        list_functions = [OCRGoogle, OCRBaiduu, OCRCapture2Text, OCRGoogleFree, OCRWindows, OCRTesseract, OCRRapid]
        list_states_of_switches = [root.switch_ocr_google_api.get(), root.switch_ocr_baidu_api.get(), root.switch_ocr_capture2text.get(), root.switch_ocr_google_free.get(), root.switch_ocr_windows_local.get(), root.switch_ocr_tesseract.get(), root.switch_ocr_rapidocr.get()]
        string_results = {0: "-Google API:\n", 1: "-Baidu:\n", 2: "-Capture2Text:\n", 3: "-Google Free:\n", 4: "-Windows OCR:\n", 5: "-Tesseract:\n", 6: "-Rapid OCR:\n"}

        # One queue per back-end; enabled back-ends run concurrently and each
        # deposits its result in its own queue.
        queues = [Queue() for _ in range(len(list_functions))]
        threads = []

        for i, func in enumerate(list_functions):
            if list_states_of_switches[i] == 1:
                t = Thread(target=lambda q, func, arg1: q.put(func(arg1)), args=(queues[i], func, 'world!'))
                threads.append(t)
                t.start()

        for t in threads:
            t.join()

        result = ""
        for i, queue in enumerate(queues):
            if list_states_of_switches[i] == 1:
                # With exactly one engine enabled in game mode, skip the
                # per-engine header label so the overlay shows bare text.
                if list_states_of_switches.count(1) == 1 and root.switch_game_mode.get() == 1:
                    result = queue.get()
                else:
                    result += string_results[i] + queue.get() + "\n\n"

        root.scrollable_frame_textbox.insert("0.0", result + "\n\n")
        if root.switch_results_to_clipboard.get() == 1:
            root.clipboard_clear()
            root.clipboard_append(result)

        def translate_from_ocr():
            # Translate (unless disabled) and show the result window/overlay.
            if root.option_menu_translation.get() != "Disabled":
                translated = translate(result)
                self.show_text_window(translated)
            else:
                self.show_text_window(result)

        thread = Thread(target=translate_from_ocr)
        thread.start()

    def show_text_window(self, text):
        """Display *text* either as a transparent, draggable in-game overlay
        positioned over the OCR'd region (game mode) or as a plain Toplevel
        window with a Text widget (window mode).
        """
        if root.switch_game_mode.get() == 1:
            def destroy_result(event):
                result_toplevel.destroy()

            def start_move(event):
                result_toplevel.x = event.x
                result_toplevel.y = event.y

            def stop_move(event):
                result_toplevel.x = None
                result_toplevel.y = None

            def do_move(event):
                # Move the borderless window by the pointer delta.
                deltax = event.x - result_toplevel.x
                deltay = event.y - result_toplevel.y
                x = result_toplevel.winfo_x() + deltax
                y = result_toplevel.winfo_y() + deltay
                result_toplevel.geometry(f"+{x}+{y}")

            result_toplevel = tkinter.Toplevel()
            result_toplevel.title('OCR window')
            result_toplevel.overrideredirect(1)  # removes the window title bar
            result_toplevel.attributes('-topmost', 'true')
            # Everything painted in #202020 becomes see-through (Windows only).
            result_toplevel.wm_attributes('-transparentcolor', "#202020")

            width_geometry = abs(self.X2.get() - self.X.get())
            height_geometry = abs(self.Y2.get() - self.Y.get())
            coordinate_x_geometry = min(self.X.get(), self.X2.get())
            coordinate_y_geometry = abs(min(self.Y.get(), self.Y2.get()) - abs(self.Y2.get() - self.Y.get()))
            result_toplevel.geometry("%dx%d+%d+%d" % (width_geometry, height_geometry, coordinate_x_geometry, coordinate_y_geometry))
            result_toplevel.configure(bg="#202020")

            def stroke_text(text, textcolor, strokecolor, fontsize):
                # Word-wrap *text* to the canvas width, then draw it with a
                # 1-px outline: four offset passes in the stroke colour
                # followed by one pass in the text colour.
                xy = [(0, -1), (0, 1), (-1, 0), (1, 0)]
                max_width = int(canvas.cget('width'))
                font_size = fontsize
                length_of_one_char = fontsize * 0.7
                font = ('Helvetica', font_size, 'bold')
                line = ""
                lines = []
                for i, char in enumerate(text):
                    line += char
                    # Measure the current line by drawing it off-screen.
                    text_width = canvas.create_text(0, 0, anchor="nw", text=line, font=font, tags=("text",))
                    width = canvas.bbox(text_width)[2] - canvas.bbox(text_width)[0]
                    canvas.delete(text_width)
                    if char == "\n" or width > max_width:
                        lines.append(line.replace("\n", ""))
                        line = ""
                    elif char == " ":
                        # Look ahead to the next word; wrap early if it would
                        # overflow the line.
                        next_word = ""
                        for next_char in text[i + 1:]:
                            if next_char == " ":
                                break
                            next_word += next_char
                        text_width = canvas.create_text(0, 0, anchor="nw", text=next_word, font=font, tags=("text",))
                        next_width = canvas.bbox(text_width)[2] - canvas.bbox(text_width)[0]
                        canvas.delete(text_width)
                        if width + next_width > max_width:
                            lines.append(line[:-1])
                            line = char
                if line:
                    lines.append(line)
                text2 = "\n".join(lines)
                if '\n' in text2:
                    lines2 = text2.split('\n')
                    for i in range(len(lines2)):
                        for x, y in xy:
                            canvas.create_text((canvas.winfo_reqwidth() // 2) + x, length_of_one_char + y + i * 20, text=lines2[i], font=font, fill=strokecolor)
                        canvas.create_text(canvas.winfo_reqwidth() // 2, length_of_one_char + i * 20, text=lines2[i], font=font, fill=textcolor)
                else:
                    # Single line: shrink the canvas/window to fit the text.
                    text_width = canvas.create_text(0, 0, anchor="nw", text=text2, font=font, tags=("text",))
                    width = canvas.bbox(text_width)[2] - canvas.bbox(text_width)[0]
                    height = canvas.bbox(text_width)[3] - canvas.bbox(text_width)[1]
                    canvas.delete(text_width)
                    canvas.config(width=width, height=height)
                    result_toplevel.geometry("%dx%d+%d+%d" % (width, height, (width_geometry - width) / 2 + coordinate_x_geometry, (height_geometry - height) + coordinate_y_geometry))
                    for x, y in xy:
                        canvas.create_text((canvas.winfo_reqwidth() // 2) + x, length_of_one_char + y, text=text2, font=font, fill=strokecolor)
                    canvas.create_text(canvas.winfo_reqwidth() // 2, length_of_one_char, text=text2, font=font, fill=textcolor)

            canvas = tkinter.Canvas(result_toplevel, bg='#202020', width=width_geometry, height=height_geometry)
            # Grow the font until the text overflows the window, then back off.
            font_size = 1
            while True:
                font = ("Helvetica", font_size)
                text_width = canvas.create_text(0, 0, anchor="nw", text=text, font=font, tags=("text",))
                width = canvas.bbox(text_width)[2] - canvas.bbox(text_width)[0]
                height = canvas.bbox(text_width)[3] - canvas.bbox(text_width)[1]
                canvas.delete(text_width)
                if width > int(canvas.cget('width')) or height > int(canvas.cget('height')):
                    font_size -= 3
                    break
                font_size += 1
            if font_size < 10:
                font_size = 10
            stroke_text(text, 'white', 'black', font_size)
            canvas.configure(borderwidth=0, highlightthickness=0, relief="flat")
            # FIX: restored the Tk event sequences — they were empty strings,
            # so the overlay could be neither dismissed nor dragged.
            # NOTE(review): double-click-to-dismiss is an assumption recovered
            # from the stripped sequences — confirm against upstream.
            canvas.bind('<Double-Button-1>', destroy_result)
            canvas.bind("<ButtonPress-1>", start_move)
            canvas.bind("<ButtonRelease-1>", stop_move)
            canvas.bind("<B1-Motion>", do_move)
            canvas.pack()

            self.resultbox = tkinter.Message(result_toplevel)
            self.resultbox.pack()
            result_boxes.append(result_toplevel)
            hWnd = win32gui.GetParent(result_toplevel.winfo_id())
            blur(hWnd)
            result_toplevel.after(60000, lambda: result_toplevel.destroy())  # Destroy the widget after 60 seconds
        elif root.switch_window_mode.get() == 1:
            result_toplevel = tkinter.Toplevel()
            result_toplevel.title('OCR window')
            result_toplevel.iconbitmap(assets.path_to_icon2)

            def top_close():
                result_toplevel.destroy()

            result_toplevel.protocol('WM_DELETE_WINDOW', top_close)
            L1 = tkinter.Label(result_toplevel, text='OCR Text:')
            L1.pack()
            result_text = tkinter.Text(result_toplevel, width=100, height=50)
            result_text.insert(tkinter.END, text)
            result_text.pack()
            self.resultbox = tkinter.Message(result_toplevel)
            self.resultbox.pack()
            result_boxes.append(result_toplevel)
389 |
390 |
async def display_translations_ChatGPT(word, language_to="English"):
    """Stream a ChatGPT translation of *word* into the translation textbox.

    Chunks arriving from ``chatGpt.run_translate_async`` are appended to the
    end of the current line; a chunk containing a newline advances the target
    line by two instead of being accumulated into the returned string.
    """
    textbox = root.translation_frame_textbox
    textbox.insert("0.0", "\n\n")
    collected = ""
    line_num = 0
    async for chunk in chatGpt.run_translate_async(word, language_to):
        textbox.insert(f"{line_num}.0 lineend", chunk)
        if "\n" in chunk:
            line_num += 2
        else:
            collected += chunk
    return collected
403 |
404 |
async def display_translations_EdgeGPT(word, language_to="English"):
    """Stream an EdgeGPT (Bing Chat) translation of *word* into the textbox.

    Mirrors :func:`display_translations_ChatGPT`: each streamed chunk is
    inserted at the end of the current line; newline-bearing chunks move the
    insertion line down by two and are not folded into the return value.
    """
    textbox = root.translation_frame_textbox
    textbox.insert("0.0", "\n\n")
    collected = ""
    line_num = 0
    async for chunk in edgeGpt.run_translate_async(word, language_to):
        textbox.insert(f"{line_num}.0 lineend", chunk)
        if "\n" in chunk:
            line_num += 2
        else:
            collected += chunk
    return collected
418 |
419 |
def translate(results):
    """Translate *results* with the translator selected in the GUI.

    Dispatches on the translation option menu: GoogleFree, DeepL, the two
    chat-based services (which stream into the textbox themselves), or one of
    the `translators`-package services.  Returns the translated string
    (empty string when the menu value matches no branch).
    """
    # Spin the loading animation for the whole duration of the request.
    root.loading_icon.start()
    translated = ""
    if root.option_menu_translation.get() == "GoogleFree":
        translated = google_free.run_translate(results, root.combobox_to_language.get())
    elif root.option_menu_translation.get() == "DeepL":
        translated = deepL.run_translate(results, root.combobox_from_language.get(), root.combobox_to_language.get())
    elif root.option_menu_translation.get() == "ChatGPT":
        # Chat services stream their output into the textbox as they go.
        translated = asyncio.run(display_translations_ChatGPT(results, root.combobox_to_language.get()))
    elif root.option_menu_translation.get() == "EdgeGPT":
        translated = asyncio.run(display_translations_EdgeGPT(results, root.combobox_to_language.get()))
    elif root.option_menu_translation.get() in ['alibaba', 'apertium', 'argos', 'baidu', 'bing', 'caiyun', 'cloudYi', 'deepl', 'elia', 'google', 'iciba', 'iflytek', 'iflyrec', 'itranslate', 'judic', 'languageWire', 'lingvanex', 'niutrans', 'mglip', 'modernMt', 'myMemory', 'papago', 'qqFanyi', 'qqTranSmart', 'reverso', 'sogou', 'sysTran', 'tilde', 'translateCom', 'translateMe', 'utibet', 'volcEngine', 'yandex', 'yeekit', 'youdao']:
        translated = multi_translators.run_translate(results, root.combobox_from_language.get(), root.combobox_to_language.get(), root.option_menu_translation.get())
    # NOTE(review): with the clipboard switch ON the result is copied but NOT
    # inserted into the translation textbox (and vice versa, except for the
    # chat services which already streamed their text) — confirm this
    # either/or behaviour is intended.
    if root.switch_results_to_clipboard.get() == 1:
        root.clipboard_clear()
        root.clipboard_append(translated)
    elif root.option_menu_translation.get() != "ChatGPT" and root.option_menu_translation.get() != "EdgeGPT":
        root.translation_frame_textbox.insert("0.0", translated + "\n\n")
    root.loading_icon.stop()
    return translated
440 |
441 |
def translate_without_ocr():
    """Translate without running OCR: take the source text either from the
    app's main textbox (when the "from text" switch is on) or from the
    system clipboard.
    """
    if root.switch_from_text.get() == 1:
        source = root.scrollable_frame_textbox.get('0.0', 'end')
    else:
        source = root.clipboard_get()
    translate(source)
447 |
448 |
def handle_app_windows(win_state):
    """Toggle the main window and all OCR result windows between iconified
    and normal.

    Does nothing unless *win_state* (the state captured before the capture
    started) is ``'normal'``.  If the main window is currently visible it is
    iconified together with every result window; otherwise everything is
    restored.  Result windows that no longer exist are skipped.
    """
    if win_state != 'normal':
        return
    target = 'icon' if root.state() == 'normal' else 'normal'
    root.state(target)
    for window in result_boxes:
        try:
            window.state(target)
        except Exception:
            # The Toplevel may already have been destroyed (60 s timeout).
            pass
464 |
465 |
def buttonCaptureClick():
    """Entry point for the Start button and the global hotkey.

    In text/clipboard mode the translation runs on a worker thread.  In OCR
    mode the app windows are hidden, the full virtual desktop is captured as
    the selector background, the region selector runs (unless a saved area is
    reused), OCR is dispatched, and the windows are restored.
    """
    initial_state = root.state()
    text_mode = root.switch_from_text.get() == 1 or root.switch_from_clipboard.get() == 1
    if text_mode:
        Thread(target=translate_without_ocr).start()
        return
    handle_app_windows(initial_state)
    # Snapshot of the whole virtual desktop used as the selector background.
    with mss() as screen_grabber:
        screen_grabber.shot(mon=-1, output=assets.path_to_tmp4)
    time.sleep(0.2)
    capture = MyCapture()
    if root.seg_button_last_ocr_area.get() != "Saved":
        # Block until the selector overlay is closed.
        root.button_start.wait_window(capture.top)
    capture.get_text_from_ocr()
    handle_app_windows(initial_state)
481 |
482 |
def key(event):
    # Callback for the global hotkey / binding entry: the event payload is
    # ignored; it simply triggers the same action as the Start button.
    buttonCaptureClick()
485 |
486 |
def load_hotkey():
    """(Re)register the global OCR hotkey from the saved GUI settings.

    Reads ``entry_binding_start_ocr`` from the settings file and registers it
    with the ``keyboard`` module; falls back to ``alt+win+t`` when no binding
    is stored or the settings file does not contain the entry yet.
    """
    config.read(assets.path_settings_gui)
    try:
        # clear_all_hotkeys() does not exist in every version of the
        # keyboard package; ignore when absent.
        keyboard.clear_all_hotkeys()
    except AttributeError:
        pass
    try:
        if config.get("settings", "entry_binding_start_ocr") != "":
            keyboard.add_hotkey(config.get("settings", "entry_binding_start_ocr"), key, args=('From global keystroke',))
        else:
            keyboard.add_hotkey('alt+win+t', key, args=('From global keystroke',))
    # FIX: configparser raises NoSectionError/NoOptionError (not KeyError)
    # when the section or option is missing — e.g. on first run with a fresh
    # settings file — so the fallback hotkey was never registered and the
    # lookup crashed instead.  KeyError is kept for backward compatibility.
    except (KeyError, configparser.NoSectionError, configparser.NoOptionError):
        keyboard.add_hotkey('alt+win+t', key, args=('From global keystroke',))
500 |
# Wire the Start button and the hotkey-entry widget to the capture action,
# then register the global hotkey from the saved settings.
root.button_start.configure(command=buttonCaptureClick)
root.entry_binding_start_ocr.hotkey_function = key
load_hotkey()

# Run the Tk event loop; log any uncaught exception and tear the GUI down.
try:
    root.mainloop()
except Exception as e:
    logging.exception("An exception was thrown!")
    root.destroy()
510 |
--------------------------------------------------------------------------------
/ocrTranslate/gui/complex_tk_gui.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import configparser
3 | import os
4 |
5 | from tktooltip import ToolTip
6 | import customtkinter
7 |
8 | from ocrTranslate.assets import Assets as assets
9 | from ocrTranslate.config_files import google_api, capture2Text, chatGpt, tesseract, baidu
10 | from ocrTranslate.gui.AnimatedGif import AnimatedGif
11 | from ocrTranslate.gui.AnimatedGifButton import AnimatedGifButton
12 | from ocrTranslate.gui.auto_complete_combobox import AutocompleteCombobox
13 | from ocrTranslate.gui.auto_resize_text_box import AutoResizeTextBox
14 | from ocrTranslate.gui.bindingEntry import BindingEntry
15 | from ocrTranslate.gui.tabviewChats import TabviewChats
16 | from ocrTranslate.langs import _langs, services_translators_languages, _langs2, get_subdictionary, services_stt_languages
17 | from ocrTranslate.services.speach_to_text_multi_services import VoiceRecognizerMulti
18 | from ocrTranslate.services.speach_to_text_web_google import SpeechRecognitionGUI
19 |
customtkinter.set_appearance_mode("System")  # Modes: "System" (standard), "Dark", "Light"
customtkinter.set_default_color_theme("blue")  # Themes: "blue" (standard), "green", "dark-blue"

# Toplevel OCR result windows; main.py appends to this list and iterates it
# to minimise/restore all result windows together with the main window.
result_boxes = []
# NOTE(review): import placed mid-file in the original; kept here to avoid
# reordering module side effects.
from PIL import Image
25 |
26 |
def new_tag_config(self, tagName, **kwargs):
    # Forward tag_config to the CTkTextbox's private `_textbox` attribute
    # (presumably the wrapped tkinter.Text widget — relies on CustomTkinter
    # internals; verify when upgrading the library).
    return self._textbox.tag_config(tagName, **kwargs)


# Monkey-patch: expose tkinter's tag_config on CTkTextbox instances.
customtkinter.CTkTextbox.tag_config = new_tag_config
32 |
33 |
34 | class ComplexTkGui(customtkinter.CTk):
    def __init__(self):
        """Construct the full OCR_Translator window.

        Builds the four top-level areas and keeps a reference to every widget
        as an instance attribute:

        * a collapsible sidebar (navigation buttons, save/load settings,
          appearance-mode and UI-scaling menus),
        * the "home" frame (OCR service switches, Translation / Speech to
          Text / Text to Speech tabs, the START button and the last-OCR-area
          segmented button),
        * the "chat AI" frame (tabbed chats plus a shared send box),
        * the "settings" frame (per-service credential entries and key
          bindings).

        Finishes by applying defaults, selecting the home frame and loading
        saved settings via ``self.load_setting()``.
        """
        super().__init__()

        # ||||||||||||||||||| configure window |||||||||||||||||||
        self.title("OCR_Translator")
        self.geometry(f"{1100}x{580}")
        # ||||||||||||||||||| configure tooltip style |||||||||||||||||||
        tooltip_style = {"delay": 0.01, "follow": True, "parent_kwargs": {"bg": "black", "padx": 1, "pady": 1}, "fg": "white", "bg": "#1c1c1c", "padx": 4, "pady": 4}
        # ||||||||||||||||||| configure grid layout |||||||||||||||||||
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(1, weight=1)
        # ||||||||||||||||||| configure variable |||||||||||||||||||
        # Speech-recognition backends; created lazily elsewhere, None until then.
        self.speechRecognitionGUI = None
        self.voiceRecognizerMulti = None
        # ||||||||||||||||||| load images |||||||||||||||||||
        self.rev_translate_icon = customtkinter.CTkImage(Image.open(assets.reverse_icon), size=(26, 26))
        self.send_message_icon = customtkinter.CTkImage(light_image=Image.open(assets.path_to_send_message_black), dark_image=Image.open(assets.path_to_send_message_white), size=(26, 26))
        self.chat_ai_icon = customtkinter.CTkImage(light_image=Image.open(assets.path_to_chatai_black), dark_image=Image.open(assets.path_to_chatai_white), size=(20, 20))
        self.home_image = customtkinter.CTkImage(light_image=Image.open(assets.path_to_home_dark), dark_image=Image.open(assets.path_to_home_light), size=(20, 20))
        self.settings_image = customtkinter.CTkImage(light_image=Image.open(assets.path_to_settings_dark), dark_image=Image.open(assets.path_to_settings_light), size=(20, 20))
        self.open_side_menu_icon = customtkinter.CTkImage(light_image=Image.open(assets.path_to_open_side_menu_dark), dark_image=Image.open(assets.path_to_open_side_menu_white), size=(20, 20))
        # ||||||||||||||||||| create sidebar frame with widgets |||||||||||||||||||
        self.sidebar_frame = customtkinter.CTkFrame(self, width=100, corner_radius=10)

        self.logo_label = customtkinter.CTkLabel(self.sidebar_frame, text="OCR_Translator", font=customtkinter.CTkFont(size=20, weight="bold"))
        self.logo_label.grid(row=0, column=0, padx=20, pady=(10, 10))

        self.sidebar_frame.grid_rowconfigure(4, weight=1)

        self.home_button = customtkinter.CTkButton(self.sidebar_frame, corner_radius=0, height=40, border_spacing=10, text="Home", fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), image=self.home_image, anchor="w", command=self.home_button_event)
        self.home_button.grid(row=1, column=0, sticky="ew")
        self.chat_ai_button = customtkinter.CTkButton(self.sidebar_frame, corner_radius=0, height=40, border_spacing=10, text="Chat AI", fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), image=self.chat_ai_icon, anchor="w", command=self.chat_ai_button_event)
        self.chat_ai_button.grid(row=2, column=0, sticky="ew")
        self.settings_button = customtkinter.CTkButton(self.sidebar_frame, corner_radius=0, height=40, border_spacing=10, text="Settings", fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), image=self.settings_image, anchor="w", command=self.settings_button_event)
        self.settings_button.grid(row=3, column=0, sticky="ew")

        self.sidebar_button_save = customtkinter.CTkButton(self.sidebar_frame, text="Save Settings", command=self.save_setting)
        self.sidebar_button_save.grid(row=5, column=0, padx=20, pady=(10, 10))

        self.sidebar_button_load = customtkinter.CTkButton(self.sidebar_frame, text="Load Settings", command=self.load_setting)
        self.sidebar_button_load.grid(row=6, column=0, padx=20, pady=(10, 10))

        self.appearance_mode_label = customtkinter.CTkLabel(self.sidebar_frame, text="Appearance Mode:", anchor="w")
        self.appearance_mode_label.grid(row=7, column=0, padx=20, pady=(10, 0))
        self.appearance_mode_optionemenu = customtkinter.CTkOptionMenu(self.sidebar_frame, values=["Light", "Dark", "System"], command=self.change_appearance_mode_event)
        self.appearance_mode_optionemenu.grid(row=8, column=0, padx=20, pady=(10, 10))
        self.scaling_label = customtkinter.CTkLabel(self.sidebar_frame, text="UI Scaling:", anchor="w")
        self.scaling_label.grid(row=9, column=0, padx=20, pady=(10, 0))
        self.scaling_optionemenu = customtkinter.CTkOptionMenu(self.sidebar_frame, values=["80%", "90%", "100%", "110%", "120%"], command=self.change_scaling_event)
        self.scaling_optionemenu.grid(row=10, column=0, padx=20, pady=(10, 20))
        # section Home Frame
        # |_████████████████████████████████████████████████████████████████████|
        # |_________________________ create home frame _________________________|
        # |_████████████████████████████████████████████████████████████████████|
        self.home_frame = customtkinter.CTkFrame(self, corner_radius=0, fg_color="transparent")
        self.home_frame.grid_columnconfigure(0, weight=1)
        self.home_frame.grid_columnconfigure(1, weight=1)
        self.home_frame.grid_rowconfigure(0, weight=1)
        self.home_frame.grid_rowconfigure(1, weight=0)

        # ||||||||||||||||||| create scrollable frame |||||||||||||||||||
        self.scrollable_frame = customtkinter.CTkScrollableFrame(self.home_frame, label_text="OCR Checker")
        self.scrollable_frame.grid(row=0, column=0, padx=(0, 0), pady=(20, 0), sticky="nsew")
        self.scrollable_frame.grid_columnconfigure((0, 1, 2, 3), weight=1)
        # self.scrollable_frame.grid_columnconfigure("all", weight=1)

        # One on/off switch per OCR backend; the loop below fills these in via
        # setattr using names derived from the service label.
        self.switch_ocr_google_api = None
        self.switch_ocr_google_free = None
        self.switch_ocr_baidu_api = None
        self.switch_ocr_capture2text = None
        self.switch_ocr_windows_local = None
        self.switch_ocr_tesseract = None
        self.switch_ocr_rapidocr = None
        self.switches_ocr = []

        switchers = [("Google api",), ("Google Free",), ("Baidu api",), ("Capture2Text",), ("Windows local",), ("Tesseract",), ("RapidOCR",), ]

        rows = len(switchers) // 2
        for i, switcher in enumerate(switchers):
            # e.g. "Google api" -> attribute name "switch_ocr_google_api"
            method_name = 'switch_ocr_{}'.format(switcher[0].lower().replace(" ", "_"))
            switch = customtkinter.CTkSwitch(self.scrollable_frame, text="Turn " + switcher[0])
            switch.grid(row=i // 2, column=(i % 2) + 1, padx=0, pady=(0, 10), sticky="NSEW")
            setattr(self, method_name, switch)
            self.switches_ocr.append(switch)

        self.switch_from_text = customtkinter.CTkSwitch(self.scrollable_frame, text="Only from below text", command=self.disable_all_ocr_switchers)
        self.switch_from_text.grid(row=rows + 1, column=1, padx=0, pady=(0, 10), sticky="NSEW")
        ToolTip(self.switch_from_text, msg="Translate text only from below text box", **tooltip_style)

        self.switch_from_clipboard = customtkinter.CTkSwitch(self.scrollable_frame, text="Only from clipboard", command=self.disable_all_ocr_switchers)
        self.switch_from_clipboard.grid(row=rows + 1, column=2, padx=0, pady=(0, 10), sticky="NSEW")
        ToolTip(self.switch_from_clipboard, msg="Translate text only from clipboard", **tooltip_style)

        # self.scrollable_frame_textbox = customtkinter.CTkTextbox(self, width=250)
        # self.scrollable_frame_textbox.grid(row=1, column=1,padx=(10, 10), pady=(10, 10), sticky="nsew")

        self.button_start_stt_ocr_tab = AnimatedGifButton(root= self.scrollable_frame, gif_file=assets.path_to_microphone_active_png, stop_icon=assets.path_to_microphone_white, delay=0.03, size=(25, 25), hide=False)
        self.button_start_stt_ocr_tab.grid(row=rows + 2, column=1, padx=0, pady=0, sticky="e")
        self.button_start_stt_ocr_tab.grid_save()

        self.scrollable_frame_textbox = customtkinter.CTkTextbox(self.scrollable_frame, height=1500, undo=True, autoseparators=True)
        self.scrollable_frame_textbox.grid(row=rows + 3, column=0, columnspan=4, rowspan=10, padx=(10, 10), pady=(10, 10), sticky="nsew")

        # ||||||||||||||||||| create tabview |||||||||||||||||||
        self.tabview = customtkinter.CTkTabview(self.home_frame, width=25)
        self.tabview.grid(row=0, column=1, padx=(20, 20), pady=(3, 0), sticky="nsew")
        self.tabview.add("Translation")
        self.tabview.add("Speech to Text")
        self.tabview.add("Text to Speech")
        self.tabview.tab("Translation").grid_columnconfigure(0, weight=1)  # configure grid of individual tabs
        self.tabview.tab("Translation").grid_rowconfigure(0, weight=1)  # configure grid of individual tabs
        self.tabview.tab("Speech to Text").grid_columnconfigure(0, weight=1)
        self.tabview.tab("Speech to Text").grid_rowconfigure(0, weight=1)
        self.tabview.tab("Text to Speech").grid_columnconfigure(0, weight=1)

        # section Translation tab
        # ||| Translation tab |||
        self.scrollable_frame_translation = customtkinter.CTkScrollableFrame(self.tabview.tab("Translation"))
        self.scrollable_frame_translation.grid(row=0, column=0, padx=(0, 0), pady=(0, 0), sticky="nsew")
        self.scrollable_frame_translation.grid_columnconfigure((0, 1, 2), weight=1)

        services_translators_languages_tab = ["Disabled", "ChatGPT", "EdgeGPT"]
        for a in services_translators_languages.keys():
            services_translators_languages_tab.append(a)

        self.option_menu_translation = customtkinter.CTkOptionMenu(self.scrollable_frame_translation, dynamic_resizing=False, values=services_translators_languages_tab, command=self.change_languages)
        self.option_menu_translation.grid(row=0, column=1, padx=20, pady=(20, 10))

        self.switch_window_mode = customtkinter.CTkSwitch(self.scrollable_frame_translation, text="Turn window mode", )
        self.switch_window_mode.grid(row=0, column=0, padx=(10, 0), pady=(20, 10))
        ToolTip(self.switch_window_mode, msg="Show an additional window with the translation or OCR.", **tooltip_style)

        self.switch_game_mode = customtkinter.CTkSwitch(self.scrollable_frame_translation, text="Turn game mode", )
        self.switch_game_mode.grid(row=1, column=0, padx=10, pady=(0, 20))
        ToolTip(self.switch_game_mode, msg="Show an additional window with the translation above the selected text. Click middle button of mouse to close window", **tooltip_style)

        self.loading_icon = AnimatedGif(self.scrollable_frame_translation, assets.path_to_loading_gif, 0.04, size=(50, 50))
        self.loading_icon.grid(row=1, column=1, rowspan=2, padx=0, pady=0, sticky="nsew")
        self.loading_icon.grid_save()

        self.switch_results_to_clipboard = customtkinter.CTkSwitch(self.scrollable_frame_translation, text="Results to Clipboard", )
        self.switch_results_to_clipboard.grid(row=1, column=2, padx=10, pady=(0, 20))

        self.label_from_language = customtkinter.CTkLabel(self.scrollable_frame_translation, text="From language:", anchor="w")
        self.label_from_language.grid(row=2, column=0, padx=(20, 0), pady=(0, 0))
        self.label_to_language = customtkinter.CTkLabel(self.scrollable_frame_translation, text="To language:", anchor="w")
        self.label_to_language.grid(row=2, column=2, padx=(0, 20), pady=(0, 0))

        self.combobox_from_language = AutocompleteCombobox(self.scrollable_frame_translation, completevalues=list(_langs2.values()))
        self.combobox_from_language.grid(row=3, column=0, padx=(20, 5), pady=(0, 0))
        self.button_reverse_language = customtkinter.CTkButton(master=self.scrollable_frame_translation, text="", fg_color="transparent", border_width=0, width=26, anchor="left", height=26, text_color=("gray10", "#DCE4EE"), image=self.rev_translate_icon, command=self.reverse_languages)
        self.button_reverse_language.grid(row=3, column=1, padx=(0, 0), pady=(0, 0))
        self.combobox_to_language = AutocompleteCombobox(self.scrollable_frame_translation, completevalues=list(_langs2.values()))
        self.combobox_to_language.grid(row=3, column=2, padx=(5, 20), pady=(0, 0))

        self.button_start_stt_tran_tab = AnimatedGifButton(root= self.scrollable_frame_translation, gif_file=assets.path_to_microphone_active_png, stop_icon=assets.path_to_microphone_white, delay=0.03, size=(25, 25), hide=False)
        self.button_start_stt_tran_tab.grid(row=4, column=1, padx=0, pady=0)
        self.button_start_stt_tran_tab.grid_save()

        self.translation_frame_textbox = customtkinter.CTkTextbox(self.scrollable_frame_translation, width=250, height=1500, undo=True, autoseparators=True)
        self.translation_frame_textbox.grid(row=5, column=0, columnspan=3, padx=(10, 10), pady=(10, 10), sticky="nsew")

        # section Speech to Text tab
        # ||| Speech to Text tab |||
        # self.label_tab_2 = customtkinter.CTkLabel(self.tabview.tab("Speech to Text"), text="WORK IN PROGRESS")
        # self.label_tab_2.grid(row=0, column=0, padx=20, pady=20)

        self.scrollable_frame_stt = customtkinter.CTkScrollableFrame(self.tabview.tab("Speech to Text"))
        self.scrollable_frame_stt.grid(row=0, column=0, padx=(0, 0), pady=(0, 0), sticky="nsew")
        self.scrollable_frame_stt.grid_columnconfigure((0, 1, 2), weight=1)

        services_stt_tab = ["Disabled", "WebGoogle", "Google", "Whisper"]

        self.option_menu_stt = customtkinter.CTkOptionMenu(self.scrollable_frame_stt, dynamic_resizing=False, values=services_stt_tab, command=self.change_sst_service)
        self.option_menu_stt.grid(row=0, column=1, padx=20, pady=(20, 10))

        self.sst_frame_textbox = customtkinter.CTkTextbox(self.scrollable_frame_stt, width=250, height=1500, undo=True, autoseparators=True)
        self.sst_frame_textbox.grid(row=2, column=0, columnspan=3, padx=(10, 10), pady=(10, 10), sticky="nsew")

        self.button_start_stt = AnimatedGifButton(root= self.scrollable_frame_stt, gif_file=assets.path_to_microphone_active_png, stop_icon=assets.path_to_microphone_white, delay=0.03, size=(50, 50), hide=False)
        self.button_start_stt.grid(row=1, column=1, rowspan=1, padx=0, pady=0)
        self.button_start_stt.grid_save()

        # Language picker for speech-to-text; gridded on demand in change_sst_service.
        self.combobox_sst_language = AutocompleteCombobox(self.scrollable_frame_stt, completevalues=list(_langs2.values()))


        #SpeechRecognitionGUI(start_button=self.button_start_stt2, text_box=self.sst_frame_textbox)



        # ||| Text to Speech tab |||
        self.label_tab_3 = customtkinter.CTkLabel(self.tabview.tab("Text to Speech"), text="WORK IN PROGRESS")
        self.label_tab_3.grid(row=0, column=0, padx=20, pady=20)

        # ||||||||||||||||||| create main entry and button |||||||||||||||||||
        self.button_options = customtkinter.CTkButton(master=self, corner_radius=0, height=20, width=20, border_spacing=0, text="", fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), image=self.open_side_menu_icon, anchor="w", command=self.hide_show_side_bar)
        # self.button_options = customtkinter.CTkButton(master=self, text="Options", fg_color="transparent", border_width=2, text_color=("gray10", "#DCE4EE"), width= 25, command=self.hide_show_side_bar)
        self.button_options.grid(row=0, column=0, padx=(0, 0), pady=(0, 0), sticky="nw")

        # NOTE: main.py rebinds button_start's command to the real capture routine.
        self.button_start = customtkinter.CTkButton(master=self.home_frame, text="START", fg_color="transparent", border_width=2, text_color=("gray10", "#DCE4EE"), command=self.pressed_print)
        self.button_start.grid(row=1, column=0, columnspan=2, padx=(20, 20), pady=5)
        self.seg_button_last_ocr_area = customtkinter.CTkSegmentedButton(self.home_frame)
        self.seg_button_last_ocr_area.grid(row=1, column=0, padx=(20, 10), pady=(10, 10), sticky="")
        self.seg_button_last_ocr_area.configure(values=["Off", "On", "Saved"])
        self.seg_button_last_ocr_area.winfo_children()[2].configure(state="disabled")
        ToolTip(self.seg_button_last_ocr_area.winfo_children()[0], msg="Off - the option is turned off.", **tooltip_style)
        ToolTip(self.seg_button_last_ocr_area.winfo_children()[1], msg="On - the next time the area will be saved.", **tooltip_style)
        ToolTip(self.seg_button_last_ocr_area.winfo_children()[2], msg="Saved - the area is already saved, and does not need to be selected again for the next OCR run.", **tooltip_style)

        # section Chat Ai Frame
        # |_██████████████████████████████████████████████████████████████████████|
        # |_________________________ create Chat Ai frame ________________________|
        # |_██████████████████████████████████████████████████████████████████████|

        self.chat_ai_frame = customtkinter.CTkFrame(self, corner_radius=0, fg_color="transparent")
        self.chat_ai_frame.grid_rowconfigure(0, weight=1)
        self.chat_ai_frame.grid_columnconfigure(0, weight=1)

        # ||||||||||||||||||| create tabview |||||||||||||||||||



        # self.tabviews_chat_ai = []
        # self.textbox_ai_frames = []
        # self.textbox_ai_send_frames = []
        # self.buttons_send_message_ai = []

        ai_services = [("ChatGPT",), ("Bing",), ("Bard",), ]

        for ai_service in ai_services:
            print(str(ai_service[0]))

        #for i in range(len(ai_services)):
        #    self.tabview_chat_ai1 = TabviewChats(self.chat_ai_frame, ai_services)

        self.tabview_chat_ai1 = TabviewChats(root = self.chat_ai_frame, ai_services=ai_services, send_message_icon=self.send_message_icon, column=0)

        # self.tabview_chat_ai = customtkinter.CTkTabview(self.chat_ai_frame, width=25)
        # self.tabview_chat_ai.grid(row=0, column=0, padx=(0, 20), pady=(5, 5), sticky="nsew")
        # self.tabview_chat_ai.add("ChatGPT")
        # self.tabview_chat_ai.add("Bing")
        # self.tabview_chat_ai.add("Bard")
        # self.tabview_chat_ai.tab("ChatGPT").grid_columnconfigure(0, weight=1)  # configure grid of individual tabs
        # self.tabview_chat_ai.tab("ChatGPT").grid_rowconfigure(0, weight=1)  # configure grid of individual tabs
        # self.tabview_chat_ai.tab("Bing").grid_columnconfigure(0, weight=1)  # configure grid of individual tabs
        # self.tabview_chat_ai.tab("Bing").grid_rowconfigure(0, weight=1)  # configure grid of individual tabs
        # # ||| ChatGPT tab |||
        # self.textbox_chatgpt_frame = customtkinter.CTkTextbox(self.tabview_chat_ai.tab("ChatGPT"), undo=True, autoseparators=True)
        # self.textbox_chatgpt_frame.grid(row=0, column=0, padx=(0, 0), pady=(0, 0), sticky="nsew")
        # self.textbox_chatgpt_frame.tag_config("user_name", justify='left', foreground='white', font=customtkinter.CTkFont(size=14, weight="bold"))
        # self.textbox_chatgpt_frame.tag_config("user_message", justify='left', foreground='white')
        # self.textbox_chatgpt_frame.tag_config("chatbot_name", justify='left', foreground='lightblue', font=customtkinter.CTkFont(size=14, weight="bold"))
        # self.textbox_chatgpt_frame.tag_config("chatbot_message", justify='left', foreground='lightblue')
        #
        # self.textbox_chatgpt_send_frame = AutoResizeTextBox(self.textbox_chatgpt_frame)
        # self.textbox_chatgpt_send_frame.grid(row=1, column=0, padx=(15, 0), pady=(0, 0), sticky="nsew")
        # self.button_send_message_chatgpt_ai = customtkinter.CTkButton(master=self.textbox_chatgpt_send_frame, text="", fg_color="transparent", bg_color="transparent", border_width=0, width=26, anchor="left", height=26, text_color=("gray10", "#DCE4EE"), image=self.send_message_icon)
        # self.button_send_message_chatgpt_ai.grid(row=0, column=1, padx=(14, 7), pady=(4, 0), sticky="e")
        # self.textbox_chatgpt_send_frame.button = self.button_send_message_chatgpt_ai
        # # ||| Bing tab |||
        # self.textbox_bing_frame = customtkinter.CTkTextbox(self.tabview_chat_ai.tab("Bing"), undo=True, autoseparators=True)
        # self.textbox_bing_frame.grid(row=0, column=0, padx=(0, 0), pady=(0, 0), sticky="nsew")
        # self.textbox_bing_frame.tag_config("user_name", justify='left', foreground='white', font=customtkinter.CTkFont(size=14, weight="bold"))
        # self.textbox_bing_frame.tag_config("user_message", justify='left', foreground='white')
        # self.textbox_bing_frame.tag_config("chatbot_name", justify='left', foreground='lightblue', font=customtkinter.CTkFont(size=14, weight="bold"))
        # self.textbox_bing_frame.tag_config("chatbot_message", justify='left', foreground='lightblue')
        #
        # self.textbox_bing_send_frame = AutoResizeTextBox(self.textbox_bing_frame)
        # self.textbox_bing_send_frame.grid(row=1, column=0, padx=(15, 0), pady=(0, 0), sticky="nsew")
        # self.button_send_message_bing_ai = customtkinter.CTkButton(master=self.textbox_bing_send_frame, text="", fg_color="transparent", bg_color="transparent", border_width=0, width=26, anchor="left", height=26, text_color=("gray10", "#DCE4EE"), image=self.send_message_icon)
        # self.button_send_message_bing_ai.grid(row=0, column=1, padx=(14, 7), pady=(4, 0), sticky="e")
        # self.textbox_bing_send_frame.button = self.button_send_message_bing_ai
        #
        # self.button_add_chat = customtkinter.CTkButton(master=self.chat_ai_frame, text="", fg_color="transparent", bg_color="transparent", border_width=0, width=26, anchor="left", height=26, text_color=("gray10", "#DCE4EE"), image=self.send_message_icon, command=self.add_new_chat)
        # self.button_add_chat.grid(row=0, column=0, padx=(14, 7), pady=(4, 0), sticky="w")

        # # ||||||||||||||||||| create tabview |||||||||||||||||||
        # self.tabview_chat_ai2 = customtkinter.CTkTabview(self.chat_ai_frame, width=25)
        # self.tabview_chat_ai2.grid(row=0, column=1, padx=(0, 20), pady=(5, 5), sticky="nsew")
        # self.tabview_chat_ai2.add("ChatGPT")
        # self.tabview_chat_ai2.add("Bing")
        # self.tabview_chat_ai2.add("Bard")
        # self.tabview_chat_ai2.tab("ChatGPT").grid_columnconfigure(0, weight=1)  # configure grid of individual tabs
        # self.tabview_chat_ai2.tab("ChatGPT").grid_rowconfigure(0, weight=1)  # configure grid of individual tabs
        # self.tabview_chat_ai2.tab("Bing").grid_columnconfigure(0, weight=1)  # configure grid of individual tabs
        # self.tabview_chat_ai2.tab("Bing").grid_rowconfigure(0, weight=1)  # configure grid of individual tabs
        #
        # self.tabview_chat_ai2.grid_forget()
        # self.tabview_chat_ai2.grid(row=0, column=1, padx=(0, 20), pady=(5, 5), sticky="nsew")

        # ||| text send to all |||
        self.textbox_chat_frame = AutoResizeTextBox(self.chat_ai_frame)
        self.textbox_chat_frame.grid(row=1, column=0, padx=(5, 20), pady=(15, 15), sticky="nsew")
        self.button_send_message_chat_ai = customtkinter.CTkButton(master=self.textbox_chat_frame, text="", fg_color="transparent", bg_color="transparent", border_width=0, width=26, anchor="left", height=26, text_color=("gray10", "#DCE4EE"), image=self.send_message_icon)
        self.button_send_message_chat_ai.grid(row=0, column=1, padx=(14, 7), pady=(4, 0), sticky="e")
        self.textbox_chat_frame.button = self.button_send_message_chat_ai

        # section Settings Frame
        # |_████████████████████████████████████████████████████████████████████████|
        # |_________________________ create settings frame _________________________|
        # |_████████████████████████████████████████████████████████████████████████|
        self.settings_frame = customtkinter.CTkFrame(self, corner_radius=0, fg_color="transparent")

        self.settings_frame.grid_rowconfigure(0, weight=1)
        self.settings_frame.grid_columnconfigure(0, weight=1)

        # ||||||||||||||||||| create tabview |||||||||||||||||||
        self.tabview_settings = customtkinter.CTkTabview(self.settings_frame, width=25)
        self.tabview_settings.grid(row=0, column=0, columnspan=2, padx=(0, 20), pady=(0, 20), sticky="nsew")
        self.tabview_settings.add("Services")
        self.tabview_settings.add("Other")
        self.tabview_settings.tab("Services").grid_columnconfigure(0, weight=1)  # configure grid of individual tabs
        self.tabview_settings.tab("Services").grid_rowconfigure(0, weight=1)  # configure grid of individual tabs
        self.tabview_settings.tab("Other").grid_columnconfigure(0, weight=1)  # configure grid of individual tabs
        self.tabview_settings.tab("Other").grid_rowconfigure(0, weight=1)  # configure grid of individual tabs

        # ||||||||||||||||||| create scrollable frame Services settings |||||||||||||||||||
        self.scrollable_settings_frame = customtkinter.CTkScrollableFrame(self.tabview_settings.tab("Services"), label_text="Services settings")
        self.scrollable_settings_frame.grid(row=0, column=0, padx=0, pady=0, sticky="nsew")
        self.scrollable_settings_frame.grid_columnconfigure(0, weight=0)
        self.scrollable_settings_frame.grid_columnconfigure(1, weight=1)

        self.entry_settings = []

        # settings_dict = {
        #     'ChatGPT': ("ApiKey", 'session_token', "access_token", "email", "password"),
        #     'Baidu': ("AppId", 'ApiKey', "SecretKey"),
        #     'Capture2Text': ("path_to_Capture2Text_CLI_exe",),
        #     'Tesseract': ("path_to_tesseract_exe",)
        # }

        # Maps each service name to the credential fields that get a label+entry
        # row below; entries become attributes like ``entry_chatgpt_apikey``.
        settings_dict = {
            'ChatGPT': ("ApiKey", "access_token"),
            'Baidu': ("AppId", 'ApiKey', "SecretKey"),
            'Capture2Text': ("path_to_Capture2Text_CLI_exe",),
            'Tesseract': ("path_to_tesseract_exe",)
        }

        iterations = 0
        for service in settings_dict.items():
            label_main_name = 'label_{}'.format(service[0].lower().replace(" ", "_"))
            # print(label_main_name)
            label_main = customtkinter.CTkLabel(self.scrollable_settings_frame, text=service[0] + ":", anchor="w", font=customtkinter.CTkFont(size=14, weight="bold"))
            label_main.grid(row=iterations, column=0, padx=(5, 5), pady=(5, 5))
            setattr(self, label_main_name, label_main)
            iterations = iterations + 1
            for element in service[1]:
                label_name = 'label_{}_{}'.format(service[0].lower().replace(" ", "_"), element.lower().replace(" ", "_"))
                # print(label_name)
                label = customtkinter.CTkLabel(self.scrollable_settings_frame, text=element, anchor="w")
                label.grid(row=iterations, column=0, padx=(5, 5), pady=(5, 5), sticky="nsew")
                setattr(self, label_name, label)

                entry_name = 'entry_{}_{}'.format(service[0].lower().replace(" ", "_"), element.lower().replace(" ", "_"))
                # print(entry_name)
                entry = customtkinter.CTkEntry(self.scrollable_settings_frame, placeholder_text=element)
                entry.grid(row=iterations, column=1, padx=(5, 5), pady=(5, 5), sticky="nsew")
                setattr(self, entry_name, entry)
                self.entry_settings.append(entry)
                iterations = iterations + 1

        # ||||||||||||||||||| create scrollable frame Other settings |||||||||||||||||||
        self.scrollable_settings_frame_others = customtkinter.CTkScrollableFrame(self.tabview_settings.tab("Other"), label_text="Other settings")
        self.scrollable_settings_frame_others.grid(row=0, column=0, padx=0, pady=0, sticky="nsew")
        self.scrollable_settings_frame_others.grid_columnconfigure(0, weight=0)
        self.scrollable_settings_frame_others.grid_columnconfigure(1, weight=1)

        self.label_main_bindings = customtkinter.CTkLabel(self.scrollable_settings_frame_others, text="Bindings" + ":", anchor="w", font=customtkinter.CTkFont(size=14, weight="bold"))
        self.label_main_bindings.grid(row=0, column=0, padx=(5, 5), pady=(5, 5))

        self.label_binding_start_ocr = customtkinter.CTkLabel(self.scrollable_settings_frame_others, text="Bind START", anchor="w")
        self.label_binding_start_ocr.grid(row=1, column=0, padx=(5, 5), pady=(5, 5), sticky="nsew")

        self.entry_binding_start_ocr = BindingEntry(self.scrollable_settings_frame_others, self.label_binding_start_ocr)
        self.entry_binding_start_ocr.grid(row=1, column=1, padx=(5, 5), pady=(5, 5), sticky="nsew")

        # |_████████████████████████████████████████████████████████████████████████|
        # |___________________________ set default values __________________________|
        # |_████████████████████████████████████████████████████████████████████████|
        self.seg_button_last_ocr_area.set("Off")
        self.appearance_mode_optionemenu.set("Dark")
        self.scaling_optionemenu.set("100%")
        self.option_menu_translation.set("Disabled")
        # select default frame
        self.select_frame_by_name("home")
        self.check_ocrs_are_active()

        self.load_setting()
423 |
424 | # section Functions
425 | # |_████████████████████████████████████████████████████████████████████████|
426 | # |___________________________ Functions __________________________|
427 | # |_████████████████████████████████████████████████████████████████████████|
428 |
429 | def add_new_chat(self):
430 | for i, tabview_chat in enumerate(self.tabviews_chat_ai):
431 | if tabview_chat.winfo_manager() == "grid":
432 | tabview_chat.grid_forget()
433 | self.chat_ai_frame.grid_columnconfigure((1, 2), weight=0)
434 | else:
435 | tabview_chat.grid(row=0, column=i, padx=(0, 20), pady=(5, 5), sticky="nsew")
436 | self.chat_ai_frame.grid_columnconfigure((0, 1, 2), weight=1)
437 |
438 | def clone_widget(self, widget, master=None):
439 | parent = master if master else widget.master
440 | cls = widget.__class__
441 |
442 | print(parent)
443 | print(cls)
444 | print(widget.configure())
445 | # Clone the widget configuration
446 | if widget.configure() != None:
447 | cfg = {key: widget.cget(key) for key in widget.configure()}
448 | else:
449 | cfg = {}
450 | cloned = cls(parent, **cfg)
451 |
452 | # Clone the widget's children
453 | for child in widget.winfo_children():
454 | child_cloned = self.clone_widget(child, master=cloned)
455 | print(child)
456 | if child.grid_info():
457 | grid_info = {k: v for k, v in child.grid_info().items() if k not in {'in'}}
458 | child_cloned.grid(**grid_info)
459 | elif child.place_info():
460 | place_info = {k: v for k, v in child.place_info().items() if k not in {'in'}}
461 | child_cloned.place(**place_info)
462 | else:
463 | try:
464 | pack_info = {k: v for k, v in child.pack_info().items() if k not in {'in'}}
465 | child_cloned.pack(**pack_info)
466 | except:
467 | pass
468 |
469 | return cloned
470 |
471 | def hide_show_side_bar(self):
472 | if self.sidebar_frame.winfo_manager() == "grid":
473 | self.sidebar_frame.grid_forget()
474 | else:
475 | self.sidebar_frame.grid(row=0, column=0, rowspan=4, pady=(30, 0), padx=(0, 20), sticky="nsew")
476 | self.sidebar_frame.grid_columnconfigure(0, weight=1)
477 |
478 | def disable_all_ocr_switchers(self):
479 | if self.switch_from_text.get() == 1 or self.switch_from_clipboard.get() == 1:
480 | for switch_ocr in self.switches_ocr:
481 | switch_ocr.deselect()
482 | switch_ocr.configure(state="disabled")
483 | if self.switch_from_text.get() == 1:
484 | self.switch_from_clipboard.deselect()
485 | self.switch_from_clipboard.configure(state="disabled")
486 | if self.switch_from_clipboard.get() == 1:
487 | self.switch_from_text.deselect()
488 | self.switch_from_text.configure(state="disabled")
489 | else:
490 | for switch_ocr in self.switches_ocr:
491 | switch_ocr.configure(state="enabled")
492 | self.check_ocrs_are_active()
493 |
494 | self.switch_from_clipboard.configure(state="enabled")
495 | self.switch_from_text.configure(state="enabled")
496 |
497 | def open_input_dialog_event(self):
498 | dialog = customtkinter.CTkInputDialog(text="Type in a number:", title="CTkInputDialog")
499 | print("CTkInputDialog:", dialog.get_input())
500 |
    def change_appearance_mode_event(self, new_appearance_mode: str):
        """Sidebar callback: switch the app between "Light"/"Dark"/"System"."""
        customtkinter.set_appearance_mode(new_appearance_mode)
503 |
504 | def change_scaling_event(self, new_scaling: str):
505 | new_scaling_float = int(new_scaling.replace("%", "")) / 100
506 | customtkinter.set_widget_scaling(new_scaling_float)
507 |
    def pressed_print(self):
        # Placeholder START-button handler; the launcher script rebinds the
        # button to the real capture routine (button_start.configure(command=...)).
        print("button click")
510 |
511 | def reverse_languages(self):
512 | from_lang = self.combobox_from_language.get()
513 | to_lang = self.combobox_to_language.get()
514 |
515 | self.combobox_from_language.set(to_lang)
516 | self.combobox_to_language.set(from_lang)
517 |
518 | def change_languages(self, test):
519 | c = []
520 | if self.option_menu_translation.get() in services_translators_languages.keys():
521 | lista = services_translators_languages.get(self.option_menu_translation.get(), [])
522 | c = [_langs.get(a, a) for a in lista]
523 | self.combobox_from_language.configure(completevalues=c)
524 | self.combobox_to_language.configure(completevalues=c)
525 |
526 | if self.combobox_from_language.get() not in c:
527 | self.combobox_from_language.set("auto")
528 | if self.combobox_to_language.get() not in c:
529 | self.combobox_to_language.set("")
530 | elif self.option_menu_translation.get() == "ChatGPT" or self.option_menu_translation.get() == "GoogleFree" or self.option_menu_translation.get() == "EdgeGPT":
531 | self.combobox_from_language.configure(completevalues=list(_langs2.values()))
532 | self.combobox_to_language.configure(completevalues=list(_langs2.values()))
533 |
534 | if self.combobox_from_language.get() not in list(_langs2.values()):
535 | self.combobox_from_language.set("auto")
536 | if self.combobox_to_language.get() not in list(_langs2.values()):
537 | self.combobox_to_language.set("")
538 |
539 | def change_sst_service(self, option):
540 | buttons_sst_list = [self.button_start_stt, self.button_start_stt_ocr_tab, self.button_start_stt_tran_tab]
541 | sst_frame_textbox_list = [self.sst_frame_textbox, self.scrollable_frame_textbox, self.translation_frame_textbox]
542 | if option == "Disabled":
543 | self.combobox_sst_language.grid_forget()
544 | for button in buttons_sst_list:
545 | button.configure(state="disabled")
546 | if option == "WebGoogle":
547 | for button in buttons_sst_list:
548 | button.configure(state="normal")
549 | print("WebGoogle")
550 | self.combobox_sst_language.grid(row=1, column=2, padx=(20, 5), pady=(0, 0))
551 | self.combobox_sst_language.configure(completevalues=list(get_subdictionary(services_stt_languages, "WebGoogle").values()))
552 | self.combobox_sst_language.set("English-United Kingdom")
553 | if self.speechRecognitionGUI is None:
554 | self.speechRecognitionGUI = SpeechRecognitionGUI(start_button=buttons_sst_list, text_box=sst_frame_textbox_list, combobox_sst_language = self.combobox_sst_language)
555 | else:
556 | self.speechRecognitionGUI.start()
557 | if option == "Google" or option == "Whisper":
558 | for button in buttons_sst_list:
559 | button.configure(state="normal")
560 | print("test_universal")
561 | self.combobox_sst_language.grid(row=1, column=2, padx=(20, 5), pady=(0, 0))
562 | self.combobox_sst_language.configure(completevalues=list(get_subdictionary(services_stt_languages, option).values()))
563 | if self.voiceRecognizerMulti is None:
564 | self.voiceRecognizerMulti = VoiceRecognizerMulti(start_button=buttons_sst_list, text_box=sst_frame_textbox_list, combobox_sst_language=self.combobox_sst_language, name_service=option)
565 | else:
566 | self.voiceRecognizerMulti.start(option)
567 |
568 | def select_frame_by_name(self, name):
569 | # set button color for selected button
570 | self.home_button.configure(fg_color=("gray75", "gray25") if name == "home" else "transparent")
571 | self.settings_button.configure(fg_color=("gray75", "gray25") if name == "settings" else "transparent")
572 | self.chat_ai_button.configure(fg_color=("gray75", "gray25") if name == "chat_ai" else "transparent")
573 |
574 | # show selected frame
575 | if name == "home":
576 | self.home_frame.grid(row=0, column=1, sticky="nsew")
577 | else:
578 | self.home_frame.grid_forget()
579 | if name == "settings":
580 | self.settings_frame.grid(row=0, column=1, sticky="nsew")
581 | else:
582 | self.settings_frame.grid_forget()
583 | if name == "chat_ai":
584 | self.chat_ai_frame.grid(row=0, column=1, sticky="nsew")
585 | else:
586 | self.chat_ai_frame.grid_forget()
587 |
588 | def home_button_event(self):
589 | self.select_frame_by_name("home")
590 |
591 | def chat_ai_button_event(self):
592 | self.select_frame_by_name("chat_ai")
593 |
594 | def settings_button_event(self):
595 | self.select_frame_by_name("settings")
596 |
597 | def get_key(self, valu):
598 | for key, value in self.__dict__.items():
599 | # print(str(key) + " | " + str(value))
600 | if valu == str(value):
601 | return key
602 | return "key doesn't exist"
603 |
604 | def check_ocrs_are_active(self):
605 | if google_api.is_active:
606 | self.switch_ocr_google_api.configure(state="enabled")
607 | else:
608 | self.switch_ocr_google_api.deselect()
609 | self.switch_ocr_google_api.configure(state="disabled")
610 |
611 | if capture2Text.is_active:
612 | self.switch_ocr_capture2text.configure(state="enabled")
613 | else:
614 | self.switch_ocr_capture2text.deselect()
615 | self.switch_ocr_capture2text.configure(state="disabled")
616 |
617 | if tesseract.is_active:
618 | self.switch_ocr_tesseract.configure(state="enabled")
619 | else:
620 | self.switch_ocr_tesseract.deselect()
621 | self.switch_ocr_tesseract.configure(state="disabled")
622 |
623 | if baidu.is_active:
624 | self.switch_ocr_baidu_api.configure(state="enabled")
625 | else:
626 | self.switch_ocr_baidu_api.deselect()
627 | self.switch_ocr_baidu_api.configure(state="disabled")
628 |
    # save all gui elements into an ini file and re-initialize the services
    def save_setting(self, settings_name="settings", first_time=False):
        """Persist the GUI state to assets.path_settings_gui and re-init services.

        settings_name: ini section written on first run.
            NOTE(review): the merge branch and the service re-init below always
            read the hard-coded "settings" section — a different settings_name
            would KeyError; confirm only the default is ever used.
        first_time: when True, write a fresh section (default geometry plus every
            widget's value) instead of merging into the existing file.
        """
        config = configparser.ConfigParser()
        if first_time:
            # Fresh file: default window geometry plus a snapshot of every widget.
            dict_settings = {"geometry": "1100x580+52+52"}
            dict_settings.update(self.save_mass_tree(self))
            config[settings_name] = dict_settings
        else:
            # Merge into the existing file so keys not touched here survive.
            config.read(assets.path_settings_gui)
            dict_settings = {"geometry": self.geometry()}
            for key, value in dict_settings.items():
                config.set("settings", str(key), str(value))
            for key, value in self.save_mass_tree(self).items():
                config.set("settings", str(key), str(value))

        with open(assets.path_settings_gui, "w") as configfile:
            config.write(configfile)

        # Re-create the service singletons with the (possibly changed) paths/credentials.
        capture2Text.__init__(path_to_Capture2Text_CLI_exe=config["settings"]['entry_capture2text_path_to_capture2text_cli_exe'])
        tesseract.__init__(path_to_tesseract_exe=config["settings"]['entry_tesseract_path_to_tesseract_exe'])
        #chatGpt.__init__(email=config["settings"]['entry_chatgpt_email'], password=config["settings"]['entry_chatgpt_password'], session_token=config["settings"]['entry_chatgpt_session_token'], access_token=config["settings"]['entry_chatgpt_access_token'])
        chatGpt.__init__(access_token=config["settings"]['entry_chatgpt_access_token'])
        baidu.__init__(appid=config["settings"]['entry_baidu_appid'], apikey=config["settings"]['entry_baidu_apikey'], secretkey=config["settings"]['entry_baidu_secretkey'])

        self.disable_all_ocr_switchers()
654 |
655 | def save_mass(self, child, result={}):
656 | # print(child)
657 | if "checkbox" in child.winfo_name():
658 | # print("checkbox")
659 | # print(child.get())
660 | result[self.get_key(str(child))] = child.get()
661 | if "switch" in child.winfo_name():
662 | # print("switch")
663 | # print(child.get())
664 | result[self.get_key(str(child))] = child.get()
665 | if "ctkentry" in child.winfo_name():
666 | # print("ctkentry")
667 | # print(child.get())
668 | result[self.get_key(str(child))] = child.get()
669 | if "bindingentry" in child.winfo_name():
670 | # print("ctkentry")
671 | # print(child.get())
672 | result[self.get_key(str(child))] = child.get()
673 | if "slider" in child.winfo_name():
674 | # print("slider")
675 | # print(child.get())
676 | result[self.get_key(str(child))] = child.get()
677 | if "segmented" in child.winfo_name():
678 | # print("segmented")
679 | # print(child.get())
680 | result[self.get_key(str(child))] = child.get()
681 | if "combobox" in child.winfo_name():
682 | # print("combobox")
683 | # print(child.get())
684 | result[self.get_key(str(child))] = child.get()
685 | if "optionmenu" in child.winfo_name():
686 | # print("optionmenu")
687 | # print(child.get())
688 | result[self.get_key(str(child))] = child.get().replace('%', '%%')
689 | if "textbox" in child.winfo_name():
690 | # print("textbox")
691 | # print(child.get(0.0, 'end-1c'))
692 | # print(str(child))
693 | # print(self.get_key(str(child)))
694 | # print(self.tabview_chat_ai1.get_key(str(child)))
695 | if self.get_key(str(child)) != "key doesn't exist":
696 | result[self.get_key(str(child))] = base64.b64encode(child.get("0.0", "end-1c").encode("utf-8")).decode("utf-8")
697 | else:
698 | result[self.tabview_chat_ai1.get_key(str(child))] = base64.b64encode(child.get("0.0", "end-1c").encode("utf-8")).decode("utf-8")
699 | #result[self.get_key(str(child))] = base64.b64encode(child.get("0.0", "end-1c").encode("utf-8")).decode("utf-8"),
700 | if "radiobutton" in child.winfo_name():
701 | print("radiobutton")
702 | return result
703 |
704 | def save_mass_tree(self, widget, dict_result_atr={}):
705 | dict_result_atr = self.save_mass(widget, dict_result_atr)
706 | for child in widget.winfo_children():
707 | dict_result_atr = self.save_mass_tree(child, dict_result_atr)
708 | # print(dict_result_atr)
709 | return dict_result_atr
710 |
    # load all gui elements from an ini file (counterpart of save_mass)
    def load_mass(self, child, config, settings_name):
        """Restore a single widget's value from config[settings_name].

        The widget kind is inferred from winfo_name(); after restoring the value
        the widget's private `_command` callback is fired (when set) so dependent
        GUI state — enabled switches, language lists, etc. — is refreshed.
        Textbox contents are stored base64 encoded (see save_mass).

        Raises KeyError when the widget's key is missing from the section;
        load_setting catches that and rebuilds the file with defaults.
        """
        # print(child)
        if "checkbox" in child.winfo_name():
            child.select() if int(config[settings_name][self.get_key(str(child))]) else child.deselect()
            if callable(child._command):
                child._command()
        if "switch" in child.winfo_name():
            child.select() if int(config[settings_name][self.get_key(str(child))]) else child.deselect()
            if callable(child._command):
                child._command()
        if "slider" in child.winfo_name():
            child.set(float(config[settings_name][self.get_key(str(child))]))
            if callable(child._command):
                child._command()
        if "segmented" in child.winfo_name():
            child.set(config[settings_name][self.get_key(str(child))])
            if callable(child._command):
                child._command()
        if "combobox" in child.winfo_name():
            child.set(config[settings_name][self.get_key(str(child))])
            if callable(child._command):
                # Some callbacks take no argument, others expect the new value.
                try:
                    child._command()
                except TypeError:
                    child._command(config[settings_name][self.get_key(str(child))])
        if "ctkentry" in child.winfo_name():
            child.delete(0, "end")
            child.insert(0, config[settings_name][self.get_key(str(child))])
        if "bindingentry" in child.winfo_name():
            #print(child.winfo_name())
            # Binding entries are kept readonly; unlock temporarily to write the value.
            child.configure(state="normal")
            child.delete(0, "end")
            child.insert(0, config[settings_name][self.get_key(str(child))])
            child.configure(state="readonly")
        if "textbox" in child.winfo_name():
            child.delete("0.0", "end")
            # Keys for chat-tab textboxes live on the tabview widget, not on self.
            if self.get_key(str(child)) != "key doesn't exist":
                child.insert("0.0", base64.b64decode(config[settings_name][self.get_key(str(child))]).decode("utf-8"))
            else:
                child.insert("0.0", base64.b64decode(config[settings_name][self.tabview_chat_ai1.get_key(str(child))]).decode("utf-8"))
            #child.insert("0.0", base64.b64decode(config[settings_name][self.get_key(str(child))]).decode("utf-8"))
        if "radiobutton" in child.winfo_name():
            print("radiobutton")
        if "optionmenu" in child.winfo_name():
            # if re.search(r"ctkoptionmenu(?![\w\d\.])", str(child)):
            child.set(config[settings_name][self.get_key(str(child))])
            if callable(child._command):
                # Some callbacks take no argument, others expect the new value.
                try:
                    child._command()
                except TypeError:
                    child._command(config[settings_name][self.get_key(str(child))])
763 |
764 | def load_mass_tree(self, widget, config={}, settings_name=""):
765 | self.load_mass(widget, config, settings_name)
766 | for child in widget.winfo_children():
767 | self.load_mass_tree(child, config, settings_name)
768 |
    def load_setting(self, settings_name="settings", first_time=False):
        """Load window geometry and all widget values from the settings ini file.

        Creates the file first when it does not exist.  A KeyError while loading
        (stale or incomplete file) triggers a rewrite with defaults and a reload.
        NOTE(review): if saving keeps producing an unreadable "settings" section
        this recurses without bound — confirm save_setting always succeeds.
        """
        config = configparser.ConfigParser()
        if not os.path.exists(assets.path_settings_gui) or first_time:
            self.save_setting(first_time=True)
            first_time = True
        config.read(assets.path_settings_gui)
        try:
            self.geometry(config[settings_name]["geometry"])
            self.load_mass_tree(self, config, settings_name)
            if first_time:
                # First run: ignore the saved geometry and start at the default size.
                self.geometry(f"{1100}x{580}")
        except KeyError:
            self.load_setting(first_time=True)
782 |
783 |
if __name__ == "__main__":
    # Launch the GUI event loop when run as a script.
    ComplexTkGui().mainloop()
787 |
--------------------------------------------------------------------------------