├── .github
└── FUNDING.yml
├── .gitignore
├── LICENSE
├── README.md
├── package
├── letmedoit
│ ├── README.md
│ ├── config.py
│ ├── main.py
│ ├── package_name.txt
│ └── requirements.txt
├── letmedoit_b4_v3
│ ├── LICENSE
│ ├── README.md
│ ├── __init__.py
│ ├── audio
│ │ ├── notification1.mp3
│ │ ├── notification1_mild.mp3
│ │ ├── notification2.mp3
│ │ └── notification2_mild.mp3
│ ├── autoassist.py
│ ├── autobuilder.py
│ ├── automath.py
│ ├── autoretriever.py
│ ├── chatgpt.py
│ ├── codey.py
│ ├── commandprompt.py
│ ├── config.py
│ ├── eTextEdit.py
│ ├── files
│ │ ├── Readme.md
│ │ ├── audio
│ │ │ └── Readme.md
│ │ ├── chats
│ │ │ └── Readme.md
│ │ └── video
│ │ │ └── Readme.md
│ ├── geminipro.py
│ ├── geminiprovision.py
│ ├── gui
│ │ ├── chatgui.py
│ │ ├── quicktask.py
│ │ └── worker.py
│ ├── health_check.py
│ ├── history
│ │ └── Readme.md
│ ├── icons
│ │ ├── CyberTask.ico
│ │ ├── CyberTask.png
│ │ ├── GeminiPro.ico
│ │ ├── GeminiPro.png
│ │ ├── LetMeDoIt.ico
│ │ ├── LetMeDoIt.png
│ │ ├── MyHand.ico
│ │ ├── MyHand.png
│ │ ├── TaskWiz.ico
│ │ ├── TaskWiz.png
│ │ ├── collections
│ │ │ ├── 1.jpeg
│ │ │ ├── 10.jpeg
│ │ │ ├── 11.jpeg
│ │ │ ├── 12.jpeg
│ │ │ ├── 2.jpeg
│ │ │ ├── 3.jpeg
│ │ │ ├── 4.jpeg
│ │ │ ├── 5.jpeg
│ │ │ ├── 6.jpeg
│ │ │ ├── 7.jpeg
│ │ │ ├── 8.jpeg
│ │ │ └── 9.jpeg
│ │ ├── systemtray.ico
│ │ └── systemtray.png
│ ├── macOS_service
│ │ ├── LetMeDoIt_Download_workflow
│ │ │ └── Contents
│ │ │ │ ├── Info.plist
│ │ │ │ ├── QuickLook
│ │ │ │ └── Thumbnail.png
│ │ │ │ └── document.wflow
│ │ ├── LetMeDoIt_Explanation_workflow
│ │ │ └── Contents
│ │ │ │ ├── Info.plist
│ │ │ │ ├── QuickLook
│ │ │ │ └── Thumbnail.png
│ │ │ │ └── document.wflow
│ │ ├── LetMeDoIt_Files_workflow
│ │ │ └── Contents
│ │ │ │ ├── Info.plist
│ │ │ │ ├── QuickLook
│ │ │ │ └── Thumbnail.png
│ │ │ │ └── document.wflow
│ │ ├── LetMeDoIt_Pronounce_workflow
│ │ │ └── Contents
│ │ │ │ ├── Info.plist
│ │ │ │ ├── QuickLook
│ │ │ │ └── Thumbnail.png
│ │ │ │ └── document.wflow
│ │ ├── LetMeDoIt_Summary_workflow
│ │ │ └── Contents
│ │ │ │ ├── Info.plist
│ │ │ │ ├── QuickLook
│ │ │ │ └── Thumbnail.png
│ │ │ │ └── document.wflow
│ │ ├── LetMeDoIt_Text_workflow
│ │ │ └── Contents
│ │ │ │ ├── Info.plist
│ │ │ │ ├── QuickLook
│ │ │ │ └── Thumbnail.png
│ │ │ │ └── document.wflow
│ │ ├── LetMeDoIt_Translation_workflow
│ │ │ └── Contents
│ │ │ │ ├── Info.plist
│ │ │ │ ├── QuickLook
│ │ │ │ └── Thumbnail.png
│ │ │ │ └── document.wflow
│ │ └── LetMeDoIt_YoutubeMP3_workflow
│ │ │ └── Contents
│ │ │ ├── Info.plist
│ │ │ ├── QuickLook
│ │ │ └── Thumbnail.png
│ │ │ └── document.wflow
│ ├── main.py
│ ├── ollamachat.py
│ ├── package_name.txt
│ ├── palm2.py
│ ├── plugins
│ │ ├── 000_check_ffmpeg.py
│ │ ├── 000_check_pyaudio.py
│ │ ├── 000_check_vlc.py
│ │ ├── Readme.md
│ │ ├── add calendar event.py
│ │ ├── aliases.py
│ │ ├── analyze audio.py
│ │ ├── analyze files.py
│ │ ├── analyze images with gemini.py
│ │ ├── analyze images.py
│ │ ├── analyze web content.py
│ │ ├── ask chatgpt.py
│ │ ├── ask codey.py
│ │ ├── ask gemini pro.py
│ │ ├── ask gemma.py
│ │ ├── ask llama2.py
│ │ ├── ask llava.py
│ │ ├── ask mistral.py
│ │ ├── ask ollama.py
│ │ ├── ask palm2.py
│ │ ├── auto heal python code.py
│ │ ├── awesome prompts.py
│ │ ├── character analysis.py
│ │ ├── contexts.py
│ │ ├── counselling.py
│ │ ├── create ai assistants.py
│ │ ├── create images.py
│ │ ├── create maps.py
│ │ ├── create qrcode.py
│ │ ├── create statistical graphics.py
│ │ ├── dates and times.py
│ │ ├── download youtube or web content.py
│ │ ├── edit text.py
│ │ ├── execute python code.py
│ │ ├── execute termux command.py
│ │ ├── global finance.py
│ │ ├── improve British English.py
│ │ ├── input suggestions.py
│ │ ├── install python package.py
│ │ ├── integrate google searches.py
│ │ ├── manipulate files.py
│ │ ├── memory.py
│ │ ├── modify images.py
│ │ ├── open web browser.py
│ │ ├── pronounce words.py
│ │ ├── remove image background.py
│ │ ├── search chat records.py
│ │ ├── search financial data.py
│ │ ├── search latest news.py
│ │ ├── search sqlite.py
│ │ ├── search weather info.py
│ │ ├── send email.py
│ │ ├── send tweet.py
│ │ ├── send whatsapp messages.py
│ │ ├── simplified Chinese to traditional Chinese.py
│ │ └── solve math problems.py
│ ├── qt.py
│ ├── requirements.txt
│ ├── systemtray.py
│ ├── temp
│ │ └── Readme.md
│ └── utils
│ │ ├── assistant.py
│ │ ├── config_essential.py
│ │ ├── config_tools.py
│ │ ├── file_utils.py
│ │ ├── get_path_prompt.py
│ │ ├── install.py
│ │ ├── ollama_models.py
│ │ ├── promptValidator.py
│ │ ├── prompt_multiline_shared_key_bindings.py
│ │ ├── prompt_shared_key_bindings.py
│ │ ├── prompts.py
│ │ ├── shared_utils.py
│ │ ├── shortcuts.py
│ │ ├── streaming_word_wrapper.py
│ │ ├── sttLanguages.py
│ │ ├── terminal_mode_dialogs.py
│ │ ├── terminal_system_command_prompt.py
│ │ ├── text_utils.py
│ │ ├── ttsLanguages.py
│ │ ├── tts_utils.py
│ │ └── vlc_utils.py
├── package_name.txt
├── setup.py
├── setup_android.py
├── upload_to_pypi.sh
└── upload_to_pypi_android.sh
├── tests
├── agent_builder.py
├── geminipro_vision.py
├── my_package
│ ├── README.md
│ ├── my_package
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ └── main.py
│ └── setup.py
├── old
│ └── bible_old.zip
├── pip_instructions_keep.md
├── prompt_toolkit_dynamic_layout.py
├── stream_function_response.py
├── terminal.py
└── tray_mini.py
└── www
├── index.html
└── myhand_bot
└── index.html
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | custom: [https://www.paypal.me/letmedoitai]
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *venv*
2 | /files*
3 | /temp*
4 | /Documents*
5 | /history*
6 | /plugins*
7 | *__pycache__*
8 | *.DS_Store*
9 | .python-version
10 | .cache/*
11 | coding/*
12 |
--------------------------------------------------------------------------------
/package/letmedoit/config.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit/config.py
--------------------------------------------------------------------------------
/package/letmedoit/main.py:
--------------------------------------------------------------------------------
1 | from agentmake import agentmake, DEFAULT_AI_BACKEND
2 | import argparse, os
3 |
def lite():
    """Entry point for the lite variant: delegate to main() with the
    "letmedoit_lite" agent profile."""
    return main(agent="letmedoit_lite")

def main(agent="letmedoit"):
    """Parse LetMeDoIt AI command-line options and run the request through `agentmake`.

    Args:
        agent: Name of the agentmake agent profile to execute
            (overridden to "letmedoit_lite" by the `lite` entry point).
    """
    # Create the parser
    parser = argparse.ArgumentParser(description="LetMeDoIt AI cli options")
    # Add arguments for running `agentmake` function
    parser.add_argument("default", nargs="+", default=None, help="user prompt")
    parser.add_argument("-b", "--backend", action="store", dest="backend", help="AI backend")
    parser.add_argument("-m", "--model", action="store", dest="model", help="AI model")
    parser.add_argument("-mka", "--model_keep_alive", action="store", dest="model_keep_alive", help="time to keep the model loaded in memory; applicable to ollama only")
    parser.add_argument("-tem", "--temperature", action='store', dest="temperature", type=float, help="temperature for sampling")
    parser.add_argument("-mt", "--max_tokens", action='store', dest="max_tokens", type=int, help="maximum number of tokens to generate")
    parser.add_argument("-cw", "--context_window", action='store', dest="context_window", type=int, help="context window size; applicable to ollama only")
    parser.add_argument("-bs", "--batch_size", action='store', dest="batch_size", type=int, help="batch size; applicable to ollama only")
    parser.add_argument("-pre", "--prefill", action='append', dest="prefill", help="prefill of assistant message; applicable to deepseek, mistral, ollama and groq only")
    parser.add_argument("-sto", "--stop", action='append', dest="stop", help="stop sequences")
    parser.add_argument("-key", "--api_key", action="store", dest="api_key", help="API key")
    parser.add_argument("-end", "--api_endpoint", action="store", dest="api_endpoint", help="API endpoint")
    parser.add_argument("-pi", "--api_project_id", action="store", dest="api_project_id", help="project id; applicable to Vertex AI only")
    parser.add_argument("-sl", "--api_service_location", action="store", dest="api_service_location", help="cloud service location; applicable to Vertex AI only")
    parser.add_argument("-tim", "--api_timeout", action="store", dest="api_timeout", type=float, help="timeout for API request")
    parser.add_argument("-ww", "--word_wrap", action="store_true", dest="word_wrap", help="wrap output text according to current terminal width")
    #parser.add_argument("-p", "--prompts", action="store_true", dest="prompts", help="enable multi-turn prompts for the user interface")
    #parser.add_argument("-u", "--upgrade", action="store_true", dest="upgrade", help="upgrade `agentmake` pip package")
    parser.add_argument("-dtc", "--default_tool_choices", action="store", dest="default_tool_choices", help="override the default tool choices for agents to select, e.g. '@chat @magic'")
    # Parse arguments
    args = parser.parse_args()

    if args.default_tool_choices:
        os.environ["DEFAULT_TOOL_CHOICES"] = args.default_tool_choices

    # `default` is a required positional (nargs="+") so it is normally a
    # non-empty list; guard defensively against None/empty all the same.
    user_prompt = " ".join(args.default) if args.default else ""
    if user_prompt:
        agentmake(
            messages=user_prompt,
            backend=args.backend if args.backend else DEFAULT_AI_BACKEND,
            model=args.model,
            model_keep_alive=args.model_keep_alive,
            agent=agent,
            temperature=args.temperature,
            max_tokens=args.max_tokens,
            context_window=args.context_window,
            batch_size=args.batch_size,
            prefill=args.prefill,
            stop=args.stop,
            api_key=args.api_key,
            api_endpoint=args.api_endpoint,
            api_project_id=args.api_project_id,
            api_service_location=args.api_service_location,
            # int() conversion for these backends — presumably they reject
            # float timeouts; TODO confirm against agentmake docs
            api_timeout=int(args.api_timeout) if args.api_timeout and args.backend and args.backend in ("cohere", "mistral", "genai", "vertexai") else args.api_timeout,
            word_wrap=args.word_wrap,
            stream=True,
            print_on_terminal=True,
        )

if __name__ == "__main__":
    # fix: drop the unused `test = ` assignment — main() returns None
    main()
--------------------------------------------------------------------------------
/package/letmedoit/package_name.txt:
--------------------------------------------------------------------------------
1 | letmedoit
--------------------------------------------------------------------------------
/package/letmedoit/requirements.txt:
--------------------------------------------------------------------------------
1 | agentmake>=1.0.14
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/__init__.py
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/audio/notification1.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/audio/notification1.mp3
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/audio/notification1_mild.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/audio/notification1_mild.mp3
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/audio/notification2.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/audio/notification2.mp3
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/audio/notification2_mild.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/audio/notification2_mild.mp3
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/autoassist.py:
--------------------------------------------------------------------------------
# --- import-time bootstrap ---
# Make the package folder the working directory and guarantee a config.py
# exists before `from letmedoit import config` is attempted.
import os
thisFile = os.path.realpath(__file__)
packageFolder = os.path.dirname(thisFile)
package = os.path.basename(packageFolder)
if os.getcwd() != packageFolder:
    os.chdir(packageFolder)
configFile = os.path.join(packageFolder, "config.py")
if not os.path.isfile(configFile):
    # create an empty config.py so the import below succeeds on first run
    open(configFile, "a", encoding="utf-8").close()
from letmedoit import config
if not hasattr(config, "max_consecutive_auto_reply"):
    # default cap on AutoGen auto-replies when not configured yet
    config.max_consecutive_auto_reply = 10

from letmedoit.health_check import HealthCheck
if not hasattr(config, "currentMessages"):
    # first launch in this process: populate basic settings and an API key
    HealthCheck.setBasicConfig()
    if not hasattr(config, "openaiApiKey") or not config.openaiApiKey:
        HealthCheck.changeAPIkey()
    config.saveConfig()
    #print("Configurations updated!")
HealthCheck.checkCompletion()

import autogen, os, json, traceback
from letmedoit.utils.prompts import Prompts
from letmedoit.utils.shared_utils import SharedUtil
from prompt_toolkit import print_formatted_text, HTML
from prompt_toolkit.styles import Style
#from prompt_toolkit import PromptSession
#from prompt_toolkit.history import FileHistory
30 |
31 |
class AutoGenAssistant:
    """Terminal front end that drives an AutoGen assistant/user-proxy pair.

    Publishes the OpenAI model list for AutoGen at construction time, then
    runs an interactive loop where each user message starts a fresh two-agent
    chat (assistant + user proxy with optional auto-reply).
    """

    def __init__(self):
        """Export OAI_CONFIG_LIST for AutoGen and disable docker execution."""
        oai_config_list = []
        for model in HealthCheck.tokenLimits.keys():
            oai_config_list.append({"model": model, "api_key": config.openaiApiKey})
        if config.chatGPTApiModel not in HealthCheck.tokenLimits:
            # the selected model is not among the known ones; list it too
            oai_config_list.append({"model": config.chatGPTApiModel, "api_key": config.openaiApiKey})
        os.environ["OAI_CONFIG_LIST"] = json.dumps(oai_config_list)
        # AutoGen runs generated code in docker by default; opt out so code
        # execution works without a running docker daemon.
        os.environ["AUTOGEN_USE_DOCKER"] = "False"

    def getResponse(self, message, auto=False):
        """Start a two-agent chat about `message`.

        Args:
            message: The user request; current device information is prepended.
            auto: When True the user proxy replies automatically, up to
                config.max_consecutive_auto_reply turns; otherwise the user
                is asked before each reply.
        """
        message = f"""Current device information is given below:
{SharedUtil.getDeviceInfo()}

Below is my message:
{message}"""

        config_list = autogen.config_list_from_json(
            env_or_file="OAI_CONFIG_LIST",  # or OAI_CONFIG_LIST.json if file extension is added
            filter_dict={
                "model": {
                    config.chatGPTApiModel,
                }
            }
        )

        assistant = autogen.AssistantAgent(
            name="assistant",
            llm_config={
                #"cache_seed": 42,  # seed for caching and reproducibility
                "config_list": config_list,  # a list of OpenAI API configurations
                "temperature": config.llmTemperature,  # temperature for sampling
                "timeout": 300,
            },  # configuration for autogen's enhanced inference API which is compatible with OpenAI API
        )
        # create a UserProxyAgent instance named "user_proxy"
        user_proxy = autogen.UserProxyAgent(
            name="user_proxy",
            human_input_mode="NEVER" if auto else "ALWAYS",
            max_consecutive_auto_reply=config.max_consecutive_auto_reply,
            is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
            code_execution_config={
                "work_dir": os.path.join(config.letMeDoItAIFolder, "coding") if hasattr(config, "letMeDoItAIFolder") else "coding",
                "use_docker": False,  # set to True or image name like "python:3" to use docker
            },
        )
        # the assistant receives a message from the user_proxy, which contains the task description
        user_proxy.initiate_chat(
            assistant,
            message=message,
        )

    def print(self, message):
        """Render `message` (prompt_toolkit HTML markup) to the terminal."""
        print_formatted_text(HTML(message))

    def run(self):
        """Interactive loop: ask about auto-reply, then chat until exit."""
        promptStyle = Style.from_dict({
            # User input (default text).
            "": config.terminalCommandEntryColor2,
            # Prompt.
            "indicator": config.terminalPromptIndicatorColor2,
        })
        prompts = Prompts()

        auto = False
        self.print("Do you want auto-reply (y/yes/N/NO)?")
        userInput = prompts.simplePrompt(style=promptStyle, default="NO")
        if userInput.strip().lower() in ("y", "yes"):
            auto = True
            self.print("Enter maximum consecutive auto-reply below:")
            max_consecutive_auto_reply = prompts.simplePrompt(numberOnly=True, style=promptStyle, default=str(config.max_consecutive_auto_reply),)
            if max_consecutive_auto_reply and int(max_consecutive_auto_reply) > 1:
                config.max_consecutive_auto_reply = int(max_consecutive_auto_reply)

        # fix: close each color tag with "</...>"; the original markup lacked
        # the slash, which prompt_toolkit's HTML parser rejects
        self.print(f"<{config.terminalCommandEntryColor1}>AutoGen Assistant launched!</{config.terminalCommandEntryColor1}>")
        self.print(f"""[press '{str(config.hotkey_exit).replace("'", "")[1:-1]}' to exit]""")
        while True:
            self.print(f"<{config.terminalCommandEntryColor1}>New chat started!</{config.terminalCommandEntryColor1}>")
            self.print(f"<{config.terminalCommandEntryColor1}>Enter your message below:</{config.terminalCommandEntryColor1}>")
            message = prompts.simplePrompt(style=promptStyle)
            if message == config.exit_entry:
                break
            try:
                self.getResponse(message, auto)
            except Exception:
                # surface the traceback to the user instead of crashing the loop
                self.print(traceback.format_exc())
                break
        self.print(f"<{config.terminalCommandEntryColor1}>\n\nAutoGen Assistant closed!</{config.terminalCommandEntryColor1}>")
def main():
    """Snapshot the IP-in-system-message setting, then run the assistant."""
    config.includeIpInSystemMessageTemp = config.includeIpInSystemMessage
    assistant = AutoGenAssistant()
    assistant.run()

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/automath.py:
--------------------------------------------------------------------------------
# --- import-time bootstrap ---
# Make the package folder the working directory and guarantee a config.py
# exists before `from letmedoit import config` is attempted.
import os
thisFile = os.path.realpath(__file__)
packageFolder = os.path.dirname(thisFile)
package = os.path.basename(packageFolder)
if os.getcwd() != packageFolder:
    os.chdir(packageFolder)
configFile = os.path.join(packageFolder, "config.py")
if not os.path.isfile(configFile):
    # create an empty config.py so the import below succeeds on first run
    open(configFile, "a", encoding="utf-8").close()
from letmedoit import config

from letmedoit.health_check import HealthCheck
if not hasattr(config, "currentMessages"):
    # first launch in this process: populate basic settings and an API key
    HealthCheck.setBasicConfig()
    if not hasattr(config, "openaiApiKey") or not config.openaiApiKey:
        HealthCheck.changeAPIkey()
    config.saveConfig()
    #print("Configurations updated!")
HealthCheck.checkCompletion()

import autogen, os, json, traceback
from pathlib import Path
from letmedoit.utils.prompts import Prompts
from prompt_toolkit import print_formatted_text, HTML
from prompt_toolkit.styles import Style
from autogen import config_list_from_json
from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent
28 |
29 |
class AutoGenMath:
    """Terminal front end for AutoGen's MathUserProxyAgent math solver."""

    def __init__(self):
        """Export OAI_CONFIG_LIST for AutoGen and disable docker execution."""
        oai_config_list = []
        for model in HealthCheck.tokenLimits.keys():
            oai_config_list.append({"model": model, "api_key": config.openaiApiKey})
        if config.chatGPTApiModel not in HealthCheck.tokenLimits:
            # the selected model is not among the known ones; list it too
            oai_config_list.append({"model": config.chatGPTApiModel, "api_key": config.openaiApiKey})
        os.environ["OAI_CONFIG_LIST"] = json.dumps(oai_config_list)
        # AutoGen runs generated code in docker by default; opt out so code
        # execution works without a running docker daemon.
        os.environ["AUTOGEN_USE_DOCKER"] = "False"

    def getResponse(self, math_problem):
        """Solve `math_problem` via an assistant/math-proxy chat.

        Returns:
            A list containing at most one message dict from the assistant,
            or an empty list when no message could be retrieved.
        """
        config_list = autogen.config_list_from_json(
            env_or_file="OAI_CONFIG_LIST",  # or OAI_CONFIG_LIST.json if file extension is added
            filter_dict={
                "model": {
                    config.chatGPTApiModel,
                }
            }
        )

        # reference https://microsoft.github.io/autogen/docs/reference/agentchat/contrib/math_user_proxy_agent
        # 1. create an AssistantAgent instance named "assistant"
        assistant = autogen.AssistantAgent(
            name="assistant",
            system_message="You are a helpful assistant.",
            llm_config={
                #"cache_seed": 42,  # seed for caching and reproducibility
                "config_list": config_list,  # a list of OpenAI API configurations
                "temperature": config.llmTemperature,  # temperature for sampling
                "timeout": 600,
            },  # configuration for autogen's enhanced inference API which is compatible with OpenAI API
        )

        # 2. create the MathUserProxyAgent instance named "mathproxyagent"
        # By default, the human_input_mode is "NEVER", which means the agent will not ask for human input.
        mathproxyagent = MathUserProxyAgent(
            name="mathproxyagent",
            human_input_mode="NEVER",
            code_execution_config={"use_docker": False},
        )

        mathproxyagent.initiate_chat(assistant, problem=math_problem)

        try:
            last_message = assistant.last_message()
            # NOTE(review): a dict gets wrapped in a list, but a list is
            # truncated to its FIRST element ([:1]); confirm whether [-1:]
            # (the last message) was intended.
            if isinstance(last_message, list):
                last_message = last_message[:1]
            elif isinstance(last_message, dict):
                last_message = [last_message]
            else:
                last_message = []
        except Exception:
            last_message = []
        return last_message

    def print(self, message):
        """Render `message` (prompt_toolkit HTML markup) to the terminal."""
        print_formatted_text(HTML(message))

    def run(self):
        """Interactive loop: read math problems until the exit entry."""
        promptStyle = Style.from_dict({
            # User input (default text).
            "": config.terminalCommandEntryColor2,
            # Prompt.
            "indicator": config.terminalPromptIndicatorColor2,
        })
        prompts = Prompts()
        # fix: close each color tag with "</...>"; the original markup lacked
        # the slash, which prompt_toolkit's HTML parser rejects
        self.print(f"<{config.terminalCommandEntryColor1}>AutoGen Math Solver launched!</{config.terminalCommandEntryColor1}>")
        self.print(f"""[press '{str(config.hotkey_exit).replace("'", "")[1:-1]}' to exit]""")
        while True:
            self.print(f"<{config.terminalCommandEntryColor1}>New session started!</{config.terminalCommandEntryColor1}>")
            self.print(f"<{config.terminalCommandEntryColor1}>Enter a math problem below:</{config.terminalCommandEntryColor1}>")
            self.print(f"""[press '{str(config.hotkey_exit).replace("'", "")[1:-1]}' to exit]""")
            math_problem = prompts.simplePrompt(style=promptStyle)
            if math_problem == config.exit_entry:
                break
            try:
                self.getResponse(math_problem)
            except Exception:
                # surface the traceback to the user instead of crashing the loop
                self.print(traceback.format_exc())
                break
        self.print(f"<{config.terminalCommandEntryColor1}>\n\nAutoGen Math Solver closed!</{config.terminalCommandEntryColor1}>")
125 |
def main():
    """Run the AutoGen math solver user interface."""
    solver = AutoGenMath()
    solver.run()

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/commandprompt.py:
--------------------------------------------------------------------------------
# --- import-time bootstrap: ensure basic configuration exists ---
from letmedoit import config
from letmedoit.health_check import HealthCheck
if not hasattr(config, "currentMessages"):
    # first launch in this process: populate basic settings and persist them
    HealthCheck.setBasicConfig()
    config.saveConfig()
    #print("Configurations updated!")
from letmedoit.utils.prompts import Prompts
from letmedoit.utils.terminal_system_command_prompt import SystemCommandPrompt
import platform
10 |
def setOsOpenCmd():
    """Record the current platform on config and pick its file-open command.

    Termux takes precedence over the detected OS; on an unrecognised
    platform config.open is left untouched. "Darwin" is renamed "macOS"
    on config.thisPlatform afterwards.
    """
    detected = platform.system()
    config.thisPlatform = detected
    if config.terminalEnableTermuxAPI:
        config.open = "termux-share"
    else:
        openers = {"Linux": "xdg-open", "Darwin": "open", "Windows": "start"}
        if detected in openers:
            config.open = openers[detected]
    # name macOS
    if config.thisPlatform == "Darwin":
        config.thisPlatform = "macOS"
25 |
def main():
    """Launch the standalone system command prompt."""
    config.systemCommandPromptEntry = ""
    config.print = print
    setOsOpenCmd()
    # constructing Prompts initialises shared prompt-session state
    Prompts()
    commandPrompt = SystemCommandPrompt()
    commandPrompt.run(allowPathChanges=True)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/config.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/config.py
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/files/Readme.md:
--------------------------------------------------------------------------------
1 | Created files are located here.
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/files/audio/Readme.md:
--------------------------------------------------------------------------------
1 | Audio files are placed in this directory.
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/files/chats/Readme.md:
--------------------------------------------------------------------------------
1 | Chat records are saved here.
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/files/video/Readme.md:
--------------------------------------------------------------------------------
1 | Video files are placed in this directory.
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/gui/quicktask.py:
--------------------------------------------------------------------------------
1 | from letmedoit import config
2 | from letmedoit.utils.shared_utils import SharedUtil
3 | from PySide6.QtWidgets import QApplication, QInputDialog
4 | import json, sys
5 |
6 |
class QuickTask:
    """Minimal one-shot request runner: shows an input dialog (or a plain
    terminal prompt on Termux) and resolves each request through the
    configured OpenAI chat-completions client with function-call tools."""

    def __init__(self) -> None:
        # Ensure an API key and plugin functions are loaded before any request.
        SharedUtil.setAPIkey() # create a gui for it later
        SharedUtil.runPlugins()
        # Plugins may call config.stopSpinning; provide a no-op stand-in
        # since this GUI has no spinner.
        def stopSpinning():
            ...
        config.stopSpinning = stopSpinning

    def run(self, default="", standalone=True) -> None:
        """Prompt the user for requests until the dialog is cancelled.

        Args:
            default: Pre-filled request text for the first prompt.
            standalone: When True, create the QApplication here (required
                when not embedded in an existing Qt app).
        """
        if standalone:
            QApplication(sys.argv)
        gui_title = "LetMeDoIt AI"
        about = "Enter your request:"
        def runGui(thisDefault) -> bool:
            # Returns True when the user submitted a request, False on cancel.
            dialog = QInputDialog()

            dialog.setInputMode(QInputDialog.TextInput)
            dialog.setWindowTitle(gui_title)
            dialog.setLabelText(about)
            dialog.setTextValue(thisDefault)

            # Set size of dialog here
            dialog.resize(500, 100) # You can adjust the values according to your requirement

            if dialog.exec():
                request = dialog.textValue()
                self.resolve(request)
                return True
            else:
                return False
        if config.isTermux:
            # Termux has no Qt display; fall back to a plain terminal prompt
            # and handle a single request.
            if not default:
                print(gui_title)
                print(about)
                default = input(">>> ")
            self.resolve(default)
        else:
            # Keep re-opening the dialog until the user cancels.
            while runGui(default):
                default = ""

    def resolve(self, request) -> str:
        """Send `request` to the chat model, executing any tool call it makes.

        Returns the model's text reply, or the value returned by the last
        executed plugin function when tool calls are made, or "".
        NOTE(review): plugin functions are assumed to return str (per the
        -> str annotation) — confirm against plugin implementations.
        """
        messages=SharedUtil.resetMessages(prompt=request)
        completion = config.oai_client.chat.completions.create(
            model=config.chatGPTApiModel,
            messages=messages,
            n=1,
            temperature=config.llmTemperature,
            max_tokens=SharedUtil.getDynamicTokens(messages, config.chatGPTApiFunctionSignatures.values()),
            tools=SharedUtil.convertFunctionSignaturesIntoTools(config.chatGPTApiFunctionSignatures.values()),
            tool_choice="auto",
            #stream=True,
        )
        output = completion.choices[0].message
        if output.content:
            response = output.content
        elif output.tool_calls:
            # Multiple tool calls: only the last call's result is kept.
            for tool_call in output.tool_calls:
                tool = tool_call.function
                name, arguments = tool.name, tool.arguments
                response = config.chatGPTApiAvailableFunctions[name](json.loads(arguments))
        else:
            response = ""
        # ignore response for now
        return response
72 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/gui/worker.py:
--------------------------------------------------------------------------------
1 | import sys, traceback, openai, os, json, traceback, re, textwrap
2 | from PySide6.QtCore import QRunnable, Slot, Signal, QObject, QThreadPool
3 | from letmedoit import config
4 |
class WorkerSignals(QObject):
    '''
    Defines the signals available from a running worker thread.

    Supported signals are:

    finished
        No data

    error
        tuple (exctype, value, traceback.format_exc() )

    result
        object data returned from processing, anything

    progress
        str describing progress (the signal carries a str payload)

    '''
    finished = Signal()
    error = Signal(tuple)
    result = Signal(object)
    progress = Signal(str)
28 |
29 |
class Worker(QRunnable):
    '''
    Worker thread

    Inherits from QRunnable to handle worker thread setup, signals and wrap-up.

    :param callback: The function callback to run on this worker thread. Supplied args and
                     kwargs will be passed through to the runner.
    :type callback: function
    :param args: Arguments to pass to the callback function
    :param kwargs: Keywords to pass to the callback function

    '''

    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()

        # Store constructor arguments (re-used for processing)
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()

        # Add the callback to our kwargs
        # NOTE: the wrapped fn must accept a `progress_callback` keyword.
        self.kwargs["progress_callback"] = self.signals.progress

    @Slot()
    def run(self):
        '''
        Initialise the runner function with passed args, kwargs.

        Emits `result` on success, `error` with (exctype, value, traceback
        string) on failure, and `finished` in either case.
        '''

        # assign a reference to this current thread
        #config.workerThread = QThread.currentThread()

        # Retrieve args/kwargs here; and fire processing using them
        try:
            result = self.fn(*self.args, **self.kwargs)
        except:
            # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
            # consider `except Exception:` — kept as-is here.
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        else:
            self.signals.result.emit(result)  # Return the result of the processing
        finally:
            self.signals.finished.emit()  # Done
76 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/history/Readme.md:
--------------------------------------------------------------------------------
1 | Input records are saved here.
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/CyberTask.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/CyberTask.ico
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/CyberTask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/CyberTask.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/GeminiPro.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/GeminiPro.ico
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/GeminiPro.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/GeminiPro.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/LetMeDoIt.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/LetMeDoIt.ico
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/LetMeDoIt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/LetMeDoIt.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/MyHand.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/MyHand.ico
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/MyHand.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/MyHand.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/TaskWiz.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/TaskWiz.ico
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/TaskWiz.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/TaskWiz.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/1.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/10.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/10.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/11.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/11.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/12.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/12.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/2.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/3.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/3.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/4.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/4.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/5.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/5.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/6.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/6.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/7.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/7.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/8.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/8.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/collections/9.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/collections/9.jpeg
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/systemtray.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/systemtray.ico
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/icons/systemtray.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/icons/systemtray.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Download_workflow/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | NSServices
6 |
7 |
8 | NSBackgroundColorName
9 | background
10 | NSIconName
11 | NSTouchBarSend
12 | NSMenuItem
13 |
14 | default
15 | LetMeDoIt Download
16 |
17 | NSMessage
18 | runWorkflowAsService
19 | NSSendTypes
20 |
21 | public.utf8-plain-text
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Download_workflow/Contents/QuickLook/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Download_workflow/Contents/QuickLook/Thumbnail.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Explanation_workflow/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | NSServices
6 |
7 |
8 | NSBackgroundColorName
9 | background
10 | NSIconName
11 | NSTouchBarSend
12 | NSMenuItem
13 |
14 | default
15 | LetMeDoIt Explanation
16 |
17 | NSMessage
18 | runWorkflowAsService
19 | NSSendTypes
20 |
21 | public.utf8-plain-text
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Explanation_workflow/Contents/QuickLook/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Explanation_workflow/Contents/QuickLook/Thumbnail.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Files_workflow/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | NSServices
6 |
7 |
8 | NSBackgroundColorName
9 | background
10 | NSIconName
11 | NSTouchBarShare
12 | NSMenuItem
13 |
14 | default
15 | LetMeDoIt Files
16 |
17 | NSMessage
18 | runWorkflowAsService
19 | NSSendFileTypes
20 |
21 | public.item
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Files_workflow/Contents/QuickLook/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Files_workflow/Contents/QuickLook/Thumbnail.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Pronounce_workflow/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | NSServices
6 |
7 |
8 | NSBackgroundColorName
9 | background
10 | NSIconName
11 | NSTouchBarSend
12 | NSMenuItem
13 |
14 | default
15 | LetMeDoIt Pronunciation
16 |
17 | NSMessage
18 | runWorkflowAsService
19 | NSSendTypes
20 |
21 | public.utf8-plain-text
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Pronounce_workflow/Contents/QuickLook/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Pronounce_workflow/Contents/QuickLook/Thumbnail.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Summary_workflow/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | NSServices
6 |
7 |
8 | NSBackgroundColorName
9 | background
10 | NSIconName
11 | NSTouchBarSend
12 | NSMenuItem
13 |
14 | default
15 | LetMeDoIt Summary
16 |
17 | NSMessage
18 | runWorkflowAsService
19 | NSSendTypes
20 |
21 | public.utf8-plain-text
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Summary_workflow/Contents/QuickLook/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Summary_workflow/Contents/QuickLook/Thumbnail.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Text_workflow/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | NSServices
6 |
7 |
8 | NSBackgroundColorName
9 | background
10 | NSIconName
11 | NSTouchBarSend
12 | NSMenuItem
13 |
14 | default
15 | LetMeDoIt Text
16 |
17 | NSMessage
18 | runWorkflowAsService
19 | NSSendTypes
20 |
21 | public.utf8-plain-text
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Text_workflow/Contents/QuickLook/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Text_workflow/Contents/QuickLook/Thumbnail.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Text_workflow/Contents/document.wflow:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | AMApplicationBuild
6 | 523
7 | AMApplicationVersion
8 | 2.10
9 | AMDocumentVersion
10 | 2
11 | actions
12 |
13 |
14 | action
15 |
16 | AMAccepts
17 |
18 | Container
19 | List
20 | Optional
21 |
22 | Types
23 |
24 | com.apple.cocoa.string
25 |
26 |
27 | AMActionVersion
28 | 2.0.3
29 | AMApplication
30 |
31 | Automator
32 |
33 | AMParameterProperties
34 |
35 | COMMAND_STRING
36 |
37 | CheckedForUserDefaultShell
38 |
39 | inputMethod
40 |
41 | shell
42 |
43 | source
44 |
45 |
46 | AMProvides
47 |
48 | Container
49 | List
50 | Types
51 |
52 | com.apple.cocoa.string
53 |
54 |
55 | ActionBundlePath
56 | /System/Library/Automator/Run Shell Script.action
57 | ActionName
58 | Run Shell Script
59 | ActionParameters
60 |
61 | COMMAND_STRING
62 | mkdir -p ~/letmedoit
63 | echo "$1" > ~/letmedoit/selected_text.txt
64 | osascript -e 'tell application "Terminal" to do script "[LETMEDOIT_PATH] -u false -n true -i false -f ~/letmedoit/selected_text.txt"'
65 | osascript -e 'tell application "Terminal" to activate'
66 | CheckedForUserDefaultShell
67 |
68 | inputMethod
69 | 1
70 | shell
71 | /bin/bash
72 | source
73 |
74 |
75 | BundleIdentifier
76 | com.apple.RunShellScript
77 | CFBundleVersion
78 | 2.0.3
79 | CanShowSelectedItemsWhenRun
80 |
81 | CanShowWhenRun
82 |
83 | Category
84 |
85 | AMCategoryUtilities
86 |
87 | Class Name
88 | RunShellScriptAction
89 | InputUUID
90 | 2A8C75F7-7C18-42C9-B2EE-16E74D936846
91 | Keywords
92 |
93 | Shell
94 | Script
95 | Command
96 | Run
97 | Unix
98 |
99 | OutputUUID
100 | E1C079B5-566B-4637-AA45-74041859F47A
101 | UUID
102 | 38EC4A4E-D842-4A8B-985D-13CE570B4D1E
103 | UnlocalizedApplications
104 |
105 | Automator
106 |
107 | arguments
108 |
109 | 0
110 |
111 | default value
112 | 0
113 | name
114 | inputMethod
115 | required
116 | 0
117 | type
118 | 0
119 | uuid
120 | 0
121 |
122 | 1
123 |
124 | default value
125 |
126 | name
127 | CheckedForUserDefaultShell
128 | required
129 | 0
130 | type
131 | 0
132 | uuid
133 | 1
134 |
135 | 2
136 |
137 | default value
138 |
139 | name
140 | source
141 | required
142 | 0
143 | type
144 | 0
145 | uuid
146 | 2
147 |
148 | 3
149 |
150 | default value
151 |
152 | name
153 | COMMAND_STRING
154 | required
155 | 0
156 | type
157 | 0
158 | uuid
159 | 3
160 |
161 | 4
162 |
163 | default value
164 | /bin/sh
165 | name
166 | shell
167 | required
168 | 0
169 | type
170 | 0
171 | uuid
172 | 4
173 |
174 |
175 | isViewVisible
176 | 1
177 | location
178 | 336.500000:305.000000
179 | nibPath
180 | /System/Library/Automator/Run Shell Script.action/Contents/Resources/Base.lproj/main.nib
181 |
182 | isViewVisible
183 | 1
184 |
185 |
186 | connectors
187 |
188 | workflowMetaData
189 |
190 | applicationBundleIDsByPath
191 |
192 | applicationPaths
193 |
194 | inputTypeIdentifier
195 | com.apple.Automator.text
196 | outputTypeIdentifier
197 | com.apple.Automator.nothing
198 | presentationMode
199 | 11
200 | processesInput
201 |
202 | serviceInputTypeIdentifier
203 | com.apple.Automator.text
204 | serviceOutputTypeIdentifier
205 | com.apple.Automator.nothing
206 | serviceProcessesInput
207 |
208 | systemImageName
209 | NSTouchBarSend
210 | useAutomaticInputType
211 |
212 | workflowTypeIdentifier
213 | com.apple.Automator.servicesMenu
214 |
215 |
216 |
217 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Translation_workflow/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | NSServices
6 |
7 |
8 | NSBackgroundColorName
9 | background
10 | NSIconName
11 | NSTouchBarSend
12 | NSMenuItem
13 |
14 | default
15 | LetMeDoIt Translation
16 |
17 | NSMessage
18 | runWorkflowAsService
19 | NSSendTypes
20 |
21 | public.utf8-plain-text
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Translation_workflow/Contents/QuickLook/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_Translation_workflow/Contents/QuickLook/Thumbnail.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_YoutubeMP3_workflow/Contents/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | NSServices
6 |
7 |
8 | NSBackgroundColorName
9 | background
10 | NSIconName
11 | NSTouchBarSend
12 | NSMenuItem
13 |
14 | default
15 | LetMeDoIt Youtube MP3
16 |
17 | NSMessage
18 | runWorkflowAsService
19 | NSSendTypes
20 |
21 | public.utf8-plain-text
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_YoutubeMP3_workflow/Contents/QuickLook/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/package/letmedoit_b4_v3/macOS_service/LetMeDoIt_YoutubeMP3_workflow/Contents/QuickLook/Thumbnail.png
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/package_name.txt:
--------------------------------------------------------------------------------
1 | letmedoit
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/000_check_ffmpeg.py:
--------------------------------------------------------------------------------
from letmedoit import config
from letmedoit.utils.shared_utils import SharedUtil
import os

# Best-effort installation of ffmpeg via whichever package manager is present.
if not SharedUtil.isPackageInstalled("ffmpeg"):

    # Map each supported package manager to its ffmpeg install command line.
    installers = {
        "pkg": "pkg install ffmpeg", # Android Termux
        "brew": "brew install ffmpeg", # on MacOS using Homebrew (https://brew.sh/)
        "choco": "choco install ffmpeg", # on Windows using Chocolatey (https://chocolatey.org/)
        "scoop": "scoop install ffmpeg", # on Windows using Scoop (https://scoop.sh/)
        "apt": "sudo apt update && sudo apt install ffmpeg", # Ubuntu or Debian-based distributions
        "dnf": "sudo dnf install ffmpeg", # Fedora or CentOS
        "pacman": "sudo pacman -Sy ffmpeg", # Arch Linux
        "zypper": "sudo zypper install ffmpeg", # openSUSE
        "yum": "sudo yum localinstall --nogpgcheck https://download1.rpmfusion.org/free/el/rpmfusion-free-release-7.noarch.rpm && sudo yum install ffmpeg", # RHEL, CentOS 7
    }

    for manager, install_command in installers.items():
        if not SharedUtil.isPackageInstalled(manager):
            continue
        os.system(install_command)
        if SharedUtil.isPackageInstalled("ffmpeg"):
            break

# Warn the user when no installer succeeded (or none was available).
if not SharedUtil.isPackageInstalled("ffmpeg"):
    config.print3("Note: 'ffmpeg' is not installed.")
    config.print("It is essential for voice typing with openai whisper offline model, downloading YouTube media, video / audio conversion, etc.")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/000_check_pyaudio.py:
--------------------------------------------------------------------------------
from letmedoit import config
from letmedoit.utils.install import installmodule
from letmedoit.utils.shared_utils import SharedUtil
import os

# Probe for a working microphone/PyAudio setup; if it is missing, try to
# install the platform's portaudio package and the PyAudio module.
# For more information, read https://github.com/Uberi/speech_recognition#pyaudio-for-microphone-users

try:
    import sounddevice
    import speech_recognition as sr
    mic = sr.Microphone()  # raises if PyAudio/portaudio is unavailable
    del mic
    config.pyaudioInstalled = True
except Exception:
    if config.isTermux:
        # Automatic installation is disabled on Termux.
        config.pyaudioInstalled = False
        #config.print2("Installing 'portaudio' and 'Pyaudio' ...")
        #os.system("pkg install portaudio")
        #config.pyaudioInstalled = True if installmodule("--upgrade PyAudio") else False
    elif SharedUtil.isPackageInstalled("apt"):
        config.print2("Installing 'portaudio19-dev' and 'Pyaudio' ...")
        os.system("sudo apt update && sudo apt install portaudio19-dev")
        config.pyaudioInstalled = bool(installmodule("--upgrade PyAudio"))
    elif SharedUtil.isPackageInstalled("dnf"):
        config.print2("Installing 'portaudio-devel' and 'Pyaudio' ...")
        os.system("sudo dnf update && sudo dnf install portaudio-devel")
        config.pyaudioInstalled = bool(installmodule("--upgrade PyAudio"))
    elif SharedUtil.isPackageInstalled("brew"):
        config.print2("Installing 'portaudio' and 'Pyaudio' ...")
        os.system("brew install portaudio")
        config.pyaudioInstalled = bool(installmodule("--upgrade PyAudio"))
    else:
        config.pyaudioInstalled = False

if not config.pyaudioInstalled:
    # Fixed copy-paste bug: the message previously said 'ffmpeg' although
    # this script checks for PyAudio.
    config.print3("Note: 'PyAudio' is not installed.")
    config.print("It is essential for built-in voice typing feature.")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/000_check_vlc.py:
--------------------------------------------------------------------------------
from letmedoit import config
from letmedoit.utils.shared_utils import SharedUtil
import os

# Known install locations of VLC outside the shell PATH.
macVlc = "/Applications/VLC.app/Contents/MacOS/VLC"
windowsVlc = r'C:\Program Files\VideoLAN\VLC\vlc.exe'

def _vlc_available():
    # VLC counts as installed when it is on PATH or at a known app location.
    return SharedUtil.isPackageInstalled("vlc") or os.path.isfile(macVlc) or os.path.isfile(windowsVlc)

if not _vlc_available():

    # Map each supported package manager to its VLC install command line.
    installers = {
        "pkg": "pkg install vlc", # Android Termux
        "brew": "brew install vlc", # on MacOS using Homebrew (https://brew.sh/)
        "choco": "choco install vlc", # on Windows using Chocolatey (https://chocolatey.org/)
        "scoop": "scoop install vlc", # on Windows using Scoop (https://scoop.sh/)
        "apt": "sudo apt update && sudo apt install vlc", # Ubuntu or Debian-based distributions
        "dnf": "sudo dnf install vlc", # Fedora or CentOS
        "pacman": "sudo pacman -Sy vlc", # Arch Linux
        "zypper": "sudo zypper install vlc", # openSUSE
        "yum": "sudo yum install epel-release && sudo yum install vlc", # RHEL, CentOS 7
    }

    for manager, install_command in installers.items():
        if not SharedUtil.isPackageInstalled(manager):
            continue
        os.system(install_command)
        if SharedUtil.isPackageInstalled("vlc"):
            break

# Warn the user when VLC is still unavailable after the attempts above.
if not _vlc_available():
    config.print3("Note: 'vlc' is not installed.")
    config.print("It is nice to have VLC player installed for video / audio playback. It is required if you want to control the LetMeDoIt AI audio response speed.")
31 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/Readme.md:
--------------------------------------------------------------------------------
1 | This directory contains LetMeDoIt plugins.
2 | LetMeDoIt plugins are Python files to be executed when LetMeDoIt starts.
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/aliases.py:
--------------------------------------------------------------------------------
"""
LetMeDoIt AI Plugin - aliases

add aliases
"""

from letmedoit import config
import sys, os

# Use the current Python interpreter so the aliases work inside a virtual environment.
if not config.isTermux:
    def _alias_command(script, args=""):
        # Build a "!<python> <script> [args]" shell alias for a bundled script.
        # CLI arguments are appended AFTER os.path.join(), not embedded inside
        # a path component (fixes the previous misuse of os.path.join; the
        # resulting string is unchanged because join only prefixes a separator).
        command = f"!{sys.executable} {os.path.join(config.letMeDoItAIFolder, script)}"
        return f"{command} {args}" if args else command

    # integrated AutoGen agents
    config.aliases["!autoassist"] = _alias_command("autoassist.py")
    config.aliases["!automath"] = _alias_command("automath.py")
    config.aliases["!autoretriever"] = _alias_command("autoretriever.py")
    config.aliases["!autobuilder"] = _alias_command("autobuilder.py")
    # integrated Google AI tools
    config.aliases["!geminiprovision"] = _alias_command("geminiprovision.py")
    config.aliases["!geminipro"] = _alias_command("geminipro.py")
    config.aliases["!palm2"] = _alias_command("palm2.py")
    config.aliases["!codey"] = _alias_command("codey.py")
    # integrated Ollama chatbots
    config.aliases["!ollamachat"] = _alias_command("ollamachat.py")
    config.aliases["!mistral"] = _alias_command("ollamachat.py", "-m mistral")
    config.aliases["!llama2"] = _alias_command("ollamachat.py", "-m llama2")
    config.aliases["!llama213b"] = _alias_command("ollamachat.py", "-m llama213b")
    config.aliases["!llama270b"] = _alias_command("ollamachat.py", "-m llama270b")
    config.aliases["!codellama"] = _alias_command("ollamachat.py", "-m codellama")
    config.aliases["!gemma2b"] = _alias_command("ollamachat.py", "-m gemma2b")
    config.aliases["!gemma7b"] = _alias_command("ollamachat.py", "-m gemma7b")
    config.aliases["!llava"] = _alias_command("ollamachat.py", "-m llava")
    config.aliases["!phi"] = _alias_command("ollamachat.py", "-m phi")
    config.aliases["!vicuna"] = _alias_command("ollamachat.py", "-m vicuna")
    # integrated text editor
    config.aliases["!etextedit"] = _alias_command("eTextEdit.py")
    config.aliases["!chatgpt"] = _alias_command("chatgpt.py")

if not config.isTermux:
    config.inputSuggestions += [
        "!autoassist",
        "!autobuilder",
        "!automath",
        "!autoretriever",
        "!geminipro",
        "geminipro",
        "!geminiprovision",
        "geminiprovision",
        "!palm2",
        "palm2",
        "!codey",
        "codey",
    ]
config.inputSuggestions += [
    "!etextedit",
    "!chatgpt",
    "chatgpt",
]

# Example to set an alias to open-interpreter
#config.aliases["!interpreter"] = f"!env OPENAI_API_KEY={config.openaiApiKey} ~/open-interpreter/venv/bin/interpreter"
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/analyze audio.py:
--------------------------------------------------------------------------------
"""
LetMeDoIt AI Plugin - analyze audio file

analyze audio file

[FUNCTION_CALL]
"""

from letmedoit import config
from letmedoit.utils.shared_utils import SharedUtil
import os

# Function method
def analyze_audio(function_args):
    """Transcribe the audio file named in function_args["audio"] via the
    OpenAI whisper-1 API and return the transcript text.

    Returns "" when the file type or size is rejected, and "[INVALID]"
    when the path is missing/invalid or the API call fails.
    """
    def check_file_format(file_path):
        # List of allowed file extensions
        allowed_extensions = ('.mp3', '.mp4', '.mpeg', '.mpga', '.m4a', '.wav', '.webm')
        # Getting the file extension
        _, file_extension = os.path.splitext(file_path)
        # Membership test directly yields the boolean (was `True if ... else False`).
        return file_extension.lower() in allowed_extensions

    audio_file = function_args.get("audio") # required

    if audio_file and os.path.isfile(audio_file):
        if not check_file_format(audio_file):
            config.print3("This feature supports the following input file types only: '.mp3', '.mp4', '.mpeg', '.mpga', '.m4a', '.wav', '.webm'!")
            return ""
        elif os.path.getsize(audio_file) / (1024*1024) > 25:
            # Whisper API upload limit.
            config.print3("Audio files are currently limited to 25 MB!")
            return ""
        try:
            # Use a distinct handle name so the path variable is not shadowed.
            with open(audio_file, "rb") as audio_handle:
                transcript = config.oai_client.audio.transcriptions.create(
                    model="whisper-1",
                    file=audio_handle,
                    response_format="text"
                )
            transcript = f"The transcript of the audio is: {transcript}"
            if config.developer:
                config.print2(config.divider)
                config.print3(transcript)
                config.print2(config.divider)
            config.print2("Answer to your enquiry:")
            return transcript
        except:
            SharedUtil.showErrors()
    return "[INVALID]"

# Function Signature
functionSignature = {
    "intent": [
        "analyze files",
    ],
    "examples": [
        "analyze audio",
    ],
    "name": "analyze_audio",
    "description": f'''Answer questions about the content of an audio file or transcribe a audio speech file into text''',
    "parameters": {
        "type": "object",
        "properties": {
            "audio": {
                "type": "string",
                "description": "Return the audio file path that I specified in my requests. Return an empty string '' if it is not specified.",
            },
        },
        "required": ["audio"],
    },
}

# Integrate the signature and method into LetMeDoIt AI
config.addFunctionCall(signature=functionSignature, method=analyze_audio)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/analyze files.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - analyze files
3 |
4 | analyze files with integrated "AutoGen Retriever"
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | import os
12 | from letmedoit.autoretriever import AutoGenRetriever
13 | from PIL import Image
14 |
15 |
def analyze_files(function_args):
    """Retrieve information from files, delegating a single image to 'analyze_images'.

    Returns "" on success, "[INVALID]" when the given path does not exist.
    """
    def _verifiable_image(path):
        # PIL opens and verifies the file only when it is a supported image format.
        try:
            with Image.open(path) as image:
                image.verify()
            return True
        except (IOError, SyntaxError):
            return False

    query = function_args.get("query")  # required
    files = function_args.get("files")  # required
    if not os.path.exists(files):
        return "[INVALID]"

    if os.path.isfile(files) and _verifiable_image(files):
        # hand a single image over to the dedicated image-analysis function
        config.print3("Running function: 'analyze_images'")
        return config.chatGPTApiAvailableFunctions["analyze_images"]({"query": query, "files": [files]})

    config.stopSpinning()
    config.print2("AutoGen Retriever launched!")
    last_message = AutoGenRetriever().getResponse(files, query, True)
    config.currentMessages += last_message
    config.print2("AutoGen Retriever closed!")
    return ""
47 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "analyze files",
    ],
    "examples": [
        "analyze files",
    ],
    "name": "analyze_files",
    "description": "retrieve information from files",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Detailed queries about the given files",
            },
            "files": {
                "type": "string",
                "description": """Return a directory or non-image file path. Return an empty string '' if it is not given.""",
            },
        },
        "required": ["query", "files"],
    },
}

# Register the signature and handler with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=analyze_files)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/analyze images with gemini.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - analyze images with gemini
3 |
4 | analyze images with model "gemini pro vision"
5 |
6 | reference: https://platform.openai.com/docs/guides/vision
7 |
8 | [FUNCTION_CALL]
9 | """
10 |
11 | from letmedoit import config
12 | from letmedoit.geminiprovision import GeminiProVision
13 |
def analyze_images_with_gemini(function_args):
    """Describe or analyze images with the Gemini Pro Vision model."""
    vision = GeminiProVision(temperature=config.llmTemperature)
    answer = vision.analyze_images(function_args)
    if not answer:
        # no answer produced: report an invalid call
        return "[INVALID]"
    config.tempContent = answer
    return ""
19 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "analyze files",
    ],
    "examples": [
        "analyze image with Gemini",
    ],
    "name": "analyze_images_with_gemini",
    "description": "Use Gemini Pro Vision to describe or analyze images",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Questions or requests that users ask about the given images",
            },
            "files": {
                "type": "string",
                "description": """Return a list of image paths or urls, e.g. '["image1.png", "/tmp/image2.png", "https://letmedoit.ai/image.png"]'. Return '[]' if image path is not provided.""",
            },
        },
        "required": ["query", "files"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=analyze_images_with_gemini)
config.inputSuggestions.append("Ask Gemini Pro Vision to describe this image in detail: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/analyze images.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - analyze images
3 |
4 | analyze images with model "gpt-4-vision-preview"
5 |
6 | reference: https://platform.openai.com/docs/guides/vision
7 |
8 | [FUNCTION_CALL]
9 | """
10 |
11 | from letmedoit import config
12 | from letmedoit.utils.shared_utils import SharedUtil, check_openai_errors
13 | import openai, os
14 | from openai import OpenAI
15 |
@check_openai_errors
def analyze_images(function_args):
    """Describe or analyze images with OpenAI's vision model.

    function_args:
        query (str): the user's question about the images (required).
        files (str | list): image paths/urls, or their string representation (required).

    Returns "" on success (answer echoed and kept in config.tempContent),
    or "[INVALID]" when no usable image is found.
    """
    import ast

    query = function_args.get("query") # required
    files = function_args.get("files") # required
    if isinstance(files, str):
        if not files.startswith("["):
            files = f'["{files}"]'
        # literal_eval only parses literals; eval would execute arbitrary
        # code embedded in a model-supplied string
        files = ast.literal_eval(files)

    # expand any directory entry into the files it contains
    filesCopy = files[:]
    for item in filesCopy:
        if os.path.isdir(item):
            for root, _, allfiles in os.walk(item):
                for file in allfiles:
                    files.append(os.path.join(root, file))
            files.remove(item)

    content = []
    # keep only valid image urls and valid local image files
    for i in files:
        if SharedUtil.is_valid_url(i) and SharedUtil.is_valid_image_url(i):
            content.append({"type": "image_url", "image_url": {"url": i,},})
        elif os.path.isfile(i) and SharedUtil.is_valid_image_file(i):
            content.append({"type": "image_url", "image_url": SharedUtil.encode_image(i),})

    if not content:
        # avoid sending an empty request that the API would reject
        return "[INVALID]"
    content.insert(0, {"type": "text", "text": query,})

    response = OpenAI().chat.completions.create(
        model="gpt-4-vision-preview",
        messages=[
            {
                "role": "user",
                "content": content,
            }
        ],
        max_tokens=4096,
    )
    answer = response.choices[0].message.content
    config.print(answer)
    config.tempContent = answer
    return ""
60 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "analyze files",
    ],
    "examples": [
        "analyze image",
    ],
    "name": "analyze_images",
    "description": "describe or analyze images",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Questions or requests that users ask about the given images",
            },
            "files": {
                "type": "string",
                "description": """Return a list of image paths or urls, e.g. '["image1.png", "/tmp/image2.png", "https://letmedoit.ai/image.png"]'. Return '[]' if image path is not provided.""",
            },
        },
        "required": ["query", "files"],
    },
}

# Register the handler and offer related input suggestions in the UI.
config.addFunctionCall(signature=functionSignature, method=analyze_images)
config.inputSuggestions.append("Describe this image in detail: ")
config.inputSuggestions.append("Extract text from this image: ")
/package/letmedoit_b4_v3/plugins/analyze web content.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - analyze webpage
3 |
4 | analyze web content with "AutoGen Retriever"
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.utils.shared_utils import SharedUtil
12 | from letmedoit.autoretriever import AutoGenRetriever
13 |
def analyze_web_content(function_args):
    """Download a webpage and answer a query about it via AutoGen Retriever.

    Images are delegated to the 'analyze_images' function instead.
    """
    query = function_args.get("query")  # required
    url = function_args.get("url")  # required

    # validate the url before attempting any download
    if not url:
        config.print("No url is provided!")
        return "[INVALID]"
    if not SharedUtil.is_valid_url(url):
        config.print(f"'{url}' is not a valid url")
        return "[INVALID]"

    config.stopSpinning()
    kind, filename = SharedUtil.downloadWebContent(url)
    if not filename:
        return "[INVALID]"
    if kind == "image":
        # call function "analyze image" instead if it is an image
        config.print3("Running function: 'analyze_images'")
        return config.chatGPTApiAvailableFunctions["analyze_images"]({"query": query, "files": [filename]})

    # process with AutoGen Retriever
    config.print2("AutoGen Retriever launched!")
    last_message = AutoGenRetriever().getResponse(filename, query)
    config.currentMessages += last_message
    config.print2("AutoGen Retriever closed!")

    return ""
40 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "analyze files",
        "access to internet real-time information",
    ],
    "examples": [
        "summarize this webpage",
    ],
    "name": "analyze_web_content",
    "description": "retrieve information from a webpage if an url is provided",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Questions that users ask about the given url",
            },
            "url": {
                "type": "string",
                "description": """Return the given url. Return an empty string '' if it is not given.""",
            },
        },
        "required": ["query", "url"],
    },
}

# Register the signature and handler with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=analyze_web_content)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask chatgpt.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask chatgpt
3 |
4 | Ask ChatGPT for conversation only; no function calling
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.chatgpt import ChatGPT
12 |
def ask_chatgpt(function_args):
    """Hand the request over to a plain ChatGPT chat session (no function calling)."""
    config.stopSpinning()
    request = function_args.get("query")  # required
    # drop the pending function-call message before switching to chat mode
    config.currentMessages = config.currentMessages[:-1]
    ChatGPT().run(request)
    return ""
19 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        "Ask ChatGPT about",
    ],
    "name": "ask_chatgpt",
    "description": "Ask ChatGPT to chat or provide information",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The original request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_chatgpt)
config.inputSuggestions.append("Ask ChatGPT: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask codey.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask Codey
3 |
4 | Ask Google Codey for information about coding
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.codey import Codey
12 |
def ask_codey(function_args):
    """Forward a coding question to Google Codey."""
    config.stopSpinning()
    request = function_args.get("query")  # required
    Codey().run(request, temperature=config.llmTemperature)
    return ""
18 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        "Ask Codey about",
    ],
    "name": "ask_codey",
    "description": "Ask Codey for information about coding",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_codey)
config.inputSuggestions.append("Ask Codey: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask gemini pro.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask gemini pro
3 |
4 | Ask Google Gemini Pro for information
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.geminipro import GeminiPro
12 |
def ask_gemini_pro(function_args):
    """Forward the request to Google Gemini Pro for a chat response."""
    config.stopSpinning()
    request = function_args.get("query")  # required
    chatbot = GeminiPro(temperature=config.llmTemperature)
    chatbot.run(request)
    return ""
18 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        "Ask Gemini about",
    ],
    "name": "ask_gemini_pro",
    "description": "Ask Gemini Pro to chat or provide information",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The original request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_gemini_pro)
config.inputSuggestions.append("Ask Gemini Pro: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask gemma.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask Gemma
3 |
4 | Ask Google Gemma for information
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.ollamachat import OllamaChat
12 |
def ask_gemma(function_args):
    """Run the request through the local "gemma:7b" model via Ollama."""
    config.stopSpinning()
    request = function_args.get("query")  # required
    OllamaChat().run(request, model="gemma:7b")
    return ""
18 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        "Ask Gemma about",
    ],
    "name": "ask_gemma",
    "description": "Ask Gemma to chat or provide information",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_gemma)
config.inputSuggestions.append("Ask Gemma: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask llama2.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask Llama2
3 |
4 | Ask Llama2 for information
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.ollamachat import OllamaChat
12 |
def ask_llama2(function_args):
    """Run the request through the local Llama2 model via Ollama."""
    config.stopSpinning()
    request = function_args.get("query")  # required
    OllamaChat().run(request, model="llama2")
    return ""
18 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        # fixed copy-paste error: this plugin targets Llama2, not "Llama2 Pro"
        "Ask Llama2 about",
    ],
    "name": "ask_llama2",
    "description": "Ask Llama2 to chat or provide information",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_llama2)
config.inputSuggestions.append("Ask Llama2: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask llava.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask LLava
3 |
4 | Ask LLava for information
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.ollamachat import OllamaChat
12 |
def ask_llava(function_args):
    """Run the request through the local LLava model via Ollama."""
    config.stopSpinning()
    request = function_args.get("query")  # required
    OllamaChat().run(request, model="llava")
    return ""
18 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        "Ask Llava about",
    ],
    "name": "ask_llava",
    "description": "Ask LLava to chat or provide information",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_llava)
config.inputSuggestions.append("Ask LLava: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask mistral.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask Mistral
3 |
4 | Ask Mistral for information
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.ollamachat import OllamaChat
12 |
def ask_mistral(function_args):
    """Run the request through the local Mistral model via Ollama."""
    config.stopSpinning()
    request = function_args.get("query")  # required
    OllamaChat().run(request, model="mistral")
    return ""
18 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        "Ask Mistral about",
    ],
    "name": "ask_mistral",
    "description": "Ask Mistral to chat or provide information",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_mistral)
config.inputSuggestions.append("Ask Mistral: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask ollama.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask Ollama Chat
3 |
4 | Ask Ollama Chat for information
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.ollamachat import OllamaChat
11 | from letmedoit.utils.ollama_models import ollama_models
12 | from prompt_toolkit import PromptSession
13 | from prompt_toolkit.history import FileHistory
14 | from prompt_toolkit.completion import WordCompleter, FuzzyCompleter
15 | from prompt_toolkit.styles import Style
16 | from letmedoit.health_check import HealthCheck
17 | from pathlib import Path
18 | import os
19 |
20 |
def ask_ollama(function_args):
    """Prompt the user to pick an Ollama model, then run the query through it.

    Shows an interactive fuzzy-completing prompt (with persistent history)
    listing known Ollama models; falls back to config.ollamaDefaultModel when
    the user submits nothing, and aborts when the user enters the exit entry.
    """
    query = function_args.get("query") # required
    config.stopSpinning()

    # model
    promptStyle = Style.from_dict({
        # User input (default text).
        "": config.terminalCommandEntryColor2,
        # Prompt.
        "indicator": config.terminalPromptIndicatorColor2,
    })
    # persist the model-selection history under the local storage folder
    historyFolder = os.path.join(HealthCheck.getLocalStorage(), "history")
    Path(historyFolder).mkdir(parents=True, exist_ok=True)
    model_history = os.path.join(historyFolder, "ollama_default")
    model_session = PromptSession(history=FileHistory(model_history))
    completer = FuzzyCompleter(WordCompleter(sorted(ollama_models), ignore_case=True))
    bottom_toolbar = f""" {str(config.hotkey_exit).replace("'", "")} {config.exit_entry}"""

    HealthCheck.print2("Ollama chat launched!")
    print("Select a model below:")
    print("Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.")
    model = HealthCheck.simplePrompt(style=promptStyle, promptSession=model_session, bottom_toolbar=bottom_toolbar, default=config.ollamaDefaultModel, completer=completer)
    if model:
        # typing the exit entry cancels the whole request
        if model.lower() == config.exit_entry:
            return ""
    else:
        # empty input: fall back to the configured default model
        model = config.ollamaDefaultModel
    OllamaChat().run(query, model=model)
    return ""
50 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        "Ask Ollama about",
    ],
    "name": "ask_ollama",
    "description": "Ask Ollama to chat or provide information",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_ollama)
config.inputSuggestions.append("Ask Ollama: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/ask palm2.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask PaLM 2
3 |
4 | Ask Google PaLM 2 for information
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.palm2 import Palm2
12 |
def ask_palm2(function_args):
    """Forward the request to Google PaLM 2."""
    config.stopSpinning()
    request = function_args.get("query")  # required
    Palm2().run(request, temperature=config.llmTemperature)
    return ""
18 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "ask a chatbot",
    ],
    "examples": [
        "Ask PaLM about",
    ],
    "name": "ask_palm2",
    "description": "Ask PaLM 2 to chat or provide information",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The request in detail, including any supplementary information",
            },
        },
        "required": ["query"],
    },
}

# Register the handler and offer an input suggestion in the UI.
config.addFunctionCall(signature=functionSignature, method=ask_palm2)
config.inputSuggestions.append("Ask PaLM 2: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/auto heal python code.py:
--------------------------------------------------------------------------------
1 | from letmedoit import config
2 | import traceback
3 | from letmedoit.utils.install import installmodule
4 | from letmedoit.utils.shared_utils import SharedUtil
5 |
6 | """
7 | LetMeDoIt AI Plugin - auto heal python code
8 |
9 | functionalities:
10 | * install missing packages
11 | * fixed broken codes
12 |
13 | User can define the maximum number of auto-healing attempts by editing "max_consecutive_auto_heal" in config.py.
The default value of config.max_consecutive_auto_heal is 5.
15 |
16 | [FUNCTION_CALL]
17 | """
18 |
def heal_python(function_args):
    """Attempt to auto-heal broken python code.

    function_args:
        issue (str): brief description of the error (required).
        fix (str): improved python code resolving the traceback (required).
        missing (str | list): missing package names from import errors (required).

    Installs any missing packages, then executes the improved code.
    Returns "EXECUTED" on success, otherwise the new traceback text so the
    model can try another healing round.
    """
    import ast

    # get the sql query statement
    issue = function_args.get("issue") # required
    config.print3(f"Issue: {issue}")

    fix = function_args.get("fix") # required
    missing = function_args.get("missing") # required
    if isinstance(missing, str):
        # literal_eval parses the list literal without executing arbitrary
        # code embedded in a model-supplied string (unlike eval)
        missing = ast.literal_eval(missing)

    try:
        if missing:
            config.print2("Installing missing packages ...")
            for i in missing:
                installmodule(f"--upgrade {i}")
        config.print2("Running improved code ...")
        if config.developer or config.codeDisplay:
            SharedUtil.displayPythonCode(fix)
        exec(SharedUtil.fineTunePythonCode(fix), globals())
        return "EXECUTED"
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate;
        # the traceback is returned for the next healing attempt
        return traceback.format_exc()
41 |
# Function-call schema advertised to the LLM.
functionSignature = {
    "intent": [
        "generate code",
    ],
    "examples": [
        "Fix python code",
    ],
    "name": "heal_python",
    "description": "Fix python code if both original code and traceback error are provided",
    "parameters": {
        "type": "object",
        "properties": {
            "fix": {
                "type": "string",
                "description": "Improved version of python code that resolved the traceback error. Return the original code instead only if traceback shows an import error.",
            },
            "missing": {
                "type": "string",
                "description": """List of missing packages identified from import errors, e.g. "['datetime', 'requests']". Return "[]" if there is no import error in the traceback.""",
            },
            "issue": {
                "type": "string",
                "description": """Briefly explain the error""",
            },
        },
        "required": ["fix", "missing", "issue"],
    },
}

# configs particular to this plugin
# persistent
# maximum number of consecutive auto-healing attempts before giving up
persistentConfigs = (
    ("max_consecutive_auto_heal", 5),
)
config.setConfig(persistentConfigs)

# Register the signature and handler with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=heal_python)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/awesome prompts.py:
--------------------------------------------------------------------------------
1 | from letmedoit import config
2 |
3 | """
4 | LetMeDoIt AI Plugin - awesome prompts
5 |
6 | add selected awesome prompts https://github.com/f/awesome-chatgpt-prompts as predefined contexts
7 |
8 | """
9 |
# Predefined contexts adapted from https://github.com/f/awesome-chatgpt-prompts.
# The "[CHAT]" tag marks where the user's input is appended.
# Fixed: the first context previously ended with a stray '"' and newline
# ([CHAT]" + newline) unlike its siblings, leaking a quote into the prompt.
config.predefinedContexts["English Translator and Improver"] = """I want you to act as an English translator, spelling corrector and improver. I will speak to you in any language and you will detect the language, translate it and answer in the corrected and improved version of my text, in English. I want you to replace my simplified A0-level words and sentences with more beautiful and elegant, upper level English words and sentences. Keep the meaning same, but make them more literary. I want you to only reply the correction, the improvements and nothing else, do not write explanations. [CHAT]"""

config.predefinedContexts["Spoken English Teacher"] = """I want you to act as a spoken English teacher and improver. I will speak to you in English and you will reply to me in English to practice my spoken English. I want you to keep your reply neat, limiting the reply to 100 words. I want you to strictly correct my grammar mistakes, typos, and factual errors. I want you to ask me a question in your reply. Now let's start practicing, you could ask me a question first. Remember, I want you to strictly correct my grammar mistakes, typos, and factual errors. [CHAT]"""

config.predefinedContexts["Biblical Translator"] = """I want you to act as an biblical translator. I will speak to you in english and you will translate it and answer in the corrected and improved version of my text, in a biblical dialect. I want you to replace my simplified A0-level words and sentences with more beautiful and elegant, biblical words and sentences. Keep the meaning same. I want you to only reply the correction, the improvements and nothing else, do not write explanations. [CHAT]"""
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/character analysis.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - character analysis
3 |
4 | add contexts for character analysis
5 |
6 | """
7 |
8 | from letmedoit import config
9 |
# Predefined contexts: each entry asks the assistant to summarize a narrative
# and then analyze a character with one specific analytical method.
# The "[CHAT]" tag marks where the user's input is appended.
config.predefinedContexts["Character - Close Reading"] = """Use the following step-by-step instructions to respond to my inputs.
Step 1: Give me a brief summary of the narrative.
Step 2: Apply 'Close Reading' to analyze the character given in my input. (Remember, Close Reading involves analyzing the text carefully to gather information about the character. It focuses on examining the character's actions, dialogue, thoughts, and interactions with other characters.)
Below is my input:
[CHAT]"""

config.predefinedContexts["Character - Character Analysis"] = """Use the following step-by-step instructions to respond to my inputs.
Step 1: Give me a brief summary of the narrative.
Step 2: Apply 'Character Analysis' to analyze the character given in my input. (Remember, Character Analysis involves examining the character's traits, motivations, and development throughout the narrative. It may include analyzing the character's background, relationships, and conflicts.)
Below is my input:
[CHAT]"""

config.predefinedContexts["Character - Archetypal Analysis"] = """Use the following step-by-step instructions to respond to my inputs.
Step 1: Give me a brief summary of the narrative.
Step 2: Apply 'Archetypal Analysis' to analyze the character given in my input. (Remember, Archetypal Analysis involves identifying and analyzing the character's archetype or symbolic role in the narrative. It explores how the character embodies certain universal themes or patterns.)
Below is my input:
[CHAT]"""

config.predefinedContexts["Character - Psychological Analysis"] = """Use the following step-by-step instructions to respond to my inputs.
Step 1: Give me a brief summary of the narrative.
Step 2: Apply 'Psychological Analysis' to analyze the character given in my input. (Remember, Psychological Analysis involves applying psychological theories and concepts to understand the character's behavior and motivations. It may involve exploring the character's personality, desires, fears, and conflicts.)
Below is my input:
[CHAT]"""

config.predefinedContexts["Character - Historical and Cultural Analysis"] = """Use the following step-by-step instructions to respond to my inputs.
Step 1: Give me a brief summary of the narrative.
Step 2: Apply 'Historical and Cultural Analysis' to analyze the character given in my input. (Remember, Historical and Cultural Analysis involves examining the character in the context of the historical and cultural setting of the narrative. It explores how the character's actions and beliefs may be influenced by their social, political, or cultural environment.)
Below is my input:
[CHAT]"""

config.predefinedContexts["Character - Comparative Analysis"] = """Use the following step-by-step instructions to respond to my inputs.
Step 1: Give me a brief summary of the narrative.
Step 2: Apply 'Comparative Analysis' to analyze the character given in my input. (Remember, Comparative Analysis involves comparing the character to other characters in the narrative or to characters from other texts. It may focus on similarities and differences in their traits, roles, or thematic significance.)
Below is my input:
[CHAT]"""

config.predefinedContexts["Character - Narrative Therapy 1"] = """Use the following step-by-step instructions to respond to my inputs.
Step 1: Give me a brief summary of the narrative.
Step 2: Apply the principles of narrative therapy to analyze the character given in my input. Please explore the character's narrative, beliefs, values, and the meanings they attribute to their experiences. Please also give insights into their identity, relationships, struggles, and potential for growth or change.
Below is my input:
[CHAT]"""

config.predefinedContexts["Character - Narrative Therapy 2"] = """Use the following step-by-step instructions to respond to my inputs.
Step 1: Give me a brief summary of the narrative.
Step 2: Apply principles of Narrative Therapy, that are 1) Externalizing Problems, 2) Re-authoring Stories, 3) Deconstructing Dominant Discourses, 4) Encouraging Unique Outcomes, 5) Externalizing and Privileging Alternative Stories, to analyze the given character in my input.
Below is my input:
[CHAT]"""

config.predefinedContexts["Character - Narrative Therapy 3"] = """Apply narrative therapy to analyze this character below:
[CHAT]"""

# Reference notes kept for maintainers: background on narrative therapy
# used by the "Narrative Therapy" contexts above.
"""
The key principles of narrative therapy include:

1. Externalizing Problems: Instead of seeing problems as inherent to the individual, narrative therapy views them as separate entities that can be examined and addressed objectively.

2. Re-authoring Stories: Individuals are encouraged to reframe their narratives and create new meanings and understandings of their experiences. This can help shift focus from problems to strengths and resources.

3. Deconstructing Dominant Discourses: The therapy aims to challenge societal and cultural narratives that may contribute to individuals feeling marginalized or oppressed. By questioning these dominant discourses, individuals can gain a sense of agency and empowerment.

4. Encouraging Unique Outcomes: Narrative therapy seeks to identify and amplify unique outcomes or instances in which the individual has exhibited resilience or problem-solving skills. This helps to counterbalance the dominant problem-focused narratives.

5. Externalizing and Privileging Alternative Stories: The therapist collaborates with the individual to explore alternative stories or narratives that challenge the dominant problem-saturated ones. These alternative stories highlight the individual's strengths, values, and preferred ways of being.

These principles guide the practice of narrative therapy and help individuals to explore and transform their stories in ways that promote healing and growth.
"""
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/contexts.py:
--------------------------------------------------------------------------------
1 | from letmedoit import config
2 |
3 | """
4 | LetMeDoIt AI Plugin - contexts
5 |
6 | add pre-defined contexts
7 | """
8 |
# Pre-defined "Let me ..." contexts prefixed to user input when selected.
# The [NO_FUNCTION_CALL] token presumably suppresses function calling for
# these requests — confirm against the main request-handling code.
config.predefinedContexts["Let me Summarize"] = """Provide me with a summary of the following content:
[NO_FUNCTION_CALL]"""

config.predefinedContexts["Let me Explain"] = """Explain the meaning of the following content:
[NO_FUNCTION_CALL]"""

config.predefinedContexts["Let me Translate"] = """Assist me by acting as a translator.
Please translate the following content:
[NO_FUNCTION_CALL]"""

config.predefinedContexts["Let me Pronounce"] = """Pronounce the following content:"""

config.predefinedContexts["Let me Download"] = """Download the following web content:"""

config.predefinedContexts["Let me Download Youtube MP3"] = """Download the following Youtube media into mp3 format:"""
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/create ai assistants.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - build agents
3 |
4 | build a group of agents to execute a task with integrated "AutoGen Agent Builder"
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.autobuilder import AutoGenBuilder
12 |
def build_agents(function_args):
    """Launch the integrated AutoGen Agent Builder to execute a task.

    Args:
        function_args: dict with keys "task" (detailed task description;
            required) and "title" (short task title; required).

    Returns:
        An empty string; as a side effect, the builder's conversation is
        appended to config.currentMessages.
    """
    task = function_args.get("task") # required
    title = function_args.get("title") # required
    config.print2("AutoGen Agent Builder launched!")
    config.print3(f"Title: {title}")
    config.print3(f"Description: {task}")
    messages = AutoGenBuilder().getResponse(task, title)
    # drop a trailing message with empty content; the emptiness check is
    # guarded so an empty message list no longer raises IndexError
    if messages and not messages[-1]["content"]:
        messages.pop()
    # add context to the message chain
    config.currentMessages += messages
    config.print2("\nAutoGen Agent Builder closed!")
    return ""
26 |
# JSON-schema style signature that registers this plugin with the
# function-calling layer; "intent"/"examples" assist request routing.
functionSignature = {
    "intent": [
        "ask an auto assistant",
        "create content",
    ],
    "examples": [
        "Ask autobuilder to",
        "Create a team of agents / assistants to",
    ],
    "name": "build_agents",
    "description": "build a group of AI assistants or agents to execute a complicated task that other functions cannot resolve",
    "parameters": {
        "type": "object",
        "properties": {
            "task": {
                "type": "string",
                "description": "Task description in as much detail as possible",
            },
            "title": {
                "type": "string",
                "description": "A short title to describe the task",
            },
        },
        "required": ["task", "title"],
    },
}

config.addFunctionCall(signature=functionSignature, method=build_agents)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/create images.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - create images
3 |
4 | generate images with model "dall-e-3"
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | import os
11 | from base64 import b64decode
12 | from letmedoit.utils.shared_utils import SharedUtil, check_openai_errors
13 | from letmedoit.utils.terminal_mode_dialogs import TerminalModeDialogs
14 | from openai import OpenAI
15 | from pathlib import Path
16 |
@check_openai_errors
def create_image(function_args):
    """Generate one image with OpenAI's "dall-e-3" model and open it.

    The user is asked interactively for size and quality; the result is
    decoded from base64, saved as a PNG under the local storage "images"
    folder, then opened (or shared when running on Termux).

    Returns "[INVALID]" when the user cancels either selection dialog,
    otherwise a message with the saved file path.
    """
    prompt = function_args.get("prompt") # required
    dialogs = TerminalModeDialogs(None)

    # interactive size selection
    size = dialogs.getValidOptions(
        options=("1024x1024", "1024x1792", "1792x1024"),
        title="Generating an image ...",
        default="1024x1024",
        text="Select size below:"
    )
    if not size:
        config.stopSpinning()
        return "[INVALID]"

    # interactive quality selection
    quality = dialogs.getValidOptions(
        options=("standard", "hd"),
        title="Generating an image ...",
        default="hd",
        text="Select quality below:"
    )
    if not quality:
        config.stopSpinning()
        return "[INVALID]"

    # request the image as base64 JSON; see
    # https://platform.openai.com/docs/guides/images/introduction
    response = OpenAI().images.generate(
        model="dall-e-3",
        prompt=f"I NEED to test how the tool works with extremely simple prompts. DO NOT add any detail, just use it AS-IS:\n{prompt}",
        size=size,
        quality=quality, # "hd" or "standard"
        response_format="b64_json",
        n=1,
    )

    # decode the payload and save it as <local storage>/images/<timestamp>.png
    imageFolder = os.path.join(config.getLocalStorage(), "images")
    Path(imageFolder).mkdir(parents=True, exist_ok=True)
    imageFile = os.path.join(imageFolder, f"{SharedUtil.getCurrentDateTime()}.png")
    with open(imageFile, mode="wb") as pngObj:
        pngObj.write(b64decode(response.data[0].b64_json))

    config.stopSpinning()
    # show the result: share on Termux, otherwise open with the default opener
    if config.terminalEnableTermuxAPI:
        SharedUtil.getCliOutput(f"termux-share {imageFile}")
    else:
        os.system(f"{config.open} {imageFile}")
    return f"Saved as '{imageFile}'"
72 |
# Function-calling signature registered with the plugin framework.
functionSignature = {
    "intent": [
        "create content",
    ],
    "examples": [
        "Create image",
    ],
    "name": "create_image",
    "description": "create an image",
    "parameters": {
        "type": "object",
        "properties": {
            "prompt": {
                "type": "string",
                "description": "Description of the image in as much detail as possible",
            },
        },
        "required": ["prompt"],
    },
}

config.addFunctionCall(signature=functionSignature, method=create_image)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/create maps.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - create maps
3 |
4 | Create maps
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | import re, os
12 |
def create_map(function_args):
    """Execute folium map-building Python code and open the resulting HTML.

    The generated code is expected to save its map with .save("...html");
    when such a path can be extracted from the code, the file is opened.

    Returns a status message, the execution output, or an empty string.
    """
    code = function_args.get("code") # required
    information = SharedUtil.showAndExecutePythonCode(code)
    # raw string: the original non-raw literal relied on invalid escape
    # sequences (e.g. "\.") that Python only tolerates with a warning
    htmlPattern = r"""\.save\(["']([^\(\)]+\.html)["']\)"""
    match = re.search(htmlPattern, code)
    if match:
        htmlFile = match.group(1)
        # quote the path so filenames containing spaces still open
        # (consistent with the "create qrcode" plugin)
        os.system(f'''{config.open} "{htmlFile}"''')
        return f"Saved as '{htmlFile}'"
    elif information:
        return information
    return ""
25 |
# Function-calling signature registered with the plugin framework.
functionSignature = {
    "intent": [
        "create content",
    ],
    "examples": [
        "Create a map",
    ],
    "name": "create_map",
    "description": f'''Create maps''',
    "parameters": {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "Python code that integrates package folium to resolve my request. Created maps are saved in html format",
            },
        },
        "required": ["code"],
    },
}

config.addFunctionCall(signature=functionSignature, method=create_map)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/create qrcode.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - create qr code
3 |
4 | Create qr code image
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | import os, json
12 |
def create_qrcode(function_args):
    """Execute QR-code-generating Python code and open the saved image.

    The executed code is expected to print the saved file's full path,
    which comes back JSON-encoded under the "information" key.

    Returns an empty string; opening the file is best-effort.
    """
    code = function_args.get("code") # required
    information = SharedUtil.showAndExecutePythonCode(code)
    if information:
        filepath = json.loads(information)["information"]
        if os.path.isfile(filepath):
            config.print3(f"File saved at: {filepath}")
            try:
                os.system(f'''{config.open} "{filepath}"''')
            except Exception:
                # best-effort open; a bare except would also swallow
                # KeyboardInterrupt / SystemExit
                pass
    return ""
25 |
# Function-calling signature registered with the plugin framework.
functionSignature = {
    "intent": [
        "create content",
    ],
    "examples": [
        "Create a QR code",
    ],
    "name": "create_qrcode",
    "description": f'''Create QR code''',
    "parameters": {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "Python code that integrates package qrcode to resolve my request. Always save the qr code image in png format and use 'print' function to print its full path only, without additional description or comment, in the last line of your code.",
            },
        },
        "required": ["code"],
    },
}

config.addFunctionCall(signature=functionSignature, method=create_qrcode)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/create statistical graphics.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - create statistical graphics
3 |
4 | Create statistical graphics to visulize data
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | import os, re
12 |
def create_statistical_graphics(function_args):
    """Execute seaborn plotting code and open the saved PNG chart.

    The generated code is expected to save its plot with .savefig("...png");
    when such a path can be extracted from the code, the file is opened.

    Returns a status message, the execution output, or an empty string.
    """
    config.stopSpinning()

    code = function_args.get("code") # required
    information = SharedUtil.showAndExecutePythonCode(code)

    # raw string: the original non-raw literal relied on invalid escape
    # sequences (e.g. "\.") that Python only tolerates with a warning
    pngPattern = r"""\.savefig\(["']([^\(\)]+\.png)["']\)"""
    match = re.search(pngPattern, code)
    if match:
        pngFile = match.group(1)
        # quote the path so filenames containing spaces still open
        # (consistent with the "create qrcode" plugin)
        os.system(f'''{config.open} "{pngFile}"''')
        return f"Saved as '{pngFile}'"
    elif information:
        return information
    return ""
28 |
# Function-calling signature registered with the plugin framework.
functionSignature = {
    "intent": [
        "create content",
    ],
    "examples": [
        "Create a plot / graph / chart",
        "Visualize data",
    ],
    "name": "create_statistical_graphics",
    "description": f'''Create statistical plots, such as pie charts or bar charts, to visualize statistical data''',
    "parameters": {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "Python code that integrates package seaborn to resolve my request. Use TkAgg as backend. Created plots are saved in png format",
            },
        },
        "required": ["code"],
    },
}

config.addFunctionCall(signature=functionSignature, method=create_statistical_graphics)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/dates and times.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - dates and times
3 |
4 | Retrieve information about dates and times
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 |
def datetimes(function_args):
    """Execute pendulum-based Python code and return its captured output."""
    # "code" is required per the function signature below
    return SharedUtil.showAndExecutePythonCode(function_args.get("code"))
16 |
# Function-calling signature registered with the plugin framework.
functionSignature = {
    "intent": [
        "access to internet real-time information",
    ],
    "examples": [
        "What is the current time in Hong Kong?",
        "Tell me the date of the upcoming Friday?",
    ],
    "name": "datetimes",
    "description": f'''Get information about dates and times''',
    "parameters": {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "Python code that integrates package pendulum to resolve my query",
            },
        },
        "required": ["code"],
    },
}

config.addFunctionCall(signature=functionSignature, method=datetimes)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/download youtube or web content.py:
--------------------------------------------------------------------------------
1 | # install binary ffmpeg and python package yt-dlp to work with this plugin
2 |
3 | """
4 | LetMeDoIt AI Plugin - download youtube or web content
5 |
6 | * download Youtube video
7 | * download Youtube audio and convert it into mp3
8 | * download webcontent
9 |
10 | [FUNCTION_CALL]
11 | """
12 |
13 | from letmedoit import config
14 | import re, subprocess, os
15 | from letmedoit.utils.shared_utils import SharedUtil
16 | from pathlib import Path
17 |
18 |
def download_web_content(function_args):
    """Download a Youtube video (mp4) / audio (mp3) via yt-dlp, or generic web content.

    Args:
        function_args: dict with "url" (required), "format" ("video" or
            "audio"; required) and an optional "location" output folder.

    Returns:
        A status message, or "[INVALID]" on failure or an invalid link.
    """
    def is_youtube_url(url_string):
        # match youtube.com and youtu.be link variants
        pattern = r'(?:https?:\/\/)?(?:www\.)?youtu(?:\.be|be\.com)\/(?:watch\?v=|embed\/|v\/)?([a-zA-Z0-9\-_]+)'
        return re.match(pattern, url_string) is not None

    def isFfmpegInstalled():
        # any stderr output from "ffmpeg -version" is treated as "not installed"
        ffmpegVersion = subprocess.Popen("ffmpeg -version", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        *_, stderr = ffmpegVersion.communicate()
        return False if stderr else True

    def terminalDownloadYoutubeFile(downloadCommand, url_string, outputFolder):
        if isFfmpegInstalled():
            try:
                config.print("--------------------")
                # use os.system, as it displays download status ...
                # quote both the folder and the URL: Youtube URLs typically
                # contain '&', which an unquoted shell command would treat as
                # a background operator and truncate the URL
                os.system('''cd "{2}"; {0} "{1}"'''.format(downloadCommand, url_string, outputFolder))
                if SharedUtil.isPackageInstalled("pkill"):
                    os.system("pkill yt-dlp")
                config.print3(f"Downloaded in: '{outputFolder}'")
                try:
                    # best-effort: reveal the output folder
                    os.system(f'''{config.open} "{outputFolder}"''')
                except Exception:
                    pass
            except Exception:
                SharedUtil.showErrors()
        else:
            config.print("Tool 'ffmpeg' is not found on your system!")
            config.print("Read https://github.com/eliranwong/letmedoit/wiki/Install-ffmpeg")

    url = function_args.get("url") # required
    if is_youtube_url(url):
        config.print("Loading youtube downloader ...")
        # renamed from 'format' to avoid shadowing the builtin
        media_format = function_args.get("format") # required
        location = function_args.get("location", "") # optional
        if not (location and os.path.isdir(location)):
            # fall back to <local storage>/audio or /video
            location = os.path.join(config.getLocalStorage(), "audio" if media_format == "audio" else "video")
            Path(location).mkdir(parents=True, exist_ok=True)
        downloadCommand = "yt-dlp -x --audio-format mp3" if media_format == "audio" else "yt-dlp -f bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4"
        terminalDownloadYoutubeFile(downloadCommand, url, location)
        return "Finished! Youtube downloader closed!"
    elif SharedUtil.is_valid_url(url):
        try:
            folder = os.path.join(config.getLocalStorage(), "Downloads")
            Path(folder).mkdir(parents=True, exist_ok=True)
            SharedUtil.downloadWebContent(url, folder=folder, ignoreKind=True)
            return "Downloaded!"
        except Exception:
            SharedUtil.showErrors()
            return "[INVALID]"
    else:
        config.print("invalid link given")
        return "[INVALID]"
74 |
# Function-calling signature registered with the plugin framework.
functionSignature = {
    "intent": [
        "access to internet real-time information",
    ],
    "examples": [
        "Download Youtube video",
        "Download Youtube audio into mp3 format",
        "Download this webpage",
    ],
    "name": "download_web_content",
    "description": "download Youtube video into mp4 file or download audio into mp3 file or download webcontent",
    "parameters": {
        "type": "object",
        "properties": {
            "url": {
                "type": "string",
                "description": "Youtube url given by user",
            },
            "format": {
                "type": "string",
                "description": "Media format to be downloaded. Return 'video' if not given.",
                "enum": ["video", "audio"],
            },
            "location": {
                "type": "string",
                "description": "Output folder where downloaded file is to be saved",
            },
        },
        "required": ["url", "format"],
    },
}

config.addFunctionCall(signature=functionSignature, method=download_web_content)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/edit text.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - edit text
3 |
4 | edit text files
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | import os, re, sys
11 | from letmedoit.utils.shared_utils import SharedUtil
12 |
# persistent
# users can customise 'textEditor' and 'textFileExtensions' in config.py
persistentConfigs = (
    #("textEditor", "micro -softwrap true -wordwrap true"), # read options at https://github.com/zyedidia/micro/blob/master/runtime/help/options.md
    ("textFileExtensions", ['txt', 'md', 'py']), # edit this option to support more or less extensions
)
config.setConfig(persistentConfigs)

# validate the user-configured editor command: keep it only when its
# executable (the first word of the command) is actually installed
if config.customTextEditor:
    textEditor = re.sub(" .*?$", "", config.customTextEditor)
    if not textEditor or not SharedUtil.isPackageInstalled(textEditor):
        config.customTextEditor = ""
25 |
def edit_text(function_args):
    """Open a text file (or a blank document) in the configured text editor.

    Falls back to the bundled eTextEdit when config.customTextEditor is unset.
    If the given path is a directory, it is opened with the file manager instead.
    """
    customTextEditor = config.customTextEditor if config.customTextEditor else f"{sys.executable} {os.path.join(config.letMeDoItAIFolder, 'eTextEdit.py')}"
    filename = function_args.get("filename") # required
    # in case folder name is mistaken
    if os.path.isdir(filename):
        # the original command interpolated a corrupted placeholder instead of
        # the retrieved (and otherwise unused) 'filename'; restored, quoted
        os.system(f'''{config.open} "{filename}"''')
        return "Finished! Directory opened!"
    else:
        # open the named file in the editor, or launch the editor empty
        command = f'{customTextEditor} "{filename}"' if filename else customTextEditor
        config.stopSpinning()
        os.system(command)
        return "Finished! Text editor closed!"
38 |
# Function-calling signature registered with the plugin framework.
functionSignature = {
    "intent": [
        "change files",
    ],
    "examples": [
        "Edit test.txt",
    ],
    "name": "edit_text",
    "description": f'''Edit text files with extensions: '*.{"', '*.".join(config.textFileExtensions)}'.''',
    "parameters": {
        "type": "object",
        "properties": {
            "filename": {
                "type": "string",
                "description": "Text file path given by user. Return an empty string if not given.",
            },
        },
        "required": ["filename"],
    },
}

config.addFunctionCall(signature=functionSignature, method=edit_text)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/execute python code.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - execute python code
3 |
4 | execute python code
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | from letmedoit.health_check import HealthCheck
12 | import pygments
13 | from pygments.lexers.python import PythonLexer
14 | from prompt_toolkit.formatted_text import PygmentsTokens
15 | from prompt_toolkit import print_formatted_text
16 | from prompt_toolkit.styles import Style
17 |
def execute_python_code(function_args):
    """Display generated Python code, optionally confirm with the user, then run it.

    Args:
        function_args: dict with "code", "title" and "risk" (all required).

    Returns:
        The execution result from SharedUtil.executePythonCode, or
        "[INVALID]" when execution is disabled or the user declines.
    """
    # retrieve argument values from a dictionary
    risk = function_args.get("risk") # required
    title = function_args.get("title") # required
    python_code = function_args.get("code") # required
    refinedCode = SharedUtil.fineTunePythonCode(python_code)

    promptStyle = Style.from_dict({
        # User input (default text).
        "": config.terminalCommandEntryColor2,
        # Prompt.
        "indicator": config.terminalPromptIndicatorColor2,
    })

    # show python code for developer
    config.print(config.divider)
    config.print(f"Python: {title}")
    SharedUtil.showRisk(risk)
    if config.developer or config.codeDisplay:
        config.print("```")
        #print(python_code)
        # pygments python style
        tokens = list(pygments.lex(python_code, lexer=PythonLexer()))
        print_formatted_text(PygmentsTokens(tokens), style=SharedUtil.getPygmentsStyle())
        config.print("```")
    config.print(config.divider)

    config.stopSpinning()
    if not config.runPython:
        # python execution has been disabled (e.g. by a prior decline below)
        return "[INVALID]"
    elif SharedUtil.confirmExecution(risk):
        config.print("Do you want to execute it? [y]es / [N]o")
        confirmation = HealthCheck.simplePrompt(style=promptStyle, default="y")
        if not confirmation.lower() in ("y", "yes"):
            # a decline also disables further python execution
            config.runPython = False
            return "[INVALID]"
    return SharedUtil.executePythonCode(refinedCode)
55 |
# Function-calling signature registered with the plugin framework.
functionSignature = {
    "intent": [
        "access to device information",
        "execute a computing task or run a command",
        "generate code",
    ],
    "examples": [
        "What is my operating system",
        "Open media player",
        "Run python code",
        "Run system command",
    ],
    "name": "execute_python_code",
    "description": "Execute python code to resolve a computing task",
    "parameters": {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "Python code that integrates any relevant packages to resolve my request",
            },
            "title": {
                "type": "string",
                "description": "title for the python code",
            },
            "risk": {
                "type": "string",
                "description": "Assess the risk level of damaging my device upon executing the task. e.g. file deletions or similar significant impacts are regarded as 'high' level.",
                "enum": ["high", "medium", "low"],
            },
        },
        "required": ["code", "title", "risk"],
    },
}

config.addFunctionCall(signature=functionSignature, method=execute_python_code)
92 |
93 |
### A dummy function to redirect q&a task about python, otherwise, it may be mistaken by execute_python_code
def python_qa(_):
    """Dummy handler: always report invalid so python Q&A falls through to plain chat."""
    return "[INVALID]"
# Signature for the dummy redirect above; "qa" is only a placeholder parameter.
functionSignature = {
    "intent": [
        "answer a question that you have sufficient knowledge",
    ],
    "examples": [
        "How to use decorators in python",
    ],
    "name": "python_qa",
    "description": f'''Answer questions or provide information about python''',
    "parameters": {
        "type": "object",
        "properties": {
            "qa": {
                "type": "string",
                "description": "empty string ''",
            },
        },
    },
}
config.addFunctionCall(signature=functionSignature, method=python_qa)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/execute termux command.py:
--------------------------------------------------------------------------------
1 |
2 | """
3 | LetMeDoIt AI Plugin - execute termux command
4 |
5 | execute termux command
6 |
7 | [FUNCTION_CALL]
8 | """
9 |
10 | from letmedoit import config
11 | from letmedoit.utils.shared_utils import SharedUtil
12 | from letmedoit.health_check import HealthCheck
13 | import textwrap, re, pygments, json, pydoc
14 | from pygments.lexers.shell import BashLexer
15 | from prompt_toolkit.formatted_text import PygmentsTokens
16 | from prompt_toolkit import print_formatted_text
17 | from prompt_toolkit.styles import Style
18 |
19 |
# Termux-only plugin: nothing is registered unless the Termux:API layer is on
if config.terminalEnableTermuxAPI:
    def execute_termux_command(function_args):
        """Show, optionally confirm, and run a Termux command; return its output as JSON.

        Commands that merely share text via 'termux-share' are detected and
        rewritten to pipe the text directly instead of invoking a subshell.
        """
        # retrieve argument values from a dictionary
        risk = function_args.get("risk") # required
        title = function_args.get("title") # required
        #sharedText = function_args.get("message", "") # optional
        function_args = textwrap.dedent(function_args.get("code")).strip() # required
        # extract text shared via 'termux-share' from several quoting styles;
        # when any pattern matches, the command is normalized to a plain share
        sharedText = re.sub("^termux-share .*?'([^']+?)'$", r"\1", function_args)
        sharedText = re.sub('^termux-share .*?"([^"]+?)"$', r"\1", sharedText)
        sharedText = re.sub("""^[\d\D]*?subprocess.run\(\['termux-share'[^\[\]]*?'([^']+?)'\]\)[\d\D]*?$""", r"\1", sharedText)
        sharedText = re.sub('''^[\d\D]*?subprocess.run\(\["termux-share"[^\[\]]*?"([^']+?)"\]\)[\d\D]*?$''', r"\1", sharedText)
        function_args = function_args if sharedText == function_args else f'''termux-share -a send "{sharedText}"'''

        promptStyle = Style.from_dict({
            # User input (default text).
            "": config.terminalCommandEntryColor2,
            # Prompt.
            "indicator": config.terminalPromptIndicatorColor2,
        })

        # show Termux command for developer
        config.print(config.divider)
        config.print(f"Termux: {title}")
        SharedUtil.showRisk(risk)
        if config.developer or config.codeDisplay:
            config.print("```")
            #print(function_args)
            tokens = list(pygments.lex(function_args, lexer=BashLexer()))
            print_formatted_text(PygmentsTokens(tokens), style=SharedUtil.getPygmentsStyle())
            config.print("```")
        config.print(config.divider)

        config.stopSpinning()
        # ask for confirmation when the assessed risk level requires it
        if SharedUtil.confirmExecution(risk):
            config.print("Do you want to execute it? [y]es / [N]o")
            confirmation = HealthCheck.simplePrompt(style=promptStyle, default="y")
            if not confirmation.lower() in ("y", "yes"):
                return "[INVALID]"

        try:
            if not sharedText == function_args:
                # detected a text share: pipe the text straight to termux-share
                pydoc.pipepager(sharedText, cmd="termux-share -a send")
                function_response = "Done!"
            else:
                # display both output and error
                function_response = SharedUtil.runSystemCommand(function_args)
            config.print(function_response)
        except:
            SharedUtil.showErrors()
            config.print(config.divider)
            return "[INVALID]"
        info = {"information": function_response}
        # NOTE(review): the assignment below is redundant — the identical
        # value is rebuilt and returned on the next line
        function_response = json.dumps(info)
        return json.dumps(info)

    # Function-calling signature registered with the plugin framework.
    functionSignature = {
        "intent": [
            "access to device information",
            "execute a computing task or run a command",
            "generate code",
        ],
        "examples": [
            "Run Termux command",
        ],
        "name": "execute_termux_command",
        "description": "Execute Termux command on Android",
        "parameters": {
            "type": "object",
            "properties": {
                "code": {
                    "type": "string",
                    "description": "Termux command, e.g. am start -n com.android.chrome/com.google.android.apps.chrome.Main",
                },
                "title": {
                    "type": "string",
                    "description": "title for the termux command",
                },
                "risk": {
                    "type": "string",
                    "description": "Assess the risk level of damaging my device upon executing the task. e.g. file deletions or similar significant impacts are regarded as 'high' level.",
                    "enum": ["high", "medium", "low"],
                },
            },
            "required": ["code", "title", "risk"],
        },
    }

    config.addFunctionCall(signature=functionSignature, method=execute_termux_command)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/improve British English.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - improve British English
3 |
4 | add context to help users to improve British English
5 | """
6 |
7 | from letmedoit import config
8 |
9 | config.predefinedContexts["British English Teacher"] = """You're here to be my friendly British English teacher. Your goal is to assist me in enhancing my language skills, whether it's improving my grammar, refining my speaking style, or selecting the appropriate words. You're dedicated to supporting my progress and together, we can collaborate on enhancing my British English. Let's engage in a conversation, in the style of British spoken English, and imagine that we're based in London. Feel free to initiate discussions about anything related to London or the UK. Remember, you want to chat with me like a real friend, so instead of just giving me lots of information, let's have a random talk and you'll help me improve my English as we chat. Please always strive to maintain an engaging and continuous conversation. [CHAT]"""
10 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/input suggestions.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - input suggestions
3 |
4 | add input suggestions
5 | """
6 |
7 | from letmedoit import config
8 | import sys, os
9 |
# Entries offered as auto-complete suggestions at the main input prompt.
# Tokens like [CHAT] / [NO_FUNCTION_CALL] are mode switches handled elsewhere;
# '!'-prefixed entries presumably run as shell commands — confirm in main.py.
config.inputSuggestions += [
    "[CHAT] ",
    "[CHAT_chatgpt] ",
    "[CHAT_geminipro] ",
    "[CHAT_palm2] ",
    "[CHAT_codey] ",
    "[NO_FUNCTION_CALL] ",
    f"!{config.open} ",
    f"!{sys.executable} ",
    "open with default application: ",
    "open with file manager: ",
    "open with web browser: ",
    "read ",
    "search ",
    "analyze ",
    "tell me about ",
    "write a summary ",
    "explain ",
    "What does it mean? ",
    "Craft a prompt for ChatGPT that outlines the necessary steps it should take to complete the following task at hand:\n[CHAT]\n",
    f"Improve the following content according to {config.improvedWritingSytle}:\n[CHAT]\n",
    "Before you start, please ask me any questions you have about this so I can give you more context. Be extremely comprehensive.",
]

# Multiline suggestion: a two-step translate workflow in chat mode.
config.inputSuggestions.append("""Translate Content. Assist me by acting as a translator. Once I have provided you with the content, you should inquire about the language I need it translated into. After I inform you of the desired language, proceed with the translation.
[CHAT]
Please translate the content below:
""")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/install python package.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - install python package
3 |
4 | install python package into the environment that runs LetMeDoIt AI
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.install import installmodule
11 |
12 | # Function method
# Function method
def install_package(function_args):
    """Install or upgrade a python package into the environment running LetMeDoIt AI."""
    package = function_args.get("package") # required
    if not package:
        return ""
    config.stopSpinning()
    # installmodule returns a truthy value on success
    if installmodule(f"--upgrade {package}"):
        return "Installed!"
    return f"Failed to install '{package}'!"
20 |
# Function Signature
# Function-calling schema registered with the plugin framework.
functionSignature = {
    "intent": [
        "installation",
    ],
    "examples": [
        "Install package",
    ],
    "name": "install_package",
    "description": f'''Install python package''',
    "parameters": {
        "type": "object",
        "properties": {
            "package": {
                "type": "string",
                "description": "Package name",
            },
        },
        "required": ["package"],
    },
}

# Integrate the signature and method into LetMeDoIt AI
config.addFunctionCall(signature=functionSignature, method=install_package)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/integrate google searches.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - integrate google searches
3 |
4 | Search internet for keywords when ChatGPT lacks information or when user ask about news or latest updates
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | import json, googlesearch
11 |
12 | # pip3 install googlesearch-python
13 | # Use google https://pypi.org/project/googlesearch-python/ to search internet for information, about which ChatGPT doesn't know.
14 |
def integrate_google_searches(function_args):
    """Run a Google search for the given keywords and return results as JSON.

    Each result contributes a title, url and description, keyed as
    "information 0", "information 1", ... up to the configured maximum.
    """
    keywords = function_args.get("keywords") # required

    config.print("Loading internet searches ...")

    searchResults = googlesearch.search(keywords, advanced=True, num_results=config.maximumInternetSearchResults)
    info = {
        f"information {index}": {
            "title": item.title,
            "url": item.url,
            "description": item.description,
        }
        for index, item in enumerate(searchResults)
    }

    config.print("Loaded!\n")

    return json.dumps(info)
33 |
# Function signature advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "access to internet real-time information",
    ],
    "examples": [
        "Search internet",
    ],
    "name": "integrate_google_searches",
    "description": "Search internet for keywords when ChatGPT lacks information or when user ask about latest updates",
    "parameters": {
        "type": "object",
        "properties": {
            "keywords": {
                "type": "string",
                "description": "keywords for searches, e.g. ChatGPT",
            },
        },
        "required": ["keywords"],
    },
}

# Register the signature and method with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=integrate_google_searches)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/manipulate files.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - manipulate files
3 |
4 | Manipulate files, such as navigation, rename, removal, conversion, etc.
5 |
6 | This plugin is created to avoid conflicts with plugin "analyze files"
7 |
8 | [FUNCTION_CALL]
9 | """
10 |
11 | from letmedoit import config
12 | from letmedoit.utils.shared_utils import SharedUtil
13 | import re, os
14 |
def manipulate_files(function_args):
    """Show and execute the generated Python code that performs the requested file operation."""
    return SharedUtil.showAndExecutePythonCode(function_args.get("code"))
18 |
# Function signature advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "change files",
    ],
    "examples": [
        "Edit test.txt",
    ],
    "name": "manipulate_files",
    "description": f'''Manipulate files, such as opening, launching, navigation, renaming, editing, removal, conversion, etc.''',
    "parameters": {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "Python code that integrates any relevant packages to resolve my request",
            },
        },
        "required": ["code"],
    },
}

# Register the signature and method with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=manipulate_files)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/memory.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - memory
3 |
4 | Save and retrieve memory
5 |
6 | modified from source: https://medium.com/@simon_attard/building-a-memory-layer-for-gpt-using-function-calling-da17d66920d0
7 |
8 | [FUNCTION_CALL]
9 | """
10 |
11 | from letmedoit import config
12 | from letmedoit.health_check import HealthCheck
13 | from pathlib import Path
14 | from chromadb.config import Settings
15 | import uuid, os, chromadb, getpass, geocoder, datetime, json
16 |
# Persistent settings this plugin relies on (presumably registered with a
# default when absent — confirm against config.setConfig).
persistentConfigs = (
    ("memory_categories", ["general", "instruction", "fact", "event", "concept"]),
)
config.setConfig(persistentConfigs)

# The chroma vector store lives under the user's local storage folder.
memory_store = os.path.join(config.getLocalStorage(), "memory")
Path(memory_store).mkdir(parents=True, exist_ok=True)
chroma_client = chromadb.PersistentClient(memory_store, Settings(anonymized_telemetry=False))
25 |
26 | #import numpy as np
27 | #from numpy.linalg import norm
28 | #def cosine_similarity(A, B):
29 | # cosine = np.dot(A, B) / (norm(A) * norm(B))
30 | # return cosine
31 |
def get_or_create_collection(collection_name):
    """Return the named chroma collection (cosine distance), creating it if absent."""
    return chroma_client.get_or_create_collection(
        name=collection_name,
        metadata={"hnsw:space": "cosine"},
        embedding_function=HealthCheck.getEmbeddingFunction(),
    )
39 |
def add_vector(collection, text, metadata):
    """Store one document with its metadata in the collection under a fresh UUID.

    Renamed the local from `id` (which shadowed the builtin) to `vector_id`.
    """
    vector_id = str(uuid.uuid4())
    collection.add(
        documents = [text],
        metadatas = [metadata],
        ids = [vector_id]
    )
47 |
def save_memory(function_args):
    """Persist a memory snippet, with contextual metadata, into the 'memories' collection."""
    memory = function_args.get("memory") # required
    memory_title = function_args.get("title") # required
    memory_type = function_args.get("type") # required
    memory_tags = function_args.get("tags") # required
    # The model sometimes returns tags as a list instead of a string; coerce so
    # the chroma metadata value stays a scalar.
    if not isinstance(memory_tags, str):
        print(type(memory_tags), memory_tags)  # NOTE(review): looks like leftover debug output — consider removing
        memory_tags = str(memory_tags)
    collection = get_or_create_collection("memories")
    g = geocoder.ip('me')  # approximate location from the public IP (network call)
    metadata = {
        "timestamp": str(datetime.datetime.now()),
        "tags": memory_tags,
        "title": memory_title,
        "type": memory_type,
        "user": getpass.getuser(),
        "location": f"{g.city}, {g.state}, {g.country}",
    }
    # Developer mode: echo what is about to be stored.
    if config.developer:
        config.print(config.divider)
        print(">>> saving memory: ")
        config.print(f"memory: {memory}")
        print(metadata)
        config.print(config.divider)
    add_vector(collection, memory, metadata)
    config.stopSpinning()
    return "I saved it in my memory!"
75 |
def query_vectors(collection, query, n):
    """Return the n nearest stored documents for the given query text."""
    return collection.query(query_texts=[query], n_results=n)
83 |
def retrieve_memory(function_args):
    """Look up the closest saved memories for a query and return them as a JSON string."""
    query = function_args.get("query") # required
    collection = get_or_create_collection("memories")
    res = query_vectors(collection, query, config.memoryClosestMatches)
    # Developer mode: echo what was retrieved.
    if config.developer:
        config.print(config.divider)
        print(">>> retrieved memories: ")
        print(res["documents"])
        config.print(config.divider)
    info = {
        f"memory {index}": {"description": description}
        for index, description in enumerate(res["documents"][0])
    }
    config.stopSpinning()
    return json.dumps(info)
100 |
# Two signatures advertised to the LLM: one to save a memory, one to retrieve.
functionSignature1 = {
    "intent": [
        "memory / record access",
        "arrange activities",
    ],
    "examples": [
        "Remember that",
    ],
    "name": "save_memory",
    "description": """Use this function if I mention something which you think would be useful in the future and should be saved as a memory. Saved memories will allow you to retrieve snippets of past conversations when needed.""",
    "parameters": {
        "type": "object",
        "properties": {
            "memory": {
                "type": "string",
                "description": "Full description of the memory to be saved. I would like you to help me with converting relative dates and times, if any, into exact dates and times based on the given current date and time.",
            },
            "title": {
                "type": "string",
                "description": "Title of the memory",
            },
            "type": {
                "type": "string",
                "description": "Type of the memory, return either 'general', 'instruction', 'fact', 'event', or 'concept'",
                "enum": config.memory_categories,
            },
            "tags": {
                "type": "string",
                "description": """Return a list of tags about the memory, e.g. '["work", "to_do", "follow_up"]'""",
            },
        },
        "required": ["memory", "title", "type", "tags"]
    }
}
functionSignature2 = {
    "intent": [
        "memory / record access",
        "arrange activities",
    ],
    "examples": [
        "Do you remember that",
    ],
    "name": "retrieve_memory",
    "description": """Use this function to query and retrieve memories of important conversation snippets that we had in the past. Use this function if the information you require is not in the current prompt or you need additional information to refresh your memory.""",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The query to be used to look up memories from a vector database"
            },
        },
        "required": ["query"]
    }
}

# Register both signatures/methods and add prompt suggestions.
config.inputSuggestions += ["Remember, ", "Do you remember?"]
config.addFunctionCall(signature=functionSignature1, method=save_memory)
config.addFunctionCall(signature=functionSignature2, method=retrieve_memory)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/modify images.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - modify images
3 |
4 | modify the given images according to changes specified by users
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | import openai, os
11 | from openai import OpenAI
12 | from letmedoit.utils.shared_utils import SharedUtil
13 | from letmedoit.utils.shared_utils import check_openai_errors
14 | from letmedoit.utils.terminal_mode_dialogs import TerminalModeDialogs
15 | from pathlib import Path
16 | from base64 import b64decode
17 | from urllib.parse import quote
18 |
def modify_images(function_args):
    """Describe each given image with the vision model, then regenerate it via
    create_image() with the requested changes appended to the description.

    Returns "[INVALID]" when no files were given or when the single requested
    image failed; otherwise "".
    """
    changes = function_args.get("changes") # required
    files = function_args.get("files") # required
    #print(files)
    # Normalize the 'files' argument into a Python list.
    # NOTE(review): eval() on model-provided text is unsafe — ast.literal_eval
    # would be the safer equivalent here.
    if isinstance(files, str):
        if not files.startswith("["):
            files = f'["{files}"]'
        files = eval(files)
    if not files:
        return "[INVALID]"

    # Expand any directory entries into the files they contain (recursively),
    # iterating over a copy so the original list can be mutated.
    filesCopy = files[:]
    for item in filesCopy:
        if os.path.isdir(item):
            for root, _, allfiles in os.walk(item):
                for file in allfiles:
                    file_path = os.path.join(root, file)
                    files.append(file_path)
            files.remove(item)

    for i in files:
        description, filename = get_description(i)
        if description:
            if changes:
                description = f"""Description of the original image:
{description}

Make the following changes:
{changes}"""
            else:
                description = f"Image description:\n{description}"
            if config.developer:
                config.print(description)
            response = create_image(description, filename)
            # Abort early only when a single image was requested and it failed.
            if response == "[INVALID]" and len(files) == 1:
                return response
    return ""
56 |
@check_openai_errors
def get_description(filename):
    """Return (description, filename) for an image URL or local image file using
    the GPT-4 vision model; ("", "") when the path is not a valid image."""
    content = []
    # validate image path
    if SharedUtil.is_valid_image_url(filename):
        content.append({"type": "image_url", "image_url": {"url": filename,},})
        # URL-encode so the URL can be reused as a local file name later.
        filename = quote(filename, safe="")
    elif SharedUtil.is_valid_image_file(filename):
        content.append({"type": "image_url", "image_url": SharedUtil.encode_image(filename),})

    if content:
        content.insert(0, {"type": "text", "text": "Describe this image in as much detail as possible, including color patterns, positions and orientations of all objects and backgrounds in the image",})

        response = OpenAI().chat.completions.create(
            model="gpt-4-vision-preview",
            messages=[
                {
                    "role": "user",
                    "content": content,
                }
            ],
            max_tokens=4096,
        )
        answer = response.choices[0].message.content
        #print(answer)
        return (answer, filename)

    return ("", "")
85 |
@check_openai_errors
def create_image(description, original_filename):
    """Generate a replacement image from `description` with DALL-E 3 and save it
    as '<original>_modified.png' next to the original, then open it.

    Returns "[INVALID]" when the user cancels either dialog, otherwise "".
    """
    basename = os.path.basename(original_filename)
    title = f"Modifying '{basename}' ..."
    dialogs = TerminalModeDialogs(None)
    # size selection
    options = ("1024x1024", "1024x1792", "1792x1024")
    size = dialogs.getValidOptions(
        options=options,
        title=title,
        default="1024x1024",
        text="Select size below:"
    )
    if not size:
        config.stopSpinning()
        return "[INVALID]"
    # quality selection
    options = ("standard", "hd")
    quality = dialogs.getValidOptions(
        options=options,
        title=title,
        default="hd",
        text="Select quality below:"
    )
    if not quality:
        config.stopSpinning()
        return "[INVALID]"

    # get responses
    #https://platform.openai.com/docs/guides/images/introduction
    response = OpenAI().images.generate(
        model="dall-e-3",
        prompt=f"I NEED to test how the tool works with extremely simple prompts. DO NOT add any detail, just use it AS-IS:\n{description}",
        size=size,
        quality=quality, # "hd" or "standard"
        response_format="b64_json",
        n=1,
    )
    # open image
    #imageUrl = response.data[0].url
    #jsonFile = os.path.join(config.letMeDoItAIFolder, "temp", "openai_image.json")
    #with open(jsonFile, mode="w", encoding="utf-8") as fileObj:
    #    json.dump(response.data[0].b64_json, fileObj)
    image_data = b64decode(response.data[0].b64_json)
    imageFile = f"{original_filename}_modified.png"
    with open(imageFile, mode="wb") as pngObj:
        pngObj.write(image_data)
    config.stopSpinning()
    # Share via Termux on Android; otherwise open with the platform's opener.
    if config.terminalEnableTermuxAPI:
        SharedUtil.getCliOutput(f"termux-share {imageFile}")
    else:
        os.system(f"{config.open} {imageFile}")

    # NOTE(review): stopSpinning was already called above — this call is redundant.
    config.stopSpinning()
    return ""
141 |
# Function signature advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "change files",
    ],
    "examples": [
        "Modify image",
    ],
    "name": "modify_images",
    "description": "modify the images that I provide",
    "parameters": {
        "type": "object",
        "properties": {
            "changes": {
                "type": "string",
                "description": "The requested changes in as much detail as possible. Return an empty string '' if changes are not specified.",
            },
            "files": {
                "type": "string",
                "description": """Return a list of image paths, e.g. '["image1.png", "/tmp/image2.png"]'. Return '[]' if image path is not provided.""",
            },
        },
        "required": ["changes", "files"],
    },
}

# Register the signature and method with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=modify_images)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/open web browser.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - open web browser
3 |
4 | open a url with default web browser
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 |
# Function method
def open_browser(function_args):
    """Open the given URL with the default web browser."""
    url = function_args.get("url") # required
    if not url:
        return ""
    SharedUtil.openURL(url)
    return ""
18 |
# Function Signature — schema advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "access to internet real-time information",
    ],
    "examples": [
        "Open web browser",
        "Open website",
        "Open https://letmedoit.ai",
    ],
    "name": "open_browser",
    "description": f'''Open an url with default web browser''',
    "parameters": {
        "type": "object",
        "properties": {
            "url": {
                "type": "string",
                "description": "The url",
            },
        },
        "required": ["url"],
    },
}

# Integrate the signature and method into LetMeDoIt AI
config.addFunctionCall(signature=functionSignature, method=open_browser)
config.inputSuggestions.append("Open url: ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/pronounce words.py:
--------------------------------------------------------------------------------
1 | """
LetMeDoIt AI Plugin - pronounce words

pronounce words
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
# Ensure gTTS is available, installing/upgrading it on the fly when missing.
try:
    from gtts import gTTS
except:
    from letmedoit.utils.install import installmodule
    installmodule(f"--upgrade gTTS")

from letmedoit import config
from letmedoit.utils.tts_utils import TTSUtil


# Re-import after the on-demand installation above; fails loudly if it failed.
from gtts import gTTS
20 |
def pronunce_words(function_args):
    """Pronounce the requested words in the requested language via the TTS engine."""
    config.print("Loading speech feature ...")
    TTSUtil.play(function_args.get("words"), function_args.get("language"))
    return "Finished! Speech engine closed!"
27 |
# Function signature advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "interact with user",
    ],
    "examples": [
        "Pronunce",
    ],
    "name": "pronunce_words",
    "description": "pronounce words or sentences",
    "parameters": {
        "type": "object",
        "properties": {
            "words": {
                "type": "string",
                "description": "Words to be pronounced",
            },
            "language": {
                "type": "string",
                "description": "Language of the words",
                "enum": config.ttsLanguages,
            },
        },
        "required": ["words", "language"],
    },
}

# Register the signature/method and add a prompt suggestion.
config.addFunctionCall(signature=functionSignature, method=pronunce_words)
config.inputSuggestions.append("pronunce ")

56 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/remove image background.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - remove image background
3 |
4 | Remove image background
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | import os, json, rembg
12 |
13 |
def remove_image_background(function_args):
    """Strip the background from each given image with rembg, writing *_no_bg.png copies.

    Returns "[INVALID]" when no files are given, otherwise "" — the plugin
    framework expects a string response (previously this fell off the end and
    returned None).
    """
    files = function_args.get("files") # required
    # Normalize the 'files' argument into a Python list.
    # NOTE(review): eval() on model-provided text is unsafe — ast.literal_eval
    # would be the safer equivalent here.
    if isinstance(files, str):
        if not files.startswith("["):
            files = f'["{files}"]'
        files = eval(files)
    if not files:
        return "[INVALID]"

    # Expand any directory entries into the files they contain (recursively).
    filesCopy = files[:]
    for item in filesCopy:
        if os.path.isdir(item):
            for root, _, allfiles in os.walk(item):
                for file in allfiles:
                    file_path = os.path.join(root, file)
                    files.append(file_path)
            files.remove(item)

    for input_path in files:
        if SharedUtil.is_valid_image_file(input_path):
            output_path = f"{input_path}_no_bg.png"
            with open(input_path, 'rb') as i:
                with open(output_path, 'wb') as o:
                    config.print3(f"Reading image file: {input_path}")
                    img = rembg.remove(i.read())
                    o.write(img)
            config.print3(f"File saved at: {output_path}")
        else:
            config.print2(f"'{input_path}' is not an image file!")
    # BUG FIX: return a string like every sibling plugin instead of None.
    return ""
43 |
def remove_image_background2(function_args):
    """Alternative implementation: run generated code, then open the produced file."""
    information = SharedUtil.showAndExecutePythonCode(function_args.get("code"))
    if not information:
        return ""
    filepath = json.loads(information)["information"]
    if os.path.isfile(filepath):
        config.print3(f"File saved at: {filepath}")
        try:
            os.system(f'''{config.open} "{filepath}"''')
        except:
            pass
    return ""
56 |
# Function signature advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "change files",
    ],
    "examples": [
        "Remove image background",
    ],
    "name": "remove_image_background",
    "description": f'''Remove image background''',
    "parameters": {
        "type": "object",
        "properties": {
            "files": {
                "type": "string",
                "description": """Return a list of image paths, e.g. '["image1.png", "/tmp/image2.png"]'. Return '[]' if image path is not provided.""",
            },
        },
        "required": ["files"],
    },
}

# Register the signature and method with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=remove_image_background)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/search financial data.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - search financial data
3 |
4 | search financial data with yfinance
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | import json
12 |
def search_finance(function_args):
    """Run generated yfinance code and wrap its output as a JSON string ('' when none)."""
    information = SharedUtil.showAndExecutePythonCode(function_args.get("code"))
    if not information:
        return ""
    return json.dumps({"information": information})
22 |
# Function signature advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "access to internet real-time information",
    ],
    "examples": [
        "Check stock price",
    ],
    "name": "search_finance",
    "description": f'''Search or analyze financial data. Use this function ONLY WHEN package yfinance is useful to resolve my request''',
    "parameters": {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "Python code that integrates package yfinance to resolve my request. Integrate package matplotlib to visualize data, if applicable.",
            },
        },
        "required": ["code"],
    },
}

# Register the signature and method with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=search_finance)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/search latest news.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - search latest news
3 |
4 | search latest news
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | import feedparser, re
11 |
# Function method to get the latest news from a specific RSS feed
def search_latest_news(function_args: dict) -> str:
    """Fetch Google News RSS results for the given keywords and print the top 10.

    Fixes: default "" for a missing "keywords" argument (was an AttributeError),
    and the loop now only walks the first 10 entries instead of the whole feed.
    """
    keywords = function_args.get("keywords", "").replace(" ", "+")
    feed_url = f"https://news.google.com/rss/search?q={keywords}&hl=en-US&gl=US&ceid=US:en"
    feed = feedparser.parse(feed_url)

    # Print the title and link of each news item
    config.stopSpinning()
    config.print2(config.divider)
    for index, entry in enumerate(feed.entries[:10]):
        if not index == 0:
            config.print2(config.divider)
        # title (strip any HTML tags)
        title = re.sub("<[^<>]*?>", "", entry.title)
        config.print3(f"Title: {title}")
        # link (strip any HTML tags)
        link = re.sub("<[^<>]*?>", "", entry.link)
        config.print3(f"Link: {link}")
    config.print2(config.divider)
    return ""
33 |
# Function signature to work with ChatGPT function calling
functionSignature = {
    "intent": [
        "access to internet real-time information",
    ],
    "examples": [
        "Tell me latest news about",
    ],
    "name": "search_latest_news",
    "description": "Search the latest news with given keywords",
    "parameters": {
        "type": "object",
        "properties": {
            "keywords": {
                "type": "string",
                "description": "The keywords for searching the latest news, delimited by plus sign '+'. For example, return 'London+UK' if keywords are 'London' and 'UK'.",
            },
        },
        "required": ["keywords"],
    },
}

# The following line integrate the function method and signature into LetMeDoIt AI
config.addFunctionCall(signature=functionSignature, method=search_latest_news)

# The following line is optional. It adds an input suggestion to LetMeDoIt AI user input prompt
config.inputSuggestions.append("Tell me the latest news about ")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/search sqlite.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - ask sqlite
3 |
4 | Ask SQLite file. To retrieve information from or make changes in a sqlite file, e.g. fetch data, update records, etc.
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | import os, sqlite3, json, pprint
12 |
def search_sqlite(function_args):
    """Answer a request against a SQLite file: collect every table's schema, then
    either return the schemas directly ('describe tables') or delegate to
    generated Python code for the actual request.

    Returns "[INVALID]" when the path is not a file or on any error.
    """
    db = function_args.get("path") # required
    request = function_args.get("request") # required
    if not os.path.isfile(db):
        return "[INVALID]"
    try:
        info = {}
        config.print2("Reading table information ...")
        with sqlite3.connect(db) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
            tables = cursor.fetchall()
            for table in tables:
                table_name = table[0]
                # NOTE(review): table_name is interpolated into the PRAGMA; names
                # come from the file's own sqlite_master, but quoting would be safer.
                cursor.execute(f"PRAGMA table_info({table_name})")
                rows = cursor.fetchall()
                columns = [i[1] for i in rows]
                #cursor.execute(f"SELECT * FROM {table_name} LIMIT 1")
                #example = cursor.fetchone()
                info[table_name] = {
                    "name": table_name,
                    "schema": rows,
                    "columns labels": columns,
                }
                """if example:
                    info[table_name] = {
                        "table name": table_name,
                        "table schema": rows,
                        "data row example": dict(zip(columns, example)),
                    }
                else:
                    info[table_name] = {
                        "table name": table_name,
                        "table schema": rows,
                    }"""
            #if config.developer:
            #    config.print2("# Table information")
            #    pprint.pprint(info)
            info = json.dumps(info)

        # Schema-only request: return the table information as-is.
        if "describe tables" in request.lower():
            return info

        # Otherwise let the code-execution function resolve the request, with
        # the table information supplied as context.
        userInput = f"""Connect this sqlite file: sqlite file: {db}

And run python code to resolve my request: {request}

Please consider individual table information below for code generation:
{info}"""
        _, function_call_response = SharedUtil.getSingleFunctionResponse(userInput, [config.chatGPTApiFunctionSignatures["execute_python_code"]], "execute_python_code")
        return function_call_response
    except:
        SharedUtil.showErrors()
        return "[INVALID]"
67 |
# Function signature advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "database access",
        "analyze files",
    ],
    "examples": [
        "Connect to SQLite file",
        "Search SQLite file",
    ],
    "name": "search_sqlite",
    "description": f'''Ask SQLite file. To retrieve information from or make changes in a sqlite file, e.g. fetch data, update records, etc. Remember, use this function ONLY IF I provide you with a sqlite file path.''',
    "parameters": {
        "type": "object",
        "properties": {
            "path": {
                "type": "string",
                "description": "File path of the sqlite file",
            },
            "request": {
                "type": "string",
                "description": "The request about fetching data or making changes in the sqlite file, including all available supplementary information in detail, if any. If there is no specific request apart from connection or query about table schema / information, return 'Describe tables' without extra comment or information.",
            },
        },
        # BUG FIX: the required list previously named "code", a parameter that
        # does not exist in this schema; search_sqlite reads "path" and "request".
        "required": ["path", "request"],
    },
}
94 |
# Register the signature/method and add a ready-made prompt suggestion.
config.addFunctionCall(signature=functionSignature, method=search_sqlite)

config.inputSuggestions.append("""Connect the following SQLite file and tell me about the tables that it contains:
[CALL_search_sqlite]
\n""")
100 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/search weather info.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - search weather info
3 |
4 | search for weather information
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
# NOTE(review): previously `config` was used on the first line while
# `from letmedoit import config` sat inside the `if` branch below it — that
# only works when the plugin is executed in a namespace where `config` already
# exists. Hoisting the imports makes the module self-contained.
from letmedoit import config
from letmedoit.utils.shared_utils import SharedUtil
import json

# Prompt for an OpenWeatherMap API key if one is not configured yet.
if not config.openweathermapApi:
    config.changeOpenweathermapApi()

if config.openweathermapApi:

    def search_weather_info(function_args):
        """Run generated Python code that queries OpenWeatherMap and return the printed result."""
        code = function_args.get("code") # required
        information = SharedUtil.showAndExecutePythonCode(code)
        if information:
            return json.loads(information)["information"]
        return "Not found!"

    # Function signature advertised to the LLM for function calling.
    functionSignature = {
        "intent": [
            "access to internet real-time information",
        ],
        "examples": [
            "What's the current weather",
        ],
        "name": "search_weather_info",
        "description": f'''Answer query about weather''',
        "parameters": {
            "type": "object",
            "properties": {
                "code": {
                    "type": "string",
                    "description": f"""Python code that use my OpenWeatherMap API key '{config.openweathermapApi}' to resolve my request.
To work with OpenWeatherMap API key, you may integrate python package geocoder in the code to find the required Latitude and Longitude.
In the last line of your code, use 'print' function to print the requested information, without additional description or comment.""",
                },
            },
            "required": ["code"],
        },
    }

    config.addFunctionCall(signature=functionSignature, method=search_weather_info)
else:
    config.print("To use plugin 'search weather info', you need to set up an OpenWeatherMap API key first.")
    config.print3("Read: https://github.com/eliranwong/letmedoit/wiki/OpenWeatherMap-API-Setup")
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/send email.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - send emails
3 |
4 | send google or outlook emails
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | import urllib.parse
12 |
13 | """
14 | # Information
15 |
16 | To send an email using a single URL via Google Mail, you can use the following format:
17 |
https://mail.google.com/mail/?view=cm&fs=1&to=Recipient&su=Subject&body=Body
19 |
20 | You need to replace the parameters with the values you want, such as:
21 |
22 | Recipient: The email address of the person you want to send the email to (URL encoded format).
23 | Subject: The subject line of the email (URL encoded format).
24 | Body: The content of the email (URL encoded format).
25 | For example, if you want to send an email with the following details:
26 |
27 | Recipient: john.doe@example.com
28 | Subject: Hello
29 | Body: How are you?
30 | You can use this URL:
31 |
32 | https://mail.google.com/mail/?view=cm&fs=1&to=john.doe%40example.com&su=Hello&body=How%20are%20you%3F
33 |
34 | When you click on this URL, it will open a new window in Google Mail and fill in the email details for you. You can then send or edit the email as you wish.
35 | """
36 |
37 | """
38 | To send an email using a single URL via Microsoft Outlook web version, you can use the following format:
39 |
40 | https://outlook.office.com/owa/?path=/mail/action/compose
41 | &to=Recipient
&subject=Subject
43 | &body=Body
44 |
45 | You need to replace the parameters with the values you want, such as:
46 |
47 | Recipient: The email address of the person you want to send the email to (URL encoded format).
48 | Subject: The subject line of the email (URL encoded format).
49 | Body: The content of the email (URL encoded format).
50 | For example, if you want to send an email with the following details:
51 |
52 | Recipient: john.doe@example.com
53 | Subject: Hello
54 | Body: How are you?
55 | You can use this URL:
56 |
57 | https://outlook.office.com/owa/?path=/mail/action/compose&to=john.doe%40example.com&subject=Hello&body=How%20are%20you%3F
58 |
59 | When you click on this URL, it will open a new window in Outlook web app and fill in the email details for you. You can then send or edit the email as you wish.
60 | """
61 |
def send_email(function_args):
    """Open the user's webmail compose window (Gmail or Outlook web) pre-filled
    with recipient, subject, and body from the function-call arguments."""
    email = function_args.get("email") # required: 'gmail' or 'outlook'
    recipient = function_args.get("recipient", "") # optional
    subject = function_args.get("subject") # required
    body = function_args.get("body", "") # optional

    # URL-encode every query-string component. BUG FIX: the recipient was
    # previously inserted raw, although the compose-URL formats above require
    # it URL-encoded (e.g. '@' -> '%40').
    recipient = urllib.parse.quote(recipient)
    subject = urllib.parse.quote(subject)
    body = urllib.parse.quote(body)

    def getGoogleLink():
        # Gmail compose URL; note Gmail uses 'su' for the subject parameter.
        link = "https://mail.google.com/mail/?view=cm&fs=1"
        if recipient:
            link += f"&to={recipient}"
        if subject:
            link += f"&su={subject}"
        if body:
            link += f"&body={body}"
        return link

    def getOutlookLink():
        # Outlook web compose URL; Outlook uses 'subject' for the subject parameter.
        link = "https://outlook.office.com/owa/?path=/mail/action/compose"
        if recipient:
            link += f"&to={recipient}"
        if subject:
            link += f"&subject={subject}"
        if body:
            link += f"&body={body}"
        return link

    SharedUtil.openURL(getOutlookLink() if email == "outlook" else getGoogleLink())

    return "Done!"
94 |
# Function signature advertised to the LLM for function calling.
functionSignature = {
    "intent": [
        "arrange activities",
        "access to internet real-time information",
    ],
    "examples": [
        "Send email",
    ],
    "name": "send_email",
    "description": "send email",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email application. Return 'gmail' if not given.",
                "enum": ['gmail', 'outlook'],
            },
            "recipient": {
                "type": "string",
                "description": "The recipient of the email.",
            },
            "subject": {
                "type": "string",
                "description": "Give a title to the email.",
            },
            "body": {
                "type": "string",
                "description": "The body or content of the email.",
            },
        },
        "required": ["email", "subject"],
    },
}

# Register the signature and method with LetMeDoIt AI.
config.addFunctionCall(signature=functionSignature, method=send_email)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/send tweet.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - send a tweet to twitter
3 |
4 | send a tweet to twitter
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | from letmedoit.utils.shared_utils import SharedUtil
11 | import urllib.parse
12 |
def send_tweet(function_args):
    """Open the Twitter web intent page pre-filled with the given message."""
    tweet_text = function_args.get("message") # required
    config.stopSpinning()
    if not tweet_text:
        return ""
    SharedUtil.openURL(f"https://twitter.com/intent/tweet?text={urllib.parse.quote(tweet_text)}")
    return ""
19 |
# JSON schema advertising the send_tweet tool to the LLM function-calling layer
functionSignature = {
    "intent": [
        "social media",
        "access to internet real-time information",
    ],
    "examples": [
        "Send a tweet",
    ],
    "name": "send_tweet",
    "description": f'''Send a tweet to twitter''',
    "parameters": {
        "type": "object",
        "properties": {
            "message": {
                "type": "string",
                "description": "The message that is to be sent to twitter",
            },
        },
        "required": ["message"],
    },
}

# register this tool with the application's function-call dispatcher
config.addFunctionCall(signature=functionSignature, method=send_tweet)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/send whatsapp messages.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - send whatsapp messages
3 |
4 | send whatsapp messages
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 | from letmedoit import config
10 | import re, pywhatkit
11 |
def send_whatsapp(function_args):
    """Send a WhatsApp message via pywhatkit.

    Args:
        function_args: dict with "recipient" (phone number or group name) and
            "message" (text to send), both required.

    Returns:
        "Done!" after dispatching the message.
    """
    recipient = function_args.get("recipient") # required
    message = function_args.get("message") # required
    config.stopSpinning()
    # raw string fixes the invalid escape sequences (\+, \(, \)) of the
    # original pattern; escaping is unnecessary inside a character class and
    # the lazy +? was redundant between ^...$ anchors — semantics unchanged
    if re.search(r"^[+()0-9]+$", recipient):
        # looks like a phone number
        pywhatkit.sendwhatmsg_instantly(recipient, message)
    else:
        # otherwise treat the recipient as a group name
        pywhatkit.sendwhatmsg_to_group_instantly(recipient, message)
    return "Done!"
21 |
# JSON schema advertising the send_whatsapp tool to the LLM function-calling layer
functionSignature = {
    "intent": [
        "arrange activities",
        "access to internet real-time information",
    ],
    "examples": [
        "Send WhatsApp",
    ],
    "name": "send_whatsapp",
    "description": f'''Send WhatsApp messages''',
    "parameters": {
        "type": "object",
        "properties": {
            "recipient": {
                "type": "string",
                "description": "Recipient's phone number or group name. Phone number is preferred. Figure out the group name only if phone number is not provided.",
            },
            "message": {
                "type": "string",
                "description": "The message that is to be sent to the recipient",
            },
        },
        "required": ["recipient", "message"],
    },
}

# register this tool with the application's function-call dispatcher
config.addFunctionCall(signature=functionSignature, method=send_whatsapp)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/simplified Chinese to traditional Chinese.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - convert simplified Chinese into traditional Chinese
3 |
4 | Convert simplified Chinese into traditional Chinese in text output
5 | """
6 |
7 | try:
8 | from opencc import OpenCC
9 | except:
10 | from letmedoit.utils.install import installmodule
11 | installmodule(f"--upgrade opencc")
12 |
13 | from letmedoit import config
14 | from opencc import OpenCC
15 |
def convertToTraditionalChinese(text):
    """Output transformer: convert simplified Chinese text to traditional
    Chinese; falsy input (None / empty string) is returned unchanged."""
    return OpenCC('s2t').convert(text) if text else text

# apply to every piece of text output the application produces
config.outputTransformers.append(convertToTraditionalChinese)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/plugins/solve math problems.py:
--------------------------------------------------------------------------------
1 | """
2 | LetMeDoIt AI Plugin - solve math problems
3 |
4 | solve math problems with integrated "AutoGen Math Solver"
5 |
6 | [FUNCTION_CALL]
7 | """
8 |
9 |
10 | from letmedoit import config
11 | from letmedoit.automath import AutoGenMath
12 |
def solve_math(function_args):
    """Delegate a math problem to the integrated AutoGen Math Solver and
    append its conversation to the current message history."""
    problem = function_args.get("query") # required
    config.stopSpinning()
    config.print2("AutoGen Math Solver launched!")
    config.currentMessages += AutoGenMath().getResponse(problem)
    config.print2("AutoGen Math Solver closed!")
    return ""
21 |
# JSON schema advertising the solve_math tool to the LLM function-calling layer
functionSignature = {
    "intent": [
        "resolve a math question",
    ],
    "examples": [
        "Calculate",
    ],
    "name": "solve_math",
    "description": "solve math problems",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Math problem in detail",
            },
        },
        "required": ["query"],
    },
}

# register this tool with the application's function-call dispatcher
config.addFunctionCall(signature=functionSignature, method=solve_math)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/qt.py:
--------------------------------------------------------------------------------
import os
thisFile = os.path.realpath(__file__)
packageFolder = os.path.dirname(thisFile)
package = os.path.basename(packageFolder)
# run from the package folder so relative resource paths resolve
if os.getcwd() != packageFolder:
    os.chdir(packageFolder)

from letmedoit import config
# simplified from `True if ... else False`: os.path.isdir already returns a bool
config.isTermux = os.path.isdir("/data/data/com.termux/files/home")
config.letMeDoItAIFolder = packageFolder
# map package folder name -> (short name, display name)
apps = {
    "myhand": ("MyHand", "MyHand Bot"),
    "letmedoit": ("LetMeDoIt", "LetMeDoIt AI"),
    "taskwiz": ("TaskWiz", "TaskWiz AI"),
    "cybertask": ("CyberTask", "CyberTask AI"),
}
if not hasattr(config, "letMeDoItName") or not config.letMeDoItName:
    config.letMeDoItName = apps[package][-1] if package in apps else "LetMeDoIt AI"
from letmedoit.utils.config_tools import setConfig
config.setConfig = setConfig
## alternative to include config restoration method
#from letmedoit.utils.config_tools import *
from letmedoit.utils.shared_utils import SharedUtil
config.includeIpInSystemMessageTemp = True
config.getLocalStorage = SharedUtil.getLocalStorage
# GUI mode: route all print helpers straight to the builtin print
config.print = config.print2 = config.print3 = print
config.addFunctionCall = SharedUtil.addFunctionCall
config.divider = "--------------------"
SharedUtil.setOsOpenCmd()
30 |
31 | import sys
32 | from letmedoit.gui.chatgui import ChatGui
33 | from PySide6.QtWidgets import QApplication
34 |
def main():
    """Entry point: start the Qt application and show the standalone chat GUI."""
    qt_app = QApplication(sys.argv)
    gui = ChatGui(standalone=True)
    gui.show()
    sys.exit(qt_app.exec())

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/requirements.txt:
--------------------------------------------------------------------------------
1 | openai==1.19.0
2 | groq==0.5.0
3 | requests
4 | argparse
5 | pendulum
6 | folium
7 | seaborn[stats]
8 | sympy
9 | numpy
10 | prompt_toolkit
11 | Pygments
12 | datetime
13 | netifaces
14 | geocoder
15 | googlesearch-python
16 | art
17 | apsw
18 | gTTS
19 | google-cloud-speech
20 | google-cloud-texttospeech
21 | google-cloud-aiplatform==1.47.0
22 | pywhatkit
23 | yt-dlp
24 | rembg
25 | qrcode
26 | pyperclip
27 | colorama
28 | pillow
29 | docker
30 | einops
31 | transformers==4.40.1
32 | torch==2.2.2
33 | torchvision==0.17.2
34 | sentence-transformers
35 | chromadb==0.4.24
36 | unstructured[all-docs]
37 | pyautogen[retrievechat,autobuild]==0.2.24
38 | autogenstudio==0.0.56
39 | tiktoken
40 | pygame
41 | PySide6
42 | feedparser
43 | html2text
44 | pypdf
45 | PyMuPDF
46 | yfinance
47 | setuptools-rust
48 | SpeechRecognition
49 | openai-whisper
50 | soundfile==0.12.1
51 | sounddevice==0.4.6
52 | elevenlabs==1.0.3
53 | ollama==0.1.8
54 | llama-cpp-python[server]==0.2.61
55 | huggingface-hub
56 | langchain==0.1.13
57 | langchain_openai==0.1.3
58 | pydub
59 | stable-diffusion-cpp-python
60 | pytz
61 | geopy
62 | guidance==0.1.13
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/temp/Readme.md:
--------------------------------------------------------------------------------
Temporary materials are located here.
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/utils/config_tools.py:
--------------------------------------------------------------------------------
1 | from letmedoit import config
2 | import pprint, re, os, shutil
3 | from letmedoit.utils.config_essential import defaultSettings
4 | from letmedoit.utils.shared_utils import SharedUtil
5 | from prompt_toolkit.shortcuts import yes_no_dialog
6 |
def loadConfig(configPath):
    """Load a plain-settings file and apply every top-level assignment onto
    the shared `config` module.

    The file is expected to contain lines such as `someSetting = "value"`;
    each line beginning with a letter is rewritten to `config.someSetting = ...`
    before being executed.
    """
    with open(configPath, "r", encoding="utf-8") as fileObj:
        configs = fileObj.read()
    # prefix every top-level name with "config." so assignments land on the
    # shared config module (re.M makes ^ match at every line start)
    # NOTE(review): exec runs arbitrary code from the file; the path is a
    # local, user-owned config/backup file here
    configs = "from letmedoit import config\n" + re.sub("^([A-Za-z])", r"config.\1", configs, flags=re.M)
    exec(configs, globals())
config.loadConfig = loadConfig
13 |
def setConfig(defaultSettings, thisTranslation={}, temporary=False):
    """Apply default settings and translations to the shared config module.

    Args:
        defaultSettings: iterable of (key, value) pairs; each key is set on
            `config` only when the attribute does not already exist.
        thisTranslation: extra translation entries merged into
            config.thisTranslation without overwriting existing keys.
        temporary: when True, newly added keys are recorded in
            config.excludeConfigList so they are not persisted.
    """
    for key, value in defaultSettings:
        if not hasattr(config, key):
            # setattr replaces the original pprint.pformat + exec round-trip:
            # equivalent for repr-able default values, and it also works for
            # values whose repr cannot be re-evaluated
            setattr(config, key, value)
            if temporary:
                config.excludeConfigList.append(key)
    if thisTranslation:
        for i in thisTranslation:
            if not i in config.thisTranslation:
                config.thisTranslation[i] = thisTranslation[i]
config.setConfig = setConfig
26 |
# Module-level startup: restore a config backup on fresh installs/upgrades
# and resolve the Google Cloud credentials file.
storageDir = SharedUtil.getLocalStorage()
if os.path.isdir(storageDir):
    configFile = os.path.join(config.letMeDoItAIFolder, "config.py")
    if os.path.getsize(configFile) == 0:
        # It means that it is either a newly installed copy or an upgraded copy

        # delete old shortcut files so that newer versions of shortcuts can be created
        appName = config.letMeDoItName.split()[0]
        shortcutFiles = (f"{appName}.bat", f"{appName}.command", f"{appName}.desktop", f"{appName}Tray.bat", f"{appName}Tray.command", f"{appName}Tray.desktop")
        for shortcutFile in shortcutFiles:
            shortcut = os.path.join(config.letMeDoItAIFolder, shortcutFile)
            if os.path.isfile(shortcut):
                os.remove(shortcut)
        # delete system tray shortcuts
        shortcut_dir = os.path.join(config.letMeDoItAIFolder, "shortcuts")
        shutil.rmtree(shortcut_dir, ignore_errors=True)

        # check if config backup is available
        backupFile = os.path.join(storageDir, "config_backup.py")
        if os.path.isfile(backupFile):
            # ask the user before restoring the saved configuration
            restore_backup = yes_no_dialog(
                title="Configuration Backup Found",
                text=f"Do you want to use the following backup?\n{backupFile}"
            ).run()
            if restore_backup:
                try:
                    loadConfig(backupFile)
                    shutil.copy(backupFile, configFile)
                    print("Configuration backup restored!")
                    #config.restartApp()
                except:
                    print("Failed to restore backup!")
# fill in any settings still missing after the optional restore
setConfig(defaultSettings)
# Google Credentials
# set required file

config.google_cloud_credentials_file = os.path.join(storageDir, "credentials_google_cloud.json")
if config.google_cloud_credentials and os.path.isfile(config.google_cloud_credentials):
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = config.google_cloud_credentials
else:
    # fall back to legacy credential file locations, in order of preference
    gccfile2 = os.path.join(storageDir, "credentials_googleaistudio.json")
    gccfile3 = os.path.join(storageDir, "credentials_googletts.json")

    if os.path.isfile(config.google_cloud_credentials_file):
        config.google_cloud_credentials = config.google_cloud_credentials_file
    elif os.path.isfile(gccfile2):
        config.google_cloud_credentials = gccfile2
    elif os.path.isfile(gccfile3):
        config.google_cloud_credentials = gccfile3
    else:
        config.google_cloud_credentials = ""
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = config.google_cloud_credentials if config.google_cloud_credentials else ""
79 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/utils/file_utils.py:
--------------------------------------------------------------------------------
1 | import os, glob
2 |
3 |
class FileUtil:
    """File-name helpers."""

    @staticmethod
    def fileNamesWithoutExtension(dir, ext):
        """Return the sorted base names (extension stripped) of all files in
        *dir* that end with ".ext".

        Implemented with string slicing because pathlib.Path(...).stem
        mishandles names containing more than one dot, e.g. "*.db.sqlite".
        """
        pattern = os.path.join(dir, "*.{0}".format(ext))
        names = []
        for path in glob.glob(pattern):
            if os.path.isfile(path):
                names.append(path[len(dir) + 1:-(len(ext) + 1)])
        return sorted(names)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/utils/install.py:
--------------------------------------------------------------------------------
1 | import subprocess, re, sys
2 |
def installmodule(module, update=True):
    """Install or upgrade a pip package for the current interpreter.

    Args:
        module: pip argument string, e.g. "requests" or "--upgrade opencc".
        update: when True, upgrade the pip tool itself once per session first.

    Returns:
        True when the install command ran (pip available), False when pip is
        missing or the install attempt raised an exception.
    """
    isInstalled, _ = subprocess.Popen("pip -V", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    pipInstallCommand = f"{sys.executable} -m pip install"

    if isInstalled:
        # imported here, not only under `if update:`, because config.developer
        # is also read in the install path below — the original raised
        # NameError there when called with update=False
        from letmedoit import config

        if update:
            if not config.pipIsUpdated:
                pipFailedUpdated = "pip tool failed to be updated!"
                try:
                    # update pip tool in case it is too old
                    updatePip = subprocess.Popen(f"{pipInstallCommand} --upgrade pip", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    *_, stderr = updatePip.communicate()
                    if not stderr:
                        print("pip tool updated!")
                    else:
                        print(pipFailedUpdated)
                except Exception:
                    print(pipFailedUpdated)
                # only attempt the pip self-upgrade once per session
                config.pipIsUpdated = True
        try:
            upgrade = (module.startswith("-U ") or module.startswith("--upgrade "))
            if upgrade:
                # strip the leading "-U " / "--upgrade " to get the bare name
                moduleName = re.sub("^[^ ]+? (.+?)$", r"\1", module)
            else:
                moduleName = module
            print(f"{'Upgrading' if upgrade else 'Installing'} '{moduleName}' ...")
            installNewModule = subprocess.Popen(f"{pipInstallCommand} {module}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            *_, stderr = installNewModule.communicate()
            if not stderr:
                print(f"Package '{moduleName}' {'upgraded' if upgrade else 'installed'}!")
            else:
                print(f"Failed {'upgrading' if upgrade else 'installing'} package '{moduleName}'!")
                if config.developer:
                    print(stderr)
            return True
        except Exception:
            return False

    else:

        print("pip command is not found!")
        return False
53 |
54 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/utils/ollama_models.py:
--------------------------------------------------------------------------------
# Known Ollama model tags selectable in the app; entries are either a bare
# model name or "name:size/variant" as published in the Ollama library.
ollama_models = (
    "gemma",
    "gemma:2b",
    "gemma:7b",
    "llama2",
    "llama2:7b",
    "llama2:13b",
    "llama2:70b",
    "mistral",
    "mistral:7b",
    "mixtral",
    "mixtral:8x7b",
    "llava",
    "llava:7b",
    "llava:13b",
    "llava:34b",
    "neural-chat",
    "codellama",
    "codellama:7b",
    "codellama:13b",
    "codellama:34b",
    "codellama:70b",
    "codellama:7b-python",
    "codellama:13b-python",
    "codellama:34b-python",
    "codellama:70b-python",
    "dolphin-mixtral",
    "dolphin-mixtral:8x7b",
    "mistral-openorca",
    "mistral-openorca:7b",
    "llama2-uncensored",
    "llama2-uncensored:7b",
    "llama2-uncensored:70b",
    "orca-mini",
    "orca-mini:3b",
    "orca-mini:7b",
    "orca-mini:13b",
    "orca-mini:70b",
    "phi",
    "phi:2.7b",
    "deepseek-coder",
    "deepseek-coder:1.3b",
    "deepseek-coder:6.7b",
    "deepseek-coder:33b",
    "dolphin-mistral",
    "dolphin-mistral:7b",
    "vicuna",
    "vicuna:7b",
    "vicuna:13b",
    "vicuna:33b",
    "wizard-vicuna-uncensored",
    "wizard-vicuna-uncensored:7b",
    "wizard-vicuna-uncensored:13b",
    "wizard-vicuna-uncensored:30b",
    "zephyr",
    "openhermes",
    "wizardcoder",
    "wizardcoder:33b",
    "wizardcoder:python",
    "wizardcoder:7b-python",
    "wizardcoder:13b-python",
    "wizardcoder:34b-python",
    "phind-codellama",
    "llama2-chinese",
    "llama2-chinese:7b",
    "llama2-chinese:13b",
    "tinyllama",
    "openchat",
    "qwen",
    "qwen:0.5b",
    "qwen:1.8b",
    "qwen:4b",
    "qwen:7b",
    "qwen:14b",
    "qwen:72b",
    "orca2",
    "orca2:7b",
    "orca2:13b",
    "falcon",
    "falcon:7b",
    "falcon:40b",
    "falcon:180b",
    "wizard-math",
    "wizard-math:7b",
    "wizard-math:13b",
    "wizard-math:70b",
    "nous-hermes",
    "nous-hermes:7b",
    "nous-hermes:13b",
    "nous-hermes:70b",
    "dolphin-phi",
    "starling-lm",
    "starling-lm:7b",
    "codeup",
    "starcoder",
    "starcoder:1b",
    "starcoder:3b",
    "starcoder:7b",
    "starcoder:15b",
    "medllama2",
    "yi",
    "yi:6b",
    "yi:34b",
    "wizardlm-uncensored",
    "everythinglm",
    "bakllava",
    "stable-code",
    "stable-beluga",
    "stable-beluga:7b",
    "stable-beluga:13b",
    "stable-beluga:70b",
    "solar",
    "sqlcoder",
    "sqlcoder:7b",
    "sqlcoder:15b",
    "sqlcoder:70b",
    "tinydolphin",
    "yarn-mistral",
    "nous-hermes2-mixtral",
    "nous-hermes2-mixtral:dpo",
    "nous-hermes2-mixtral:8x7b",
    "samantha-mistral",
    "stablelm-zephyr",
    "wizard-vicuna",
    "meditron",
    "meditron:7b",
    "meditron:70b",
    "magicoder",
    "yarn-llama2",
    "yarn-llama2:7b",
    "yarn-llama2:13b",
    "stablelm2",
    "nous-hermes2",
    "nous-hermes2:10.7b",
    "nous-hermes2:34b",
    "deepseek-llm",
    "deepseek-llm:7b",
    "deepseek-llm:67b",
    "open-orca-platypus2",
    "llama-pro",
    "codebooga",
    "nexusraven",
    "mistrallite",
    "goliath",
    "notux",
    "alfred",
    "megadolphin",
    "wizardlm",
    "wizardlm:7b",
    "wizardlm:13b",
    "wizardlm:30b",
    "wizardlm:70b",
    "xwinlm",
    "xwinlm:7b",
    "xwinlm:13b",
    "xwinlm:70b",
    "notus",
    "duckdb-nsql",
    "nomic-embed-text",
    "all-minilm",
    "starcoder2",
    "starcoder2:3b",
    "starcoder2:7b",
    "starcoder2:15b",
    "dolphincoder",
)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/utils/promptValidator.py:
--------------------------------------------------------------------------------
1 | from letmedoit import config
2 | from letmedoit.utils.shared_utils import SharedUtil
3 | from prompt_toolkit.validation import Validator, ValidationError
4 | from prompt_toolkit.application import run_in_terminal
5 | import tiktoken
6 | import re
7 |
class TokenValidator(Validator):
    """Prompt validator that maintains a live token-count display in the
    toolbar and blocks input once the estimated total would leave fewer than
    config.chatGPTApiMinTokens for the model's response."""

    def validate(self, document):
        currentInput = document.text
        if not config.dynamicTokenCount or not currentInput or currentInput.lower() in (config.exit_entry, config.cancel_entry, ".new", ".share", ".save"):
            pass
        else:
            try:
                encoding = tiktoken.encoding_for_model(config.chatGPTApiModel)
            except Exception:
                # unknown model name; fall back to the common base encoding
                encoding = tiktoken.get_encoding("cl100k_base")
            # raw string fixes the invalid escape sequences (\[, \]) of the
            # original non-raw pattern; the regex itself is unchanged
            no_function_call_pattern = r"\[NO_FUNCTION_CALL\]|\[CHAT\]|\[CHAT_[^\[\]]+?\]"
            if re.search(no_function_call_pattern, currentInput):
                # function calling disabled for this input: exclude the
                # function-signature tokens and strip the markers
                availableFunctionTokens = 0
                currentInput = re.sub(no_function_call_pattern, "", currentInput)
            else:
                availableFunctionTokens = SharedUtil.count_tokens_from_functions(config.chatGPTApiFunctionSignatures)
            currentInputTokens = len(encoding.encode(config.fineTuneUserInput(currentInput)))
            loadedMessageTokens = SharedUtil.count_tokens_from_messages(config.currentMessages)
            selectedModelLimit = SharedUtil.tokenLimits[config.chatGPTApiModel]

            config.dynamicToolBarText = f""" Tokens: {(availableFunctionTokens + loadedMessageTokens + currentInputTokens)}/{selectedModelLimit} {str(config.hotkey_display_key_combo).replace("'", "")} shortcuts """
            if selectedModelLimit - (availableFunctionTokens + loadedMessageTokens + currentInputTokens) >= config.chatGPTApiMinTokens:
                pass
            else:
                run_in_terminal(lambda: print(f"""Press '{str(config.hotkey_new).replace("'", "")[1:-1]}' to start a new chat!"""))
                raise ValidationError(message='Token limit reached!', cursor_position=document.cursor_position)
40 |
class NumberValidator(Validator):
    """Prompt validator that accepts only digit-only entries (plus the
    configured exit/cancel entries)."""

    def validate(self, document):
        text = document.text

        if text.lower() in (config.exit_entry, config.cancel_entry):
            return
        if text and not text.isdigit():
            # move the cursor to the first non-digit character
            cursor = next((pos for pos, ch in enumerate(text) if not ch.isdigit()), 0)
            raise ValidationError(message='This entry accepts numbers only!', cursor_position=cursor)
57 |
58 |
class FloatValidator(Validator):
    """Prompt validator that accepts only floating point numbers (plus the
    configured exit/cancel entries)."""

    def validate(self, document):
        text = document.text

        if text.lower() in (config.exit_entry, config.cancel_entry):
            # return early: the original `pass` fell through to the float()
            # check, so exit/cancel entries were wrongly rejected
            return
        try:
            float(text)
        except ValueError:
            raise ValidationError(message='This entry accepts floating point numbers only!', cursor_position=0)
69 |
70 |
class NoAlphaValidator(Validator):
    """Prompt validator rejecting entries made up entirely of letters.

    NOTE(review): the check uses str.isalpha on the whole string, so it only
    fires when *every* character is alphabetic; mixed entries pass — confirm
    this is the intended contract.
    """

    def validate(self, document):
        text = document.text

        if text.lower() in (config.exit_entry, config.cancel_entry):
            return
        if text and text.isalpha():
            # move the cursor to the first alphabetic character
            cursor = next((pos for pos, ch in enumerate(text) if ch.isalpha()), 0)
            raise ValidationError(message='This entry does not accept alphabet letters!', cursor_position=cursor)
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/utils/terminal_mode_dialogs.py:
--------------------------------------------------------------------------------
1 | from letmedoit import config
2 | from prompt_toolkit.formatted_text import HTML
3 | from prompt_toolkit.styles import Style
4 | from prompt_toolkit.shortcuts import input_dialog, radiolist_dialog, checkboxlist_dialog, message_dialog
5 | from prompt_toolkit.completion import WordCompleter, FuzzyCompleter
6 | from letmedoit.utils.promptValidator import NumberValidator
7 |
8 |
class TerminalModeDialogs:
    """Prompt-toolkit dialog helpers (text input, radio list, checkbox list)
    styled to match the application's configured terminal colours."""

    def __init__(self, parent) -> None:
        self.parent = parent
        # pick a dark or light dialog palette depending on whether the
        # configured resource-link colour is one of the "ansibright*" values
        self.style = Style.from_dict(
            {
                "dialog": "bg:ansiblack",
                "dialog text-area": f"bg:ansiblack {config.terminalCommandEntryColor2}",
                "dialog text-area.prompt": config.terminalPromptIndicatorColor2,
                "dialog radio-checked": config.terminalResourceLinkColor,
                "dialog checkbox-checked": config.terminalResourceLinkColor,
                "dialog button.arrow": config.terminalResourceLinkColor,
                "dialog button.focused": f"bg:{config.terminalResourceLinkColor} ansiblack",
                "dialog frame.border": config.terminalResourceLinkColor,
                "dialog frame.label": f"bg:ansiblack {config.terminalResourceLinkColor}",
                "dialog.body": "bg:ansiblack ansiwhite",
                "dialog shadow": "bg:ansiblack",
            }
        ) if config.terminalResourceLinkColor.startswith("ansibright") else Style.from_dict(
            {
                "dialog": "bg:ansiwhite",
                "dialog text-area": f"bg:ansiblack {config.terminalCommandEntryColor2}",
                "dialog text-area.prompt": config.terminalPromptIndicatorColor2,
                "dialog radio-checked": config.terminalResourceLinkColor,
                "dialog checkbox-checked": config.terminalResourceLinkColor,
                "dialog button.arrow": config.terminalResourceLinkColor,
                "dialog button.focused": f"bg:{config.terminalResourceLinkColor} ansiblack",
                "dialog frame.border": config.terminalResourceLinkColor,
                "dialog frame.label": f"bg:ansiwhite {config.terminalResourceLinkColor}",
                "dialog.body": "bg:ansiwhite ansiblack",
                "dialog shadow": "bg:ansiwhite",
            }
        )

    def searchableInput(self, title="Text Entry", text="Enter / Search:", default="", completer=None, options=[], descriptions=[], validator=None, numberOnly=False, password=False, ok_text="OK", cancel_text="Cancel"):
        """A wrapper around the standard input_dialog; when the entry is not a
        valid option, open a radiolist_dialog showing the available options.

        Returns the entered/selected string, or "" when nothing valid is given.
        """
        if completer is None and options:
            completer = FuzzyCompleter(WordCompleter(options, ignore_case=True))
        if validator is None and numberOnly:
            validator=NumberValidator()
        result = input_dialog(
            title=title,
            text=text,
            default=default,
            completer=completer,
            validator=validator,
            password=password,
            ok_text=ok_text,
            cancel_text=cancel_text,
            style=self.style,
        ).run()
        # .run() returns None when the dialog is cancelled; the original code
        # called .strip() on it unconditionally and crashed with AttributeError
        result = result.strip() if result is not None else ""
        if result.lower() == config.cancel_entry:
            return result
        if options:
            if result and result in options:
                return result
            else:
                # not a valid option: let the user pick from the filtered list
                return self.getValidOptions(options=options, descriptions=descriptions, filter=result, default=default)
        else:
            if result:
                return result
            else:
                return ""

    def getValidOptions(self, options=[], descriptions=[], bold_descriptions=False, filter="", default="", title="Available Options", text="Select an option:"):
        """Show a radio list of options, optionally filtered by a substring,
        and return the selection ("" when nothing is chosen)."""
        if not options:
            return ""
        filter = filter.strip().lower()
        if descriptions:
            descriptionslower = [i.lower() for i in descriptions]
            values = [(option, HTML(f"{descriptions[index]}") if bold_descriptions else descriptions[index]) for index, option in enumerate(options) if (filter in option.lower() or filter in descriptionslower[index])]
        else:
            values = [(option, option) for option in options if filter in option.lower()]
        if not values:
            # the filter matched nothing; fall back to the full option list
            if descriptions:
                values = [(option, HTML(f"{descriptions[index]}") if bold_descriptions else descriptions[index]) for index, option in enumerate(options)]
            else:
                values = [(option, option) for option in options]
        result = radiolist_dialog(
            title=title,
            text=text,
            values=values,
            default=default if default and default in options else values[0][0],
            style=self.style,
        ).run()
        if result:
            notice = f"You've chosen: {result}"
            config.print3(notice)
            return result
        return ""

    def displayFeatureMenu(self, heading, features):
        """Display a feature menu and run the selected feature; trigger the
        parent's exit action when the dialog is cancelled."""
        values = [(command, command if config.terminalDisplayCommandOnMenu else self.parent.dotCommands[command][0]) for command in features]
        result = radiolist_dialog(
            title=heading,
            text="Select a feature:",
            values=values,
            default=features[0],
            style=self.style,
        ).run()
        if result:
            self.parent.printRunningCommand(result)
            return self.parent.getContent(result)
        else:
            return self.parent.exitAction()

    def getMultipleSelection(self, title="Multiple Selection", text="Select item(s):", options=["ALL"], descriptions=[], default_values=["ALL"]):
        """Show a checkbox list and return the list of selected options
        (None when the dialog is cancelled)."""
        if descriptions:
            values = [(option, descriptions[index]) for index, option in enumerate(options)]
        else:
            values = [(option, option) for option in options]
        return checkboxlist_dialog(
            title=title,
            text=text,
            values=values,
            default_values=default_values,
            style=self.style,
        ).run()
128 |
--------------------------------------------------------------------------------
/package/letmedoit_b4_v3/utils/vlc_utils.py:
--------------------------------------------------------------------------------
1 | from letmedoit import config
2 | import os, sys, re, platform, subprocess, shutil
3 | from letmedoit.utils.shared_utils import SharedUtil
4 |
5 | class VlcUtil:
6 |
7 | macVlc = windowsVlc = ""
8 |
9 | @staticmethod
10 | def isVlcPlayerInstalled():
11 | # on macOS
12 | macVlc = "/Applications/VLC.app/Contents/MacOS/VLC"
13 | VlcUtil.macVlc = macVlc if platform.system() == "Darwin" and os.path.isfile(macVlc) else ""
14 | # on Windows
15 | windowsVlc = r'C:\Program Files\VideoLAN\VLC\vlc.exe'
16 | if platform.system() == "Windows":
17 | if os.path.isfile(windowsVlc):
18 | VlcUtil.windowsVlc = windowsVlc
19 | elif shutil.which("vlc"):
20 | # Windows users can install vlc command with scoop
21 | # read: https://github.com/ScoopInstaller/Scoop
22 | # instll scoop
23 | # > iwr -useb get.scoop.sh | iex
24 | # > scoop install aria2
25 | # install vlc
26 | # > scoop bucket add extras
27 | # > scoop install vlc
28 | VlcUtil.windowsVlc = "vlc"
29 | else:
30 | VlcUtil.windowsVlc = ""
31 | if (VlcUtil.macVlc or VlcUtil.windowsVlc or shutil.which("vlc")):
32 | return True
33 | else:
34 | return False
35 |
36 | @staticmethod
37 | def openVlcPlayer():
38 | def run(command):
39 | os.system("{0}{1} > /dev/null 2>&1 &".format("nohup " if shutil.which("nohup") else "", command))
40 | VlcUtil.closeVlcPlayer()
41 | try:
42 | if VlcUtil.windowsVlc:
43 | os.system(VlcUtil.windowsVlc)
44 | elif VlcUtil.macVlc:
45 | run(VlcUtil.macVlc)
46 | elif shutil.which("vlc"):
47 | run("vlc")
48 | except:
49 | print("No VLC player is found!")
50 |
51 | @staticmethod
52 | def closeVlcPlayer():
53 | try:
54 | if platform.system() == "Windows":
55 | os.system("taskkill /IM vlc.exe /F")
56 | else:
57 | os.system("pkill VLC")
58 | os.system("pkill vlc")
59 | except:
60 | pass
61 |
62 | @staticmethod
63 | def playMediaFile(filePath, vlcSpeed=None, audioGui=False):
64 | if vlcSpeed is None:
65 | vlcSpeed = config.vlcSpeed
66 | # get full path and escape double quote
67 | if isinstance(filePath, str):
68 | filePath = os.path.abspath(filePath).replace('"', '\\"')
69 | else:
70 | # when filePath is a list
71 | filePath = [os.path.abspath(i).replace('"', '\\"') for i in filePath]
72 | filePath = '" "'.join(filePath)
73 | VlcUtil.playMediaFileVlcGui(filePath, vlcSpeed) if re.search("(.mp4|.avi)$", filePath.lower()[-4:]) or audioGui else VlcUtil.playMediaFileVlcNoGui(filePath, vlcSpeed)
74 |
75 | # play audio file with vlc without gui
76 | @staticmethod
77 | def playMediaFileVlcNoGui(filePath, vlcSpeed=None):
78 | if vlcSpeed is None:
79 | vlcSpeed = config.vlcSpeed
80 | # vlc on macOS
81 | if VlcUtil.macVlc:
82 | command = f'''{VlcUtil.macVlc} --intf rc --play-and-exit --rate {vlcSpeed} "{filePath}" &> /dev/null'''
83 | # vlc on windows
84 | elif VlcUtil.windowsVlc:
85 | command = f'''"{VlcUtil.windowsVlc}" --intf dummy --play-and-exit --rate {vlcSpeed} "{filePath}"'''
86 | # vlc on other platforms
87 | elif shutil.which("cvlc"):
88 | command = f'''cvlc --play-and-exit --rate {vlcSpeed} "{filePath}" &> /dev/null'''
89 | # use .communicate() to wait for the playback to be completed as .wait() or checking pid existence does not work
90 | subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
91 |
92 |
# play video file with vlc with gui
@staticmethod
def playMediaFileVlcGui(filePath, vlcSpeed):
    """Play a media file with the VLC GUI player.

    Tries macOS VLC, Windows VLC, then a ``vlc`` binary on PATH.  If no
    VLC binary is found it reports and returns — the original fell
    through to ``Popen`` with ``command`` unassigned, raising NameError.
    """
    # vlc on macOS
    if VlcUtil.macVlc:
        command = f'''{VlcUtil.macVlc} --play-and-exit --rate {vlcSpeed} "{filePath}" &> /dev/null'''
    # vlc on windows
    elif VlcUtil.windowsVlc:
        command = f'''"{VlcUtil.windowsVlc}" --play-and-exit --rate {vlcSpeed} "{filePath}"'''
    # vlc on other platforms
    elif shutil.which("vlc"):
        command = f'''vlc --play-and-exit --rate {vlcSpeed} "{filePath}" &> /dev/null'''
    else:
        # no branch assigned `command`; bail out instead of raising NameError
        print("No VLC player is found!")
        return
    # use .communicate() to wait for the playback to be completed as .wait() or checking pid existence does not work
    subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
107 |
if __name__ == '__main__':
    # CLI usage: <script> <playback speed> <media file path ...>
    # All arguments after the speed are joined back into one path, so
    # paths containing spaces survive shell word-splitting.
    playback_speed = float(sys.argv[1])
    media_path = " ".join(sys.argv[2:])
    VlcUtil.playMediaFile(media_path, playback_speed)
    # remove the flag file that signals a playback is in progress
    playing_flag = os.path.join("temp", "isVlcPlaying")
    if os.path.isfile(playing_flag):
        os.remove(playing_flag)
115 |
116 |
--------------------------------------------------------------------------------
/package/package_name.txt:
--------------------------------------------------------------------------------
1 | letmedoit
--------------------------------------------------------------------------------
/package/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup
import os, shutil

# Notes: Steps to change the package name
# 1. rename the folder "letmedoit" to the new package name
# 2. edit package/package_name.txt and change its content to the new package name
# 3. search for "from letmedoit" and replace with "from <new package name>"

# package name; strip whitespace so a trailing newline in package_name.txt
# cannot corrupt the name (the raw read() would break the apps[...] lookup)
package_name_0 = os.path.join("package_name.txt")
with open(package_name_0, "r", encoding="utf-8") as fileObj:
    package = fileObj.read().strip()
package_name_1 = os.path.join(package, "package_name.txt")  # copy inside the package
shutil.copy(package_name_0, package_name_1)

# delete old shortcut files left over from previous builds
apps = {
    "myhand": ("MyHand", "MyHand Bot"),
    "letmedoit": ("LetMeDoIt", "LetMeDoIt AI"),
    "taskwiz": ("TaskWiz", "TaskWiz AI"),
    "cybertask": ("CyberTask", "CyberTask AI"),
}
appName, appFullName = apps[package]
shortcutFiles = (f"{appName}.bat", f"{appName}.command", f"{appName}.desktop")
for shortcutFile in shortcutFiles:
    shortcut = os.path.join(package, shortcutFile)
    if os.path.isfile(shortcut):
        os.remove(shortcut)

# update package readme from the repository readme
latest_readme = os.path.join("..", "README.md")  # github repository readme
package_readme = os.path.join(package, "README.md")  # package readme
shutil.copy(latest_readme, package_readme)
with open(package_readme, "r", encoding="utf-8") as fileObj:
    long_description = fileObj.read()

# get required packages, one per non-empty line of requirements.txt
install_requires = []
with open(os.path.join(package, "requirements.txt"), "r", encoding="utf-8") as fileObj:
    for line in fileObj:
        mod = line.strip()
        if mod:
            install_requires.append(mod)

# make sure config.py is empty; the context manager guarantees the handle closes
with open(os.path.join(package, "config.py"), "w"):
    pass

# https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/
setup(
    name=package,
    version="3.0.4",
    python_requires=">=3.8, <3.13",
    description=f"{appFullName}, an advanced AI assistant, leveraging the capabilities of AI models, to resolve daily tasks for you.",
    long_description=long_description,
    author="Eliran Wong",
    author_email="support@letmedoit.ai",
    packages=[
        package,
    ],
    package_data={
        package: ["*.*"],
    },
    license="GNU General Public License (GPL)",
    install_requires=install_requires,
    extras_require={
        'genai': ["google-genai>=1.1.0"],  # Dependencies for running Vertex AI
    },
    entry_points={
        "console_scripts": [
            f"{package}={package}.main:main",
            f"lmdi={package}.main:main",
            f"{package}lite={package}.main:lite",
            f"lmdil={package}.main:lite",
        ],
    },
    keywords="ai assistant openai chatgpt gemini autogen rag interpreter auto-heal",
    url="https://letmedoit.ai",
    project_urls={
        "Source": "https://github.com/eliranwong/letmedoit",
        "Tracker": "https://github.com/eliranwong/letmedoit/issues",
        "Documentation": "https://github.com/eliranwong/letmedoit/wiki",
        "Funding": "https://www.paypal.me/letmedoitai",
    },
    classifiers=[
        # Reference: https://pypi.org/classifiers/

        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',

        # Indicate who your project is intended for
        'Intended Audience :: End Users/Desktop',
        'Topic :: Utilities',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Build Tools',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Programming Language :: Python :: 3.12',
    ],
)
111 |
--------------------------------------------------------------------------------
/package/upload_to_pypi.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Build source + wheel distributions, then upload them to PyPI with twine.
# Abort on the first failure so a broken build never triggers the upload of
# stale artifacts already sitting in dist/*.
set -euo pipefail
python3 setup.py sdist bdist_wheel
twine upload dist/*
--------------------------------------------------------------------------------
/package/upload_to_pypi_android.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Build the Android variant (setup_android.py) and upload it to PyPI.
# Abort on the first failure so a broken build never triggers the upload of
# stale artifacts already sitting in dist/*.
set -euo pipefail
python3 setup_android.py sdist bdist_wheel
twine upload dist/*
--------------------------------------------------------------------------------
/tests/my_package/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/tests/my_package/README.md
--------------------------------------------------------------------------------
/tests/my_package/my_package/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/tests/my_package/my_package/__init__.py
--------------------------------------------------------------------------------
/tests/my_package/my_package/cli.py:
--------------------------------------------------------------------------------
# my_package/cli.py
from my_package.main import main


def cli():
    """Console-script entry point: delegate straight to the package's main()."""
    main()


if __name__ == "__main__":
    cli()
9 |
--------------------------------------------------------------------------------
/tests/my_package/my_package/main.py:
--------------------------------------------------------------------------------
# main.py
def main():
    """Print the canonical greeting to stdout."""
    greeting = "Hello, world!"
    print(greeting)


if __name__ == "__main__":
    main()
7 |
--------------------------------------------------------------------------------
/tests/my_package/setup.py:
--------------------------------------------------------------------------------
# Minimal packaging script that exposes the `hello` console command.
from setuptools import setup, find_packages

setup(
    name="my-package",
    version="0.1.0",
    # discover my_package (and any sub-packages) automatically
    packages=find_packages(),
    # List your dependencies here
    install_requires=[],
    entry_points={
        "console_scripts": [
            # running `hello` on the command line invokes my_package.cli:cli
            "hello=my_package.cli:cli",
        ],
    },
)
16 |
--------------------------------------------------------------------------------
/tests/old/bible_old.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eliranwong/letmedoit/46416a1d2c363fd0f99697edcfa7d56b5a4ba88a/tests/old/bible_old.zip
--------------------------------------------------------------------------------
/tests/pip_instructions_keep.md:
--------------------------------------------------------------------------------
1 | To create a Python package with a CLI command, you can follow these steps:
2 |
3 | 1. **Project Structure:**
4 | Create a directory structure for your project. For example:
5 | ```
6 | my_package/
7 | ├── my_package/
8 | │ ├── __init__.py
9 | │ └── cli.py
10 | │ └── main.py
11 | ├── setup.py
12 | └── README.md
13 | ```
14 |
15 | 2. **Write Your Code:**
16 | Inside `main.py`, put your main program logic:
17 | ```python
18 | # main.py
19 | def main():
20 | print("Hello, world!")
21 |
22 | if __name__ == "__main__":
23 | main()
24 | ```
25 |
26 | 3. **Command Line Interface (CLI):**
27 | Create a new file named `cli.py` to handle the command line interface:
28 | ```python
29 | # my_package/cli.py
30 | from my_package.main import main
31 |
32 | def cli():
33 | main()
34 |
35 | if __name__ == "__main__":
36 | cli()
37 | ```
38 |
39 | 4. **Setup Script:**
40 | Create a `setup.py` file to define package metadata and dependencies:
41 | ```python
42 | # setup.py
43 | from setuptools import setup, find_packages
44 |
45 | setup(
46 | name="my-package",
47 | version="0.1.0",
48 | packages=find_packages(),
49 | install_requires=[
50 | # List your dependencies here
51 | ],
52 | entry_points={
53 | "console_scripts": [
54 | "hello=my_package.cli:cli",
55 | ],
56 | },
57 | )
58 | ```
59 |
60 | 5. **Install and Test Locally:**
61 | Install your package locally to test it:
62 | ```bash
63 | pip install -e .
64 | ```
65 |
66 | 6. **Distribution:**
67 | To distribute your package, you can create a source distribution or a wheel:
68 | ```bash
69 | pip install wheel
70 | python3 setup.py sdist bdist_wheel
71 | ```
72 |
73 | 7. Add API Tokens
74 | Log in pypi account
75 | Enable 2-factor-authentication 2FA
76 | Add API Tokens
77 |
78 | 8. **Upload to PyPI:**
79 | If you want others to easily install your package using `pip`, you can upload it to PyPI. Make sure you have an account on PyPI, then install and use `twine` for uploading:
80 | ```bash
81 | pip install twine
82 | twine upload dist/*
83 | ```
84 |
85 | [pypi]
86 | username = __token__
87 | password = ***********************************
88 |
89 | 9. **Install and Run:**
90 | Users can then install your package and use the CLI command:
91 | ```bash
92 | pip install my-package
93 | hello
94 | ```
95 |
96 | This structure and setup will work on both macOS and Windows. Users can run the `hello` command from the terminal after installing your package via `pip`.
97 |
98 | # To enable 2FA
99 |
100 | To set up Two-Factor Authentication (2FA) with an authentication application for
101 | your PyPI account, you will need to follow these general steps:
102 |
103 | 1. **Download an Authentication Application**: You will need a Time-based
104 | One-Time Password (TOTP) application on your device. Common choices include
105 | Google Authenticator, Authy, or similar apps available in your device's app store.
106 |
107 | 2. **Log in to Your PyPI Account**: Access your PyPI account by logging in
108 | through the [PyPI website](https://pypi.org/).
109 |
110 | 3. **Access Account Settings**: Once logged in, navigate to your account
111 | settings.
112 |
113 | 4. **Add 2FA**: Look for an option that says "Add 2FA with authentication
114 | application" or similar. Select this option to begin the setup process.
115 |
116 | 5. **Scan QR Code**: The PyPI website will provide you with a QR code. Open your
117 | authentication application and use it to scan this QR code. This will link your
118 | PyPI account with the authentication app.
119 |
120 | 6. **Enter the Generated Code**: After scanning the QR code, your authentication
121 | app will generate a 6-digit code. Enter this code on the PyPI website to verify
122 | the setup.
123 |
124 | 7. **Complete the Setup**: Follow any additional prompts to complete the setup
125 | process. Make sure to save any backup codes provided during the setup in a
126 | secure location, as these can be used to access your account if your
127 | authentication device is unavailable.
128 |
129 | For detailed instructions and help, you can refer to the
130 | [PyPI help page](https://pypi.org/help/) or the
131 | [blog post about securing PyPI accounts with 2FA](https://blog.pypi.org/posts/2023-05-25-securing-pypi-with-2fa/).
132 |
133 | Please note that the exact steps may vary slightly depending on updates to the
134 | PyPI website or the authentication application you choose to use. Always follow
135 | the most current instructions provided by PyPI during the setup process.
--------------------------------------------------------------------------------
/tests/prompt_toolkit_dynamic_layout.py:
--------------------------------------------------------------------------------
1 | from prompt_toolkit.application import Application
2 | from prompt_toolkit.layout import HSplit, VSplit, Layout, Dimension
3 | from prompt_toolkit.widgets import TextArea
4 | from prompt_toolkit.application.current import get_app
5 | from prompt_toolkit.key_binding import KeyBindings
6 |
class DynamicLayout:
    """Full-screen prompt_toolkit demo that switches between a horizontal and
    a vertical split depending on the terminal's aspect ratio.

    Press Ctrl-L to re-evaluate the layout after resizing the terminal.
    """

    def __init__(self):
        self.text_area1 = TextArea(text='Text Area 1')
        self.text_area2 = TextArea(text='Text Area 2')

    def getLayout(self):
        """Return the two text areas side by side in landscape terminals,
        stacked vertically in portrait ones."""
        height, width = self.app.output.get_size()
        areas = [self.text_area1, self.text_area2]
        split = VSplit(areas) if width > height else HSplit(areas)
        return Layout(split)

    def updateLayout(self):
        """Recompute and install the layout for the current terminal size."""
        self.app.layout = self.getLayout()

    def run(self):
        key_bindings = KeyBindings()

        @key_bindings.add('c-l')  # Ctrl-L: switch between the two layouts
        def _(event):
            self.updateLayout()

        # self.app must exist before getLayout() queries the output size
        self.app = Application(full_screen=True, key_bindings=key_bindings)
        self.app.layout = self.getLayout()
        self.app.run()
37 |
if __name__ == "__main__":
    # guard so importing this module does not launch the full-screen app
    DynamicLayout().run()
--------------------------------------------------------------------------------
/tests/stream_function_response.py:
--------------------------------------------------------------------------------
1 | null = None
2 | events = [
3 | {
4 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
5 | "object": "chat.completion.chunk",
6 | "created": 1695227434,
7 | "model": "gpt-3.5-turbo-16k-0613",
8 | "choices": [
9 | {
10 | "index": 0,
11 | "delta": {
12 | "role": "assistant",
13 | "content": null,
14 | "function_call": {
15 | "name": "integrate_google_searches",
16 | "arguments": ""
17 | }
18 | },
19 | "finish_reason": null
20 | }
21 | ]
22 | },
23 | {
24 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
25 | "object": "chat.completion.chunk",
26 | "created": 1695227434,
27 | "model": "gpt-3.5-turbo-16k-0613",
28 | "choices": [
29 | {
30 | "index": 0,
31 | "delta": {
32 | "function_call": {
33 | "arguments": "{\n"
34 | }
35 | },
36 | "finish_reason": null
37 | }
38 | ]
39 | },
40 | {
41 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
42 | "object": "chat.completion.chunk",
43 | "created": 1695227434,
44 | "model": "gpt-3.5-turbo-16k-0613",
45 | "choices": [
46 | {
47 | "index": 0,
48 | "delta": {
49 | "function_call": {
50 | "arguments": " "
51 | }
52 | },
53 | "finish_reason": null
54 | }
55 | ]
56 | },
57 | {
58 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
59 | "object": "chat.completion.chunk",
60 | "created": 1695227434,
61 | "model": "gpt-3.5-turbo-16k-0613",
62 | "choices": [
63 | {
64 | "index": 0,
65 | "delta": {
66 | "function_call": {
67 | "arguments": " \""
68 | }
69 | },
70 | "finish_reason": null
71 | }
72 | ]
73 | },
74 | {
75 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
76 | "object": "chat.completion.chunk",
77 | "created": 1695227434,
78 | "model": "gpt-3.5-turbo-16k-0613",
79 | "choices": [
80 | {
81 | "index": 0,
82 | "delta": {
83 | "function_call": {
84 | "arguments": "keywords"
85 | }
86 | },
87 | "finish_reason": null
88 | }
89 | ]
90 | },
91 | {
92 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
93 | "object": "chat.completion.chunk",
94 | "created": 1695227434,
95 | "model": "gpt-3.5-turbo-16k-0613",
96 | "choices": [
97 | {
98 | "index": 0,
99 | "delta": {
100 | "function_call": {
101 | "arguments": "\":"
102 | }
103 | },
104 | "finish_reason": null
105 | }
106 | ]
107 | },
108 | {
109 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
110 | "object": "chat.completion.chunk",
111 | "created": 1695227434,
112 | "model": "gpt-3.5-turbo-16k-0613",
113 | "choices": [
114 | {
115 | "index": 0,
116 | "delta": {
117 | "function_call": {
118 | "arguments": " \""
119 | }
120 | },
121 | "finish_reason": null
122 | }
123 | ]
124 | },
125 | {
126 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
127 | "object": "chat.completion.chunk",
128 | "created": 1695227434,
129 | "model": "gpt-3.5-turbo-16k-0613",
130 | "choices": [
131 | {
132 | "index": 0,
133 | "delta": {
134 | "function_call": {
135 | "arguments": "El"
136 | }
137 | },
138 | "finish_reason": null
139 | }
140 | ]
141 | },
142 | {
143 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
144 | "object": "chat.completion.chunk",
145 | "created": 1695227434,
146 | "model": "gpt-3.5-turbo-16k-0613",
147 | "choices": [
148 | {
149 | "index": 0,
150 | "delta": {
151 | "function_call": {
152 | "arguments": "iran"
153 | }
154 | },
155 | "finish_reason": null
156 | }
157 | ]
158 | },
159 | {
160 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
161 | "object": "chat.completion.chunk",
162 | "created": 1695227434,
163 | "model": "gpt-3.5-turbo-16k-0613",
164 | "choices": [
165 | {
166 | "index": 0,
167 | "delta": {
168 | "function_call": {
169 | "arguments": " Wong"
170 | }
171 | },
172 | "finish_reason": null
173 | }
174 | ]
175 | },
176 | {
177 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
178 | "object": "chat.completion.chunk",
179 | "created": 1695227434,
180 | "model": "gpt-3.5-turbo-16k-0613",
181 | "choices": [
182 | {
183 | "index": 0,
184 | "delta": {
185 | "function_call": {
186 | "arguments": "\"\n"
187 | }
188 | },
189 | "finish_reason": null
190 | }
191 | ]
192 | },
193 | {
194 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
195 | "object": "chat.completion.chunk",
196 | "created": 1695227434,
197 | "model": "gpt-3.5-turbo-16k-0613",
198 | "choices": [
199 | {
200 | "index": 0,
201 | "delta": {
202 | "function_call": {
203 | "arguments": "}"
204 | }
205 | },
206 | "finish_reason": null
207 | }
208 | ]
209 | },
210 | {
211 | "id": "chatcmpl-80uNudhwznSPoOflCoKaKQZVGA531",
212 | "object": "chat.completion.chunk",
213 | "created": 1695227434,
214 | "model": "gpt-3.5-turbo-16k-0613",
215 | "choices": [
216 | {
217 | "index": 0,
218 | "delta": {},
219 | "finish_reason": "function_call"
220 | }
221 | ]
222 | },
223 | ]
224 |
# Accumulate the streamed function call: the name arrives once in the first
# delta, the JSON arguments arrive as fragments across the remaining deltas.
func_name = ""
func_args = ""
for event in events:
    delta = event["choices"][0]["delta"]
    function_call = delta.get("function_call") if delta else None
    if function_call:
        if not func_name and function_call.get("name"):
            func_name += function_call["name"]
        # .get with a default: a chunk may carry a name without an
        # "arguments" key, which the original indexed unconditionally (KeyError)
        func_args += function_call.get("arguments", "")

print(func_name)
print(func_args)
--------------------------------------------------------------------------------
/tests/terminal.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import subprocess
3 | from prompt_toolkit.application import Application
4 | from prompt_toolkit.key_binding import KeyBindings
5 | from prompt_toolkit.layout.containers import HSplit, Window
6 | from prompt_toolkit.layout.layout import Layout
7 | from prompt_toolkit.styles import Style
8 | from prompt_toolkit.widgets import SearchToolbar, TextArea
9 |
10 | help_text = """A simple terminal emulator by Eliran Wong"""
11 |
12 |
class TerminalEmulator:
    """A minimal full-screen terminal emulator built on prompt_toolkit.

    Commands typed into the input field are run through the shell and their
    stdout is appended line by line to a read-only output area.
    """

    def __init__(self):

        # The layout: a scrollable read-only output pane ...
        self.output_field = TextArea(
            style="class:output-field",
            text=help_text,
            focusable=True,
            focus_on_click=True,
            read_only=True,
        )
        self.search_field = SearchToolbar()
        # ... and a single-line input prompt underneath
        self.input_field = TextArea(
            height=1,
            prompt=">>> ",
            style="class:input-field",
            multiline=False,
            wrap_lines=False,
            search_field=self.search_field,
            focusable=True,
            focus_on_click=True,
        )
        self.input_field.accept_handler = self.run_command

    def run_command(self, buffer):
        """Run the submitted command through the shell and stream its stdout
        into the output field.

        Args:
            buffer: the prompt_toolkit buffer holding the submitted command.
        """
        self.output_field.text = f"{self.output_field.text}\n\n>>> {buffer.text}"

        # Start the subprocess with stdout set to PIPE
        process = subprocess.Popen(self.input_field.text, shell=True, stdout=subprocess.PIPE)

        # Read the output in real-time.  Break only on EOF (b""): the
        # original stripped each line before testing it, so the first blank
        # line in the output ended the loop and truncated the rest.
        while True:
            raw = process.stdout.readline()
            if not raw:
                break
            output = raw.decode(errors="replace").strip()

            # Append the line and keep the cursor pinned to the end
            self.output_field.text = f"{self.output_field.text}\n{output}"
            self.output_field.buffer.cursor_position = len(self.output_field.text)

        # Wait for the process to complete
        process.wait()
        # reset is necessary after another full-screen application is closed
        self.app.reset()


    def run(self):
        """Wire up key bindings, style and layout, then run the application."""

        # The key bindings.
        kb = KeyBindings()

        @kb.add("c-c")
        @kb.add("c-q")
        def _(event):
            "Pressing Ctrl-Q or Ctrl-C will exit the user interface."
            event.app.exit()

        # Style.
        style = Style(
            [
                ("output-field", "bg:#000000 #ffffff"),
                ("input-field", "bg:#000000 #ffffff"),
                ("line", "#004400"),
            ]
        )

        # Run application.
        container = HSplit(
            [
                self.output_field,
                Window(height=1, char="-", style="class:line"),
                self.input_field,
                self.search_field,
            ]
        )
        self.app = Application(
            layout=Layout(container, focused_element=self.input_field),
            key_bindings=kb,
            style=style,
            mouse_support=True,
            full_screen=True,
        )

        self.app.run()
101 |
102 |
if __name__ == "__main__":
    # launch the emulator only when executed as a script
    emulator = TerminalEmulator()
    emulator.run()
105 |
--------------------------------------------------------------------------------
/tests/tray_mini.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from PySide6.QtWidgets import QSystemTrayIcon, QMenu, QApplication
3 | from PySide6.QtGui import QIcon, QAction, QGuiApplication
4 |
class SystemTrayIcon(QSystemTrayIcon):
    """System-tray icon whose context menu offers a single Exit action."""

    def __init__(self, icon, parent=None):
        super().__init__(icon, parent)

        # Build the context menu with one Exit entry wired to self.exit.
        context_menu = QMenu(parent)
        exit_action = QAction("Exit", self)
        exit_action.triggered.connect(self.exit)
        context_menu.addAction(exit_action)
        self.setContextMenu(context_menu)

        # Keep references on the instance so Qt objects are not collected.
        self.menu = context_menu
        self.exitAction = exit_action

    def exit(self):
        """Hide the tray icon, then quit the Qt application."""
        self.setVisible(False)
        QGuiApplication.instance().quit()
19 |
if __name__ == "__main__":
    app = QApplication(sys.argv)

    # Allow the icon path to be supplied on the command line; fall back to
    # the original hard-coded path so existing invocations keep working.
    icon_path = sys.argv[1] if len(sys.argv) > 1 else "/home/eliran/Desktop/image/image.jpg"
    icon = QIcon(icon_path)
    trayIcon = SystemTrayIcon(icon)
    trayIcon.show()

    sys.exit(app.exec())
28 |
--------------------------------------------------------------------------------