├── .github ├── ISSUE_TEMPLATE │ ├── add_agent.md │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── ci.yaml ├── .gitignore ├── Argcfile.sh ├── LICENSE ├── README.md ├── agents ├── coder │ ├── README.md │ ├── index.yaml │ ├── tools.sh │ └── tools.txt ├── demo │ ├── README.md │ ├── index.yaml │ ├── tools.js │ ├── tools.py │ ├── tools.sh │ └── tools.txt ├── json-viewer │ ├── README.md │ ├── index.yaml │ ├── package.json │ └── tools.js ├── sql │ ├── README.md │ ├── index.yaml │ └── tools.sh └── todo │ ├── README.md │ ├── index.yaml │ └── tools.sh ├── docs ├── agent.md ├── argcfile.md ├── environment-variables.md └── tool.md ├── mcp ├── bridge │ ├── README.md │ ├── index.js │ └── package.json └── server │ ├── README.md │ ├── index.js │ └── package.json ├── scripts ├── build-declarations.js ├── build-declarations.py ├── build-declarations.sh ├── check-deps.sh ├── create-tool.sh ├── declarations-util.sh ├── mcp.sh ├── run-agent.js ├── run-agent.py ├── run-agent.sh ├── run-mcp-tool.sh ├── run-tool.js ├── run-tool.py └── run-tool.sh ├── tools ├── demo_js.js ├── demo_py.py ├── demo_sh.sh ├── execute_command.sh ├── execute_js_code.js ├── execute_py_code.py ├── execute_sql_code.sh ├── fetch_url_via_curl.sh ├── fetch_url_via_jina.sh ├── fs_cat.sh ├── fs_ls.sh ├── fs_mkdir.sh ├── fs_patch.sh ├── fs_rm.sh ├── fs_write.sh ├── get_current_time.sh ├── get_current_weather.sh ├── search_arxiv.sh ├── search_wikipedia.sh ├── search_wolframalpha.sh ├── send_mail.sh ├── send_twilio.sh ├── web_search_aichat.sh ├── web_search_perplexity.sh └── web_search_tavily.sh └── utils ├── guard_operation.sh ├── guard_path.sh └── patch.awk /.github/ISSUE_TEMPLATE/add_agent.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Add a New AI Agent 3 | about: Propose an idea or submit a new AI agent for inclusion 4 | title: '[Agent Request] Add ' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Agent Description** 13 | 14 | 15 | **Agent Conversation Starters** 16 | 17 | 18 | **Agent Tools** 19 | 20 | 21 | 22 | 23 | **Additional context** 24 | 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Describe the bug** 13 | 17 | 18 | **Expected behavior** 19 | 20 | 21 | **Screenshots/Logs** 22 | 23 | 24 | **Environment** 25 | 26 | 27 | **Additional context** 28 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Is your feature request related to a problem? 
Please describe.** 13 | 14 | 15 | **Describe the solution you'd like** 16 | 17 | 18 | **Describe alternatives you've considered** 19 | 20 | 21 | **Additional context** 22 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - '*' 7 | push: 8 | branches: 9 | - main 10 | 11 | defaults: 12 | run: 13 | shell: bash 14 | 15 | jobs: 16 | all: 17 | name: All 18 | 19 | runs-on: ${{ matrix.os }} 20 | 21 | strategy: 22 | matrix: 23 | os: [ubuntu-latest, macos-latest, windows-latest] 24 | 25 | steps: 26 | - uses: actions/checkout@v4 27 | 28 | - uses: sigoden/install-binary@v1 29 | with: 30 | repo: sigoden/argc 31 | 32 | - name: Check versions 33 | run: argc version 34 | 35 | - name: Setup Node.js 36 | uses: actions/setup-node@v4 37 | 38 | - name: Setup Python 39 | uses: actions/setup-python@v5 40 | with: 41 | python-version: '3.11' 42 | 43 | - name: Link web-search and code-interpreter 44 | run: | 45 | argc link-web-search web_search_perplexity.sh 46 | argc link-code-interpreter execute_py_code.py 47 | 48 | - name: Run Test 49 | run: argc test 50 | env: 51 | PYTHONIOENCODING: utf-8 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /tmp 2 | /tools.txt 3 | /agents.txt 4 | functions.json 5 | /bin 6 | /cache 7 | /agents/_* 8 | /tools/_* 9 | /tools/web_search.* 10 | /tools/code_interpreter.* 11 | /.env 12 | __pycache__ 13 | /.venv 14 | node_modules 15 | /package.json 16 | package-lock.json 17 | *.lock 18 | /mcp.json -------------------------------------------------------------------------------- /Argcfile.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @meta dotenv 5 | 6 | BIN_DIR=bin 7 | TMP_DIR="cache/__tmp__" 8 | VENV_DIR=".venv" 9 | 10 | LANG_CMDS=( \ 11 | "sh:bash" \ 12 | "js:node" \ 13 | "py:python" \ 14 | ) 15 | 16 | # @cmd Run the tool 17 | # @option -C --cwd Change the current working directory 18 | # @alias tool:run 19 | # @arg tool![`_choice_tool`] The tool name 20 | # @arg json The json data 21 | run@tool() { 22 | if [[ -z "$argc_json" ]]; then 23 | declaration="$(generate-declarations@tool "$argc_tool" | jq -r '.[0]')" 24 | if [[ -n "$declaration" ]]; then 25 | _ask_json_data "$declaration" 26 | fi 27 | fi 28 | if [[ -z "$argc_json" ]]; then 29 | _die "error: no JSON data" 30 | fi 31 | lang="${argc_tool##*.}" 32 | cmd="$(_lang_to_cmd "$lang")" 33 | run_tool_script="scripts/run-tool.$lang" 34 | [[ -n "$argc_cwd" ]] && cd "$argc_cwd" 35 | exec "$cmd" "$run_tool_script" "$argc_tool" "$argc_json" 36 | } 37 | 38 | # @cmd Run the agent 39 | # @alias agent:run 40 | # @option -C --cwd Change the current working directory 41 | # @arg agent![`_choice_agent`] The agent name 42 | # @arg action![?`_choice_agent_action`] The agent action 43 | # @arg json The json data 44 | run@agent() { 45 | if [[ -z "$argc_json" ]]; then 46 | declaration="$(generate-declarations@agent "$argc_agent" | jq --arg name "$argc_action" '.[] | select(.name == $name)')" 47 | if [[ -n "$declaration" ]]; then 48 | _ask_json_data "$declaration" 49 | fi 50 | fi 51 | if [[ -z "$argc_json" ]]; then 52 | _die "error: no JSON data" 53 | fi 54 | tools_path="$(_get_agent_tools_path "$argc_agent")" 55 | lang="${tools_path##*.}" 56 | 
cmd="$(_lang_to_cmd "$lang")" 57 | run_agent_script="scripts/run-agent.$lang" 58 | [[ -n "$argc_cwd" ]] && cd "$argc_cwd" 59 | exec "$cmd" "$run_agent_script" "$argc_agent" "$argc_action" "$argc_json" 60 | } 61 | 62 | # @cmd Build the project 63 | build() { 64 | if [[ -f tools.txt ]]; then 65 | argc build@tool 66 | else 67 | echo 'Skipped building tools since tools.txt is missing' 68 | fi 69 | if [[ -f agents.txt ]]; then 70 | argc build@agent 71 | else 72 | echo 'Skipped building agents since agents.txt is missing' 73 | fi 74 | if [[ -f mcp.json ]]; then 75 | argc mcp merge-functions -S 76 | fi 77 | } 78 | 79 | # @cmd Build tools 80 | # @alias tool:build 81 | # @option --names-file=tools.txt Path to a file containing tool filenames, one per line. 82 | # This file specifies which tools will be used. 83 | # @option --declarations-file=functions.json Path to a json file to save function declarations 84 | # @arg tools*[`_choice_tool`] The tool filenames 85 | build@tool() { 86 | if [[ "${#argc_tools[@]}" -gt 0 ]]; then 87 | mkdir -p "$TMP_DIR" 88 | argc_names_file="$TMP_DIR/tools.txt" 89 | printf "%s\n" "${argc_tools[@]}" > "$argc_names_file" 90 | elif [[ "$argc_declarations_file" == "functions.json" ]]; then 91 | argc clean@tool 92 | fi 93 | argc build-declarations@tool --names-file "${argc_names_file}" --declarations-file "${argc_declarations_file}" 94 | argc build-bin@tool --names-file "${argc_names_file}" 95 | } 96 | 97 | # @cmd Build tools to bin 98 | # @alias tool:build-bin 99 | # @option --names-file=tools.txt Path to a file containing tool filenames, one per line. 100 | # @arg tools*[`_choice_tool`] The tool filenames 101 | build-bin@tool() { 102 | mkdir -p "$BIN_DIR" 103 | if [[ "${#argc_tools[@]}" -gt 0 ]]; then 104 | names=("${argc_tools[@]}" ) 105 | elif [[ -f "$argc_names_file" ]]; then 106 | names=($(cat "$argc_names_file" | grep -v '^#')) 107 | if [[ "${#names[@]}" -gt 0 ]]; then 108 | (cd "$BIN_DIR" && rm -rf "${names[@]}") 109 | fi 110 | fi 111 | if [[ -z "$names" ]]; then 112 | _die "error: no tools provided. '$argc_names_file' is missing. please create it and add some tools." 113 | fi 114 | not_found_tools=() 115 | for name in "${names[@]}"; do 116 | basename="${name%.*}" 117 | lang="${name##*.}" 118 | tool_path="tools/$name" 119 | if [[ -f "$tool_path" ]]; then 120 | if _is_win; then 121 | bin_file="$BIN_DIR/$basename.cmd" 122 | _build_win_shim tool $lang > "$bin_file" 123 | else 124 | bin_file="$BIN_DIR/$basename" 125 | if [[ "$lang" == "py" && -d "$VENV_DIR" ]]; then 126 | rm -rf "$bin_file" 127 | _build_py_shim tool $lang > "$bin_file" 128 | chmod +x "$bin_file" 129 | else 130 | ln -s -f "$PWD/scripts/run-tool.$lang" "$bin_file" 131 | fi 132 | fi 133 | echo "Build bin/$basename" 134 | else 135 | not_found_tools+=("$name") 136 | fi 137 | done 138 | if [[ -n "$not_found_tools" ]]; then 139 | _die "error: not found tools: ${not_found_tools[*]}" 140 | fi 141 | } 142 | 143 | # @cmd Build tools function declarations file 144 | # @alias tool:build-declarations 145 | # @option --names-file=tools.txt Path to a file containing tool filenames, one per line. 
146 | # @option --declarations-file=functions.json Path to a json file to save function declarations 147 | # @arg tools*[`_choice_tool`] The tool filenames 148 | build-declarations@tool() { 149 | if [[ "${#argc_tools[@]}" -gt 0 ]]; then 150 | names=("${argc_tools[@]}" ) 151 | elif [[ -f "$argc_names_file" ]]; then 152 | names=($(cat "$argc_names_file" | grep -v '^#')) 153 | fi 154 | if [[ -z "$names" ]]; then 155 | _die "error: no tools provided. '$argc_names_file' is missing. please create it and add some tools." 156 | fi 157 | json_list=() 158 | not_found_tools=() 159 | build_failed_tools=() 160 | for name in "${names[@]}"; do 161 | lang="${name##*.}" 162 | tool_path="tools/$name" 163 | if [[ ! -f "$tool_path" ]]; then 164 | not_found_tools+=("$name") 165 | continue; 166 | fi 167 | json_data="$(generate-declarations@tool "$name" | jq -r '.[0]')" || { 168 | build_failed_tools+=("$name") 169 | } 170 | if [[ "$json_data" == "null" ]]; then 171 | _die "error: failed to build declarations for tool $name" 172 | fi 173 | json_list+=("$json_data") 174 | done 175 | if [[ -n "$not_found_tools" ]]; then 176 | _die "error: not found tools: ${not_found_tools[*]}" 177 | fi 178 | if [[ -n "$build_failed_tools" ]]; then 179 | _die "error: invalid tools: ${build_failed_tools[*]}" 180 | fi 181 | json_data="$(echo "${json_list[@]}" | jq -s '.')" 182 | if [[ "$argc_declarations_file" == "-" ]]; then 183 | echo "$json_data" 184 | else 185 | echo "Build $argc_declarations_file" 186 | echo "$json_data" > "$argc_declarations_file" 187 | fi 188 | } 189 | 190 | 191 | # @cmd Generate function declaration for the tool 192 | # @alias tool:generate-declarations 193 | # @arg tool![`_choice_tool`] The tool name 194 | generate-declarations@tool() { 195 | lang="${1##*.}" 196 | cmd="$(_lang_to_cmd "$lang")" 197 | "$cmd" "scripts/build-declarations.$lang" "tools/$1" 198 | } 199 | 200 | # @cmd Build agents 201 | # @alias agent:build 202 | # @option --names-file=agents.txt Path to a file containing agent filenames, one per line. 203 | # @arg agents*[`_choice_agent`] The agent filenames 204 | build@agent() { 205 | if [[ "${#argc_agents[@]}" -gt 0 ]]; then 206 | mkdir -p "$TMP_DIR" 207 | argc_names_file="$TMP_DIR/agents.txt" 208 | printf "%s\n" "${argc_agents[@]}" > "$argc_names_file" 209 | else 210 | argc clean@agent 211 | fi 212 | argc build-declarations@agent --names-file "${argc_names_file}" 213 | argc build-bin@agent --names-file "${argc_names_file}" 214 | } 215 | 216 | # @cmd Build agents to bin 217 | # @alias agent:build-bin 218 | # @option --names-file=agents.txt Path to a file containing agent dirs, one per line. 219 | # @arg agents*[`_choice_agent`] The agent names 220 | build-bin@agent() { 221 | mkdir -p "$BIN_DIR" 222 | if [[ "${#argc_agents[@]}" -gt 0 ]]; then 223 | names=("${argc_agents[@]}" ) 224 | elif [[ -f "$argc_names_file" ]]; then 225 | names=($(cat "$argc_names_file" | grep -v '^#')) 226 | if [[ "${#names[@]}" -gt 0 ]]; then 227 | (cd "$BIN_DIR" && rm -rf "${names[@]}") 228 | fi 229 | fi 230 | if [[ -z "$names" ]]; then 231 | _die "error: no agents provided. '$argc_names_file' is missing. please create it and add some agents." 
232 | fi 233 | not_found_agents=() 234 | for name in "${names[@]}"; do 235 | agent_dir="agents/$name" 236 | found=false 237 | for item in "${LANG_CMDS[@]}"; do 238 | lang="${item%:*}" 239 | agent_tools_path="$agent_dir/tools.$lang" 240 | if [[ -f "$agent_tools_path" ]]; then 241 | found=true 242 | if _is_win; then 243 | bin_file="$BIN_DIR/$name.cmd" 244 | _build_win_shim agent $lang > "$bin_file" 245 | else 246 | bin_file="$BIN_DIR/$name" 247 | if [[ "$lang" == "py" && -d "$VENV_DIR" ]]; then 248 | rm -rf "$bin_file" 249 | _build_py_shim tool $lang > "$bin_file" 250 | chmod +x "$bin_file" 251 | else 252 | ln -s -f "$PWD/scripts/run-agent.$lang" "$bin_file" 253 | fi 254 | fi 255 | echo "Build bin/$name" 256 | tool_names_file="$agent_dir/tools.txt" 257 | if [[ -f "$tool_names_file" ]]; then 258 | argc build-bin@tool --names-file "${tool_names_file}" 259 | fi 260 | break 261 | fi 262 | done 263 | if [[ "$found" == "false" ]] && [[ ! -d "$agent_dir" ]]; then 264 | not_found_agents+=("$name") 265 | fi 266 | done 267 | if [[ -n "$not_found_agents" ]]; then 268 | _die "error: not found agents: ${not_found_agents[*]}" 269 | fi 270 | } 271 | 272 | # @cmd Build agents function declarations file 273 | # @alias agent:build-declarations 274 | # @option --names-file=agents.txt Path to a file containing agent dirs, one per line. 275 | # @arg agents*[`_choice_agent`] The tool filenames 276 | build-declarations@agent() { 277 | if [[ "${#argc_agents[@]}" -gt 0 ]]; then 278 | names=("${argc_agents[@]}" ) 279 | elif [[ -f "$argc_names_file" ]]; then 280 | names=($(cat "$argc_names_file" | grep -v '^#')) 281 | fi 282 | if [[ -z "$names" ]]; then 283 | _die "error: no agents provided. '$argc_names_file' is missing. please create it and add some agents." 284 | fi 285 | not_found_agents=() 286 | build_failed_agents=() 287 | exist_tools="$(ls -1 tools)" 288 | for name in "${names[@]}"; do 289 | agent_dir="agents/$name" 290 | declarations_file="$agent_dir/functions.json" 291 | tool_names_file="$agent_dir/tools.txt" 292 | found=false 293 | if [[ -d "$agent_dir" ]]; then 294 | found=true 295 | ok=true 296 | json_data="" 297 | agent_json_data="" 298 | tools_json_data="" 299 | for item in "${LANG_CMDS[@]}"; do 300 | lang="${item%:*}" 301 | agent_tools_path="$agent_dir/tools.$lang" 302 | if [[ -f "$agent_tools_path" ]]; then 303 | agent_json_data="$(generate-declarations@agent "$name")" || { 304 | ok=false 305 | build_failed_agents+=("$name") 306 | } 307 | break 308 | fi 309 | done 310 | if [[ -f "$tool_names_file" ]]; then 311 | if grep -q '^web_search\.' "$tool_names_file" && ! grep -q '^web_search\.' <<<"$exist_tools"; then 312 | echo "WARNING: no found web_search tool, please run \`argc link-web-search \` to set one." 313 | fi 314 | if grep -q '^code_interpreter\.' "$tool_names_file" && ! grep -q '^code_interpreter\.' <<<"$exist_tools"; then 315 | echo "WARNING: no found code_interpreter tool, please run \`argc link-code-interpreter \` to set one." 
316 | fi 317 | tools_json_data="$(argc build-declarations@tool --names-file="$tool_names_file" --declarations-file=-)" || { 318 | ok=false 319 | build_failed_agents+=("$name") 320 | } 321 | fi 322 | if [[ "$ok" == "true" ]]; then 323 | if [[ -n "$agent_json_data" ]] && [[ -n "$tools_json_data" ]]; then 324 | json_data="$(echo "[$agent_json_data,$tools_json_data]" | jq 'flatten')" 325 | elif [[ -n "$agent_json_data" ]]; then 326 | json_data="$agent_json_data" 327 | elif [[ -n "$tools_json_data" ]]; then 328 | json_data="$tools_json_data" 329 | fi 330 | if [[ -n "$json_data" ]]; then 331 | echo "Build $declarations_file" 332 | echo "$json_data" > "$declarations_file" 333 | fi 334 | fi 335 | fi 336 | if [[ "$found" == "false" ]]; then 337 | not_found_agents+=("$name") 338 | fi 339 | done 340 | if [[ -n "$not_found_agents" ]]; then 341 | _die "error: not found agents: ${not_found_agents[*]}" 342 | fi 343 | if [[ -n "$build_failed_agents" ]]; then 344 | _die "error: invalid agents: ${build_failed_agents[*]}" 345 | fi 346 | } 347 | 348 | # @cmd Generate function declarations for the agent 349 | # @alias agent:generate-declarations 350 | # @flag --oneline Summary JSON in one line 351 | # @arg agent![`_choice_agent`] The agent name 352 | generate-declarations@agent() { 353 | tools_path="$(_get_agent_tools_path "$1")" 354 | if [[ -z "$tools_path" ]]; then 355 | _die "error: no found entry file at agents/$1/tools." 356 | fi 357 | lang="${tools_path##*.}" 358 | cmd="$(_lang_to_cmd "$lang")" 359 | json="$("$cmd" "scripts/build-declarations.$lang" "$tools_path" | jq 'map(. + {agent: true})')" 360 | if [[ -n "$argc_oneline" ]]; then 361 | echo "$json" | jq -r '.[] | .name + ": " + (.description | split("\n"))[0]' 362 | else 363 | echo "$json" 364 | fi 365 | } 366 | 367 | # @cmd Check environment variables, Node/Python dependencies, MCP-Bridge-Server status 368 | check() { 369 | argc check@tool 370 | argc check@agent 371 | argc mcp check 372 | } 373 | 374 | # @cmd Check dependencies and environment variables for a specific tool 375 | # @alias tool:check 376 | # @arg tools*[`_choice_tool`] The tool name 377 | check@tool() { 378 | if [[ "${#argc_tools[@]}" -gt 0 ]]; then 379 | tool_names=("${argc_tools[@]}") 380 | else 381 | tool_names=($(cat tools.txt | grep -v '^#')) 382 | fi 383 | for name in "${tool_names[@]}"; do 384 | tool_path="tools/$name" 385 | echo "Check $tool_path" 386 | if [[ -f "$tool_path" ]]; then 387 | _check_bin "${name%.*}" 388 | _check_envs "$tool_path" 389 | ./scripts/check-deps.sh "$tool_path" 390 | else 391 | echo "✗ not found tool file" 392 | fi 393 | done 394 | } 395 | 396 | # @cmd Check dependencies and environment variables for a specific agent 397 | # @alias agent:check 398 | # @arg agents*[`_choice_agent`] The agent name 399 | check@agent() { 400 | if [[ "${#argc_agents[@]}" -gt 0 ]]; then 401 | agent_names=("${argc_agents[@]}") 402 | else 403 | agent_names=($(cat agents.txt | grep -v '^#')) 404 | fi 405 | for name in "${agent_names[@]}"; do 406 | agent_dir="agents/$name" 407 | echo "Check $agent_dir" 408 | if [[ -d "$agent_dir" ]]; then 409 | for item in "${LANG_CMDS[@]}"; do 410 | lang="${item%:*}" 411 | agent_tools_path="$agent_dir/tools.$lang" 412 | if [[ -f "$agent_tools_path" ]]; then 413 | _check_bin "$name" 414 | _check_envs "$agent_tools_path" 415 | ./scripts/check-deps.sh "$agent_tools_path" 416 | break 417 | fi 418 | done 419 | else 420 | echo "✗ not found agent dir" 421 | fi 422 | done 423 | } 424 | 425 | # @cmd List tools which can be put into functions.txt 426 | # 
@alias tool:list 427 | # Examples: 428 | # argc list-tools > tools.txt 429 | list@tool() { 430 | _choice_tool 431 | } 432 | 433 | # @cmd List agents which can be put into agents.txt 434 | # @alias agent:list 435 | # Examples: 436 | # argc list-agents > agents.txt 437 | list@agent() { 438 | _choice_agent 439 | } 440 | 441 | # @cmd Test the project 442 | test() { 443 | test@tool 444 | test@agent 445 | } 446 | 447 | # @cmd Test tools 448 | # @alias tool:test 449 | test@tool() { 450 | mkdir -p "$TMP_DIR" 451 | names_file="$TMP_DIR/tools.txt" 452 | declarations_file="$TMP_DIR/functions.json" 453 | argc list@tool > "$names_file" 454 | argc build@tool --names-file "$names_file" --declarations-file "$declarations_file" 455 | test-demo@tool 456 | } 457 | 458 | # @cmd Test demo tools 459 | # @alias tool:test-demo 460 | test-demo@tool() { 461 | for item in "${LANG_CMDS[@]}"; do 462 | lang="${item%:*}" 463 | tool="demo_$lang.$lang" 464 | echo "---- Test $tool ---" 465 | argc build-bin@tool "$tool" 466 | argc run@tool $tool '{ 467 | "boolean": true, 468 | "string": "Hello", 469 | "string_enum": "foo", 470 | "integer": 123, 471 | "number": 3.14, 472 | "array": [ 473 | "a", 474 | "b", 475 | "c" 476 | ], 477 | "string_optional": "OptionalValue", 478 | "array_optional": [ 479 | "x", 480 | "y" 481 | ] 482 | }' 483 | echo 484 | done 485 | } 486 | 487 | # @cmd Test agents 488 | # @alias agent:test 489 | test@agent() { 490 | mkdir -p "$TMP_DIR" 491 | names_file="$TMP_DIR/agents.txt" 492 | argc list@agent > "$names_file" 493 | argc build@agent --names-file "$names_file" 494 | test-demo@agent 495 | } 496 | 497 | # @cmd Test demo agents 498 | # @alias agent:test-demo 499 | test-demo@agent() { 500 | echo "---- Test demo agent ---" 501 | args=(demo get_ipinfo '{}') 502 | argc run@agent "${args[@]}" 503 | for item in "${LANG_CMDS[@]}"; do 504 | cmd="${item#*:}" 505 | lang="${item%:*}" 506 | echo "---- Test agents/demo/tools.$lang ---" 507 | if [[ "$cmd" == "sh" ]]; then 508 | "$(argc --argc-shell-path)" ./scripts/run-agent.sh "${args[@]}" 509 | elif command -v "$cmd" &> /dev/null; then 510 | $cmd ./scripts/run-agent.$lang "${args[@]}" 511 | echo 512 | fi 513 | done 514 | } 515 | 516 | # @cmd Clean the project 517 | clean() { 518 | clean@tool 519 | clean@agent 520 | rm -rf "$BIN_DIR/"* 521 | } 522 | 523 | # @cmd Clean tools 524 | # @alias tool:clean 525 | clean@tool() { 526 | _choice_tool | sed -E 's/\.([a-z]+)$//' | xargs -I{} rm -rf "$BIN_DIR/{}" 527 | rm -rf functions.json 528 | } 529 | 530 | # @cmd Clean agents 531 | # @alias agent:clean 532 | clean@agent() { 533 | _choice_agent | xargs -I{} rm -rf "$BIN_DIR/{}" 534 | _choice_agent | xargs -I{} rm -rf agents/{}/functions.json 535 | } 536 | 537 | # @cmd Link a tool as web_search tool 538 | # 539 | # Example: 540 | # argc link-web-search web_search_perplexity.sh 541 | # @arg tool![`_choice_web_search`] The tool work as web_search 542 | link-web-search() { 543 | _link_tool $1 web_search 544 | } 545 | 546 | # @cmd Link a tool as code_interpreter tool 547 | # 548 | # Example: 549 | # argc link-code-interpreter execute_py_code.py 550 | # @arg tool![`_choice_code_interpreter`] The tool work as code_interpreter 551 | link-code-interpreter() { 552 | _link_tool $1 code_interpreter 553 | } 554 | 555 | # @cmd Link this repo to aichat functions_dir 556 | link-to-aichat() { 557 | functions_dir="$(aichat --info | grep -w functions_dir | awk '{$1=""; print substr($0,2)}')" 558 | if [[ -z "$functions_dir" ]]; then 559 | _die "error: your aichat version don't support function 
calling" 560 | fi 561 | if [[ ! -e "$functions_dir" ]]; then 562 | if _is_win; then 563 | current_dir="$(cygpath -w "$(pwd)")" 564 | cmd <<< "mklink /D \"${functions_dir%/}\" \"${current_dir%/}\"" > /dev/null 565 | else 566 | ln -s "$(pwd)" "$functions_dir" 567 | fi 568 | echo "$functions_dir symlinked" 569 | else 570 | echo "$functions_dir already exists" 571 | fi 572 | } 573 | 574 | # @cmd Run mcp command 575 | # @arg args~[?`_choice_mcp_args`] The mcp command and arguments 576 | mcp() { 577 | bash ./scripts/mcp.sh "$@" 578 | } 579 | 580 | # @cmd Create a boilplate tool script 581 | # @alias tool:create 582 | # @arg args~ 583 | create@tool() { 584 | ./scripts/create-tool.sh "$@" 585 | } 586 | 587 | # @cmd Displays version information for required tools 588 | version() { 589 | uname -a 590 | if command -v aichat &> /dev/null; then 591 | aichat --version 592 | fi 593 | argc --argc-version 594 | jq --version 595 | ls --version 2>&1 | head -n 1 596 | for item in "${LANG_CMDS[@]}"; do 597 | cmd="${item#*:}" 598 | if [[ "$cmd" == "bash" ]]; then 599 | echo "$(argc --argc-shell-path) $("$(argc --argc-shell-path)" --version | head -n 1)" 600 | elif command -v "$cmd" &> /dev/null; then 601 | echo "$(_normalize_path "$(which $cmd)") $($cmd --version)" 602 | fi 603 | done 604 | } 605 | 606 | _lang_to_cmd() { 607 | match_lang="$1" 608 | for item in "${LANG_CMDS[@]}"; do 609 | lang="${item%:*}" 610 | if [[ "$lang" == "$match_lang" ]]; then 611 | echo "${item#*:}" 612 | fi 613 | done 614 | } 615 | 616 | _get_agent_tools_path() { 617 | name="$1" 618 | for item in "${LANG_CMDS[@]}"; do 619 | lang="${item%:*}" 620 | entry_file="agents/$name/tools.$lang" 621 | if [[ -f "agents/$name/tools.$lang" ]]; then 622 | echo "$entry_file" 623 | break 624 | fi 625 | done 626 | } 627 | 628 | _build_win_shim() { 629 | kind="$1" 630 | lang="$2" 631 | cmd="$(_lang_to_cmd "$lang")" 632 | if [[ "$lang" == "sh" ]]; then 633 | run="\"$(argc --argc-shell-path)\" --noprofile --norc" 634 | else 635 | if [[ "$cmd" == "python" && -d "$VENV_DIR" ]]; then 636 | run="call \"$(_normalize_path "$PWD/$VENV_DIR/Scripts/activate.bat")\" && python" 637 | else 638 | run="\"$(_normalize_path "$(which $cmd)")\"" 639 | fi 640 | fi 641 | cat <<-EOF 642 | @echo off 643 | setlocal 644 | 645 | set "bin_dir=%~dp0" 646 | for %%i in ("%bin_dir:~0,-1%") do set "script_dir=%%~dpi" 647 | set "script_name=%~n0" 648 | 649 | $run "%script_dir%scripts\run-$kind.$lang" "%script_name%" %* 650 | EOF 651 | } 652 | 653 | _build_py_shim() { 654 | kind="$1" 655 | lang="$2" 656 | cat <<-'EOF' | sed -e "s|__ROOT_DIR__|$PWD|g" -e "s|__VENV_DIR__|$VENV_DIR|g" -e "s/__KIND__/$kind/g" 657 | #!/usr/bin/env bash 658 | set -e 659 | 660 | if [[ -f "__ROOT_DIR__/__VENV_DIR__/bin/activate" ]]; then 661 | source "__ROOT_DIR__/__VENV_DIR__/bin/activate" 662 | fi 663 | 664 | python "__ROOT_DIR__/scripts/run-__KIND__.py" "$(basename "$0")" "$@" 665 | EOF 666 | } 667 | 668 | _check_bin() { 669 | bin_name="$1" 670 | if _is_win; then 671 | bin_name+=".cmd" 672 | fi 673 | if [[ ! 
-f "$BIN_DIR/$bin_name" ]]; then 674 | echo "✗ missing bin/$bin_name" 675 | fi 676 | } 677 | 678 | _check_envs() { 679 | script_path="$1" 680 | envs=( $(sed -E -n 's/.* @env ([A-Z0-9_]+)!.*/\1/p' $script_path) ) 681 | missing_envs=() 682 | for env in $envs; do 683 | if [[ -z "${!env}" ]]; then 684 | missing_envs+=("$env") 685 | fi 686 | done 687 | if [[ -n "$missing_envs" ]]; then 688 | echo "✗ missing envs ${missing_envs[*]}" 689 | fi 690 | } 691 | 692 | _link_tool() { 693 | from="$1" 694 | to="$2.${1##*.}" 695 | rm -rf tools/$to 696 | if _is_win; then 697 | (cd tools && cp -f $from $to) 698 | else 699 | (cd tools && ln -s $from $to) 700 | fi 701 | (cd tools && ls -l $to) 702 | } 703 | 704 | _ask_json_data() { 705 | declaration="$1" 706 | echo 'Missing the JSON data but here are its properties:' 707 | echo "$declaration" | ./scripts/declarations-util.sh pretty-print | sed -n '2,$s/^/>/p' 708 | echo 'Generate placeholder data:' 709 | data="$(echo "$declaration" | _declarations_json_data)" 710 | echo "> $data" 711 | read -e -r -p 'JSON data (Press ENTER to use placeholder): ' res 712 | if [[ -z "$res" ]]; then 713 | argc_json="$data" 714 | else 715 | argc_json="$res" 716 | fi 717 | } 718 | 719 | _declarations_json_data() { 720 | ./scripts/declarations-util.sh generate-json | tail -n +2 721 | } 722 | 723 | _normalize_path() { 724 | if _is_win; then 725 | cygpath -w "$1" 726 | else 727 | echo "$1" 728 | fi 729 | } 730 | 731 | _is_win() { 732 | if [[ "$OS" == "Windows_NT" ]]; then 733 | return 0 734 | else 735 | return 1 736 | fi 737 | } 738 | 739 | _argc_before() { 740 | if [[ -d ".venv/bin/activate" ]]; then 741 | source .venv/bin/activate 742 | fi 743 | } 744 | 745 | _choice_tool() { 746 | for item in "${LANG_CMDS[@]}"; do 747 | lang="${item%:*}" 748 | cmd="${item#*:}" 749 | if command -v "$cmd" &> /dev/null; then 750 | ls -1 tools | grep "\.$lang$" 751 | fi 752 | done 753 | } 754 | 755 | _choice_web_search() { 756 | _choice_tool | grep '^web_search_' 757 | } 758 | 759 | _choice_code_interpreter() { 760 | _choice_tool | grep '^execute_.*_code' 761 | } 762 | 763 | _choice_agent() { 764 | ls -1 agents 765 | } 766 | 767 | _choice_agent_action() { 768 | if [[ "$ARGC_COMPGEN" -eq 1 ]]; then 769 | expr="s/: /\t/" 770 | else 771 | expr="s/:.*//" 772 | fi 773 | argc generate-declarations@agent "$1" --oneline | sed "$expr" 774 | } 775 | 776 | _choice_mcp_args() { 777 | if [[ "$ARGC_COMPGEN" -eq 1 ]]; then 778 | args=( "${argc__positionals[@]}" ) 779 | args[-1]="$ARGC_LAST_ARG" 780 | argc --argc-compgen generic scripts/mcp.sh mcp "${args[@]}" 781 | else 782 | :; 783 | fi 784 | } 785 | 786 | _die() { 787 | echo "$*" >&2 788 | exit 1 789 | } 790 | 791 | if _is_win; then set -o igncr; fi 792 | 793 | # See more details at https://github.com/sigoden/argc 794 | eval "$(argc --argc-eval "$0" "$@")" 795 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) sigoden 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice 
and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LLM Functions 2 | 3 | This project empowers you to effortlessly build powerful LLM tools and agents using familiar languages like Bash, JavaScript, and Python. 4 | 5 | Forget complex integrations, **harness the power of [function calling](https://platform.openai.com/docs/guides/function-calling)** to connect your LLMs directly to custom code and unlock a world of possibilities. Execute system commands, process data, interact with APIs – the only limit is your imagination. 6 | 7 | **Tools Showcase** 8 | ![llm-function-tool](https://github.com/user-attachments/assets/40c77413-30ba-4f0f-a2c7-19b042a1b507) 9 | 10 | **Agents showcase** 11 | ![llm-function-agent](https://github.com/user-attachments/assets/6e380069-8211-4a16-8592-096e909b921d) 12 | 13 | ## Prerequisites 14 | 15 | Make sure you have the following tools installed: 16 | 17 | - [argc](https://github.com/sigoden/argc): A bash command-line framework and command runner 18 | - [jq](https://github.com/jqlang/jq): A JSON processor 19 | 20 | ## Getting Started with [AIChat](https://github.com/sigoden/aichat) 21 | 22 | **Currently, AIChat is the only CLI tool that supports `llm-functions`. We look forward to more tools supporting `llm-functions`.** 23 | 24 | ### 1. Clone the repository 25 | 26 | ```sh 27 | git clone https://github.com/sigoden/llm-functions 28 | cd llm-functions 29 | ``` 30 | 31 | ### 2. Build tools and agents 32 | 33 | #### I. Create a `./tools.txt` file with each tool filename on a new line. 34 | 35 | ``` 36 | get_current_weather.sh 37 | execute_command.sh 38 | #execute_py_code.py 39 | ``` 40 | 41 |
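Tip: instead of typing tool names by hand, the Argcfile can list every available tool for you. A minimal sketch (run from the repository root, assuming `argc` is installed):

```sh
# Write all available tool filenames into ./tools.txt,
# then edit the file and comment out (with #) the tools you don't want enabled.
argc list@tool > tools.txt
```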
42 | Where is the web_search tool? 43 |
44 | 45 | The `web_search` tool itself doesn't exist directly. Instead, you can choose from a variety of web search tools. 46 | 47 | To use one as the `web_search` tool, follow these steps: 48 | 49 | 1. **Choose a Tool:** Available tools include: 50 | * `web_search_aichat.sh` 51 | * `web_search_perplexity.sh` 52 | * `web_search_tavily.sh` 53 | * any other `web_search_*.sh` script you add under [./tools/](./tools/) 54 | 55 | 2. **Link Your Choice:** Use the `argc` command to link your chosen tool as `web_search`. For example, to use `web_search_perplexity.sh`: 56 | 57 | ```sh 58 | $ argc link-web-search web_search_perplexity.sh 59 | ``` 60 | 61 | This command creates a symbolic link, making `web_search.sh` point to your selected `web_search_perplexity.sh` tool. 62 | 63 | A `web_search.sh` tool is now ready to be added to your `./tools.txt`. 64 | 65 |
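The `code_interpreter` tool works the same way: it is a link to one of the `execute_*_code` tools. For example, to use the Python interpreter:

```sh
# Creates tools/code_interpreter.py, which can then be added to ./tools.txt
# or referenced from an agent's tools.txt.
argc link-code-interpreter execute_py_code.py
```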
66 | 67 | #### II. Create a `./agents.txt` file with each agent name on a new line. 68 | 69 | ``` 70 | coder 71 | todo 72 | ``` 73 | 74 | #### III. Build `bin` and `functions.json` 75 | 76 | ```sh 77 | argc build 78 | ``` 79 | 80 | #### IV. Ensure that everything is ready (environment variables, Node/Python dependencies, mcp-bridge server) 81 | 82 | ```sh 83 | argc check 84 | ``` 85 | 86 | ### 3. Link LLM-functions and AIChat 87 | 88 | AIChat expects LLM-functions to be placed in AIChat's **functions_dir** so that AIChat can use the tools and agents that LLM-functions provides. 89 | 90 | You can symlink this repository directory to AIChat's **functions_dir** with: 91 | 92 | ```sh 93 | ln -s "$(pwd)" "$(aichat --info | sed -n 's/^functions_dir\s\+//p')" 94 | # OR 95 | argc link-to-aichat 96 | ``` 97 | 98 | Alternatively, you can tell AIChat where the LLM-functions directory is by using an environment variable: 99 | 100 | ```sh 101 | export AICHAT_FUNCTIONS_DIR="$(pwd)" 102 | ``` 103 | 104 | ### 4. Start using the functions 105 | 106 | Done! Now you can use the tools and agents with AIChat. 107 | 108 | ```sh 109 | aichat --role %functions% what is the weather in Paris? 110 | aichat --agent todo list all my todos 111 | ``` 112 | 113 | ## Writing Your Own Tools 114 | 115 | Building tools for our platform is remarkably straightforward. You can leverage your existing programming knowledge, as tools are essentially just functions written in your preferred language. 116 | 117 | LLM Functions automatically generates the JSON declarations for the tools based on **comments**. Refer to `./tools/demo_sh.sh`, `./tools/demo_js.js`, and `./tools/demo_py.py` for examples of how to use comments for autogeneration of declarations. 118 | 119 | ### Bash 120 | 121 | Create a new bash script in the [./tools/](./tools/) directory (e.g. `execute_command.sh`). 122 | 123 | ```sh 124 | #!/usr/bin/env bash 125 | set -e 126 | 127 | # @describe Execute the shell command. 128 | # @option --command! The command to execute. 129 | 130 | main() { 131 | eval "$argc_command" >> "$LLM_OUTPUT" 132 | } 133 | 134 | eval "$(argc --argc-eval "$0" "$@")" 135 | ``` 136 | 137 | ### JavaScript 138 | 139 | Create a new JavaScript file in the [./tools/](./tools/) directory (e.g. `execute_js_code.js`). 140 | 141 | ```js 142 | /** 143 | * Execute the javascript code in node.js. 144 | * @typedef {Object} Args 145 | * @property {string} code - Javascript code to execute, such as `console.log("hello world")` 146 | * @param {Args} args 147 | */ 148 | exports.run = function ({ code }) { 149 | eval(code); 150 | } 151 | 152 | ``` 153 | 154 | ### Python 155 | 156 | Create a new Python script in the [./tools/](./tools/) directory (e.g. `execute_py_code.py`). 157 | 158 | ```py 159 | def run(code: str): 160 | """Execute the python code. 161 | Args: 162 | code: Python code to execute, such as `print("hello world")` 163 | """ 164 | exec(code) 165 | 166 | ``` 167 | 168 | ## Writing Your Own Agents 169 | 170 | Agent = Prompt + Tools (Function Calling) + Documents (RAG), which is equivalent to OpenAI's GPTs.
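Once an agent directory like the one described below is in place, the Argcfile commands can build and exercise it. A sketch, where `myagent`, the action name, and the JSON arguments are placeholders:

```sh
# Build the agent's bin shim and its functions.json
argc build@agent myagent

# Verify its environment variables and dependencies
argc check@agent myagent

# Call one of the agent's tools directly with JSON arguments
argc run@agent myagent some_action '{"param":"value"}'
```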
171 | 172 | The agent has the following folder structure: 173 | ``` 174 | └── agents 175 | └── myagent 176 | ├── functions.json # JSON declarations for functions (Auto-generated) 177 | ├── index.yaml # Agent definition 178 | ├── tools.txt # Shared tools 179 | └── tools.{sh,js,py} # Agent tools 180 | ``` 181 | 182 | The agent definition file (`index.yaml`) defines crucial aspects of your agent: 183 | 184 | ```yaml 185 | name: TestAgent 186 | description: This is test agent 187 | version: 0.1.0 188 | instructions: You are a test ai agent to ... 189 | conversation_starters: 190 | - What can you do? 191 | variables: 192 | - name: foo 193 | description: This is a foo 194 | documents: 195 | - local-file.txt 196 | - local-dir/ 197 | - https://example.com/remote-file.txt 198 | ``` 199 | 200 | Refer to [./agents/demo](https://github.com/sigoden/llm-functions/tree/main/agents/demo) for examples of how to implement a agent. 201 | 202 | ## MCP (Model Context Protocol) 203 | 204 | - [mcp/server](https://github.com/sigoden/llm-functions/tree/main/mcp/server): Let LLM-Functions tools/agents be used through the Model Context Protocol. 205 | - [mcp/bridge](https://github.com/sigoden/llm-functions/tree/main/mcp/bridge): Let external MCP tools be used by LLM-Functions. 206 | 207 | ## Documents 208 | 209 | - [Tool Guide](https://github.com/sigoden/llm-functions/blob/main/docs/tool.md) 210 | - [Agent Guide](https://github.com/sigoden/llm-functions/blob/main/docs/agent.md) 211 | - [Argc Commands](https://github.com/sigoden/llm-functions/blob/main/docs/argcfile.md) 212 | 213 | ## License 214 | 215 | The project is under the MIT License, Refer to the [LICENSE](https://github.com/sigoden/llm-functions/blob/main/LICENSE) file for detailed information. 216 | -------------------------------------------------------------------------------- /agents/coder/README.md: -------------------------------------------------------------------------------- 1 | # Coder 2 | 3 | An AI agent that assists your coding tasks. 4 | 5 | ## Features 6 | 7 | - 🏗️ Intelligent project structure creation and management 8 | - 🖼️ Convert screenshots into clean, functional code 9 | - 📁 Comprehensive file system operations (create folders, files, read/write files) 10 | - 🧐 Advanced code analysis and improvement suggestions 11 | - 📊 Precise diff-based file editing for controlled code modifications 12 | 13 | ## Examples 14 | 15 | ![image](https://github.com/user-attachments/assets/97324fa9-f5ea-44cd-8aea-024d1442ca81) 16 | 17 | https://github.com/user-attachments/assets/9363990f-15a9-48c6-b227-8900cfbe0a18 18 | 19 | ## Similar Projects 20 | 21 | - https://github.com/Doriandarko/claude-engineer 22 | - https://github.com/paul-gauthier/aider -------------------------------------------------------------------------------- /agents/coder/index.yaml: -------------------------------------------------------------------------------- 1 | name: Coder 2 | description: An AI agent that assists your coding tasks 3 | version: 0.1.0 4 | instructions: | 5 | You are an exceptional software developer with vast knowledge across multiple programming languages, frameworks, and best practices. Your capabilities include: 6 | 7 | 1. Creating and managing project structures 8 | 2. Writing, debugging, and improving code across multiple languages 9 | 3. Providing architectural insights and applying design patterns 10 | 4. Staying current with the latest technologies and best practices 11 | 5. 
Analyzing and manipulating files within the project directory 12 | 13 | Available tools and their optimal use cases: 14 | 15 | 1. fs_mkdir: Create new directories in the project structure. 16 | 2. fs_create: Generate new files with specified contents. 17 | 3. fs_patch: Examine and modify existing files. 18 | 4. fs_cat: View the contents of existing files without making changes. 19 | 5. fs_ls: Understand the current project structure or locate specific files. 20 | 21 | Tool Usage Guidelines: 22 | - Always use the most appropriate tool for the task at hand. 23 | - For file modifications, use fs_patch. Read the file first, then apply changes if needed. 24 | - After making changes, always review the diff output to ensure accuracy. 25 | 26 | Project Creation and Management: 27 | 1. Start by creating a root folder for new projects. 28 | 2. Create necessary subdirectories and files within the root folder. 29 | 3. Organize the project structure logically, following best practices for the specific project type. 30 | 31 | Code Editing Best Practices: 32 | 1. Always read the file content before making changes. 33 | 2. Analyze the code and determine necessary modifications. 34 | 3. Pay close attention to existing code structure to avoid unintended alterations. 35 | 4. Review changes thoroughly after each modification. 36 | 37 | Always strive for accuracy, clarity, and efficiency in your responses and actions. 38 | 39 | Answer the user's request using relevant tools (if they are available). Before calling a tool, do some analysis within tags. First, think about which of the provided tools is the relevant tool to answer the user's request. Second, go through each of the required parameters of the relevant tool and determine if the user has directly provided or given enough information to infer a value. When deciding if the parameter can be inferred, carefully consider all the context to see if it supports a specific value. If all of the required parameters are present or can be reasonably inferred, close the thinking tag and proceed with the tool call. BUT, if one of the values for a required parameter is missing, DO NOT invoke the function (not even with fillers for the missing params) and instead, ask the user to provide the missing parameters. DO NOT ask for more information on optional parameters if it is not provided. 40 | 41 | Do not reflect on the quality of the returned search results in your response. 42 | 43 | conversation_starters: 44 | - "Create a new Python project structure for a web application" 45 | - "Explain the code in file.py and suggest improvements" 46 | - "Search for the latest best practices in React development" 47 | - "Help me debug this error: [paste your error message]" 48 | -------------------------------------------------------------------------------- /agents/coder/tools.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @env LLM_OUTPUT=/dev/stdout The output path 5 | 6 | ROOT_DIR="${LLM_ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}" 7 | 8 | # @cmd Create a new file at the specified path with contents. 9 | # @option --path! The path where the file should be created 10 | # @option --contents! The contents of the file 11 | fs_create() { 12 | "$ROOT_DIR/utils/guard_path.sh" "$argc_path" "Create '$argc_path'?" 
13 | mkdir -p "$(dirname "$argc_path")" 14 | printf "%s" "$argc_contents" > "$argc_path" 15 | echo "File created: $argc_path" >> "$LLM_OUTPUT" 16 | } 17 | 18 | # See more details at https://github.com/sigoden/argc 19 | eval "$(argc --argc-eval "$0" "$@")" 20 | -------------------------------------------------------------------------------- /agents/coder/tools.txt: -------------------------------------------------------------------------------- 1 | fs_mkdir.sh 2 | fs_ls.sh 3 | fs_patch.sh 4 | fs_cat.sh -------------------------------------------------------------------------------- /agents/demo/README.md: -------------------------------------------------------------------------------- 1 | # Demo 2 | 3 | This agent serves as a demo to guide agent development and showcase various agent capabilities. 4 | -------------------------------------------------------------------------------- /agents/demo/index.yaml: -------------------------------------------------------------------------------- 1 | name: Demo 2 | description: An AI agent that demonstrates agent capabilities 3 | version: 0.1.0 4 | instructions: | 5 | You are a AI agent designed to demonstrate agent capabilities. 6 | 7 | 8 | {{__tools__}} 9 | 10 | 11 | 12 | os: {{__os__}} 13 | os_family: {{__os_family__}} 14 | arch: {{__arch__}} 15 | shell: {{__shell__}} 16 | locale: {{__locale__}} 17 | now: {{__now__}} 18 | cwd: {{__cwd__}} 19 | 20 | 21 | 22 | username: {{username}} 23 | 24 | variables: 25 | - name: username 26 | description: Your user name 27 | conversation_starters: 28 | - What is my username? 29 | - What is my current shell? 30 | - What is my ip? 31 | - How much disk space is left on my PC?? 32 | - How to create an agent? 33 | documents: 34 | - README.md 35 | - https://github.com/sigoden/llm-functions/blob/main/README.md -------------------------------------------------------------------------------- /agents/demo/tools.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Get the system info 3 | */ 4 | exports.get_ipinfo = async function () { 5 | const res = await fetch("https://httpbin.org/ip") 6 | return res.json(); 7 | } 8 | -------------------------------------------------------------------------------- /agents/demo/tools.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | 3 | def get_ipinfo(): 4 | """ 5 | Get the ip info 6 | """ 7 | with urllib.request.urlopen("https://httpbin.org/ip") as response: 8 | data = response.read() 9 | return data.decode('utf-8') 10 | -------------------------------------------------------------------------------- /agents/demo/tools.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @env LLM_OUTPUT=/dev/stdout The output path 5 | 6 | # @cmd Get the ip info 7 | get_ipinfo() { 8 | curl -fsSL https://httpbin.org/ip >> "$LLM_OUTPUT" 9 | } 10 | 11 | # See more details at https://github.com/sigoden/argc 12 | eval "$(argc --argc-eval "$0" "$@")" 13 | -------------------------------------------------------------------------------- /agents/demo/tools.txt: -------------------------------------------------------------------------------- 1 | execute_command.sh -------------------------------------------------------------------------------- /agents/json-viewer/README.md: -------------------------------------------------------------------------------- 1 | # Json-Viewer 2 | 3 | An AI agent to view and filter json data 4 | 5 | The 
agent only sends the JSON schema instead of the JSON data to the LLM, which has the following advantages: 6 | 7 | - Less data transmission, faster response speed, and lower token costs. 8 | - More privacy, as no actual JSON data is transmitted. 9 | 10 | ![json-viewer](https://github.com/user-attachments/assets/3ae126f4-d741-4929-bf70-640530ccdfd8) -------------------------------------------------------------------------------- /agents/json-viewer/index.yaml: -------------------------------------------------------------------------------- 1 | name: Json-Viewer 2 | description: An AI agent to view and filter json data 3 | version: 0.1.0 4 | instructions: "" 5 | dynamic_instructions: true -------------------------------------------------------------------------------- /agents/json-viewer/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "@inquirer/input": "^4.0.2", 4 | "to-json-schema": "^0.2.5" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /agents/json-viewer/tools.js: -------------------------------------------------------------------------------- 1 | const fs = require("node:fs/promises"); 2 | const { exec, spawn } = require("node:child_process"); 3 | const { promisify } = require("node:util"); 4 | const path = require("node:path"); 5 | const { tmpdir } = require("node:os"); 6 | 7 | const toJsonSchema = require('to-json-schema'); 8 | const input = require("@inquirer/input").default; 9 | 10 | exports._instructions = async function () { 11 | const value = await input({ message: "Enter the json file path or command to generate json", required: true }); 12 | let json_file_path; 13 | let generate_json_command_context = ""; 14 | try { 15 | await fs.access(value); 16 | json_file_path = value; 17 | } catch { 18 | generate_json_command_context = `command_to_generate_json: \`${value}\`\n`; 19 | const { stdout } = await promisify(exec)(value, { maxBuffer: 100 * 1024 * 1024 }); 20 | json_file_path = path.join(tmpdir(), `${process.env.LLM_AGENT_NAME}-${process.pid}.data.json`); 21 | await fs.writeFile(json_file_path, stdout); 22 | console.log(`ⓘ Generated json data saved to: ${json_file_path}`); 23 | } 24 | 25 | const json_data = await fs.readFile(json_file_path, "utf8"); 26 | const json_schema = toJsonSchema(JSON.parse(json_data)); 27 | 28 | return `You are a AI agent that can view and filter json data with jq. 29 | 30 | ## Context 31 | ${generate_json_command_context}json_file_path: ${json_file_path} 32 | json_schema: ${JSON.stringify(json_schema, null, 2)} 33 | ` 34 | } 35 | 36 | /** 37 | * Print the json data. 
38 | * 39 | * @typedef {Object} Args 40 | * @property {string} json_file_path The json file path 41 | * @property {string} jq_expr The jq expression 42 | * @param {Args} args 43 | */ 44 | exports.print_json = async function (args) { 45 | const { json_file_path, jq_expr } = args; 46 | return new Promise((resolve, reject) => { 47 | const child = spawn("jq", ["-r", jq_expr, json_file_path], { stdio: "inherit" }); 48 | 49 | child.on('close', code => { 50 | if (code === 0) { 51 | resolve(); 52 | } else { 53 | reject(new Error(`jq exited with code ${code}`)); 54 | } 55 | }); 56 | 57 | child.on('error', err => { 58 | reject(err); 59 | }); 60 | }); 61 | } 62 | -------------------------------------------------------------------------------- /agents/sql/README.md: -------------------------------------------------------------------------------- 1 | # SQL 2 | 3 | An AI agent that helps you manage a SQL database. 4 | 5 | > The tool script uses [usql](https://github.com/xo/usql) to interact with SQL, it supports all mainstream databases. 6 | 7 | ![image](https://github.com/user-attachments/assets/28bc1118-5f87-4571-a1c9-6c8cec4636d5) 8 | -------------------------------------------------------------------------------- /agents/sql/index.yaml: -------------------------------------------------------------------------------- 1 | name: Sql 2 | description: An AI agent that helps you manage a SQL database 3 | version: 0.1.0 4 | instructions: | 5 | You are an AI agent that manages a SQL database. 6 | 7 | Available tools: 8 | {{__tools__}} 9 | variables: 10 | - name: dsn 11 | description: The database connection url. e.g. pgsql://user:pass@host:port 12 | conversation_starters: 13 | - What you can do? -------------------------------------------------------------------------------- /agents/sql/tools.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | ROOT_DIR="${LLM_ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}" 6 | 7 | # @meta require-tools usql 8 | # @env LLM_AGENT_VAR_DSN! The database connection url. e.g. pgsql://user:pass@host:port 9 | 10 | # @cmd Execute a SELECT query 11 | # @option --query! SELECT SQL query to execute 12 | read_query() { 13 | if ! grep -qi '^select' <<<"$argc_query"; then 14 | echo "error: only SELECT query is allowed" >&2 15 | exit 1 16 | fi 17 | _run_sql "$argc_query" 18 | } 19 | 20 | # @cmd Execute an SQL query 21 | # @option --query! SQL query to execute 22 | write_query() { 23 | "$ROOT_DIR/utils/guard_operation.sh" "Execute SQL?" 24 | _run_sql "$argc_query" 25 | } 26 | 27 | # @cmd List all tables 28 | list_tables() { 29 | _run_sql "\dt+" 30 | } 31 | 32 | # @cmd Get the schema information for a specific table 33 | # @option --table-name! Name of the table to describe 34 | describe_table() { 35 | _run_sql "\d $argc_table_name" 36 | } 37 | 38 | _run_sql() { 39 | usql "$LLM_AGENT_VAR_DSN" -c "$1" >> "$LLM_OUTPUT" 40 | } 41 | 42 | # See more details at https://github.com/sigoden/argc 43 | eval "$(argc --argc-eval "$0" "$@")" 44 | -------------------------------------------------------------------------------- /agents/todo/README.md: -------------------------------------------------------------------------------- 1 | # Todo 2 | 3 | An AI agent that helps you manage a todo list. 
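A quick way to try it from the repository root (commands taken from the project docs; assumes the agent has been built with `argc build`):

```sh
# Call an agent tool directly with JSON arguments
argc run@agent todo add_todo '{"desc":"Watch a movie"}'

# Or drive the agent through AIChat
aichat --agent todo list all my todos
```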
4 | 5 | ![image](https://github.com/user-attachments/assets/6e380069-8211-4a16-8592-096e909b921d) 6 | -------------------------------------------------------------------------------- /agents/todo/index.yaml: -------------------------------------------------------------------------------- 1 | name: Todo 2 | description: An AI agent that helps you manage a todo list 3 | version: 0.1.0 4 | instructions: | 5 | You are AI agent that manage a todo list. 6 | 7 | Available tools: 8 | {{__tools__}} 9 | 10 | When outputting the todo list to the user, don't simply print JSON data; instead, output it in Markdown format. 11 | `{"id": 1, "desc": "Buy milk", "done": true }` => `1. [x] Buy milk` 12 | `{"id": 2, "desc": "Buy eggs", "done": false}` => `2. [ ] Buy eggs` 13 | 14 | conversation_starters: 15 | - "List all todos" 16 | - "Clean the entire todo list" 17 | - "Add a new todo: Buy milk" 18 | - "Done todo id=1" 19 | - "Delete todo id=1" -------------------------------------------------------------------------------- /agents/todo/tools.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @env LLM_OUTPUT=/dev/stdout The output path 5 | 6 | ROOT_DIR="${LLM_ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}" 7 | 8 | # @cmd Add a new todo item 9 | # @option --desc! The todo description 10 | add_todo() { 11 | todos_file="$(_get_todos_file)" 12 | if [[ -f "$todos_file" ]]; then 13 | data="$(cat "$todos_file")" 14 | num="$(echo "$data" | jq '[.[].id] | max + 1')" 15 | else 16 | num=1 17 | data="[]" 18 | fi 19 | echo "$data" | \ 20 | jq --arg new_id $num --arg new_desc "$argc_desc" \ 21 | '. += [{"id": $new_id | tonumber, "desc": $new_desc, "done": false}]' \ 22 | > "$todos_file" 23 | echo "Successfully added todo id=$num" >> "$LLM_OUTPUT" 24 | } 25 | 26 | # @cmd Delete an todo item 27 | # @option --id! The todo id 28 | del_todo() { 29 | todos_file="$(_get_todos_file)" 30 | if [[ -f "$todos_file" ]]; then 31 | data="$(cat "$todos_file")" 32 | echo "$data" | \ 33 | jq '[.[] | select(.id != '$argc_id')]' \ 34 | > "$todos_file" 35 | echo "Successfully deleted todo id=$argc_id" >> "$LLM_OUTPUT" 36 | else 37 | echo "The operation failed because the todo list is currently empty." >> "$LLM_OUTPUT" 38 | fi 39 | } 40 | 41 | # @cmd Set a todo item status as done 42 | # @option --id! The todo id 43 | done_todo() { 44 | todos_file="$(_get_todos_file)" 45 | if [[ -f "$todos_file" ]]; then 46 | data="$(cat "$todos_file")" 47 | echo "$data" | \ 48 | jq '. |= map(if .id == '$argc_id' then .done = true else . end)' \ 49 | > "$todos_file" 50 | echo "Successfully mark todo id=$argc_id as done" >> "$LLM_OUTPUT" 51 | else 52 | echo "The operation failed because the todo list is currently empty." >> "$LLM_OUTPUT" 53 | fi 54 | } 55 | 56 | # @cmd Display the current todo list in json format 57 | list_todos() { 58 | todos_file="$(_get_todos_file)" 59 | if [[ -f "$todos_file" ]]; then 60 | cat "$todos_file" >> "$LLM_OUTPUT" 61 | else 62 | echo '[]' >> "$LLM_OUTPUT" 63 | fi 64 | } 65 | 66 | # @cmd Clean the entire todo list 67 | clear_todos() { 68 | todos_file="$(_get_todos_file)" 69 | if [[ -f "$todos_file" ]]; then 70 | "$ROOT_DIR/utils/guard_operation.sh" "Clean the entire todo list?" 71 | rm -rf "$todos_file" 72 | echo "Successfully cleaned the entire todo list" >> "$LLM_OUTPUT" 73 | else 74 | echo "The operation failed because the todo list is currently empty." 
>> "$LLM_OUTPUT" 75 | fi 76 | } 77 | 78 | _get_todos_file() { 79 | todos_dir="${LLM_AGENT_CACHE_DIR:-.}" 80 | mkdir -p "$todos_dir" 81 | echo "$todos_dir/todos.json" 82 | } 83 | 84 | # See more details at https://github.com/sigoden/argc 85 | eval "$(argc --argc-eval "$0" "$@")" 86 | -------------------------------------------------------------------------------- /docs/agent.md: -------------------------------------------------------------------------------- 1 | # Agent 2 | 3 | ## folder structure 4 | 5 | The agent follows a specific organizational structure to ensure streamlined functionality and easy access to essential files: 6 | ``` 7 | └── agents 8 | └── myagent 9 | ├── functions.json # Auto-generated JSON declarations for functions 10 | ├── index.yaml # Main agent definition file 11 | ├── tools.txt # List of shared tools 12 | └── tools.{sh,js,py} # Scripts implementing agent-specific tools 13 | ``` 14 | 15 | ## index.yaml 16 | 17 | This is the main definition file for your agent where you provide all essential information and configuration for the agent. 18 | 19 | ### metadata 20 | 21 | Metadata provides basic information about the agent: 22 | 23 | - `name`: A unique name for your agent, which helps in identifying and referencing the agent. 24 | - `description`: A brief explanation of what the agent is or its primary purpose. 25 | - `version`: The version number of the agent, which helps track changes or updates to the agent over time. 26 | 27 | ```yaml 28 | name: TestAgent 29 | description: This is test agent 30 | version: 0.1.0 31 | ``` 32 | 33 | ### instructions 34 | 35 | Defines the initial context or behavior directives for the agent: 36 | 37 | ```yaml 38 | instructions: You are a test ai agent to ... 39 | ``` 40 | 41 | ### variables 42 | 43 | Variables store user-related data, such as behavior or preferences. Below is the syntax for defining variables: 44 | 45 | ```yaml 46 | variables: 47 | - name: foo 48 | description: This is a foo 49 | - name: bar 50 | description: This is a bar with default value 51 | default: val 52 | ``` 53 | > For sensitive information such as api_key, client_id, client_secret, and token, it's recommended to use environment variables instead of agent variables. 54 | 55 | When use define variables, please avoid these built-in variables: 56 | 57 | | name | description | example | 58 | | :-------------- | :-------------------------------------------- | :----------------------- | 59 | | `__os__` | Operating system name | linux | 60 | | `__os_family__` | Operating system family | unix | 61 | | `__arch__` | System architecture | x86_64 | 62 | | `__shell__` | Current user's default shell | bash | 63 | | `__locale__` | User's preferred language and region settings | en-US | 64 | | `__now__` | Current timestamp in ISO 8601 format | 2024-07-29T08:11:24.367Z | 65 | | `__cwd__` | Current working directory | /tmp | 66 | | `__tools__` | List of agent tools | | 67 | 68 | Variables can be used within `instructions` and within tool scripts: 69 | 70 | ```yaml 71 | instructions: | 72 | The instructions can access user-defined variables: {{foo}} and {{bar}}, or built-in variables: {{__cwd__}} 73 | ``` 74 | 75 | ```sh 76 | echo "he tools script can access user-defined variables in environment variables: $LLM_AGENT_VAR_FOO and $LLM_AGENT_VAR_BAR" 77 | ``` 78 | 79 | ### documents 80 | 81 | A list of resources or references that the agent can access. Documents are used for building RAG. 
82 | 83 | ```yaml 84 | documents: 85 | - local-file.txt 86 | - local-dir/ 87 | - https://example.com/remote-file.txt 88 | ``` 89 | 90 | > All local files and directories are relative to the agent directory (where index.yaml is located). 91 | 92 | ### conversation_starters 93 | 94 | Define Predefined prompts or questions that users can ask to initiate interactions or conversations with the agent. 95 | This helps provide guidance for users on how to engage with the agent effectively. 96 | 97 | ```yaml 98 | conversation_starters: 99 | - What can you do? 100 | ``` 101 | 102 | ## tools.{sh,js,py} 103 | 104 | Scripts for implementing tools tailored to the agent's unique requirements. 105 | 106 | ## tools.txt 107 | 108 | `tools.txt` facilitates the reuse of tools specified in the `/tools` directory within this project. 109 | -------------------------------------------------------------------------------- /docs/argcfile.md: -------------------------------------------------------------------------------- 1 | # Argcfile 2 | 3 | The [Argcfile.sh](https://github.com/sigoden/llm-functions/blob/main/Argcfile.sh) is a powerful Bash script designed to streamline the process of managing LLM functions and agents in your AIChat environment. 4 | 5 | We encourage running `Argcfile.sh` using `argc`. Because `argc` provides better autocompletion, it can also be used without trouble on Windows. 6 | 7 | Argcfile.sh is to argc what Makefile is to make. 8 | 9 | https://github.com/user-attachments/assets/1acef548-4735-49c1-8f60-c4e0baf528de 10 | 11 | ## Usage 12 | 13 | ```sh 14 | # -------- Help -------- 15 | argc -h # Print help information 16 | argc -h # Print help information for 17 | 18 | # -------- Build -------- 19 | # Build all 20 | argc build 21 | 22 | # Build all tools 23 | argc build@tool 24 | # Build specific tools 25 | argc build@tool get_current_weather.sh execute_command.sh 26 | 27 | # Build all agents 28 | argc build@agent 29 | # Build specific agents 30 | argc build@agent coder todo 31 | 32 | # -------- Check -------- 33 | # Check all 34 | argc check 35 | 36 | # Check all tools 37 | argc check@tool 38 | # Check specific tools 39 | argc check@tool get_current_weather.sh execute_command.sh 40 | 41 | # Check all agents 42 | argc check@agent 43 | # Check specific agents 44 | argc check@agent coder todo 45 | 46 | # -------- Run -------- 47 | # Run tool 48 | argc run@tool get_current_weather.sh '{"location":"London"}' 49 | # Run agent tool 50 | argc run@agent todo add_todo '{"desc":"Watch a movie"}' 51 | 52 | # -------- Test -------- 53 | # Test all 54 | argc test 55 | # Test tools 56 | argc test@tool 57 | # Test agents 58 | argc test@agent 59 | 60 | # -------- Clean -------- 61 | # Clean all 62 | argc clean 63 | # Clean tools 64 | argc clean@tool 65 | # Clean agents 66 | argc clean@agent 67 | 68 | # -------- Link -------- 69 | argc link-web-search web_search_tavily.sh 70 | argc link-code-interpreter execute_py_code.py 71 | 72 | # -------- Misc -------- 73 | # Link this repo to aichat functions_dir 74 | argc link-to-aichat 75 | # Displays version information for required tools 76 | argc version 77 | ``` 78 | 79 | ## MCP Usage 80 | 81 | ```sh 82 | # Start/restart the mcp bridge server 83 | argc mcp start 84 | 85 | # Stop the mcp bridge server 86 | argc mcp stop 87 | 88 | # Run the mcp tool 89 | argc mcp run@tool fs_read_file '{"path":"/tmp/file1"}' 90 | 91 | # Show the logs 92 | argc mcp logs 93 | ``` 94 | -------------------------------------------------------------------------------- 
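As a quick illustration of how the Argcfile commands above fit together, here is a minimal end-to-end walkthrough for a brand-new tool. It is only a sketch: the tool name `_hello.sh` and its `name!` parameter are placeholders invented for this example (underscore-prefixed scripts under `tools/` are git-ignored, which makes them convenient for local experiments).

```sh
# Scaffold tools/_hello.sh with a required --name option
argc create@tool _hello.sh name!

# Verify the script (dependencies, generated declaration)
argc check@tool _hello.sh

# Rebuild declarations so the new tool becomes callable
argc build@tool _hello.sh

# Invoke the tool with JSON arguments
argc run@tool _hello.sh '{"name": "world"}'
```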
/docs/environment-variables.md: -------------------------------------------------------------------------------- 1 | # Environment Variables 2 | 3 | ## Injected by `run-tool.*`/`run-agent.*` 4 | 5 | | Name | Description | 6 | | --------------------- | -------------------------------------------------------------------------------------------------------------------------- | 7 | | `LLM_ROOT_DIR` | Path to `<llm-functions-dir>` | 8 | | `LLM_TOOL_NAME` | Tool name, such as `execute_command` | 9 | | `LLM_TOOL_CACHE_DIR` | Path to `<llm-functions-dir>/cache/<tool-name>`,<br>
The tool script can use this directory to store some cache data | 10 | | `LLM_AGENT_NAME` | Agent name, such as `todo` | 11 | | `LLM_AGENT_FUNC` | Agent function, such as `list_todos` | 12 | | `LLM_AGENT_ROOT_DIR` | Path to `<llm-functions-dir>/agents/<agent-name>` | 13 | | `LLM_AGENT_CACHE_DIR` | Path to `<llm-functions-dir>/cache/<agent-name>`,<br>
The agent tool script can use this directory to store some cache data | 14 | 15 | ## Injected by runtime (AIChat) 16 | 17 | | Name | Description | 18 | | ---------------------- | ---------------------------------------------------- | 19 | | `LLM_OUTPUT` | File to store the the execution results of the tool. | 20 | | `LLM_AGENT_VAR_` | Agent variables. | 21 | 22 | ## Provided by users 23 | 24 | | Name | Description | 25 | | ---------------------- | ------------------------------------------------------------------------------------------------------------ | 26 | | `LLM_DUMP_RESULTS` | Controls whether to print the execution results of the tool, e.g. `get_current_weather\|fs.*\|todo:.*`, `.*` | 27 | | `LLM_MCP_NEED_CONFIRM`| Controls whether to prompt for confirmation before executing certain tools, e.g., `git_commit\|git_reset`, `.*` . | 28 | | `LLM_MCP_SKIP_CONFIRM`| Controls whether to bypass confirmation requests for certain tools, e.g., `git_status\|git_diff.*`, `.*` . | 29 | 30 | > LLM-Functions supports `.env`, just put environment variables into dotenv file to make it work. -------------------------------------------------------------------------------- /docs/tool.md: -------------------------------------------------------------------------------- 1 | # Tool 2 | 3 | This document guides you on creating custom tools for the LLM Functions framework in Bash, JavaScript, and Python. 4 | 5 | ## Defining Tool Parameters 6 | 7 | To define the parameters that your tool accepts, you will use specially formatted comments within your tool's source code. 8 | The `Argcfile.sh` script utilizes these comments to automatically generate the function declarations needed by the LLM. 9 | 10 | ### Json Schema 11 | 12 | The following JSON schema includes various types of properties. We will use this as an example to see how to write comments in each language so they can be automatically generated. 13 | 14 | ```json 15 | { 16 | "name": "demo", 17 | "description": "Demonstrate how to create a tool using Javascript and how to use comments.", 18 | "parameters": { 19 | "type": "object", 20 | "properties": { 21 | "string": { 22 | "type": "string", 23 | "description": "Define a required string property" 24 | }, 25 | "string_enum": { 26 | "type": "string", 27 | "enum": [ 28 | "foo", 29 | "bar" 30 | ], 31 | "description": "Define a required string property with enum" 32 | }, 33 | "string_optional": { 34 | "type": "string", 35 | "description": "Define a optional string property" 36 | }, 37 | "boolean": { 38 | "type": "boolean", 39 | "description": "Define a required boolean property" 40 | }, 41 | "integer": { 42 | "type": "integer", 43 | "description": "Define a required integer property" 44 | }, 45 | "number": { 46 | "type": "number", 47 | "description": "Define a required number property" 48 | }, 49 | "array": { 50 | "type": "array", 51 | "items": { 52 | "type": "string" 53 | }, 54 | "description": "Define a required string array property" 55 | }, 56 | "array_optional": { 57 | "type": "array", 58 | "items": { 59 | "type": "string" 60 | }, 61 | "description": "Define a optional string array property" 62 | } 63 | }, 64 | "required": [ 65 | "string", 66 | "string_enum", 67 | "boolean", 68 | "integer", 69 | "number", 70 | "array" 71 | ] 72 | } 73 | } 74 | ``` 75 | 76 | ### Bash 77 | 78 | Use `# @describe`, `# @option`, and `# @flag` comments to define your tool's parameters. 79 | 80 | * `# @describe `: A brief description of your tool's functionality. This is required. 
81 | 82 | * `# @option --[!][] `: Defines an option. 83 | * `--`: The name of the option (use kebab-case). 84 | * `!`: Indicates a required option. 85 | * ``: The data type (e.g., `INT`, `NUM`, ``). If omitted, defaults to `STRING`. 86 | * ``: Any constraints (e.g., `[foo|bar]` for an enum). 87 | * ``: A description of the option. 88 | 89 | * `# @flag -- `: Defines a boolean flag. 90 | * `--`: The name of the flag (use kebab-case). 91 | * ``: A description of the flag. 92 | 93 | **Example ([tools/demo_sh.sh](https://github.com/sigoden/llm-functions/blob/main/tools/demo_sh.sh)):** 94 | 95 | ```sh file=tools/demo_sh.sh 96 | #!/usr/bin/env bash 97 | set -e 98 | 99 | # @describe Demonstrate how to create a tool using Bash and how to use comment tags. 100 | # @option --string! Define a required string property 101 | # @option --string-enum![foo|bar] Define a required string property with enum 102 | # @option --string-optional Define a optional string property 103 | # @flag --boolean Define a boolean property 104 | # @option --integer! Define a required integer property 105 | # @option --number! Define a required number property 106 | # @option --array+ Define a required string array property 107 | # @option --array-optional* Define a optional string array property 108 | 109 | # @env LLM_OUTPUT=/dev/stdout The output path 110 | 111 | main() { 112 | # ... your bash code ... 113 | } 114 | 115 | eval "$(argc --argc-eval "$0" "$@")" 116 | ``` 117 | 118 | ### JavaScript 119 | 120 | Use JSDoc-style comments to define your tool's parameters. The `@typedef` block defines the argument object, and each property within that object represents a parameter. 121 | 122 | * `/** ... */`: JSDoc comment block containing the description and parameter definitions. 123 | * `@typedef {Object} Args`: Defines the type of the argument object. 124 | * `@property {} `: Defines a property (parameter) of the `Args` object. 125 | * ``: The data type (e.g., `string`, `boolean`, `number`, `string[]`, `{foo|bar}`). 126 | * ``: The name of the parameter. 127 | * ``: A description of the parameter. 128 | * `[]`: Indicates an optional parameter. 129 | 130 | **Example ([tools/demo_js.js](https://github.com/sigoden/llm-functions/blob/main/tools/demo_js.js)):** 131 | 132 | ```js file=tools/demo_js.js 133 | /** 134 | * Demonstrate how to create a tool using Javascript and how to use comments. 135 | * @typedef {Object} Args 136 | * @property {string} string - Define a required string property 137 | * @property {'foo'|'bar'} string_enum - Define a required string property with enum 138 | * @property {string} [string_optional] - Define a optional string property 139 | * @property {boolean} boolean - Define a required boolean property 140 | * @property {Integer} integer - Define a required integer property 141 | * @property {number} number - Define a required number property 142 | * @property {string[]} array - Define a required string array property 143 | * @property {string[]} [array_optional] - Define a optional string array property 144 | * @param {Args} args 145 | */ 146 | exports.run = function (args) { 147 | // ... your JavaScript code ... 148 | } 149 | ``` 150 | 151 | Of course, you can also use ESM `export` expressions to export functions. 152 | ```js 153 | export function run() { 154 | // ... your JavaScript code ... 155 | } 156 | ``` 157 | 158 | ### Python 159 | 160 | Use type hints and docstrings to define your tool's parameters. 161 | 162 | * `def run(...)`: Function definition. 
163 | * ` : `: Type hints with descriptions in the docstring. 164 | * ``: The data type (e.g., `str`, `bool`, `int`, `float`, `List[str]`, `Literal["foo", "bar"]`). 165 | * ``: The name of the parameter. 166 | * ``: Description of the parameter. 167 | * `Optional[...]`: Indicates an optional parameter. 168 | 169 | **Example ([tools/demo_py.py](https://github.com/sigoden/llm-functions/blob/main/tools/demo_py.py)):** 170 | 171 | ```py file=tools/demo_py.py 172 | def run( 173 | string: str, 174 | string_enum: Literal["foo", "bar"], 175 | boolean: bool, 176 | integer: int, 177 | number: float, 178 | array: List[str], 179 | string_optional: Optional[str] = None, 180 | array_optional: Optional[List[str]] = None, 181 | ): 182 | """Demonstrate how to create a tool using Python and how to use comments. 183 | Args: 184 | string: Define a required string property 185 | string_enum: Define a required string property with enum 186 | boolean: Define a required boolean property 187 | integer: Define a required integer property 188 | number: Define a required number property 189 | array: Define a required string array property 190 | string_optional: Define a optional string property 191 | array_optional: Define a optional string array property 192 | """ 193 | # ... your Python code ... 194 | ``` 195 | ## Common tools 196 | 197 | Common tools can be found in `tools/.{sh,js,py}`. Each script defines a single tool. 198 | 199 | ## Agent tools 200 | 201 | Agents can possess their own toolset scripts located under `agents//tools.{sh,js,py}`, which can contain multiple tool functions. 202 | 203 | The following is an example of git agent: 204 | 205 | ### Bash 206 | 207 | ```sh file=agents/git/tools.sh 208 | # @cmd Shows the working tree status 209 | git_status() { 210 | # ... your bash code ... 211 | } 212 | 213 | # @cmd Shows differences between branches or commits 214 | # @option --target! Shows differences between branches or commits 215 | git_diff() { 216 | # ... your bash code ... 217 | } 218 | 219 | eval "$(argc --argc-eval "$0" "$@")" 220 | ``` 221 | 222 | > In `tools/.sh`, we use the `@describe` comment tag and a single `main` function, since it has only one function and no subcommands. 223 | > In `agent//tools.sh`, we use the `@cmd` comment tag and named functions, since it can have multiple tool functions. 224 | 225 | ### JavaScript 226 | 227 | ```js file=agents/git/tools.js 228 | /** 229 | * Shows the working tree status 230 | */ 231 | exports.git_status = function() { 232 | // ... your JavaScript code ... 233 | } 234 | 235 | /** 236 | * Shows differences between branches or commits 237 | * @typedef {Object} Args 238 | * @property {string} target - Shows differences between branches or commits 239 | * @param {Args} args 240 | */ 241 | exports.git_diff = function() { 242 | // ... your JavaScript code ... 243 | } 244 | ``` 245 | 246 | ### Python 247 | 248 | ```py file=agents/git/tools.py 249 | def git_status(): 250 | """Shows the working tree status""" 251 | # ... your Python code ... 252 | 253 | 254 | def git_diff(target: str): 255 | """Shows differences between branches or commits 256 | Args: 257 | target: Shows differences between branches or commits 258 | """ 259 | # ... your Python code ... 260 | ``` 261 | 262 | ## Quickly Create Tools 263 | 264 | ### Use argc 265 | 266 | `Argcfile.sh` provides a tool `create@tool` to quickly create tool scripts. 267 | 268 | ```sh 269 | argc create@tool _test.sh foo bar! 
baz+ qux* 270 | ``` 271 | 272 | The argument details 273 | 274 | - `_test.sh`: The name of the tool script you want to create. The file extension can only be `.sh`, `.js`, or `.py`. 275 | - `foo bar! baz+ qux*`: The parameters for the tool. 276 | 277 | The suffixes attached to the tool's parameters define their characteristics: 278 | 279 | - `!`: Indicates that the property is required. 280 | - `*`: Specifies that the property value should be an array. 281 | - `+`: Marks the property as required, with the value also needing to be an array. 282 | - No suffix: Denotes that the property is optional. 283 | 284 | ### Use aichat 285 | 286 | AI is smart enough to automatically create tool scripts for us. We just need to provide the documentation and describe the requirements well. 287 | 288 | Use aichat to create a common tool script: 289 | ``` 290 | aichat -f docs/tool.md <<-'EOF' 291 | create tools/get_youtube_transcript.py 292 | 293 | description: Extract transcripts from YouTube videos 294 | parameters: 295 | url (required): YouTube video URL or video ID 296 | lang (default: "en"): Language code for transcript (e.g., "ko", "en") 297 | EOF 298 | ``` 299 | 300 | Use aichat to create a agent tools script: 301 | ``` 302 | aichat -f docs/agent.md -f docs/tool.md <<-'EOF' 303 | 304 | create a spotify agent 305 | 306 | index.yaml: 307 | name: spotify 308 | description: An AI agent that works with Spotify 309 | 310 | tools.py: 311 | search: Search for tracks, albums, artists, or playlists on Spotify 312 | query (required): Query term 313 | qtype (default: "track"): Type of items to search for (track, album, artist, playlist, or comma-separated combination) 314 | limit (default: 10): Maximum number of items to return 315 | get_info: Get detailed information about a Spotify item (track, album, artist, or playlist) 316 | item_id (required): ID of the item to get information about 317 | qtype (default: "track"): Type of item: 'track', 'album', 'artist', or 'playlist' 318 | get_queue: Get the playback queue 319 | add_queue: Add tracks to the playback queue 320 | track_id (required): Track ID to add to queue 321 | get_track: Get information about user's current track 322 | start: Starts of resumes playback 323 | track_id (required): Specifies track to play 324 | pause: Pauses current playback 325 | skip: Skips current track 326 | num_skips (default: 1): Number of tracks to skip 327 | EOF 328 | ``` -------------------------------------------------------------------------------- /mcp/bridge/README.md: -------------------------------------------------------------------------------- 1 | # MCP-Bridge 2 | 3 | Let external MCP tools be used by LLM-Functions. 4 | 5 | ## Get Started 6 | 7 | ### 1. Create a `mcp.json` at ``. 8 | 9 | ```json 10 | { 11 | "mcpServers": { 12 | "sqlite": { 13 | "command": "uvx", 14 | "args": [ 15 | "mcp-server-sqlite", 16 | "--db-path", 17 | "/tmp/foo.db" 18 | ] 19 | }, 20 | "git": { 21 | "command": "uvx", 22 | "args": [ 23 | "mcp-server-git", 24 | "--repository", 25 | "path/to/git/repo" 26 | ], 27 | "prefix": false 28 | }, 29 | "github": { 30 | "command": "npx", 31 | "args": [ 32 | "-y", 33 | "@modelcontextprotocol/server-github" 34 | ], 35 | "env": { 36 | "GITHUB_PERSONAL_ACCESS_TOKEN": "" 37 | } 38 | } 39 | } 40 | } 41 | ``` 42 | 43 | > MCP-Bridge will launch the server and register all the tools listed by the server. 44 | 45 | > To avoid name clashes, The server automatically prefix tool names with `_`. You can disable this behavior by add `prefix: false` to server configuration. 
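For instance, with the configuration above, a `read_query` tool exposed by the sqlite server would be registered as `sqlite_read_query`, while the git server keeps its tools' original names because it sets `prefix: false`; names are also lowercased and dashes are converted to underscores. Once the bridge is running (see the next step), you can inspect the registered names through its HTTP API. This is just a sketch — the exact tool names depend on what each MCP server exposes:

```sh
curl -s http://localhost:8808/tools | jq -r '.[].name'
# sqlite_read_query
# sqlite_write_query
# git_status
# github_create_issue
```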
46 | 47 | ### 2. Run the bridge server, build mcp tool binaries, update functions.json, all with: 48 | 49 | ``` 50 | argc mcp start 51 | ``` 52 | 53 | > Run `argc mcp stop` to stop the bridge server, recover functions.json. 54 | 55 | > Run `argc mcp logs` to check the server's logs. 56 | -------------------------------------------------------------------------------- /mcp/bridge/index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import * as path from "node:path"; 4 | import * as fs from "node:fs"; 5 | import { Client } from "@modelcontextprotocol/sdk/client/index.js"; 6 | import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js"; 7 | import express from "express"; 8 | 9 | const app = express(); 10 | const PORT = process.env.MCP_BRIDGE_PORT || 8808; 11 | 12 | let [rootDir] = process.argv.slice(2); 13 | 14 | if (!rootDir) { 15 | console.error("Usage: mcp-bridge "); 16 | process.exit(1); 17 | } 18 | 19 | let mcpServers = {}; 20 | const mcpJsonPath = path.join(rootDir, "mcp.json"); 21 | try { 22 | const data = await fs.promises.readFile(mcpJsonPath, "utf8"); 23 | mcpServers = JSON.parse(data)?.mcpServers; 24 | } catch { 25 | console.error(`Failed to read json at '${mcpJsonPath}'`); 26 | process.exit(1); 27 | } 28 | 29 | async function startMcpServer(id, serverConfig) { 30 | console.log(`Starting ${id} server...`); 31 | const capabilities = { tools: {} }; 32 | const { prefix = true, ...rest } = serverConfig; 33 | const transport = new StdioClientTransport({ 34 | ...rest, 35 | }); 36 | const client = new Client( 37 | { name: id, version: "1.0.0" }, 38 | { capabilities } 39 | ); 40 | await client.connect(transport); 41 | const { tools: toolDefinitions } = await client.listTools() 42 | const tools = toolDefinitions.map( 43 | ({ name, description, inputSchema }) => 44 | ({ 45 | spec: { 46 | name: `${formatToolName(id, name, prefix)}`, 47 | description, 48 | parameters: inputSchema, 49 | mcp: id, 50 | }, 51 | impl: async args => { 52 | const res = await client.callTool({ 53 | name: name, 54 | arguments: args, 55 | }); 56 | const content = res.content; 57 | let text = arrayify(content)?.map(c => { 58 | switch (c.type) { 59 | case "text": 60 | return c.text || "" 61 | case "image": 62 | return c.data 63 | case "resource": 64 | return c.resource?.uri || "" 65 | default: 66 | return c 67 | } 68 | }).join("\n"); 69 | if (res.isError) { 70 | text = `Tool Error\n${text}`; 71 | } 72 | return text; 73 | }, 74 | }) 75 | ); 76 | return { 77 | tools, 78 | [Symbol.asyncDispose]: async () => { 79 | try { 80 | console.log(`Closing ${id} server...`); 81 | await client.close(); 82 | await transport.close(); 83 | } catch { } 84 | }, 85 | } 86 | } 87 | 88 | async function runBridge() { 89 | let hasError = false; 90 | let runningMcpServers = await Promise.all( 91 | Object.entries(mcpServers).map( 92 | async ([name, serverConfig]) => { 93 | try { 94 | return await startMcpServer(name, serverConfig) 95 | } catch (err) { 96 | hasError = true; 97 | console.error(`Failed to start ${name} server; ${err.message}`) 98 | } 99 | } 100 | ) 101 | ); 102 | runningMcpServers = runningMcpServers.filter(s => !!s); 103 | const stopMcpServers = () => Promise.all(runningMcpServers.map(s => s[Symbol.asyncDispose]())); 104 | if (hasError) { 105 | await stopMcpServers(); 106 | return; 107 | } 108 | 109 | const definitions = runningMcpServers.flatMap(s => s.tools.map(t => t.spec)); 110 | const runTool = async (name, args) => { 111 | for (const 
server of runningMcpServers) { 112 | const tool = server.tools.find(t => t.spec.name === name); 113 | if (tool) { 114 | return tool.impl(args); 115 | } 116 | } 117 | return `Not found tool '${name}'`; 118 | }; 119 | 120 | app.use((err, _req, res, _next) => { 121 | res.status(500).send(err?.message || err); 122 | }); 123 | 124 | app.use(express.json()); 125 | 126 | app.get("/", (_req, res) => { 127 | res.send(`# MCP Bridge API 128 | 129 | - POST /tools/:name 130 | \`\`\` 131 | curl -X POST http://localhost:8808/tools/filesystem_write_file \\ 132 | -H 'content-type: application/json' \\ 133 | -d '{"path": "/tmp/file1", "content": "hello world"}' 134 | \`\`\` 135 | - GET /tools 136 | \`\`\` 137 | curl http://localhost:8808/tools 138 | \`\`\` 139 | `); 140 | }); 141 | 142 | app.get("/tools", (_req, res) => { 143 | res.json(definitions); 144 | }); 145 | 146 | app.post("/tools/:name", async (req, res) => { 147 | try { 148 | const output = await runTool(req.params.name, req.body); 149 | res.send(output); 150 | } catch (err) { 151 | res.status(500).send(err); 152 | } 153 | }); 154 | 155 | app.get("/pid", (_req, res) => { 156 | res.send(process.pid.toString()); 157 | }); 158 | 159 | app.get("/health", (_req, res) => { 160 | res.send("OK"); 161 | }); 162 | 163 | app.use((_req, res, _next) => { 164 | res.status(404).send("Not found"); 165 | }); 166 | 167 | const server = app.listen(PORT, () => { 168 | console.log(`Server is running on port ${PORT}`); 169 | }); 170 | 171 | return async () => { 172 | server.close(() => console.log("Http server closed")); 173 | await stopMcpServers(); 174 | }; 175 | } 176 | 177 | function arrayify(a) { 178 | let r; 179 | if (a === undefined) r = []; 180 | else if (Array.isArray(a)) r = a.slice(0); 181 | else r = [a]; 182 | 183 | return r 184 | } 185 | 186 | function formatToolName(serverName, toolName, prefix) { 187 | const name = prefix ? `${serverName}_${toolName}` : toolName; 188 | return name.toLowerCase().replace(/-/g, "_"); 189 | } 190 | 191 | runBridge() 192 | .then(stop => { 193 | if (stop) { 194 | process.on('SIGINT', stop); 195 | process.on('SIGTERM', stop); 196 | } 197 | }) 198 | .catch(console.error); -------------------------------------------------------------------------------- /mcp/bridge/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mcp-bridge", 3 | "version": "1.0.0", 4 | "description": "Let MCP tools be used by LLM functions", 5 | "license": "MIT", 6 | "author": "sigoden ", 7 | "homepage": "https://github.com/sigoden/llm-functions/tree/main/mcp/bridge", 8 | "repository": { 9 | "type": "git", 10 | "url": "git+https://github.com/sigoden/llm-functions.git", 11 | "directory": "mcp/bridge" 12 | }, 13 | "private": true, 14 | "type": "module", 15 | "bin": { 16 | "mcp-bridge": "index.js" 17 | }, 18 | "dependencies": { 19 | "@modelcontextprotocol/sdk": "^1.0.3", 20 | "express": "^4.21.2" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /mcp/server/README.md: -------------------------------------------------------------------------------- 1 | # MCP-Server 2 | 3 | Let LLM-functions tools/agents be used through the Model Context Protocol. 
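Under the hood, the server reads the pre-built `functions.json` (or, when an agent name is given, `agents/<agent-name>/functions.json`) and exposes each declaration as an MCP tool, dispatching calls to the matching executable in `bin/`. The `mcp-llm-functions` entry used in the configurations below is therefore roughly equivalent to running the server directly with Node — a sketch with placeholder paths:

```sh
# Serve all shared tools declared in /path/to/llm-functions/functions.json
node mcp/server/index.js /path/to/llm-functions

# Serve a single agent's tools (reads agents/todo/functions.json instead)
node mcp/server/index.js /path/to/llm-functions todo
```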
4 | 5 | ## Serve tools 6 | 7 | ```json 8 | { 9 | "mcpServers": { 10 | "tools": { 11 | "command": "npx", 12 | "args": [ 13 | "mcp-llm-functions", 14 | "" 15 | ] 16 | } 17 | } 18 | } 19 | ``` 20 | 21 | ## Serve the agent 22 | 23 | ```json 24 | { 25 | "mcpServers": { 26 | "": { 27 | "command": "node", 28 | "args": [ 29 | "mcp-llm-functions", 30 | "" 31 | "", 32 | ] 33 | } 34 | } 35 | } 36 | ``` 37 | 38 | ## Environment Variables 39 | 40 | - `AGENT_TOOLS_ONLY`: Set to `true` or `1` to ignore shared tools and display only agent tools. -------------------------------------------------------------------------------- /mcp/server/index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import * as path from "node:path"; 4 | import * as fs from "node:fs"; 5 | import * as os from "node:os"; 6 | import { v4 as uuid } from "uuid"; 7 | import { spawn } from "node:child_process"; 8 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 9 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 10 | import { 11 | CallToolRequestSchema, 12 | ListToolsRequestSchema, 13 | } from "@modelcontextprotocol/sdk/types.js"; 14 | 15 | let [rootDir, agentName] = process.argv.slice(2); 16 | if (!rootDir) { 17 | console.error("Usage: mcp-llm-functions []"); 18 | process.exit(1); 19 | } 20 | rootDir = path.resolve(rootDir); 21 | 22 | let functionsJsonPath = path.join(rootDir, "functions.json"); 23 | if (agentName) { 24 | functionsJsonPath = path.join(rootDir, "agents", agentName, "functions.json"); 25 | } 26 | let functions = []; 27 | try { 28 | const data = await fs.promises.readFile(functionsJsonPath, "utf8"); 29 | functions = JSON.parse(data); 30 | } catch { 31 | console.error(`Failed to read functions at '${functionsJsonPath}'`); 32 | process.exit(1); 33 | } 34 | const agentToolsOnly = process.env["AGENT_TOOLS_ONLY"] === "true" || process.env["AGENT_TOOLS_ONLY"] === "1"; 35 | functions = functions.filter(f => { 36 | if (f.mcp) { 37 | return false; 38 | } 39 | if (agentToolsOnly) { 40 | return f.agent; 41 | } else { 42 | return true; 43 | } 44 | }); 45 | 46 | const env = Object.assign({}, process.env, { 47 | PATH: `${path.join(rootDir, "bin")}:${process.env.PATH}` 48 | }); 49 | 50 | const server = new Server( 51 | { 52 | name: `llm-functions/${agentName || "common-tools"}`, 53 | version: "0.1.0", 54 | }, 55 | { 56 | capabilities: { 57 | tools: {}, 58 | }, 59 | }, 60 | ); 61 | 62 | server.setRequestHandler(ListToolsRequestSchema, async () => { 63 | return { 64 | tools: functions.map((f) => ({ 65 | name: f.name, 66 | description: f.description, 67 | inputSchema: f.parameters, 68 | })), 69 | }; 70 | }); 71 | 72 | server.setRequestHandler(CallToolRequestSchema, async (request) => { 73 | const functionObj = functions.find((f) => f.name === request.params.name); 74 | if (!functionObj) { 75 | throw new Error(`Unknown tool '${request.params.name}'`); 76 | } 77 | let command = request.params.name; 78 | let args = [JSON.stringify(request.params.arguments || {})]; 79 | if (agentName && functionObj.agent) { 80 | args.unshift(command); 81 | command = agentName; 82 | } 83 | const tmpFile = path.join(os.tmpdir(), `mcp-llm-functions-${process.pid}-eval-${uuid()}`); 84 | const { exitCode, stderr } = await runCommand(command, args, { ...env, LLM_OUTPUT: tmpFile }); 85 | if (exitCode === 0) { 86 | let output = ''; 87 | try { 88 | output = await fs.promises.readFile(tmpFile, "utf8"); 89 | } catch { }; 90 | return { 91 | content: [{ 
type: "text", text: output }], 92 | }; 93 | } else { 94 | return { 95 | isError: true, 96 | content: [{ type: "text", text: stderr }], 97 | }; 98 | } 99 | }); 100 | 101 | function runCommand(command, args, env) { 102 | return new Promise(resolve => { 103 | const child = spawn(command, args, { 104 | stdio: ['ignore', 'ignore', 'pipe'], 105 | env, 106 | }); 107 | 108 | let stderr = ''; 109 | 110 | child.stderr.on('data', (data) => { 111 | stderr += data.toString(); 112 | }); 113 | 114 | child.on('close', (exitCode) => { 115 | resolve({ exitCode, stderr }); 116 | }); 117 | 118 | child.on('error', (err) => { 119 | resolve({ exitCode: 1, stderr: `Command execution failed: ${err.message}` }); 120 | }); 121 | }); 122 | } 123 | 124 | async function runServer() { 125 | const transport = new StdioServerTransport(); 126 | await server.connect(transport); 127 | } 128 | 129 | runServer().catch(console.error); -------------------------------------------------------------------------------- /mcp/server/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mcp-llm-functions", 3 | "version": "1.2.0", 4 | "description": "Let LLM-functions tools/agents be used through the Model Context Protocol", 5 | "license": "MIT", 6 | "author": "sigoden ", 7 | "homepage": "https://github.com/sigoden/llm-functions/tree/main/mcp/server", 8 | "repository": { 9 | "type": "git", 10 | "url": "git+https://github.com/sigoden/llm-functions.git", 11 | "directory": "mcp/server" 12 | }, 13 | "publishConfig": { 14 | "access": "public" 15 | }, 16 | "type": "module", 17 | "bin": { 18 | "mcp-llm-functions": "index.js" 19 | }, 20 | "dependencies": { 21 | "@modelcontextprotocol/sdk": "^1.1.0", 22 | "uuid": "^11.0.3" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /scripts/build-declarations.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const fs = require("fs"); 4 | const path = require("path"); 5 | 6 | const TOOL_ENTRY_FUNC = "run"; 7 | 8 | function main() { 9 | const scriptfile = process.argv[2]; 10 | const isTool = path.dirname(scriptfile) == "tools"; 11 | const contents = fs.readFileSync(process.argv[2], "utf8"); 12 | const functions = extractFunctions(contents, isTool); 13 | let declarations = []; 14 | for (const { funcName, jsdoc } of functions) { 15 | const { description, params } = parseJsDoc(jsdoc, funcName); 16 | if (!description) continue; 17 | const declaration = buildDeclaration(funcName, description, params); 18 | declarations.push(declaration); 19 | } 20 | if (isTool) { 21 | const name = getBasename(scriptfile); 22 | if (declarations.length > 0) { 23 | declarations = declarations.slice(0, 1); 24 | declarations[0].name = name; 25 | } 26 | } 27 | console.log(JSON.stringify(declarations, null, 2)); 28 | } 29 | 30 | /** 31 | * @param {string} contents 32 | * @param {bool} isTool 33 | */ 34 | function extractFunctions(contents, isTool) { 35 | const output = []; 36 | const lines = contents.split("\n"); 37 | let isInComment = false; 38 | let jsdoc = ""; 39 | let incompleteComment = ""; 40 | for (let line of lines) { 41 | if (/^\s*\/\*/.test(line)) { 42 | isInComment = true; 43 | incompleteComment += `\n${line}`; 44 | } else if (/^\s*\*\//.test(line)) { 45 | isInComment = false; 46 | incompleteComment += `\n${line}`; 47 | jsdoc = incompleteComment; 48 | incompleteComment = ""; 49 | } else if (isInComment) { 50 | incompleteComment += `\n${line}`; 51 | } 
else { 52 | if (!jsdoc || line.trim() === "") { 53 | continue; 54 | } 55 | if (isTool) { 56 | if (new RegExp(`^export (async )?function ${TOOL_ENTRY_FUNC}|^exports\.${TOOL_ENTRY_FUNC}`).test(line)) { 57 | output.push({ 58 | funcName: TOOL_ENTRY_FUNC, 59 | jsdoc, 60 | }); 61 | } 62 | } else { 63 | let match = /^export (async )?function ([A-Za-z0-9_]+)/.exec(line); 64 | let funcName = null; 65 | if (match) { 66 | funcName = match[2]; 67 | } 68 | if (!funcName) { 69 | match = /^exports\.([A-Za-z0-9_]+) = (async )?function /.exec(line); 70 | if (match) { 71 | funcName = match[1]; 72 | } 73 | } 74 | if (funcName && !funcName.startsWith("_")) { 75 | output.push({ funcName, jsdoc }); 76 | } 77 | } 78 | jsdoc = ""; 79 | } 80 | } 81 | return output; 82 | } 83 | 84 | /** 85 | * @param {string} jsdoc 86 | * @param {string} funcName, 87 | */ 88 | function parseJsDoc(jsdoc, funcName) { 89 | const lines = jsdoc.split("\n"); 90 | let description = ""; 91 | const rawParams = []; 92 | let tag = ""; 93 | for (let line of lines) { 94 | line = line.replace(/^\s*(\/\*\*|\*\/|\*)/, "").trim(); 95 | let match = /^@(\w+)/.exec(line); 96 | if (match) { 97 | tag = match[1]; 98 | } 99 | if (!tag) { 100 | description += `\n${line}`; 101 | } else if (tag == "property") { 102 | if (match) { 103 | rawParams.push(line.slice(tag.length + 1).trim()); 104 | } else { 105 | rawParams[rawParams.length - 1] += `\n${line}`; 106 | } 107 | } 108 | } 109 | const params = []; 110 | for (const rawParam of rawParams) { 111 | try { 112 | params.push(parseParam(rawParam)); 113 | } catch (err) { 114 | throw new Error( 115 | `Unable to parse function '${funcName}' of jsdoc '@property ${rawParam}'`, 116 | ); 117 | } 118 | } 119 | return { 120 | description: description.trim(), 121 | params, 122 | }; 123 | } 124 | 125 | /** 126 | * @typedef {ReturnType} Param 127 | */ 128 | 129 | /** 130 | * @param {string} rawParam 131 | */ 132 | function parseParam(rawParam) { 133 | const regex = /^{([^}]+)} +(\S+)( *- +| +)?/; 134 | const match = regex.exec(rawParam); 135 | if (!match) { 136 | throw new Error(`Invalid jsdoc comment`); 137 | } 138 | const type = match[1]; 139 | let name = match[2]; 140 | const description = rawParam.replace(regex, ""); 141 | 142 | let required = true; 143 | if (/^\[.*\]$/.test(name)) { 144 | name = name.slice(1, -1); 145 | required = false; 146 | } 147 | let property = buildProperty(type, description); 148 | return { name, property, required }; 149 | } 150 | 151 | /** 152 | * @param {string} type 153 | * @param {string} description 154 | */ 155 | function buildProperty(type, description) { 156 | type = type.toLowerCase(); 157 | const property = {}; 158 | if (type.includes("|")) { 159 | property.type = "string"; 160 | property.enum = type.replace(/'/g, "").split("|"); 161 | } else if (type === "boolean") { 162 | property.type = "boolean"; 163 | } else if (type === "string") { 164 | property.type = "string"; 165 | } else if (type === "integer") { 166 | property.type = "integer"; 167 | } else if (type === "number") { 168 | property.type = "number"; 169 | } else if (type === "string[]") { 170 | property.type = "array"; 171 | property.items = { type: "string" }; 172 | } else { 173 | throw new Error(`Unsupported type '${type}'`); 174 | } 175 | property.description = description; 176 | return property; 177 | } 178 | 179 | /** 180 | * @param {string} name 181 | * @param {string} description 182 | * @param {Param[]} params 183 | */ 184 | function buildDeclaration(name, description, params) { 185 | const declaration = { 186 | name, 
187 | description, 188 | parameters: { 189 | type: "object", 190 | properties: {}, 191 | }, 192 | }; 193 | const schema = declaration.parameters; 194 | const requiredParams = []; 195 | for (const { name, property, required } of params) { 196 | schema.properties[name] = property; 197 | if (required) { 198 | requiredParams.push(name); 199 | } 200 | } 201 | if (requiredParams.length > 0) { 202 | schema.required = requiredParams; 203 | } 204 | return declaration; 205 | } 206 | 207 | /** 208 | * @param {string} filePath 209 | */ 210 | function getBasename(filePath) { 211 | const filenameWithExt = filePath.split(/[/\\]/).pop(); 212 | 213 | const lastDotIndex = filenameWithExt.lastIndexOf("."); 214 | 215 | if (lastDotIndex === -1) { 216 | return filenameWithExt; 217 | } 218 | 219 | return filenameWithExt.substring(0, lastDotIndex); 220 | } 221 | 222 | main(); 223 | -------------------------------------------------------------------------------- /scripts/build-declarations.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import ast 4 | import os 5 | import json 6 | import re 7 | import sys 8 | from collections import OrderedDict 9 | 10 | TOOL_ENTRY_FUNC = "run" 11 | 12 | 13 | def main(is_tool=True): 14 | scriptfile = sys.argv[1] 15 | is_tool = os.path.dirname(scriptfile) == "tools" 16 | 17 | with open(scriptfile, "r", encoding="utf-8") as f: 18 | contents = f.read() 19 | 20 | functions = extract_functions(contents, is_tool) 21 | declarations = [] 22 | for function in functions: 23 | func_name, docstring, func_args = function 24 | description, params = parse_docstring(docstring) 25 | if not description: 26 | continue 27 | declarations.append( 28 | build_declaration(func_name, description, params, func_args) 29 | ) 30 | 31 | if is_tool: 32 | name = os.path.splitext(os.path.basename(scriptfile))[0] 33 | if declarations: 34 | declarations = declarations[0:1] 35 | declarations[0]["name"] = name 36 | 37 | print(json.dumps(declarations, indent=2)) 38 | 39 | 40 | def extract_functions(contents: str, is_tool: bool): 41 | tree = ast.parse(contents) 42 | output = [] 43 | for node in ast.walk(tree): 44 | if not isinstance(node, ast.FunctionDef): 45 | continue 46 | func_name = node.name 47 | if is_tool and func_name != TOOL_ENTRY_FUNC: 48 | continue 49 | if func_name.startswith("_"): 50 | continue 51 | docstring = ast.get_docstring(node) or "" 52 | func_args = OrderedDict() 53 | for arg in node.args.args: 54 | arg_name = arg.arg 55 | arg_type = get_arg_type(arg.annotation) 56 | func_args[arg_name] = arg_type 57 | output.append((func_name, docstring, func_args)) 58 | return output 59 | 60 | 61 | def get_arg_type(annotation) -> str: 62 | if annotation is None: 63 | return "" 64 | elif isinstance(annotation, ast.Name): 65 | return annotation.id 66 | elif isinstance(annotation, ast.Subscript): 67 | if isinstance(annotation.value, ast.Name): 68 | type_name = annotation.value.id 69 | if type_name == "List": 70 | child = get_arg_type(annotation.slice) 71 | return f"list[{child}]" 72 | if type_name == "Literal": 73 | literals = [ast.unparse(el) for el in annotation.slice.elts] 74 | return f"{'|'.join(literals)}" 75 | if type_name == "Optional": 76 | child = get_arg_type(annotation.slice) 77 | return f"{child}?" 
78 | return "any" 79 | 80 | 81 | def parse_docstring(docstring: str): 82 | lines = docstring.splitlines() 83 | description = "" 84 | rawParams = [] 85 | is_in_args = False 86 | for line in lines: 87 | if not is_in_args: 88 | if line.startswith("Args:"): 89 | is_in_args = True 90 | else: 91 | description += f"\n{line}" 92 | continue 93 | else: 94 | if re.search(r"^\s+", line): 95 | rawParams.append(line.strip()) 96 | else: 97 | break 98 | params = {} 99 | for rawParam in rawParams: 100 | name, type_, param_description = parse_param(rawParam) 101 | params[name] = (type_, param_description) 102 | return (description.strip(), params) 103 | 104 | 105 | def parse_param(raw_param: str): 106 | name = "" 107 | description = "" 108 | type_from_comment = "" 109 | if ":" in raw_param: 110 | name, description = raw_param.split(":", 1) 111 | name = name.strip() 112 | description = description.strip() 113 | else: 114 | name = raw_param 115 | if " " in name: 116 | name, type_from_comment = name.split(" ", 1) 117 | type_from_comment = type_from_comment.strip() 118 | 119 | if type_from_comment.startswith("(") and type_from_comment.endswith(")"): 120 | type_from_comment = type_from_comment[1:-1] 121 | type_parts = [value.strip() for value in type_from_comment.split(",")] 122 | type_ = type_parts[0] 123 | if "optional" in type_parts[1:]: 124 | type_ = f"{type_}?" 125 | 126 | return (name, type_, description) 127 | 128 | 129 | def build_declaration( 130 | name: str, description: str, params: dict, args: OrderedDict[str, str] 131 | ) -> dict[str, dict]: 132 | declaration = { 133 | "name": name, 134 | "description": description, 135 | "parameters": { 136 | "type": "object", 137 | "properties": {}, 138 | }, 139 | } 140 | schema = declaration["parameters"] 141 | required_params = [] 142 | for arg_name, arg_type in args.items(): 143 | type_ = arg_type 144 | description = "" 145 | required = True 146 | if params.get(arg_name): 147 | param_type, description = params[arg_name] 148 | if not type_: 149 | type_ = param_type 150 | if type_.endswith("?"): 151 | type_ = type_[:-1] 152 | required = False 153 | try: 154 | property = build_property(type_, description) 155 | except: 156 | raise ValueError(f"Unable to parse arg '{arg_name}' of function '{name}'") 157 | schema["properties"][arg_name] = property 158 | if required: 159 | required_params.append(arg_name) 160 | if required_params: 161 | schema["required"] = required_params 162 | return declaration 163 | 164 | 165 | def build_property(type_: str, description: str): 166 | property = {} 167 | if "|" in type_: 168 | property["type"] = "string" 169 | property["enum"] = type_.replace("'", "").split("|") 170 | elif type_ == "bool": 171 | property["type"] = "boolean" 172 | elif type_ == "str": 173 | property["type"] = "string" 174 | elif type_ == "int": 175 | property["type"] = "integer" 176 | elif type_ == "float": 177 | property["type"] = "number" 178 | elif type_ == "list[str]": 179 | property["type"] = "array" 180 | property["items"] = {"type": "string"} 181 | elif type_ == "": 182 | property["type"] = "string" 183 | else: 184 | raise ValueError(f"Unsupported type `{type_}`") 185 | property["description"] = description 186 | return property 187 | 188 | 189 | if __name__ == "__main__": 190 | main() 191 | -------------------------------------------------------------------------------- /scripts/build-declarations.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | main() { 4 | scriptfile="$1" 5 | is_tool=false 
6 | if [[ "$(dirname "$scriptfile")" == tools ]]; then 7 | is_tool=true 8 | fi 9 | if [[ "$is_tool" == "true" ]]; then 10 | expr='[.]' 11 | else 12 | expr='.subcommands' 13 | fi 14 | argc --argc-export "$scriptfile" | \ 15 | jq "$expr" | \ 16 | build_declarations 17 | } 18 | 19 | build_declarations() { 20 | jq --arg is_tool "$is_tool" -r ' 21 | def filter_declaration: 22 | (if $is_tool == "true" then 23 | . 24 | else 25 | select(.name | startswith("_") | not) 26 | end) | select(.description != ""); 27 | 28 | def parse_description(flag_option): 29 | if flag_option.describe == "" then 30 | {} 31 | else 32 | { "description": flag_option.describe } 33 | end; 34 | 35 | def parse_enum(flag_option): 36 | if flag_option.choice.type == "Values" then 37 | { "enum": flag_option.choice.data } 38 | else 39 | {} 40 | end; 41 | 42 | def parse_property(flag_option): 43 | [ 44 | { condition: (flag_option.flag == true), result: { type: "boolean" } }, 45 | { condition: (flag_option.multiple_occurs == true), result: { type: "array", items: { type: "string" } } }, 46 | { condition: (flag_option.notations[0] == "INT"), result: { type: "integer" } }, 47 | { condition: (flag_option.notations[0] == "NUM"), result: { type: "number" } }, 48 | { condition: true, result: { type: "string" } } ] 49 | | map(select(.condition) | .result) | first 50 | | (. + parse_description(flag_option)) 51 | | (. + parse_enum(flag_option)) 52 | ; 53 | 54 | 55 | def parse_parameter(flag_options): 56 | { 57 | type: "object", 58 | properties: (reduce flag_options[] as $item ({}; . + { ($item.id | sub("-"; "_"; "g")): parse_property($item) })), 59 | required: [flag_options[] | select(.required == true) | .id | sub("-"; "_"; "g")], 60 | }; 61 | 62 | def parse_declaration: 63 | { 64 | name: (.name | sub("-"; "_"; "g")), 65 | description: .describe, 66 | parameters: parse_parameter([.flag_options[] | select(.id != "help" and .id != "version")]) 67 | }; 68 | [ 69 | .[] | parse_declaration | filter_declaration 70 | ]' 71 | } 72 | 73 | main "$@" -------------------------------------------------------------------------------- /scripts/check-deps.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Check dependencies 5 | # 6 | # Examples: 7 | # ./scripts/check-deps.sh tools/execute_sql_code.sh 8 | # ./scripts/check-deps.sh agents/json-viewer/tools.js 9 | # 10 | # @arg script-path! The script file path 11 | 12 | main() { 13 | script_path="$argc_script_path" 14 | if [[ ! -f "$script_path" ]]; then 15 | echo "✗ not found $script_path" 16 | exit 0 17 | fi 18 | ext="${script_path##*.}" 19 | if [[ "$script_path" == tools/* ]]; then 20 | if [[ "$ext" == "sh" ]]; then 21 | check_sh_dependencies 22 | fi 23 | elif [[ "$script_path" == agents/* ]]; then 24 | if [[ "$ext" == "sh" ]]; then 25 | check_sh_dependencies 26 | elif [[ "$ext" == "js" ]]; then 27 | check_agent_js_dependencies 28 | elif [[ "$ext" == "py" ]]; then 29 | check_agent_py_dependencies 30 | fi 31 | fi 32 | } 33 | 34 | check_sh_dependencies() { 35 | deps=( $(sed -E -n 's/.*@meta require-tools //p' "$script_path") ) 36 | missing_deps=() 37 | for dep in "${deps[@]}"; do 38 | if ! 
command -v "$dep" &> /dev/null; then 39 | missing_deps+=("$dep") 40 | fi 41 | done 42 | if [[ -n "${missing_deps}" ]]; then 43 | echo "✗ missing tools: ${missing_deps[*]}" 44 | fi 45 | } 46 | 47 | check_agent_js_dependencies() { 48 | agent_dir="$(dirname "$script_path")" 49 | if [[ -f "$agent_dir/package.json" ]]; then 50 | npm ls --prefix="$agent_dir" --depth=0 --silent >/dev/null 2>&1 || \ 51 | { 52 | cmd="cd $agent_dir && npm install" 53 | echo "✗ missing node modules" 54 | read -p "? run \`$cmd\` to fix [Y/n] " choice 55 | if [[ "$choice" == "Y" || "$choice" == "y" || -z "$choice" ]]; then 56 | (eval "$cmd") 57 | fi 58 | } 59 | fi 60 | } 61 | 62 | check_agent_py_dependencies() { 63 | agent_dir="$(dirname "$script_path")" 64 | if [[ -f "$agent_dir/requirements.txt" ]]; then 65 | python <(cat "$agent_dir/requirements.txt" | sed -E -n 's/^([A-Za-z_]+).*/import \1/p') >/dev/null 2>&1 || \ 66 | { 67 | cmd="cd $agent_dir && pip install -r requirements.txt" 68 | echo "✗ missing python modules" 69 | read -p "? run \`$cmd\` to fix [Y/n] " choice 70 | if [[ "$choice" == "Y" || "$choice" == "y" || -z "$choice" ]]; then 71 | (eval "$cmd") 72 | fi 73 | } 74 | fi 75 | } 76 | 77 | # See more details at https://github.com/sigoden/argc 78 | eval "$(argc --argc-eval "$0" "$@")" 79 | -------------------------------------------------------------------------------- /scripts/create-tool.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Create a boilplate tool script 5 | # 6 | # Examples: 7 | # ./scripts/create-tool.sh _test.py foo bar! baz+ qux* 8 | # 9 | # @option --description The tool description 10 | # @flag --force Override the exist tool file 11 | # @arg name! The script file name 12 | # @arg params* The script parameters 13 | 14 | main() { 15 | output="tools/$argc_name" 16 | if [[ -f "$output" ]] && [[ -z "$argc_force" ]]; then 17 | _die "$output already exists" 18 | fi 19 | ext="${argc_name##*.}" 20 | description="${argc_description:-"The description for the tool"}" 21 | support_exts=('.sh' '.js' '.py') 22 | if [[ "$ext" == "$argc_name" ]]; then 23 | _die "error: no extension name, pelease add one of ${support_exts[*]}" 24 | fi 25 | case $ext in 26 | sh) create_sh ;; 27 | js) create_js ;; 28 | py) create_py ;; 29 | *) _die "error: invalid extension name: $ext, must be one of ${support_exts[*]}" ;; 30 | esac 31 | echo "$output generated" 32 | } 33 | 34 | create_sh() { 35 | cat <<-'EOF' > "$output" 36 | #!/usr/bin/env bash 37 | set -e 38 | 39 | EOF 40 | echo "# @describe $description" >> "$output" 41 | for param in "${argc_params[@]}"; do 42 | echo "# @option --$(echo $param | sed 's/-/_/g')" >> "$output" 43 | done 44 | cat <<-'EOF' >> "$output" 45 | 46 | main() { 47 | ( set -o posix ; set ) | grep ^argc_ 48 | } 49 | 50 | eval "$(argc --argc-eval "$0" "$@")" 51 | EOF 52 | chmod +x "$output" 53 | } 54 | 55 | create_js() { 56 | properties='' 57 | for param in "${argc_params[@]}"; do 58 | if [[ "$param" == *'!' 
]]; then 59 | param="${param:0:$((${#param}-1))}" 60 | property=" * @property {string} $param - " 61 | elif [[ "$param" == *'+' ]]; then 62 | param="${param:0:$((${#param}-1))}" 63 | property=" * @property {string[]} $param - " 64 | elif [[ "$param" == *'*' ]]; then 65 | param="${param:0:$((${#param}-1))}" 66 | property=" * @property {string[]} [$param] - " 67 | else 68 | property=" * @property {string} [$param] - " 69 | fi 70 | properties+=$'\n'"$property" 71 | done 72 | cat < "$output" 73 | /** 74 | * ${description} 75 | * @typedef {Object} Args${properties} 76 | * @param {Args} args 77 | */ 78 | exports.run = function (args) { 79 | console.log(args); 80 | } 81 | EOF 82 | } 83 | 84 | create_py() { 85 | has_array_param=false 86 | has_optional_pram=false 87 | required_properties='' 88 | optional_properties='' 89 | required_arguments=() 90 | optional_arguments=() 91 | indent=" " 92 | for param in "${argc_params[@]}"; do 93 | optional=false 94 | if [[ "$param" == *'!' ]]; then 95 | param="${param:0:$((${#param}-1))}" 96 | type="str" 97 | elif [[ "$param" == *'+' ]]; then 98 | param="${param:0:$((${#param}-1))}" 99 | type="List[str]" 100 | has_array_param=true 101 | elif [[ "$param" == *'*' ]]; then 102 | param="${param:0:$((${#param}-1))}" 103 | type="Optional[List[str]] = None" 104 | optional=true 105 | has_array_param=true 106 | else 107 | optional=true 108 | type="Optional[str] = None" 109 | fi 110 | if [[ "$optional" == "true" ]]; then 111 | has_optional_pram=true 112 | optional_arguments+="$param: $type, " 113 | optional_properties+=$'\n'"$indent$indent$param: -" 114 | else 115 | required_arguments+="$param: $type, " 116 | required_properties+=$'\n'"$indent$indent$param: -" 117 | fi 118 | done 119 | import_typing_members=() 120 | if [[ "$has_array_param" == "true" ]]; then 121 | import_typing_members+=("List") 122 | fi 123 | if [[ "$has_optional_pram" == "true" ]]; then 124 | import_typing_members+=("Optional") 125 | fi 126 | imports="" 127 | if [[ -n "$import_typing_members" ]]; then 128 | members="$(echo "${import_typing_members[*]}" | sed 's/ /, /')" 129 | imports="from typing import $members"$'\n' 130 | fi 131 | if [[ -n "$imports" ]]; then 132 | imports="$imports"$'\n' 133 | fi 134 | cat < "$output" 135 | ${imports} 136 | def run(${required_arguments}${optional_arguments}): 137 | """${description} 138 | Args:${required_properties}${optional_properties} 139 | """ 140 | pass 141 | EOF 142 | } 143 | 144 | build_schema() { 145 | echo '{ 146 | "name": "'"${argc_name%%.*}"'", 147 | "description": "", 148 | "parameters": '"$(build_properties)"' 149 | }' | jq '.' | sed '2,$s/^/ /g' 150 | } 151 | 152 | build_properties() { 153 | required_params=() 154 | properties='' 155 | for param in "${argc_params[@]}"; do 156 | if [[ "$param" == *'!' 
]]; then 157 | param="${param:0:$((${#param}-1))}" 158 | required_params+=("$param") 159 | property='{"'"$param"'":{"type":"string","description":""}}' 160 | elif [[ "$param" == *'+' ]]; then 161 | param="${param:0:$((${#param}-1))}" 162 | required_params+=("$param") 163 | property='{"'"$param"'":{"type":"array","description":"","items": {"type":"string"}}}' 164 | elif [[ "$param" == *'*' ]]; then 165 | param="${param:0:$((${#param}-1))}" 166 | property='{"'"$param"'":{"type":"array","description":"","items": {"type":"string"}}}' 167 | else 168 | property='{"'"$param"'":{"type":"string","description":""}}' 169 | fi 170 | properties+="$property" 171 | done 172 | required='' 173 | for param in "${required_params[@]}"; do 174 | if [[ -z "$required" ]]; then 175 | required=',"required":[' 176 | fi 177 | required+="\"$param\"," 178 | done 179 | if [[ -n "$required" ]]; then 180 | required="${required:0:$((${#required}-1))}" 181 | required+="]" 182 | fi 183 | echo '{ 184 | "type": "object", 185 | "properties": '"$(echo "$properties" | jq -s 'add')$required"' 186 | }' | jq '.' 187 | } 188 | 189 | _die() { 190 | echo "$*" >&2 191 | exit 1 192 | } 193 | 194 | # See more details at https://github.com/sigoden/argc 195 | eval "$(argc --argc-eval "$0" "$@")" -------------------------------------------------------------------------------- /scripts/declarations-util.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | # @describe Utils for declarations json file 6 | 7 | # @cmd Pretty print declarations 8 | # 9 | # Examples: 10 | # ./scripts/declarations.sh pretty-print functions.json 11 | # cat functions.json | ./scripts/declarations.sh pretty-print functions.json 12 | # @flag --no-type Do not to display param type info 13 | # @arg json-file The json file, Read stdin if omitted 14 | pretty-print() { 15 | _run _pretty_print 16 | } 17 | 18 | # @cmd Generate placeholder json according to declarations 19 | # Examples: 20 | # ./scripts/declarations.sh generate-json functions.json 21 | # cat functions.json | ./scripts/declarations.sh generate-json functions.json 22 | # @arg json-file The json file, Read stdin if omitted 23 | generate-json() { 24 | _run _generate_json 25 | } 26 | 27 | _run() { 28 | func="$1" 29 | _get_declarations_data 30 | if [[ "$json_type" == "object" ]]; then 31 | echo "$json_data" | $func 32 | elif [[ "$json_type" == "array" ]]; then 33 | for i in $(seq 1 $json_array_len); do 34 | echo "$json_data" | jq '.['$((i-1))']' | $func 35 | done 36 | fi 37 | } 38 | 39 | _get_declarations_data() { 40 | if [[ -f "$argc_json_file" ]]; then 41 | json_data="$(cat "$argc_json_file")" 42 | else 43 | json_data="$(cat)" 44 | fi 45 | json_type="$(echo "$json_data" | jq -r ' 46 | if type == "array" then 47 | (. | length) as $len | "array;\($len)" 48 | else 49 | if type == "object" then 50 | type 51 | else 52 | "" 53 | end 54 | end 55 | ' 2>/dev/null || true)" 56 | if [[ "$json_type" == *object* ]]; then 57 | :; 58 | elif [[ "$json_type" == *array* ]]; then 59 | json_array_len="${json_type#*;}" 60 | json_type="${json_type%%;*}" 61 | if [[ ! "$json_array_len" -gt 0 ]]; then 62 | json_type="" 63 | fi 64 | fi 65 | if [[ -z "$json_type" ]]; then 66 | echo "error: invalid JSON data" >&2 67 | exit 1 68 | fi 69 | } 70 | 71 | _pretty_print() { 72 | jq --arg no_type "$argc_no_type" -r ' 73 | def get_type: 74 | .value.type as $type | 75 | (if .required then "" else "?" 
end) as $symbol | 76 | (.value.enum // []) as $enum | 77 | ([ 78 | { condition: ($type == "array"), result: "string[]" }, 79 | { condition: ($type == "string" and ($enum | length > 0)), result: ($enum | join("|")) }, 80 | { condition: ($type == "string"), result: "" }, 81 | { condition: true, result: $type } 82 | ] | map(select(.condition) | .result) | first) as $kind | 83 | if $kind != "" then "(\($kind))\($symbol)" else $symbol end; 84 | 85 | def oneline_description: split("\n")[0]; 86 | 87 | def parse_property: 88 | .key as $key | 89 | (.value.description | oneline_description) as $description | 90 | (if $no_type != "1" then (. | get_type) else "" end) as $type | 91 | " \($key)\($type): \($description)"; 92 | 93 | def print_params: 94 | .parameters | 95 | .required as $requiredProperties | 96 | .properties | to_entries[] | 97 | .key as $key | .+ { "required": ($requiredProperties | index($key) != null) } | 98 | parse_property; 99 | 100 | def print_title: 101 | (.description | oneline_description) as $description | 102 | "\(.name): \($description)"; 103 | 104 | print_title, print_params 105 | ' 106 | } 107 | 108 | _generate_json() { 109 | jq -r -c ' 110 | def convert_string: 111 | if has("enum") then .enum[0] else "foo" end; 112 | 113 | def parse_property: 114 | .key as $key | 115 | .value.type as $type | 116 | [ 117 | { condition: ($type == "string"), result: { $key: (.value | convert_string) }}, 118 | { condition: ($type == "boolean"), result: { $key: false }}, 119 | { condition: ($type == "integer"), result: { $key: 42 }}, 120 | { condition: ($type == "number"), result: { $key: 3.14 }}, 121 | { condition: ($type == "array"), result: { $key: [ "v1" ] } } 122 | ] | map(select(.condition) | .result) | first; 123 | 124 | .name, 125 | ( 126 | .parameters | 127 | [ 128 | .properties | to_entries[] | parse_property 129 | ] | add // {} 130 | ) 131 | ' 132 | } 133 | 134 | # See more details at https://github.com/sigoden/argc 135 | eval "$(argc --argc-eval "$0" "$@")" 136 | -------------------------------------------------------------------------------- /scripts/mcp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | ROOT_DIR="$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )/.." &> /dev/null && pwd)" 5 | BIN_DIR="$ROOT_DIR/bin" 6 | MCP_DIR="$ROOT_DIR/cache/__mcp__" 7 | MCP_LOG_FILE="$MCP_DIR/mcp-bridge.log" 8 | MCP_JSON_PATH="$ROOT_DIR/mcp.json" 9 | FUNCTIONS_JSON_PATH="$ROOT_DIR/functions.json" 10 | MCP_BRIDGE_PORT="${MCP_BRIDGE_PORT:-8808}" 11 | 12 | # @cmd Start/restart the mcp bridge server 13 | # @alias restart 14 | start() { 15 | if [[ ! -f "$MCP_JSON_PATH" ]]; then 16 | _die "error: not found mcp.json" 17 | fi 18 | stop 19 | mkdir -p "$MCP_DIR" 20 | index_js="$ROOT_DIR/mcp/bridge/index.js" 21 | llm_functions_dir="$ROOT_DIR" 22 | if _is_win; then 23 | index_js="$(cygpath -w "$index_js")" 24 | llm_functions_dir="$(cygpath -w "$llm_functions_dir")" 25 | fi 26 | echo "Start MCP Bridge server..." 27 | echo "Install node dependencies..." 
> "$MCP_LOG_FILE" 28 | (cd "$ROOT_DIR/mcp/bridge" && npm install 1>/dev/null 2>> "$MCP_LOG_FILE") 29 | nohup node "$index_js" "$llm_functions_dir" >> "$MCP_LOG_FILE" 2>&1 & 30 | wait-for-server 31 | echo "Merge MCP tools into functions.json" 32 | "$0" merge-functions -S 33 | build-bin 34 | } 35 | 36 | # @cmd Stop the mcp bridge server 37 | stop() { 38 | pid="$(get-server-pid)" 39 | if [[ -n "$pid" ]]; then 40 | if _is_win; then 41 | taskkill /PID "$pid" /F > /dev/null 2>&1 || true 42 | else 43 | kill -9 "$pid" > /dev/null 2>&1 || true 44 | fi 45 | fi 46 | "$0" recovery-functions -S 47 | } 48 | 49 | # @cmd Check the mcp bridge server is running 50 | check() { 51 | if [[ -f "$MCP_JSON_PATH" ]]; then 52 | echo "Check mcp/bridge" 53 | pid="$(get-server-pid)" 54 | if [[ -z "$pid" ]]; then 55 | stop 56 | echo "✗ server is not running" 57 | fi 58 | fi 59 | } 60 | 61 | # @cmd Run the mcp tool 62 | # @arg tool![`_choice_tool`] The tool name 63 | # @arg json The json data 64 | run@tool() { 65 | if [[ -z "$argc_json" ]]; then 66 | declaration="$(generate-declarations | jq --arg tool "$argc_tool" -r '.[] | select(.name == $tool)')" 67 | if [[ -n "$declaration" ]]; then 68 | _ask_json_data "$declaration" 69 | fi 70 | fi 71 | if [[ -z "$argc_json" ]]; then 72 | _die "error: no JSON data" 73 | fi 74 | bash "$ROOT_DIR/scripts/run-mcp-tool.sh" "$argc_tool" "$argc_json" 75 | } 76 | 77 | # @cmd Show the logs 78 | # @flag -f --follow Follow mode 79 | logs() { 80 | if [[ ! -f "$MCP_LOG_FILE" ]]; then 81 | _die "error: not found log file at '$MCP_LOG_FILE'" 82 | fi 83 | if [[ -n "$argc_follow" ]]; then 84 | tail -f "$MCP_LOG_FILE" 85 | else 86 | cat "$MCP_LOG_FILE" 87 | fi 88 | } 89 | 90 | # @cmd Build tools to bin 91 | build-bin() { 92 | mkdir -p "$BIN_DIR" 93 | tools=( $(generate-declarations | jq -r '.[].name') ) 94 | for tool in "${tools[@]}"; do 95 | if _is_win; then 96 | bin_file="$BIN_DIR/$tool.cmd" 97 | _build_win_shim > "$bin_file" 98 | else 99 | bin_file="$BIN_DIR/$tool" 100 | ln -s -f "$ROOT_DIR/scripts/run-mcp-tool.sh" "$bin_file" 101 | fi 102 | echo "Build bin/$tool" 103 | done 104 | } 105 | 106 | # @cmd Merge mcp tools into functions.json 107 | # @flag -S --save Save to functions.json 108 | merge-functions() { 109 | result="$(jq --argjson json1 "$("$0" recovery-functions)" --argjson json2 "$(generate-declarations)" -n '($json1 + $json2)')" 110 | if [[ -n "$argc_save" ]]; then 111 | printf "%s" "$result" > "$FUNCTIONS_JSON_PATH" 112 | else 113 | printf "%s" "$result" 114 | fi 115 | } 116 | 117 | # @cmd Unmerge mcp tools from functions.json 118 | # @flag -S --save Save to functions.json 119 | recovery-functions() { 120 | functions="[]" 121 | if [[ -f "$FUNCTIONS_JSON_PATH" ]]; then 122 | functions="$(cat "$FUNCTIONS_JSON_PATH")" 123 | fi 124 | result="$(printf "%s" "$functions" | jq 'map(select(has("mcp") | not))')" 125 | if [[ -n "$argc_save" ]]; then 126 | printf "%s" "$result" > "$FUNCTIONS_JSON_PATH" 127 | else 128 | printf "%s" "$result" 129 | fi 130 | } 131 | 132 | # @cmd Generate function declarations for the mcp tools 133 | generate-declarations() { 134 | pid="$(get-server-pid)" 135 | if [[ -n "$pid" ]]; then 136 | curl -sS http://localhost:$MCP_BRIDGE_PORT/tools 137 | else 138 | echo "[]" 139 | fi 140 | } 141 | 142 | # @cmd Wait for the mcp bridge server to ready 143 | wait-for-server() { 144 | while true; do 145 | if [[ "$(curl -fsS http://localhost:$MCP_BRIDGE_PORT/health 2>&1)" == "OK" ]]; then 146 | break; 147 | fi 148 | sleep 1 149 | done 150 | } 151 | 152 | # @cmd Get the server pid 
153 | get-server-pid() { 154 | curl -fsSL http://localhost:$MCP_BRIDGE_PORT/pid 2>/dev/null || true 155 | } 156 | 157 | _ask_json_data() { 158 | declaration="$1" 159 | echo 'Missing the JSON data but here are its properties:' 160 | echo "$declaration" | ./scripts/declarations-util.sh pretty-print | sed -n '2,$s/^/>/p' 161 | echo 'Generate placeholder data:' 162 | data="$(echo "$declaration" | _declarations_json_data)" 163 | echo "> $data" 164 | read -e -r -p 'JSON data (Press ENTER to use placeholder): ' res 165 | if [[ -z "$res" ]]; then 166 | argc_json="$data" 167 | else 168 | argc_json="$res" 169 | fi 170 | } 171 | 172 | _declarations_json_data() { 173 | ./scripts/declarations-util.sh generate-json | tail -n +2 174 | } 175 | 176 | _build_win_shim() { 177 | run="\"$(argc --argc-shell-path)\" --noprofile --norc" 178 | cat <<-EOF 179 | @echo off 180 | setlocal 181 | 182 | set "bin_dir=%~dp0" 183 | for %%i in ("%bin_dir:~0,-1%") do set "script_dir=%%~dpi" 184 | set "script_name=%~n0" 185 | 186 | $run "%script_dir%scripts\run-mcp-tool.sh" "%script_name%" %* 187 | EOF 188 | } 189 | 190 | _is_win() { 191 | if [[ "$OS" == "Windows_NT" ]]; then 192 | return 0 193 | else 194 | return 1 195 | fi 196 | } 197 | 198 | _choice_tool() { 199 | generate-declarations | jq -r '.[].name' 200 | } 201 | 202 | _die() { 203 | echo "$*" >&2 204 | exit 1 205 | } 206 | 207 | # See more details at https://github.com/sigoden/argc 208 | eval "$(argc --argc-eval "$0" "$@")" 209 | -------------------------------------------------------------------------------- /scripts/run-agent.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // Usage: ./run-agent.js 4 | 5 | const path = require("path"); 6 | const { readFile, writeFile } = require("fs/promises"); 7 | const os = require("os"); 8 | 9 | async function main() { 10 | const [agentName, agentFunc, rawData] = parseArgv("run-agent.js"); 11 | const agentData = parseRawData(rawData); 12 | 13 | const rootDir = path.resolve(__dirname, ".."); 14 | await setupEnv(rootDir, agentName, agentFunc); 15 | 16 | const agentToolsPath = path.resolve(rootDir, `agents/${agentName}/tools.js`); 17 | await run(agentName, agentToolsPath, agentFunc, agentData); 18 | } 19 | 20 | function parseArgv(thisFileName) { 21 | let agentName = process.argv[1]; 22 | let agentFunc = ""; 23 | let agentData = null; 24 | 25 | if (agentName.endsWith(thisFileName)) { 26 | agentName = process.argv[2]; 27 | agentFunc = process.argv[3]; 28 | agentData = process.argv[4]; 29 | } else { 30 | agentName = path.basename(agentName); 31 | agentFunc = process.argv[2]; 32 | agentData = process.argv[3]; 33 | } 34 | 35 | if (agentName && agentName.endsWith(".js")) { 36 | agentName = agentName.slice(0, -3); 37 | } 38 | 39 | if (!agentData || !agentFunc || !agentName) { 40 | console.log(`Usage: ./run-agent.js `); 41 | process.exit(1); 42 | } 43 | 44 | return [agentName, agentFunc, agentData]; 45 | } 46 | 47 | function parseRawData(data) { 48 | if (!data) { 49 | throw new Error("No JSON data"); 50 | } 51 | try { 52 | return JSON.parse(data); 53 | } catch { 54 | throw new Error("Invalid JSON data"); 55 | } 56 | } 57 | 58 | async function setupEnv(rootDir, agentName, agentFunc) { 59 | await loadEnv(path.resolve(rootDir, ".env")); 60 | process.env["LLM_ROOT_DIR"] = rootDir; 61 | process.env["LLM_AGENT_NAME"] = agentName; 62 | process.env["LLM_AGENT_FUNC"] = agentFunc; 63 | process.env["LLM_AGENT_ROOT_DIR"] = path.resolve( 64 | rootDir, 65 | "agents", 66 | agentName, 67 | 
); 68 | process.env["LLM_AGENT_CACHE_DIR"] = path.resolve( 69 | rootDir, 70 | "cache", 71 | agentName, 72 | ); 73 | } 74 | 75 | async function loadEnv(filePath) { 76 | let lines = []; 77 | try { 78 | const data = await readFile(filePath, "utf-8"); 79 | lines = data.split("\n"); 80 | } catch { 81 | return; 82 | } 83 | 84 | const envVars = new Map(); 85 | 86 | for (const line of lines) { 87 | if (line.trim().startsWith("#") || line.trim() === "") { 88 | continue; 89 | } 90 | 91 | const [key, ...valueParts] = line.split("="); 92 | const envName = key.trim(); 93 | 94 | if (!process.env[envName]) { 95 | let envValue = valueParts.join("=").trim(); 96 | if ((envValue.startsWith('"') && envValue.endsWith('"')) || (envValue.startsWith("'") && envValue.endsWith("'"))) { 97 | envValue = envValue.slice(1, -1); 98 | } 99 | envVars.set(envName, envValue); 100 | } 101 | } 102 | 103 | for (const [envName, envValue] of envVars.entries()) { 104 | process.env[envName] = envValue; 105 | } 106 | } 107 | 108 | async function run(agentName, agentPath, agentFunc, agentData) { 109 | if (os.platform() === "win32") { 110 | agentPath = `file://${agentPath}`; 111 | } 112 | const mod = await import(agentPath); 113 | if (!mod || !mod[agentFunc]) { 114 | throw new Error(`Not module function '${agentFunc}' at '${agentPath}'`); 115 | } 116 | const value = await mod[agentFunc](agentData); 117 | await returnToLLM(value); 118 | await dumpResult(`${agentName}:${agentFunc}`); 119 | } 120 | 121 | async function returnToLLM(value) { 122 | if (value === null || value === undefined) { 123 | return; 124 | } 125 | const write = async (value) => { 126 | if (process.env["LLM_OUTPUT"]) { 127 | await writeFile(process.env["LLM_OUTPUT"], value); 128 | } else { 129 | process.stdout.write(value); 130 | } 131 | } 132 | const type = typeof value; 133 | if (type === "string" || type === "number" || type === "boolean") { 134 | await write(value.toString()); 135 | } else if (type === "object") { 136 | const proto = Object.prototype.toString.call(value); 137 | if (proto === "[object Object]" || proto === "[object Array]") { 138 | const valueStr = JSON.stringify(value, null, 2); 139 | require("assert").deepStrictEqual(value, JSON.parse(valueStr)); 140 | await write(valueStr); 141 | } 142 | } 143 | } 144 | 145 | async function dumpResult(name) { 146 | if (!process.env["LLM_DUMP_RESULTS"] || !process.env["LLM_OUTPUT"] || !process.stdout.isTTY) { 147 | return; 148 | } 149 | let showResult = false; 150 | try { 151 | if (new RegExp(`\\b(${process.env["LLM_DUMP_RESULTS"]})\\b`).test(name)) { 152 | showResult = true; 153 | } 154 | } catch { } 155 | 156 | if (!showResult) { 157 | return; 158 | } 159 | 160 | let data = ""; 161 | try { 162 | data = await readFile(process.env["LLM_OUTPUT"], "utf-8"); 163 | } catch { 164 | return; 165 | } 166 | process.stdout.write(`\x1b[2m----------------------\n${data}\n----------------------\x1b[0m\n`); 167 | } 168 | 169 | main().catch((err) => { 170 | console.error(err); 171 | process.exit(1); 172 | }); -------------------------------------------------------------------------------- /scripts/run-agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Usage: ./run-agent.py 4 | 5 | import os 6 | import re 7 | import json 8 | import sys 9 | import importlib.util 10 | 11 | 12 | def main(): 13 | (agent_name, agent_func, raw_data) = parse_argv("run-agent.py") 14 | agent_data = parse_raw_data(raw_data) 15 | 16 | root_dir = 
os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) 17 | setup_env(root_dir, agent_name, agent_func) 18 | 19 | agent_tools_path = os.path.join(root_dir, f"agents/{agent_name}/tools.py") 20 | run(agent_name, agent_tools_path, agent_func, agent_data) 21 | 22 | 23 | def parse_raw_data(data): 24 | if not data: 25 | raise ValueError("No JSON data") 26 | 27 | try: 28 | return json.loads(data) 29 | except Exception: 30 | raise ValueError("Invalid JSON data") 31 | 32 | 33 | def parse_argv(this_file_name): 34 | argv = sys.argv[:] + [None] * max(0, 4 - len(sys.argv)) 35 | 36 | agent_name = argv[0] 37 | agent_func = "" 38 | agent_data = "" 39 | 40 | if agent_name.endswith(this_file_name): 41 | if len(sys.argv) > 3: 42 | agent_name = sys.argv[1] 43 | agent_func = sys.argv[2] 44 | agent_data = sys.argv[3] 45 | else: 46 | if len(sys.argv) > 2: 47 | agent_name = os.path.basename(agent_name) 48 | agent_func = sys.argv[1] 49 | agent_data = sys.argv[2] 50 | 51 | if agent_name and agent_name.endswith(".py"): 52 | agent_name = agent_name[:-3] 53 | 54 | if (not agent_data) or (not agent_func) or (not agent_name): 55 | print("Usage: ./run-agent.py ", file=sys.stderr) 56 | sys.exit(1) 57 | 58 | return agent_name, agent_func, agent_data 59 | 60 | 61 | def setup_env(root_dir, agent_name, agent_func): 62 | load_env(os.path.join(root_dir, ".env")) 63 | os.environ["LLM_ROOT_DIR"] = root_dir 64 | os.environ["LLM_AGENT_NAME"] = agent_name 65 | os.environ["LLM_AGENT_FUNC"] = agent_func 66 | os.environ["LLM_AGENT_ROOT_DIR"] = os.path.join(root_dir, "agents", agent_name) 67 | os.environ["LLM_AGENT_CACHE_DIR"] = os.path.join(root_dir, "cache", agent_name) 68 | 69 | 70 | def load_env(file_path): 71 | try: 72 | with open(file_path, "r") as f: 73 | lines = f.readlines() 74 | except: 75 | return 76 | 77 | env_vars = {} 78 | 79 | for line in lines: 80 | line = line.strip() 81 | if line.startswith("#") or not line: 82 | continue 83 | 84 | key, *value_parts = line.split("=") 85 | env_name = key.strip() 86 | 87 | if env_name not in os.environ: 88 | env_value = "=".join(value_parts).strip() 89 | if (env_value.startswith('"') and env_value.endswith('"')) or (env_value.startswith("'") and env_value.endswith("'")): 90 | env_value = env_value[1:-1] 91 | env_vars[env_name] = env_value 92 | 93 | os.environ.update(env_vars) 94 | 95 | 96 | def run(agent_name, agent_path, agent_func, agent_data): 97 | spec = importlib.util.spec_from_file_location( 98 | os.path.basename(agent_path), agent_path 99 | ) 100 | mod = importlib.util.module_from_spec(spec) 101 | spec.loader.exec_module(mod) 102 | 103 | if not hasattr(mod, agent_func): 104 | raise Exception(f"Not module function '{agent_func}' at '{agent_path}'") 105 | 106 | value = getattr(mod, agent_func)(**agent_data) 107 | return_to_llm(value) 108 | dump_result(rf'{agent_name}:{agent_func}') 109 | 110 | 111 | def return_to_llm(value): 112 | if value is None: 113 | return 114 | 115 | if "LLM_OUTPUT" in os.environ: 116 | writer = open(os.environ["LLM_OUTPUT"], "w") 117 | else: 118 | writer = sys.stdout 119 | 120 | value_type = type(value).__name__ 121 | if value_type in ("str", "int", "float", "bool"): 122 | writer.write(str(value)) 123 | elif value_type == "dict" or value_type == "list": 124 | value_str = json.dumps(value, indent=2) 125 | assert value == json.loads(value_str) 126 | writer.write(value_str) 127 | 128 | 129 | def dump_result(name): 130 | if (not os.getenv("LLM_DUMP_RESULTS")) or (not os.getenv("LLM_OUTPUT")) or (not os.isatty(1)): 131 | return 132 | 133 | show_result = 
False 134 | try: 135 | if re.search(rf'\b({os.environ["LLM_DUMP_RESULTS"]})\b', name): 136 | show_result = True 137 | except: 138 | pass 139 | 140 | if not show_result: 141 | return 142 | 143 | try: 144 | with open(os.environ["LLM_OUTPUT"], "r", encoding="utf-8") as f: 145 | data = f.read() 146 | except: 147 | return 148 | 149 | print(f"\x1b[2m----------------------\n{data}\n----------------------\x1b[0m") 150 | 151 | 152 | if __name__ == "__main__": 153 | main() -------------------------------------------------------------------------------- /scripts/run-agent.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Usage: ./run-agent.sh 4 | 5 | set -e 6 | 7 | main() { 8 | root_dir="$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )/.." &> /dev/null && pwd)" 9 | self_name=run-agent.sh 10 | parse_argv "$@" 11 | setup_env 12 | tools_path="$root_dir/agents/$agent_name/tools.sh" 13 | run 14 | } 15 | 16 | parse_argv() { 17 | if [[ "$0" == *"$self_name" ]]; then 18 | agent_name="$1" 19 | agent_func="$2" 20 | agent_data="$3" 21 | else 22 | agent_name="$(basename "$0")" 23 | agent_func="$1" 24 | agent_data="$2" 25 | fi 26 | if [[ "$agent_name" == *.sh ]]; then 27 | agent_name="${agent_name:0:$((${#agent_name}-3))}" 28 | fi 29 | if [[ -z "$agent_data" ]] || [[ -z "$agent_func" ]] || [[ -z "$agent_name" ]]; then 30 | die "usage: ./run-agent.sh " 31 | fi 32 | } 33 | 34 | setup_env() { 35 | load_env "$root_dir/.env" 36 | export LLM_ROOT_DIR="$root_dir" 37 | export LLM_AGENT_NAME="$agent_name" 38 | export LLM_AGENT_FUNC="$agent_func" 39 | export LLM_AGENT_ROOT_DIR="$LLM_ROOT_DIR/agents/$agent_name" 40 | export LLM_AGENT_CACHE_DIR="$LLM_ROOT_DIR/cache/$agent_name" 41 | } 42 | 43 | load_env() { 44 | local env_file="$1" env_vars 45 | if [[ -f "$env_file" ]]; then 46 | while IFS='=' read -r key value; do 47 | if [[ "$key" == $'#'* ]] || [[ -z "$key" ]]; then 48 | continue 49 | fi 50 | if [[ -z "${!key+x}" ]]; then 51 | env_vars="$env_vars $key=$value" 52 | fi 53 | done < <(cat "$env_file"; echo "") 54 | if [[ -n "$env_vars" ]]; then 55 | eval "export $env_vars" 56 | fi 57 | fi 58 | } 59 | 60 | run() { 61 | if [[ -z "$agent_data" ]]; then 62 | die "error: no JSON data" 63 | fi 64 | 65 | if [[ "$OS" == "Windows_NT" ]]; then 66 | set -o igncr 67 | tools_path="$(cygpath -w "$tools_path")" 68 | tool_data="$(echo "$tool_data" | sed 's/\\/\\\\/g')" 69 | fi 70 | 71 | jq_script="$(cat <<-'EOF' 72 | def escape_shell_word: 73 | tostring 74 | | gsub("'"; "'\"'\"'") 75 | | gsub("\n"; "'$'\\n''") 76 | | "'\(.)'"; 77 | def to_args: 78 | to_entries | .[] | 79 | (.key | split("_") | join("-")) as $key | 80 | if .value | type == "array" then 81 | .value | .[] | "--\($key) \(. | escape_shell_word)" 82 | elif .value | type == "boolean" then 83 | if .value then "--\($key)" else "" end 84 | else 85 | "--\($key) \(.value | escape_shell_word)" 86 | end; 87 | [ to_args ] | join(" ") 88 | EOF 89 | )" 90 | args="$(echo "$agent_data" | jq -r "$jq_script" 2>/dev/null)" || { 91 | die "error: invalid JSON data" 92 | } 93 | 94 | if [[ -z "$LLM_OUTPUT" ]]; then 95 | is_temp_llm_output=1 96 | export LLM_OUTPUT="$(mktemp)" 97 | fi 98 | eval "'$tools_path' '$agent_func' $args" 99 | if [[ "$is_temp_llm_output" -eq 1 ]]; then 100 | cat "$LLM_OUTPUT" 101 | else 102 | dump_result "${LLM_AGENT_NAME}:${LLM_AGENT_FUNC}" 103 | fi 104 | } 105 | 106 | dump_result() { 107 | if [[ "$LLM_OUTPUT" == "/dev/stdout" ]] || [[ -z "$LLM_DUMP_RESULTS" ]] || [[ ! 
-t 1 ]]; then 108 | return; 109 | fi 110 | if grep -q -w -E "$LLM_DUMP_RESULTS" <<<"$1"; then 111 | cat <&2 121 | exit 1 122 | } 123 | 124 | main "$@" 125 | 126 | -------------------------------------------------------------------------------- /scripts/run-mcp-tool.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Usage: ./run-mcp-tool.sh 4 | 5 | set -e 6 | 7 | main() { 8 | root_dir="$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )/.." &> /dev/null && pwd)" 9 | self_name=run-mcp-tool.sh 10 | parse_argv "$@" 11 | load_env "$root_dir/.env" 12 | run 13 | } 14 | 15 | parse_argv() { 16 | if [[ "$0" == *"$self_name" ]]; then 17 | tool_name="$1" 18 | tool_data="$2" 19 | else 20 | tool_name="$(basename "$0")" 21 | tool_data="$1" 22 | fi 23 | if [[ "$tool_name" == *.sh ]]; then 24 | tool_name="${tool_name:0:$((${#tool_name}-3))}" 25 | fi 26 | if [[ -z "$tool_data" ]] || [[ -z "$tool_name" ]]; then 27 | die "usage: ./run-tool.sh " 28 | fi 29 | } 30 | 31 | 32 | load_env() { 33 | local env_file="$1" env_vars 34 | if [[ -f "$env_file" ]]; then 35 | while IFS='=' read -r key value; do 36 | if [[ "$key" == $'#'* ]] || [[ -z "$key" ]]; then 37 | continue 38 | fi 39 | if [[ -z "${!key+x}" ]]; then 40 | env_vars="$env_vars $key=$value" 41 | fi 42 | done < <(cat "$env_file"; echo "") 43 | if [[ -n "$env_vars" ]]; then 44 | eval "export $env_vars" 45 | fi 46 | fi 47 | } 48 | 49 | run() { 50 | if [[ -z "$tool_data" ]]; then 51 | die "error: no JSON data" 52 | fi 53 | 54 | if [[ "$OS" == "Windows_NT" ]]; then 55 | set -o igncr 56 | tool_data="$(echo "$tool_data" | sed 's/\\/\\\\/g')" 57 | fi 58 | 59 | if [[ -z "$LLM_OUTPUT" ]]; then 60 | is_temp_llm_output=1 61 | export LLM_OUTPUT="$(mktemp)" 62 | fi 63 | 64 | if [[ -n "$LLM_MCP_SKIP_CONFIRM" ]]; then 65 | if grep -q -w -E "$LLM_MCP_SKIP_CONFIRM" <<<"$tool_name"; then 66 | skip_confirm=1 67 | fi 68 | fi 69 | if [[ -n "$LLM_MCP_NEED_CONFIRM" ]]; then 70 | if grep -q -w -E "$LLM_MCP_NEED_CONFIRM" <<<"$tool_name"; then 71 | skip_confirm=0 72 | fi 73 | fi 74 | if [[ -t 1 ]] && [[ "$skip_confirm" -ne 1 ]]; then 75 | read -r -p "Are you sure you want to continue? [Y/n] " ans 76 | if [[ "$ans" == "N" || "$ans" == "n" ]]; then 77 | echo "error: canceled!" 2>&1 78 | exit 1 79 | fi 80 | fi 81 | 82 | curl -sS "http://localhost:${MCP_BRIDGE_PORT:-8808}/tools/$tool_name" \ 83 | -X POST \ 84 | -H 'content-type: application/json' \ 85 | -d "$tool_data" > "$LLM_OUTPUT" 86 | 87 | if [[ "$is_temp_llm_output" -eq 1 ]]; then 88 | cat "$LLM_OUTPUT" 89 | else 90 | dump_result "$tool_name" 91 | fi 92 | } 93 | 94 | dump_result() { 95 | if [[ "$LLM_OUTPUT" == "/dev/stdout" ]] || [[ -z "$LLM_DUMP_RESULTS" ]] || [[ ! 
-t 1 ]]; then 96 | return; 97 | fi 98 | if grep -q -w -E "$LLM_DUMP_RESULTS" <<<"$1"; then 99 | cat < 4 | 5 | const path = require("path"); 6 | const { readFile, writeFile } = require("fs/promises"); 7 | const os = require("os"); 8 | 9 | async function main() { 10 | const [toolName, rawData] = parseArgv("run-tool.js"); 11 | const toolData = parseRawData(rawData); 12 | 13 | const rootDir = path.resolve(__dirname, ".."); 14 | await setupEnv(rootDir, toolName); 15 | 16 | const toolPath = path.resolve(rootDir, `tools/${toolName}.js`); 17 | await run(toolName, toolPath, "run", toolData); 18 | } 19 | 20 | function parseArgv(thisFileName) { 21 | let toolName = process.argv[1]; 22 | let toolData = null; 23 | 24 | if (toolName.endsWith(thisFileName)) { 25 | toolName = process.argv[2]; 26 | toolData = process.argv[3]; 27 | } else { 28 | toolName = path.basename(toolName); 29 | toolData = process.argv[2]; 30 | } 31 | 32 | if (toolName && toolName.endsWith(".js")) { 33 | toolName = toolName.slice(0, -3); 34 | } 35 | 36 | if (!toolData || !toolName) { 37 | console.log(`Usage: ./run-tools.js `); 38 | process.exit(1); 39 | } 40 | 41 | return [toolName, toolData]; 42 | } 43 | 44 | function parseRawData(data) { 45 | if (!data) { 46 | throw new Error("No JSON data"); 47 | } 48 | try { 49 | return JSON.parse(data); 50 | } catch { 51 | throw new Error("Invalid JSON data"); 52 | } 53 | } 54 | 55 | async function setupEnv(rootDir, toolName) { 56 | await loadEnv(path.resolve(rootDir, ".env")); 57 | process.env["LLM_ROOT_DIR"] = rootDir; 58 | process.env["LLM_TOOL_NAME"] = toolName; 59 | process.env["LLM_TOOL_CACHE_DIR"] = path.resolve(rootDir, "cache", toolName); 60 | } 61 | 62 | async function loadEnv(filePath) { 63 | let lines = []; 64 | try { 65 | const data = await readFile(filePath, "utf-8"); 66 | lines = data.split("\n"); 67 | } catch { 68 | return; 69 | } 70 | 71 | const envVars = new Map(); 72 | 73 | for (const line of lines) { 74 | if (line.trim().startsWith("#") || line.trim() === "") { 75 | continue; 76 | } 77 | 78 | const [key, ...valueParts] = line.split("="); 79 | const envName = key.trim(); 80 | 81 | if (!process.env[envName]) { 82 | let envValue = valueParts.join("=").trim(); 83 | if ((envValue.startsWith('"') && envValue.endsWith('"')) || (envValue.startsWith("'") && envValue.endsWith("'"))) { 84 | envValue = envValue.slice(1, -1); 85 | } 86 | envVars.set(envName, envValue); 87 | } 88 | } 89 | 90 | for (const [envName, envValue] of envVars.entries()) { 91 | process.env[envName] = envValue; 92 | } 93 | } 94 | 95 | async function run(toolName, toolPath, toolFunc, toolData) { 96 | if (os.platform() === "win32") { 97 | toolPath = `file://${toolPath}`; 98 | } 99 | const mod = await import(toolPath); 100 | if (!mod || !mod[toolFunc]) { 101 | throw new Error(`Not module function '${toolFunc}' at '${toolPath}'`); 102 | } 103 | const value = await mod[toolFunc](toolData); 104 | await returnToLLM(value); 105 | await dumpResult(toolName); 106 | } 107 | 108 | async function returnToLLM(value) { 109 | if (value === null || value === undefined) { 110 | return; 111 | } 112 | const write = async (value) => { 113 | if (process.env["LLM_OUTPUT"]) { 114 | await writeFile(process.env["LLM_OUTPUT"], value); 115 | } else { 116 | process.stdout.write(value); 117 | } 118 | } 119 | const type = typeof value; 120 | if (type === "string" || type === "number" || type === "boolean") { 121 | await write(value.toString()); 122 | } else if (type === "object") { 123 | const proto = Object.prototype.toString.call(value); 124 | 
if (proto === "[object Object]" || proto === "[object Array]") { 125 | const valueStr = JSON.stringify(value, null, 2); 126 | require("assert").deepStrictEqual(value, JSON.parse(valueStr)); 127 | await write(valueStr); 128 | } 129 | } 130 | } 131 | 132 | async function dumpResult(name) { 133 | if (!process.env["LLM_DUMP_RESULTS"] || !process.env["LLM_OUTPUT"] || !process.stdout.isTTY) { 134 | return; 135 | } 136 | let showResult = false; 137 | try { 138 | if (new RegExp(`\\b(${process.env["LLM_DUMP_RESULTS"]})\\b`).test(name)) { 139 | showResult = true; 140 | } 141 | } catch { } 142 | 143 | if (!showResult) { 144 | return; 145 | } 146 | 147 | let data = ""; 148 | try { 149 | data = await readFile(process.env["LLM_OUTPUT"], "utf-8"); 150 | } catch { 151 | return; 152 | } 153 | process.stdout.write(`\x1b[2m----------------------\n${data}\n----------------------\x1b[0m\n`); 154 | } 155 | 156 | main().catch((err) => { 157 | console.error(err); 158 | process.exit(1); 159 | }); -------------------------------------------------------------------------------- /scripts/run-tool.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Usage: ./run-tool.py 4 | 5 | import os 6 | import re 7 | import json 8 | import sys 9 | import importlib.util 10 | 11 | 12 | def main(): 13 | (tool_name, raw_data) = parse_argv("run-tool.py") 14 | tool_data = parse_raw_data(raw_data) 15 | 16 | root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) 17 | setup_env(root_dir, tool_name) 18 | 19 | tool_path = os.path.join(root_dir, f"tools/{tool_name}.py") 20 | run(tool_name, tool_path, "run", tool_data) 21 | 22 | 23 | def parse_raw_data(data): 24 | if not data: 25 | raise ValueError("No JSON data") 26 | 27 | try: 28 | return json.loads(data) 29 | except Exception: 30 | raise ValueError("Invalid JSON data") 31 | 32 | 33 | def parse_argv(this_file_name): 34 | argv = sys.argv[:] + [None] * max(0, 3 - len(sys.argv)) 35 | 36 | tool_name = argv[0] 37 | tool_data = "" 38 | 39 | if tool_name.endswith(this_file_name): 40 | if len(sys.argv) > 2: 41 | tool_name = argv[1] 42 | tool_data = argv[2] 43 | else: 44 | if len(sys.argv) > 1: 45 | tool_name = os.path.basename(tool_name) 46 | tool_data = sys.argv[1] 47 | 48 | if tool_name and tool_name.endswith(".py"): 49 | tool_name = tool_name[:-3] 50 | 51 | if (not tool_data) or (not tool_name): 52 | print("Usage: ./run-tool.py ", file=sys.stderr) 53 | sys.exit(1) 54 | 55 | return tool_name, tool_data 56 | 57 | 58 | def setup_env(root_dir, tool_name): 59 | load_env(os.path.join(root_dir, ".env")) 60 | os.environ["LLM_ROOT_DIR"] = root_dir 61 | os.environ["LLM_TOOL_NAME"] = tool_name 62 | os.environ["LLM_TOOL_CACHE_DIR"] = os.path.join(root_dir, "cache", tool_name) 63 | 64 | 65 | def load_env(file_path): 66 | try: 67 | with open(file_path, "r") as f: 68 | lines = f.readlines() 69 | except: 70 | return 71 | 72 | env_vars = {} 73 | 74 | for line in lines: 75 | line = line.strip() 76 | if line.startswith("#") or not line: 77 | continue 78 | 79 | key, *value_parts = line.split("=") 80 | env_name = key.strip() 81 | 82 | if env_name not in os.environ: 83 | env_value = "=".join(value_parts).strip() 84 | if (env_value.startswith('"') and env_value.endswith('"')) or (env_value.startswith("'") and env_value.endswith("'")): 85 | env_value = env_value[1:-1] 86 | env_vars[env_name] = env_value 87 | 88 | os.environ.update(env_vars) 89 | 90 | 91 | def run(tool_name, tool_path, tool_func, tool_data): 92 | spec = 
importlib.util.spec_from_file_location( 93 | os.path.basename(tool_path), tool_path 94 | ) 95 | mod = importlib.util.module_from_spec(spec) 96 | spec.loader.exec_module(mod) 97 | 98 | if not hasattr(mod, tool_func): 99 | raise Exception(f"Not module function '{tool_func}' at '{tool_path}'") 100 | 101 | value = getattr(mod, tool_func)(**tool_data) 102 | return_to_llm(value) 103 | dump_result(tool_name) 104 | 105 | 106 | def return_to_llm(value): 107 | if value is None: 108 | return 109 | 110 | if "LLM_OUTPUT" in os.environ: 111 | writer = open(os.environ["LLM_OUTPUT"], "w") 112 | else: 113 | writer = sys.stdout 114 | 115 | value_type = type(value).__name__ 116 | if value_type in ("str", "int", "float", "bool"): 117 | writer.write(str(value)) 118 | elif value_type == "dict" or value_type == "list": 119 | value_str = json.dumps(value, indent=2) 120 | assert value == json.loads(value_str) 121 | writer.write(value_str) 122 | 123 | 124 | def dump_result(name): 125 | if (not os.getenv("LLM_DUMP_RESULTS")) or (not os.getenv("LLM_OUTPUT")) or (not os.isatty(1)): 126 | return 127 | 128 | show_result = False 129 | try: 130 | if re.search(rf'\b({os.environ["LLM_DUMP_RESULTS"]})\b', name): 131 | show_result = True 132 | except: 133 | pass 134 | 135 | if not show_result: 136 | return 137 | 138 | try: 139 | with open(os.environ["LLM_OUTPUT"], "r", encoding="utf-8") as f: 140 | data = f.read() 141 | except: 142 | return 143 | 144 | print(f"\x1b[2m----------------------\n{data}\n----------------------\x1b[0m") 145 | 146 | 147 | if __name__ == "__main__": 148 | main() -------------------------------------------------------------------------------- /scripts/run-tool.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Usage: ./run-tool.sh 4 | 5 | set -e 6 | 7 | main() { 8 | root_dir="$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )/.." 
&> /dev/null && pwd)" 9 | self_name=run-tool.sh 10 | parse_argv "$@" 11 | setup_env 12 | tool_path="$root_dir/tools/$tool_name.sh" 13 | run 14 | } 15 | 16 | parse_argv() { 17 | if [[ "$0" == *"$self_name" ]]; then 18 | tool_name="$1" 19 | tool_data="$2" 20 | else 21 | tool_name="$(basename "$0")" 22 | tool_data="$1" 23 | fi 24 | if [[ "$tool_name" == *.sh ]]; then 25 | tool_name="${tool_name:0:$((${#tool_name}-3))}" 26 | fi 27 | if [[ -z "$tool_data" ]] || [[ -z "$tool_name" ]]; then 28 | die "usage: ./run-tool.sh " 29 | fi 30 | } 31 | 32 | setup_env() { 33 | load_env "$root_dir/.env" 34 | export LLM_ROOT_DIR="$root_dir" 35 | export LLM_TOOL_NAME="$tool_name" 36 | export LLM_TOOL_CACHE_DIR="$LLM_ROOT_DIR/cache/$tool_name" 37 | } 38 | 39 | load_env() { 40 | local env_file="$1" env_vars 41 | if [[ -f "$env_file" ]]; then 42 | while IFS='=' read -r key value; do 43 | if [[ "$key" == $'#'* ]] || [[ -z "$key" ]]; then 44 | continue 45 | fi 46 | if [[ -z "${!key+x}" ]]; then 47 | env_vars="$env_vars $key=$value" 48 | fi 49 | done < <(cat "$env_file"; echo "") 50 | if [[ -n "$env_vars" ]]; then 51 | eval "export $env_vars" 52 | fi 53 | fi 54 | } 55 | 56 | run() { 57 | if [[ -z "$tool_data" ]]; then 58 | die "error: no JSON data" 59 | fi 60 | 61 | if [[ "$OS" == "Windows_NT" ]]; then 62 | set -o igncr 63 | tool_path="$(cygpath -w "$tool_path")" 64 | tool_data="$(echo "$tool_data" | sed 's/\\/\\\\/g')" 65 | fi 66 | 67 | jq_script="$(cat <<-'EOF' 68 | def escape_shell_word: 69 | tostring 70 | | gsub("'"; "'\"'\"'") 71 | | gsub("\n"; "'$'\\n''") 72 | | "'\(.)'"; 73 | def to_args: 74 | to_entries | .[] | 75 | (.key | split("_") | join("-")) as $key | 76 | if .value | type == "array" then 77 | .value | .[] | "--\($key) \(. | escape_shell_word)" 78 | elif .value | type == "boolean" then 79 | if .value then "--\($key)" else "" end 80 | else 81 | "--\($key) \(.value | escape_shell_word)" 82 | end; 83 | [ to_args ] | join(" ") 84 | EOF 85 | )" 86 | args="$(echo "$tool_data" | jq -r "$jq_script" 2>/dev/null)" || { 87 | die "error: invalid JSON data" 88 | } 89 | if [[ -z "$LLM_OUTPUT" ]]; then 90 | is_temp_llm_output=1 91 | export LLM_OUTPUT="$(mktemp)" 92 | fi 93 | eval "'$tool_path' $args" 94 | if [[ "$is_temp_llm_output" -eq 1 ]]; then 95 | cat "$LLM_OUTPUT" 96 | else 97 | dump_result "$tool_name" 98 | fi 99 | } 100 | 101 | dump_result() { 102 | if [[ "$LLM_OUTPUT" == "/dev/stdout" ]] || [[ -z "$LLM_DUMP_RESULTS" ]] || [[ ! -t 1 ]]; then 103 | return; 104 | fi 105 | if grep -q -w -E "$LLM_DUMP_RESULTS" <<<"$1"; then 106 | cat <&2 116 | exit 1 117 | } 118 | 119 | main "$@" 120 | -------------------------------------------------------------------------------- /tools/demo_js.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Demonstrate how to create a tool using Javascript and how to use comments. 
3 | * @typedef {Object} Args 4 | * @property {string} string - Define a required string property 5 | * @property {'foo'|'bar'} string_enum - Define a required string property with enum 6 | * @property {string} [string_optional] - Define a optional string property 7 | * @property {boolean} boolean - Define a required boolean property 8 | * @property {Integer} integer - Define a required integer property 9 | * @property {number} number - Define a required number property 10 | * @property {string[]} array - Define a required string array property 11 | * @property {string[]} [array_optional] - Define a optional string array property 12 | * @param {Args} args 13 | */ 14 | exports.run = function (args) { 15 | let output = `string: ${args.string} 16 | string_enum: ${args.string_enum} 17 | string_optional: ${args.string_optional} 18 | boolean: ${args.boolean} 19 | integer: ${args.integer} 20 | number: ${args.number} 21 | array: ${args.array} 22 | array_optional: ${args.array_optional}`; 23 | for (const [key, value] of Object.entries(process.env)) { 24 | if (key.startsWith("LLM_")) { 25 | output = `${output}\n${key}: ${value}`; 26 | } 27 | } 28 | return output; 29 | } 30 | -------------------------------------------------------------------------------- /tools/demo_py.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List, Literal, Optional 3 | 4 | def run( 5 | string: str, 6 | string_enum: Literal["foo", "bar"], 7 | boolean: bool, 8 | integer: int, 9 | number: float, 10 | array: List[str], 11 | string_optional: Optional[str] = None, 12 | array_optional: Optional[List[str]] = None, 13 | ): 14 | """Demonstrate how to create a tool using Python and how to use comments. 15 | Args: 16 | string: Define a required string property 17 | string_enum: Define a required string property with enum 18 | boolean: Define a required boolean property 19 | integer: Define a required integer property 20 | number: Define a required number property 21 | array: Define a required string array property 22 | string_optional: Define a optional string property 23 | array_optional: Define a optional string array property 24 | """ 25 | output = f"""string: {string} 26 | string_enum: {string_enum} 27 | string_optional: {string_optional} 28 | boolean: {boolean} 29 | integer: {integer} 30 | number: {number} 31 | array: {array} 32 | array_optional: {array_optional}""" 33 | 34 | for key, value in os.environ.items(): 35 | if key.startswith("LLM_"): 36 | output = f"{output}\n{key}: {value}" 37 | 38 | return output 39 | -------------------------------------------------------------------------------- /tools/demo_sh.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Demonstrate how to create a tool using Bash and how to use comment tags. 5 | # @option --string! Define a required string property 6 | # @option --string-enum![foo|bar] Define a required string property with enum 7 | # @option --string-optional Define a optional string property 8 | # @flag --boolean Define a boolean property 9 | # @option --integer! Define a required integer property 10 | # @option --number! 
Define a required number property 11 | # @option --array+ Define a required string array property 12 | # @option --array-optional* Define a optional string array property 13 | 14 | # @env LLM_OUTPUT=/dev/stdout The output path 15 | 16 | main() { 17 | cat <> "$LLM_OUTPUT" 18 | string: ${argc_string} 19 | string_enum: ${argc_string_enum} 20 | string_optional: ${argc_string_optional} 21 | boolean: ${argc_boolean} 22 | integer: ${argc_integer} 23 | number: ${argc_number} 24 | array: ${argc_array[@]} 25 | array_optional: ${argc_array_optional[@]} 26 | $(printenv | grep '^LLM_') 27 | EOF 28 | } 29 | 30 | eval "$(argc --argc-eval "$0" "$@")" 31 | -------------------------------------------------------------------------------- /tools/execute_command.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Execute the shell command. 5 | # @option --command! The command to execute. 6 | 7 | # @env LLM_OUTPUT=/dev/stdout The output path 8 | 9 | ROOT_DIR="${LLM_ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}" 10 | 11 | main() { 12 | "$ROOT_DIR/utils/guard_operation.sh" 13 | eval "$argc_command" >> "$LLM_OUTPUT" 14 | } 15 | 16 | eval "$(argc --argc-eval "$0" "$@")" 17 | -------------------------------------------------------------------------------- /tools/execute_js_code.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Execute the javascript code in node.js. 3 | * @typedef {Object} Args 4 | * @property {string} code - Javascript code to execute, such as `console.log("hello world")` 5 | * @param {Args} args 6 | */ 7 | exports.run = function ({ code }) { 8 | let output = ""; 9 | const oldStdoutWrite = process.stdout.write.bind(process.stdout); 10 | process.stdout.write = (chunk, _encoding, callback) => { 11 | output += chunk; 12 | if (callback) callback(); 13 | }; 14 | 15 | const value = eval(code); 16 | if (value !== undefined) { 17 | output += value; 18 | } 19 | 20 | process.stdout.write = oldStdoutWrite; 21 | return output; 22 | } 23 | -------------------------------------------------------------------------------- /tools/execute_py_code.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import io 3 | from contextlib import redirect_stdout 4 | 5 | 6 | def run(code: str): 7 | """Execute the python code. 
8 | Args: 9 | code: Python code to execute, such as `print("hello world")` 10 | """ 11 | output = io.StringIO() 12 | with redirect_stdout(output): 13 | value = exec_with_return(code, {}, {}) 14 | 15 | if value is not None: 16 | output.write(str(value)) 17 | 18 | return output.getvalue() 19 | 20 | 21 | def exec_with_return(code: str, globals: dict, locals: dict): 22 | a = ast.parse(code) 23 | last_expression = None 24 | if a.body: 25 | if isinstance(a_last := a.body[-1], ast.Expr): 26 | last_expression = ast.unparse(a.body.pop()) 27 | elif isinstance(a_last, ast.Assign): 28 | last_expression = ast.unparse(a_last.targets[0]) 29 | elif isinstance(a_last, (ast.AnnAssign, ast.AugAssign)): 30 | last_expression = ast.unparse(a_last.target) 31 | exec(ast.unparse(a), globals, locals) 32 | if last_expression: 33 | return eval(last_expression, globals, locals) 34 | -------------------------------------------------------------------------------- /tools/execute_sql_code.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Execute the sql code. 5 | # @option --code! The code to execute. 6 | 7 | # @meta require-tools usql 8 | 9 | # @env USQL_DSN! The database connection url. e.g. pgsql://user:pass@host:port 10 | # @env LLM_OUTPUT=/dev/stdout The output path 11 | 12 | ROOT_DIR="${LLM_ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}" 13 | 14 | main() { 15 | if ! grep -qi '^select' <<<"$argc_code"; then 16 | "$ROOT_DIR/utils/guard_operation.sh" 17 | fi 18 | usql -c "$argc_code" "$USQL_DSN" >> "$LLM_OUTPUT" 19 | } 20 | 21 | eval "$(argc --argc-eval "$0" "$@")" 22 | -------------------------------------------------------------------------------- /tools/fetch_url_via_curl.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Extract the content from a given URL. 5 | # @option --url! The URL to scrape. 6 | 7 | # @meta require-tools pandoc 8 | 9 | # @env LLM_OUTPUT=/dev/stdout The output path 10 | 11 | main() { 12 | # span and div tags are dropped from the HTML https://pandoc.org/MANUAL.html#raw-htmltex and sed removes any inline SVG images in image tags from the Markdown content. 13 | curl -fsSL "$argc_url" | \ 14 | pandoc -f html-native_divs-native_spans -t gfm-raw_html --wrap=none | \ 15 | sed -E 's/!\[[^]]*\]\([^)]*\)//g' \ 16 | >> "$LLM_OUTPUT" 17 | } 18 | 19 | eval "$(argc --argc-eval "$0" "$@")" 20 | -------------------------------------------------------------------------------- /tools/fetch_url_via_jina.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Extract the content from a given URL. 5 | # @option --url! The URL to scrape. 6 | 7 | # @env JINA_API_KEY The api key 8 | # @env LLM_OUTPUT=/dev/stdout The output path 9 | 10 | main() { 11 | curl_args=() 12 | if [[ -n "$JINA_API_KEY" ]]; then 13 | curl_args+=("-H" "Authorization: Bearer $JINA_API_KEY") 14 | fi 15 | curl -fsSL "${curl_args[@]}" "https://r.jina.ai/$argc_url" >> "$LLM_OUTPUT" 16 | } 17 | 18 | eval "$(argc --argc-eval "$0" "$@")" 19 | -------------------------------------------------------------------------------- /tools/fs_cat.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Read the contents of a file at the specified path. 
5 | # Use this when you need to examine the contents of an existing file. 6 | 7 | # @option --path! The path of the file to read 8 | 9 | # @env LLM_OUTPUT=/dev/stdout The output path 10 | 11 | main() { 12 | cat "$argc_path" >> "$LLM_OUTPUT" 13 | } 14 | 15 | eval "$(argc --argc-eval "$0" "$@")" 16 | -------------------------------------------------------------------------------- /tools/fs_ls.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe List all files and directories at the specified path. 5 | 6 | # @option --path! The path of the directory to list 7 | 8 | # @env LLM_OUTPUT=/dev/stdout The output path 9 | 10 | main() { 11 | ls -1 "$argc_path" >> "$LLM_OUTPUT" 12 | } 13 | 14 | eval "$(argc --argc-eval "$0" "$@")" 15 | -------------------------------------------------------------------------------- /tools/fs_mkdir.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Create a new directory at the specified path. 5 | 6 | # @option --path! The path of the directory to create 7 | 8 | # @env LLM_OUTPUT=/dev/stdout The output path 9 | 10 | main() { 11 | mkdir -p "$argc_path" 12 | echo "Directory created: $argc_path" >> "$LLM_OUTPUT" 13 | } 14 | 15 | eval "$(argc --argc-eval "$0" "$@")" 16 | -------------------------------------------------------------------------------- /tools/fs_patch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Apply a patch to a file at the specified path. 5 | # This can be used to edit the file, without having to rewrite the whole file. 6 | 7 | # @option --path! The path of the file to apply to 8 | # @option --contents! The patch to apply to the file 9 | # 10 | # Here is an example of a patch block that can be applied to modify the file to request the user's name: 11 | # --- a/hello.py 12 | # +++ b/hello.py 13 | # \@@ ... @@ 14 | # def hello(): 15 | # - print("Hello World") 16 | # + name = input("What is your name? ") 17 | # + print(f"Hello {name}") 18 | 19 | # @env LLM_OUTPUT=/dev/stdout The output path 20 | 21 | ROOT_DIR="${LLM_ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}" 22 | 23 | main() { 24 | if [ ! -f "$argc_path" ]; then 25 | echo "Not found file: $argc_path" 26 | exit 1 27 | fi 28 | new_contents="$(awk -f "$ROOT_DIR/utils/patch.awk" "$argc_path" <(printf "%s" "$argc_contents"))" 29 | printf "%s" "$new_contents" | git diff --no-index "$argc_path" - || true 30 | "$ROOT_DIR/utils/guard_operation.sh" "Apply changes?" 31 | printf "%s" "$new_contents" > "$argc_path" 32 | 33 | echo "The patch applied to: $argc_path" >> "$LLM_OUTPUT" 34 | } 35 | 36 | eval "$(argc --argc-eval "$0" "$@")" 37 | -------------------------------------------------------------------------------- /tools/fs_rm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Remove the file or directory at the specified path. 5 | 6 | # @option --path! The path of the file or directory to remove 7 | 8 | # @env LLM_OUTPUT=/dev/stdout The output path 9 | 10 | ROOT_DIR="${LLM_ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}" 11 | 12 | main() { 13 | if [[ -f "$argc_path" ]]; then 14 | "$ROOT_DIR/utils/guard_path.sh" "$argc_path" "Remove '$argc_path'?" 
15 | rm -rf "$argc_path" 16 | fi 17 | echo "Path removed: $argc_path" >> "$LLM_OUTPUT" 18 | } 19 | 20 | eval "$(argc --argc-eval "$0" "$@")" 21 | -------------------------------------------------------------------------------- /tools/fs_write.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Write the full file contents to a file at the specified path. 5 | 6 | # @option --path! The path of the file to write to 7 | # @option --contents! The full contents to write to the file 8 | 9 | # @env LLM_OUTPUT=/dev/stdout The output path 10 | 11 | ROOT_DIR="${LLM_ROOT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}" 12 | 13 | main() { 14 | if [[ -f "$argc_path" ]]; then 15 | printf "%s" "$argc_contents" | git diff --no-index "$argc_path" - || true 16 | "$ROOT_DIR/utils/guard_operation.sh" "Apply changes?" 17 | else 18 | "$ROOT_DIR/utils/guard_path.sh" "$argc_path" "Write '$argc_path'?" 19 | mkdir -p "$(dirname "$argc_path")" 20 | fi 21 | printf "%s" "$argc_contents" > "$argc_path" 22 | echo "The contents written to: $argc_path" >> "$LLM_OUTPUT" 23 | } 24 | 25 | eval "$(argc --argc-eval "$0" "$@")" 26 | -------------------------------------------------------------------------------- /tools/get_current_time.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Get the current time. 5 | 6 | # @env LLM_OUTPUT=/dev/stdout The output path 7 | 8 | main() { 9 | date >> "$LLM_OUTPUT" 10 | } 11 | 12 | eval "$(argc --argc-eval "$0" "$@")" 13 | -------------------------------------------------------------------------------- /tools/get_current_weather.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Get the current weather in a given location. 5 | # @option --location! The city and optionally the state or country, e.g., "London", "San Francisco, CA". 6 | 7 | # @env LLM_OUTPUT=/dev/stdout The output path 8 | 9 | main() { 10 | curl -fsSL "https://wttr.in/$(echo "$argc_location" | sed 's/ /+/g')?format=4&M" \ 11 | >> "$LLM_OUTPUT" 12 | } 13 | 14 | eval "$(argc --argc-eval "$0" "$@")" 15 | -------------------------------------------------------------------------------- /tools/search_arxiv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Search arXiv for a query and return the top papers. 5 | 6 | # @option --query! The query to search for. 7 | 8 | # @env ARXIV_MAX_RESULTS=3 The max results to return. 9 | # @env LLM_OUTPUT=/dev/stdout The output path 10 | 11 | main() { 12 | encoded_query="$(jq -nr --arg q "$argc_query" '$q|@uri')" 13 | url="http://export.arxiv.org/api/query?search_query=all:$encoded_query&max_results=$ARXIV_MAX_RESULTS" 14 | curl -fsSL "$url" >> "$LLM_OUTPUT" 15 | } 16 | 17 | eval "$(argc --argc-eval "$0" "$@")" 18 | -------------------------------------------------------------------------------- /tools/search_wikipedia.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Search Wikipedia for a query. 5 | # Uses it to get detailed information about a public figure, interpretation of a complex scientific concept or in-depth connectivity of a significant historical event,. 6 | 7 | # @option --query! The query to search for. 
8 | 9 | # @env LLM_OUTPUT=/dev/stdout The output path 10 | 11 | main() { 12 | encoded_query="$(jq -nr --arg q "$argc_query" '$q|@uri')" 13 | base_url="https://en.wikipedia.org/w/api.php" 14 | url="$base_url?action=query&list=search&srprop=&srlimit=1&limit=1&srsearch=$encoded_query&srinfo=suggestion&format=json" 15 | json="$(curl -fsSL "$url")" 16 | suggestion="$(echo "$json" | jq -r '.query.searchinfo.suggestion // empty')" 17 | title="$(echo "$json" | jq -r '.query.search[0].title // empty')" 18 | pageid="$(echo "$json" | jq -r '.query.search[0].pageid // empty')" 19 | if [[ -z "$title" || -z "$pageid" ]]; then 20 | echo "error: no results for '$argc_query'" >&2 21 | exit 1 22 | fi 23 | title="$(echo "$title" | tr ' ' '_')" 24 | url="$base_url?action=query&prop=extracts&explaintext=&titles=$title&exintro=&format=json" 25 | curl -fsSL "$url" | jq -r '.query.pages["'"$pageid"'"].extract' >> "$LLM_OUTPUT" 26 | } 27 | 28 | eval "$(argc --argc-eval "$0" "$@")" 29 | -------------------------------------------------------------------------------- /tools/search_wolframalpha.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Get an answer to a question using Wolfram Alpha. Input should the query in English. 5 | # Use it to answer user questions that require computation, detailed facts, data analysis, or complex queries. 6 | 7 | # @option --query! The query to search for. 8 | 9 | # @env WOLFRAM_API_ID! The api id 10 | # @env LLM_OUTPUT=/dev/stdout The output path 11 | 12 | main() { 13 | encoded_query="$(jq -nr --arg q "$argc_query" '$q|@uri')" 14 | url="https://api.wolframalpha.com/v2/query?appid=$WOLFRAM_API_ID&input=$encoded_query&output=json&format=plaintext" 15 | curl -fsSL "$url" | jq '[.queryresult | .pods[] | {title:.title, values:[.subpods[].plaintext | select(. != "")]}]' \ 16 | >> "$LLM_OUTPUT" 17 | } 18 | 19 | eval "$(argc --argc-eval "$0" "$@")" 20 | -------------------------------------------------------------------------------- /tools/send_mail.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Send a email. 5 | # @option --recipient! The recipient of the email. 6 | # @option --subject! The subject of the email. 7 | # @option --body! The body of the email. 8 | 9 | # @env EMAIL_SMTP_ADDR! The SMTP Address, e.g. smtps://smtp.gmail.com:465 10 | # @env EMAIL_SMTP_USER! The SMTP User, e.g. alice@gmail.com 11 | # @env EMAIL_SMTP_PASS! The SMTP Password 12 | # @env EMAIL_SENDER_NAME The sender name 13 | # @env LLM_OUTPUT=/dev/stdout The output path 14 | 15 | main() { 16 | sender_name="${EMAIL_SENDER_NAME:-$(echo "$EMAIL_SMTP_USER" | awk -F'@' '{print $1}')}" 17 | printf "%s\n" "From: $sender_name <$EMAIL_SMTP_USER> 18 | To: $argc_recipient 19 | Subject: $argc_subject 20 | 21 | $argc_body" | \ 22 | curl -fsS --ssl-reqd \ 23 | --url "$EMAIL_SMTP_ADDR" \ 24 | --user "$EMAIL_SMTP_USER:$EMAIL_SMTP_PASS" \ 25 | --mail-from "$EMAIL_SMTP_USER" \ 26 | --mail-rcpt "$argc_recipient" \ 27 | --upload-file - 28 | echo "Email sent successfully" >> "$LLM_OUTPUT" 29 | } 30 | 31 | eval "$(argc --argc-eval "$0" "$@")" 32 | -------------------------------------------------------------------------------- /tools/send_twilio.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Send SMS or Twilio Messaging Channels messages using Twilio API. 
5 | # @option --to-number! The recipient's phone number. Prefix with 'whatsapp:' for WhatsApp messages, e.g. whatsapp:+1234567890 6 | # @option --message! The content of the message to be sent 7 | 8 | # @env TWILIO_ACCOUNT_SID! The twilio account sid 9 | # @env TWILIO_AUTH_TOKEN! The twilio auth token 10 | # @env TWILIO_FROM_NUMBER! The twilio from number 11 | # @env LLM_OUTPUT=/dev/stdout The output path 12 | 13 | main() { 14 | from_number="$TWILIO_FROM_NUMBER" 15 | to_number="$argc_to_number" 16 | if [[ "$to_number" == 'whatsapp:'* ]]; then 17 | from_number="whatsapp:$from_number" 18 | fi 19 | if [[ "$to_number" != 'whatsapp:'* && "$to_number" != '+'* ]]; then 20 | to_number="+$to_number" 21 | fi 22 | res="$(curl -s -X POST "https://api.twilio.com/2010-04-01/Accounts/$TWILIO_ACCOUNT_SID/Messages.json" \ 23 | -u "$TWILIO_ACCOUNT_SID:$TWILIO_AUTH_TOKEN" \ 24 | -w "\n%{http_code}" \ 25 | --data-urlencode "From=$from_number" \ 26 | --data-urlencode "To=$to_number" \ 27 | --data-urlencode "Body=$argc_message")" 28 | status="$(echo "$res" | tail -n 1)" 29 | body="$(echo "$res" | head -n -1)" 30 | if [[ "$status" -ge 200 && "$status" -lt 300 ]]; then 31 | if [[ "$(echo "$body" | jq -r 'has("sid")')" == "true" ]]; then 32 | echo "Message sent successfully" >> "$LLM_OUTPUT" 33 | else 34 | _die "error: $body" 35 | fi 36 | else 37 | _die "error: $body" 38 | fi 39 | } 40 | 41 | _die() { 42 | echo "$*" >&2 43 | exit 1 44 | } 45 | 46 | eval "$(argc --argc-eval "$0" "$@")" 47 | -------------------------------------------------------------------------------- /tools/web_search_aichat.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Perform a web search to get up-to-date information or additional context. 5 | # Use this when you need current information or feel a search could provide a better answer. 6 | 7 | # @option --query! The query to search for. 8 | 9 | # @meta require-tools aichat 10 | 11 | # @env WEB_SEARCH_MODEL! The model for web-searching. 12 | # 13 | # supported aichat models: 14 | # - gemini:gemini-2.0-* 15 | # - vertexai:gemini-* 16 | # - perplexity:* 17 | # - ernie:* 18 | # @env LLM_OUTPUT=/dev/stdout The output path 19 | 20 | main() { 21 | client="${WEB_SEARCH_MODEL%%:*}" 22 | if [[ "$client" == "gemini" ]]; then 23 | export AICHAT_PATCH_GEMINI_CHAT_COMPLETIONS='{".*":{"body":{"tools":[{"google_search":{}}]}}}' 24 | elif [[ "$client" == "vertexai" ]]; then 25 | export AICHAT_PATCH_VERTEXAI_CHAT_COMPLETIONS='{ 26 | "gemini-1.5-.*":{"body":{"tools":[{"googleSearchRetrieval":{}}]}}, 27 | "gemini-2.0-.*":{"body":{"tools":[{"google_search":{}}]}} 28 | }' 29 | elif [[ "$client" == "ernie" ]]; then 30 | export AICHAT_PATCH_ERNIE_CHAT_COMPLETIONS='{".*":{"body":{"web_search":{"enable":true}}}}' 31 | fi 32 | aichat -m "$WEB_SEARCH_MODEL" "$argc_query" >> "$LLM_OUTPUT" 33 | } 34 | 35 | eval "$(argc --argc-eval "$0" "$@")" 36 | -------------------------------------------------------------------------------- /tools/web_search_perplexity.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Perform a web search using Perplexity API to get up-to-date information or additional context. 5 | # Use this when you need current information or feel a search could provide a better answer. 6 | 7 | # @option --query! The query to search for. 8 | 9 | # @env PERPLEXITY_API_KEY! 
The api key 10 | # @env PERPLEXITY_WEB_SEARCH_MODEL=llama-3.1-sonar-small-128k-online The LLM model for web search 11 | # @env LLM_OUTPUT=/dev/stdout The output path 12 | 13 | main() { 14 | curl -fsS -X POST https://api.perplexity.ai/chat/completions \ 15 | -H "authorization: Bearer $PERPLEXITY_API_KEY" \ 16 | -H "accept: application/json" \ 17 | -H "content-type: application/json" \ 18 | --data ' 19 | { 20 | "model": "'"$PERPLEXITY_WEB_SEARCH_MODEL"'", 21 | "messages": [ 22 | { 23 | "role": "user", 24 | "content": "'"$argc_query"'" 25 | } 26 | ] 27 | } 28 | ' | \ 29 | jq -r '.choices[0].message.content' \ 30 | >> "$LLM_OUTPUT" 31 | } 32 | 33 | eval "$(argc --argc-eval "$0" "$@")" 34 | -------------------------------------------------------------------------------- /tools/web_search_tavily.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # @describe Perform a web search using Tavily API to get up-to-date information or additional context. 5 | # Use this when you need current information or feel a search could provide a better answer. 6 | 7 | # @option --query! The query to search for. 8 | 9 | # @env TAVILY_API_KEY! The api key 10 | # @env LLM_OUTPUT=/dev/stdout The output path The output path 11 | 12 | main() { 13 | curl -fsSL -X POST https://api.tavily.com/search \ 14 | -H "content-type: application/json" \ 15 | -d ' 16 | { 17 | "api_key": "'"$TAVILY_API_KEY"'", 18 | "query": "'"$argc_query"'", 19 | "include_answer": true 20 | }' | \ 21 | jq -r '.answer' >> "$LLM_OUTPUT" 22 | } 23 | 24 | eval "$(argc --argc-eval "$0" "$@")" 25 | -------------------------------------------------------------------------------- /utils/guard_operation.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Guard an operation with a confirmation prompt. 4 | 5 | main() { 6 | if [ -t 1 ]; then 7 | confirmation_prompt="${1:-"Are you sure you want to continue?"}" 8 | read -r -p "$confirmation_prompt [Y/n] " ans 9 | if [[ "$ans" == "N" || "$ans" == "n" ]]; then 10 | echo "error: aborted!" 2>&1 11 | exit 1 12 | fi 13 | fi 14 | } 15 | 16 | main "$@" 17 | -------------------------------------------------------------------------------- /utils/guard_path.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | main() { 4 | if [[ "$#" -ne 2 ]]; then 5 | echo "Usage: guard_path.sh " >&2 6 | exit 1 7 | fi 8 | if [ -t 1 ]; then 9 | path="$(_to_realpath "$1")" 10 | confirmation_prompt="$2" 11 | if [[ ! "$path" == "$(pwd)"* ]]; then 12 | read -r -p "$confirmation_prompt [Y/n] " ans 13 | if [[ "$ans" == "N" || "$ans" == "n" ]]; then 14 | echo "error: aborted!" >&2 15 | exit 1 16 | fi 17 | fi 18 | fi 19 | } 20 | 21 | _to_realpath() { 22 | path="$1" 23 | if [[ $OS == "Windows_NT" ]]; then 24 | path="$(cygpath -u "$path")" 25 | fi 26 | awk -v path="$path" -v pwd="$PWD" ' 27 | BEGIN { 28 | if (path !~ /^\//) { 29 | path = pwd "/" path 30 | } 31 | if (path ~ /\/\.{1,2}?$/) { 32 | isDir = 1 33 | } 34 | split(path, parts, "/") 35 | newPartsLength = 0 36 | for (i = 1; i <= length(parts); i++) { 37 | part = parts[i] 38 | if (part == "..") { 39 | if (newPartsLength > 0) { 40 | delete newParts[newPartsLength--] 41 | } 42 | } else if (part != "." 
&& part != "") { 43 | newParts[++newPartsLength] = part 44 | } 45 | } 46 | if (isDir == 1 || newPartsLength == 0) { 47 | newParts[++newPartsLength] = "" 48 | } 49 | printf "/" 50 | for (i = 1; i <= newPartsLength; i++) { 51 | newPart = newParts[i] 52 | printf newPart 53 | if (i < newPartsLength) { 54 | printf "/" 55 | } 56 | } 57 | }' 58 | } 59 | 60 | main "$@" 61 | -------------------------------------------------------------------------------- /utils/patch.awk: -------------------------------------------------------------------------------- 1 | #!/usr/bin/awk -f 2 | 3 | # Apply a diff file to an original 4 | # Usage: awk -f patch.awk target-file patch-file 5 | 6 | FNR == NR { 7 | lines[FNR] = $0 8 | next; 9 | } 10 | 11 | { 12 | patchLines[FNR] = $0 13 | } 14 | 15 | END { 16 | totalPatchLines=length(patchLines) 17 | totalLines = length(lines) 18 | patchLineIndex = 1 19 | 20 | mode = "none" 21 | 22 | while (patchLineIndex <= totalPatchLines) { 23 | line = patchLines[patchLineIndex] 24 | 25 | if (line ~ /^--- / || line ~ /^\+\+\+ /) { 26 | patchLineIndex++ 27 | continue 28 | } 29 | 30 | if (line ~ /^@@ /) { 31 | mode = "hunk" 32 | hunkIndex++ 33 | patchLineIndex++ 34 | continue 35 | } 36 | 37 | if (mode == "hunk") { 38 | while (patchLineIndex <= totalPatchLines && line ~ /^[-+ ]|^\s*$/ && line !~ /^--- /) { 39 | sanitizedLine = substr(line, 2) 40 | if (line !~ /^\+/) { 41 | hunkTotalOriginalLines[hunkIndex]++; 42 | hunkOriginalLines[hunkIndex,hunkTotalOriginalLines[hunkIndex]] = sanitizedLine 43 | } 44 | if (line !~ /^-/) { 45 | hunkTotalUpdatedLines[hunkIndex]++; 46 | hunkUpdatedLines[hunkIndex,hunkTotalUpdatedLines[hunkIndex]] = sanitizedLine 47 | } 48 | patchLineIndex++ 49 | line = patchLines[patchLineIndex] 50 | } 51 | mode = "none" 52 | } else { 53 | patchLineIndex++ 54 | } 55 | } 56 | 57 | if (hunkIndex == 0) { 58 | print "error: no patch" > "/dev/stderr" 59 | exit 1 60 | } 61 | 62 | totalHunks = hunkIndex 63 | hunkIndex = 1 64 | 65 | # inspectHunks() 66 | 67 | for (lineIndex = 1; lineIndex <= totalLines; lineIndex++) { 68 | line = lines[lineIndex] 69 | nextLineIndex = 0 70 | 71 | if (hunkIndex <= totalHunks && line == hunkOriginalLines[hunkIndex,1]) { 72 | nextLineIndex = lineIndex + 1 73 | for (i = 2; i <= hunkTotalOriginalLines[hunkIndex]; i++) { 74 | if (lines[nextLineIndex] != hunkOriginalLines[hunkIndex,i]) { 75 | nextLineIndex = 0 76 | break 77 | } 78 | nextLineIndex++ 79 | } 80 | } 81 | if (nextLineIndex > 0) { 82 | for (i = 1; i <= hunkTotalUpdatedLines[hunkIndex]; i++) { 83 | print hunkUpdatedLines[hunkIndex,i] 84 | } 85 | hunkIndex++ 86 | lineIndex = nextLineIndex - 1; 87 | } else { 88 | print line 89 | } 90 | } 91 | 92 | if (hunkIndex != totalHunks + 1) { 93 | print "error: unable to apply patch" > "/dev/stderr" 94 | exit 1 95 | } 96 | } 97 | 98 | function inspectHunks() { 99 | print "/* Begin inspecting hunks" 100 | for (i = 1; i <= totalHunks; i++) { 101 | print ">>>>>> Original" 102 | for (j = 1; j <= hunkTotalOriginalLines[i]; j++) { 103 | print hunkOriginalLines[i,j] 104 | } 105 | print "======" 106 | for (j = 1; j <= hunkTotalUpdatedLines[i]; j++) { 107 | print hunkUpdatedLines[i,j] 108 | } 109 | print "<<<<<< Updated" 110 | } 111 | print "End inspecting hunks */\n" 112 | } --------------------------------------------------------------------------------
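# === Appended usage sketches (not part of the repository) ====================
# Illustrative, hedged examples only; file names and JSON values below are
# hypothetical, and both sketches assume argc, jq, bash and awk are installed.

# 1) Invoking a tool directly through scripts/run-tool.sh. The JSON keys map to
#    the tool's argc options (underscores become dashes), mirroring the kind of
#    placeholder data that `declarations-util.sh generate-json` produces:
./scripts/run-tool.sh demo_sh \
  '{"string":"foo","string_enum":"foo","boolean":true,"integer":42,"number":3.14,"array":["v1"]}'

# 2) Driving utils/patch.awk the same way tools/fs_patch.sh does: the target
#    file is the first input, the unified-diff text the second (here via
#    process substitution), and the patched result goes to stdout. The file
#    hello.py and the patch body mirror the example in the fs_patch.sh comments.
patch_body='--- a/hello.py
+++ b/hello.py
@@ ... @@
 def hello():
-    print("Hello World")
+    name = input("What is your name? ")
+    print(f"Hello {name}")'

awk -f utils/patch.awk hello.py <(printf "%s" "$patch_body") > hello.py.new \
  && mv hello.py.new hello.py   # replace the file only if the patch applied cleanly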