├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── config.yml │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── docs.yml │ ├── python-package.yml │ └── ui.yml ├── .gitignore ├── .gitmodules ├── .pre-commit-config.yaml ├── .style.yapf ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.rst ├── LICENSE ├── MANIFEST.in ├── README.md ├── __init__.py ├── burr ├── __init__.py ├── cli │ ├── __init__.py │ ├── __main__.py │ └── demo_data.py ├── common │ ├── __init__.py │ ├── async_utils.py │ └── types.py ├── core │ ├── __init__.py │ ├── action.py │ ├── application.py │ ├── graph.py │ ├── implementations.py │ ├── parallelism.py │ ├── persistence.py │ ├── serde.py │ ├── state.py │ ├── typing.py │ └── validation.py ├── examples ├── integrations │ ├── __init__.py │ ├── base.py │ ├── hamilton.py │ ├── haystack.py │ ├── notebook.py │ ├── opentelemetry.py │ ├── persisters │ │ ├── __init__.py │ │ ├── b_aiosqlite.py │ │ ├── b_asyncpg.py │ │ ├── b_mongodb.py │ │ ├── b_psycopg2.py │ │ ├── b_pymongo.py │ │ ├── b_redis.py │ │ └── postgresql.py │ ├── pydantic.py │ ├── ray.py │ ├── serde │ │ ├── __init__.py │ │ ├── langchain.py │ │ ├── pandas.py │ │ ├── pickle.py │ │ └── pydantic.py │ └── streamlit.py ├── lifecycle │ ├── __init__.py │ ├── base.py │ ├── default.py │ └── internal.py ├── log_setup.py ├── py.typed ├── system.py ├── telemetry.py ├── testing │ └── __init__.py ├── tracking │ ├── __init__.py │ ├── base.py │ ├── client.py │ ├── common │ │ ├── __init__.py │ │ └── models.py │ ├── s3client.py │ ├── server │ │ ├── backend.py │ │ ├── demo_data │ │ │ ├── demo_chatbot │ │ │ │ ├── chat-1-giraffe │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── chat-2-geography │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── chat-3-physics │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── chat-4-philosophy │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── 
metadata.json │ │ │ │ ├── chat-5-jokes │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ └── chat-6-demonstrate-errors │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ ├── demo_chatbot_with_traces │ │ │ │ ├── chat-1-giraffe │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── chat-2-geography │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── chat-3-physics │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── chat-4-philosophy │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── chat-5-jokes │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ └── chat-6-demonstrate-errors │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ ├── demo_conversational-rag │ │ │ │ ├── rag-1-food │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── rag-2-work-history │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ ├── rag-3-activities │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ │ └── rag-4-everything │ │ │ │ │ ├── graph.json │ │ │ │ │ ├── log.jsonl │ │ │ │ │ └── metadata.json │ │ │ └── demo_counter │ │ │ │ ├── count-to-1 │ │ │ │ ├── graph.json │ │ │ │ ├── log.jsonl │ │ │ │ └── metadata.json │ │ │ │ ├── count-to-10 │ │ │ │ ├── graph.json │ │ │ │ ├── log.jsonl │ │ │ │ └── metadata.json │ │ │ │ ├── count-to-100 │ │ │ │ ├── graph.json │ │ │ │ ├── log.jsonl │ │ │ │ └── metadata.json │ │ │ │ ├── count-to-42 │ │ │ │ ├── graph.json │ │ │ │ ├── log.jsonl │ │ │ │ └── metadata.json │ │ │ │ └── count-to-50 │ │ │ │ ├── graph.json │ │ │ │ ├── log.jsonl │ │ │ │ └── metadata.json │ │ ├── requirements-s3.txt │ │ ├── run.py │ │ ├── run.sh │ │ ├── s3 │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── architecture.png │ │ │ 
├── backend.py │ │ │ ├── deployment │ │ │ │ ├── Dockerfile │ │ │ │ ├── nginx.conf │ │ │ │ └── terraform │ │ │ │ │ ├── .gitignore │ │ │ │ │ ├── .terraform.lock.hcl │ │ │ │ │ ├── alb.tf │ │ │ │ │ ├── auto_scaling.tf │ │ │ │ │ ├── ecs.tf │ │ │ │ │ ├── iam.tf │ │ │ │ │ ├── logs.tf │ │ │ │ │ ├── network.tf │ │ │ │ │ ├── outputs.tf │ │ │ │ │ ├── provider.tf │ │ │ │ │ ├── security.tf │ │ │ │ │ ├── templates │ │ │ │ │ └── ecs │ │ │ │ │ │ └── burr_app.json.tpl │ │ │ │ │ └── variable.tf │ │ │ ├── initialize_db.py │ │ │ ├── models.py │ │ │ ├── pyproject.toml │ │ │ ├── settings.py │ │ │ └── utils.py │ │ └── schema.py │ └── utils.py ├── version.py └── visibility │ ├── __init__.py │ └── tracing.py ├── burr_logo.png ├── burr_logo.svg ├── chatbot.gif ├── docs ├── Makefile ├── README-internal.md ├── _static │ ├── burr_sdlc.png │ ├── chatbot.png │ ├── custom.css │ ├── demo_graph.png │ ├── meme.png │ ├── parallelism.png │ ├── recursive_steps.png │ └── testimonials.css ├── concepts │ ├── actions.rst │ ├── additional-visibility.rst │ ├── hooks.rst │ ├── index.rst │ ├── overview.rst │ ├── parallelism.rst │ ├── planned-capabilities.rst │ ├── recursion.rst │ ├── sdlc.rst │ ├── serde.rst │ ├── state-machine.rst │ ├── state-persistence.rst │ ├── state-typing.rst │ ├── state.rst │ ├── streaming-actions.rst │ ├── sync-vs-async.rst │ ├── tracking.rst │ └── transitions.rst ├── conf.py ├── contributing │ ├── architecture.rst │ ├── contributing.rst │ ├── index.rst │ ├── iterating.rst │ └── setup.rst ├── examples │ ├── agents │ │ ├── _agent_patterns │ │ │ ├── agent_supervisor.png │ │ │ ├── hierarchical_agent_teams.png │ │ │ ├── multi_agent_collaboration.png │ │ │ └── multi_modal_agent.png │ │ ├── _divide-and-conquer.png │ │ ├── agent-patterns.md │ │ ├── divide-and-conquer.md │ │ └── index.rst │ ├── chatbots │ │ ├── basic-chatbot.ipynb │ │ ├── gpt-like-chatbot.ipynb │ │ ├── index.rst │ │ └── rag-chatbot-hamilton.ipynb │ ├── data-science │ │ ├── _ml_training.png │ │ ├── _simulation.png │ │ ├── 
index.rst │ │ ├── ml_training.md │ │ └── simulation.md │ ├── deployment │ │ ├── index.rst │ │ ├── infrastructure.rst │ │ ├── monitoring.rst │ │ └── web-server.rst │ ├── guardrails │ │ ├── _creating_tests.png │ │ ├── creating_tests.rst │ │ └── index.rst │ ├── index.rst │ └── simple │ │ ├── choose-your-own-adventure.ipynb │ │ ├── counter.ipynb │ │ ├── cowsay.ipynb │ │ └── index.rst ├── getting_started │ ├── index.rst │ ├── install.rst │ ├── simple-example.rst │ ├── up-next.rst │ └── why-burr.rst ├── index.rst ├── main.rst ├── make.bat ├── make_testimonials.py ├── reference │ ├── actions.rst │ ├── application.rst │ ├── conditions.rst │ ├── index.rst │ ├── integrations │ │ ├── hamilton.rst │ │ ├── haystack.rst │ │ ├── index.rst │ │ ├── langchain.rst │ │ ├── opentelemetry.rst │ │ ├── pydantic.rst │ │ ├── ray.rst │ │ ├── streamlit.rst │ │ └── traceloop.rst │ ├── lifecycle.rst │ ├── parallelism.rst │ ├── persister.rst │ ├── serde.rst │ ├── state.rst │ ├── telemetry.rst │ ├── tracking.rst │ ├── typing.rst │ └── visibility.rst └── robots.txt ├── examples ├── README.md ├── __init__.py ├── adaptive-crag │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── burr_docs │ │ ├── actions.txt │ │ ├── applications.txt │ │ ├── cheat_sheet.txt │ │ ├── state.txt │ │ └── transitions.txt │ ├── notebook.ipynb │ ├── requirements.txt │ └── statemachine.png ├── conversational-rag │ ├── README.md │ ├── __init__.py │ ├── graph_db_example │ │ ├── README.md │ │ ├── UFC_Graph.png │ │ ├── application.py │ │ ├── data │ │ │ ├── raw_fighter_details.csv │ │ │ └── raw_total_fight_data.csv │ │ ├── graph_schema.py │ │ ├── hamilton_ingest.py │ │ ├── ingest_fighters.png │ │ ├── ingest_fighters.py │ │ ├── ingest_fights.png │ │ ├── ingest_fights.py │ │ ├── ingest_notebook.ipynb │ │ ├── notebook.ipynb │ │ ├── requirements.txt │ │ ├── statemachine.png │ │ └── utils.py │ └── simple_example │ │ ├── README.md │ │ ├── __init__.py │ │ ├── application.py │ │ ├── notebook.ipynb │ │ ├── requirements.txt │ │ └── 
statemachine.png ├── custom-serde │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── requirements.txt │ ├── run.py │ └── statemachine.png ├── deep-researcher │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── prompts.py │ ├── requirements.txt │ ├── server.py │ ├── statemachine.png │ └── utils.py ├── deployment │ └── aws │ │ └── lambda │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── app │ │ ├── __init__.py │ │ ├── counter_app.py │ │ └── lambda_handler.py │ │ └── requirements.txt ├── email-assistant │ ├── Dockerfile │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── docker-compose.yaml │ ├── nginx.conf │ ├── notebook.ipynb │ ├── requirements.txt │ ├── server.py │ ├── statemachine.png │ └── wrapper.sh ├── hamilton-integration │ ├── README.md │ ├── __init__.py │ ├── actions │ │ ├── __init__.py │ │ ├── ask_question.py │ │ └── ingest_blog.py │ ├── application.py │ ├── burr_ui_app_v2.png │ ├── burr_ui_app_v3.png │ ├── hamilton_ui.png │ ├── notebook.ipynb │ ├── requirements.txt │ └── statemachine.png ├── haystack-integration │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ └── statemachine.png ├── hello-world-counter │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── application_classbased.py │ ├── notebook.ipynb │ ├── requirements.txt │ ├── statemachine.png │ └── streamlit_app.py ├── image-telephone │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── requirements.txt │ └── statemachine.png ├── instructor-gemini-flash │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── requirements.txt │ └── statemachine.png ├── integrations │ └── hamilton │ │ ├── README.md │ │ ├── image-telephone │ │ ├── README.md │ │ ├── application.py │ │ └── requirements.txt │ │ └── statemachine.png ├── llm-adventure-game │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── requirements.txt │ └── 
statemachine.png ├── ml-training │ ├── README.md │ ├── application.py │ └── statemachine.png ├── multi-agent-collaboration │ ├── README.md │ ├── __init__.py │ ├── hamilton │ │ ├── README.md │ │ ├── __init__.py │ │ ├── alternative_implementation.py │ │ ├── application.py │ │ ├── func_agent.py │ │ ├── notebook.ipynb │ │ ├── requirements.txt │ │ └── statemachine.png │ ├── lcel │ │ ├── README.md │ │ ├── __init__.py │ │ ├── application.py │ │ ├── notebook.ipynb │ │ ├── requirements.txt │ │ └── statemachine.png │ └── requirements.txt ├── multi-modal-chatbot │ ├── .DS_Store │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── burr_demo.ipynb │ ├── notebook.ipynb │ ├── requirements.txt │ ├── server.py │ ├── simple_streamlit_app.py │ ├── statemachine.png │ └── streamlit_app.py ├── openai-compatible-agent │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── requirements.txt │ ├── server.py │ └── statemachine.png ├── opentelemetry │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ └── statemachine.png ├── other-examples │ ├── cowsay │ │ ├── README.md │ │ ├── application.py │ │ ├── digraph │ │ ├── digraph.png │ │ ├── notebook.ipynb │ │ ├── requirements.txt │ │ └── streamlit_app.py │ └── hamilton-multi-modal │ │ ├── __init__.py │ │ ├── application.py │ │ └── dag.py ├── parallelism │ ├── README.md │ └── notebook.ipynb ├── pytest │ ├── README.md │ ├── burr_sdlc.png │ ├── burr_ui.png │ ├── conftest.py │ ├── diagnosis.png │ ├── e2e_test_cases.json │ ├── hypotheses_test_cases.json │ ├── requirements.txt │ ├── some_actions.py │ └── test_some_actions.py ├── rag-lancedb-ingestion │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── burr-ui.gif │ ├── ingestion.py │ ├── notebook.ipynb │ ├── requirements.txt │ ├── statemachine.png │ └── utils.py ├── ray │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── statemachine.png │ └── substatemachine.png ├── recursive │ ├── README.md │ ├── __init__.py │ ├── 
application.py │ ├── notebook.ipynb │ ├── requirements.txt │ ├── statemachine.png │ └── statemachine_sub.png ├── simple-chatbot-intro │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── requirements.txt │ ├── statemachine.png │ ├── statemachine_initial.png │ └── statemachine_safe.png ├── simulation │ ├── README.md │ ├── application.py │ └── statemachine.png ├── streaming-fastapi │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── server.py │ ├── statemachine.png │ └── streamlit_app.py ├── streaming-overview │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── async_application.py │ ├── notebook.ipynb │ ├── requirements.txt │ ├── statemachine.png │ └── streamlit_app.py ├── talks │ └── data_for_ai_oct_2024.ipynb ├── templates │ ├── README.md │ ├── agent_supervisor.png │ ├── agent_supervisor.py │ ├── hierarchical_agent_teams.png │ ├── hierarchical_agent_teams.py │ ├── multi_agent_collaboration.png │ ├── multi_agent_collaboration.py │ ├── multi_modal_agent.png │ └── multi_modal_agent.py ├── test-case-creation │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── prompt_for_more.json │ ├── requirements.txt │ ├── statemachine.png │ └── test_application.py ├── tool-calling │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── requirements.txt │ └── statemachine.png ├── tracing-and-spans │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── burr_otel_demo.ipynb │ ├── notebook.ipynb │ ├── requirements.txt │ ├── statemachine.png │ └── tracing_screencap.png ├── typed-state │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── curls.sh │ ├── notebook.ipynb │ ├── server.py │ └── statemachine.png ├── validate_examples.py ├── web-server │ └── README.md └── youtube-to-social-media-post │ ├── README.md │ ├── __init__.py │ ├── application.py │ ├── notebook.ipynb │ ├── server.py │ └── statemachine.png ├── pyproject.toml ├── setup.cfg ├── telemetry 
└── ui │ ├── .eslintignore │ ├── .eslintrc.js │ ├── .gitignore │ ├── .prettierignore │ ├── .prettierrc.json │ ├── README.md │ ├── package-lock.json │ ├── package.json │ ├── public │ ├── favicon.ico │ ├── index.html │ ├── logo.png │ ├── manifest.json │ └── robots.txt │ ├── scripts │ ├── client-gen.sh │ ├── model_costs.json │ └── token_costs.py │ ├── src │ ├── App.css │ ├── App.test.tsx │ ├── App.tsx │ ├── api │ │ ├── core │ │ │ ├── ApiError.ts │ │ │ ├── ApiRequestOptions.ts │ │ │ ├── ApiResult.ts │ │ │ ├── CancelablePromise.ts │ │ │ ├── OpenAPI.ts │ │ │ └── request.ts │ │ ├── index.ts │ │ ├── models │ │ │ ├── ActionModel.ts │ │ │ ├── AnnotationCreate.ts │ │ │ ├── AnnotationDataPointer.ts │ │ │ ├── AnnotationObservation.ts │ │ │ ├── AnnotationOut.ts │ │ │ ├── AnnotationUpdate.ts │ │ │ ├── ApplicationLogs.ts │ │ │ ├── ApplicationModel.ts │ │ │ ├── ApplicationPage.ts │ │ │ ├── ApplicationSummary.ts │ │ │ ├── AttributeModel.ts │ │ │ ├── BackendSpec.ts │ │ │ ├── BeginEntryModel.ts │ │ │ ├── BeginSpanModel.ts │ │ │ ├── ChatItem.ts │ │ │ ├── ChildApplicationModel.ts │ │ │ ├── DraftInit.ts │ │ │ ├── EmailAssistantState.ts │ │ │ ├── EndEntryModel.ts │ │ │ ├── EndSpanModel.ts │ │ │ ├── EndStreamModel.ts │ │ │ ├── Feedback.ts │ │ │ ├── FirstItemStreamModel.ts │ │ │ ├── HTTPValidationError.ts │ │ │ ├── IndexingJob.ts │ │ │ ├── InitializeStreamModel.ts │ │ │ ├── PointerModel.ts │ │ │ ├── Project.ts │ │ │ ├── PromptInput.ts │ │ │ ├── QuestionAnswers.ts │ │ │ ├── ResearchSummary.ts │ │ │ ├── Span.ts │ │ │ ├── Step.ts │ │ │ ├── TransitionModel.ts │ │ │ └── ValidationError.ts │ │ └── services │ │ │ └── DefaultService.ts │ ├── components │ │ ├── common │ │ │ ├── button.tsx │ │ │ ├── chip.tsx │ │ │ ├── dates.tsx │ │ │ ├── drawer.tsx │ │ │ ├── fieldset.tsx │ │ │ ├── href.tsx │ │ │ ├── input.tsx │ │ │ ├── layout.tsx │ │ │ ├── link.tsx │ │ │ ├── loading.tsx │ │ │ ├── modelCost.tsx │ │ │ ├── pagination.tsx │ │ │ ├── switch.tsx │ │ │ ├── table.tsx │ │ │ ├── tabs.tsx │ │ │ ├── text.tsx │ │ 
│ ├── textarea.tsx │ │ │ └── tooltip.tsx │ │ ├── nav │ │ │ ├── appcontainer.tsx │ │ │ └── breadcrumb.tsx │ │ └── routes │ │ │ ├── AdminView.tsx │ │ │ ├── AppList.tsx │ │ │ ├── ProjectList.tsx │ │ │ └── app │ │ │ ├── ActionView.tsx │ │ │ ├── AnnotationsView.tsx │ │ │ ├── AppView.tsx │ │ │ ├── DataView.tsx │ │ │ ├── GraphView.tsx │ │ │ ├── InsightsView.tsx │ │ │ ├── ReproduceView.tsx │ │ │ ├── StateMachine.tsx │ │ │ └── StepList.tsx │ ├── examples │ │ ├── Chatbot.tsx │ │ ├── Common.tsx │ │ ├── Counter.tsx │ │ ├── DeepResearcher.tsx │ │ ├── EmailAssistant.tsx │ │ ├── MiniTelemetry.tsx │ │ └── StreamingChatbot.tsx │ ├── index.css │ ├── index.tsx │ ├── react-app-env.d.ts │ ├── reportWebVitals.ts │ ├── setupTests.ts │ ├── utils.tsx │ └── utils │ │ └── tailwind.ts │ ├── tailwind.config.js │ └── tsconfig.json └── tests ├── common └── test_async_utils.py ├── conftest.py ├── core ├── test_action.py ├── test_application.py ├── test_graph.py ├── test_graphviz_display.py ├── test_implementations.py ├── test_parallelism.py ├── test_persistence.py ├── test_serde.py ├── test_state.py └── test_validation.py ├── integration_tests └── test_app.py ├── integrations ├── persisters │ ├── test_b_aiosqlite.py │ ├── test_b_mongodb.py │ ├── test_b_redis.py │ └── test_postgresql.py ├── serde │ ├── test_langchain.py │ ├── test_pandas.py │ ├── test_pickle.py │ └── test_pydantic.py ├── test_burr_hamilton.py ├── test_burr_haystack.py ├── test_burr_opentelemetry.py ├── test_burr_pydantic.py ├── test_burr_pydantic_future_annotations.py ├── test_burr_ray.py └── test_opentelemetry.py ├── pytest.ini ├── test_end_to_end.py ├── tracking ├── test_common_models.py └── test_local_tracking_client.py └── visibility └── test_tracing.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a bug report to help us improve Burr. 
4 | title: Bug Report 5 | labels: triage 6 | assignees: '' 7 | 8 | --- 9 | 10 | Short description explaining the high-level reason for the new issue. 11 | 12 | # Current behavior 13 | 14 | 15 | ## Stack Traces 16 | (If applicable) 17 | 18 | ## Screenshots 19 | (If applicable) 20 | 21 | 22 | ## Steps to replicate behavior 23 | 1. 24 | 25 | ## Library & System Information 26 | E.g. python version, burr library version, linux, etc. 27 | 28 | 29 | # Expected behavior 30 | 31 | 32 | # Additional context 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Burr Open Source Discord Server 4 | url: https://discord.gg/SEjkgvYHRb 5 | about: We are piloting using discord for chat. Feel free to try asking questions there first. 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest a feature/enhancement for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | [Short description explaining the high-level reason for the pull request] 2 | 3 | ## Changes 4 | 5 | ## How I tested this 6 | 7 | ## Notes 8 | 9 | ## Checklist 10 | 11 | - [ ] PR has an informative and human-readable title (this will be pulled into the release notes) 12 | - [ ] Changes are limited to a single goal (no scope creep) 13 | - [ ] Code passed the pre-commit check & code is left cleaner/nicer than when first encountered. 14 | - [ ] Any _change_ in functionality is tested 15 | - [ ] New functions are documented (with a description, list of inputs, and expected output) 16 | - [ ] Placeholder code is flagged / future TODOs are captured in comments 17 | - [ ] Project documentation has been updated if adding/changing functionality. 18 | -------------------------------------------------------------------------------- /.github/workflows/ui.yml: -------------------------------------------------------------------------------- 1 | name: Node.js CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - 'telemetry/ui/**' 9 | pull_request: 10 | types: [opened, synchronize, reopened] 11 | paths: 12 | - 'telemetry/ui/**' 13 | 14 | jobs: 15 | build: 16 | runs-on: ubuntu-latest 17 | defaults: 18 | run: 19 | working-directory: telemetry/ui 20 | strategy: 21 | matrix: 22 | node-version: [16.x] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Use Node.js ${{ matrix.node-version }} 26 | uses: actions/setup-node@v1 27 | with: 28 | node-version: ${{ matrix.node-version }} 29 | - run: npm install --ignore-scripts 30 | - run: npm run build 31 | - run: npm run lint:fix 32 | - run: npm run format:fix 33 | -------------------------------------------------------------------------------- /.gitmodules: 
-------------------------------------------------------------------------------- 1 | [submodule "examples/deployment/bentoml/BentoBurr"] 2 | path = examples/deployment/bentoml/BentoBurr 3 | url = https://github.com/bentoml/BentoBurr 4 | -------------------------------------------------------------------------------- /.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | based_on_style = google 3 | 4 | # max characters per line 5 | COLUMN_LIMIT = 100 6 | 7 | # Put closing brackets on a separate line, dedented, if the bracketed expression can't fit in a single line 8 | DEDENT_CLOSING_BRACKETS = true 9 | 10 | # Place each dictionary entry onto its own line. 11 | EACH_DICT_ENTRY_ON_SEPARATE_LINE = true 12 | 13 | # Join short lines into one line. E.g., single line if statements. 14 | JOIN_MULTIPLE_LINES = true 15 | 16 | # Insert a blank line before a def or class immediately nested within another def or class 17 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true 18 | 19 | # Split before arguments if the argument list is terminated by a comma. 20 | SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED = true 21 | 22 | # If an argument / parameter list is going to be split, then split before the first argument 23 | SPLIT_BEFORE_FIRST_ARGUMENT = true 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023-2025 DAGWorks Inc. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are permitted 5 | (subject to the limitations in the disclaimer below) provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following 8 | disclaimer. 
9 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following 10 | disclaimer in the documentation and/or other materials provided with the distribution. 11 | * Neither the name of Stitch Fix, DAGWorks, nor the names of its contributors may be used to endorse or promote products derived 12 | from this software without specific prior written permission. 13 | 14 | NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY 15 | THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 16 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 17 | COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 19 | OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 20 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 21 | POSSIBILITY OF SUCH DAMAGE. 
import inspect
from collections.abc import AsyncIterable as _AsyncIterableABC
from typing import AsyncGenerator, AsyncIterable, Generator, List, TypeVar, Union

T = TypeVar("T")

GenType = TypeVar("GenType")

# Either an async iterable or a plain (already-realized) list of items.
SyncOrAsyncIterable = Union[AsyncIterable[T], List[T]]
# Either a sync generator or an async generator yielding GenType.
SyncOrAsyncGenerator = Union[Generator[GenType, None, None], AsyncGenerator[GenType, None]]
SyncOrAsyncGeneratorOrItemOrList = Union[SyncOrAsyncGenerator[GenType], List[GenType], GenType]


def _requires_async_for(obj: object) -> bool:
    """Return True when *obj* must be consumed with ``async for``.

    ``inspect.isasyncgen`` only detects generators created by ``async def``
    functions; checking the AsyncIterable ABC additionally covers arbitrary
    objects implementing ``__aiter__``, which the declared types allow.
    """
    return inspect.isasyncgen(obj) or isinstance(obj, _AsyncIterableABC)


async def asyncify_generator(
    generator: SyncOrAsyncGenerator[GenType],
) -> AsyncGenerator[GenType, None]:
    """Convert a sync generator to an async generator.

    Async generators (and any other async iterable) are passed through
    unchanged; sync iterables are wrapped so they can be consumed with
    ``async for``.

    :param generator: sync or async generator/iterable
    :return: async generator
    """
    if _requires_async_for(generator):
        async for item in generator:
            yield item
    else:
        for item in generator:
            yield item


async def arealize(maybe_async_generator: SyncOrAsyncGenerator[GenType]) -> List[GenType]:
    """Realize an async generator or async iterable to a list.

    Sync iterables are realized directly; anything requiring ``async for``
    (async generators or custom async iterables) is awaited item by item.

    :param maybe_async_generator: async generator, async iterable, or sync iterable
    :return: list of items -- fully realized
    """
    if _requires_async_for(maybe_async_generator):
        return [item async for item in maybe_async_generator]
    return list(maybe_async_generator)
This is used internally.""" 24 | 25 | @abc.abstractmethod 26 | def copy(self) -> "Self": 27 | pass 28 | -------------------------------------------------------------------------------- /burr/core/__init__.py: -------------------------------------------------------------------------------- 1 | from burr.core.action import Action, Condition, Result, action, default, expr, when 2 | from burr.core.application import ( 3 | Application, 4 | ApplicationBuilder, 5 | ApplicationContext, 6 | ApplicationGraph, 7 | ) 8 | from burr.core.graph import Graph, GraphBuilder 9 | from burr.core.state import State 10 | 11 | __all__ = [ 12 | "action", 13 | "Action", 14 | "Application", 15 | "ApplicationBuilder", 16 | "ApplicationGraph", 17 | "ApplicationContext", 18 | "Condition", 19 | "default", 20 | "expr", 21 | "Result", 22 | "State", 23 | "when", 24 | "Graph", 25 | "GraphBuilder", 26 | ] 27 | -------------------------------------------------------------------------------- /burr/core/implementations.py: -------------------------------------------------------------------------------- 1 | from burr.core import State 2 | from burr.core.action import Action 3 | 4 | 5 | class Placeholder(Action): 6 | """This is a placeholder action -- you would expect it to break if you tried to run it. It is specifically 7 | for the following workflow: 8 | 1. Create your state machine out of placeholders to model it 9 | 2. Visualize the state machine 10 | 2. Replace the placeholders with real actions as you see fit 11 | """ 12 | 13 | def __init__(self, reads: list[str], writes: list[str]): 14 | super().__init__() 15 | self._reads = reads 16 | self._writes = writes 17 | 18 | def run(self, state: State) -> dict: 19 | raise NotImplementedError( 20 | f"This is a placeholder action and thus you are unable to run. Please implement: {self}!" 21 | ) 22 | 23 | def update(self, result: dict, state: State) -> State: 24 | raise NotImplementedError( 25 | f"This is a placeholder action and thus cannot update state. 
from typing import Any, Optional

BASE_ERROR_MESSAGE = (
    "-------------------------------------------------------------------\n"
    "Oh no an error! Need help with Burr?\n"
    "Join our discord and ask for help! https://discord.gg/4FxBMyzW5n\n"
    "-------------------------------------------------------------------\n"
)


def assert_set(value: Optional[Any], field: str, method: str) -> None:
    """Assert that a builder field has been set, raising a helpful error otherwise.

    :param value: current value of the builder field -- ``None`` means unset
    :param field: name of the field being checked (included in the error for debugging;
        previously accepted but never used)
    :param method: builder method the user must call to set the field
    :raises ValueError: if ``value`` is ``None``
    """
    if value is None:
        raise ValueError(
            BASE_ERROR_MESSAGE
            + f"Field `{field}` is not set. "
            + f"Must call `{method}` before building application! Do so with ApplicationBuilder."
        )
class RayExecutor(concurrent.futures.Executor):
    """A ``concurrent.futures.Executor`` that runs submitted callables on Ray.

    Currently experimental. Remember to call ``ray.init()`` before submitting
    any work.
    """

    def __init__(self, shutdown_on_end: bool = False):
        """Creates a Ray-backed executor.

        :param shutdown_on_end: If True, ``shutdown()`` will also shut ray down.
        """
        self.shutdown_on_end = shutdown_on_end

    def submit(self, fn, *args, **kwargs):
        """Schedules ``fn(*args, **kwargs)`` on ray and returns a standard future.

        Wraps the function with ``ray.remote``, invokes it, and converts the
        resulting object ref into a ``concurrent.futures.Future``.

        :param fn: Function to submit
        :param args: Positional arguments for the function
        :param kwargs: Keyword arguments for the function
        :return: The future tracking the remote call
        """
        if not ray.is_initialized():
            raise RuntimeError("Ray is not initialized. Call ray.init() before running anything!")
        remote_fn = ray.remote(fn)
        return remote_fn.remote(*args, **kwargs).future()

    def shutdown(self, wait=True, **kwargs):
        """Shuts down the executor, shutting ray itself down when configured to.

        :param wait: Whether to wait -- required for the API but not respected (yet)
        :param kwargs: Keyword arguments -- not used yet
        """
        if self.shutdown_on_end and ray.is_initialized():
            ray.shutdown()
23 | """ 24 | value.pop(serde.KEY) 25 | pydantic_class_name = value.pop("__pydantic_class") 26 | module_name, class_name = pydantic_class_name.rsplit(".", 1) 27 | module = importlib.import_module(module_name) 28 | pydantic_class = getattr(module, class_name) 29 | return pydantic_class.model_validate(value) 30 | -------------------------------------------------------------------------------- /burr/lifecycle/__init__.py: -------------------------------------------------------------------------------- 1 | from burr.lifecycle.base import ( 2 | LifecycleAdapter, 3 | PostApplicationCreateHook, 4 | PostApplicationExecuteCallHook, 5 | PostApplicationExecuteCallHookAsync, 6 | PostEndSpanHook, 7 | PostRunStepHook, 8 | PostRunStepHookAsync, 9 | PreApplicationExecuteCallHook, 10 | PreApplicationExecuteCallHookAsync, 11 | PreRunStepHook, 12 | PreRunStepHookAsync, 13 | PreStartSpanHook, 14 | ) 15 | from burr.lifecycle.default import StateAndResultsFullLogger 16 | 17 | __all__ = [ 18 | "PreRunStepHook", 19 | "PreRunStepHookAsync", 20 | "PostRunStepHook", 21 | "PostRunStepHookAsync", 22 | "PreApplicationExecuteCallHook", 23 | "PreApplicationExecuteCallHookAsync", 24 | "PostApplicationExecuteCallHook", 25 | "PostApplicationExecuteCallHookAsync", 26 | "LifecycleAdapter", 27 | "StateAndResultsFullLogger", 28 | "PostApplicationCreateHook", 29 | "PostEndSpanHook", 30 | "PreStartSpanHook", 31 | ] 32 | -------------------------------------------------------------------------------- /burr/log_setup.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | LOG_LEVELS = { 5 | "CRITICAL": logging.CRITICAL, 6 | "ERROR": logging.ERROR, 7 | "WARNING": logging.WARNING, 8 | "INFO": logging.INFO, 9 | "DEBUG": logging.DEBUG, 10 | } 11 | 12 | 13 | # this is suboptimal but python has no public mapping of log names to levels 14 | 15 | 16 | def setup_logging(log_level: int = logging.INFO): 17 | """Helper function to setup logging to 
console. 18 | :param log_level: Log level to use when logging 19 | """ 20 | root_logger = logging.getLogger("") # root logger 21 | formatter = logging.Formatter("[%(levelname)s] %(asctime)s %(name)s(%(lineno)s): %(message)s") 22 | stream_handler = logging.StreamHandler(sys.stdout) 23 | stream_handler.setFormatter(formatter) 24 | if not len(root_logger.handlers): 25 | # assumes we have already been set up. 26 | root_logger.addHandler(stream_handler) 27 | root_logger.setLevel(log_level) 28 | -------------------------------------------------------------------------------- /burr/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/burr/py.typed -------------------------------------------------------------------------------- /burr/system.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | import sys 4 | 5 | IS_WINDOWS = os.name == "nt" 6 | 7 | if sys.version_info >= (3, 11): 8 | utc = datetime.UTC 9 | else: 10 | utc = datetime.timezone.utc 11 | 12 | 13 | def now(): 14 | return datetime.datetime.now(utc) 15 | -------------------------------------------------------------------------------- /burr/tracking/__init__.py: -------------------------------------------------------------------------------- 1 | from .client import LocalTrackingClient 2 | 3 | __all__ = ["LocalTrackingClient"] 4 | -------------------------------------------------------------------------------- /burr/tracking/base.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | from burr.lifecycle import ( 4 | PostApplicationCreateHook, 5 | PostEndSpanHook, 6 | PostRunStepHook, 7 | PreRunStepHook, 8 | PreStartSpanHook, 9 | ) 10 | from burr.lifecycle.base import ( 11 | DoLogAttributeHook, 12 | PostEndStreamHook, 13 | PostStreamItemHook, 14 | PreStartStreamHook, 15 | ) 
class SyncTrackingClient(
    PostApplicationCreateHook,
    PreRunStepHook,
    PostRunStepHook,
    PreStartSpanHook,
    PostEndSpanHook,
    DoLogAttributeHook,
    PreStartStreamHook,
    PostStreamItemHook,
    PostEndStreamHook,
    abc.ABC,
):
    """Base class for synchronous tracking clients. All tracking clients must implement from this.

    A tracking client is the union of the synchronous lifecycle hooks listed above --
    application creation, step pre/post, span start/end, attribute logging, and
    stream start/item/end -- so a subclass can observe the full application lifecycle.

    TODO -- create an async tracking client"""

    @abc.abstractmethod
    def copy(self) -> "SyncTrackingClient":
        """Returns a copy of this tracking client; subclasses decide how deep the copy is."""
        pass


# Default alias: the tracking client type is the synchronous one (until an async one exists).
TrackingClient = SyncTrackingClient
-------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot/chat-4-philosophy/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot/chat-5-jokes/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot/chat-6-demonstrate-errors/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot_with_traces/chat-1-giraffe/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot_with_traces/chat-2-geography/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot_with_traces/chat-3-physics/metadata.json: 
-------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot_with_traces/chat-4-philosophy/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot_with_traces/chat-5-jokes/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_chatbot_with_traces/chat-6-demonstrate-errors/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_conversational-rag/rag-1-food/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_conversational-rag/rag-2-work-history/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": 
null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_conversational-rag/rag-3-activities/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_conversational-rag/rag-4-everything/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": null, "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-1/graph.json: -------------------------------------------------------------------------------- 1 | {"type": "application", "entrypoint": "counter", "actions": [{"type": "action", "name": "counter", "reads": ["counter"], "writes": ["counter"], "code": "@action(reads=[\"counter\"], writes=[\"counter\"])\ndef counter(state: State) -> State:\n result = {\"counter\": state[\"counter\"] + 1}\n print(f\"counted to {result['counter']}\")\n return state.update(**result)\n", "inputs": [], "optional_inputs": []}, {"type": "action", "name": "result", "reads": ["counter"], "writes": [], "code": "class Result(Action):\n def __init__(self, *fields: str):\n \"\"\"Represents a result action. This is purely a convenience class to\n pull data from state and give it out to the result. 
It does nothing to\n the state itself.\n\n :param fields: Fields to pull from the state and put into results\n \"\"\"\n super(Result, self).__init__()\n self._fields = fields\n\n def run(self, state: State) -> dict:\n return {key: value for key, value in state.get_all().items() if key in self._fields}\n\n def update(self, result: dict, state: State) -> State:\n return state # does not modify state in any way\n\n @property\n def reads(self) -> list[str]:\n return list(self._fields)\n\n @property\n def writes(self) -> list[str]:\n return []\n", "inputs": [], "optional_inputs": []}], "transitions": [{"type": "transition", "from_": "counter", "to": "counter", "condition": "counter < count_to"}, {"type": "transition", "from_": "counter", "to": "result", "condition": "default"}]} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-1/log.jsonl: -------------------------------------------------------------------------------- 1 | {"type":"begin_entry","start_time":"2024-08-27T11:59:58.569646","action":"counter","inputs":{},"sequence_id":0} 2 | {"type":"end_entry","end_time":"2024-08-27T11:59:58.569778","action":"counter","result":{},"exception":null,"state":{"count_to":1,"counter":1,"__SEQUENCE_ID":0,"__PRIOR_STEP":"counter"},"sequence_id":0} 3 | {"type":"begin_entry","start_time":"2024-08-27T11:59:58.569978","action":"result","inputs":{},"sequence_id":1} 4 | {"type":"end_entry","end_time":"2024-08-27T11:59:58.570052","action":"result","result":{"counter":1},"exception":null,"state":{"count_to":1,"counter":1,"__SEQUENCE_ID":1,"__PRIOR_STEP":"result"},"sequence_id":1} 5 | -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-1/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": "user_0", "parent_pointer": null, 
"spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-10/graph.json: -------------------------------------------------------------------------------- 1 | {"type": "application", "entrypoint": "counter", "actions": [{"type": "action", "name": "counter", "reads": ["counter"], "writes": ["counter"], "code": "@action(reads=[\"counter\"], writes=[\"counter\"])\ndef counter(state: State) -> State:\n result = {\"counter\": state[\"counter\"] + 1}\n print(f\"counted to {result['counter']}\")\n return state.update(**result)\n", "inputs": [], "optional_inputs": []}, {"type": "action", "name": "result", "reads": ["counter"], "writes": [], "code": "class Result(Action):\n def __init__(self, *fields: str):\n \"\"\"Represents a result action. This is purely a convenience class to\n pull data from state and give it out to the result. It does nothing to\n the state itself.\n\n :param fields: Fields to pull from the state and put into results\n \"\"\"\n super(Result, self).__init__()\n self._fields = fields\n\n def run(self, state: State) -> dict:\n return {key: value for key, value in state.get_all().items() if key in self._fields}\n\n def update(self, result: dict, state: State) -> State:\n return state # does not modify state in any way\n\n @property\n def reads(self) -> list[str]:\n return list(self._fields)\n\n @property\n def writes(self) -> list[str]:\n return []\n", "inputs": [], "optional_inputs": []}], "transitions": [{"type": "transition", "from_": "counter", "to": "counter", "condition": "counter < count_to"}, {"type": "transition", "from_": "counter", "to": "result", "condition": "default"}]} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-10/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": 
"application_metadata", "partition_key": "user_1", "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-100/graph.json: -------------------------------------------------------------------------------- 1 | {"type": "application", "entrypoint": "counter", "actions": [{"type": "action", "name": "counter", "reads": ["counter"], "writes": ["counter"], "code": "@action(reads=[\"counter\"], writes=[\"counter\"])\ndef counter(state: State) -> State:\n result = {\"counter\": state[\"counter\"] + 1}\n print(f\"counted to {result['counter']}\")\n return state.update(**result)\n", "inputs": [], "optional_inputs": []}, {"type": "action", "name": "result", "reads": ["counter"], "writes": [], "code": "class Result(Action):\n def __init__(self, *fields: str):\n \"\"\"Represents a result action. This is purely a convenience class to\n pull data from state and give it out to the result. 
It does nothing to\n the state itself.\n\n :param fields: Fields to pull from the state and put into results\n \"\"\"\n super(Result, self).__init__()\n self._fields = fields\n\n def run(self, state: State) -> dict:\n return {key: value for key, value in state.get_all().items() if key in self._fields}\n\n def update(self, result: dict, state: State) -> State:\n return state # does not modify state in any way\n\n @property\n def reads(self) -> list[str]:\n return list(self._fields)\n\n @property\n def writes(self) -> list[str]:\n return []\n", "inputs": [], "optional_inputs": []}], "transitions": [{"type": "transition", "from_": "counter", "to": "counter", "condition": "counter < count_to"}, {"type": "transition", "from_": "counter", "to": "result", "condition": "default"}]} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-100/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": "user_2", "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-42/graph.json: -------------------------------------------------------------------------------- 1 | {"type": "application", "entrypoint": "counter", "actions": [{"type": "action", "name": "counter", "reads": ["counter"], "writes": ["counter"], "code": "@action(reads=[\"counter\"], writes=[\"counter\"])\ndef counter(state: State) -> State:\n result = {\"counter\": state[\"counter\"] + 1}\n print(f\"counted to {result['counter']}\")\n return state.update(**result)\n", "inputs": [], "optional_inputs": []}, {"type": "action", "name": "result", "reads": ["counter"], "writes": [], "code": "class Result(Action):\n def __init__(self, *fields: str):\n \"\"\"Represents a result action. 
This is purely a convenience class to\n pull data from state and give it out to the result. It does nothing to\n the state itself.\n\n :param fields: Fields to pull from the state and put into results\n \"\"\"\n super(Result, self).__init__()\n self._fields = fields\n\n def run(self, state: State) -> dict:\n return {key: value for key, value in state.get_all().items() if key in self._fields}\n\n def update(self, result: dict, state: State) -> State:\n return state # does not modify state in any way\n\n @property\n def reads(self) -> list[str]:\n return list(self._fields)\n\n @property\n def writes(self) -> list[str]:\n return []\n", "inputs": [], "optional_inputs": []}], "transitions": [{"type": "transition", "from_": "counter", "to": "counter", "condition": "counter < count_to"}, {"type": "transition", "from_": "counter", "to": "result", "condition": "default"}]} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-42/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": "user_4", "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-50/graph.json: -------------------------------------------------------------------------------- 1 | {"type": "application", "entrypoint": "counter", "actions": [{"type": "action", "name": "counter", "reads": ["counter"], "writes": ["counter"], "code": "@action(reads=[\"counter\"], writes=[\"counter\"])\ndef counter(state: State) -> State:\n result = {\"counter\": state[\"counter\"] + 1}\n print(f\"counted to {result['counter']}\")\n return state.update(**result)\n", "inputs": [], "optional_inputs": []}, {"type": "action", "name": "result", "reads": ["counter"], "writes": [], "code": "class Result(Action):\n def 
__init__(self, *fields: str):\n \"\"\"Represents a result action. This is purely a convenience class to\n pull data from state and give it out to the result. It does nothing to\n the state itself.\n\n :param fields: Fields to pull from the state and put into results\n \"\"\"\n super(Result, self).__init__()\n self._fields = fields\n\n def run(self, state: State) -> dict:\n return {key: value for key, value in state.get_all().items() if key in self._fields}\n\n def update(self, result: dict, state: State) -> State:\n return state # does not modify state in any way\n\n @property\n def reads(self) -> list[str]:\n return list(self._fields)\n\n @property\n def writes(self) -> list[str]:\n return []\n", "inputs": [], "optional_inputs": []}], "transitions": [{"type": "transition", "from_": "counter", "to": "counter", "condition": "counter < count_to"}, {"type": "transition", "from_": "counter", "to": "result", "condition": "default"}]} -------------------------------------------------------------------------------- /burr/tracking/server/demo_data/demo_counter/count-to-50/metadata.json: -------------------------------------------------------------------------------- 1 | {"type": "application_metadata", "partition_key": "user_3", "parent_pointer": null, "spawning_parent_pointer": null} -------------------------------------------------------------------------------- /burr/tracking/server/requirements-s3.txt: -------------------------------------------------------------------------------- 1 | aerich 2 | aiobotocore 3 | fastapi-utils 4 | tortoise-orm[accel, asyncmy] 5 | -------------------------------------------------------------------------------- /burr/tracking/server/run.sh: -------------------------------------------------------------------------------- 1 | uvicorn run:app --host 0.0.0.0 --port 7241 2 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/__init__.py: 
# Use an official Python runtime as a parent image
FROM python:3.11-slim

# Set environment variables.
# NOTE: key=value form -- the legacy space-separated `ENV key value` form is deprecated.
# DEBIAN_FRONTEND is set *before* the apt-get layer so it actually applies to package
# installs (it was previously mis-spelled as `ENV ENV DEBIAN_FRONTEND noninteractive`,
# which defined a variable literally named "ENV").
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1

# Set working directory
WORKDIR /app

# Copy the current directory contents into the container at /app
COPY . /app

# Install dependencies and git
RUN apt-get update && apt-get install -y \
    git \
    nginx \
    gcc \
    && apt-get clean

# Install the dependencies
# TODO -- use the right version
#RUN pip install "git+https://github.com/dagworks-inc/burr.git@tracker-s3#egg=burr[tracking-server-s3]"
RUN pip install "burr[tracking-server-s3]>=0.29.0"

# Copy the nginx config file
COPY nginx.conf /etc/nginx/nginx.conf

# Expose the port FastAPI will run on and the port NGINX will listen to
EXPOSE 8000
EXPOSE 80

ENV BURR_S3_BUCKET=burr-prod-test
ENV BURR_load_snapshot_on_start=True
ENV BURR_snapshot_interval_milliseconds=3_600_000
ENV BURR_update_interval_milliseconds=150_000
# Backend implementation. This was previously set twice (first to "s3", then
# immediately overridden); only the final, fully-qualified value is kept.
ENV BURR_BACKEND_IMPL=burr.tracking.server.s3.backend.SQLiteS3Backend


# Command to run FastAPI server and NGINX
CMD ["sh", "-c", "uvicorn burr.tracking.server.run:app --host 0.0.0.0 --port 8000 & nginx -g 'daemon off;'"]
.terraform.tfstate.lock.info 6 | 7 | 8 | 9 | # Module directory 10 | .terraform/ 11 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/deployment/terraform/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.61.0" 6 | hashes = [ 7 | "h1:qYXhPfMOxgOYuSjfe7+P2wdqx4oMkPYgH4XUN3fJb54=", 8 | "zh:1a0a150b6adaeacc8f56763182e76c6219ac67de1217b269d24b770067b7bab0", 9 | "zh:1d9c3a8ac3934a147569254d6e2e6ea5293974d0595c02c9e1aa31499a8f0042", 10 | "zh:1f4d1d5e2e02fd5cccafa28dade8735a3059ed1ca3284fb40116cdb67d0e7ee4", 11 | "zh:26be6f759bded469de477f54c7eb7a9ca9f137a3b52f9fd26cbd864f16973912", 12 | "zh:276e308ae7aa281fe24f7275673aa05f00cb830b83c2b9797f9aa55f10769c52", 13 | "zh:45c09beeadb4269d518de0bd341cbe9f061157ab54c543d39168ecefff40bbe2", 14 | "zh:58fb5ef076dc63e284ce28b47b7cc35a17d2596f11e2373fe568c6140277e9d8", 15 | "zh:64d51cc1ad412379f64b75883a881a5d682a8e9737ad14479f6a2d62e77f7dbe", 16 | "zh:71e2e332317cf095288d65a801e95b65fd696204997b2db5250862d6c5669518", 17 | "zh:9864014aa4716b5bfb3b27d009f158dd6a67c215fd0dfbe3a5d1a7cee72c5677", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:d8bf9ba43bd938faab37d8fb515c32a905d6dace60f5ff2663b06ffdc89a62e9", 20 | "zh:e654be9d3980e7cc70f9825fe0d0205e254edd87832f18b2d7f9c72b09b776cd", 21 | "zh:ee5ce6fbe75be3e90cabba3fad76fcfde50ab795e523b4ee917cfe8ba8ad42fe", 22 | "zh:ef12098e7b3ddf9ab286bb209de87dfa8e52106049ced0841e3e6487dbff3659", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/hashicorp/template" { 27 | version = "2.2.0" 28 | hashes = [ 29 | "h1:fWFikA+NOeeN7c0so3ULsaMyLlkJ8USk80pqRaSuj48=", 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- 
# alb.tf -- Application Load Balancer fronting the Burr app.

resource "aws_alb" "main" {
  name            = "burr-load-balancer"
  subnets         = aws_subnet.public.*.id
  security_groups = [aws_security_group.lb.id]
}

resource "aws_alb_target_group" "app" {
  name        = "burr-target-group"
  port        = 80
  protocol    = "HTTP"
  vpc_id      = aws_vpc.main.id
  target_type = "ip" # registers targets by IP address (presumably for awsvpc/Fargate tasks -- confirm against the ECS config)

  health_check {
    healthy_threshold   = "3"
    interval            = "30"
    protocol            = "HTTP"
    matcher             = "200"
    timeout             = "3"
    path                = var.health_check_path
    unhealthy_threshold = "2"
  }
}

# Redirect all traffic from the ALB to the target group
resource "aws_alb_listener" "front_end" {
  load_balancer_arn = aws_alb.main.id
  port              = var.app_port # NOTE(review): listener uses var.app_port while the target group forwards to 80 -- verify intended
  protocol          = "HTTP"

  default_action {
    target_group_arn = aws_alb_target_group.app.id
    type             = "forward"
  }
}
-------------------------------------------------------------------------------- /burr/tracking/server/s3/deployment/terraform/provider.tf: -------------------------------------------------------------------------------- 1 | # Specify the provider and access details 2 | 3 | provider "aws" { 4 | access_key = var.aws_access_key 5 | secret_key = var.aws_secret_key 6 | region = var.aws_region 7 | } 8 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/deployment/terraform/security.tf: -------------------------------------------------------------------------------- 1 | # ALB security Group: Edit to restrict access to the application 2 | resource "aws_security_group" "lb" { 3 | name = "burr-load-balancer-security-group" 4 | description = "controls access to the ALB" 5 | vpc_id = aws_vpc.main.id 6 | 7 | ingress { 8 | protocol = "tcp" 9 | from_port = var.app_port 10 | to_port = var.app_port 11 | cidr_blocks = ["0.0.0.0/0"] 12 | } 13 | 14 | egress { 15 | protocol = "-1" 16 | from_port = 0 17 | to_port = 0 18 | cidr_blocks = ["0.0.0.0/0"] 19 | } 20 | } 21 | 22 | # Traffic to the ECS cluster should only come from the ALB 23 | resource "aws_security_group" "ecs_tasks" { 24 | name = "burr-ecs-tasks-security-group" 25 | description = "allow inbound access from the ALB only" 26 | vpc_id = aws_vpc.main.id 27 | 28 | ingress { 29 | protocol = "tcp" 30 | from_port = var.app_port 31 | to_port = var.app_port 32 | security_groups = [aws_security_group.lb.id] 33 | } 34 | 35 | egress { 36 | protocol = "-1" 37 | from_port = 0 38 | to_port = 0 39 | cidr_blocks = ["0.0.0.0/0"] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/deployment/terraform/templates/ecs/burr_app.json.tpl: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "burr-app", 4 | "image": "${app_image}", 5 | "cpu": ${fargate_cpu}, 6 | "memory": 
${fargate_memory}, 7 | "networkMode": "awsvpc", 8 | "logConfiguration": { 9 | "logDriver": "awslogs", 10 | "options": { 11 | "awslogs-group": "/ecs/burr-app", 12 | "awslogs-region": "${aws_region}", 13 | "awslogs-stream-prefix": "ecs" 14 | } 15 | }, 16 | "portMappings": [ 17 | { 18 | "containerPort": ${app_port}, 19 | "hostPort": ${app_port} 20 | } 21 | ] 22 | } 23 | ] 24 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/deployment/terraform/variable.tf: -------------------------------------------------------------------------------- 1 | # variables.tf 2 | 3 | variable "aws_access_key" { 4 | description = "The IAM public access key" 5 | } 6 | 7 | variable "aws_secret_key" { 8 | description = "IAM secret access key" 9 | } 10 | 11 | variable "aws_region" { 12 | description = "The AWS region things are created in" 13 | } 14 | 15 | variable "ec2_task_execution_role_name" { 16 | description = "ECS task execution role name" 17 | default = "myEcsTaskExecutionRole" 18 | } 19 | 20 | variable "ecs_auto_scale_role_name" { 21 | description = "ECS auto scale role name" 22 | default = "myEcsAutoScaleRole" 23 | } 24 | 25 | variable "az_count" { 26 | description = "Number of AZs to cover in a given region" 27 | default = "2" 28 | } 29 | 30 | # TODO -- get this to use a public image that is pre-built 31 | variable "app_image" { 32 | description = "Docker image to run in the ECS cluster" 33 | default = "929301070231.dkr.ecr.us-west-2.amazonaws.com/burr-prod-test:latest" 34 | } 35 | 36 | variable "app_port" { 37 | description = "Port exposed by the docker image to redirect traffic to" 38 | default = 8000 39 | 40 | } 41 | 42 | variable "app_count" { 43 | description = "Number of docker containers to run" 44 | default = 1 45 | } 46 | 47 | variable "health_check_path" { 48 | default = "/ready" 49 | } 50 | 51 | variable "fargate_cpu" { 52 | description = "Fargate instance CPU units to provision (1 vCPU = 1024 CPU units)" 53 | 
default = "1024" 54 | } 55 | 56 | variable "fargate_memory" { 57 | description = "Fargate instance memory to provision (in MiB)" 58 | default = "2048" 59 | } 60 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/initialize_db.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | from tortoise import Tortoise, run_async 5 | 6 | from burr.tracking.server.s3 import settings 7 | 8 | DB_PATH = Path("~/.burr_server/db.sqlite3").expanduser() 9 | 10 | 11 | async def connect(): 12 | if not os.path.exists(DB_PATH): 13 | os.makedirs(os.path.dirname(DB_PATH), exist_ok=True) 14 | await Tortoise.init( 15 | config=settings.TORTOISE_ORM, 16 | ) 17 | 18 | 19 | # 20 | async def first_time_init(): 21 | await connect() 22 | # Generate the schema 23 | await Tortoise.generate_schemas() 24 | 25 | 26 | if __name__ == "__main__": 27 | # db_path = sys.argv[1] 28 | run_async(first_time_init()) 29 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.aerich] 2 | tortoise_orm = "burr.tracking.server.s3.settings.TORTOISE_ORM" 3 | location = "./burr/tracking/server/s3/migrations" 4 | src_folder = "./." 
5 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | BURR_SERVER_ROOT = os.environ.get("BURR_SERVER_ROOT", os.path.expanduser("~/.burr_server")) 4 | BURR_DB_FILENAME = os.environ.get("BURR_DB_FILENAME", "db.sqlite3") 5 | 6 | DB_PATH = os.path.join( 7 | BURR_SERVER_ROOT, 8 | BURR_DB_FILENAME, 9 | ) 10 | TORTOISE_ORM = { 11 | "connections": {"default": f"sqlite:///{DB_PATH}"}, 12 | "apps": { 13 | "models": { 14 | "models": ["burr.tracking.server.s3.models", "aerich.models"], 15 | "default_connection": "default", 16 | }, 17 | }, 18 | } 19 | -------------------------------------------------------------------------------- /burr/tracking/server/s3/utils.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Awaitable, TypeVar 3 | 4 | AwaitableType = TypeVar("AwaitableType") 5 | 6 | 7 | async def gather_with_concurrency(n, *coros: Awaitable[AwaitableType]) -> tuple[AwaitableType, ...]: 8 | semaphore = asyncio.Semaphore(n) 9 | 10 | async def sem_coro(coro: Awaitable[AwaitableType]) -> AwaitableType: 11 | async with semaphore: 12 | return await coro 13 | 14 | return await asyncio.gather(*(sem_coro(c) for c in coros)) 15 | -------------------------------------------------------------------------------- /burr/tracking/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | def safe_json_load(line: bytes): 5 | # Every once in a while we'll hit a non-utf-8 character 6 | # In this case we replace it and hope for the best 7 | return json.loads(line.decode("utf-8", errors="replace")) 8 | -------------------------------------------------------------------------------- /burr/version.py: -------------------------------------------------------------------------------- 1 | import 
importlib.metadata 2 | 3 | __version__ = importlib.metadata.version("burr") 4 | -------------------------------------------------------------------------------- /burr/visibility/__init__.py: -------------------------------------------------------------------------------- 1 | from burr.visibility.tracing import ActionSpan, ActionSpanTracer, TracerFactory, trace 2 | 3 | __all__ = ["TracerFactory", "ActionSpan", "ActionSpanTracer", "trace"] 4 | -------------------------------------------------------------------------------- /burr_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/burr_logo.png -------------------------------------------------------------------------------- /chatbot.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/chatbot.gif -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/README-internal.md: -------------------------------------------------------------------------------- 1 | # Internal README for docs 2 | 3 | This documentation aims to follow the [diataxis](diataxis.fr) approach to documentation. This outlines: 4 | 5 | 1. Tutorials [getting_started](./getting_started) 6 | 2. How-to guides (examples in the repo) 7 | 3. References [reference](./reference) 8 | 4. Explanation [concepts](./concepts) 9 | 10 | TODO: 11 | 12 | - [ ] fill in all docs todos 13 | - [ ] Add examples for Hamilton integration, streamlit integration 14 | -------------------------------------------------------------------------------- /docs/_static/burr_sdlc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/_static/burr_sdlc.png -------------------------------------------------------------------------------- /docs/_static/chatbot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/_static/chatbot.png -------------------------------------------------------------------------------- /docs/_static/custom.css: -------------------------------------------------------------------------------- 1 | /* Enable line wrapping for code blocks */ 2 | .highlight pre { 3 | white-space: pre-wrap; 4 | word-wrap: break-word; 5 | } 6 | -------------------------------------------------------------------------------- /docs/_static/demo_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/_static/demo_graph.png 
-------------------------------------------------------------------------------- /docs/_static/meme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/_static/meme.png -------------------------------------------------------------------------------- /docs/_static/parallelism.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/_static/parallelism.png -------------------------------------------------------------------------------- /docs/_static/recursive_steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/_static/recursive_steps.png -------------------------------------------------------------------------------- /docs/_static/testimonials.css: -------------------------------------------------------------------------------- 1 | .testimonial-container { 2 | display: flex; 3 | flex-wrap: wrap; 4 | gap: 1rem; 5 | justify-content: center; 6 | margin: 2rem 0; 7 | } 8 | 9 | .testimonial-card { 10 | background: #fff; 11 | border: 1px solid #ddd; 12 | border-radius: 8px; 13 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 14 | max-width: 300px; 15 | width: 100%; 16 | padding: 1rem; 17 | text-align: center; 18 | transition: transform 0.2s ease-in-out; 19 | } 20 | 21 | .testimonial-card:hover { 22 | transform: scale(1.05); 23 | } 24 | 25 | .testimonial-photo img { 26 | border-radius: 50%; 27 | height: 80px; 28 | width: 80px; 29 | object-fit: cover; 30 | margin-bottom: 1rem; 31 | } 32 | 33 | .testimonial-content p { 34 | font-style: italic; 35 | color: #555; 36 | } 37 | 38 | .testimonial-content h4 { 39 | margin: 0.5rem 0 0; 40 | font-size: 1.1rem; 41 | font-weight: bold; 42 | color: #555; 43 | } 44 | 45 | 
.testimonial-content span { 46 | color: #999; 47 | font-size: 0.9rem; 48 | } 49 | -------------------------------------------------------------------------------- /docs/concepts/index.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | Concepts 3 | ==================== 4 | 5 | Overview of the concepts -- read these to get a mental model for how Burr works. 6 | 7 | .. _concepts: 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | overview 13 | sdlc 14 | actions 15 | state 16 | state-machine 17 | transitions 18 | tracking 19 | state-persistence 20 | serde 21 | streaming-actions 22 | state-typing 23 | hooks 24 | additional-visibility 25 | parallelism 26 | recursion 27 | sync-vs-async 28 | planned-capabilities 29 | -------------------------------------------------------------------------------- /docs/concepts/overview.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Cheat Sheet 3 | =========== 4 | 5 | This is a quick overview of Burr's design -- the concepts are explored in more detail in the following sections. Read over this for a very high-level overview, or use this as a cheat sheet later. 6 | 7 | - With Burr you write an :ref:`Application ` -- this manages control flow (allowing for automated or user-blocking workflows), :ref:`persistence ` to DBs, logs :ref:`telemetry `, and delegates to a variety of plugins/integrations. 8 | - Applications are composed of :ref:`actions ` (functions that write to/read from state), and :ref:`transitions ` (functions that determine the next action to execute based on state). 9 | - :ref:`State ` is immutable and uses the special Burr :py:class:`State ` API. You write to it by applying a state operation (e.g. ``state = state.update(key=value)``, which returns a new state instance with the updated value. 
10 | - All other production/debugging concerns are implemented as :ref:`hooks `, which are simple callbacks that are called at various points in the application lifecycle (store/retrieve state, log information, etc...). 11 | 12 | Note that we did not mention LLMs above at all! That's good -- you want your LLM frameworks to be ever-so-slightly decoupled from them for the best experience (all of AI is just software, plain and simple, after all...). 13 | 14 | And that's the basics! Let's dive into the details. 15 | -------------------------------------------------------------------------------- /docs/concepts/sdlc.rst: -------------------------------------------------------------------------------- 1 | ================================ 2 | SDLC with LLMs 3 | ================================ 4 | If you're building an LLM-based application, you'll want to follow a slightly different software development lifecycle (SDLC) 5 | than you would for a traditional software project. Here's a rough outline of what that might look like: 6 | 7 | .. image:: ../_static/burr_sdlc.png 8 | :alt: SDLC with LLMs 9 | :align: center 10 | 11 | The two cycles that exist are: 12 | 13 | 1. App Dev Loop. 14 | 2. Test Driven Development Loop. 15 | 16 | and you will use one to feed into the other, etc. 17 | 18 | Walking through the diagram the SDLC looks like this: 19 | 20 | 1. Write code with Burr. 21 | 2. Use Burr's integrated observability, and trace all parts of your application. 22 | 3. With the data collected, you can: (1) annotate what was captured and export it, or (2) create a pytest fixture with it. 23 | 4. Create a data set from the annotated data or by running tests. 24 | 5. Evaluate the data set. 25 | 6. Analyze the results. 26 | 7. Either adjust code or prompts, or ship the code. 27 | 8. Iterate using one of the loops... 
28 | -------------------------------------------------------------------------------- /docs/contributing/architecture.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Architecture 3 | ============ 4 | 5 | Some notes on the design/implementation of Burr: 6 | 7 | -------------- 8 | Python Package 9 | -------------- 10 | 11 | Dependencies 12 | ------------ 13 | A note on dependencies: 14 | 15 | - The core Burr library will have zero dependencies. Currently its only dependency in hamilton, but that will be removed in the future. 16 | - Any other extensions (the server, the CLI, etc...) are allowed dependencies -- specify these as install targets in ``pyproject.toml`` 17 | - The dependencies/plugins will live alongside the core library, and contain guards to ensure that the right libraries are installed. You can do this with ``burr.integrations.base.require_plugins`` 18 | 19 | Coding style 20 | ------------ 21 | We use type hints for function parameters (and in rare cases inline) to aid development but it is not enforced -- use your best judgement 22 | 23 | Versioning 24 | ---------- 25 | 26 | We adhere to `sem-var `_ for versioning. We ensure that: 27 | 28 | - Every public facing function/class/variable should: 29 | - have a docstring 30 | - have type-hints 31 | - be exposed through the documentation 32 | 33 | If it is not exposed through the documentation it is assumed to be private, and thus will 34 | not be subject to sem-var rules. 35 | 36 | We currently are not versioning the server or CLI, but are versioning the core library. Note 37 | that it starts at ``0.x``, which means that we are allowed to make a backwards-incompatible change. 38 | We will make every effort not to do so -- and will provide a migration guide/script if we do. 
39 | -------------------------------------------------------------------------------- /docs/contributing/index.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Contributing 3 | ========================= 4 | 5 | .. _examples: 6 | 7 | Instructions to develop/get started with `Burr`! If you don't know where 8 | to start, you can always reach out to us: 9 | - Start an issue or discussion on our `GitHub `_. 10 | - Reach out to us by `email `_ 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | setup 16 | iterating 17 | architecture 18 | contributing 19 | -------------------------------------------------------------------------------- /docs/contributing/setup.rst: -------------------------------------------------------------------------------- 1 | ===== 2 | Setup 3 | ===== 4 | 5 | These instructions will be assuming use of `pip `_ and `virtualenv `_. 6 | Replace with your package manager of choice if you prefer. 7 | 8 | ---------- 9 | Clone/fork 10 | ---------- 11 | 12 | To get started, create a fork of Burr on the github UI, and clone it to your local machine. 13 | 14 | .. code-block:: bash 15 | 16 | git clone https://github.com//burr.git 17 | 18 | 19 | ---------- 20 | Installing 21 | ---------- 22 | 23 | Next you'll want to ``cd`` into the directory and install 24 | ``burr`` in developer mode: 25 | 26 | .. code-block:: bash 27 | 28 | cd burr 29 | pip install -e ".[developer]" 30 | 31 | This will install all potential dependencies. Burr will work with ``python >=3.9``. 32 | 33 | ------------------ 34 | Linting/Pre-Commit 35 | ------------------ 36 | 37 | Burr has pre-commit hooks enabled. This comes with the ``developer`` extras. 38 | You can always run the pre-commit hooks manually (on all files). Do this 39 | if it somehow wasn't configured and its in a bad state. 40 | 41 | .. 
code-block:: bash 42 | 43 | pre-commit run --all 44 | 45 | For the UI, we leverage husky and lint-staged to run the pre-commit hooks on the client side. 46 | This actually runs pre-commits for the whole repository, so you can run through husky if you want. 47 | 48 | You can also run the pre-commit hooks for the UI manually: 49 | 50 | .. code-block:: bash 51 | 52 | npm run lint:fix 53 | npm run format:fix 54 | 55 | from within the ``telemetry/ui`` directory. 56 | -------------------------------------------------------------------------------- /docs/examples/agents/_agent_patterns/agent_supervisor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/examples/agents/_agent_patterns/agent_supervisor.png -------------------------------------------------------------------------------- /docs/examples/agents/_agent_patterns/hierarchical_agent_teams.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/examples/agents/_agent_patterns/hierarchical_agent_teams.png -------------------------------------------------------------------------------- /docs/examples/agents/_agent_patterns/multi_agent_collaboration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/examples/agents/_agent_patterns/multi_agent_collaboration.png -------------------------------------------------------------------------------- /docs/examples/agents/_agent_patterns/multi_modal_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/examples/agents/_agent_patterns/multi_modal_agent.png 
-------------------------------------------------------------------------------- /docs/examples/agents/_divide-and-conquer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/examples/agents/_divide-and-conquer.png -------------------------------------------------------------------------------- /docs/examples/agents/agent-patterns.md: -------------------------------------------------------------------------------- 1 | # Agent patterns 2 | 3 | We have the bones of a few applications that can be used to create agents. These are not fully fleshed out, but they 4 | can be used as a starting point for creating your own agents. 5 | 6 | We have the following templates: 7 | 8 | ## Multimodal agent 9 | 10 | [Code template](https://github.com/DAGWorks-Inc/burr/tree/main/examples/templates/multi_modal_agent.py) 11 | 12 | ![](./_agent_patterns/multi_modal_agent.png) 13 | 14 | ## Multi-agent collaboration 15 | 16 | [Code template](https://github.com/DAGWorks-Inc/burr/tree/main/examples/templates/multi_agent_collaboration.py) 17 | 18 | ![](./_agent_patterns/multi_agent_collaboration.png) 19 | 20 | ## Supervisor agent 21 | 22 | [Code template](https://github.com/DAGWorks-Inc/burr/tree/main/examples/templates/agent_supervisor.py) 23 | 24 | ![](./_agent_patterns/agent_supervisor.png) 25 | 26 | 27 | ## Hierarchical teams 28 | 29 | [Code template](https://github.com/DAGWorks-Inc/burr/tree/main/examples/templates/hierarchical_agent_teams.py) 30 | 31 | ![](./_agent_patterns/hierarchical_agent_teams.png) 32 | -------------------------------------------------------------------------------- /docs/examples/agents/index.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | 🤖 Agents 3 | ========== 4 | 5 | Burr allows you to create agents that can interact with each other via State. 6 | 7 | .. 
toctree:: 8 | :maxdepth: 2 9 | 10 | divide-and-conquer 11 | agent-patterns 12 | -------------------------------------------------------------------------------- /docs/examples/chatbots/index.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | 📞 Chatbots 3 | ============ 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | basic-chatbot 9 | gpt-like-chatbot 10 | rag-chatbot-hamilton 11 | -------------------------------------------------------------------------------- /docs/examples/data-science/_ml_training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/examples/data-science/_ml_training.png -------------------------------------------------------------------------------- /docs/examples/data-science/_simulation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/examples/data-science/_simulation.png -------------------------------------------------------------------------------- /docs/examples/data-science/index.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | 🧪 Data science 3 | =============== 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | ml_training 9 | simulation 10 | -------------------------------------------------------------------------------- /docs/examples/deployment/index.rst: -------------------------------------------------------------------------------- 1 | ============= 2 | ✈ Deployment 3 | ============= 4 | 5 | Burr is specifically meant to make getting your product in production easier and faster. 6 | This section covers examples of getting Burr to production, as well as a brief overview of approaches/requirements. 
7 | 8 | To deploy a Burr application in production, you need to do three things: 9 | 10 | 1. Place your Burr application in some place you can trigger it. E.g. a web-service, a script, a library, etc. 11 | 2. Provision infrastructure to run (1) 12 | 3. Monitor your application in production (highly recommended, but not required) 13 | 14 | Due to the large number of methods people have for deploying applications, we will not cover all of them here. That said, 15 | we really appreciate contributions! Please `open an issue `_ if there's an example you'd like, and :ref:`contribute back ` if you 16 | have an example that would add to this guide. We have created a variety of issues with placeholders and link to them in the docs. 17 | 18 | .. toctree:: 19 | :maxdepth: 2 20 | 21 | web-server 22 | infrastructure 23 | monitoring 24 | -------------------------------------------------------------------------------- /docs/examples/deployment/infrastructure.rst: -------------------------------------------------------------------------------- 1 | ------------------------------------- 2 | Provisioning Infrastructure/Deploying 3 | ------------------------------------- 4 | 5 | Burr is not opinionated about the method of deployment/cloud one uses. Any method that can run python code, or web-service will work 6 | (AWS, vercel, etc...). Note we aim to have more examples here -- see `this issue `_ to track! 7 | 8 | - `Deploying Burr in an AWS lambda function `_ 9 | - `Deploying Burr using BentoML `_ 10 | 11 | 12 | Using BentoML 13 | ------------- 14 | `BentoML `_ is a specialized tool to package, deploy, and manage AI services. 15 | For example, it allows you to create a REST API for your Burr application with minimal effort. 16 | See the `Burr + BentoML example `_ for more information. 
17 | -------------------------------------------------------------------------------- /docs/examples/guardrails/_creating_tests.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/docs/examples/guardrails/_creating_tests.png -------------------------------------------------------------------------------- /docs/examples/guardrails/index.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | 🚧 Guardrails / Tests 3 | ======================= 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | creating_tests 9 | -------------------------------------------------------------------------------- /docs/examples/index.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Cookbook 3 | ======== 4 | 5 | .. _examples: 6 | 7 | These are still under progress -- see the github `examples directory `_ 8 | for the latest. 9 | 10 | Examples of more complex/powerful use-cases of Burr. Download/copy these to adapt to your use-cases. 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | simple/index 16 | chatbots/index 17 | agents/index 18 | deployment/index 19 | guardrails/index 20 | data-science/index 21 | -------------------------------------------------------------------------------- /docs/examples/simple/index.rst: -------------------------------------------------------------------------------- 1 | ================ 2 | 🧸 Toy examples 3 | ================ 4 | 5 | The following toy examples illustrate the basics of Burr. If you're interested in more real-life applications, skip ahead to chatbots. 6 | 7 | .. 
toctree:: 8 | :maxdepth: 2 9 | 10 | counter 11 | cowsay 12 | choose-your-own-adventure 13 | -------------------------------------------------------------------------------- /docs/getting_started/index.rst: -------------------------------------------------------------------------------- 1 | .. _gettingstarted: 2 | 3 | ==================== 4 | Get started 5 | ==================== 6 | 7 | The following section of the docs will walk you through Burr and how to integrate into your project: 8 | 9 | .. toctree:: 10 | :maxdepth: 1 11 | 12 | why-burr 13 | install 14 | simple-example 15 | up-next 16 | -------------------------------------------------------------------------------- /docs/getting_started/why-burr.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Why Burr? 3 | ========= 4 | 5 | Why do you need a state machine for your applications? Won't the normal programming constructs suffice? 6 | 7 | **Yes, until a point.** Let's take a look at what you need to build a production-level LLM application: 8 | 9 | 1. **Tracing/telemetry** -- LLMs can be chaotic, and you need visibility into what decisions it made and how long it took to make them. 10 | 2. **State persistence** -- thinking about how to save/load your application is a whole other level of infrastructure you need to worry about. 11 | 3. **Visualization/debugging** -- when developing you'll want to be able to view what it is doing/did + load up the data at any point 12 | 4. **Manage interaction between users/LLM** -- pause for input in certain conditions 13 | 5. **Data gathering for evaluation + test generation** -- storing data run in production to use for later analysis/fine-tuning 14 | 15 | You can always patch together various frameworks or build it all yourself, but at that point you're going to be spending a lot of time on tasks that 16 | are not related to the core value proposition of your software. 
17 | 18 | **Burr was built to make these all easier.** 19 | 20 | By modeling your application as a state machine of simple python constructs you can have the best of both worlds. 21 | Bring in whatever infrastructure/tooling you want and get all of the above. Burr is meant to start off as an extremely lightweight tool to 22 | make building LLM (+ a wide swath of other) applications easier. The value compounds as you leverage more of the ecosystems, plugins, and additional 23 | features it provides. 24 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: main.rst 2 | 3 | .. toctree:: 4 | :maxdepth: 2 5 | :hidden: 6 | :caption: Burr 7 | 8 | getting_started/index 9 | examples/index 10 | concepts/index 11 | reference/index 12 | contributing/index 13 | 14 | .. toctree:: 15 | :hidden: 16 | :maxdepth: 2 17 | :caption: Burr Cloud 18 | 19 | Waitist 20 | 21 | .. toctree:: 22 | :hidden: 23 | :caption: RESOURCES 24 | 25 | Blog 26 | Discord community server 27 | GitHub 28 | Twitter 29 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 
21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/reference/actions.rst: -------------------------------------------------------------------------------- 1 | ================= 2 | Actions 3 | ================= 4 | 5 | .. _actionref: 6 | 7 | .. autoclass:: burr.core.action.Action 8 | :members: 9 | :inherited-members: 10 | :show-inheritance: 11 | 12 | .. automethod:: __init__ 13 | 14 | .. autoclass:: burr.core.action.Result 15 | :members: 16 | 17 | .. automethod:: __init__ 18 | 19 | 20 | .. autoclass:: burr.core.action.Input 21 | :members: 22 | 23 | .. automethod:: __init__ 24 | 25 | .. autoclass:: burr.core.action.action 26 | :members: 27 | 28 | .. automethod:: __init__ 29 | 30 | 31 | .. autofunction:: burr.core.action.bind 32 | 33 | 34 | .. autoclass:: burr.core.action.Function 35 | :members: 36 | 37 | .. autoclass:: burr.core.action.Reducer 38 | :members: 39 | 40 | .. autoclass:: burr.core.action.StreamingResultContainer 41 | :members: 42 | 43 | .. autoclass:: burr.core.action.AsyncStreamingResultContainer 44 | :members: 45 | 46 | .. autoclass:: burr.core.action.streaming_action 47 | :members: 48 | 49 | .. autoclass:: burr.core.action.StreamingAction 50 | :members: 51 | -------------------------------------------------------------------------------- /docs/reference/application.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Applications 3 | ============ 4 | 5 | Use this to build and manage a state machine. You should only ever instantiate the ``ApplicationBuilder`` class, 6 | and not the ``Application`` class directly.
7 | 8 | 9 | .. autoclass:: burr.core.application.ApplicationBuilder 10 | :members: 11 | 12 | .. _applicationref: 13 | 14 | .. autoclass:: burr.core.application.Application 15 | :members: 16 | 17 | .. automethod:: __init__ 18 | 19 | .. autoclass:: burr.core.application.ApplicationGraph 20 | :members: 21 | 22 | .. autoclass:: burr.core.application.ApplicationContext 23 | :members: 24 | 25 | ========== 26 | Graph APIs 27 | ========== 28 | 29 | You can, optionally, use the graph API along with the :py:meth:`burr.core.application.ApplicationBuilder.with_graph` 30 | method. While this is a little more verbose, it helps decouple application logic from graph logic, and is useful in a host 31 | of situations. 32 | 33 | The ``GraphBuilder`` class is used to build a graph, and the ``Graph`` class can be passed to the application builder. 34 | 35 | .. autoclass:: burr.core.graph.GraphBuilder 36 | :members: 37 | 38 | .. autoclass:: burr.core.graph.Graph 39 | :members: 40 | -------------------------------------------------------------------------------- /docs/reference/conditions.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | Conditions/Transitions 3 | ======================= 4 | 5 | .. _transitionref: 6 | 7 | Conditions represent choices to move between actions -- these are read by the application builder when executing the graph. 8 | Note that these will always be specified in order -- the first condition that evaluates to ``True`` will be the selected action. 9 | 10 | .. autoclass:: burr.core.action.Condition 11 | :special-members: __and__, __or__, __invert__ 12 | :members: 13 | 14 | .. automethod:: __init__ 15 | -------------------------------------------------------------------------------- /docs/reference/index.rst: -------------------------------------------------------------------------------- 1 | 2 | .. 
_reference: 3 | 4 | ======================== 5 | API reference 6 | ======================== 7 | 8 | Reference documentation. Anything in here is part of the public (semantically versioned) API, unless marked otherwise. 9 | That means that if you find something not in here, it's not part of the public API and may change without notice. If you 10 | need functionality that is not publicly exposed, please open an issue and we can discuss adding it! 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | application 16 | actions 17 | state 18 | serde 19 | persister 20 | conditions 21 | tracking 22 | visibility 23 | lifecycle 24 | parallelism 25 | typing 26 | integrations/index 27 | telemetry 28 | -------------------------------------------------------------------------------- /docs/reference/integrations/hamilton.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | Hamilton 3 | ======================= 4 | 5 | Full Hamilton integration. Touch-points are custom Hamilton actions. This is installed by default. 6 | 7 | .. autofunction:: burr.integrations.hamilton.Hamilton 8 | 9 | .. autofunction:: burr.integrations.hamilton.from_state 10 | 11 | .. autofunction:: burr.integrations.hamilton.from_value 12 | 13 | .. autofunction:: burr.integrations.hamilton.update_state 14 | 15 | .. autofunction:: burr.integrations.hamilton.append_state 16 | -------------------------------------------------------------------------------- /docs/reference/integrations/haystack.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Haystack 3 | ======== 4 | 5 | The Haystack integration allows you to use ``Component`` as Burr ``Action`` using the ``HaystackAction`` construct. You can visit the examples in ``burr/examples/haystack-integration`` for a notebook tutorial. 6 | 7 | .. autoclass:: burr.integrations.haystack.HaystackAction 8 | 9 | .. 
autofunction:: burr.integrations.haystack.haystack_pipeline_to_burr_graph 10 | -------------------------------------------------------------------------------- /docs/reference/integrations/index.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Integrations 3 | ============ 4 | 5 | Integrations -- we will be adding more 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | hamilton 11 | streamlit 12 | opentelemetry 13 | traceloop 14 | langchain 15 | pydantic 16 | haystack 17 | ray 18 | -------------------------------------------------------------------------------- /docs/reference/integrations/langchain.rst: -------------------------------------------------------------------------------- 1 | --------- 2 | Langchain 3 | --------- 4 | 5 | Burr works out of the box with langchain, as Burr delegates to any python code. 6 | 7 | There are multiple examples of Burr leveraging langchain, including: 8 | 9 | - `Multi agent collaboration `_ 10 | - `LCEL + Hamilton together `_ 11 | 12 | Burr also provides custom ser/deserialization for langchain objects. See the following resources: 13 | 1. `Example `_ 14 | 2. :ref:`Custom serialization docs ` 15 | 3. `Langchain serialization plugin `_ 16 | 17 | We are working on adding more builtin support for LCEL (LCELActions), and considering adding burr callbacks for tracing langgraph in the Burr 18 | UI. If you have any suggestions, please let us know. 19 | -------------------------------------------------------------------------------- /docs/reference/integrations/opentelemetry.rst: -------------------------------------------------------------------------------- 1 | .. _opentelintegrationref: 2 | 3 | -------------- 4 | OpenTelemetry 5 | -------------- 6 | 7 | Burr has two integrations with OpenTelemetry: 8 | 9 | 10 | 1. Burr can log traces to OpenTelemetry 11 | 2. 
Burr can capture any traces logged within an action and log them to OpenTelemetry 12 | 13 | See the following resources for more information: 14 | 15 | - :ref:`Tracing/OpenTelemetry ` 16 | - `Example in the repository `_ 17 | - `Blog post `_ 18 | - `OpenTelemetry `_ 19 | 20 | Reference for the various useful methods: 21 | 22 | .. autoclass:: burr.integrations.opentelemetry.OpenTelemetryBridge 23 | :members: 24 | 25 | .. autofunction:: burr.integrations.opentelemetry.init_instruments 26 | -------------------------------------------------------------------------------- /docs/reference/integrations/pydantic.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Pydantic 3 | ======== 4 | 5 | `Pydantic `_ integrations come in two forms: 6 | 7 | - Serialization/deserialization of state (see :ref:`serderef` for more information) 8 | - Type checking of state (see :ref:`statetypingref` for more information) 9 | -------------------------------------------------------------------------------- /docs/reference/integrations/ray.rst: -------------------------------------------------------------------------------- 1 | === 2 | Ray 3 | === 4 | 5 | The Burr Ray integration allows you to run :ref:`parallel sub-applications ` on `Ray `_. 6 | 7 | .. autoclass:: burr.integrations.ray.RayExecutor
 8 | :members: 9 | -------------------------------------------------------------------------------- /docs/reference/integrations/streamlit.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | Streamlit 3 | ======================= 4 | 5 | Full Streamlit integration. Touch-points are utility functions. 6 | It is likely this will adapt/change over time, so it is only recommended to use this for debugging/developing. 7 | 8 | Install with pypi: 9 | 10 | .. code-block:: bash 11 | 12 | pip install burr[streamlit] 13 | 14 | ..
autoclass:: burr.integrations.streamlit.AppState 15 | :members: 16 | 17 | .. autofunction:: burr.integrations.streamlit.load_state_from_log_file 18 | 19 | .. autofunction:: burr.integrations.streamlit.get_state 20 | 21 | .. autofunction:: burr.integrations.streamlit.update_state 22 | 23 | .. autofunction:: burr.integrations.streamlit.render_state_machine 24 | 25 | .. autofunction:: burr.integrations.streamlit.render_action 26 | 27 | .. autofunction:: burr.integrations.streamlit.render_state_results 28 | 29 | .. autofunction:: burr.integrations.streamlit.set_slider_to_current 30 | 31 | .. autofunction:: burr.integrations.streamlit.render_explorer 32 | -------------------------------------------------------------------------------- /docs/reference/integrations/traceloop.rst: -------------------------------------------------------------------------------- 1 | --------- 2 | Traceloop 3 | --------- 4 | 5 | `Traceloop `_ is an `OpenTelemetry `_ vendor 6 | that has a special focus on AI observability. 7 | 8 | Integration with Burr is done through the :ref:`opentelemetry integration `. 9 | 10 | See the following resources for more information about how to leverage opentelemetry and traceloop's 11 | `openllmetry `_ library to instrument your 12 | application: 13 | 14 | - `Example in the repository `_ 15 | - `Blog post `_ 16 | -------------------------------------------------------------------------------- /docs/reference/parallelism.rst: -------------------------------------------------------------------------------- 1 | .. _parallelismref: 2 | 3 | =========== 4 | Parallelism 5 | =========== 6 | 7 | Tools to make sub-actions/sub-graphs easier to work with. Read the docs on :ref:`parallelism` for more information. 8 | 9 | .. autoclass:: burr.core.parallelism.RunnableGraph 10 | :members: 11 | 12 | .. autoclass:: burr.core.parallelism.SubGraphTask 13 | :members: 14 | 15 | .. autoclass:: burr.core.parallelism.TaskBasedParallelAction 16 | :members: 17 | 18 | .. 
autoclass:: burr.core.parallelism.MapActionsAndStates 19 | :members: 20 | 21 | .. autoclass:: burr.core.parallelism.MapActions 22 | :members: 23 | 24 | .. autoclass:: burr.core.parallelism.MapStates 25 | :members: 26 | 27 | .. automethod:: burr.core.parallelism.map_reduce_action 28 | -------------------------------------------------------------------------------- /docs/reference/state.rst: -------------------------------------------------------------------------------- 1 | ================= 2 | State 3 | ================= 4 | 5 | Use the state API to manipulate the state of the application. 6 | 7 | .. autoclass:: burr.core.state.State 8 | :members: 9 | 10 | .. automethod:: __init__ 11 | 12 | 13 | Custom field level serialization and deserialization 14 | ---------------------------------------------------- 15 | Use the following to register custom field level serialization and deserialization functions. 16 | Note: this registration is global for any state field with the same name. 17 | 18 | .. autofunction:: burr.core.state.register_field_serde 19 | -------------------------------------------------------------------------------- /docs/reference/telemetry.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | Usage analytics + data privacy 3 | ============================== 4 | 5 | By default, when using Burr, it collects anonymous usage data to help improve Burr and know where to apply development efforts. 6 | 7 | We capture events on the following occasions: 8 | 9 | 1. When an application is built 10 | 2. When one of the ``execution`` functions is run in ``Application`` 11 | 3. When a CLI command is run 12 | 13 | The captured data is limited to: 14 | 15 | - Operating System and Python version 16 | - A persistent UUID to identify the session, stored in ~/.burr.conf. 17 | - The name of the function/CLI command that was run 18 | 19 | If you're worried, see ``telemetry.py`` for details.
20 | 21 | If you do not wish to participate, one can opt-out with one of the following methods: 22 | 23 | 1. Set it to false programmatically in your code before creating a Burr application builder: 24 | 25 | .. code-block:: python 26 | 27 | from burr import telemetry 28 | telemetry.disable_telemetry() 29 | 30 | 2. Set the key telemetry_enabled to false in ``~/.burr.conf`` under the DEFAULT section: 31 | 32 | .. code-block:: ini 33 | 34 | [DEFAULT] 35 | telemetry_enabled = False 36 | 37 | 3. Set BURR_TELEMETRY_ENABLED=false as an environment variable. Either setting it for your shell session: 38 | 39 | .. code-block:: bash 40 | 41 | export BURR_TELEMETRY_ENABLED=false 42 | 43 | or passing it as part of the run command: 44 | 45 | .. code-block:: bash 46 | 47 | BURR_TELEMETRY_ENABLED=false python NAME_OF_MY_DRIVER.py 48 | -------------------------------------------------------------------------------- /docs/reference/tracking.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Tracking 3 | ======== 4 | 5 | Reference on the Tracking/Telemetry API. 6 | Rather, you should use this through/in conjunction with :py:meth:`burr.core.application.ApplicationBuilder.with_tracker`. 7 | 8 | 9 | .. autoclass:: burr.tracking.LocalTrackingClient 10 | :members: 11 | 12 | .. automethod:: __init__ 13 | -------------------------------------------------------------------------------- /docs/reference/typing.rst: -------------------------------------------------------------------------------- 1 | .. _statetypingref: 2 | 3 | ====== 4 | Typing 5 | ====== 6 | 7 | Reference on the State typing API. Note that docs here are for both core burr and the pydantic plugin 8 | (which does the interesting typing). For an overview, read :ref:`statetyping`. 9 | 10 | 11 | .. autoclass:: burr.core.typing.TypingSystem 12 | :members: 13 | 14 | .. automethod:: __init__ 15 | 16 | .. 
autoclass:: burr.integrations.pydantic.PydanticTypingSystem 17 | :members: 18 | 19 | .. automethod:: __init__ 20 | -------------------------------------------------------------------------------- /docs/reference/visibility.rst: -------------------------------------------------------------------------------- 1 | .. _visibility: 2 | 3 | 4 | ====================== 5 | Tracing Inside Actions 6 | ====================== 7 | 8 | Tooling for gaining visibility inside actions. You will never instantiate these directly, 9 | they are all injected by the framework. This is purely for reference. 10 | 11 | .. autoclass:: burr.visibility.tracing.TracerFactory 12 | :members: 13 | 14 | .. autoclass:: burr.visibility.tracing.ActionSpanTracer 15 | :members: 16 | 17 | 18 | .. autoclass:: burr.visibility.tracing.ActionSpan 19 | :members: 20 | 21 | .. autoclass:: burr.visibility.tracing.trace 22 | :special-members: __init__ 23 | -------------------------------------------------------------------------------- /docs/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | 3 | Sitemap: https://burr.dagworks.io/en/latest/sitemap.xml 4 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/__init__.py -------------------------------------------------------------------------------- /examples/adaptive-crag/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/adaptive-crag/__init__.py -------------------------------------------------------------------------------- /examples/adaptive-crag/burr_docs/cheat_sheet.txt: 
-------------------------------------------------------------------------------- 1 | Cheat Sheet 2 | This is a quick overview of Burr’s design – the concepts are explored in more detail in the following sections. Read over this for a very high-level overview, or use this as a cheat sheet later. 3 | 4 | With Burr you write an Application – this manages control flow (allowing for automated or user-blocking workflows), persistence to DBs, logs telemetry, and delegates to a variety of plugins/integrations. 5 | 6 | Applications are composed of actions (functions that write to/read from state), and transitions (functions that determine the next action to execute based on state). 7 | 8 | State is immutable and uses the special Burr State API. You write to it by applying a state operation (e.g. state = state.update(key=value), which returns a new state instance with the updated value. 9 | 10 | All other production/debugging concerns are implemented as hooks, which are simple callbacks that are called at various points in the application lifecycle (store/retrieve state, log information, etc…). 11 | 12 | Note that we did not mention LLMs above at all! That’s good – you want your LLM frameworks to be ever-so-slightly decoupled from them for the best experience (all of AI is just software, plain and simple, after all…). 13 | 14 | And that’s the basics! Let’s dive into the details. 
15 | -------------------------------------------------------------------------------- /examples/adaptive-crag/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | exa_py 3 | google-generativeai 4 | instructor 5 | lancedb 6 | pydantic 7 | python-dotenv 8 | -------------------------------------------------------------------------------- /examples/adaptive-crag/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/adaptive-crag/statemachine.png -------------------------------------------------------------------------------- /examples/conversational-rag/README.md: -------------------------------------------------------------------------------- 1 | # Conversational RAG examples 2 | Here we curate different examples of how to build a Conversational RAG agent using different approaches/backends. 3 | 4 | ## [Simple Example](simple_example/) 5 | This example demonstrates how to build a conversational RAG agent with "memory". 6 | 7 | The "memory" here is stored in state, which Burr then can help you track, 8 | manage, and introspect. 9 | 10 | 11 | ## [Graph DB Example](graph_db_example/) 12 | This demo illustrates how to build a RAG Q&A AI agent over the [UFC stats dataset](https://www.kaggle.com/datasets/rajeevw/ufcdata). 13 | This one uses a Knowledge Graph that is stored in [FalkorDB](https://www.falkordb.com/) to query for 14 | information about UFC fighters and fights. 
15 | -------------------------------------------------------------------------------- /examples/conversational-rag/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/conversational-rag/__init__.py -------------------------------------------------------------------------------- /examples/conversational-rag/graph_db_example/UFC_Graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/conversational-rag/graph_db_example/UFC_Graph.png -------------------------------------------------------------------------------- /examples/conversational-rag/graph_db_example/ingest_fighters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/conversational-rag/graph_db_example/ingest_fighters.png -------------------------------------------------------------------------------- /examples/conversational-rag/graph_db_example/ingest_fights.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/conversational-rag/graph_db_example/ingest_fights.png -------------------------------------------------------------------------------- /examples/conversational-rag/graph_db_example/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start,graphviz] 2 | falkordb==1.0.4 3 | openai 4 | sf-hamilton[sdk,visualization] 5 | -------------------------------------------------------------------------------- /examples/conversational-rag/graph_db_example/statemachine.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/conversational-rag/graph_db_example/statemachine.png -------------------------------------------------------------------------------- /examples/conversational-rag/simple_example/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/conversational-rag/simple_example/__init__.py -------------------------------------------------------------------------------- /examples/conversational-rag/simple_example/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | faiss-cpu 3 | langchain 4 | langchain-community 5 | langchain-openai 6 | sf-hamilton 7 | -------------------------------------------------------------------------------- /examples/conversational-rag/simple_example/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/conversational-rag/simple_example/statemachine.png -------------------------------------------------------------------------------- /examples/custom-serde/README.md: -------------------------------------------------------------------------------- 1 | # Adding custom serialization and deserialization 2 | 3 | This example shows how to extend serialization/deserialization of fields in state. 4 | 5 | ## Prerequisites 6 | `` 7 | pip install -r requirements.txt 8 | `` 9 | 10 | ## Running the demo 11 | `` 12 | python application.py 13 | `` 14 | 15 | or 16 | 17 | `` 18 | pip install jupyter 19 | jupyter notebook 20 | `` 21 | 22 | and running the notebook. Or 23 | Open In Colab 24 | . 
25 | 26 | ## Explanation 27 | See the notebook and/or `application.py` for more information. 28 | 29 | Or [watch this video walkthrough](https://www.youtube.com/watch?v=Squ5IAeQBzc). 30 | -------------------------------------------------------------------------------- /examples/custom-serde/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/custom-serde/__init__.py -------------------------------------------------------------------------------- /examples/custom-serde/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | langchain_community 3 | langchain_core 4 | pydantic 5 | -------------------------------------------------------------------------------- /examples/custom-serde/run.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example of running the application 3 | from another module to make sure the 4 | SERDE classes are registered in a non __main__ 5 | module namespace. 6 | 7 | e.g. 
python run.py 8 | and then 9 | burr-test-case create --project-name serde-example --app-id APP_ID --sequence-id 3 --serde-module application.py 10 | """ 11 | import pprint 12 | import uuid 13 | 14 | import application # noqa 15 | from application import build_application 16 | 17 | from burr.core import State 18 | 19 | # build 20 | app = build_application("client-123", str(uuid.uuid4())) 21 | app.visualize( 22 | output_file_path="statemachine", include_conditions=True, include_state=True, format="png" 23 | ) 24 | # run 25 | action, result, state = app.run( 26 | halt_after=["terminal_action"], inputs={"user_input": "hello world"} 27 | ) 28 | # serialize 29 | serialized_state = state.serialize() 30 | pprint.pprint(serialized_state) 31 | # deserialize 32 | deserialized_state = State.deserialize(serialized_state) 33 | # assert that the state is the same after serialization and deserialization 34 | assert state.get_all() == deserialized_state.get_all() 35 | -------------------------------------------------------------------------------- /examples/custom-serde/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/custom-serde/statemachine.png -------------------------------------------------------------------------------- /examples/deep-researcher/README.md: -------------------------------------------------------------------------------- 1 | # Deep Researcher 2 | 3 | ## Introduction 4 | 5 | The structure of the research assistant is taken from a [langchain and langgraph example](https://github.com/langchain-ai/local-deep-researcher). It is rewritten here in the Burr framework in `application.py`. 6 | 7 | ![Deep Researcher](statemachine.png) 8 | 9 | The helper code in `prompts.py` and `utils.py` is directly taken from the original deep researcher codebase. The MIT license for the code is included in both those files. 
10 | 11 | ## Prerequisites 12 | 13 | Set the configuration variables at the beginning of the main section of `application.py`. 14 | 15 | Then install Python modules 16 | ```sh 17 | pip install -r requirements.txt 18 | ``` 19 | 20 | You will need accounts for [Tavily search](https://tavily.com/) and the [OpenAI API](https://platform.openai.com/docs/overview). Once you have those accounts, set the environment variables TAVILY_API_KEY and OPENAI_API_KEY and run the script. 21 | 22 | ```sh 23 | export OPENAI_API_KEY="YOUR_OPENAI_KEY" 24 | export TAVILY_API_KEY="YOUR_TAVILY_API_KEY" 25 | python application.py 26 | ``` 27 | -------------------------------------------------------------------------------- /examples/deep-researcher/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/deep-researcher/__init__.py -------------------------------------------------------------------------------- /examples/deep-researcher/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | openai 3 | tavily-python 4 | -------------------------------------------------------------------------------- /examples/deep-researcher/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/deep-researcher/statemachine.png -------------------------------------------------------------------------------- /examples/deployment/aws/lambda/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/lambda/python:3.11 2 | 3 | COPY requirements.txt ./ 4 | RUN pip install -r requirements.txt 5 | 6 | # add env variables as needed. 7 | # ENV .. 
8 | 9 | COPY app ./app 10 | 11 | CMD ["app.lambda_handler.lambda_handler"] 12 | -------------------------------------------------------------------------------- /examples/deployment/aws/lambda/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/deployment/aws/lambda/app/__init__.py -------------------------------------------------------------------------------- /examples/deployment/aws/lambda/app/counter_app.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a very simple counting application. 3 | 4 | It's here to help you get the mechanics of deploying a Burr application to AWS Lambda. 5 | """ 6 | 7 | import time 8 | 9 | import burr.core 10 | from burr.core import Application, Result, State, default, expr 11 | from burr.core.action import action 12 | from burr.core.graph import GraphBuilder 13 | 14 | 15 | @action(reads=["counter"], writes=["counter"]) 16 | def counter(state: State) -> State: 17 | result = {"counter": state["counter"] + 1} 18 | time.sleep(0.5) # sleep to simulate a longer running function 19 | return state.update(**result) 20 | 21 | 22 | # our graph. 
23 | graph = ( 24 | GraphBuilder() 25 | .with_actions(counter=counter, result=Result("counter")) 26 | .with_transitions( 27 | ("counter", "counter", expr("counter < counter_limit")), 28 | ("counter", "result", default), 29 | ) 30 | .build() 31 | ) 32 | 33 | 34 | def application(count_up_to: int = 10) -> Application: 35 | """function to return a burr application""" 36 | return ( 37 | burr.core.ApplicationBuilder() 38 | .with_graph(graph) 39 | .with_state(**{"counter": 0, "counter_limit": count_up_to}) 40 | .with_entrypoint("counter") 41 | .build() 42 | ) 43 | -------------------------------------------------------------------------------- /examples/deployment/aws/lambda/app/lambda_handler.py: -------------------------------------------------------------------------------- 1 | from . import counter_app 2 | 3 | 4 | def lambda_handler(event, context): 5 | count_up_to = int(event["body"]["number"]) 6 | 7 | app = counter_app.application(count_up_to) 8 | action, result, state = app.run(halt_after=["result"]) 9 | 10 | return {"statusCode": 200, "body": state.serialize()} 11 | 12 | 13 | if __name__ == "__main__": 14 | print(lambda_handler({"body": {"number": 10}}, None)) 15 | -------------------------------------------------------------------------------- /examples/deployment/aws/lambda/requirements.txt: -------------------------------------------------------------------------------- 1 | burr 2 | # for tracking you'd add extra dependencies 3 | -------------------------------------------------------------------------------- /examples/email-assistant/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for Email Assistant 2 | # This container sets up a Python environment for running the Email Assistant server 3 | # along with the Burr UI. It exposes ports 7241 and 7242 for the UI and API respectively. 
4 | 5 | FROM python:3.11-bookworm 6 | 7 | # Set the working directory in the container 8 | WORKDIR /app 9 | 10 | # Install any needed packages specified in requirements.txt 11 | # Assuming you have a requirements.txt file with all the necessary dependencies 12 | COPY requirements.txt /app/ 13 | RUN pip install -r requirements.txt 14 | 15 | # Copy the current directory contents into the container at /app 16 | COPY server.py /app 17 | COPY wrapper.sh /app 18 | 19 | # Make port 7241 and 7242 available to the world outside this container 20 | EXPOSE 7241 7242 21 | 22 | # Make the wrapper script executable 23 | RUN chmod +x wrapper.sh 24 | 25 | # Run wrapper.sh when the container launches 26 | CMD ["./wrapper.sh"] 27 | -------------------------------------------------------------------------------- /examples/email-assistant/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/email-assistant/__init__.py -------------------------------------------------------------------------------- /examples/email-assistant/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | app: 5 | build: 6 | context: . 
7 | dockerfile: Dockerfile 8 | ports: 9 | - "7241:7241" 10 | - "7242:7242" 11 | environment: 12 | - OPENAI_API_KEY=${OPENAI_API_KEY} 13 | 14 | nginx: 15 | image: nginx:alpine 16 | ports: 17 | - "80:80" 18 | volumes: 19 | - ./nginx.conf:/etc/nginx/nginx.conf:ro 20 | depends_on: 21 | - app 22 | 23 | networks: 24 | default: 25 | name: fastapi-burr-network 26 | -------------------------------------------------------------------------------- /examples/email-assistant/nginx.conf: -------------------------------------------------------------------------------- 1 | events { 2 | worker_connections 1024; 3 | } 4 | 5 | http { 6 | server { 7 | listen 80; 8 | server_name telemetry.localhost; 9 | 10 | location / { 11 | proxy_pass http://app:7241; 12 | proxy_set_header Host $host; 13 | proxy_set_header X-Real-IP $remote_addr; 14 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 15 | proxy_set_header X-Forwarded-Proto $scheme; 16 | } 17 | } 18 | 19 | server { 20 | listen 80; 21 | server_name api.localhost; 22 | 23 | location / { 24 | proxy_pass http://app:7242; 25 | proxy_set_header Host $host; 26 | proxy_set_header X-Real-IP $remote_addr; 27 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 28 | proxy_set_header X-Forwarded-Proto $scheme; 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /examples/email-assistant/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | openai 3 | -------------------------------------------------------------------------------- /examples/email-assistant/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/email-assistant/statemachine.png -------------------------------------------------------------------------------- /examples/email-assistant/wrapper.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Start the Burr UI 4 | burr --host 0.0.0.0 & 5 | 6 | # Start the FastAPI server using uvicorn 7 | uvicorn server:app --host 0.0.0.0 --port 7242 8 | -------------------------------------------------------------------------------- /examples/hamilton-integration/README.md: -------------------------------------------------------------------------------- 1 | # Modular RAG with Burr and Hamilton 2 | 3 | This examples shows the "2-layer" approach to building RAG and LLM agents using Burr and Hamilton. 4 | 5 | You will find: 6 | 7 | - `notebook.ipynb` contains a guide on how to build a modular RAG application. It details how a typicaly project evolves and how Burr and Hamilton can help you achieve the desired modularity. 8 | - `application.py` and `actions/` contain the code from the final application version showed in the notebook. 9 | -------------------------------------------------------------------------------- /examples/hamilton-integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/hamilton-integration/__init__.py -------------------------------------------------------------------------------- /examples/hamilton-integration/actions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/hamilton-integration/actions/__init__.py -------------------------------------------------------------------------------- /examples/hamilton-integration/actions/ask_question.py: -------------------------------------------------------------------------------- 1 | import lancedb 2 | import openai 3 | 4 | 5 | def relevant_chunks(user_query: str) -> list[dict]: 6 | chunks_table = 
def system_prompt(relevant_chunks: list[dict]) -> str:
    """Assemble the system prompt from retrieved blog chunks.

    :param relevant_chunks: search results; each dict must have a ``text`` key.
    :return: instruction text followed by the concatenated blog content.
    """
    relevant_content = "\n".join(chunk["text"] for chunk in relevant_chunks)
    instructions = (
        "Answer the user's questions based on the provided blog post content. "
        "Answer in a concise and helpful manner, and tell the user "
        "if you don't know the answer or you're unsure.\n\n"
    )
    return instructions + f"BLOG CONTENT:\n{relevant_content}"


def llm_answer(system_prompt: str, user_query: str) -> str:
    """Answer ``user_query`` with gpt-4o-mini, conditioned on ``system_prompt``.

    :param system_prompt: system message built from retrieved chunks.
    :param user_query: the user's question.
    :return: the model's reply text.
    """
    client = openai.OpenAI()
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_query},
    ]
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
    )
    return response.choices[0].message.content
https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/hamilton-integration/hamilton_ui.png -------------------------------------------------------------------------------- /examples/hamilton-integration/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start,opentelemetry] 2 | lancedb 3 | openai 4 | opentelemetry-instrumentation-lancedb 5 | opentelemetry-instrumentation-openai 6 | pyarrow 7 | pydantic 8 | requests 9 | sf-hamilton[visualization] 10 | -------------------------------------------------------------------------------- /examples/hamilton-integration/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/hamilton-integration/statemachine.png -------------------------------------------------------------------------------- /examples/haystack-integration/README.md: -------------------------------------------------------------------------------- 1 | # Haystack + Burr integration 2 | 3 | Haystack is a Python library to build AI pipelines. It assembles `Component` objects into a `Pipeline`, which is a graph of operations. One benefit of Haystack is that it provides many pre-built components to manage documents and interact with LLMs. 4 | 5 | This notebook shows how to convert a Haystack `Component` into a Burr `Action` and a `Pipeline` into a `Graph`. This allows you to integrate Haystack with Burr and leverage other Burr and Burr UI features! 
6 | -------------------------------------------------------------------------------- /examples/haystack-integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/haystack-integration/__init__.py -------------------------------------------------------------------------------- /examples/haystack-integration/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/haystack-integration/statemachine.png -------------------------------------------------------------------------------- /examples/hello-world-counter/README.md: -------------------------------------------------------------------------------- 1 | # Counter 2 | 3 | This is an example of a simple state machine. 4 | 5 | We have three files: 6 | 7 | - [application.py](application.py) -- This contains a mainline to run the counter as well as a function to export the counter (for later use) 8 | - [requirements.txt](requirements.txt) -- Just the requirements. All this needs is Burr/Streamlit 9 | - [streamlit_app.py](streamlit_app.py) -- This contains a simple Streamlit app to interact with the counter. 10 | - [notebook.ipynb](notebook.ipynb) -- A notebook that shows the counter app too. Open the notebook 11 | Open In Colab 12 | 13 | 14 | To run just the application, you can run: 15 | 16 | ```bash 17 | python application.py 18 | ``` 19 | 20 | To run the streamlit app, you can run: 21 | 22 | ```bash 23 | streamlit run streamlit_app.py 24 | ``` 25 | 26 | This will open a chrome window and print out the URL. 
The state machine this encapsulates takes the following form: 27 | 28 | ![State Machine](statemachine.png) 29 | 30 | Note: if you are looking for an example of the class based action API, then 31 | take a look at `application_classbased.py`. 32 | -------------------------------------------------------------------------------- /examples/hello-world-counter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/hello-world-counter/__init__.py -------------------------------------------------------------------------------- /examples/hello-world-counter/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[streamlit,start] 2 | -------------------------------------------------------------------------------- /examples/hello-world-counter/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/hello-world-counter/statemachine.png -------------------------------------------------------------------------------- /examples/image-telephone/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/image-telephone/__init__.py -------------------------------------------------------------------------------- /examples/image-telephone/requirements.txt: -------------------------------------------------------------------------------- 1 | burr 2 | openai 3 | requests 4 | sf-hamilton[visualization] 5 | -------------------------------------------------------------------------------- /examples/image-telephone/statemachine.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/image-telephone/statemachine.png -------------------------------------------------------------------------------- /examples/instructor-gemini-flash/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/instructor-gemini-flash/__init__.py -------------------------------------------------------------------------------- /examples/instructor-gemini-flash/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | google-generativeai 3 | instructor 4 | pydantic 5 | python-dotenv 6 | -------------------------------------------------------------------------------- /examples/instructor-gemini-flash/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/instructor-gemini-flash/statemachine.png -------------------------------------------------------------------------------- /examples/integrations/hamilton/README.md: -------------------------------------------------------------------------------- 1 | # Examples using the Hamilton integration 2 | 3 | Burr has some syntactic sugar for integrating with Hamilton. Find examples of them here. 4 | 5 | ## Examples 6 | 7 | - [Image Telephone](image-telephone/README.md) 8 | -------------------------------------------------------------------------------- /examples/integrations/hamilton/image-telephone/README.md: -------------------------------------------------------------------------------- 1 | This example mirrors the main image-telephone example, but 2 | uses the syntactic sugar for Hamilton integrations. 3 | 4 | For an explanation see the [main example](../../../image-telephone/README.md). 
5 | -------------------------------------------------------------------------------- /examples/integrations/hamilton/image-telephone/requirements.txt: -------------------------------------------------------------------------------- 1 | burr 2 | openai 3 | requests 4 | sf-hamilton 5 | -------------------------------------------------------------------------------- /examples/integrations/hamilton/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/integrations/hamilton/statemachine.png -------------------------------------------------------------------------------- /examples/llm-adventure-game/README.md: -------------------------------------------------------------------------------- 1 | A simple text-based game in the style of 80s adventure games, in which you play 2 | a hungry corgi. 3 | 4 | How to run: 5 | 6 | ``` 7 | OPENAI_API_KEY= python application.py 8 | ``` 9 | 10 | Open the notebook 11 | Open In Colab 12 | 13 | 14 | ![State Machine](statemachine.png) 15 | -------------------------------------------------------------------------------- /examples/llm-adventure-game/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/llm-adventure-game/__init__.py -------------------------------------------------------------------------------- /examples/llm-adventure-game/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | openai 3 | -------------------------------------------------------------------------------- /examples/llm-adventure-game/statemachine.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/llm-adventure-game/statemachine.png -------------------------------------------------------------------------------- /examples/ml-training/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/ml-training/statemachine.png -------------------------------------------------------------------------------- /examples/multi-agent-collaboration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/multi-agent-collaboration/__init__.py -------------------------------------------------------------------------------- /examples/multi-agent-collaboration/hamilton/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/multi-agent-collaboration/hamilton/__init__.py -------------------------------------------------------------------------------- /examples/multi-agent-collaboration/hamilton/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | langchain-community 3 | langchain-core 4 | langchain-experimental 5 | openai 6 | sf-hamilton[visualization] 7 | -------------------------------------------------------------------------------- /examples/multi-agent-collaboration/hamilton/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/multi-agent-collaboration/hamilton/statemachine.png -------------------------------------------------------------------------------- 
/examples/multi-agent-collaboration/lcel/README.md: -------------------------------------------------------------------------------- 1 | # Multi Agent Collaboration 2 | 3 | This example resembles the example found originally [here](https://github.com/langchain-ai/langgraph/blob/main/examples/multi_agent/multi-agent-collaboration.ipynb). 4 | 5 | 6 | # Tracing 7 | You'll see that in `application.py` we 8 | have some lightweight `tracing` set up for Hamilton. This is a simple way to plug into Burr's 9 | tracer functionality -- this will allow you to see more in the Burr UI. 10 | 11 | More functionality is on the roadmap! 12 | 13 | # Running the example 14 | Install the dependencies: 15 | 16 | ```bash 17 | pip install "burr[start]" -r requirements.txt 18 | ``` 19 | 20 | Make sure you have the API Keys in your environment: 21 | 22 | ```bash 23 | export OPENAI_API_KEY=YOUR_KEY 24 | export TAVILY_API_KEY=YOUR_KEY 25 | ``` 26 | 27 | Run the notebook: 28 | 29 | Open In Colab 30 | 31 | or do it manually: 32 | ```bash 33 | jupyter notebook 34 | ``` 35 | and open the notebook `notebook.ipynb`. 36 | 37 | ```bash 38 | python application.py 39 | ``` 40 | Application run: 41 | ![lcel image](statemachine.png) 42 | 43 | # What to adjust 44 | There are a few things: 45 | 46 | 1. The `query` that you provide for the agents to work over. Set this as the initial human message in state. 47 | 2. You can adjust the `prompts` used by the agents in the code. 
48 | -------------------------------------------------------------------------------- /examples/multi-agent-collaboration/lcel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/multi-agent-collaboration/lcel/__init__.py -------------------------------------------------------------------------------- /examples/multi-agent-collaboration/lcel/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | langchain-community 3 | langchain-core 4 | langchain-experimental 5 | openai 6 | -------------------------------------------------------------------------------- /examples/multi-agent-collaboration/lcel/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/multi-agent-collaboration/lcel/statemachine.png -------------------------------------------------------------------------------- /examples/multi-agent-collaboration/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | langchain-community 3 | langchain-core 4 | langchain-experimental 5 | openai 6 | sf-hamilton[visualization] 7 | -------------------------------------------------------------------------------- /examples/multi-modal-chatbot/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/multi-modal-chatbot/.DS_Store -------------------------------------------------------------------------------- /examples/multi-modal-chatbot/__init__.py: -------------------------------------------------------------------------------- 
def render_chat_message(chat_item: dict):
    """Render one chat-history entry in the Streamlit chat UI.

    :param chat_item: dict with ``role``, ``type`` (image/code/text), and
        ``content`` keys.
    """
    role = chat_item["role"]
    content = chat_item["content"]
    with st.chat_message(role):
        kind = chat_item["type"]
        if kind == "image":
            st.image(content)
        elif kind == "code":
            st.code(content)
        elif kind == "text":
            st.write(content)
import datetime

import burr.core
from burr.core import Application, State
from burr.core.action import action


@action(reads=[], writes=["content"])
def dummy_bot(state: State, user_input: str):
    """Toy chatbot action: tells the time if asked, otherwise prompts the user.

    NOTE(review): the original declared ``writes=[]`` while updating the
    ``content`` state field — the declaration should match the update; confirm
    against Burr's action-validation behavior.

    :param state: current application state (no fields read).
    :param user_input: raw user message; any message containing "time" gets
        the current timestamp back.
    :return: tuple of (result dict, state updated with ``content``).
    """
    if "time" in user_input:
        current_time = datetime.datetime.now()
        reply = f"It is currently {current_time}"
    else:
        reply = "🤖 Ask me about the time"

    results = dict(content=reply)
    return results, state.update(**results)


def build_application() -> Application:
    """Build the single-action looping chatbot application."""
    return (
        burr.core.ApplicationBuilder()
        .with_actions(dummy_bot)
        .with_transitions(("dummy_bot", "dummy_bot"))
        .with_identifiers(app_id="burr-openai")
        .with_entrypoint("dummy_bot")
        .build()
    )


if __name__ == "__main__":
    app = build_application()
    app.visualize(
        output_file_path="statemachine",
        include_conditions=False,
        view=True,
        format="png",
    )
/examples/openai-compatible-agent/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | fastapi 3 | openai 4 | uvicorn 5 | -------------------------------------------------------------------------------- /examples/openai-compatible-agent/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/openai-compatible-agent/statemachine.png -------------------------------------------------------------------------------- /examples/opentelemetry/README.md: -------------------------------------------------------------------------------- 1 | # OpenTelemetry + Burr 2 | 3 | This goes over how to use Burr with OpenTelemetry. 4 | 5 | We have two modes: 6 | 7 | 1. Log OpenTelemetry traces to the Burr UI 8 | 2. Log Burr to OpenTelemetry 9 | 10 | See [notebook.ipynb](./notebook.ipynb) for a simple overview. 
11 | See [application.py](./application.py) for the full code 12 | 13 | See the [documentation](https://burr.dagworks.io/concepts/additional-visibility/#open-telemetry) for more info 14 | -------------------------------------------------------------------------------- /examples/opentelemetry/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/opentelemetry/__init__.py -------------------------------------------------------------------------------- /examples/opentelemetry/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/opentelemetry/statemachine.png -------------------------------------------------------------------------------- /examples/other-examples/cowsay/README.md: -------------------------------------------------------------------------------- 1 | # Cowsay 2 | 3 | This is an example of a simple infinite state machine. 4 | 5 | We have four files: 6 | 7 | - [application.py](application.py) -- This contains a mainline to run the cowsay app as well as a function to export the app (for later use) 8 | - [requirements.txt](requirements.txt) -- Just the requirements. All this needs is Burr/Streamlit/cowsay 9 | - [streamlit_app.py](streamlit_app.py) -- This contains a simple Streamlit app to interact with the cow 10 | - [notebook.ipynb](notebook.ipynb) -- A notebook that helps show things. 11 | Open In Colab 12 | 13 | 14 | To run just the application, you can run: 15 | 16 | ```bash 17 | python application.py 18 | ``` 19 | 20 | Note this is an infinite state machine, so this will run forever! Thus remember to ctrl-c eventually.
21 | To run the streamlit app, you can run: 22 | 23 | ```bash 24 | streamlit run streamlit_app.py 25 | ``` 26 | 27 | This allows you to press a button and see the cow say something (or see it decide not to speak). 28 | 29 | This will open a chrome window and print out the URL. The state machine this encapsulates takes the following form: 30 | 31 | ![State Machine](digraph.png) 32 | -------------------------------------------------------------------------------- /examples/other-examples/cowsay/digraph: -------------------------------------------------------------------------------- 1 | digraph { 2 | graph [compound=false concentrate=false rankdir=TB ranksep=0.4] 3 | say_nothing [label=say_nothing shape=box style=rounded] 4 | say_hello [label=say_hello shape=box style=rounded] 5 | cow_should_speak [label=cow_should_speak shape=box style=rounded] 6 | cow_should_speak -> say_hello [label=cow_should_speak style=dashed] 7 | say_hello -> cow_should_speak [style=solid] 8 | cow_should_speak -> say_nothing [label="not cow_should_speak" style=dashed] 9 | say_nothing -> cow_should_speak [style=solid] 10 | } 11 | -------------------------------------------------------------------------------- /examples/other-examples/cowsay/digraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/other-examples/cowsay/digraph.png -------------------------------------------------------------------------------- /examples/other-examples/cowsay/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[streamlit,graphviz,start] 2 | cowsay 3 | -------------------------------------------------------------------------------- /examples/other-examples/hamilton-multi-modal/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/other-examples/hamilton-multi-modal/__init__.py -------------------------------------------------------------------------------- /examples/parallelism/README.md: -------------------------------------------------------------------------------- 1 | # Parallelism 2 | 3 | In this example we go over Burr's parallelism capabilities. It is based on the documentation (https://burr.dagworks.io/concepts/parallelism/), demonstrating the `MapStates` capabilities. 4 | 5 | See [the notebook](./notebook.ipynb) for the full example. Or 6 | Open In Colab 7 | 8 | 9 | You can follow along with this [youtube video](https://youtu.be/G7lw63IBSmY) 10 | -------------------------------------------------------------------------------- /examples/pytest/burr_sdlc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/pytest/burr_sdlc.png -------------------------------------------------------------------------------- /examples/pytest/burr_ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/pytest/burr_ui.png -------------------------------------------------------------------------------- /examples/pytest/diagnosis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/pytest/diagnosis.png -------------------------------------------------------------------------------- /examples/pytest/e2e_test_cases.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "action": "run_hypothesis", 4 | "name": "run_hypothesis_0", 5 | "input_state": { 6 | "audio": "Patient exhibits mucus 
dripping from nostrils and coughing." 7 | }, 8 | "expected_state": { 9 | "final_diagnosis": "Common cold" 10 | } 11 | }, 12 | { 13 | "action": "run_hypothesis", 14 | "name": "run_hypothesis_1", 15 | "input_state": { 16 | "audio": "Patient has a limp and is unable to flex right ankle. Ankle is swollen." 17 | }, 18 | "expected_state": { 19 | "final_diagnosis": "Sprained ankle" 20 | } 21 | }, 22 | { 23 | "action": "run_hypothesis", 24 | "name": "run_hypothesis_2", 25 | "input_state": { 26 | "audio": "Patient fell off and landed on their right arm. Their right wrist is swollen, they can still move their fingers, and there is only minor pain or discomfort when the wrist is moved or touched." 27 | }, 28 | "expected_state": { 29 | "final_diagnosis": "Healthy individual" 30 | } 31 | } 32 | ] 33 | -------------------------------------------------------------------------------- /examples/pytest/hypotheses_test_cases.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "action": "run_hypothesis", 4 | "name": "run_hypothesis_0", 5 | "input_state": { 6 | "transcription": "Patient exhibits mucus dripping from nostrils and coughing.", 7 | "hypothesis": "Common cold" 8 | }, 9 | "expected_state": { 10 | "diagnosis": "yes" 11 | } 12 | }, 13 | { 14 | "action": "run_hypothesis", 15 | "name": "run_hypothesis_1", 16 | "input_state": { 17 | "transcription": "Patient has a limp and is unable to flex right ankle. Ankle is swollen.", 18 | "hypothesis": "Sprained ankle" 19 | }, 20 | "expected_state": { 21 | "diagnosis": "yes" 22 | } 23 | }, 24 | { 25 | "action": "run_hypothesis", 26 | "name": "run_hypothesis_2", 27 | "input_state": { 28 | "transcription": "Patient fell off and landed on their right arm. 
Their right wrist is swollen, they can still move their fingers, and there is only minor pain or discomfort when the wrist is moved or touched.", 29 | "hypothesis": "Broken arm" 30 | }, 31 | "expected_state": { 32 | "diagnosis": "no" 33 | } 34 | } 35 | ] 36 | -------------------------------------------------------------------------------- /examples/pytest/requirements.txt: -------------------------------------------------------------------------------- 1 | burr 2 | pytest 3 | pytest-harvest 4 | -------------------------------------------------------------------------------- /examples/rag-lancedb-ingestion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/rag-lancedb-ingestion/__init__.py -------------------------------------------------------------------------------- /examples/rag-lancedb-ingestion/burr-ui.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/rag-lancedb-ingestion/burr-ui.gif -------------------------------------------------------------------------------- /examples/rag-lancedb-ingestion/requirements.txt: -------------------------------------------------------------------------------- 1 | beautifulsoup4 2 | burr[start] 3 | dlt[lancedb] 4 | feedparser 5 | lancedb 6 | openai 7 | opentelemetry-instrumentation-lancedb 8 | opentelemetry-instrumentation-openai 9 | -------------------------------------------------------------------------------- /examples/rag-lancedb-ingestion/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/rag-lancedb-ingestion/statemachine.png -------------------------------------------------------------------------------- 
import base64
import getpass
import hashlib
import os


def set_environment_variables():
    """Prompt for an OpenAI key (if unset) and configure dlt's LanceDB destination env vars."""
    import dlt.destinations.impl.lancedb.models  # noqa

    if os.environ.get("OPENAI_API_KEY") is None:
        os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter OPENAI_API_KEY: ")

    os.environ["DESTINATION__LANCEDB__EMBEDDING_MODEL_PROVIDER"] = "openai"
    os.environ["DESTINATION__LANCEDB__EMBEDDING_MODEL"] = "text-embedding-3-small"

    os.environ["DESTINATION__LANCEDB__CREDENTIALS__URI"] = ".lancedb"
    os.environ["DESTINATION__LANCEDB__CREDENTIALS__EMBEDDING_MODEL_PROVIDER_API_KEY"] = os.environ[
        "OPENAI_API_KEY"
    ]


def _compact_hash(digest: bytes) -> str:
    """Compact the hash to a string that's safe to pass around."""
    return base64.urlsafe_b64encode(digest).decode()


def hash_primitive(obj, *args, **kwargs) -> str:
    """Convert the primitive to a string and hash it.

    NOTE: md5 is used as a fast content fingerprint here, not for anything
    security-sensitive.
    """
    hash_object = hashlib.md5(str(obj).encode())
    return _compact_hash(hash_object.digest())


def hash_set(obj, *args, **kwargs) -> str:
    """Hash each element of the set, then sort hashes, and create a hash of hashes.

    Sorting the per-element hashes makes the result order-independent: any
    iterables containing the same elements produce the same digest.
    """
    sorted_hashes = sorted(hash_primitive(elem) for elem in obj)

    hash_object = hashlib.sha224()
    # Renamed the loop variable from `hash`, which shadowed the builtin.
    for element_hash in sorted_hashes:
        hash_object.update(element_hash.encode())

    return _compact_hash(hash_object.digest())
[Parallel Multi Agent Workflows with Burr](https://blog.dagworks.io/p/93838d1f-52b5-4a72-999f-9cab9733d4fe) 5 | 2. [Parallel, Fault-Tolerant Agents with Burr/Ray](https://blog.dagworks.io/p/5baf1077-2490-44bc-afff-fcdafe18e819) 6 | 7 | You can find basic code in [application.py](application.py) and run it in [notebook.ipynb](notebook.ipynb). Read the blog posts to get a sense for the motivation/design behind this. 8 | -------------------------------------------------------------------------------- /examples/ray/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/ray/__init__.py -------------------------------------------------------------------------------- /examples/ray/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/ray/statemachine.png -------------------------------------------------------------------------------- /examples/ray/substatemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/ray/substatemachine.png -------------------------------------------------------------------------------- /examples/recursive/README.md: -------------------------------------------------------------------------------- 1 | # Recursive Applications 2 | 3 | Burr supports running applications inside applications. Currently this is done through sharing links in tracking, 4 | but we will be adding more features to make this more ergonomic. 5 | 6 | ## Example Structure 7 | 8 | This example runs a simple applications. We: 9 | 1. Gather input from the user for a poem subject, a maximum number of drafts, and a list of poem types in parallel 10 | 2. 
Run a recursive application that generates a poem for each poem type 11 | 3. Display the generated poems back to the user 12 | 13 | The overall application looks like this: 14 | 15 | ![Recursive Application](statemachine.png) 16 | 17 | The `generate_all_poems` then runs `n` (one for each poem type) instances of the sub-application: 18 | 19 | ![Recursive Application](statemachine_sub.png) 20 | 21 | ## Running the example 22 | 23 | To run this, you'll need an OpenAI key set. 24 | Then you can run: 25 | 26 | ```bash 27 | export OPENAI_API_KEY=... 28 | python application.py 29 | ``` 30 | 31 | This will generate three poems about state machines, one for each type of poem, then combine at the end. 32 | 33 | To change the poem type, edit `application.py` and change the `poem_types` list in the mainline. 34 | 35 | Run the example with `python application.py`. Then, ensure `burr` is running, and navigate to the 36 | UI: [http://localhost:7241/project/demo:parallelism_poem_generation](http://localhost:7241/project/demo:parallelism_poem_generation). 37 | 38 | 39 | For more information, read the documentation on [recursive applications](https://burr.dagworks.io/concepts/recursion). 40 |
Or 16 | Open In Colab 17 | 18 | -------------------------------------------------------------------------------- /examples/simple-chatbot-intro/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/simple-chatbot-intro/__init__.py -------------------------------------------------------------------------------- /examples/simple-chatbot-intro/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | -------------------------------------------------------------------------------- /examples/simple-chatbot-intro/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/simple-chatbot-intro/statemachine.png -------------------------------------------------------------------------------- /examples/simple-chatbot-intro/statemachine_initial.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/simple-chatbot-intro/statemachine_initial.png -------------------------------------------------------------------------------- /examples/simple-chatbot-intro/statemachine_safe.png: -------------------------------------------------------------------------------- 1 | digraph { 2 | graph [compound=false concentrate=false rankdir=TB ranksep=0.4] 3 | human_input [label=human_input shape=box style=rounded] 4 | input__prompt [label="input: prompt" shape=oval style=dashed] 5 | input__prompt -> human_input 6 | ai_response [label=ai_response shape=box style=rounded] 7 | safety_check [label=safety_check shape=box style=rounded] 8 | unsafe_response [label=unsafe_response shape=box style=rounded] 9 | human_input -> safety_check [style=solid] 10 
| safety_check -> unsafe_response [label="safe=False" style=dashed] 11 | safety_check -> ai_response [label="safe=True" style=dashed] 12 | unsafe_response -> human_input [style=solid] 13 | ai_response -> human_input [style=solid] 14 | } 15 | -------------------------------------------------------------------------------- /examples/simulation/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/simulation/statemachine.png -------------------------------------------------------------------------------- /examples/streaming-fastapi/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/streaming-fastapi/__init__.py -------------------------------------------------------------------------------- /examples/streaming-fastapi/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/streaming-fastapi/statemachine.png -------------------------------------------------------------------------------- /examples/streaming-overview/README.md: -------------------------------------------------------------------------------- 1 | # Streaming chatbot 2 | 3 | This example shows how we can use the streaming API 4 | to respond to return quicker results to the user and build a more 5 | seamless interactive experience. 6 | 7 | This is the same chatbot as the one in the `chatbot` example, 8 | but it is built slightly differently (for streaming purposes). 9 | 10 | ## How to use 11 | 12 | Run `streamlit run streamlit_app.py` from the command line and you will see the chatbot in action. 13 | Open up the burr UI `burr` and you can track the chatbot. 
14 | 15 | ## Async 16 | 17 | We also have an async version in [async_application.py](async_application.py) 18 | which demonstrates how to use streaming async. We have not hooked this up 19 | to a streamlit application yet, but that should be trivial. 20 | 21 | ## Notebook 22 | The notebook also shows how things work. 23 | Open In Colab 24 | 25 | -------------------------------------------------------------------------------- /examples/streaming-overview/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/streaming-overview/__init__.py -------------------------------------------------------------------------------- /examples/streaming-overview/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | -------------------------------------------------------------------------------- /examples/streaming-overview/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/streaming-overview/statemachine.png -------------------------------------------------------------------------------- /examples/templates/README.md: -------------------------------------------------------------------------------- 1 | # Templates 2 | 3 | Some bones of a Burr application to get you started. 4 | 5 | ## multi_modal_agent 6 | Simple example of a multi-modal agent that can switch between different modalities. 7 | 8 | ![multi_modal_agent](multi_modal_agent.png) 9 | 10 | ## multi_agent_collaboration 11 | Example of a multi-agent system where agents can collaborate to solve a task. 12 | 13 | ![multi_agent_collaboration](multi_agent_collaboration.png) 14 | 15 | ## agent_supervisor 16 | Example of a multi-agent system where agents are supervised by a supervisor agent.
17 | 18 | ![agent_supervisor](agent_supervisor.png) 19 | 20 | ## hierarchical_agent_teams 21 | Example of a multi-agent system where agents are organized in hierarchical teams. 22 | Recursively each team mirrors the `agent_supervisor` example above. 23 | 24 | Note: we're working on a better API to express this so you can also see the 25 | entire hierarchy in the diagram. 26 | 27 | ![hierarchical_agent_teams](hierarchical_agent_teams.png) 28 | -------------------------------------------------------------------------------- /examples/templates/agent_supervisor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/templates/agent_supervisor.png -------------------------------------------------------------------------------- /examples/templates/hierarchical_agent_teams.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/templates/hierarchical_agent_teams.png -------------------------------------------------------------------------------- /examples/templates/multi_agent_collaboration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/templates/multi_agent_collaboration.png -------------------------------------------------------------------------------- /examples/templates/multi_modal_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/templates/multi_modal_agent.png -------------------------------------------------------------------------------- /examples/test-case-creation/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/test-case-creation/__init__.py -------------------------------------------------------------------------------- /examples/test-case-creation/prompt_for_more.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "action": "prompt_for_more", 4 | "name": "demo-test", 5 | "input_state": { 6 | "prompt": "", 7 | "chat_history": [] 8 | }, 9 | "expected_state": { 10 | "prompt": "", 11 | "chat_history": [], 12 | "response": { 13 | "content": "None of the response modes I support apply to your question. Please clarify?", 14 | "type": "text", 15 | "role": "assistant" 16 | } 17 | } 18 | } 19 | ] 20 | -------------------------------------------------------------------------------- /examples/test-case-creation/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | -------------------------------------------------------------------------------- /examples/test-case-creation/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/test-case-creation/statemachine.png -------------------------------------------------------------------------------- /examples/tool-calling/README.md: -------------------------------------------------------------------------------- 1 | # Tool-calling 2 | 3 | This example shows the basic tool-calling design pattern for agents. 4 | 5 | While this leverages the [OpenAI API](https://platform.openai.com/docs/guides/function-calling), the lessons are the same whether you use different tool-calling APIs (E.G. [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use)), or general structured outputs (E.G. 
with [instructor](https://useinstructor.com/)). 6 | 7 | Rather than explain the code here, we direct you to the [blog post](https://blog.dagworks.io/p/agentic-design-pattern-1-tool-calling) 8 | 9 | # Files 10 | 11 | - [application.py](application.py) -- contains code for calling tools + orchestrating them 12 | - [notebook.ipynb](notebook.ipynb) -- walks you through the example with the same code 13 | - [requirements.txt](requirements.txt) -- install this to get the right environment 14 |
6 | 7 | Note that you'll likely be integrating tracing into whatever framework (langchain/hamilton) you're using -- we're 8 | still building out capabilities to do this more automatically. 9 | 10 | These traces are used in the Burr UI. E.G. as follows: 11 | 12 | ![tracing](tracing_screencap.png) 13 | 14 | The notebook also shows how things work. 15 | Open In Colab 16 | 17 | -------------------------------------------------------------------------------- /examples/tracing-and-spans/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/tracing-and-spans/__init__.py -------------------------------------------------------------------------------- /examples/tracing-and-spans/requirements.txt: -------------------------------------------------------------------------------- 1 | burr[start] 2 | -------------------------------------------------------------------------------- /examples/tracing-and-spans/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/tracing-and-spans/statemachine.png -------------------------------------------------------------------------------- /examples/tracing-and-spans/tracing_screencap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/tracing-and-spans/tracing_screencap.png -------------------------------------------------------------------------------- /examples/typed-state/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/typed-state/__init__.py 
#!/bin/bash

# Select which endpoint to hit based on the first CLI argument:
#   (no argument)    -> social_media_post
#   streaming        -> social_media_post_streaming
#   streaming_async  -> social_media_post_streaming_async
case "$1" in
  streaming_async) ENDPOINT="social_media_post_streaming_async" ;;
  streaming)       ENDPOINT="social_media_post_streaming" ;;
  *)               ENDPOINT="social_media_post" ;;
esac

# Stream the response unbuffered, pretty-printing each JSON line as it arrives.
curl -X 'GET' "http://localhost:7443/$ENDPOINT" \
  -s -H 'Accept: application/json' \
  --no-buffer | jq --unbuffered -c '.' | while IFS= read -r line; do
  if [[ "$line" != "" ]]; then # Check for non-empty lines
    clear
    echo "$line" | jq --color-output . # re-render with colors
    sleep .01 # Add a small delay for visual clarity
  fi
done
4 | 5 | In this example, you'll learn how `instructor` can make LLMs reliably produce structured outputs, and how `burr` helps you introspect, debug, and create tests for your application. 6 | 7 | While a LLM/agent demo takes a few lines of code, production user-facing features aren't as simple. To be trustworthy, they need to work as intended consistently, requiring much more scrutiny and the right instrumentation. Your agent won't be perfect from the start, and tools should help you improve it iteratively and ship updates with confidence. 8 | 9 | Building our app with Burr provides several benefits that we'll detail next: 10 | - **Observability**: monitor in real-time and log the execution of your `Application` and view it in Burr's web user interface 11 | - **Persistence**: At any point, you can save the application `State`. This allows you to create user sessions (e.g., the conversation history menu in ChatGPT), which helps developers investigate bugs, iterate over possible code paths, and generate test cases to create guardrails. 12 | - **Portability**: your `Application` can run in a notebook, as a script, as a web service, or anywhere Python runs. We'll show how to use Burr with [FastAPI](https://fastapi.tiangolo.com/).
import contextlib
import logging
from typing import Optional

import application
import fastapi
import uvicorn

from burr.core import Application

logger = logging.getLogger(__name__)

# Global holder for the Burr application; populated on FastAPI startup.
burr_app: Optional[Application] = None


def get_burr_app() -> Application:
    """Retrieve the global Burr app."""
    if burr_app is not None:
        return burr_app
    raise RuntimeError("Burr app wasn't instantiated.")


@contextlib.asynccontextmanager
async def lifespan(app: fastapi.FastAPI):
    """Instantiate the Burr application on FastAPI startup."""
    # set value for the global `burr_app` variable
    global burr_app
    burr_app = application.build_application()
    yield


app = fastapi.FastAPI(lifespan=lifespan)


@app.get("/social_media_post")
def social_media_post(youtube_url: str, burr_app: Application = fastapi.Depends(get_burr_app)):
    """Creates a completion for the chat message"""
    _, _, final_state = burr_app.run(
        halt_after=["generate_post"],
        inputs={"youtube_url": youtube_url},
    )
    post = final_state["post"]
    return {"formatted_post": post.display(), "post": post.model_dump()}


if __name__ == "__main__":
    uvicorn.run("server:app", host="127.0.0.1", port=7443, reload=True)
-------------------------------------------------------------------------------- /examples/youtube-to-social-media-post/statemachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/examples/youtube-to-social-media-post/statemachine.png -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 100 3 | exclude = build/,.git/,venv/ 4 | ignore = 5 | # whitespace before ':' 6 | E203, 7 | # module level import not at top of file 8 | E402, 9 | # line too long 10 | E501, 11 | # line break before binary operator 12 | W503, 13 | # invalid escape sequence 14 | W605 15 | 16 | [isort] 17 | known_first_party=burr 18 | known_local_folder=tests 19 | -------------------------------------------------------------------------------- /telemetry/ui/.eslintignore: -------------------------------------------------------------------------------- 1 | build/* 2 | tailwind.config.js 3 | .eslintrc.js 4 | src/api # generated code 5 | -------------------------------------------------------------------------------- /telemetry/ui/.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | env: { 3 | browser: true, 4 | es2021: true 5 | }, 6 | extends: [ 7 | 'eslint:recommended', 8 | 'plugin:react/recommended', 9 | 'plugin:@typescript-eslint/recommended', 10 | 'plugin:prettier/recommended' 11 | ], 12 | overrides: [], 13 | parser: '@typescript-eslint/parser', 14 | parserOptions: { 15 | ecmaVersion: 'latest', 16 | sourceType: 'module' 17 | }, 18 | plugins: ['react', '@typescript-eslint', 'react-hooks'], 19 | rules: { 20 | 'react-hooks/rules-of-hooks': 'error', // Checks rules of Hooks 21 | 'react-hooks/exhaustive-deps': 'off', // Checks effect dependencies 22 |
'react/react-in-jsx-scope': 'off', 23 | '@typescript-eslint/ban-ts-comment': 'off', 24 | 'react/prop-types': 'off', //Appears to be busted: see https://stackoverflow.com/questions/38684925/react-eslint-error-missing-in-props-validation 25 | eqeqeq: 'error', 26 | 'no-console': 'warn' 27 | }, 28 | settings: { 29 | react: { 30 | version: 'detect' 31 | } 32 | } 33 | }; 34 | -------------------------------------------------------------------------------- /telemetry/ui/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # production 12 | /build 13 | 14 | # misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | -------------------------------------------------------------------------------- /telemetry/ui/.prettierignore: -------------------------------------------------------------------------------- 1 | build/* 2 | tailwind.config.js 3 | .eslintrc.js 4 | -------------------------------------------------------------------------------- /telemetry/ui/.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "semi": true, 3 | "tabWidth": 2, 4 | "printWidth": 100, 5 | "singleQuote": true, 6 | "trailingComma": "none", 7 | "jsxBracketSameLine": true 8 | } 9 | -------------------------------------------------------------------------------- /telemetry/ui/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/telemetry/ui/public/favicon.ico --------------------------------------------------------------------------------
/telemetry/ui/public/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/burr/853675d2a814c6fde1525010a696d10c033af410/telemetry/ui/public/logo.png -------------------------------------------------------------------------------- /telemetry/ui/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "Burr", 3 | "name": "Burr tracking UI", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | }, 10 | { 11 | "src": "logo.png", 12 | "type": "image/png", 13 | "sizes": "192x192" 14 | }, 15 | { 16 | "src": "logo.png", 17 | "type": "image/png", 18 | "sizes": "512x512" 19 | } 20 | ], 21 | "start_url": ".", 22 | "display": "standalone", 23 | "theme_color": "#000000", 24 | "background_color": "#ffffff" 25 | } 26 | -------------------------------------------------------------------------------- /telemetry/ui/public/robots.txt: -------------------------------------------------------------------------------- 1 | # https://www.robotstxt.org/robotstxt.html 2 | User-agent: * 3 | Disallow: 4 | -------------------------------------------------------------------------------- /telemetry/ui/scripts/client-gen.sh: -------------------------------------------------------------------------------- 1 | # run this from the ui/ directory 2 | # ensure your server is running on port 7241 (or change the port below) 3 | # TODO -- move to a package.json script 4 | npx openapi-typescript-codegen --input http://localhost:7241/openapi.json --output ./src/api 5 | -------------------------------------------------------------------------------- /telemetry/ui/scripts/token_costs.py: -------------------------------------------------------------------------------- 1 | url = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json" 2 | 3 | # Load the data 4 | 
import requests 5 | import json 6 | 7 | response = requests.get(url) 8 | data = response.json() 9 | 10 | keys_wanted = ["input_cost_per_token", "output_cost_per_token", "max_tokens", "max_input_tokens", "max_output_tokens"] 11 | 12 | del data["sample_spec"] 13 | # Extract the keys we want 14 | for model_entry in data: 15 | for key in list(data[model_entry].keys()): 16 | if key not in keys_wanted: 17 | del data[model_entry][key] 18 | 19 | # save the data 20 | with open("model_costs.json", "w") as f: 21 | json.dump(data, f) 22 | -------------------------------------------------------------------------------- /telemetry/ui/src/App.css: -------------------------------------------------------------------------------- 1 | .App { 2 | text-align: center; 3 | } 4 | 5 | .App-logo { 6 | height: 40vmin; 7 | pointer-events: none; 8 | } 9 | 10 | @media (prefers-reduced-motion: no-preference) { 11 | .App-logo { 12 | animation: App-logo-spin infinite 20s linear; 13 | } 14 | } 15 | 16 | .App-header { 17 | background-color: #282c34; 18 | min-height: 100vh; 19 | display: flex; 20 | flex-direction: column; 21 | align-items: center; 22 | justify-content: center; 23 | font-size: calc(10px + 2vmin); 24 | color: white; 25 | } 26 | 27 | .App-link { 28 | color: #61dafb; 29 | } 30 | 31 | @keyframes App-logo-spin { 32 | from { 33 | transform: rotate(0deg); 34 | } 35 | to { 36 | transform: rotate(360deg); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /telemetry/ui/src/App.test.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { render, screen } from '@testing-library/react'; 3 | import App from './App'; 4 | 5 | test('renders learn react link', () => { 6 | render(); 7 | const linkElement = screen.getByText(/learn react/i); 8 | expect(linkElement).toBeInTheDocument(); 9 | }); 10 | -------------------------------------------------------------------------------- 
/telemetry/ui/src/api/core/ApiError.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { ApiRequestOptions } from './ApiRequestOptions'; 6 | import type { ApiResult } from './ApiResult'; 7 | 8 | export class ApiError extends Error { 9 | public readonly url: string; 10 | public readonly status: number; 11 | public readonly statusText: string; 12 | public readonly body: any; 13 | public readonly request: ApiRequestOptions; 14 | 15 | constructor(request: ApiRequestOptions, response: ApiResult, message: string) { 16 | super(message); 17 | 18 | this.name = 'ApiError'; 19 | this.url = response.url; 20 | this.status = response.status; 21 | this.statusText = response.statusText; 22 | this.body = response.body; 23 | this.request = request; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/core/ApiRequestOptions.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type ApiRequestOptions = { 6 | readonly method: 'GET' | 'PUT' | 'POST' | 'DELETE' | 'OPTIONS' | 'HEAD' | 'PATCH'; 7 | readonly url: string; 8 | readonly path?: Record; 9 | readonly cookies?: Record; 10 | readonly headers?: Record; 11 | readonly query?: Record; 12 | readonly formData?: Record; 13 | readonly body?: any; 14 | readonly mediaType?: string; 15 | readonly responseHeader?: string; 16 | readonly errors?: Record; 17 | }; 18 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/core/ApiResult.ts: -------------------------------------------------------------------------------- 1 | /* generated using 
openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type ApiResult = { 6 | readonly url: string; 7 | readonly ok: boolean; 8 | readonly status: number; 9 | readonly statusText: string; 10 | readonly body: any; 11 | }; 12 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/core/OpenAPI.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { ApiRequestOptions } from './ApiRequestOptions'; 6 | 7 | type Resolver = (options: ApiRequestOptions) => Promise; 8 | type Headers = Record; 9 | 10 | export type OpenAPIConfig = { 11 | BASE: string; 12 | VERSION: string; 13 | WITH_CREDENTIALS: boolean; 14 | CREDENTIALS: 'include' | 'omit' | 'same-origin'; 15 | TOKEN?: string | Resolver | undefined; 16 | USERNAME?: string | Resolver | undefined; 17 | PASSWORD?: string | Resolver | undefined; 18 | HEADERS?: Headers | Resolver | undefined; 19 | ENCODE_PATH?: ((path: string) => string) | undefined; 20 | }; 21 | 22 | export const OpenAPI: OpenAPIConfig = { 23 | BASE: '', 24 | VERSION: '0.1.0', 25 | WITH_CREDENTIALS: false, 26 | CREDENTIALS: 'include', 27 | TOKEN: undefined, 28 | USERNAME: undefined, 29 | PASSWORD: undefined, 30 | HEADERS: undefined, 31 | ENCODE_PATH: undefined 32 | }; 33 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/ActionModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents an action for storing/visualization in the UI 7 | */ 8 | export type 
ActionModel = { 9 | type?: string; 10 | name: string; 11 | reads: Array; 12 | writes: Array; 13 | code: string; 14 | inputs?: Array; 15 | optional_inputs?: Array; 16 | }; 17 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/AnnotationCreate.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { AnnotationObservation } from './AnnotationObservation'; 6 | /** 7 | * Generic link for indexing job -- can be exposed in 'admin mode' in the UI 8 | */ 9 | export type AnnotationCreate = { 10 | span_id: string | null; 11 | step_name: string; 12 | tags: Array; 13 | observations: Array; 14 | }; 15 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/AnnotationDataPointer.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type AnnotationDataPointer = { 6 | type: AnnotationDataPointer.type; 7 | field_name: string; 8 | span_id: string | null; 9 | }; 10 | export namespace AnnotationDataPointer { 11 | export enum type { 12 | STATE_FIELD = 'state_field', 13 | ATTRIBUTE = 'attribute' 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/AnnotationObservation.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { AnnotationDataPointer } from './AnnotationDataPointer'; 6 | export type AnnotationObservation = { 7 | 
data_fields: Record; 8 | thumbs_up_thumbs_down: boolean | null; 9 | data_pointers: Array; 10 | }; 11 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/AnnotationOut.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { AnnotationObservation } from './AnnotationObservation'; 6 | /** 7 | * Generic link for indexing job -- can be exposed in 'admin mode' in the UI 8 | */ 9 | export type AnnotationOut = { 10 | span_id: string | null; 11 | step_name: string; 12 | tags: Array; 13 | observations: Array; 14 | id: number; 15 | project_id: string; 16 | app_id: string; 17 | partition_key: string | null; 18 | step_sequence_id: number; 19 | created: string; 20 | updated: string; 21 | }; 22 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/AnnotationUpdate.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { AnnotationObservation } from './AnnotationObservation'; 6 | /** 7 | * Generic link for indexing job -- can be exposed in 'admin mode' in the UI 8 | */ 9 | export type AnnotationUpdate = { 10 | span_id?: string | null; 11 | step_name: string; 12 | tags?: Array | null; 13 | observations: Array; 14 | }; 15 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/ApplicationLogs.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | 
import type { ApplicationModel } from './ApplicationModel'; 6 | import type { ChildApplicationModel } from './ChildApplicationModel'; 7 | import type { PointerModel } from './PointerModel'; 8 | import type { Step } from './Step'; 9 | /** 10 | * Application logs are purely flat -- 11 | * we will likely be rethinking this but for now this provides for easy parsing. 12 | */ 13 | export type ApplicationLogs = { 14 | application: ApplicationModel; 15 | children: Array; 16 | steps: Array; 17 | parent_pointer?: PointerModel | null; 18 | spawning_parent_pointer?: PointerModel | null; 19 | }; 20 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/ApplicationModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { ActionModel } from './ActionModel'; 6 | import type { TransitionModel } from './TransitionModel'; 7 | /** 8 | * Pydantic model that represents an application for storing/visualization in the UI 9 | */ 10 | export type ApplicationModel = { 11 | type?: string; 12 | entrypoint: string; 13 | actions: Array; 14 | transitions: Array; 15 | }; 16 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/ApplicationPage.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { ApplicationSummary } from './ApplicationSummary'; 6 | export type ApplicationPage = { 7 | applications: Array; 8 | total: number; 9 | has_another_page: boolean; 10 | }; 11 | -------------------------------------------------------------------------------- 
/telemetry/ui/src/api/models/ApplicationSummary.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { PointerModel } from './PointerModel'; 6 | export type ApplicationSummary = { 7 | app_id: string; 8 | partition_key: string | null; 9 | first_written: string; 10 | last_written: string; 11 | num_steps: number; 12 | tags: Record; 13 | parent_pointer?: PointerModel | null; 14 | spawning_parent_pointer?: PointerModel | null; 15 | }; 16 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/AttributeModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Represents a logged artifact 7 | */ 8 | export type AttributeModel = { 9 | type?: string; 10 | key: string; 11 | action_sequence_id: number; 12 | span_id: string | null; 13 | value: Record | string | number | boolean | null; 14 | tags: Record; 15 | time_logged?: string | null; 16 | }; 17 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/BackendSpec.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Generic link for indexing job -- can be exposed in 'admin mode' in the UI 7 | */ 8 | export type BackendSpec = { 9 | indexing: boolean; 10 | snapshotting: boolean; 11 | supports_demos: boolean; 12 | supports_annotations: boolean; 13 | }; 14 | -------------------------------------------------------------------------------- 
/telemetry/ui/src/api/models/BeginEntryModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents an entry for the beginning of a step 7 | */ 8 | export type BeginEntryModel = { 9 | type?: string; 10 | start_time: string; 11 | action: string; 12 | inputs: Record; 13 | sequence_id: number; 14 | }; 15 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/BeginSpanModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents an entry for the beginning of a span 7 | */ 8 | export type BeginSpanModel = { 9 | type?: string; 10 | start_time: string; 11 | action_sequence_id: number; 12 | span_id: string; 13 | span_name: string; 14 | parent_span_id: string | null; 15 | span_dependencies: Array; 16 | }; 17 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/ChatItem.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model for a chat item. This is used to render the chat history. 
7 | */ 8 | export type ChatItem = { 9 | content: string; 10 | type: ChatItem.type; 11 | role: ChatItem.role; 12 | }; 13 | export namespace ChatItem { 14 | export enum type { 15 | IMAGE = 'image', 16 | TEXT = 'text', 17 | CODE = 'code', 18 | ERROR = 'error' 19 | } 20 | export enum role { 21 | USER = 'user', 22 | ASSISTANT = 'assistant' 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/ChildApplicationModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { PointerModel } from './PointerModel'; 6 | /** 7 | * Stores data about a child application (either a fork or a spawned application). 8 | * This allows us to link from parent -> child in the UI. 9 | */ 10 | export type ChildApplicationModel = { 11 | type?: string; 12 | child: PointerModel; 13 | event_time: string; 14 | event_type: ChildApplicationModel.event_type; 15 | sequence_id: number | null; 16 | }; 17 | export namespace ChildApplicationModel { 18 | export enum event_type { 19 | FORK = 'fork', 20 | SPAWN_START = 'spawn_start' 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/DraftInit.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type DraftInit = { 6 | email_to_respond: string; 7 | response_instructions: string; 8 | }; 9 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/EmailAssistantState.ts: -------------------------------------------------------------------------------- 1 | /* generated using 
openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type EmailAssistantState = { 6 | app_id: string; 7 | email_to_respond: string | null; 8 | response_instructions: string | null; 9 | questions: Array | null; 10 | answers: Array | null; 11 | drafts: Array; 12 | feedback_history: Array; 13 | final_draft: string | null; 14 | next_step: EmailAssistantState.next_step; 15 | }; 16 | export namespace EmailAssistantState { 17 | export enum next_step { 18 | PROCESS_INPUT = 'process_input', 19 | CLARIFY_INSTRUCTIONS = 'clarify_instructions', 20 | PROCESS_FEEDBACK = 'process_feedback' 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/EndEntryModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents an entry for the end of a step 7 | */ 8 | export type EndEntryModel = { 9 | type?: string; 10 | end_time: string; 11 | action: string; 12 | result: Record | null; 13 | exception: string | null; 14 | state: Record; 15 | sequence_id: number; 16 | }; 17 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/EndSpanModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents an entry for the end of a span 7 | */ 8 | export type EndSpanModel = { 9 | type?: string; 10 | end_time: string; 11 | action_sequence_id: number; 12 | span_id: string; 13 | }; 14 | 
-------------------------------------------------------------------------------- /telemetry/ui/src/api/models/EndStreamModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents an entry for the first item of a stream 7 | */ 8 | export type EndStreamModel = { 9 | type?: string; 10 | action_sequence_id: number; 11 | span_id: string | null; 12 | end_time: string; 13 | items_streamed: number; 14 | }; 15 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/Feedback.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type Feedback = { 6 | feedback: string; 7 | }; 8 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/FirstItemStreamModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents an entry for the first item of a stream 7 | */ 8 | export type FirstItemStreamModel = { 9 | type?: string; 10 | action_sequence_id: number; 11 | span_id: string | null; 12 | first_item_time: string; 13 | }; 14 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/HTTPValidationError.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* 
tslint:disable */ 4 | /* eslint-disable */ 5 | import type { ValidationError } from './ValidationError'; 6 | export type HTTPValidationError = { 7 | detail?: Array; 8 | }; 9 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/IndexingJob.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Generic link for indexing job -- can be exposed in 'admin mode' in the UI 7 | */ 8 | export type IndexingJob = { 9 | id: number; 10 | start_time: string; 11 | end_time: string | null; 12 | status: string; 13 | records_processed: number; 14 | metadata: Record; 15 | }; 16 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/InitializeStreamModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents an entry for the beginning of a stream 7 | */ 8 | export type InitializeStreamModel = { 9 | type?: string; 10 | action_sequence_id: number; 11 | span_id: string | null; 12 | stream_init_time: string; 13 | }; 14 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/PointerModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Stores pointers to unique identifiers for an application. 7 | * This is used by a few different places to, say, store parent references 8 | * bewteen application instances. 
9 | */ 10 | export type PointerModel = { 11 | type?: string; 12 | app_id: string; 13 | sequence_id: number | null; 14 | partition_key: string | null; 15 | }; 16 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/Project.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type Project = { 6 | name: string; 7 | id: string; 8 | last_written: string; 9 | created: string; 10 | num_apps: number; 11 | uri: string; 12 | }; 13 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/PromptInput.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type PromptInput = { 6 | prompt: string; 7 | }; 8 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/QuestionAnswers.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type QuestionAnswers = { 6 | answers: Array; 7 | }; 8 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/ResearchSummary.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type ResearchSummary = { 6 | running_summary: string; 7 | }; 8 | 
-------------------------------------------------------------------------------- /telemetry/ui/src/api/models/Span.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { BeginSpanModel } from './BeginSpanModel'; 6 | import type { EndSpanModel } from './EndSpanModel'; 7 | /** 8 | * Represents a span. These have action sequence IDs associated with 9 | * them to put them in order. 10 | */ 11 | export type Span = { 12 | begin_entry: BeginSpanModel; 13 | end_entry: EndSpanModel | null; 14 | }; 15 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/Step.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | import type { AttributeModel } from './AttributeModel'; 6 | import type { BeginEntryModel } from './BeginEntryModel'; 7 | import type { EndEntryModel } from './EndEntryModel'; 8 | import type { EndStreamModel } from './EndStreamModel'; 9 | import type { FirstItemStreamModel } from './FirstItemStreamModel'; 10 | import type { InitializeStreamModel } from './InitializeStreamModel'; 11 | import type { Span } from './Span'; 12 | /** 13 | * Log of astep -- has a start and an end. 
14 | */ 15 | export type Step = { 16 | step_start_log: BeginEntryModel; 17 | step_end_log: EndEntryModel | null; 18 | spans: Array; 19 | attributes: Array; 20 | streaming_events: Array; 21 | }; 22 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/TransitionModel.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | /** 6 | * Pydantic model that represents a transition for storing/visualization in the UI 7 | */ 8 | export type TransitionModel = { 9 | type?: string; 10 | from_: string; 11 | to: string; 12 | condition: string; 13 | }; 14 | -------------------------------------------------------------------------------- /telemetry/ui/src/api/models/ValidationError.ts: -------------------------------------------------------------------------------- 1 | /* generated using openapi-typescript-codegen -- do no edit */ 2 | /* istanbul ignore file */ 3 | /* tslint:disable */ 4 | /* eslint-disable */ 5 | export type ValidationError = { 6 | loc: Array; 7 | msg: string; 8 | type: string; 9 | }; 10 | -------------------------------------------------------------------------------- /telemetry/ui/src/components/common/href.tsx: -------------------------------------------------------------------------------- 1 | /** 2 | * Simple component to display text as a link 3 | * Meant to be used consistently 4 | */ 5 | export const LinkText = (props: { href: string; text: string }) => { 6 | return ( 7 | { 11 | // Quick trick to ensure that this takes priority and if this has a parent href, it doesn't trigger 12 | e.stopPropagation(); 13 | }} 14 | > 15 | {props.text} 16 | 17 | ); 18 | }; 19 | -------------------------------------------------------------------------------- /telemetry/ui/src/components/common/link.tsx: 
-------------------------------------------------------------------------------- 1 | /** 2 | * Tailwind catalyst component 3 | * 4 | * This is an abstraction of a link -- we'll need to 5 | * ensure this does what we want. 6 | */ 7 | 8 | import { Link as RouterLink } from 'react-router-dom'; 9 | import React from 'react'; 10 | 11 | export const Link = React.forwardRef(function Link( 12 | props: { href: string } & React.ComponentPropsWithoutRef<'a'>, 13 | ref: React.ForwardedRef 14 | ) { 15 | return ( 16 | 17 | // 18 | // 19 | // 20 | ); 21 | }); 22 | -------------------------------------------------------------------------------- /telemetry/ui/src/components/common/loading.tsx: -------------------------------------------------------------------------------- 1 | /** 2 | * Simple loading component 3 | */ 4 | export const Loading = () => { 5 | return ( 6 |
7 |
8 |
9 |
10 |
11 | ); 12 | }; 13 | -------------------------------------------------------------------------------- /telemetry/ui/src/components/common/text.tsx: -------------------------------------------------------------------------------- 1 | import { clsx } from 'clsx'; 2 | import { Link } from './link'; 3 | 4 | export function Text({ className, ...props }: React.ComponentPropsWithoutRef<'p'>) { 5 | return ( 6 |

11 | ); 12 | } 13 | 14 | export function TextLink({ className, ...props }: React.ComponentPropsWithoutRef) { 15 | return ( 16 | 23 | ); 24 | } 25 | 26 | export function Strong({ className, ...props }: React.ComponentPropsWithoutRef<'strong'>) { 27 | return ( 28 | 29 | ); 30 | } 31 | 32 | export function Code({ className, ...props }: React.ComponentPropsWithoutRef<'code'>) { 33 | return ( 34 | 41 | ); 42 | } 43 | -------------------------------------------------------------------------------- /telemetry/ui/src/components/common/tooltip.tsx: -------------------------------------------------------------------------------- 1 | import { ReactNode, useState } from 'react'; 2 | 3 | export const Tooltip: React.FC<{ text: string; children: ReactNode }> = ({ text, children }) => { 4 | const [isVisible, setIsVisible] = useState(false); 5 | const [tooltipStyle, setTooltipStyle] = useState({}); 6 | 7 | const handleMouseEnter = (e: React.MouseEvent) => { 8 | const { clientX, clientY } = e; 9 | setTooltipStyle({ 10 | left: `${clientX}px`, 11 | top: `${clientY - 5}px`, // 5px above the cursor 12 | position: 'absolute' 13 | }); 14 | setIsVisible(true); 15 | }; 16 | 17 | const handleMouseLeave = () => { 18 | setIsVisible(false); 19 | }; 20 | 21 | return ( 22 |

23 |
28 | {children} 29 |
30 | {isVisible && ( 31 |
32 | {text} 33 |
34 | )} 35 |
36 | ); 37 | }; 38 | 39 | export default Tooltip; 40 | -------------------------------------------------------------------------------- /telemetry/ui/src/examples/Counter.tsx: -------------------------------------------------------------------------------- 1 | import { Link } from 'react-router-dom'; 2 | 3 | export const Counter = () => { 4 | return ( 5 |
6 |

7 | {' '} 8 | This is a WIP! Please check back soon or comment/vote/contribute at the{' '} 9 | 13 | github issue 14 | 15 | . 16 |

17 |
18 | ); 19 | }; 20 | -------------------------------------------------------------------------------- /telemetry/ui/src/examples/MiniTelemetry.tsx: -------------------------------------------------------------------------------- 1 | import { AppView } from '../components/routes/app/AppView'; 2 | 3 | export const MiniTelemetry = (props: { 4 | projectId: string; 5 | partitionKey: string | null; 6 | appId: string | undefined; 7 | }) => { 8 | //TODO -- put this upstream 9 | const { projectId, appId, partitionKey } = props; 10 | if (appId === undefined) { 11 | return
; 12 | } 13 | return ( 14 | 24 | ); 25 | }; 26 | -------------------------------------------------------------------------------- /telemetry/ui/src/index.css: -------------------------------------------------------------------------------- 1 | /* Tailwind imports */ 2 | @tailwind base; 3 | @tailwind components; 4 | @tailwind utilities; 5 | 6 | /* Non-tailwind CSS */ 7 | .hide-scrollbar::-webkit-scrollbar { 8 | display: none; 9 | } 10 | 11 | /* Hide scrollbar for IE, Edge, and Firefox */ 12 | .hide-scrollbar { 13 | -ms-overflow-style: none; /* IE and Edge */ 14 | scrollbar-width: none; /* Firefox */ 15 | } 16 | -------------------------------------------------------------------------------- /telemetry/ui/src/index.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import './index.css'; 4 | import App from './App'; 5 | import reportWebVitals from './reportWebVitals'; 6 | 7 | const root = ReactDOM.createRoot(document.getElementById('root') as HTMLElement); 8 | root.render( 9 | 10 | 11 | 12 | ); 13 | 14 | // If you want to start measuring performance in your app, pass a function 15 | // to log results (for example: reportWebVitals(console.log)) 16 | // or send to an analytics endpoint. 
Learn more: https://bit.ly/CRA-vitals 17 | reportWebVitals(); 18 | -------------------------------------------------------------------------------- /telemetry/ui/src/react-app-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /telemetry/ui/src/reportWebVitals.ts: -------------------------------------------------------------------------------- 1 | import { ReportHandler } from 'web-vitals'; 2 | 3 | const reportWebVitals = (onPerfEntry?: ReportHandler) => { 4 | if (onPerfEntry && onPerfEntry instanceof Function) { 5 | import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { 6 | getCLS(onPerfEntry); 7 | getFID(onPerfEntry); 8 | getFCP(onPerfEntry); 9 | getLCP(onPerfEntry); 10 | getTTFB(onPerfEntry); 11 | }); 12 | } 13 | }; 14 | 15 | export default reportWebVitals; 16 | -------------------------------------------------------------------------------- /telemetry/ui/src/setupTests.ts: -------------------------------------------------------------------------------- 1 | // jest-dom adds custom jest matchers for asserting on DOM nodes. 2 | // allows you to do things like: 3 | // expect(element).toHaveTextContent(/react/i) 4 | // learn more: https://github.com/testing-library/jest-dom 5 | import '@testing-library/jest-dom'; 6 | -------------------------------------------------------------------------------- /telemetry/ui/src/utils.tsx: -------------------------------------------------------------------------------- 1 | import { useParams } from 'react-router-dom'; 2 | import { AttributeModel, Step } from './api'; 3 | 4 | export type Status = 'success' | 'failure' | 'running'; 5 | /* 6 | * Gets the action given a step. 
7 | * TODO -- put this in the BE 8 | */ 9 | export const getActionStatus = (action: Step) => { 10 | if (action.step_end_log === null) { 11 | return 'running'; 12 | } 13 | if (action?.step_end_log?.exception) { 14 | return 'failure'; 15 | } 16 | return 'success'; 17 | }; 18 | 19 | export const getUniqueAttributeID = (attribute: AttributeModel) => { 20 | return `${attribute.action_sequence_id}-${attribute.span_id}`; 21 | }; 22 | 23 | export const useLocationParams = () => { 24 | const { projectId, appId, partitionKey } = useParams(); 25 | return { 26 | projectId: projectId as string, 27 | appId: appId as string, 28 | partitionKey: (partitionKey as string) === 'null' ? null : (partitionKey as string) 29 | }; 30 | }; 31 | -------------------------------------------------------------------------------- /telemetry/ui/src/utils/tailwind.ts: -------------------------------------------------------------------------------- 1 | export const classNames = (...classes: string[]): string => { 2 | return classes.filter(Boolean).join(' '); 3 | }; 4 | -------------------------------------------------------------------------------- /telemetry/ui/tailwind.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | module.exports = { 3 | content: ["./src/**/*.{js,jsx,ts,tsx}"], 4 | theme: { 5 | extend: { 6 | colors: { 7 | dwdarkblue: "rgb(43,49,82)", 8 | dwred: "rgb(234,85,86)", 9 | dwlightblue: "rgb(66,157,188)", 10 | dwwhite: "white", 11 | dwblack: "black", 12 | },}, 13 | }, 14 | // Only use this for debugging! 
15 | plugins: [ 16 | require("tailwindcss-question-mark"), 17 | ], 18 | darkMode: 'false', 19 | }; 20 | -------------------------------------------------------------------------------- /telemetry/ui/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2022", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "esModuleInterop": true, 8 | "allowSyntheticDefaultImports": true, 9 | "strict": true, 10 | "forceConsistentCasingInFileNames": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "module": "esnext", 13 | "moduleResolution": "node", 14 | "resolveJsonModule": true, 15 | "isolatedModules": true, 16 | "noEmit": true, 17 | "jsx": "react-jsx" 18 | }, 19 | "include": ["src"] 20 | } 21 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from burr.telemetry import disable_telemetry 2 | 3 | disable_telemetry() 4 | -------------------------------------------------------------------------------- /tests/core/test_graphviz_display.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | 3 | import pytest 4 | 5 | from burr.core.graph import GraphBuilder 6 | 7 | from tests.core.test_graph import PassedInAction 8 | 9 | 10 | @pytest.fixture 11 | def base_counter_action(): 12 | yield PassedInAction( 13 | reads=["count"], 14 | writes=["count"], 15 | fn=lambda state: {"count": state.get("count", 0) + 1}, 16 | update_fn=lambda result, state: state.update(**result), 17 | inputs=[], 18 | ) 19 | 20 | 21 | @pytest.fixture 22 | def graph(base_counter_action): 23 | yield ( 24 | GraphBuilder() 25 | .with_actions(counter=base_counter_action) 26 | .with_transitions(("counter", "counter")) 27 | .build() 28 | ) 29 | 30 | 31 | @pytest.mark.parametrize( 32 | "filename, 
import pathlib

import pytest

from burr.core.graph import GraphBuilder

from tests.core.test_graph import PassedInAction


@pytest.fixture
def base_counter_action():
    """A minimal action that increments a ``count`` state key."""
    yield PassedInAction(
        reads=["count"],
        writes=["count"],
        fn=lambda state: {"count": state.get("count", 0) + 1},
        update_fn=lambda result, state: state.update(**result),
        inputs=[],
    )


@pytest.fixture
def graph(base_counter_action):
    """A single-node self-looping graph used to exercise visualization."""
    yield (
        GraphBuilder()
        .with_actions(counter=base_counter_action)
        .with_transitions(("counter", "counter"))
        .build()
    )


@pytest.mark.parametrize(
    "filename, write_dot",
    [("app", False), ("app.png", False), ("app", True), ("app.png", True)],
)
def test_visualize_dot_output(graph, tmp_path: pathlib.Path, filename: str, write_dot: bool):
    """Handle file generation with `graph.Digraph` `.render()` and `.pipe()`.

    BUG FIX: the output path previously interpolated a literal placeholder
    instead of the parametrized ``filename``, so all four parametrized cases
    exercised the exact same path and the parametrization was meaningless.
    """
    output_file_path = f"{tmp_path}/{filename}"

    graph.visualize(
        output_file_path=output_file_path,
        write_dot=write_dot,
    )

    # The extension-less dot source file is only written when write_dot=True.
    assert pathlib.Path(tmp_path, "app").exists() == write_dot


def test_visualize_no_dot_output(graph, tmp_path: pathlib.Path):
    """Check that no dot file is generated when output_file_path=None"""
    dot_file_path = tmp_path / "dag"

    graph.visualize(output_file_path=None)

    assert not dot_file_path.exists()
import pytest

from burr.core.serde import StringDispatch, deserialize, serialize

# serialize/deserialize act as the identity on JSON-native values;
# StringDispatch routes serde calls by a registered string key.


def test_serialize_primitive_types():
    for primitive in (1, 1.0, "test"):
        assert serialize(primitive) == primitive
    assert serialize(True) is True


def test_serialize_list():
    for items in ([1, 2, 3], ["a", "b", "c"]):
        assert serialize(items) == items


def test_serialize_dict():
    for mapping in ({"key": "value"}, {"key1": 1, "key2": 2}):
        assert serialize(mapping) == mapping


def test_deserialize_primitive_types():
    for primitive in (1, 1.0, "test"):
        assert deserialize(primitive) == primitive
    assert deserialize(True) is True


def test_deserialize_list():
    for items in ([1, 2, 3], ["a", "b", "c"]):
        assert deserialize(items) == items


def test_deserialize_dict():
    for mapping in ({"key": "value"}, {"key1": 1, "key2": 2}):
        assert deserialize(mapping) == mapping


def test_string_dispatch_no_key():
    # Calling with an unregistered key must raise rather than silently no-op.
    dispatch = StringDispatch()
    with pytest.raises(ValueError):
        dispatch.call("nonexistent_key")


def test_string_dispatch_with_key():
    dispatch = StringDispatch()
    dispatch.register("test_key")(lambda x: x)
    assert dispatch.call("test_key", "test_value") == "test_value"
from burr.core import serde, state
from burr.integrations.serde import pickle


class User:
    """Plain (non-pydantic) object that requires pickle-based serde."""

    def __init__(self, name, email):
        self.name = name
        self.email = email


def test_serde_of_pickle_object():
    """Round-trip a registered plain object through State serde via pickle."""
    pickle.register_type_to_pickle(User)
    original_user = User(name="John Doe", email="john.doe@example.com")
    before = state.State({"user": original_user, "test": "test"})
    serialized = before.serialize()
    # The exact pickle payload is pinned so silent protocol changes surface.
    expected_payload = (
        b"\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c\x0btest_pi"
        b"ckle\x94\x8c\x04User\x94\x93\x94)\x81\x94}\x94(\x8c\x04na"
        b"me\x94\x8c\x08John Doe\x94\x8c\x05email\x94\x8c\x14john"
        b".doe@example.com\x94ub."
    )
    assert serialized == {
        "user": {serde.KEY: "pickle", "value": expected_payload},
        "test": "test",
    }
    after = state.State.deserialize(serialized)
    assert isinstance(after["user"], User)
    assert after["user"].name == "John Doe"
    assert after["user"].email == "john.doe@example.com"
BaseModel 2 | 3 | from burr.core import serde, state 4 | 5 | 6 | class User(BaseModel): 7 | name: str 8 | email: str 9 | 10 | 11 | def test_serde_of_pydantic_model(): 12 | user = User(name="John Doe", email="john.doe@example.com") 13 | og = state.State({"user": user}) 14 | serialized = og.serialize() 15 | assert serialized == { 16 | "user": { 17 | serde.KEY: "pydantic", 18 | "__pydantic_class": "test_pydantic.User", 19 | "email": "john.doe@example.com", 20 | "name": "John Doe", 21 | } 22 | } 23 | ng = state.State.deserialize(serialized) 24 | assert isinstance(ng["user"], User) 25 | assert ng["user"].name == "John Doe" 26 | assert ng["user"].email == "john.doe@example.com" 27 | -------------------------------------------------------------------------------- /tests/integrations/test_burr_opentelemetry.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pydantic 4 | import pytest 5 | 6 | from burr.core import serde 7 | from burr.integrations.opentelemetry import convert_to_otel_attribute 8 | 9 | 10 | class SampleModel(pydantic.BaseModel): 11 | foo: int 12 | bar: bool 13 | 14 | 15 | @pytest.mark.parametrize( 16 | "value, expected", 17 | [ 18 | ("hello", "hello"), 19 | (1, 1), 20 | ((1, 1), [1, 1]), 21 | ((1.0, 1.0), [1.0, 1.0]), 22 | ((True, True), [True, True]), 23 | (("hello", "hello"), ["hello", "hello"]), 24 | (SampleModel(foo=1, bar=True), json.dumps(serde.serialize(SampleModel(foo=1, bar=True)))), 25 | ], 26 | ) 27 | def test_convert_to_otel_attribute(value, expected): 28 | assert convert_to_otel_attribute(value) == expected 29 | -------------------------------------------------------------------------------- /tests/integrations/test_burr_pydantic_future_annotations.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import os 4 | 5 | """This tests that the pydantic integration allows for future import of annotations""" 6 
from burr.core import Action, State
from burr.tracking.common.models import ActionModel


class ActionWithCustomSource(Action):
    """An Action whose source is supplied by `get_source` instead of inspection."""

    def __init__(self):
        super().__init__()

    @property
    def reads(self) -> list[str]:
        return []

    @property
    def writes(self) -> list[str]:
        return []

    def run(self, state: State, **run_kwargs) -> dict:
        return {}

    def update(self, result: dict, state: State) -> State:
        return state

    def get_source(self) -> str:
        return "custom source code"


def test_action_with_custom_source():
    """ActionModel.from_action must honor a custom get_source override."""
    action = ActionWithCustomSource().with_name("foo")
    assert ActionModel.from_action(action).code == "custom source code"