├── logging └── empty.txt ├── working_files └── empty.txt ├── workflow_tools ├── phases │ ├── base │ │ └── __init__.py │ ├── sink │ │ ├── __init__.py │ │ ├── phase_sink_knowledge.py │ │ └── phase_sink_prerequisites.py │ ├── source │ │ ├── __init__.py │ │ ├── phase_source_knowledge.py │ │ └── phase_source_prerequisites.py │ ├── shared │ │ └── __init__.py │ ├── diagnose │ │ └── __init__.py │ └── __init__.py ├── integrations │ └── __init__.py ├── core │ └── __init__.py ├── services │ └── __init__.py └── __init__.py ├── resources ├── python │ ├── others │ │ ├── opc_ua_server │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ └── README.md │ │ ├── streamlit │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── screenshot.png │ │ │ ├── dockerfile │ │ │ └── library.json │ │ └── jupyterlab │ │ │ ├── icon.png │ │ │ ├── requirements.txt │ │ │ ├── library.json │ │ │ ├── README.md │ │ │ └── Notebook.ipynb │ ├── sources │ │ ├── confluent_kafka │ │ │ ├── .gitignore │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── README.md │ │ ├── snowplow_source │ │ │ ├── .gitignore │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ ├── README.md │ │ │ └── library.json │ │ ├── environment_source │ │ │ ├── .gitignore │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ ├── main.py │ │ │ └── library.json │ │ ├── opc_ua_client │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ ├── main.py │ │ │ └── README.md │ │ ├── starter_source │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ └── README.md │ │ ├── flet-input-form │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── library.json │ │ │ └── dockerfile │ │ ├── s3_source │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ ├── main.py │ │ │ └── README.md │ │ ├── MQTT │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── README.md │ │ ├── demo_data │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ └── main.py │ │ ├── simple_csv │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ └── README.md │ │ ├── hivemq │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── README.md │ │ ├── postgres_cdc │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── setup_logger.py │ │ │ ├── dockerfile │ │ │ ├── README.md │ │ │ └── postgres_helper.py │ │ ├── influxdb_3 │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ └── dockerfile │ │ ├── influxdb_2 │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ └── dockerfile │ │ ├── redis_source │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── library.json │ │ ├── sql_cdc │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── .gitignore │ │ │ └── dockerfile │ │ ├── wikipedia-source-claude-coded │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── app.yaml │ │ │ └── README.md │ │ ├── segment_webhook │ │ │ ├── requirements.txt │ │ │ ├── icon.jpg │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ ├── README.md │ │ │ └── main.py │ │ ├── coinbase_websocket_tutorial_source │ │ │ └── requirements.txt │ │ ├── http_api_sample │ │ │ ├── requirements.txt │ │ │ ├── library.json │ │ │ ├── setup_logging.py │ │ │ ├── dockerfile │ │ │ ├── README.md │ │ │ └── main.py │ │ └── http_api_source │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── setup_logging.py │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ └── README.md │ ├── destinations │ │ ├── flet-waveform │ │ │ 
├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── library.json │ │ │ ├── dockerfile │ │ │ └── README.md │ │ ├── TDengine │ │ │ ├── requirements.txt │ │ │ ├── icon.webp │ │ │ └── dockerfile │ │ ├── confluent_kafka │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ ├── README.md │ │ │ └── library.json │ │ ├── mongodb │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ └── dockerfile │ │ ├── redis_dest │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── main.py │ │ ├── starter_destination │ │ │ ├── requirements.txt │ │ │ ├── library.json │ │ │ ├── dockerfile │ │ │ └── README.md │ │ ├── MQTT │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── README.md │ │ ├── influxdb_3 │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── main.py │ │ ├── hivemq │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── README.md │ │ ├── postgres │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── main.py │ │ ├── elasticsearch │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ └── dockerfile │ │ ├── slack_notifications │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ ├── main.py │ │ │ └── README.md │ │ ├── s3-iceberg-destination │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ └── main.py │ │ ├── websocket │ │ │ ├── requirements.txt │ │ │ ├── icon.png │ │ │ ├── dockerfile │ │ │ ├── test.py │ │ │ └── library.json │ │ ├── clickhouse-sink-claude-coded │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── app.yaml │ │ │ └── README.md │ │ └── big_query │ │ │ ├── icon.png │ │ │ ├── requirements.txt │ │ │ ├── setup_logger.py │ │ │ ├── dockerfile │ │ │ ├── utils.py │ │ │ ├── main.py │ │ │ └── README.md │ ├── transformations │ │ ├── event_detection │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ ├── main.py │ │ │ └── README.md │ │ ├── matlab_runner │ │ │ ├── requirements.txt │ │ │ ├── main.py │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ └── README.md │ │ ├── starter_transformation │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── library.json │ │ │ ├── README.md │ │ │ └── main.py │ │ └── hugging_face_model │ │ │ ├── requirements.txt │ │ │ ├── dockerfile │ │ │ ├── main.py │ │ │ └── library.json │ └── readme.md └── _demo-clickhouse-db │ ├── requirements.txt │ ├── app.yaml │ ├── dockerfile │ ├── README.md │ └── main.py ├── docs └── images │ ├── sink_deployments.png │ ├── source_deployment.png │ ├── sink_workflow_steps.png │ ├── klauskode_splashscreen.png │ └── source_workflow_steps.png ├── .env.example ├── prompts ├── tasks │ ├── sink_schema_analysis.md │ ├── sink_schema_analysis_retry.md │ ├── claude_code_system_prompt.md │ ├── claude_code_debug_system_prompt.md │ ├── source_schema_analysis.md │ ├── source_schema_analysis_retry.md │ ├── log_analysis_task.md │ ├── diagnose_app_analysis.md │ ├── diagnose_follow_up.md │ └── diagnose_edit_code.md ├── agents │ ├── SourceSchemaAnalyzerAgent.md │ ├── AppNameSuggesterAgent.md │ └── SinkSchemaAnalyzerAgent.md └── diagnose │ ├── diagnose_follow_up.md │ ├── diagnose_app_analysis.md │ └── diagnose_edit_code.md ├── requirements.txt ├── config ├── local.yaml.example └── models.yaml.template └── .claude └── settings.json /logging/empty.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /working_files/empty.txt: 
-------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /workflow_tools/phases/base/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /workflow_tools/phases/sink/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /workflow_tools/phases/source/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /resources/python/others/opc_ua_server/requirements.txt: -------------------------------------------------------------------------------- 1 | asyncua -------------------------------------------------------------------------------- /resources/python/sources/confluent_kafka/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | __pycache__ -------------------------------------------------------------------------------- /resources/python/sources/snowplow_source/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | __pycache__ -------------------------------------------------------------------------------- /resources/python/sources/environment_source/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | __pycache__ -------------------------------------------------------------------------------- /resources/_demo-clickhouse-db/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.14.1 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/opc_ua_client/requirements.txt: -------------------------------------------------------------------------------- 1 | asyncua 2 | quixstreams==3.8.1 -------------------------------------------------------------------------------- /resources/python/sources/confluent_kafka/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/starter_source/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.14.1 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/flet-waveform/requirements.txt: -------------------------------------------------------------------------------- 1 | flet 2 | quixstreams 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/flet-input-form/requirements.txt: -------------------------------------------------------------------------------- 1 | flet 2 | quixstreams 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/s3_source/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams[s3]==3.7.0 2 | python-dotenv 3 | 
-------------------------------------------------------------------------------- /resources/python/destinations/TDengine/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams[tdengine]==3.22.0 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/confluent_kafka/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | python-dotenv 3 | -------------------------------------------------------------------------------- /resources/python/destinations/mongodb/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams[mongodb]==3.14.1 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/redis_dest/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | redis 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/starter_destination/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.13.1 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/MQTT/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | paho-mqtt==2.1.0 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/demo_data/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams[pandas]==3.8.1 2 | python-dotenv 3 | -------------------------------------------------------------------------------- /resources/python/sources/simple_csv/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | python-dotenv 3 | pandas -------------------------------------------------------------------------------- /resources/python/transformations/event_detection/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.2.1 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/transformations/matlab_runner/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.9.0 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/MQTT/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | paho-mqtt==2.1.0 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/influxdb_3/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams[influxdb3]==3.16.1 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/others/streamlit/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit 2 | websockets 3 | pandas 4 | plotly 5 | asyncio 
-------------------------------------------------------------------------------- /resources/python/sources/hivemq/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | paho-mqtt==2.1.0 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/hivemq/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | paho-mqtt==2.1.0 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/postgres/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams[postgresql]==3.20.0 2 | python-dotenv 3 | -------------------------------------------------------------------------------- /resources/python/sources/postgres_cdc/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | psycopg2-binary 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/transformations/starter_transformation/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.14.1 2 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/elasticsearch/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams[elasticsearch]==3.14.1 2 | python-dotenv 3 | -------------------------------------------------------------------------------- /resources/python/destinations/slack_notifications/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | requests 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/influxdb_3/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | influxdb3-python==0.3.6 3 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/s3-iceberg-destination/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams[iceberg_aws]==3.1.0 2 | python-dotenv 3 | -------------------------------------------------------------------------------- /resources/python/destinations/websocket/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams>=3.0 2 | python-dotenv 3 | websockets>=14.0 4 | asyncio -------------------------------------------------------------------------------- /resources/python/sources/influxdb_2/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | influxdb-client==1.39.0 3 | pandas 4 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/redis_source/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | pandas 3 | redis 4 | python-dotenv 5 | 6 | 7 | -------------------------------------------------------------------------------- 
/resources/python/sources/sql_cdc/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | pyodbc 3 | requests 4 | SQLAlchemy 5 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/wikipedia-source-claude-coded/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.22.0 2 | python-dotenv 3 | requests-sse -------------------------------------------------------------------------------- /resources/python/sources/segment_webhook/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | flask 3 | requests 4 | waitress 5 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/snowplow_source/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | boto3 3 | snowplow_analytics_sdk 4 | python-dotenv -------------------------------------------------------------------------------- /docs/images/sink_deployments.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/docs/images/sink_deployments.png -------------------------------------------------------------------------------- /docs/images/source_deployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/docs/images/source_deployment.png -------------------------------------------------------------------------------- /resources/python/destinations/clickhouse-sink-claude-coded/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.22.0 2 | python-dotenv 3 | clickhouse-connect -------------------------------------------------------------------------------- /resources/python/sources/coinbase_websocket_tutorial_source/requirements.txt: -------------------------------------------------------------------------------- 1 | python-dateutil 2 | websockets 3 | quixstreams~=3.4.0 4 | -------------------------------------------------------------------------------- /resources/python/transformations/hugging_face_model/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==2.9.0 2 | transformers 3 | tensorflow 4 | python-dotenv -------------------------------------------------------------------------------- /docs/images/sink_workflow_steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/docs/images/sink_workflow_steps.png -------------------------------------------------------------------------------- /resources/python/sources/environment_source/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/quixio/quix-streams.git@alpha/kafka-source 2 | python-dotenv -------------------------------------------------------------------------------- /docs/images/klauskode_splashscreen.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/docs/images/klauskode_splashscreen.png -------------------------------------------------------------------------------- /docs/images/source_workflow_steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/docs/images/source_workflow_steps.png -------------------------------------------------------------------------------- /resources/python/sources/MQTT/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/MQTT/icon.png -------------------------------------------------------------------------------- /resources/python/sources/hivemq/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/hivemq/icon.png -------------------------------------------------------------------------------- /resources/python/sources/http_api_sample/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.2.1 2 | flask 3 | flask_cors 4 | flasgger==0.9.7b2 5 | waitress 6 | python-dotenv -------------------------------------------------------------------------------- /resources/python/sources/http_api_source/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.2.1 2 | flask 3 | flask_cors 4 | flasgger==0.9.7b2 5 | waitress 6 | python-dotenv -------------------------------------------------------------------------------- /resources/python/destinations/MQTT/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/MQTT/icon.png -------------------------------------------------------------------------------- /resources/python/others/jupyterlab/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/others/jupyterlab/icon.png -------------------------------------------------------------------------------- /resources/python/others/streamlit/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/others/streamlit/icon.png -------------------------------------------------------------------------------- /resources/python/sources/s3_source/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/s3_source/icon.png -------------------------------------------------------------------------------- /resources/python/sources/sql_cdc/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/sql_cdc/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/hivemq/icon.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/hivemq/icon.png -------------------------------------------------------------------------------- /resources/python/sources/influxdb_2/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/influxdb_2/icon.png -------------------------------------------------------------------------------- /resources/python/sources/influxdb_3/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/influxdb_3/icon.png -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | ANTHROPIC_API_KEY= 2 | QUIX_TOKEN= 3 | QUIX_BASE_URL=https://portal-api.cloud.quix.io -------------------------------------------------------------------------------- /resources/python/destinations/TDengine/icon.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/TDengine/icon.webp -------------------------------------------------------------------------------- /resources/python/destinations/big_query/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/big_query/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/mongodb/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/mongodb/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/postgres/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/postgres/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/websocket/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/websocket/icon.png -------------------------------------------------------------------------------- /resources/python/others/streamlit/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/others/streamlit/screenshot.png -------------------------------------------------------------------------------- /resources/python/sources/opc_ua_client/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/opc_ua_client/icon.png 
-------------------------------------------------------------------------------- /resources/python/sources/postgres_cdc/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/postgres_cdc/icon.png -------------------------------------------------------------------------------- /resources/python/sources/redis_source/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/redis_source/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/influxdb_3/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/influxdb_3/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/redis_dest/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/redis_dest/icon.png -------------------------------------------------------------------------------- /resources/python/sources/confluent_kafka/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/confluent_kafka/icon.png -------------------------------------------------------------------------------- /resources/python/sources/flet-input-form/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/flet-input-form/icon.png -------------------------------------------------------------------------------- /resources/python/sources/http_api_source/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/http_api_source/icon.png -------------------------------------------------------------------------------- /resources/python/sources/segment_webhook/icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/segment_webhook/icon.jpg -------------------------------------------------------------------------------- /resources/python/sources/snowplow_source/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/snowplow_source/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/elasticsearch/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/elasticsearch/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/flet-waveform/icon.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/flet-waveform/icon.png -------------------------------------------------------------------------------- /resources/python/sources/environment_source/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/sources/environment_source/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/confluent_kafka/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/confluent_kafka/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/slack_notifications/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/slack_notifications/icon.png -------------------------------------------------------------------------------- /resources/python/destinations/s3-iceberg-destination/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quixio/klaus-kode-agentic-integrator/HEAD/resources/python/destinations/s3-iceberg-destination/icon.png -------------------------------------------------------------------------------- /resources/python/sources/sql_cdc/.gitignore: -------------------------------------------------------------------------------- 1 | **/ref/ 2 | **/obj/ 3 | **/bin/ 4 | DotSettings.user 5 | **DotSettings.user 6 | ca.cert 7 | .idea/ 8 | __pycache__/ 9 | certificates/ 10 | state/ -------------------------------------------------------------------------------- /resources/_demo-clickhouse-db/app.yaml: -------------------------------------------------------------------------------- 1 | name: Clickhouse Internal 2 | language: python 3 | dockerfile: dockerfile 4 | runEntryPoint: main.py 5 | defaultFile: main.py 6 | libraryItemId: starter-transformation 7 | -------------------------------------------------------------------------------- /resources/python/others/jupyterlab/requirements.txt: -------------------------------------------------------------------------------- 1 | quixstreams==3.14.1 2 | python-dotenv 3 | jupyterlab 4 | notebook 5 | numpy 6 | pandas 7 | matplotlib 8 | seaborn 9 | scikit-learn 10 | scipy 11 | bcrypt -------------------------------------------------------------------------------- /prompts/tasks/sink_schema_analysis.md: -------------------------------------------------------------------------------- 1 | Here is a sample of messages from a Kafka topic. Please analyze the data structure and provide a clear, human-readable markdown description of the schema. 
2 | 3 | Full Data Sample: 4 | ```json 5 | {data_sample} 6 | ``` -------------------------------------------------------------------------------- /resources/python/destinations/big_query/requirements.txt: -------------------------------------------------------------------------------- 1 | --extra-index-url https://pkgs.dev.azure.com/quix-analytics/53f7fe95-59fe-4307-b479-2473b96de6d1/_packaging/public/pypi/simple/ 2 | quixstreams==2.9.0a 3 | google-cloud-bigquery 4 | python-dotenv 5 | -------------------------------------------------------------------------------- /workflow_tools/integrations/__init__.py: -------------------------------------------------------------------------------- 1 | # integrations/__init__.py 2 | """External system integrations.""" 3 | 4 | from .quix_tools import * 5 | from .credential_mapper import * 6 | from .credentials_parser import * 7 | from .deployment_monitoring import * -------------------------------------------------------------------------------- /resources/python/transformations/matlab_runner/main.py: -------------------------------------------------------------------------------- 1 | import matlab.engine 2 | 3 | print("Starting matlab") 4 | 5 | eng = matlab.engine.start_matlab() 6 | 7 | print("Calculate square root") 8 | output = eng.sqrt(8.0) 9 | 10 | print("Result:") 11 | print(output) -------------------------------------------------------------------------------- /resources/_demo-clickhouse-db/dockerfile: -------------------------------------------------------------------------------- 1 | FROM clickhouse/clickhouse-server:23.3 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | RUN mkdir -p /app 5 | WORKDIR /app 6 | 7 | ENV CLICKHOUSE_USER=clickadmin 8 | ENV CLICKHOUSE_PASSWORD=clickpass 9 | 10 | EXPOSE 8123 11 | EXPOSE 9000 -------------------------------------------------------------------------------- /resources/python/readme.md: -------------------------------------------------------------------------------- 1 | # Python samples 2 | 3 | Samples for reading and writing data, and for running models. 4 | 5 | ## Develop locally 6 | 7 | Whether you're on Windows, Linux, Mac or Docker, we've got you covered. Check out the readme.md file for your environment [here](local-development/) to install the dependencies and get up and running.
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | openai-agents[litellm]==0.2.3 2 | openai<=1.97.1 # Pin to compatible version due to breaking changes in newer versions 3 | claude-code-sdk 4 | pandas 5 | tabulate 6 | python-dotenv 7 | pyyaml 8 | requests 9 | anyio 10 | backoff 11 | rich>=13.0.0 12 | questionary>=2.0.0 13 | nest_asyncio>=1.5.0 14 | keyboard>=0.13.5 # For interruption mechanism 15 | -------------------------------------------------------------------------------- /workflow_tools/core/__init__.py: -------------------------------------------------------------------------------- 1 | # core/__init__.py 2 | """Core utilities and management.""" 3 | 4 | from .config_loader import * 5 | from .prompt_manager import PromptManager 6 | from .triage_agent import TriageAgent 7 | from .placeholder_workflows import PlaceholderWorkflowFactory 8 | from .error_handler import * 9 | from .logger_service import * 10 | from .interfaces import * -------------------------------------------------------------------------------- /workflow_tools/phases/shared/__init__.py: -------------------------------------------------------------------------------- 1 | # Shared components used by both sink and source workflows 2 | from .phase_deployment import DeploymentPhase 3 | from .phase_monitoring import MonitoringPhase 4 | 5 | # Knowledge components (moved from knowledge directory) 6 | from .env_var_management import EnvVarManager 7 | from .app_management import AppManager 8 | from .cache_utils import CacheUtils -------------------------------------------------------------------------------- /resources/python/destinations/big_query/setup_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Set up logger 4 | PROD_ENV = False 5 | logger = logging.getLogger("BigQuery Sink") 6 | logging.basicConfig() 7 | 8 | if PROD_ENV: 9 | logger.setLevel(logging.INFO) 10 | logger.info("Running in Production Mode...") 11 | else: 12 | logger.setLevel(logging.DEBUG) 13 | logger.info("Running in Debug Mode...") -------------------------------------------------------------------------------- /resources/python/sources/postgres_cdc/setup_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Set up logger 4 | PROD_ENV = False 5 | logger = logging.getLogger("Postgres CDC") 6 | logging.basicConfig() 7 | 8 | if PROD_ENV: 9 | logger.setLevel(logging.INFO) 10 | logger.info("Running in Production Mode...") 11 | else: 12 | logger.setLevel(logging.DEBUG) 13 | logger.info("Running in Debug Mode...") -------------------------------------------------------------------------------- /prompts/tasks/sink_schema_analysis_retry.md: -------------------------------------------------------------------------------- 1 | I previously analyzed this Kafka topic data and provided this analysis: 2 | 3 | ```markdown 4 | {previous_analysis} 5 | ``` 6 | 7 | However, the user has provided the following correction/feedback: 8 | "{user_feedback}" 9 | 10 | Please provide an updated and corrected schema analysis based on this feedback. 
11 | 12 | Original Data Sample: 13 | ```json 14 | {data_sample} 15 | ``` -------------------------------------------------------------------------------- /resources/python/sources/http_api_sample/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "http-api-sample", 3 | "name": "HTTP API Sample", 4 | "language": "Python", 5 | "IsHighlighted": true, 6 | "DisplayOrder": 6, 7 | "tags": { 8 | "Complexity": ["Easy"] 9 | }, 10 | "shortDescription": "Run a Flask HTTP API.", 11 | "DefaultFile": "main.py", 12 | "EntryPoint": "dockerfile", 13 | "RunEntryPoint": "main.py", 14 | "Variables": [] 15 | } 16 | -------------------------------------------------------------------------------- /config/local.yaml.example: -------------------------------------------------------------------------------- 1 | # Local Configuration Example 2 | # Copy this file to config/local.yaml and customize for your installation 3 | # config/local.yaml is ignored by git and won't be committed 4 | 5 | # Claude CLI installation path (optional - will auto-detect if not specified) 6 | # Uncomment and set to your Claude CLI installation path if auto-detection fails 7 | # claude_cli_path: /home/yourusername/.claude/local/node_modules/.bin 8 | 9 | # You can also set this via environment variable: 10 | # export CLAUDE_CLI_PATH=/path/to/claude/cli/bin -------------------------------------------------------------------------------- /resources/python/others/streamlit/dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image 2 | FROM python:3.12-slim 3 | 4 | # Set the working directory in the container 5 | WORKDIR /app 6 | 7 | # Copy the current directory contents into the container at /app 8 | COPY . 
/app 9 | 10 | # Install required packages 11 | RUN pip install --no-cache-dir -r requirements.txt 12 | 13 | # Expose the port that Streamlit uses 14 | EXPOSE 80 15 | 16 | # Run the Streamlit app 17 | CMD ["streamlit", "run", "main.py", "--server.port=80", "--server.enableCORS=false", "--server.address=0.0.0.0"] -------------------------------------------------------------------------------- /resources/python/transformations/matlab_runner/dockerfile: -------------------------------------------------------------------------------- 1 | FROM mathworks/matlab:r2023b 2 | 3 | USER root 4 | 5 | # Install Python 6 | RUN apt-get update && apt-get install -y \ 7 | python3.10 python3.10-dev python3.10-venv 8 | 9 | USER matlab 10 | 11 | ENV MW_LICENSING_DIAGNOSTICS=1 12 | 13 | ENV MLM_LICENSE_FILE=27000@your-license-server 14 | 15 | # Add MATLAB Runtime 16 | ENV LD_LIBRARY_PATH=/opt/matlab/R2023b/bin/glnxa64:$LD_LIBRARY_PATH 17 | 18 | # Install the MATLAB Python Engine 19 | RUN pip3 install matlabengine==23.2.3 20 | 21 | COPY main.py /app/main.py 22 | 23 | ENTRYPOINT ["python", "/app/main.py"] -------------------------------------------------------------------------------- /workflow_tools/phases/diagnose/__init__.py: -------------------------------------------------------------------------------- 1 | # __init__.py - Diagnose workflow phases 2 | 3 | from .phase_diagnose_app_selection import DiagnoseAppSelectionPhase 4 | from .phase_diagnose_app_download import DiagnoseAppDownloadPhase 5 | from .phase_diagnose_edit import DiagnoseEditPhase 6 | from .phase_diagnose_sandbox import DiagnoseSandboxPhase 7 | from .phase_diagnose_deployment_sync import DiagnoseDeploymentSyncPhase 8 | 9 | __all__ = [ 10 | 'DiagnoseAppSelectionPhase', 11 | 'DiagnoseAppDownloadPhase', 12 | 'DiagnoseEditPhase', 13 | 'DiagnoseSandboxPhase', 14 | 'DiagnoseDeploymentSyncPhase' 15 | ] -------------------------------------------------------------------------------- /prompts/tasks/claude_code_system_prompt.md: -------------------------------------------------------------------------------- 1 | 2 | You are an experienced senior data engineer who is helping to create a {workflow_type} application in Python. Your job is to help end users get data out of different systems, process it, and send it to other systems. The user has already set up a starter template and wants you to modify it. 3 | 4 | 5 | 6 | You are working from the main workflow directory. The application files are located in: {app_path} 7 | Always read and write files using the full relative path from your current directory (e.g., {app_path}/main.py, {app_path}/requirements.txt, etc.) 8 | -------------------------------------------------------------------------------- /resources/python/others/streamlit/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "streamlit", 3 | "name": "Streamlit Dashboard", 4 | "language": "Python", 5 | "IconFile": "icon.png", 6 | "tags": { 7 | "Complexity": ["Medium"], 8 | "Technology": ["Quix Streams", "Streamlit"] 9 | }, 10 | "shortDescription": "Run a Streamlit real-time dashboard that displays data from a database.", 11 | "longDescription": "Run a Streamlit real-time dashboard that displays data from a database. 
The example uses InfluxDB 2.", 12 | "DefaultFile": "main.py", 13 | "EntryPoint": "dockerfile", 14 | "RunEntryPoint": "main.py", 15 | "Variables": [ 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /prompts/tasks/claude_code_debug_system_prompt.md: -------------------------------------------------------------------------------- 1 | 2 | You are an experienced senior data engineer who has been asked to debug a Python data processing application that encountered errors. You MUST fix the issues by EDITING at least one of the code files, environment variables and/or dependencies. 3 | 4 | 5 | 6 | - You are working from the main workflow directory. The application files are located in: {app_path} 7 | - Always read and write files using the full relative path from your current directory (e.g., {app_path}/main.py, {app_path}/requirements.txt, etc.) 8 | - **YOU MUST USE THE EDIT TOOL TO ACTUALLY FIX THE FILES - DO NOT JUST READ AND ANALYZE THEM!** 9 | -------------------------------------------------------------------------------- /prompts/agents/SourceSchemaAnalyzerAgent.md: -------------------------------------------------------------------------------- 1 | 2 | You are an expert data analyst specializing in source data schema analysis. 3 | 4 | 5 | 6 | You will be given sample data from an external source system and need to analyze it comprehensively. 7 | 8 | 9 | 10 | 1. Analyze the structure and format of the incoming data 11 | 2. Identify data types, patterns, and key fields 12 | 3. Create comprehensive schema documentation 13 | 4. Suggest appropriate Kafka message structure for this data - this must be in JSON format. 14 | 5. Identify any data transformation needs 15 | 16 | 17 | 18 | Focus on creating clear, detailed schema documentation that will help with code generation. 19 | -------------------------------------------------------------------------------- /prompts/tasks/source_schema_analysis.md: -------------------------------------------------------------------------------- 1 | Analyze the following sample data from {technology_name}: 2 | 3 | Sample Data: 4 | {sample_data} 5 | 6 | Please provide a comprehensive analysis including: 7 | 8 | 1. **Data Structure**: Describe the overall format and structure 9 | 2. **Field Analysis**: List all fields/columns with their data types 10 | 3. **Data Patterns**: Identify any patterns, ranges, or constraints 11 | 4. **Key Fields**: Identify primary keys, timestamps, or important identifiers 12 | 5. **Data Quality**: Note any missing values, inconsistencies, or issues 13 | 6. **Kafka Message Format**: Suggest how this data should be structured as Kafka messages in JSON format. 14 | 7. **Transformation Needs**: Identify any data transformations that might be needed 15 | 16 | Focus on providing actionable insights for code generation.
-------------------------------------------------------------------------------- /resources/python/destinations/s3-iceberg-destination/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11.1-slim-buster 2 | 3 | # Set environment variables to non-interactive and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 7 | 8 | # Set the working directory inside the container 9 | WORKDIR /app 10 | 11 | # Copy only the requirements file(s) to leverage Docker cache 12 | # Assuming all requirements files are in the root or subdirectories 13 | COPY ./requirements.txt ./ 14 | 15 | # Install dependencies 16 | # Adding `--no-cache-dir` to avoid storing unnecessary files and potentially reduce image size 17 | RUN pip install --no-cache-dir -r requirements.txt 18 | 19 | # Copy the rest of the application 20 | COPY . . 21 | 22 | # Set the command to run your application 23 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/http_api_sample/setup_logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | def get_logger(): 5 | 6 | logging.basicConfig( 7 | level=logging.DEBUG, 8 | format='[%(asctime)s] [%(levelname)s]: %(message)s', 9 | datefmt='%Y-%m-%d %H:%M:%S' 10 | ) 11 | 12 | # Set up logging 13 | logger = logging.getLogger('waitress') 14 | logger.setLevel(logging.DEBUG) # Set to DEBUG for more detailed output 15 | logger.propagate = False # Prevent the log messages from propagating to the root logger 16 | 17 | # Create handlers (console and file handler for example) 18 | console_handler = logging.StreamHandler() 19 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 20 | console_handler.setFormatter(formatter) 21 | logger.addHandler(console_handler) 22 | 23 | return logger -------------------------------------------------------------------------------- /resources/python/sources/http_api_source/setup_logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | def get_logger(): 5 | 6 | logging.basicConfig( 7 | level=logging.DEBUG, 8 | format='[%(asctime)s] [%(levelname)s]: %(message)s', 9 | datefmt='%Y-%m-%d %H:%M:%S' 10 | ) 11 | 12 | # Set up logging 13 | logger = logging.getLogger('waitress') 14 | logger.setLevel(logging.DEBUG) # Set to DEBUG for more detailed output 15 | logger.propagate = False # Prevent the log messages from propagating to the root logger 16 | 17 | # Create handlers (console and file handler for example) 18 | console_handler = logging.StreamHandler() 19 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 20 | console_handler.setFormatter(formatter) 21 | logger.addHandler(console_handler) 22 | 23 | return logger -------------------------------------------------------------------------------- /workflow_tools/services/__init__.py: -------------------------------------------------------------------------------- 1 | """Service layer for workflow tools.""" 2 | 3 | from .data_specification_collector import DataSpecificationCollector 4 | from .dependency_parser import DependencyParser 5 | from .debug_analyzer import DebugAnalyzer 6 | from .file_manager import FileManager 7 | from .knowledge_gatherer import KnowledgeGatheringService 8 | from .log_analyzer import LogAnalyzer 9 | from 
.requirements_updater import RequirementsUpdater 10 | from .sandbox_error_handler import SandboxErrorHandler 11 | from .runner_utils import run_agent_with_retry, run_agent_with_fallback 12 | 13 | __all__ = [ 14 | 'DataSpecificationCollector', 15 | 'DependencyParser', 16 | 'DebugAnalyzer', 17 | 'FileManager', 18 | 'KnowledgeGatheringService', 19 | 'LogAnalyzer', 20 | 'RequirementsUpdater', 21 | 'SandboxErrorHandler', 22 | 'run_agent_with_retry', 23 | 'run_agent_with_fallback' 24 | ] -------------------------------------------------------------------------------- /resources/python/destinations/starter_destination/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "starter-destination", 3 | "name": "Starter Sink", 4 | "language": "Python", 5 | "IsHighlighted": true, 6 | "DisplayOrder": 3, 7 | "tags": { 8 | "Complexity": ["Easy"], 9 | "Technology": ["Quix Streams"], 10 | "Pipeline Stage": ["Destination"], 11 | "Popular Subjects": ["Quick Start"], 12 | "Type": ["Basic templates"] 13 | }, 14 | "shortDescription": "Consume data from a Kafka topic and sink it to any destination that you configure.", 15 | "DefaultFile": "main.py", 16 | "EntryPoint": "dockerfile", 17 | "RunEntryPoint": "main.py", 18 | "Variables": [ 19 | { 20 | "Name": "input", 21 | "Type": "EnvironmentVariable", 22 | "InputType": "InputTopic", 23 | "Description": "Name of the input topic to listen to.", 24 | "DefaultValue": "transform" 25 | } 26 | ] 27 | } 28 | -------------------------------------------------------------------------------- /resources/python/destinations/starter_destination/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/MQTT/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/MQTT/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/clickhouse-sink-claude-coded/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/mongodb/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/demo_data/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/hivemq/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/s3_source/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/TDengine/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/hivemq/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/influxdb_3/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/websocket/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/others/opc_ua_server/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/influxdb_2/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/influxdb_3/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/opc_ua_client/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/postgres_cdc/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/redis_source/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/simple_csv/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/starter_source/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/elasticsearch/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/destinations/postgres/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] 29 | -------------------------------------------------------------------------------- /resources/python/destinations/redis_dest/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/http_api_sample/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/http_api_source/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/segment_webhook/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/snowplow_source/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /.claude/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "permissions": { 3 | "deny": [ 4 | "Read(../**)", 5 | "Write(../**)", 6 | "Edit(../**)", 7 | "MultiEdit(../**)", 8 | "Read(/**)", 9 | "Write(/**)", 10 | "Edit(/**)", 11 | "MultiEdit(/**)", 12 | "Read(~/**)", 13 | "Write(~/**)", 14 | "Edit(~/**)", 15 | "MultiEdit(~/**)", 16 | "Bash(cd:../**)", 17 | "Bash(cd:/**)", 18 | "Bash(cd:~/**)" 19 | ], 20 | "allow": [ 21 | "Read(./**)", 22 | "Write(./**)", 23 | "Edit(./**)", 24 | "MultiEdit(./**)", 25 | "Glob(./**)", 26 | "Grep(./**)", 27 | "Bash(git:*)", 28 | "Bash(python:*)", 29 | "Bash(pip:*)", 30 | "Bash(ls:*)", 31 | "Bash(pwd:*)", 32 | "Bash(echo:*)", 33 | "Bash(mkdir:./**)", 34 | "Bash(rm:./**)", 35 | "Bash(cp:./**)", 36 | "Bash(mv:./**)" 37 | ] 38 | }, 39 | "additionalDirectories": [] 40 | } -------------------------------------------------------------------------------- /resources/python/destinations/big_query/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] 29 | -------------------------------------------------------------------------------- /resources/python/destinations/slack_notifications/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/confluent_kafka/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] 29 | -------------------------------------------------------------------------------- /resources/python/transformations/event_detection/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /prompts/agents/AppNameSuggesterAgent.md: -------------------------------------------------------------------------------- 1 | 2 | You are a helpful assistant that suggests concise, descriptive application names based on user requirements. 3 | 4 | 5 | 6 | Given the user's description of what they want to build or connect to, suggest a short, meaningful application name. 7 | 8 | 9 | 10 | - Is descriptive and relates to the functionality 11 | - Uses lowercase letters, numbers, and hyphens only 12 | - Is between 3-30 characters long 13 | - Avoids generic names like "app", "test", "demo" 14 | - Clearly indicates the purpose (e.g., "weather-api-source", "postgres-sink", "mqtt-sensor-reader") 15 | 16 | 17 | 18 | {requirements} 19 | 20 | 21 | 22 | {workflow_type} 23 | 24 | 25 | 26 | Provide ONLY the suggested application name, nothing else. No explanation, no alternatives, just the name itself. 27 | -------------------------------------------------------------------------------- /resources/python/destinations/confluent_kafka/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] 29 | -------------------------------------------------------------------------------- /resources/python/destinations/s3-iceberg-destination/main.py: -------------------------------------------------------------------------------- 1 | from quixstreams import Application 2 | from quixstreams.sinks.community.iceberg import IcebergSink, AWSIcebergConfig 3 | import os 4 | 5 | from dotenv import load_dotenv 6 | load_dotenv() 7 | 8 | app = Application(consumer_group="destination-v1", 9 | auto_offset_reset = "earliest", 10 | commit_interval=5) 11 | 12 | input_topic = app.topic(os.environ["input"]) 13 | 14 | iceberg_sink = IcebergSink( 15 | data_catalog_spec="aws_glue", 16 | table_name=os.environ["table_name"], 17 | config=AWSIcebergConfig( 18 | aws_s3_uri=os.environ["AWS_S3_URI"], 19 | aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"], 20 | aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"], 21 | aws_region=os.environ["AWS_REGION"])) 22 | 23 | sdf = app.dataframe(input_topic) 24 | sdf.sink(iceberg_sink) 25 | 26 | if __name__ == "__main__": 27 | app.run(sdf) -------------------------------------------------------------------------------- /resources/python/sources/wikipedia-source-claude-coded/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/transformations/hugging_face_model/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/transformations/starter_transformation/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Define the container's startup command 28 | ENTRYPOINT ["python3", "main.py"] -------------------------------------------------------------------------------- /resources/python/sources/wikipedia-source-claude-coded/app.yaml: -------------------------------------------------------------------------------- 1 | name: wikipedia-sandbox 2 | language: python 3 | variables: 4 | - name: output 5 | inputType: OutputTopic 6 | description: Name of the output topic to write into 7 | defaultValue: output-topic 8 | required: true 9 | - name: WIKIPEDIA_STREAM_URL 10 | inputType: FreeText 11 | description: Wikipedia EventStreams API URL 12 | defaultValue: https://stream.wikimedia.org/v2/stream/recentchange 13 | required: true 14 | - name: TARGET_WIKI 15 | inputType: FreeText 16 | description: Target wiki to filter events (e.g., en.wikipedia.org) 17 | defaultValue: en.wikipedia.org 18 | required: true 19 | - name: MAX_EVENTS 20 | inputType: FreeText 21 | description: Maximum number of events to process (for testing) 22 | defaultValue: "100" 23 | required: false 24 | dockerfile: dockerfile 25 | runEntryPoint: main.py 26 | defaultFile: main.py 27 | libraryItemId: starter-source 28 | -------------------------------------------------------------------------------- /resources/python/sources/flet-input-form/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "flet-input-form", 3 | "name": "Flet Input Form", 4 | "language": "Python", 5 | "tags": { 6 | "Complexity": ["Medium"], 7 | "Technology": ["Quix Streams", "Flet"], 8 | "Pipeline Stage": ["Source"], 9 | "Popular Subjects": ["Data Input", "Manufacturing", "Web Forms"], 10 | "Type": ["Input Form"] 11 | }, 12 | "shortDescription": "A web-based input form template built with Flet that demonstrates how to create forms with validation and send data to Kafka topics using Quix Streams.", 13 | "DefaultFile": "main.py", 14 | "EntryPoint": "dockerfile", 15 | "RunEntryPoint": "main.py", 16 | "IconFile": "icon.png", 17 | "Variables": [ 18 | { 19 | "Name": "output", 20 | "Type": "EnvironmentVariable", 21 | "InputType": "OutputTopic", 22 | "Description": "Name of the output topic to send data to.", 23 | "DefaultValue": "data-input" 24 | } 25 | ] 26 | } 27 | 
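The library item above declares a single `output` topic variable and describes a Flet form that validates input and publishes it with Quix Streams; its actual `main.py` is not reproduced in this dump. As a rough, hedged illustration of the pattern the manifest describes (not the template's real implementation), a minimal sketch might look like the following — the `output` environment variable and port 80 come from the template's own `library.json` and `dockerfile`, while the single "value" text field, the message key, and the JSON payload shape are purely illustrative assumptions:

```python
import json
import os

import flet as ft
from quixstreams import Application

# Quix Streams application; broker settings are provided by the platform environment.
app = Application()
# Topic name comes from the "output" variable declared in library.json.
topic = app.topic(os.environ["output"])


def main(page: ft.Page):
    page.title = "Input form"
    value_field = ft.TextField(label="Value")  # hypothetical single form field

    def submit(e):
        # Basic validation: require a non-empty value before producing.
        if not value_field.value:
            value_field.error_text = "Value is required"
            page.update()
            return
        # For simplicity this sketch opens a producer per submission;
        # a real template would more likely keep one long-lived producer.
        with app.get_producer() as producer:
            producer.produce(
                topic=topic.name,
                key="form",  # illustrative key
                value=json.dumps({"value": value_field.value}),
            )
        value_field.value = ""
        value_field.error_text = None
        page.update()

    page.add(value_field, ft.ElevatedButton("Submit", on_click=submit))


if __name__ == "__main__":
    # Serve as a web app on port 80 to match EXPOSE 80 in the template's dockerfile.
    ft.app(target=main, view=ft.AppView.WEB_BROWSER, port=80)
```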
-------------------------------------------------------------------------------- /resources/python/destinations/flet-waveform/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "flet-waveform", 3 | "name": "Flet Waveform", 4 | "language": "Python", 5 | "tags": { 6 | "Complexity": ["Medium"], 7 | "Technology": ["Quix Streams", "Flet"], 8 | "Pipeline Stage": ["Destination"], 9 | "Popular Subjects": ["Data Visualization", "Real-time"], 10 | "Type": ["Visualization"] 11 | }, 12 | "shortDescription": "Real-time web-based data visualization using Flet framework to display streaming sensor data with interactive charts.", 13 | "DefaultFile": "main.py", 14 | "EntryPoint": "dockerfile", 15 | "RunEntryPoint": "main.py", 16 | "IconFile": "icon.png", 17 | "Variables": [ 18 | { 19 | "Name": "input", 20 | "Type": "EnvironmentVariable", 21 | "InputType": "InputTopic", 22 | "Description": "Name of the input topic to listen to (expects data with 'temperature' and 'humidity' fields).", 23 | "DefaultValue": "sensor-data" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /resources/python/transformations/matlab_runner/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "matlab-runner", 3 | "name": "Matlab Runner", 4 | "language": "Python", 5 | "tags": { 6 | "Complexity": ["Medium"], 7 | "Pipeline Stage": ["Transformation"] 8 | }, 9 | "shortDescription": "Import and run Matlab functions within Quix", 10 | "DefaultFile": "main.py", 11 | "EntryPoint": "dockerfile", 12 | "RunEntryPoint": "main.py", 13 | "Variables": [ 14 | { 15 | "Name": "input", 16 | "Type": "EnvironmentVariable", 17 | "InputType": "InputTopic", 18 | "Description": "This is the raw data input topic", 19 | "DefaultValue": "", 20 | "Required": true 21 | }, 22 | { 23 | "Name": "output", 24 | "Type": "EnvironmentVariable", 25 | "InputType": "OutputTopic", 26 | "Description": "This is the output for the hugging face model score", 27 | "DefaultValue": "hugging-face-output", 28 | "Required": true 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /resources/python/destinations/big_query/utils.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | def format_nanoseconds(nanoseconds: int): 4 | dt = datetime.fromtimestamp(nanoseconds / 1e9) 5 | return '{}.{:09.0f}'.format(dt.strftime('%Y-%m-%dT%H:%M:%S'), nanoseconds % 1e9) 6 | 7 | 8 | def flatten_json(nested_json, separator='_'): 9 | """ 10 | Flatten a nested JSON object. 11 | 12 | :param nested_json: The JSON object to flatten. 13 | :param separator: The separator to use between nested keys. 14 | :return: A flattened JSON object. 
15 | """ 16 | out = {} 17 | 18 | def flatten(x, name=''): 19 | if type(x) is dict: 20 | for a in x: 21 | flatten(x[a], name + a + separator) 22 | elif type(x) is list: 23 | i = 0 24 | for a in x: 25 | flatten(a, name + str(i) + separator) 26 | i += 1 27 | else: 28 | out[name[:-1]] = x 29 | 30 | flatten(nested_json) 31 | return out 32 | -------------------------------------------------------------------------------- /resources/python/others/opc_ua_server/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "opc-ua-server", 3 | "name": "OPC UA Server", 4 | "language": "Python", 5 | "tags": { 6 | "Pipeline Stage": ["Server"], 7 | "Category": ["External APIs"], 8 | "Type": ["Auxiliary Services"] 9 | }, 10 | "shortDescription": "This is an OPC UA server for testing purposes.", 11 | "DefaultFile": "main.py", 12 | "EntryPoint": "dockerfile", 13 | "RunEntryPoint": "main.py", 14 | "Variables": [], 15 | "DeploySettings": { 16 | "DeploymentType": "Service", 17 | "CpuMillicores": 200, 18 | "MemoryInMb": 500, 19 | "Replicas": 1, 20 | "PublicAccess": false, 21 | "ValidateConnection": false, 22 | "Network": { 23 | "ServiceName": "intopcserver", 24 | "Ports": 25 | [ 26 | { 27 | "Port": 4840, 28 | "TargetPort": 4840 29 | } 30 | ] 31 | } 32 | } 33 | } -------------------------------------------------------------------------------- /resources/python/sources/flet-input-form/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 
23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Expose the port that Flet uses 28 | EXPOSE 80 29 | 30 | # Define the container's startup command 31 | ENTRYPOINT ["python3", "main.py"] 32 | 33 | 34 | -------------------------------------------------------------------------------- /resources/python/sources/simple_csv/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "simple-csv-source", 3 | "name": "CSV Source", 4 | "language": "Python", 5 | "tags": { 6 | "Technology": ["Quix Streams"], 7 | "Pipeline Stage": ["Source"], 8 | "Popular Subjects": ["Quick Start"], 9 | "Type": ["Demos"] 10 | }, 11 | "shortDescription": "Read data from a CSV file and publish it to a Kafka topic.", 12 | "DefaultFile": "main.py", 13 | "EntryPoint": "dockerfile", 14 | "RunEntryPoint": "main.py", 15 | "Variables": [ 16 | { 17 | "Name": "output", 18 | "Type": "EnvironmentVariable", 19 | "InputType": "OutputTopic", 20 | "Description": "Name of the output topic to write into", 21 | "DefaultValue": "csv-data", 22 | "Required": true 23 | } 24 | ], 25 | "DeploySettings": { 26 | "DeploymentType": "Job", 27 | "CpuMillicores": 200, 28 | "MemoryInMb": 200, 29 | "Replicas": 1, 30 | "PublicAccess": false, 31 | "ValidateConnection": false 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /resources/python/destinations/flet-waveform/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables for non-interactive setup and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 \ 7 | PYTHONPATH="/app" 8 | 9 | # Build argument for setting the main app path 10 | ARG MAINAPPPATH=. 11 | 12 | # Set working directory inside the container 13 | WORKDIR /app 14 | 15 | # Copy requirements to leverage Docker cache 16 | COPY "${MAINAPPPATH}/requirements.txt" "${MAINAPPPATH}/requirements.txt" 17 | 18 | # Install dependencies without caching 19 | RUN pip install --no-cache-dir -r "${MAINAPPPATH}/requirements.txt" 20 | 21 | # Copy entire application into container 22 | COPY . . 23 | 24 | # Set working directory to main app path 25 | WORKDIR "/app/${MAINAPPPATH}" 26 | 27 | # Expose the port that Flet uses 28 | EXPOSE 80 29 | 30 | # Define the container's startup command 31 | ENTRYPOINT ["python3", "main.py"] 32 | 33 | 34 | -------------------------------------------------------------------------------- /resources/python/sources/environment_source/dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.5-slim-bookworm 2 | 3 | # Set environment variables to non-interactive and unbuffered output 4 | ENV DEBIAN_FRONTEND=noninteractive \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONIOENCODING=UTF-8 7 | 8 | # Update the package list and install Git. 9 | # Remove after proper QuixStreams release. 
10 | RUN apt-get update && \ 11 | apt-get install -y git && \ 12 | apt-get clean 13 | 14 | # Set the working directory inside the container 15 | WORKDIR /app 16 | 17 | # Copy only the requirements file(s) to leverage Docker cache 18 | # Assuming all requirements files are in the root or subdirectories 19 | COPY ./requirements.txt ./ 20 | 21 | # Install dependencies 22 | # Adding `--no-cache-dir` to avoid storing unnecessary files and potentially reduce image size 23 | RUN pip install --no-cache-dir -r requirements.txt 24 | 25 | # Copy the rest of the application 26 | COPY . . 27 | 28 | # Set the command to run your application 29 | ENTRYPOINT ["python3", "main.py"] 30 | -------------------------------------------------------------------------------- /resources/python/sources/s3_source/main.py: -------------------------------------------------------------------------------- 1 | from quixstreams import Application 2 | from quixstreams.sources.community.file import FileSource 3 | from quixstreams.sources.community.file.origins import S3Origin 4 | 5 | import os 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | app = Application() 10 | 11 | # create an output topic 12 | output_topic = app.topic(os.environ['output']) 13 | 14 | # describe the file origin and access credentials 15 | origin = S3Origin( 16 | bucket=os.environ['S3_BUCKET'], 17 | aws_access_key_id=os.environ['S3_ACCESS_KEY_ID'], 18 | aws_secret_access_key=os.environ['S3_SECRET'], 19 | region_name=os.environ['S3_REGION'], 20 | ) 21 | 22 | # create a file source, describing the file path and formats 23 | source = FileSource( 24 | directory=os.environ['S3_FOLDER_PATH'], 25 | origin=origin, 26 | format=os.environ['S3_FILE_FORMAT'], 27 | compression=os.environ['S3_FILE_COMPRESSION'], 28 | ) 29 | 30 | app.add_source(source) 31 | 32 | if __name__ == "__main__": 33 | app.run() -------------------------------------------------------------------------------- /resources/python/sources/starter_source/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "starter-source", 3 | "name": "Starter Source", 4 | "language": "Python", 5 | "IsHighlighted": true, 6 | "DisplayOrder": 1, 7 | "tags": { 8 | "Technology": ["Quix Streams"], 9 | "Pipeline Stage": ["Source"], 10 | "Popular Subjects": ["Quick Start"], 11 | "Type": ["Demos"] 12 | }, 13 | "shortDescription": "Publish hard-coded lines of JSON data to a Kafka topic.", 14 | "DefaultFile": "main.py", 15 | "EntryPoint": "dockerfile", 16 | "RunEntryPoint": "main.py", 17 | "Variables": [ 18 | { 19 | "Name": "output", 20 | "Type": "EnvironmentVariable", 21 | "InputType": "OutputTopic", 22 | "Description": "Name of the output topic to write into", 23 | "DefaultValue": "csv-data", 24 | "Required": true 25 | } 26 | ], 27 | "DeploySettings": { 28 | "DeploymentType": "Job", 29 | "CpuMillicores": 100, 30 | "MemoryInMb": 150, 31 | "Replicas": 1, 32 | "PublicAccess": false, 33 | "ValidateConnection": false 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /resources/python/sources/sql_cdc/dockerfile: -------------------------------------------------------------------------------- 1 | FROM quixanalytics/legacy-pythonbaseimage:1.2.3-ubuntu20.04 2 | 3 | WORKDIR /app 4 | 5 | RUN apt-get update && \ 6 | apt-get install python-dev gcc -y && \ 7 | apt-get install lsb-core -y && \ 8 | curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \ 9 | curl 
https://packages.microsoft.com/config/ubuntu/$(lsb_release -rs)/prod.list > /etc/apt/sources.list.d/mssql-release.list && \ 10 | apt-get update && \ 11 | ACCEPT_EULA=Y apt-get install -y msodbcsql18 && \ 12 | # optional: for bcp and sqlcmd && \ 13 | ACCEPT_EULA=Y apt-get install -y mssql-tools18 && \ 14 | echo 'export PATH="$PATH:/opt/mssql-tools18/bin"' >> ~/.bashrc && \ 15 | . ~/.bashrc && \ 16 | # optional: for unixODBC development headers && \ 17 | apt-get install -y unixodbc-dev 18 | 19 | 20 | COPY . . 21 | RUN find | grep requirements.txt | xargs -I '{}' python3 -m pip install -r '{}' --extra-index-url https://pkgs.dev.azure.com/quix-analytics/53f7fe95-59fe-4307-b479-2473b96de6d1/_packaging/public/pypi/simple/ 22 | ENTRYPOINT python3 main.py -------------------------------------------------------------------------------- /resources/python/destinations/big_query/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setup_logger import logger 3 | 4 | from quixstreams import Application 5 | 6 | # for local dev, load env vars from a .env file 7 | from dotenv import load_dotenv 8 | load_dotenv() 9 | 10 | from big_query_sink import BigQuerySink 11 | 12 | TABLE_NAME = os.environ["TABLE_NAME"] 13 | PROJECT_ID = os.environ["PROJECT_ID"] 14 | DATASET_ID = os.environ["DATASET_ID"] 15 | DATASET_LOCATION = os.environ["DATASET_LOCATION"] 16 | SERVICE_ACCOUNT_JSON = os.environ["SERVICE_ACCOUNT_JSON"] 17 | 18 | big_query_sink = BigQuerySink( 19 | PROJECT_ID, 20 | DATASET_ID, 21 | DATASET_LOCATION, 22 | TABLE_NAME, 23 | SERVICE_ACCOUNT_JSON, 24 | logger) 25 | 26 | big_query_sink.connect() 27 | 28 | app = Application( 29 | consumer_group=os.environ["CONSUMER_GROUP"], 30 | auto_offset_reset = "earliest", 31 | commit_interval=1, 32 | commit_every=100) 33 | 34 | input_topic = app.topic(os.environ["input"]) 35 | 36 | sdf = app.dataframe(input_topic) 37 | sdf.sink(big_query_sink) 38 | 39 | if __name__ == "__main__": 40 | app.run(sdf) 41 | -------------------------------------------------------------------------------- /prompts/tasks/source_schema_analysis_retry.md: -------------------------------------------------------------------------------- 1 | I previously analyzed this data from {technology_name} and provided this analysis: 2 | 3 | ```markdown 4 | {previous_analysis} 5 | ``` 6 | 7 | However, the user has provided the following correction/feedback: 8 | "{user_feedback}" 9 | 10 | Please provide an updated and corrected schema analysis based on this feedback. 11 | 12 | Original Sample Data: 13 | {sample_data} 14 | 15 | Please provide a comprehensive analysis including: 16 | 1. **Data Structure**: Describe the overall format and structure 17 | 2. **Field Analysis**: List all fields/columns with their data types 18 | 3. **Data Patterns**: Identify any patterns, ranges, or constraints 19 | 4. **Key Fields**: Identify primary keys, timestamps, or important identifiers 20 | 5. **Data Quality**: Note any missing values, inconsistencies, or issues 21 | 6. **Kafka Message Format**: Suggest how this data should be structured as Kafka messages in JSON format. 22 | 7. **Transformation Needs**: Identify any data transformations that might be needed 23 | 24 | Focus on addressing the user's feedback while providing actionable insights for code generation. 
-------------------------------------------------------------------------------- /resources/python/sources/http_api_source/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "web-api-gateway", 3 | "name": "HTTP API Source", 4 | "language": "Python", 5 | "IsHighlighted": true, 6 | "DisplayOrder": 6, 7 | "tags": { 8 | "Pipeline Stage": ["Source"], 9 | "Popular Subjects": ["Quick Start"], 10 | "Category": ["External APIs"], 11 | "Type": ["Connectors"] 12 | }, 13 | "shortDescription": "Run a Flask web gateway and use it to publish to a Kafka topic via HTTP POST requests.", 14 | "DefaultFile": "main.py", 15 | "EntryPoint": "dockerfile", 16 | "RunEntryPoint": "main.py", 17 | "IconFile": "icon.png", 18 | "Variables": [ 19 | { 20 | "Name": "output", 21 | "Type": "EnvironmentVariable", 22 | "InputType": "OutputTopic", 23 | "Description": "This is the output topic for hello world data", 24 | "DefaultValue": "http-source", 25 | "Required": true 26 | } 27 | ], 28 | "DeploySettings": { 29 | "DeploymentType": "Service", 30 | "CpuMillicores": 200, 31 | "MemoryInMb": 500, 32 | "Replicas": 1, 33 | "PublicAccess": true, 34 | "UrlPrefix": "gateway" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /workflow_tools/phases/sink/phase_sink_knowledge.py: -------------------------------------------------------------------------------- 1 | # phase_sink_knowledge.py - Sink Knowledge Gathering Phase 2 | 3 | from agents import RunConfig 4 | from workflow_tools.contexts import WorkflowContext 5 | from workflow_tools.services.knowledge_gatherer import KnowledgeGatheringService 6 | from workflow_tools.phases.base.base_phase import BasePhase, PhaseResult 7 | 8 | 9 | class SinkKnowledgePhase(BasePhase): 10 | """Handles sink knowledge gathering and application setup using unified service.""" 11 | 12 | phase_name = "sink_knowledge" 13 | phase_description = "Gather sink knowledge and setup application" 14 | 15 | def __init__(self, context: WorkflowContext, run_config=None, debug_mode: bool = False): 16 | super().__init__(context, debug_mode) 17 | self.run_config = run_config or RunConfig(workflow_name="Create Quix Sink (V2)") 18 | self.knowledge_service = KnowledgeGatheringService(context, self.run_config, debug_mode) 19 | 20 | async def execute(self) -> PhaseResult: 21 | """Gather knowledge about destination technology and set up application.""" 22 | return await self.knowledge_service.gather_knowledge("sink") -------------------------------------------------------------------------------- /resources/python/transformations/hugging_face_model/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from quixstreams import Application 3 | from transformers import pipeline 4 | import json 5 | 6 | # Load environment variables (useful when working locally) 7 | from dotenv import load_dotenv 8 | load_dotenv() 9 | 10 | app = Application(consumer_group="hugging-face-model-v1", auto_offset_reset="earliest") 11 | 12 | input_topic = app.topic(os.environ["input"]) 13 | output_topic = app.topic(os.environ["output"]) 14 | 15 | # Download the Hugging Face model (list of available models here: https://huggingface.co/models) 16 | # suggested default is distilbert-base-uncased-finetuned-sst-2-english 17 | model_name = os.environ["HuggingFaceModel"] 18 | print("Downloading {0} model...".format(model_name)) 19 | model_pipeline = pipeline(model=model_name) 20 | 21 | sdf = 
app.dataframe(input_topic) 22 | 23 | # Assuming the input data has a 'text' column that you want to process with the model 24 | sdf['model_result'] = sdf['text'].apply(lambda t: json.dumps(model_pipeline(t))) 25 | 26 | # Send the processed data to the output topic 27 | sdf = sdf.to_topic(output_topic) 28 | 29 | if __name__ == "__main__": 30 | app.run(sdf) -------------------------------------------------------------------------------- /resources/python/others/jupyterlab/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "jupyterlab", 3 | "name": "JupyterLab", 4 | "language": "docker", 5 | "tags": { 6 | "Category": ["Data Exploration"], 7 | "Type": ["Auxiliary Services"] 8 | }, 9 | "shortDescription": "Run JupyterLab in your pipeline.", 10 | "DefaultFile": "dockerfile", 11 | "EntryPoint": "dockerfile", 12 | "IconFile": "icon.png", 13 | "DeploySettings": { 14 | "DeploymentType": "Service", 15 | "CpuMillicores": 500, 16 | "MemoryInMb": 2000, 17 | "Replicas": 1, 18 | "PublicAccess": true, 19 | "UrlPrefix": "jupyterlab", 20 | "State": { 21 | "Enabled": true, 22 | "Size": 1 23 | }, 24 | "Network": { 25 | "ServiceName": "jupyterlab", 26 | "Ports": 27 | [ 28 | { 29 | "Port": 80, 30 | "TargetPort": 8888 31 | } 32 | ] 33 | } 34 | }, 35 | "Variables": [ 36 | { 37 | "Name": "JUPYTER_PASSWORD", 38 | "Type": "EnvironmentVariable", 39 | "InputType": "Secret", 40 | "Description": "The allowed password for connecting to JupyterLab", 41 | "Required": true 42 | } 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /resources/python/transformations/starter_transformation/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "starter-transformation", 3 | "name": "Starter Transformation", 4 | "language": "Python", 5 | "IsHighlighted": true, 6 | "DisplayOrder": 2, 7 | "tags": { 8 | "Complexity": ["Easy"], 9 | "Technology": ["Quix Streams"], 10 | "Pipeline Stage": ["Transformation"], 11 | "Popular Subjects": ["Quick Start"], 12 | "Type": ["Basic templates"] 13 | }, 14 | "shortDescription": "Consume data from a topic, apply a simple transformation and publish the result to an output topic.", 15 | "DefaultFile": "main.py", 16 | "EntryPoint": "dockerfile", 17 | "RunEntryPoint": "main.py", 18 | "Variables": [ 19 | { 20 | "Name": "input", 21 | "Type": "EnvironmentVariable", 22 | "InputType": "InputTopic", 23 | "Description": "Name of the input topic to listen to.", 24 | "DefaultValue": "csv-data" 25 | }, 26 | { 27 | "Name": "output", 28 | "Type": "EnvironmentVariable", 29 | "InputType": "OutputTopic", 30 | "Description": "Name of the output topic to write to.", 31 | "DefaultValue": "transform" 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /workflow_tools/phases/source/phase_source_knowledge.py: -------------------------------------------------------------------------------- 1 | # phase_source_knowledge.py - Source Knowledge Gathering Phase 2 | 3 | from agents import RunConfig 4 | from workflow_tools.contexts import WorkflowContext 5 | from workflow_tools.services.knowledge_gatherer import KnowledgeGatheringService 6 | from workflow_tools.phases.base.base_phase import BasePhase, PhaseResult 7 | 8 | 9 | class SourceKnowledgePhase(BasePhase): 10 | """Handles source knowledge gathering and application setup using unified service.""" 11 | 12 | phase_name = "source_knowledge" 13 | 
phase_description = "Gather source knowledge and setup application" 14 | 15 | def __init__(self, context: WorkflowContext, run_config=None, debug_mode: bool = False): 16 | super().__init__(context, debug_mode) 17 | self.run_config = run_config or RunConfig(workflow_name="Create Quix Source (V1)") 18 | self.knowledge_service = KnowledgeGatheringService(context, self.run_config, debug_mode) 19 | 20 | async def execute(self) -> PhaseResult: 21 | """Gather knowledge about source technology and set up application.""" 22 | return await self.knowledge_service.gather_knowledge("source") -------------------------------------------------------------------------------- /resources/python/destinations/websocket/test.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import websockets 3 | import base64 4 | 5 | async def connect(): 6 | # NOTE: If using the URL from the websocket server deployed in Quix Cloud, replace the https:// protocol with wss:// 7 | uri = "YOUR WEBSOCKET URL" 8 | username = "admin" # set to your username 9 | password = "admin" # set to your password 10 | 11 | # Encode credentials 12 | credentials = f"{username}:{password}" 13 | encoded_credentials = base64.b64encode(credentials.encode('utf-8')).decode('utf-8') 14 | 15 | # Define headers 16 | headers = { 17 | "Authorization": f"Basic {encoded_credentials}" 18 | } 19 | 20 | try: 21 | async with websockets.connect(uri, extra_headers=headers) as websocket: 22 | print("Connected to the WebSocket server") 23 | 24 | while True: 25 | response = await websocket.recv() 26 | print(f"Received from server: {response}") 27 | 28 | except Exception as e: 29 | print(f"An error occurred while connecting: {e}") 30 | 31 | # Run the connect function 32 | asyncio.get_event_loop().run_until_complete(connect()) -------------------------------------------------------------------------------- /resources/python/sources/http_api_sample/README.md: -------------------------------------------------------------------------------- 1 | # HTTP API Source 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/sources/http_api_gateway) demonstrates how to run a Flask HTTP API within Quix. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project 8 | 9 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 10 | 11 | You can test your endpoint by sending a message via curl: 12 | `curl -X POST -H "Content-Type: application/json" -d '{"sessionId": "000001", "name": "Tony Hawk", "purchase": "skateboard" }' https:///data/ 13 | ` 14 | 15 | ## Contribute 16 | 17 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 18 | 19 | ## Open source 20 | 21 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 22 | 23 | Please star us and mention us on social to show your appreciation. 
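For reference, the curl test above can also be reproduced from Python. A minimal sketch using `requests` — the host portion of the URL is a placeholder for your deployment's public URL (it is elided in the curl example):

```python
import requests

# Placeholder host: substitute the public URL of your deployed gateway,
# keeping the /data/ path used by the curl example above.
url = "https://<your-deployment-url>/data/"

payload = {"sessionId": "000001", "name": "Tony Hawk", "purchase": "skateboard"}

response = requests.post(url, json=payload, timeout=10)
print(response.status_code, response.text)
```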
24 | -------------------------------------------------------------------------------- /resources/python/destinations/clickhouse-sink-claude-coded/app.yaml: -------------------------------------------------------------------------------- 1 | name: clickhouse-wikipedia-sink 2 | language: python 3 | variables: 4 | - name: input 5 | inputType: InputTopic 6 | description: Name of the input topic to listen to. 7 | defaultValue: wikipedia-data 8 | - name: CLICKHOUSE_HOST 9 | inputType: FreeText 10 | description: ClickHouse server hostname 11 | defaultValue: quix-clickhouse 12 | - name: CLICKHOUSE_PORT 13 | inputType: FreeText 14 | description: ClickHouse server port 15 | defaultValue: '8123' 16 | - name: CLICKHOUSE_DATABASE 17 | inputType: FreeText 18 | description: ClickHouse database name 19 | defaultValue: default 20 | - name: CLICKHOUSE_USERNAME 21 | inputType: FreeText 22 | description: ClickHouse username 23 | defaultValue: clickadmin 24 | - name: CLICKHOUSE_PASSWORD 25 | inputType: Secret 26 | description: ClickHouse password 27 | defaultValue: CLICKHOUSE_PW_KEY 28 | - name: CLICKHOUSE_TABLE 29 | inputType: FreeText 30 | description: ClickHouse table name for Wikipedia page edit data 31 | defaultValue: en_wikipedia_pageedits 32 | dockerfile: dockerfile 33 | runEntryPoint: main.py 34 | defaultFile: main.py 35 | libraryItemId: starter-destination 36 | -------------------------------------------------------------------------------- /resources/python/transformations/event_detection/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "event-detection-transformation", 3 | "name": "Event Detection Transformation", 4 | "language": "Python", 5 | "IsHighlighted": true, 6 | "DisplayOrder": 5, 7 | "tags": { 8 | "Complexity": ["Easy"], 9 | "Technology": ["Quix Streams"], 10 | "Pipeline Stage": ["Transformation"], 11 | "Type": ["Code samples"], 12 | "Popular Subjects": ["Quick Start"] 13 | }, 14 | "shortDescription": "Consume data from a topic, filter it and publish the result to an output topic.", 15 | "DefaultFile": "quix_function.py", 16 | "EntryPoint": "dockerfile", 17 | "RunEntryPoint": "main.py", 18 | "Variables": [ 19 | { 20 | "Name": "input", 21 | "Type": "EnvironmentVariable", 22 | "InputType": "InputTopic", 23 | "Description": "This is the input topic for f1 data", 24 | "DefaultValue": "f1-data", 25 | "Required": true 26 | }, 27 | { 28 | "Name": "output", 29 | "Type": "EnvironmentVariable", 30 | "InputType": "OutputTopic", 31 | "Description": "This is the output topic for hard braking events", 32 | "DefaultValue": "hard-braking", 33 | "Required": true 34 | } 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /resources/python/others/jupyterlab/README.md: -------------------------------------------------------------------------------- 1 | # JupyterLab 2 | 3 | This sample demonstrates how to deploy a JupyterLab instance so you can run 4 | Jupyter notebooks from the platform. 5 | 6 | ## How to Run 7 | 8 | 1. Log in or sign up at [Quix](https://portal.platform.quix.io/signup?xlink=github) and navigate to the Code Samples section. 9 | 2. Click **Deploy** to launch a pre-built container. 10 | 3. Fill in the required environment variables for your JupyterLab instance. 11 | 4. Enable state, otherwise changes will be lost on restart. Please note, the necessary storage type may not be supported on all Quix Platforms. 
12 | 13 | For more configuration options and details, refer to the [JupyterLab documentation](https://jupyterlab.readthedocs.io/). 14 | 15 | 16 | ## Contribute 17 | 18 | Feel free to fork this project on the [GitHub](https://github.com/quixio/quix-samples) repository and contribute your enhancements. Any accepted contributions will be attributed accordingly. 19 | 20 | ## License & Support 21 | 22 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. Remember, this image is built on a community-maintained JupyterLab distribution and is offered as-is, with no JupyterLab-specific support from Quix. 23 | -------------------------------------------------------------------------------- /resources/python/destinations/slack_notifications/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "slack-destination", 3 | "name": "Slack Sink", 4 | "language": "Python", 5 | "tags": { 6 | "Pipeline Stage": ["Destination"], 7 | "Type": ["Connectors"], 8 | "Category": ["Alerting", "External APIs"] 9 | }, 10 | "shortDescription": "Consume data from a Kafka topic and send Slack notifications based on your matching criteria.", 11 | "DefaultFile": "quix_function.py", 12 | "EntryPoint": "dockerfile", 13 | "RunEntryPoint": "main.py", 14 | "IconFile": "icon.png", 15 | "Variables": [ 16 | { 17 | "Name": "input", 18 | "Type": "EnvironmentVariable", 19 | "InputType": "InputTopic", 20 | "Description": "Name of the input topic to listen to.", 21 | "DefaultValue": "hard-braking", 22 | "Required": true 23 | }, 24 | { 25 | "Name": "webhook_url", 26 | "Type": "EnvironmentVariable", 27 | "InputType": "FreeText", 28 | "Description": "The webhook url to send notifications to", 29 | "Required": true 30 | } 31 | ], 32 | "DeploySettings": { 33 | "DeploymentType": "Service", 34 | "CpuMillicores": 200, 35 | "MemoryInMb": 200, 36 | "Replicas": 1, 37 | "PublicAccess": false, 38 | "ValidateConnection": false 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /resources/python/sources/demo_data/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "demo-data-source", 3 | "name": "Demo Data Source", 4 | "language": "Python", 5 | "IsHighlighted": true, 6 | "DisplayOrder": 4, 7 | "tags": { 8 | "Technology": ["Quix Streams"], 9 | "Pipeline Stage": ["Source"], 10 | "Popular Subjects": ["Quick Start"], 11 | "Type": ["Demos"] 12 | }, 13 | "shortDescription": "Publish F1 telemetry data from a CSV file to a Kafka topic.", 14 | "longDescription": "Publish F1 Telemetry data, recorded from an F1 game session, into a topic.
You'll have access to the speed, acceleration, brake usage and other detailed data from an F1 game, rebroadcast as if in real time.", 15 | "DefaultFile": "main.py", 16 | "EntryPoint": "dockerfile", 17 | "RunEntryPoint": "main.py", 18 | "Variables": [ 19 | { 20 | "Name": "output", 21 | "Type": "EnvironmentVariable", 22 | "InputType": "OutputTopic", 23 | "Description": "Name of the output topic to write into", 24 | "DefaultValue": "f1-data", 25 | "Required": true 26 | } 27 | ], 28 | "DeploySettings": { 29 | "DeploymentType": "Job", 30 | "CpuMillicores": 200, 31 | "MemoryInMb": 200, 32 | "Replicas": 1, 33 | "PublicAccess": false, 34 | "ValidateConnection": false 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /prompts/tasks/log_analysis_task.md: -------------------------------------------------------------------------------- 1 | Analyze these execution logs to determine if the test was successful. 2 | 3 | TEST OBJECTIVE: 4 | {test_objective} 5 | 6 | WORKFLOW TYPE: {workflow_type} 7 | 8 | {original_instructions_section}{code_section}EXECUTION LOGS: 9 | {logs} 10 | 11 | Please analyze these logs and determine: 12 | 1. Was the test successful? (YES/NO) 13 | 2. What is your confidence level? (HIGH/MEDIUM/LOW) 14 | 3. What key indicators led to your conclusion? 15 | 4. Provide a brief reasoning for your determination 16 | 5. If unsuccessful, what recommendation do you have? 17 | 18 | IMPORTANT: You must provide a structured JSON response with the following format: 19 | {{ 20 | "success": true/false, 21 | "confidence": "high/medium/low", 22 | "reasoning": "Brief explanation of your determination", 23 | "key_indicators": ["indicator1", "indicator2", ...], 24 | "recommendation": "Optional recommendation if test failed" 25 | }} 26 | 27 | Focus on understanding the actual behavior, not just looking for error keywords. 
For example: 28 | - If data was successfully retrieved/processed, it's likely successful even without explicit success messages 29 | - If the code achieved its objective (e.g., fetched data, connected to service), consider it successful 30 | - Look for patterns indicating normal operation vs actual failures -------------------------------------------------------------------------------- /resources/python/sources/segment_webhook/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "segment-webhook-source", 3 | "name": "Segment Source", 4 | "language": "Python", 5 | "tags": { 6 | "Pipeline Stage": ["Source"], 7 | "Type": ["Connectors"], 8 | "Category": ["External APIs"] 9 | }, 10 | "shortDescription": "Read event data from Segment and publish it to a Kafka topic.", 11 | "DefaultFile": "main.py", 12 | "EntryPoint": "dockerfile", 13 | "RunEntryPoint": "main.py", 14 | "IconFile": "icon.jpg", 15 | "Variables": [ 16 | { 17 | "Name": "output", 18 | "Type": "EnvironmentVariable", 19 | "InputType": "OutputTopic", 20 | "Description": "Name of the output topic to stream into", 21 | "DefaultValue": "segment-data", 22 | "Required": true 23 | }, 24 | { 25 | "Name": "shared_secret", 26 | "Type": "EnvironmentVariable", 27 | "InputType": "FreeText", 28 | "Description": "The secret you configured in Segment", 29 | "DefaultValue": "", 30 | "Required": true 31 | } 32 | ], 33 | "DeploySettings": { 34 | "DeploymentType": "Service", 35 | "CpuMillicores": 200, 36 | "MemoryInMb": 200, 37 | "Replicas": 1, 38 | "ValidateConnection": true, 39 | "PublicAccess": true, 40 | "UrlPrefix": "segment-webhook" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /prompts/agents/SinkSchemaAnalyzerAgent.md: -------------------------------------------------------------------------------- 1 | 2 | You are an expert data analyst specializing in Kafka message schema analysis. 3 | 4 | 5 | 6 | Analyze a sample of messages from a Kafka topic and describe the data structure in a clear, human-readable markdown format. 7 | 8 | 9 | 10 | Your description should include: 11 | - An overview of the general structure 12 | - A breakdown of each field, its likely data type (e.g., string, integer, float, boolean, ISO 8601 timestamp), and a brief description 13 | - Notes on any fields that appear to be optional or have inconsistent values 14 | 15 | 16 | 17 | IMPORTANT: Pay special attention to where the actual data payload is located. Sometimes it's directly in the message, sometimes it's nested in a 'value' field, and sometimes the 'value' field contains a JSON string that needs parsing. 18 | Be very explicit about the message structure and how to access the actual data fields. 19 | 20 | 21 | 22 | To be extra safe: Include a sample of one message in the schema analysis with the prefix: HERE IS A MESSAGE EXAMPLE: 23 | 24 | 25 | 26 | Your output will be saved as documentation for the next step, so make it clear and well-structured. 27 | -------------------------------------------------------------------------------- /resources/python/transformations/matlab_runner/README.md: -------------------------------------------------------------------------------- 1 | # Matlab Runner 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/transformations/matlab_runner) demonstrates how to import the Matlab engine and run a Matlab process from Quix. 
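As a rough illustration of what "import the Matlab engine" involves (this is not the sample's actual `main.py`), the MATLAB Engine API for Python is typically used like this, assuming the engine and a reachable licence are available inside the container:

```python
import matlab.engine

# Start a MATLAB session; requires the MATLAB Engine API for Python and a
# valid licence (see the MLM_LICENSE_FILE note further down this README).
eng = matlab.engine.start_matlab()

# Call a MATLAB built-in; results come back as Python-compatible values.
result = eng.sqrt(4.0)
print(result)  # 2.0

eng.quit()
```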
4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 8 | 9 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 10 | 11 | `important`: Update the license server on line 13 of the docker file to reflect your Matlab license server. 12 | 13 | `ENV MLM_LICENSE_FILE=27000@your-license-server` 14 | 15 | 16 | ## Environment variables 17 | 18 | The code sample uses the following environment variables: 19 | 20 | - **input**: Name of the input topic to listen to. 21 | - **output**: Name of the output topic to write to. 22 | 23 | ## Contribute 24 | 25 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 26 | 27 | ## Open source 28 | 29 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 30 | 31 | Please star us and mention us on social to show your appreciation. -------------------------------------------------------------------------------- /workflow_tools/phases/source/phase_source_prerequisites.py: -------------------------------------------------------------------------------- 1 | # phase_source_prerequisites.py - Source Prerequisites Collection Phase 2 | 3 | from workflow_tools.common import WorkflowContext 4 | from workflow_tools.phases.base.base_phase import BasePhase, PhaseResult 5 | from workflow_tools.services.prerequisites_collector import PrerequisitesCollector 6 | 7 | 8 | class SourcePrerequisitesCollectionPhase(BasePhase): 9 | """Handles workspace, topic selection, and source technology selection for source workflows.""" 10 | 11 | phase_name = "source_prerequisites" 12 | phase_description = "Collect source prerequisites" 13 | 14 | def __init__(self, context: WorkflowContext, run_config=None, debug_mode: bool = False): 15 | super().__init__(context, debug_mode) 16 | self.run_config = run_config 17 | self.prerequisites_collector = PrerequisitesCollector(context, debug_mode, run_config) 18 | 19 | async def execute(self) -> PhaseResult: 20 | """Execute the source prerequisites collection using centralized service.""" 21 | success = await self.prerequisites_collector.collect_prerequisites("source") 22 | 23 | if success: 24 | return PhaseResult(success=True, message="Source prerequisites collected successfully") 25 | else: 26 | return PhaseResult(success=False, message="Failed to collect source prerequisites") -------------------------------------------------------------------------------- /resources/python/sources/opc_ua_client/main.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import json 4 | 5 | from quixstreams import Application 6 | from opc_ua_source import OpcUaSource 7 | 8 | OPC_URL = os.environ["OPC_SERVER_URL"] 9 | OPC_NAMESPACE = os.environ["OPC_NAMESPACE"] 10 | LOGLEVEL = os.getenv("LOGLEVEL", "INFO") 11 | TOPIC_NAME = os.environ["output"] 12 | 13 | params_to_process = os.getenv("PARAMETER_NAMES_TO_PROCESS", '') 14 | params_to_process = params_to_process.replace("'", "\"") 15 | PARAMETER_NAMES_TO_PROCESS = json.loads(params_to_process) 16 | 17 | logging.getLogger("asyncua.common.subscription").setLevel(logging.WARNING) 18 | logging.getLogger("asyncua.client.ua_client.UaClient").setLevel(logging.WARNING) 19 | 
logging.basicConfig(level=logging.INFO) 20 | 21 | 22 | # Create an Application 23 | app = Application( 24 | consumer_group="data_source", 25 | auto_create_topics=True, 26 | loglevel=LOGLEVEL, 27 | ) 28 | 29 | opc_ua_source = OpcUaSource("opc_ua_source", OPC_URL, OPC_NAMESPACE, PARAMETER_NAMES_TO_PROCESS) 30 | 31 | # define the topic using the "output" environment variable 32 | topic = app.topic(TOPIC_NAME) 33 | 34 | app.add_source(opc_ua_source, topic) 35 | 36 | 37 | if __name__ == "__main__": 38 | try: 39 | # logging.basicConfig(level=logging.INFO) 40 | app.run() 41 | except KeyboardInterrupt: 42 | print("\nProgram interrupted by user. Exiting gracefully.") -------------------------------------------------------------------------------- /resources/python/transformations/event_detection/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from quixstreams import Application 3 | from datetime import datetime 4 | 5 | # for local dev, load env vars from a .env file 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | app = Application(consumer_group="hard-braking-v1", auto_offset_reset="earliest", use_changelog_topics=False) 10 | 11 | input_topic = app.topic(os.environ["input"]) 12 | output_topic = app.topic(os.environ["output"]) 13 | 14 | sdf = app.dataframe(input_topic) 15 | 16 | # Filter items out without brake value. 17 | sdf = sdf[sdf.contains("Brake")] 18 | 19 | # Calculate hopping window of 1s with 200ms steps. 20 | sdf = sdf.apply(lambda row: float(row["Brake"])) \ 21 | .hopping_window(1000, 200).mean().final() 22 | 23 | sdf.print() 24 | 25 | # Filter only windows where average brake force exceeded 50%. 26 | sdf = sdf[sdf["value"] > 0.5] 27 | 28 | # Create nice JSON alert message. 29 | sdf = sdf.apply(lambda row: { 30 | "Timestamp": str(datetime.fromtimestamp(row["start"]/1000)), 31 | "Alert": { 32 | "Title": "Hard braking detected.", 33 | "Message": "For last 1 second, average braking power was " + str(row["value"]) 34 | } 35 | }) 36 | 37 | # Print JSON messages in console. 
38 | sdf.print() 39 | 40 | # Send the message to the output topic 41 | sdf.to_topic(output_topic) 42 | 43 | if __name__ == "__main__": 44 | app.run() -------------------------------------------------------------------------------- /workflow_tools/phases/sink/phase_sink_prerequisites.py: -------------------------------------------------------------------------------- 1 | # phase_sink_prerequisites.py - Sink Prerequisites Collection Phase 2 | 3 | from workflow_tools.contexts import WorkflowContext 4 | from workflow_tools.phases.base.base_phase import BasePhase, PhaseResult 5 | from workflow_tools.services.prerequisites_collector import PrerequisitesCollector 6 | 7 | 8 | class SinkPrerequisitesCollectionPhase(BasePhase): 9 | """Handles workspace, source topic selection, and destination technology selection for sink workflows.""" 10 | 11 | phase_name = "sink_prerequisites" 12 | phase_description = "Collect workspace, topic, and destination technology for sink workflow" 13 | 14 | def __init__(self, context: WorkflowContext, run_config=None, debug_mode: bool = False): 15 | super().__init__(context, debug_mode) 16 | self.run_config = run_config 17 | self.prerequisites_collector = PrerequisitesCollector(context, debug_mode, run_config) 18 | 19 | async def execute(self) -> PhaseResult: 20 | """Execute the sink prerequisites collection using centralized service.""" 21 | success = await self.prerequisites_collector.collect_prerequisites("sink") 22 | 23 | if success: 24 | return PhaseResult(success=True, message="Sink prerequisites collected successfully") 25 | else: 26 | return PhaseResult(success=False, message="Failed to collect sink prerequisites") -------------------------------------------------------------------------------- /resources/python/sources/http_api_source/README.md: -------------------------------------------------------------------------------- 1 | # HTTP API Source 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/sources/http_source) demonstrates how to run a Flask HTTP API as a web gateway and use it to publish to a Kafka topic via HTTP POST requests. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project 8 | 9 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 10 | 11 | You can test your endpoint by sending a message via curl: 12 | `curl -X POST -H "Content-Type: application/json" -d '{"sessionId": "000001", "name": "Tony Hawk", "purchase": "skateboard" }' https:///data/ 13 | ` 14 | 15 | ## Environment variables 16 | 17 | The code sample uses the following environment variables: 18 | 19 | - **output**: This is the output topic for hello world data. 20 | 21 | ## Contribute 22 | 23 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 24 | 25 | ## Open source 26 | 27 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 28 | 29 | Please star us and mention us on social to show your appreciation. 
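To make the data flow concrete: a gateway like this pairs a small Flask app with a Quix Streams producer. The sketch below is a simplified illustration of that pattern, not the sample's actual `main.py` — the route name, port and response code are assumptions:

```python
import json
import os

from flask import Flask, request
from quixstreams import Application

# Simplified gateway sketch: accept JSON POSTs and produce them to the topic
# named by the "output" environment variable.
app = Application()
topic = app.topic(os.environ["output"])
producer = app.get_producer()

flask_app = Flask(__name__)

@flask_app.route("/data/", methods=["POST"])
def post_data():
    payload = request.get_json()
    producer.produce(topic=topic.name, value=json.dumps(payload))
    producer.flush()  # flushing per request keeps the sketch simple, not fast
    return "", 202

if __name__ == "__main__":
    flask_app.run(host="0.0.0.0", port=80)
```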
30 | 31 | 32 | 33 | image: Flaticon.com -------------------------------------------------------------------------------- /resources/python/transformations/event_detection/README.md: -------------------------------------------------------------------------------- 1 | # Event Detection 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/transformations/Event-Detection) demonstrates how to consume data from a Kafka topic, filter the data based on some criteria and publish the result to an output topic. 4 | 5 | It shows how to make real-time decisions on your data by looking at the `Brake` value and if a condition is met will output JSON message to the output topic. 6 | 7 | You can adapt this code to suit your needs. 8 | 9 | ## How to run 10 | 11 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 12 | 13 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 14 | 15 | ## Environment variables 16 | 17 | The code sample uses the following environment variables: 18 | 19 | - **input**: This is the input topic for f1 data. 20 | - **output**: This is the output topic for hard braking events. 21 | 22 | ## Contribute 23 | 24 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 25 | 26 | ## Open source 27 | 28 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 29 | 30 | Please star us and mention us on social to show your appreciation. 31 | -------------------------------------------------------------------------------- /resources/python/destinations/redis_dest/main.py: -------------------------------------------------------------------------------- 1 | from quixstreams import Application 2 | 3 | import os 4 | import json 5 | import redis 6 | 7 | 8 | # for local dev, load env vars from a .env file 9 | from dotenv import load_dotenv 10 | load_dotenv() 11 | 12 | r = redis.Redis( 13 | host=os.environ['redis_host'], 14 | port=int(int(os.environ['redis_port'])), 15 | password=os.environ['redis_password'], 16 | username=os.environ['redis_username'] if 'redis_username' in os.environ else None, 17 | decode_responses=True) 18 | 19 | redis_key_prefix = os.environ['redis_key_prefix'] 20 | 21 | app = Application(consumer_group="redis-destination") 22 | 23 | input_topic = app.topic(os.environ["input"]) 24 | 25 | 26 | def send_data_to_redis(value: dict) -> None: 27 | print(value) 28 | 29 | # Convert the entire dictionary to a JSON string 30 | json_data = json.dumps(value) 31 | 32 | # Use a Redis key for storing the JSON data. This key can be a combination of 33 | # some unique identifier in your value dict, like a timestamp or a specific tag. 34 | # For this example, let's assume you have a unique 'id' in your value dict. 
35 | key = f"{redis_key_prefix}:{value['key']}" 36 | 37 | # Store the JSON string in Redis 38 | r.set(key, json_data) 39 | 40 | print(f"Data stored in Redis under key: {key}") 41 | 42 | 43 | sdf = app.dataframe(input_topic) 44 | sdf = sdf.update(send_data_to_redis) 45 | 46 | if __name__ == "__main__": 47 | print("Starting application") 48 | app.run(sdf) 49 | -------------------------------------------------------------------------------- /resources/python/destinations/influxdb_3/main.py: -------------------------------------------------------------------------------- 1 | # import Utility modules 2 | import os 3 | 4 | # import vendor-specific modules 5 | from quixstreams import Application 6 | from quixstreams.sinks.core.influxdb3 import InfluxDB3Sink 7 | 8 | # for local dev, load env vars from a .env file 9 | from dotenv import load_dotenv 10 | load_dotenv() 11 | 12 | 13 | tag_keys = keys.split(",") if (keys := os.environ.get("INFLUXDB_TAG_KEYS")) else [] 14 | field_keys = keys.split(",") if (keys := os.environ.get("INFLUXDB_FIELD_KEYS")) else [] 15 | measurement_name = os.environ.get("INFLUXDB_MEASUREMENT_NAME", "measurement1") 16 | time_setter = col if (col := os.environ.get("TIMESTAMP_COLUMN")) else None 17 | 18 | influxdb_v3_sink = InfluxDB3Sink( 19 | token=os.environ["INFLUXDB_TOKEN"], 20 | host=os.environ["INFLUXDB_HOST"], 21 | organization_id=os.environ["INFLUXDB_ORG"], 22 | tags_keys=tag_keys, 23 | fields_keys=field_keys, 24 | time_setter=time_setter, 25 | database=os.environ["INFLUXDB_DATABASE"], 26 | measurement=measurement_name, 27 | ) 28 | 29 | 30 | app = Application( 31 | consumer_group=os.environ.get("CONSUMER_GROUP_NAME", "influxdb-data-writer"), 32 | auto_offset_reset="earliest", 33 | commit_every=int(os.environ.get("BUFFER_SIZE", "1000")), 34 | commit_interval=float(os.environ.get("BUFFER_DELAY", "1")), 35 | ) 36 | input_topic = app.topic(os.environ["input"]) 37 | 38 | sdf = app.dataframe(input_topic) 39 | sdf.sink(influxdb_v3_sink) 40 | 41 | 42 | if __name__ == "__main__": 43 | app.run() 44 | -------------------------------------------------------------------------------- /resources/python/destinations/websocket/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "websocket-destination", 3 | "name": "Websocket Destination", 4 | "language": "Python", 5 | "tags": { 6 | "Pipeline Stage": ["Destination"], 7 | "Type": ["Connectors"], 8 | "Category": ["External APIs"] 9 | }, 10 | "shortDescription": "Send data from Kafka to a client connected to this websocket server", 11 | "DefaultFile": "main.py", 12 | "EntryPoint": "dockerfile", 13 | "RunEntryPoint": "main.py", 14 | "IconFile": "icon.png", 15 | "Variables": [{ 16 | "Name": "input", 17 | "Type": "EnvironmentVariable", 18 | "InputType": "InputTopic", 19 | "Description": "Name of the input topic to listen to.", 20 | "Required": true 21 | }, { 22 | "Name": "WS_USERNAME", 23 | "Type": "EnvironmentVariable", 24 | "InputType": "Secret", 25 | "Description": "Websocket username", 26 | "DefaultValue": "websocket_username", 27 | "Required": true 28 | }, { 29 | "Name": "WS_PASSWORD", 30 | "Type": "EnvironmentVariable", 31 | "InputType": "Secret", 32 | "Description": "Websocket password", 33 | "DefaultValue": "websocket_password", 34 | "Required": true 35 | }], 36 | "DeploySettings": { 37 | "DeploymentType": "Service", 38 | "CpuMillicores": 200, 39 | "MemoryInMb": 200, 40 | "Replicas": 1, 41 | "PublicAccess": true, 42 | "UrlPrefix": "websocket", 43 | 
"ValidateConnection": false 44 | } 45 | } -------------------------------------------------------------------------------- /resources/python/destinations/slack_notifications/main.py: -------------------------------------------------------------------------------- 1 | from quixstreams import Application 2 | import os 3 | import requests 4 | 5 | # for local dev, load env vars from a .env file 6 | from dotenv import load_dotenv 7 | load_dotenv() 8 | 9 | # Quix injects credentials automatically to the client. 10 | # Alternatively, you can always pass an SDK token manually as an argument. 11 | app = Application(consumer_group="slack-notifications") 12 | 13 | print("Opening input and output topics") 14 | input_topic = app.topic(os.getenv("input", "")) 15 | 16 | webhook_url = os.getenv("webhook_url", "") 17 | 18 | if input_topic is None: 19 | raise ValueError("input topic is required") 20 | 21 | if webhook_url is None: 22 | raise ValueError("webhook url is required") 23 | 24 | # create the streaming dataframe 25 | sdf = app.dataframe(input_topic) 26 | 27 | # filter out inbound data without the 'message' column 28 | sdf = sdf[sdf.contains("message")] 29 | 30 | # this code assumes the data contains a 'messages' column 31 | # which contains the message to be sent to slack 32 | def send_to_slack(data): 33 | # transmit your message to slack immediately 34 | slack_message = {"text": str(data["message"])} 35 | requests.post(webhook_url, json = slack_message) 36 | # if you want to batch the messages, implement a rolling window using the 37 | # QuixStreams client library https://quix.io/docs/quix-streams/windowing.html 38 | 39 | # apply a function to the incoming data 40 | sdf = sdf.apply(send_to_slack) 41 | 42 | if __name__ == "__main__": 43 | print("Starting application") 44 | app.run(sdf) 45 | -------------------------------------------------------------------------------- /resources/python/sources/opc_ua_client/README.md: -------------------------------------------------------------------------------- 1 | # OPC UA Client 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/sources/opc_ua_client) allows you to connect to your OPC UA server to capture and handle your data in Quix. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Locate and click the connector tile, fill in the required parameters, then click `Test connection & deploy` to deploy the connector to your Quix instance. 10 | 11 | Clicking `Customise` allows you to view or save the code to the repo that backs your Quix cloud instance. 12 | 13 | ## Environment Variables 14 | 15 | The connector uses the following environment variables: 16 | 17 | - **output**: Name of the output topic to publish to. 18 | - **OPC_SERVER_URL**: The URL to your OPC UA server. 19 | - **OPC_NAMESPACE**: The namespace of the data coming from your OPC UA server. 20 | - **PARAMETER_NAMES_TO_PROCESS**: List of parameters from your OPC UA server that you want to process. e.g. ['a', 'b', 'c']. NB:Use single quotes. 21 | 22 | 23 | ## Contribute 24 | 25 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 26 | 27 | ## Open source 28 | 29 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 
30 | 31 | Please star us and mention us on social to show your appreciation. 32 | -------------------------------------------------------------------------------- /resources/python/destinations/starter_destination/README.md: -------------------------------------------------------------------------------- 1 | # Starter destination 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/destinations/starter_destination) demonstrates how to use the Quix Streams Sink framework to 4 | consume and alter data from a Kafka topic, and publish these results to an external 5 | destination. 6 | 7 | This is just a template, so add your own operations as required. 8 | 9 | ## How to run 10 | 11 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 12 | 13 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 14 | 15 | ## Environment variables 16 | 17 | The code sample uses the following environment variables: 18 | 19 | - **input**: Name of the input topic to listen to. 20 | - **output**: Name of the output topic to write to. 21 | 22 | ## Using Premade Sinks 23 | 24 | Quix Streams has numerous prebuilt sinks available to use out of the box, so be 25 | sure to [check them out!](https://quix.io/docs/quix-streams/connectors/sinks/index.html) 26 | 27 | 28 | ## Contribute 29 | 30 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 31 | 32 | ## Open source 33 | 34 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 35 | 36 | Please star us and mention us on social to show your appreciation. 37 | -------------------------------------------------------------------------------- /resources/python/destinations/clickhouse-sink-claude-coded/README.md: -------------------------------------------------------------------------------- 1 | # Starter destination 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/destinations/starter_destination) demonstrates how to use the Quix Streams Sink framework to 4 | consume and alter data from a Kafka topic, and publish these results to an external 5 | destination. 6 | 7 | This is just a template, so add your own operations as required. 8 | 9 | ## How to run 10 | 11 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 12 | 13 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 14 | 15 | ## Environment variables 16 | 17 | The code sample uses the following environment variables: 18 | 19 | - **input**: Name of the input topic to listen to. 20 | - **output**: Name of the output topic to write to. 21 | 22 | ## Using Premade Sinks 23 | 24 | Quix Streams has numerous prebuilt sinks available to use out of the box, so be 25 | sure to [check them out!](https://quix.io/docs/quix-streams/connectors/sinks/index.html) 26 | 27 | 28 | ## Contribute 29 | 30 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit.
31 | 32 | ## Open source 33 | 34 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 35 | 36 | Please star us and mention us on social to show your appreciation. -------------------------------------------------------------------------------- /resources/python/sources/simple_csv/README.md: -------------------------------------------------------------------------------- 1 | # Demo CSV Data 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/sources/simple_csv) demonstrates how to read data from a simple, three-column CSV file and publish that data to a Kafka topic. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 8 | 9 | Clicking `Deploy` on the Sample, deploys a pre-built container in Quix. Complete the environment variables to configure the container. 10 | 11 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 12 | 13 | ## Environment variables 14 | 15 | The code sample uses the following environment variables: 16 | 17 | - **Topic**: Name of the output topic to write into. 18 | 19 | ## Your data 20 | 21 | To publish your own data, replace the CSV data with your own. 22 | 23 | After saving the sample to your workspace, you can copy and paste your data into the CSV file or upload your own CSV file and change the python code to look for your CSV file name instead of `demo-data.csv`. 24 | 25 | ## Contribute 26 | 27 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 28 | 29 | ## Open source 30 | 31 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 32 | 33 | Please star us and mention us on social to show your appreciation. 34 | -------------------------------------------------------------------------------- /resources/python/sources/starter_source/README.md: -------------------------------------------------------------------------------- 1 | # Starter data source 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/sources/starter_source) demonstrates how to use the Quix Streams Source framework to publish 4 | hard-coded lines of JSON data to a Kafka topic. 5 | 6 | This boilerplate runs in Quix Cloud without any necessary alterations. 7 | 8 | ## How to run 9 | 10 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 11 | 12 | Clicking `Deploy` on the Sample, deploys a pre-built container in Quix. Complete the environment variables to configure the container. 13 | 14 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 15 | 16 | ## Environment variables 17 | 18 | The code sample uses the following environment variables: 19 | 20 | - **output**: Name of the output topic to write into. 
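Since this template is built on the Quix Streams Source framework described above, the heart of the sample is a `Source` subclass whose `run()` method serialises and produces records. The following is a minimal, illustrative sketch in the style of the Quix Streams documentation — the class name and hard-coded rows are assumptions, not the sample's actual `main.py`:

```python
import os

from quixstreams import Application
from quixstreams.sources import Source


class HardcodedJSONSource(Source):
    """Illustrative source that publishes a few hard-coded JSON rows."""

    def run(self):
        rows = [{"id": 1, "message": "hello"}, {"id": 2, "message": "world"}]
        for row in rows:
            if not self.running:
                return
            event = self.serialize(key=str(row["id"]), value=row)
            self.produce(key=event.key, value=event.value)


app = Application()
topic = app.topic(os.environ["output"])
app.add_source(HardcodedJSONSource(name="hardcoded-json"), topic)

if __name__ == "__main__":
    app.run()
```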
21 | 22 | ## Using Premade Sources 23 | 24 | Quix Streams has numerous prebuilt sources available to use out of the box, so be 25 | sure to [check them out!](https://quix.io/docs/quix-streams/connectors/sources/index.html) 26 | 27 | ## Contribute 28 | 29 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 30 | 31 | ## Open source 32 | 33 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 34 | 35 | Please star us and mention us on social to show your appreciation. 36 | -------------------------------------------------------------------------------- /resources/python/sources/wikipedia-source-claude-coded/README.md: -------------------------------------------------------------------------------- 1 | # Starter data source 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/sources/starter_source) demonstrates how to use the Quix Streams Source framework to publish 4 | hard-coded lines of JSON data to a Kafka topic. 5 | 6 | This boilerplate runs in Quix Cloud without any necessary alterations. 7 | 8 | ## How to run 9 | 10 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 11 | 12 | Clicking `Deploy` on the Sample, deploys a pre-built container in Quix. Complete the environment variables to configure the container. 13 | 14 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 15 | 16 | ## Environment variables 17 | 18 | The code sample uses the following environment variables: 19 | 20 | - **output**: Name of the output topic to write into. 21 | 22 | ## Using Premade Sources 23 | 24 | Quix Streams has numerous prebuilt sources available to use out of the box, so be 25 | sure to [check them out!](https://quix.io/docs/quix-streams/connectors/sources/index.html) 26 | 27 | ## Contribute 28 | 29 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 30 | 31 | ## Open source 32 | 33 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 34 | 35 | Please star us and mention us on social to show your appreciation. -------------------------------------------------------------------------------- /resources/python/destinations/confluent_kafka/README.md: -------------------------------------------------------------------------------- 1 | # Confluent Kafka 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/destinations/confluent_kafka) demonstrates how to consume data from a Kafka topic in Quix and publish it to a topic in Confluent Cloud. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 10 | 11 | Then either: 12 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 13 | 14 | * or click `Customise connector` to inspect or alter the code before deployment. 
15 | 16 | ## Environment variables 17 | 18 | The connector uses the following environment variables: 19 | 20 | - **input**: This is the Quix topic to listen to. 21 | - **kafka_topic**: The Confluent Kafka Topic you wish to publish to. 22 | - **kafka_key**: Obtained from the Confluent Kafka portal. 23 | - **kafka_secret**: Obtained from the Confluent Kafka portal. 24 | - **kafka_broker_address**: Obtained from the Confluent Kafka portal. 25 | 26 | ## Contribute 27 | 28 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 29 | 30 | ## Open source 31 | 32 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 33 | 34 | Please star us and mention us on social to show your appreciation. 35 | -------------------------------------------------------------------------------- /resources/python/destinations/slack_notifications/README.md: -------------------------------------------------------------------------------- 1 | # Slack Notifications 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/destinations/slack_notifications) demonstrates how to consume data from a Kafka topic and trigger a Slack webhook based on specific matching criteria that you apply to the incoming data. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 10 | 11 | Then either: 12 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 13 | 14 | * or click `Customise connector` to inspect or alter the code before deployment. 15 | 16 | ## Environment Variables 17 | 18 | The connector uses the following environment variables: 19 | 20 | - **input**: Name of the input topic to listen to. 21 | - **webhook_url**: The webhook url to send notifications to 22 | 23 | ## Requirements / Prerequisites 24 | 25 | You'll need to have access to Slack and be able to set up a webhook here: https://api.slack.com/apps 26 | 27 | ## Contribute 28 | 29 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 30 | 31 | ## Open source 32 | 33 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 34 | 35 | Please star us and mention us on social to show your appreciation. 
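If you want to see the sink fire end-to-end, it expects records that contain a `message` field (anything else is filtered out by `main.py`, shown earlier) and forwards that field to the webhook. A hedged sketch of producing one test event into the input topic — the fallback topic name is taken from the connector's `library.json` default:

```python
import json
import os

from quixstreams import Application

# Push one test event into the Slack sink's input topic. Records without a
# "message" field are dropped by the sink before the webhook call.
app = Application()
topic = app.topic(os.environ.get("input", "hard-braking"))

with app.get_producer() as producer:
    event = {"message": "Hard braking detected in the last window."}
    producer.produce(topic=topic.name, key="test", value=json.dumps(event))
```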
36 | -------------------------------------------------------------------------------- /workflow_tools/phases/__init__.py: -------------------------------------------------------------------------------- 1 | # phases/__init__.py 2 | """Workflow phases organized by type.""" 3 | 4 | # Import all phases for easy access 5 | from .base.base_phase import BasePhase, PhaseResult 6 | 7 | from .sink.phase_sink_prerequisites import SinkPrerequisitesCollectionPhase 8 | from .sink.phase_sink_schema import SinkSchemaPhase 9 | from .sink.phase_sink_knowledge import SinkKnowledgePhase 10 | from .sink.phase_sink_generation import SinkGenerationPhase 11 | from .sink.phase_sink_sandbox import SinkSandboxPhase 12 | 13 | from .source.phase_source_prerequisites import SourcePrerequisitesCollectionPhase 14 | from .source.phase_source_schema import SourceSchemaPhase 15 | from .source.phase_source_knowledge import SourceKnowledgePhase 16 | from .source.phase_source_generation import SourceGenerationPhase 17 | from .source.phase_source_connection_testing import SourceConnectionTestingPhase 18 | from .source.phase_source_sandbox import SourceSandboxPhase 19 | 20 | from .shared.phase_deployment import DeploymentPhase 21 | from .shared.phase_monitoring import MonitoringPhase 22 | 23 | __all__ = [ 24 | # Base classes 25 | 'BasePhase', 'PhaseResult', 26 | 27 | # Sink phases 28 | 'SinkPrerequisitesCollectionPhase', 29 | 'SinkSchemaPhase', 30 | 'SinkKnowledgePhase', 31 | 'SinkGenerationPhase', 32 | 'SinkSandboxPhase', 33 | 34 | # Source phases 35 | 'SourcePrerequisitesCollectionPhase', 36 | 'SourceSchemaPhase', 37 | 'SourceKnowledgePhase', 38 | 'SourceGenerationPhase', 39 | 'SourceConnectionTestingPhase', 40 | 'SourceSandboxPhase', 41 | 42 | # Shared phases 43 | 'DeploymentPhase', 44 | 'MonitoringPhase', 45 | ] -------------------------------------------------------------------------------- /resources/_demo-clickhouse-db/README.md: -------------------------------------------------------------------------------- 1 | # Starter transformation 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/transformations/starter_transformation) demonstrates how to consume and transform data from a Kafka topic 4 | and publish these results to another Kafka topic, all using our `StreamingDataFrame`. 5 | 6 | This boilerplate will run in Quix Cloud but largely has placeholder operations, so you 7 | will need to add your own to do something besides printing the data to console! 8 | 9 | ## How to run 10 | 11 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 12 | 13 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 14 | 15 | ## Environment variables 16 | 17 | The code sample uses the following environment variables: 18 | 19 | - **input**: Name of the input topic to listen to. 20 | - **output**: Name of the output topic to write to. 21 | 22 | ## Possible `StreamingDataFrame` Operations 23 | 24 | Many different operations and transformations are available, so 25 | be sure to [explore what's possible](https://quix.io/docs/quix-streams/processing.html)! 26 | 27 | 28 | ## Contribute 29 | 30 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 
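For orientation, every phase re-exported above follows the same contract visible in the phase modules earlier in this listing: construct it with a `WorkflowContext` (plus an optional run config and debug flag), then `await` its `execute()`, which returns a `PhaseResult` with `success` and `message`. A hypothetical driver — the real orchestration lives elsewhere in the repo, and constructing `WorkflowContext` with no arguments is an assumption:

```python
import asyncio

from workflow_tools.contexts import WorkflowContext
from workflow_tools.phases import SinkPrerequisitesCollectionPhase, SinkKnowledgePhase


async def run_sink_phases() -> None:
    context = WorkflowContext()  # assumed default construction
    for phase_cls in (SinkPrerequisitesCollectionPhase, SinkKnowledgePhase):
        phase = phase_cls(context, run_config=None, debug_mode=True)
        result = await phase.execute()
        print(f"{phase.phase_name}: {result.message}")
        if not result.success:
            break


if __name__ == "__main__":
    asyncio.run(run_sink_phases())
```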
31 | 32 | ## Open source 33 | 34 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 35 | 36 | Please star us and mention us on social to show your appreciation. -------------------------------------------------------------------------------- /resources/python/sources/environment_source/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import traceback 3 | from quixstreams import Application 4 | from quixstreams.sources.kafka import QuixEnvironmentSource 5 | from dotenv import load_dotenv 6 | 7 | def main(): 8 | app = Application() 9 | 10 | # Load environment variables from .env file for local development 11 | load_dotenv() 12 | 13 | # Setup output topic 14 | output_topic = app.topic(os.environ["topic"]) 15 | 16 | # Get necessary environment variables for Quix input topic 17 | source_workspace_id = os.environ["source_workspace_id"] 18 | source_sdk_token = os.environ["source_sdk_token"] 19 | 20 | # Optional environment variables 21 | consumer_group = os.environ.get("consumer_group", "quix_environment_source") 22 | auto_offset_reset = os.environ.get("auto_offset_reset", "earliest") 23 | 24 | # Setup input topic 25 | input_topic = QuixEnvironmentSource( 26 | os.environ["topic"], 27 | app.config, 28 | os.environ["topic"], 29 | quix_workspace_id=source_workspace_id, 30 | quix_sdk_token=source_sdk_token, 31 | consumer_group=consumer_group, 32 | auto_offset_reset=auto_offset_reset, 33 | shutdown_timeout=30 34 | ) 35 | 36 | app.add_source(input_topic, output_topic) 37 | print("CONNECTED!") 38 | 39 | # Start the application 40 | app._run() 41 | 42 | 43 | if __name__ == "__main__": 44 | try: 45 | main() 46 | except Exception as e: 47 | print("ERROR! - An error occurred in the application.") 48 | traceback.print_exc() 49 | if 'app' in locals(): # Ensure app exists before stopping it 50 | app.stop() -------------------------------------------------------------------------------- /resources/python/transformations/starter_transformation/README.md: -------------------------------------------------------------------------------- 1 | # Starter transformation 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/transformations/starter_transformation) demonstrates how to consume and transform data from a Kafka topic 4 | and publish these results to another Kafka topic, all using our `StreamingDataFrame`. 5 | 6 | This boilerplate will run in Quix Cloud but largely has placeholder operations, so you 7 | will need to add your own to do something besides printing the data to console! 8 | 9 | ## How to run 10 | 11 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 12 | 13 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 14 | 15 | ## Environment variables 16 | 17 | The code sample uses the following environment variables: 18 | 19 | - **input**: Name of the input topic to listen to. 20 | - **output**: Name of the output topic to write to. 21 | 22 | ## Possible `StreamingDataFrame` Operations 23 | 24 | Many different operations and transformations are available, so 25 | be sure to [explore what's possible](https://quix.io/docs/quix-streams/processing.html)! 26 | 27 | 28 | ## Contribute 29 | 30 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. 
Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 31 | 32 | ## Open source 33 | 34 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 35 | 36 | Please star us and mention us on social to show your appreciation. 37 | -------------------------------------------------------------------------------- /resources/python/sources/s3_source/README.md: -------------------------------------------------------------------------------- 1 | # S3 Source Connector 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/sources/s3_source) demonstrates how to connect to Amazon S3 to read files into a Kafka topic. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 10 | 11 | Then either: 12 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 13 | 14 | * or click `Customise connector` to inspect or alter the code before deployment. 15 | 16 | ## Environment variables 17 | 18 | This connector uses the following environment variables: 19 | 20 | - **output**: The output topic to stream Segment data into 21 | - **S3_BUCKET**: The URI or URL to your S3 bucket 22 | - **S3_REGION**: The region of your S3 bucket 23 | - **S3_SECRET**: Your AWS secret 24 | - **S3_ACCESS_KEY_ID**: Your AWS Access Key 25 | - **S3_FOLDER_PATH**: The path to the S3 folder to consume 26 | - **S3_FILE_FORMAT**: The file format of the files 27 | - **S3_FILE_COMPRESSION**: The type of file compression used for the files 28 | 29 | ## Contribute 30 | 31 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 32 | 33 | ## Open source 34 | 35 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 36 | 37 | Please star us and mention us on social to show your appreciation. 38 | -------------------------------------------------------------------------------- /resources/python/sources/MQTT/README.md: -------------------------------------------------------------------------------- 1 | # MQTT 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/sources/MQTT) demonstrates how to consume data from an MQTT broker and publish that data to a Kafka topic. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 10 | 11 | Then either: 12 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 13 | 14 | * or click `Customise connector` to inspect or alter the code before deployment. 15 | 16 | ## Environment Variables 17 | 18 | The connector uses the following environment variables: 19 | 20 | - **output**: Name of the output topic to publish to. 21 | - **mqtt_topic**: The MQTT topic to listen to. Can use wildcards e.g. MyTopic/# 22 | - **mqtt_server**: The address of your MQTT server. 23 | - **mqtt_port**: The port of your MQTT server. 
24 | - **mqtt_username**: Username of your MQTT user. 25 | - **mqtt_password**: Password for the MQTT user. 26 | 27 | ## Requirements / Prerequisites 28 | 29 | You'll need to have a MQTT broker either locally or in the cloud 30 | 31 | ## Contribute 32 | 33 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 34 | 35 | ## Open source 36 | 37 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 38 | 39 | Please star us and mention us on social to show your appreciation. 40 | -------------------------------------------------------------------------------- /resources/_demo-clickhouse-db/main.py: -------------------------------------------------------------------------------- 1 | # import the Quix Streams modules for interacting with Kafka. 2 | # For general info, see https://quix.io/docs/quix-streams/introduction.html 3 | from quixstreams import Application 4 | 5 | import os 6 | 7 | # for local dev, load env vars from a .env file 8 | # from dotenv import load_dotenv 9 | # load_dotenv() 10 | 11 | 12 | def main(): 13 | """ 14 | Transformations generally read from, and produce to, Kafka topics. 15 | 16 | They are conducted with Applications and their accompanying StreamingDataFrames 17 | which define what transformations to perform on incoming data. 18 | 19 | Be sure to explicitly produce output to any desired topic(s); it does not happen 20 | automatically! 21 | 22 | To learn about what operations are possible, the best place to start is: 23 | https://quix.io/docs/quix-streams/processing.html 24 | """ 25 | 26 | # Setup necessary objects 27 | app = Application( 28 | consumer_group="my_transformation", 29 | auto_create_topics=True, 30 | auto_offset_reset="earliest" 31 | ) 32 | input_topic = app.topic(name=os.environ["input"]) 33 | output_topic = app.topic(name=os.environ["output"]) 34 | sdf = app.dataframe(topic=input_topic) 35 | 36 | # Do StreamingDataFrame operations/transformations here 37 | sdf = sdf.apply(lambda row: row).filter(lambda row: True) 38 | sdf = sdf.print(metadata=True) 39 | 40 | # Finish off by writing to the final result to the output topic 41 | sdf.to_topic(output_topic) 42 | 43 | # With our pipeline defined, now run the Application 44 | app.run() 45 | 46 | 47 | # It is recommended to execute Applications under a conditional main 48 | if __name__ == "__main__": 49 | main() -------------------------------------------------------------------------------- /resources/python/sources/segment_webhook/README.md: -------------------------------------------------------------------------------- 1 | # Segment 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/sources/segment_webhook) demonstrates how to connect to Segment, read event data and publish that data to a Kafka topic. 4 | 5 | It's secure, using a secret shared with both Quix and Segment. 6 | 7 | And we've used [Waitress](https://github.com/Pylons/waitress), "a production-quality pure-Python WSGI server with very acceptable performance. It has no dependencies except ones that live in the Python standard library. It runs on CPython on Unix and Windows.” 8 | 9 | ## How to run 10 | 11 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 
12 | 13 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 14 | 15 | Then either: 16 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 17 | 18 | * or click `Customise connector` to inspect or alter the code before deployment. 19 | 20 | ## Environment variables 21 | 22 | This connector uses the following environment variables: 23 | 24 | - **output**: The output topic to stream Segment data into 25 | - **shared_secret**: The secret you configured in Segment 26 | 27 | ## Contribute 28 | 29 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 30 | 31 | ## Open source 32 | 33 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 34 | 35 | Please star us and mention us on social to show your appreciation. 36 | -------------------------------------------------------------------------------- /resources/python/sources/hivemq/README.md: -------------------------------------------------------------------------------- 1 | # HiveMQ 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/sources/hivemq) demonstrates how to consume data from a HiveMQ broker's MQTT topic and publish that data to a Kafka topic. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 10 | 11 | Then either: 12 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 13 | 14 | * or click `Customise connector` to inspect or alter the code before deployment. 15 | 16 | ## Environment Variables 17 | 18 | The connector uses the following environment variables: 19 | 20 | - **output**: Name of the output topic to publish to. 21 | - **mqtt_topic**: The MQTT topic to listen to. Can use wildcards e.g. MyTopic/# 22 | - **mqtt_server**: The address of your MQTT server. 23 | - **mqtt_port**: The port of your MQTT server. 24 | - **mqtt_username**: Username of your MQTT user. 25 | - **mqtt_password**: Password for the MQTT user. 26 | 27 | ## Requirements / Prerequisites 28 | 29 | You'll need to have a HiveMQ broker running either locally or in the cloud 30 | 31 | ## Contribute 32 | 33 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 34 | 35 | ## Open source 36 | 37 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 38 | 39 | Please star us and mention us on social to show your appreciation. 40 | -------------------------------------------------------------------------------- /resources/python/transformations/starter_transformation/main.py: -------------------------------------------------------------------------------- 1 | # import the Quix Streams modules for interacting with Kafka. 
2 | # For general info, see https://quix.io/docs/quix-streams/introduction.html 3 | from quixstreams import Application 4 | 5 | import os 6 | 7 | # for local dev, load env vars from a .env file 8 | # from dotenv import load_dotenv 9 | # load_dotenv() 10 | 11 | 12 | def main(): 13 | """ 14 | Transformations generally read from, and produce to, Kafka topics. 15 | 16 | They are conducted with Applications and their accompanying StreamingDataFrames 17 | which define what transformations to perform on incoming data. 18 | 19 | Be sure to explicitly produce output to any desired topic(s); it does not happen 20 | automatically! 21 | 22 | To learn about what operations are possible, the best place to start is: 23 | https://quix.io/docs/quix-streams/processing.html 24 | """ 25 | 26 | # Setup necessary objects 27 | app = Application( 28 | consumer_group="my_transformation", 29 | auto_create_topics=True, 30 | auto_offset_reset="earliest" 31 | ) 32 | input_topic = app.topic(name=os.environ["input"]) 33 | output_topic = app.topic(name=os.environ["output"]) 34 | sdf = app.dataframe(topic=input_topic) 35 | 36 | # Do StreamingDataFrame operations/transformations here 37 | sdf = sdf.apply(lambda row: row).filter(lambda row: True) 38 | sdf = sdf.print(metadata=True) 39 | 40 | # Finish off by writing to the final result to the output topic 41 | sdf.to_topic(output_topic) 42 | 43 | # With our pipeline defined, now run the Application 44 | app.run() 45 | 46 | 47 | # It is recommended to execute Applications under a conditional main 48 | if __name__ == "__main__": 49 | main() 50 | -------------------------------------------------------------------------------- /resources/python/sources/snowplow_source/README.md: -------------------------------------------------------------------------------- 1 | # Snowplow 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/sources/snowplow_source) demonstrates how to read data from Snowplow and publish it to a Kafka topic. 4 | 5 | Note that this connector works with Snowplow instances on AWS Kinesis, so you will need connection details for Kinesis. 6 | 7 | ## How to run 8 | 9 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 10 | 11 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 12 | 13 | Then either: 14 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 15 | 16 | * or click `Customise connector` to inspect or alter the code before deployment. 17 | 18 | ## Environment variables 19 | 20 | The connector uses the following environment variables: 21 | 22 | - **output**: This is the Quix Topic that will receive the stream. 23 | - **aws_access_key_id**: AWS Access Key Id. 24 | - **aws_secret_access_key**: AWS Secret Access Key. 25 | - **aws_region_name**: AWS Region Name. 26 | - **aws_stream_name**: The name of the AWS stream you want to use. 27 | 28 | ## Requirements/prerequisites 29 | 30 | You will need Snowplow deployed to AWS to use this project. 31 | 32 | ## Contribute 33 | 34 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 35 | 36 | ## Open source 37 | 38 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 
39 | 40 | Please star us and mention us on social to show your appreciation. 41 | -------------------------------------------------------------------------------- /resources/python/destinations/MQTT/README.md: -------------------------------------------------------------------------------- 1 | # MQTT 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/destinations/MQTT) demonstrates how to consume data from a Kafka topic and publish it to an MQTT broker. 4 | 5 | The MQTT topic the example produces to will be `mqtt_topic_root`/`message_key`. 6 | 7 | ## How to run 8 | 9 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 10 | 11 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 12 | 13 | Then either: 14 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 15 | 16 | * or click `Customise connector` to inspect or alter the code before deployment. 17 | 18 | ## Requirements / Prerequisites 19 | 20 | You'll need to have an MQTT broker running either locally or in the cloud. 21 | 22 | ## Environment Variables 23 | 24 | The connector uses the following environment variables: 25 | 26 | - **input**: Name of the input topic to listen to. 27 | - **mqtt_topic_root**: The root for messages in MQTT, this can be anything. 28 | - **mqtt_server**: The address of your MQTT server. 29 | - **mqtt_port**: The port of your MQTT server. 30 | - **mqtt_username**: Username of your MQTT user. 31 | - **mqtt_password**: Password for the MQTT user. 32 | 33 | ## Contribute 34 | 35 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 36 | 37 | ## Open source 38 | 39 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 40 | 41 | Please star us and mention us on social to show your appreciation. 42 | -------------------------------------------------------------------------------- /resources/python/destinations/hivemq/README.md: -------------------------------------------------------------------------------- 1 | # HiveMQ 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/destinations/hivemq) demonstrates how to consume data from a Kafka topic and publish it to a HiveMQ broker's MQTT topic. 4 | 5 | The MQTT topic the example produces to will be `mqtt_topic_root`/`message_key`. 6 | 7 | ## How to run 8 | 9 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 10 | 11 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 12 | 13 | Then either: 14 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 15 | 16 | * or click `Customise connector` to inspect or alter the code before deployment. 17 | 18 | ## Requirements / Prerequisites 19 | 20 | You'll need to have a HiveMQ broker running either locally or in the cloud 21 | 22 | ## Environment Variables 23 | 24 | The code sample uses the following environment variables: 25 | 26 | - **input**: Name of the input topic to listen to. 27 | - **mqtt_topic_root**: The root for messages in MQTT, this can be anything. 28 | - **mqtt_server**: The address of your MQTT server.
29 | - **mqtt_port**: The port of your MQTT server. 30 | - **mqtt_username**: Username of your MQTT user. 31 | - **mqtt_password**: Password for the MQTT user. 32 | 33 | ## Contribute 34 | 35 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 36 | 37 | ## Open source 38 | 39 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 40 | 41 | Please star us and mention us on social to show your appreciation. 42 | -------------------------------------------------------------------------------- /resources/python/sources/demo_data/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pandas as pd 4 | 5 | # import the dotenv module to load environment variables from a file 6 | from dotenv import load_dotenv 7 | from quixstreams import Application 8 | from quixstreams.sources.community.pandas import PandasDataFrameSource 9 | 10 | load_dotenv(override=False) 11 | 12 | # Define the topic using the "output" environment variable 13 | topic_name = os.getenv("output") 14 | if topic_name is None: 15 | raise ValueError( 16 | "The 'output' environment variable is required. This is the output topic that data will be published to." 17 | ) 18 | 19 | # Use "Quix__Deployment__Id" env variable as a consumer group name to run multiple instances independently 20 | # If not provided, "demo-data" will be used 21 | consumer_group = os.getenv('Quix__Deployment__Id', 'demo-data') 22 | 23 | # Create an Application. 24 | app = Application(consumer_group=consumer_group) 25 | 26 | # Specify the filename here 27 | filename = "demo-data.csv" 28 | 29 | # Read data from the csv file 30 | df = pd.read_csv(filename) 31 | 32 | # Create a Source instance based on a DataFrame 33 | pandas_source = PandasDataFrameSource( 34 | df=df, 35 | name=f"pandas-source-{consumer_group}", # Use random consumer group as a name to generate random intermediate topics on each Source run 36 | key_column="SessionId", # use the "SessionId" column for message keys 37 | delay=0.05, # Add a delay between each row to simulate real-time processing 38 | ) 39 | output_topic = app.topic(topic_name) 40 | 41 | # Connect the Source to a StreamingDataFrame 42 | sdf = app.dataframe(source=pandas_source) 43 | 44 | # Print incoming data 45 | sdf.print(metadata=True) 46 | # Send data to the output topic 47 | sdf.to_topic(output_topic) 48 | 49 | 50 | if __name__ == "__main__": 51 | # Start the application 52 | app.run(sdf) 53 | -------------------------------------------------------------------------------- /prompts/tasks/diagnose_app_analysis.md: -------------------------------------------------------------------------------- 1 | # Application Analysis 2 | 3 | You are analyzing an existing Quix application to help the user understand what it does and identify potential issues or improvements. 4 | 5 | ## Application Details 6 | - **Application Name:** {app_name} 7 | - **Application ID:** {app_id} 8 | - **Workspace:** {workspace_id} 9 | 10 | ## Files to Analyze 11 | 12 | The application files are located in: `{app_directory}` 13 | 14 | Please analyze the following files: 15 | {file_list} 16 | 17 | ## Your Task 18 | 19 | 1. **Examine the main application file** (typically `app.py` or `main.py`) 20 | 2. 
**Review the configuration** in `app.yaml` to understand: 21 | - Environment variables and their purposes 22 | - Dependencies listed in requirements 23 | - The application's input/output topics 24 | 3. **Identify the application's purpose** based on: 25 | - Code structure and logic 26 | - External systems it connects to 27 | - Data transformations it performs 28 | 4. **Note any potential issues** such as: 29 | - Missing error handling 30 | - Hardcoded values that should be variables 31 | - Deprecated libraries or patterns 32 | - Performance bottlenecks 33 | 34 | ## Output Format 35 | 36 | Provide a concise summary in the following format: 37 | 38 | ### Application Summary 39 | [2-3 sentences describing what this application does] 40 | 41 | ### Key Components 42 | - **Input:** [What data it reads and from where] 43 | - **Processing:** [Main operations/transformations] 44 | - **Output:** [What it produces and where it sends it] 45 | 46 | ### Configuration 47 | - **Environment Variables:** [List key variables and their purposes] 48 | - **Dependencies:** [Notable libraries used] 49 | 50 | ### Observations 51 | [Any issues, improvements, or notable patterns you've identified] 52 | 53 | ### Recommendation 54 | [Brief suggestion on whether to run as-is, fix issues first, or provide more context] -------------------------------------------------------------------------------- /prompts/tasks/diagnose_follow_up.md: -------------------------------------------------------------------------------- 1 | # Follow-up Code Enhancement Instructions 2 | 3 | The application has been successfully tested and the user wants to make additional changes or improvements. 4 | 5 | ## Context 6 | - **Application:** {app_name} 7 | - **Previous Changes:** {previous_changes} 8 | - **Current Working State:** The application is running successfully 9 | - **User's New Request:** {user_requirements} 10 | 11 | ## Application Analysis 12 | 13 | {app_analysis} 14 | 15 | ## Your Task 16 | 17 | The user has requested additional modifications to the working application. Based on their request: 18 | 19 | 1. **Build upon the existing working code** - Don't break what's already working 20 | 2. **Implement the requested changes**, which could include: 21 | - Adding new features 22 | - Performance optimizations 23 | - Code refactoring 24 | - Improved error handling 25 | - Enhanced logging/monitoring 26 | - Dependency updates 27 | 3. **Maintain backward compatibility** unless explicitly told otherwise 28 | 4. **Test your changes** won't break existing functionality 29 | 30 | ## Important Guidelines 31 | 32 | - **Start from the current working version** in `{app_directory}` 33 | - **Preserve all existing functionality** that the user hasn't asked to change 34 | - **Keep the same configuration structure** unless changes are needed 35 | - **Document significant additions** with clear comments 36 | - **Ensure new dependencies are added** to requirements.txt 37 | 38 | ## Expected Output 39 | 40 | After implementing the changes: 41 | 1. Describe what you added or modified 42 | 2. Explain how it enhances the application 43 | 3. Note any new configuration requirements 44 | 4. Highlight any risks or considerations 45 | 5. Suggest testing approaches for the new changes 46 | 47 | Remember: This is an iterative improvement on a working application. Be careful not to break existing functionality while adding enhancements. 
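The `{app_name}`, `{previous_changes}`, `{user_requirements}` and `{app_directory}` fields above are runtime placeholders that the workflow fills in before the prompt is sent to a model. As a rough, hedged sketch of how a template like this could be loaded and rendered (the helper name and prompt directory below are assumptions for illustration, not the repo's actual loader):

```python
from pathlib import Path

PROMPTS_DIR = Path("prompts/tasks")  # assumed location of the markdown templates


def render_task_prompt(name: str, **values: str) -> str:
    """Read a markdown template and substitute its {placeholder} fields."""
    template = (PROMPTS_DIR / f"{name}.md").read_text(encoding="utf-8")
    # str.format works here because the template only contains {placeholder}
    # tokens; a template with literal braces would need a safer substitution.
    return template.format(**values)


prompt = render_task_prompt(
    "diagnose_follow_up",
    app_name="my-app",
    previous_changes="added retry logic",
    user_requirements="add structured logging",
    app_directory="working_files/my-app",
)
print(prompt[:200])
```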
-------------------------------------------------------------------------------- /resources/python/destinations/flet-waveform/README.md: -------------------------------------------------------------------------------- 1 | # Flet Real-time Waveform Viewer 2 | 3 | [This code sample](https://github.com/quixio/quix-samples/tree/main/python/destinations/flet) demonstrates how to create a real-time data visualization web application using Flet and Quix Streams. The application consumes data from a Kafka topic and displays sensor data (temperature and humidity) in real-time charts. 4 | 5 | This destination creates an interactive web interface that visualizes streaming data with live updates and connection status monitoring. 6 | 7 | ## How to run 8 | 9 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the Samples to use this project. 10 | 11 | Clicking `Edit code` on the Sample, forks the project to your own Git repo so you can customize it before deploying. 12 | 13 | ## Environment variables 14 | 15 | The code sample uses the following environment variables: 16 | 17 | - **input**: Name of the input topic to listen to (expects data with 'temperature' and 'humidity' fields). 18 | 19 | ## Features 20 | 21 | - **Real-time Data Visualization**: Live charts displaying temperature and humidity data 22 | - **Connection Status Monitoring**: Visual indicators for Kafka connection status 23 | - **Web Interface**: Accessible via web browser on port 80 24 | - **Multi-instance Support**: Handles multiple browser tabs/instances gracefully 25 | - **Responsive Design**: Adapts to different screen sizes 26 | 27 | 28 | ## Contribute 29 | 30 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 31 | 32 | ## Open source 33 | 34 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 35 | 36 | Please star us and mention us on social to show your appreciation. 
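The waveform viewer builds on the same Quix Streams consumer pattern as the starter transformation shown earlier in this repo. Below is a minimal sketch of that consumer side, assuming incoming messages carry the `temperature` and `humidity` fields described above; it is not the sample's actual code, which additionally drives the Flet UI:

```python
import os
from quixstreams import Application

# Consume the configured input topic and keep only the two plotted fields.
app = Application(consumer_group="flet-waveform-sketch", auto_offset_reset="earliest")
input_topic = app.topic(name=os.environ["input"])
sdf = app.dataframe(topic=input_topic)

sdf = sdf.apply(lambda row: {"temperature": row.get("temperature"),
                             "humidity": row.get("humidity")})
sdf = sdf.print(metadata=True)

if __name__ == "__main__":
    app.run()
```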
37 | -------------------------------------------------------------------------------- /config/models.yaml.template: -------------------------------------------------------------------------------- 1 | models: 2 | claude_code_sdk: 3 | model: sonnet 4 | max_turns: 50 5 | allowed_tools: 6 | - Read 7 | - Write 8 | - Edit 9 | - Bash 10 | - Glob 11 | - Grep 12 | permission_mode: acceptEdits 13 | debug: 14 | max_turns: 50 15 | model: sonnet 16 | sink: 17 | provider: anthropic 18 | model: claude-opus-4-1-20250805 19 | fallback: gpt-4o 20 | bypass_litellm: false 21 | source: 22 | provider: anthropic 23 | model: claude-opus-4-1-20250805 24 | fallback: gpt-4o 25 | bypass_litellm: false 26 | # cli_path will be auto-detected or set via environment variable 27 | schema_analysis: 28 | sink: 29 | provider: openai 30 | model: gpt-4o 31 | bypass_litellm: false 32 | source: 33 | provider: openai 34 | model: gpt-4o 35 | bypass_litellm: false 36 | template_matching: 37 | provider: openai 38 | model: gpt-4o 39 | bypass_litellm: false 40 | debugging: 41 | provider: openai 42 | model: gpt-5 43 | fallback: gpt-4o 44 | bypass_litellm: true 45 | verbosity: high 46 | reasoning: medium 47 | env_var_translation: 48 | provider: openai 49 | model: gpt-4o 50 | bypass_litellm: false 51 | tech_preparation: 52 | provider: openai 53 | model: gpt-4o 54 | bypass_litellm: false 55 | log_analysis: 56 | provider: openai 57 | model: gpt-5 58 | fallback: gpt-4o 59 | bypass_litellm: true 60 | verbosity: low 61 | reasoning: minimal 62 | parameters: 63 | temperature: 64 | code_generation: 0.1 65 | schema_analysis: 0.3 66 | template_matching: 0.2 67 | debugging: 0.3 68 | tech_preparation: 0.5 69 | max_tokens: 70 | code_generation: 8000 71 | schema_analysis: 4000 72 | template_matching: 2000 73 | debugging: 4000 74 | timeout: 75 | default: 120 76 | code_generation: 180 77 | debugging: 240 -------------------------------------------------------------------------------- /resources/python/sources/confluent_kafka/README.md: -------------------------------------------------------------------------------- 1 | # Confluent Kafka 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/sources/confluent_kafka) demonstrates how to consume data from a Kafka topic in Confluent Cloud and publish the data to a Kafka topic configured in Quix. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 10 | 11 | Then either: 12 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 13 | 14 | * or click `Customise connector` to inspect or alter the code before deployment. 15 | 16 | ## Environment variables 17 | 18 | The connector uses the following environment variables: 19 | 20 | - **output**: This is the Quix Topic that will receive the stream. 21 | - **kafka_key**: Obtained from the Confluent Kafka portal. 22 | - **kafka_secret**: Obtained from the Confluent Kafka portal. 23 | - **kafka_broker_address**: Obtained from the Confluent Kafka portal. 24 | - **kafka_topic**: The Confluent Kafka Topic you wish to read from. 25 | - **kafka_ca_location**: (Optional) Path to the SSL CA certificate file for secure connections. If not provided, the system's default CA certificates will be used. 26 | - **kafka_sasl_mechanism**: (Optional) SASL mechanism for authentication. 
Defaults to "SCRAM-SHA-256". 27 | 28 | ## Contribute 29 | 30 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 31 | 32 | ## Open source 33 | 34 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 35 | 36 | Please star us and mention us on social to show your appreciation. 37 | -------------------------------------------------------------------------------- /prompts/diagnose/diagnose_follow_up.md: -------------------------------------------------------------------------------- 1 | Follow-up Code Enhancement Instructions 2 | 3 | 4 | The application has been successfully tested and the user wants to make additional changes or improvements. 5 | 6 | 7 | 8 | - **Application:** {app_name} 9 | - **Previous Changes:** {previous_changes} 10 | - **Current Working State:** The application is running successfully 11 | - **User's New Request:** {user_requirements} 12 | 13 | 14 | 15 | The user has requested additional modifications to the working application. Based on their request: 16 | 17 | 1. **Build upon the existing working code** - Don't break what's already working 18 | 2. **Implement the requested changes**, which could include: 19 | - Adding new features 20 | - Performance optimizations 21 | - Code refactoring 22 | - Improved error handling 23 | - Enhanced logging/monitoring 24 | - Dependency updates 25 | 3. **Maintain backward compatibility** unless explicitly told otherwise 26 | 4. **Test your changes** won't break existing functionality 27 | 28 | 29 | 30 | - **Start from the current working version** in `{app_directory}` 31 | - **Preserve all existing functionality** that the user hasn't asked to change 32 | - **Keep the same configuration structure** unless changes are needed 33 | - **Document significant additions** with clear comments 34 | - **Ensure new dependencies are added** to requirements.txt 35 | 36 | 37 | 38 | After implementing the changes: 39 | 1. Describe what you added or modified 40 | 2. Explain how it enhances the application 41 | 3. Note any new configuration requirements 42 | 4. Highlight any risks or considerations 43 | 5. Suggest testing approaches for the new changes 44 | 45 | 46 | 47 | This is an iterative improvement on a working application. Be careful not to break existing functionality while adding enhancements. 48 | -------------------------------------------------------------------------------- /prompts/tasks/diagnose_edit_code.md: -------------------------------------------------------------------------------- 1 | # Code Update Instructions for Diagnose Workflow 2 | 3 | You are working with an existing Quix application. The user wants to modify, enhance, or debug it. 4 | 5 | ## Context 6 | - **Application:** {app_name} 7 | - **User Request:** {user_requirements} 8 | - **Previous Analysis:** {app_analysis} 9 | 10 | ## Log Analysis (if applicable) 11 | {log_analysis} 12 | 13 | ## Your Task 14 | 15 | Based on the user's request, you need to: 16 | 17 | 1. **Understand the current implementation** and how it works 18 | 2. **Implement the requested changes**, which could be: 19 | - Bug fixes if issues were identified 20 | - New features or enhancements 21 | - Performance improvements 22 | - Code refactoring or cleanup 23 | - Adding better error handling or logging 24 | 3. **Ensure compatibility** with the Quix platform requirements 25 | 4. 
**Preserve existing functionality** unless explicitly asked to change it 26 | 27 | ## Important Guidelines 28 | 29 | - **Preserve existing environment variables** unless adding new ones 30 | - **Maintain the application's core purpose** unless asked to change it 31 | - **Follow the existing code style** and patterns 32 | - **Add proper error handling** where it improves robustness 33 | - **Use appropriate logging** for debugging and monitoring 34 | - **Follow Python best practices** 35 | - **Ensure all dependencies are in requirements.txt** 36 | - **Document significant changes** with comments if helpful 37 | 38 | ## Code Location 39 | The application files are in: `{app_directory}` 40 | 41 | ## Expected Output 42 | 43 | After making changes: 44 | 1. Explain what you changed and why 45 | 2. List any new environment variables added 46 | 3. Note any new dependencies required 47 | 4. Highlight any breaking changes or important considerations 48 | 5. Suggest any follow-up improvements if relevant 49 | 50 | Remember: The goal is to fulfill the user's request while maintaining stability and compatibility with the existing Quix deployment. -------------------------------------------------------------------------------- /resources/python/sources/postgres_cdc/README.md: -------------------------------------------------------------------------------- 1 | # PostgreSQL CDC 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/sources/postgres_cdc) demonstrates how to capture changes to a PostgreSQL database table (using CDC) and publish the change events to a Kafka topic. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 10 | 11 | Then either: 12 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 13 | 14 | * or click `Customise connector` to inspect or alter the code before deployment. 15 | 16 | ## Environment variables 17 | 18 | The connector uses the following environment variables: 19 | 20 | - **output**: Name of the output topic to write into. 21 | - **PG_HOST**: The IP address or fully qualified domain name of your server. 22 | - **PG_PORT**: The Port number to use for communication with the server. 23 | - **PG_DATABASE**: The name of the database for CDC. 24 | - **PG_USER**: The username of the sink should use to interact with the database. 25 | - **PG_PASSWORD**: The password for the user configured above. 26 | - **PG_SCHEMA**: The name of the schema for CDC. 27 | - **PG_TABLE**: The name of the table for CDC. 28 | 29 | ## Requirements / Prerequisites 30 | 31 | - A Postgres Database. 32 | - Set `wal_level = logical` in `postgresql.conf`. 33 | 34 | ## Contribute 35 | 36 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 37 | 38 | ## Open source 39 | 40 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 41 | 42 | Please star us and mention us on social to show your appreciation. 
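Under the hood this connector relies on PostgreSQL logical decoding: it creates a replication slot backed by the `wal2json` plugin and then repeatedly polls that slot for change records (the helper functions in `postgres_helper.py`, included later in this repo, implement exactly this). A condensed sketch of the mechanism, using the `PG_*` environment variables listed above (illustrative only; the real connector also publishes each change to Kafka):

```python
import os
import psycopg2

conn = psycopg2.connect(
    host=os.environ["PG_HOST"],
    port=os.environ["PG_PORT"],
    dbname=os.environ["PG_DATABASE"],
    user=os.environ["PG_USER"],
    password=os.environ["PG_PASSWORD"],
)

with conn.cursor() as cur:
    # Requires wal_level = logical and the wal2json output plugin on the server;
    # creating a slot that already exists raises DuplicateObject.
    cur.execute("SELECT pg_create_logical_replication_slot(%s, 'wal2json');", ("cdc_slot",))
    conn.commit()

with conn.cursor() as cur:
    # Each call drains the pending change events for the slot as JSON strings.
    cur.execute("SELECT data FROM pg_logical_slot_get_changes(%s, NULL, NULL);", ("cdc_slot",))
    for (change,) in cur.fetchall():
        print(change)

conn.close()
```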
43 | -------------------------------------------------------------------------------- /prompts/diagnose/diagnose_app_analysis.md: -------------------------------------------------------------------------------- 1 | Application Analysis 2 | 3 | 4 | You are analyzing an existing application to help the user understand what it does and identify potential issues or improvements. 5 | 6 | 7 | 8 | - **Application Name:** {app_name} 9 | - **Application ID:** {app_id} 10 | - **Workspace:** {workspace_id} 11 | 12 | 13 | 14 | The application files are located in: `{app_directory}` 15 | 16 | Please analyze the following files: 17 | {file_list} 18 | 19 | 20 | 21 | 1. **Examine the main application file** (typically `app.py` or `main.py`) 22 | 2. **Review the configuration** in `app.yaml` to understand: 23 | - Environment variables and their purposes 24 | - Dependencies listed in requirements 25 | - The application's input/output topics 26 | 3. **Identify the application's purpose** based on: 27 | - Code structure and logic 28 | - External systems it connects to 29 | - Data transformations it performs 30 | 4. **Note any potential issues** such as: 31 | - Missing error handling 32 | - Hardcoded values that should be variables 33 | - Deprecated libraries or patterns 34 | - Performance bottlenecks 35 | 36 | 37 | 38 | Provide a concise summary in the following format: 39 | 40 | ### Application Summary 41 | [2-3 sentences describing what this application does] 42 | 43 | ### Key Components 44 | - **Input:** [What data it reads and from where] 45 | - **Processing:** [Main operations/transformations] 46 | - **Output:** [What it produces and where it sends it] 47 | 48 | ### Configuration 49 | - **Environment Variables:** [List key variables and their purposes] 50 | - **Dependencies:** [Notable libraries used] 51 | 52 | ### Observations 53 | [Any issues, improvements, or notable patterns you've identified] 54 | 55 | ### Recommendation 56 | [Brief suggestion on whether to run as-is, fix issues first, or provide more context] 57 | -------------------------------------------------------------------------------- /resources/python/others/opc_ua_server/README.md: -------------------------------------------------------------------------------- 1 | # OPC UA Server 2 | 3 | [This code](https://github.com/quixio/quix-samples/tree/main/python/others/opc_ua_server) is a demo OPC UA server designed to help you start integrating your on site OPC UA servers with Quix. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Templates` tab to use this connector. 8 | 9 | Clicking `Deploy` allows you to deploy the server to your Quix instance. 10 | Clicking `Preview code` allows you to view or save the code to the repo that backs your Quix cloud instance. 11 | 12 | ## Usage and Config 13 | 14 | Upon deployment the following YAML will be added to your environments YAML file: 15 | 16 | ``` 17 | - name: Demo OPC UA Server 18 | application: OPCServer 19 | version: latest 20 | deploymentType: Service 21 | resources: 22 | cpu: 200 23 | memory: 500 24 | replicas: 1 25 | network: 26 | serviceName: intopcserver 27 | ports: 28 | - port: 4840 29 | targetPort: 4840 30 | ``` 31 | 32 | The server does not require any configuration. 33 | However, you should note that when deployed within Quix, the following YAML settings are worth knowing about. 34 | 35 | * network - These network setting will allow the server to be accessed on the specified port *within* the Quix server. 
They do not enable access from the internet. 36 | * service name and port - these will be used by the client to access the server, again, within the Quix backend network. 37 | 38 | ## Contribute 39 | 40 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 41 | 42 | ## Open source 43 | 44 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 45 | 46 | Please star us and mention us on social to show your appreciation. 47 | -------------------------------------------------------------------------------- /workflow_tools/__init__.py: -------------------------------------------------------------------------------- 1 | # workflow_tools/__init__.py - Package initialization with organized structure 2 | 3 | # Core components (keep at root level) 4 | from .contexts import WorkflowContext 5 | from .common import printer, workflow_logger 6 | from .workflow_types import WorkflowType, WorkflowInfo 7 | from .service_container import ServiceContainer, get_global_container 8 | from .workflow_factory import WorkflowFactory 9 | 10 | # Import from organized subfolders 11 | from .core.triage_agent import TriageAgent 12 | from .core.placeholder_workflows import PlaceholderWorkflowFactory 13 | from .core.prompt_manager import load_agent_instructions, load_task_prompt 14 | from .integrations.credentials_parser import CredentialsParser 15 | 16 | # Import all phases from the phases module 17 | from .phases import ( 18 | SinkPrerequisitesCollectionPhase, 19 | SourcePrerequisitesCollectionPhase, 20 | SinkSchemaPhase, 21 | SinkKnowledgePhase, 22 | SinkGenerationPhase, 23 | SinkSandboxPhase, 24 | SourceKnowledgePhase, 25 | SourceConnectionTestingPhase, 26 | SourceSchemaPhase, 27 | SourceGenerationPhase, 28 | SourceSandboxPhase, 29 | DeploymentPhase, 30 | MonitoringPhase, 31 | ) 32 | 33 | __all__ = [ 34 | 'WorkflowContext', 35 | 'printer', 36 | 'workflow_logger', 37 | 'WorkflowType', 38 | 'WorkflowInfo', 39 | 'ServiceContainer', 40 | 'get_global_container', 41 | 'WorkflowFactory', 42 | 'TriageAgent', 43 | 'PlaceholderWorkflowFactory', 44 | 'CredentialsParser', 45 | 'load_agent_instructions', 46 | 'load_task_prompt', 47 | 'SinkPrerequisitesCollectionPhase', 48 | 'SourcePrerequisitesCollectionPhase', 49 | 'SinkSchemaPhase', 50 | 'SinkKnowledgePhase', 51 | 'SinkGenerationPhase', 52 | 'SinkSandboxPhase', 53 | 'SourceKnowledgePhase', 54 | 'SourceConnectionTestingPhase', 55 | 'SourceSchemaPhase', 56 | 'SourceGenerationPhase', 57 | 'SourceSandboxPhase', 58 | 'DeploymentPhase', 59 | 'MonitoringPhase', 60 | ] -------------------------------------------------------------------------------- /resources/python/sources/redis_source/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "redis-source", 3 | "name": "Redis Source", 4 | "language": "Python", 5 | "tags": { 6 | "Pipeline Stage": ["Source"], 7 | "Type": ["Connectors"], 8 | "Category": ["Cache"] 9 | }, 10 | "shortDescription": "Periodically query a Redis database and publish the results to a Kafka topic.", 11 | "DefaultFile": "main.py", 12 | "EntryPoint": "dockerfile", 13 | "RunEntryPoint": "main.py", 14 | "IconFile": "icon.png", 15 | "Variables": [ 16 | { 17 | "Name": "output", 18 | "Type": "EnvironmentVariable", 19 | "InputType": "OutputTopic", 20 | "Description": "This is the Quix topic that 
will receive the stream", 21 | "DefaultValue": "output", 22 | "Required": true 23 | }, 24 | { 25 | "Name": "redis_host", 26 | "Type": "EnvironmentVariable", 27 | "InputType": "FreeText", 28 | "Description": "Host address for the Redis instance", 29 | "DefaultValue": "", 30 | "Required": true 31 | }, 32 | { 33 | "Name": "redis_port", 34 | "Type": "EnvironmentVariable", 35 | "InputType": "FreeText", 36 | "Description": "Port for the Redis instance", 37 | "DefaultValue": "6379", 38 | "Required": true 39 | }, 40 | { 41 | "Name": "redis_password", 42 | "Type": "EnvironmentVariable", 43 | "InputType": "FreeText", 44 | "Description": "Password for the Redis instance", 45 | "DefaultValue": "", 46 | "Required": false 47 | }, 48 | { 49 | "Name": "redis_username", 50 | "Type": "EnvironmentVariable", 51 | "InputType": "FreeText", 52 | "Description": "Username for the Redis instance", 53 | "DefaultValue": "", 54 | "Required": false 55 | } 56 | ], 57 | "DeploySettings": { 58 | "DeploymentType": "Service", 59 | "CpuMillicores": 200, 60 | "MemoryInMb": 500, 61 | "Replicas": 1, 62 | "PublicAccess": false, 63 | "ValidateConnection": true 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /resources/python/destinations/big_query/README.md: -------------------------------------------------------------------------------- 1 | # BigQuery 2 | 3 | [This connector](https://github.com/quixio/quix-samples/tree/main/python/destinations/big_query) is used to stream data from Quix to a BigQuery data warehouse. It handles both parameter and event data. 4 | 5 | ## How to run 6 | 7 | Create a [Quix](https://portal.platform.quix.io/signup?xlink=github) account or log-in and visit the `Connectors` tab to use this connector. 8 | 9 | Clicking `Set up connector` allows you to enter your connection details and runtime parameters. 10 | 11 | Then either: 12 | * click `Test connection & deploy` to deploy the pre-built and configured container into Quix. 13 | 14 | * or click `Customise connector` to inspect or alter the code before deployment. 15 | 16 | ## Environment variables 17 | 18 | The connector uses the following environment variables: 19 | 20 | - **input**: Name of the input topic to read from. 21 | - **PROJECT_ID**: The BigQuery GCP Project ID. 22 | - **DATASET_ID**: The target Bigquery dataset ID. 23 | - **DATASET_LOCATION**: Location of BigQuery dataset. 24 | - **SERVICE_ACCOUNT_JSON**: The service account json string for the BigQuery GCP project. [Tutorial on how to create service account.](https://cloud.google.com/iam/docs/creating-managing-service-accounts#iam-service-accounts-create-console) 25 | - **MAX_QUEUE_SIZE**: Max queue size for the sink ingestion. 26 | 27 | ## Known limitations 28 | - BigQuery fails to immediately recognize new Schema changes such as adding a new field when streaming insert data. 29 | - BigQuery doesn't allow deleting data when streaming insert data. 30 | 31 | ## Contribute 32 | 33 | Submit forked projects to the Quix [GitHub](https://github.com/quixio/quix-samples) repo. Any new project that we accept will be attributed to you and you'll receive $200 in Quix credit. 34 | 35 | ## Open source 36 | 37 | This project is open source under the Apache 2.0 license and available in our [GitHub](https://github.com/quixio/quix-samples) repo. 38 | 39 | Please star us and mention us on social to show your appreciation. 
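As a hedged illustration of how the variables above could map onto BigQuery's streaming-insert API, the sketch below consumes the input topic with Quix Streams and writes each row with `insert_rows_json`. It is not the connector's actual implementation (which also handles queuing, schema updates and event data), and the target table name is an assumption:

```python
import json
import os

from google.cloud import bigquery
from quixstreams import Application

client = bigquery.Client.from_service_account_info(
    json.loads(os.environ["SERVICE_ACCOUNT_JSON"]),
    project=os.environ["PROJECT_ID"],
)
table_id = f'{os.environ["PROJECT_ID"]}.{os.environ["DATASET_ID"]}.events'  # table name assumed

app = Application(consumer_group="bigquery-sink-sketch", auto_offset_reset="earliest")
sdf = app.dataframe(topic=app.topic(os.environ["input"]))


def write_row(row: dict) -> dict:
    # Streaming inserts: as noted above, schema changes are picked up slowly
    # and rows cannot be deleted while still in the streaming buffer.
    errors = client.insert_rows_json(table_id, [row])
    if errors:
        print(f"BigQuery insert errors: {errors}")
    return row


sdf = sdf.apply(write_row)

if __name__ == "__main__":
    app.run()
```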
40 | 41 | -------------------------------------------------------------------------------- /resources/python/destinations/postgres/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from quixstreams import Application 3 | from quixstreams.sinks.community.postgresql import ( 4 | PostgreSQLSink, 5 | PrimaryKeySetter, 6 | TableName, 7 | ) 8 | # Load environment variables from a .env file for local development 9 | from dotenv import load_dotenv 10 | load_dotenv() 11 | 12 | 13 | def _as_bool(env_var: str, default="false") -> bool: 14 | return os.environ.get(env_var, default).lower() == "true" 15 | 16 | 17 | def _as_iterable(env_var) -> list[str]: 18 | return keys.split(",") if (keys := os.environ.get(env_var)) else [] 19 | 20 | 21 | # Potential Callables - can manually edit these to instead use your own callables. 22 | # --Required-- 23 | table_name: TableName = os.getenv("POSTGRES_TABLE", "default_table") 24 | # --Optional-- 25 | primary_key_columns: PrimaryKeySetter = _as_iterable("POSTGRES_PRIMARY_KEY_COLUMNS") 26 | 27 | 28 | # Initialize PostgreSQL Sink 29 | postgres_sink = PostgreSQLSink( 30 | host=os.environ["POSTGRES_HOST"], 31 | port=int(os.environ["POSTGRES_PORT"]), 32 | dbname=os.environ["POSTGRES_DBNAME"], 33 | user=os.environ["POSTGRES_USER"], 34 | password=os.environ["POSTGRES_PASSWORD"], 35 | table_name=table_name, 36 | schema_name=os.getenv("POSTGRES_SCHEMA", "public"), 37 | schema_auto_update=_as_bool("POSTGRES_SCHEMA_AUTO_UPDATE", "true"), 38 | primary_key_columns=primary_key_columns, 39 | upsert_on_primary_key=_as_bool("POSTGRES_UPSERT_ON_PRIMARY_KEY"), 40 | ) 41 | 42 | # Initialize the application 43 | app = Application( 44 | consumer_group=os.environ["CONSUMER_GROUP_NAME"], 45 | auto_offset_reset="earliest", 46 | commit_interval=float(os.environ.get("BATCH_TIMEOUT", "1")), 47 | commit_every=int(os.environ.get("BATCH_SIZE", "1000")) 48 | ) 49 | 50 | # Define the input topic 51 | input_topic = app.topic(os.environ["input"], key_deserializer="string") 52 | 53 | # Process and sink data 54 | sdf = app.dataframe(input_topic) 55 | sdf.sink(postgres_sink) 56 | 57 | if __name__ == "__main__": 58 | app.run(sdf) -------------------------------------------------------------------------------- /resources/python/transformations/hugging_face_model/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "hugging-face-model", 3 | "name": "Hugging Face Model Transformation", 4 | "language": "Python", 5 | "tags": { 6 | "Complexity": ["Medium"], 7 | "Technology": ["Quix Streams"], 8 | "Pipeline Stage": ["Transformation"], 9 | "Popular Subjects": ["Sentiment Analysis", "Machine Learning"] 10 | }, 11 | "shortDescription": "Consume data from a topic, use an ML model to run inference on the data and publish the results to an output topic.", 12 | "DefaultFile": "hugging_face_model.py", 13 | "EntryPoint": "dockerfile", 14 | "RunEntryPoint": "main.py", 15 | "Variables": [ 16 | { 17 | "Name": "input", 18 | "Type": "EnvironmentVariable", 19 | "InputType": "InputTopic", 20 | "Description": "This is the raw data input topic", 21 | "DefaultValue": "", 22 | "Required": true 23 | }, 24 | { 25 | "Name": "output", 26 | "Type": "EnvironmentVariable", 27 | "InputType": "OutputTopic", 28 | "Description": "This is the output for the hugging face model score", 29 | "DefaultValue": "hugging-face-output", 30 | "Required": true 31 | }, 32 | { 33 | "Name": "HuggingFaceModel", 34 | "Type": 
"EnvironmentVariable", 35 | "InputType": "FreeText", 36 | "Description": "Name of the Hugging Face model to be used", 37 | "DefaultValue": "distilbert-base-uncased-finetuned-sst-2-english", 38 | "Required": true 39 | }, 40 | { 41 | "Name": "TextColumnName", 42 | "Type": "EnvironmentVariable", 43 | "InputType": "FreeText", 44 | "Description": "For the table structured input, specify name of the column where input text is located.", 45 | "DefaultValue": "text", 46 | "Required": false 47 | } 48 | ], 49 | "DeploySettings": { 50 | "DeploymentType": "Service", 51 | "CpuMillicores": 200, 52 | "MemoryInMb": 1000, 53 | "Replicas": 1, 54 | "PublicAccess": false, 55 | "ValidateConnection": false 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /resources/python/sources/http_api_sample/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from flask import Flask, request, Response, redirect 3 | from flasgger import Swagger 4 | from waitress import serve 5 | 6 | from setup_logging import get_logger 7 | 8 | # for local dev, load env vars from a .env file 9 | from dotenv import load_dotenv 10 | load_dotenv() 11 | 12 | logger = get_logger() 13 | 14 | app = Flask(__name__) 15 | 16 | app.config['SWAGGER'] = { 17 | 'title': 'HTTP API Source', 18 | 'description': 'Test your HTTP API with this Swagger interface. Send data and see it arrive in Quix.', 19 | 'uiversion': 3 20 | } 21 | 22 | swagger = Swagger(app) 23 | 24 | @app.route("/", methods=['GET']) 25 | def redirect_to_swagger(): 26 | return redirect("/apidocs/") 27 | 28 | @app.route("/data/", methods=['POST']) 29 | def post_data_without_key(): 30 | """ 31 | Post data without key 32 | --- 33 | parameters: 34 | - in: body 35 | name: body 36 | schema: 37 | type: object 38 | properties: 39 | some_value: 40 | type: string 41 | responses: 42 | 200: 43 | description: Data received successfully 44 | """ 45 | data = request.json 46 | 47 | logger.debug(f"{data}") 48 | 49 | # do something with your data here. 
50 | # to see how to publish data to a Quix topic, see this connector: 51 | # https://github.com/quixio/quix-samples/tree/main/python/sources/http_source 52 | 53 | response = Response(status=200) 54 | response.headers.add('Access-Control-Allow-Origin', '*') 55 | 56 | return response 57 | 58 | 59 | if __name__ == '__main__': 60 | 61 | service_url = os.environ["Quix__Deployment__Network__PublicUrl"] 62 | 63 | print("=" * 60) 64 | print(" " * 20 + "CURL EXAMPLE") 65 | print("=" * 60) 66 | print( 67 | f""" 68 | curl -L -X POST \\ 69 | -H 'Content-Type: application/json' \\ 70 | -d '{{"key": "value"}}' \\ 71 | {service_url}/data 72 | """ 73 | ) 74 | print("=" * 60) 75 | 76 | serve(app, host="0.0.0.0", port=80) 77 | -------------------------------------------------------------------------------- /resources/python/destinations/confluent_kafka/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "confluent-kafka-destination", 3 | "name": "Confluent Kafka Sink", 4 | "language": "Python", 5 | "tags": { 6 | "Pipeline Stage": ["Destination"], 7 | "Type": ["Connectors"], 8 | "Category": ["Data streaming"] 9 | }, 10 | "shortDescription": "Consume data from a Kafka topic in Quix and publish it to a topic in Confluent Cloud", 11 | "DefaultFile": "quix_function.py", 12 | "EntryPoint": "dockerfile", 13 | "RunEntryPoint": "main.py", 14 | "IconFile": "icon.png", 15 | "Variables": [ 16 | { 17 | "Name": "input", 18 | "Type": "EnvironmentVariable", 19 | "InputType": "InputTopic", 20 | "Description": "Name of the Quix input topic to listen to.", 21 | "DefaultValue": "", 22 | "Required": true 23 | }, 24 | { 25 | "Name": "kafka_topic", 26 | "Type": "EnvironmentVariable", 27 | "InputType": "FreeText", 28 | "Description": "Name of the Confluent Kafka topic to sink data to", 29 | "Required": true 30 | }, 31 | { 32 | "Name": "kafka_key", 33 | "Type": "EnvironmentVariable", 34 | "InputType": "Secret", 35 | "Description": "Obtained from the Confluent Kafka portal", 36 | "DefaultValue": "", 37 | "Required": true 38 | }, 39 | { 40 | "Name": "kafka_secret", 41 | "Type": "EnvironmentVariable", 42 | "InputType": "Secret", 43 | "Description": "Obtained from the Confluent Kafka portal", 44 | "DefaultValue": "", 45 | "Required": true 46 | }, 47 | { 48 | "Name": "kafka_broker_address", 49 | "Type": "EnvironmentVariable", 50 | "InputType": "FreeText", 51 | "Description": "Obtained from the Confluent Kafka portal", 52 | "DefaultValue": "", 53 | "Required": true 54 | } 55 | ], 56 | "DeploySettings": { 57 | "DeploymentType": "Service", 58 | "CpuMillicores": 200, 59 | "MemoryInMb": 200, 60 | "Replicas": 1, 61 | "PublicAccess": false, 62 | "ValidateConnection": true 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /resources/python/sources/postgres_cdc/postgres_helper.py: -------------------------------------------------------------------------------- 1 | import psycopg2 2 | import os 3 | 4 | def connect_postgres(): 5 | # Postgres Constants 6 | PG_HOST = os.environ["PG_HOST"] 7 | PG_PORT = os.environ["PG_PORT"] 8 | PG_USER = os.environ["PG_USER"] 9 | PG_PASSWORD = os.environ["PG_PASSWORD"] 10 | PG_DATABASE = os.environ["PG_DATABASE"] 11 | 12 | conn = psycopg2.connect( 13 | database = PG_DATABASE, user = PG_USER, password = PG_PASSWORD, host = PG_HOST, port = PG_PORT 14 | ) 15 | return conn 16 | 17 | 18 | def run_query(conn, query: str): 19 | cur = conn.cursor() 20 | cur.execute(query) 21 | conn.commit() 22 | 
cur.close() 23 | 24 | 25 | def create_logical_slot(slot_name: str): 26 | conn = connect_postgres() 27 | query = f''' 28 | SELECT pg_create_logical_replication_slot('{slot_name}', 'wal2json'); 29 | ''' 30 | try: 31 | run_query(conn, query) 32 | conn.close() 33 | 34 | except psycopg2.errors.DuplicateObject: 35 | print(f"Replication slot {slot_name} already exists.") 36 | conn.close() 37 | 38 | else: 39 | conn.close() 40 | 41 | 42 | def create_publication_on_table(publication_name: str, table_name: str): 43 | conn = connect_postgres() 44 | query = f''' 45 | CREATE PUBLICATION {publication_name} FOR TABLE {table_name}; 46 | ''' 47 | try: 48 | run_query(conn, query) 49 | conn.close() 50 | 51 | except psycopg2.errors.DuplicateObject: 52 | print(f"Publication {publication_name} already exists.") 53 | conn.close() 54 | 55 | except psycopg2.errors.UndefinedTable: 56 | print(f"{table_name} not found.") 57 | conn.close() 58 | raise 59 | 60 | else: 61 | conn.close() 62 | 63 | 64 | def get_changes(conn, slot_name: str): 65 | query = f''' 66 | SELECT data FROM pg_logical_slot_get_changes('{slot_name}', NULL, NULL); 67 | ''' 68 | cur = conn.cursor() 69 | cur.execute(query) 70 | records = cur.fetchall() 71 | cur.close() 72 | return records 73 | 74 | -------------------------------------------------------------------------------- /prompts/diagnose/diagnose_edit_code.md: -------------------------------------------------------------------------------- 1 | Code Update Instructions for Diagnose Workflow 2 | 3 | 4 | You are working with an existing application. The user wants to modify, enhance, or debug it. 5 | 6 | 7 | 8 | - **Application:** {app_name} 9 | - **User Request:** {user_requirements} 10 | - **Previous Analysis:** {app_analysis} 11 | 12 | 13 | 14 | {log_analysis} 15 | 16 | 17 | 18 | Based on the user's request, you need to: 19 | 20 | 1. **Understand the current implementation** and how it works 21 | 2. **Implement the requested changes**, which could be: 22 | - Bug fixes if issues were identified 23 | - New features or enhancements 24 | - Performance improvements 25 | - Code refactoring or cleanup 26 | - Adding better error handling or logging 27 | 3. **Ensure compatibility** with platform requirements 28 | 4. **Preserve existing functionality** unless explicitly asked to change it 29 | 30 | 31 | 32 | - **Preserve existing environment variables** unless adding new ones 33 | - **Maintain the application's core purpose** unless asked to change it 34 | - **Follow the existing code style** and patterns 35 | - **Add proper error handling** where it improves robustness 36 | - **Use appropriate logging** for debugging and monitoring 37 | - **Follow Python best practices** 38 | - **Ensure all dependencies are in requirements.txt** 39 | - **Document significant changes** with comments if helpful 40 | 41 | 42 | 43 | The application files are in: `{app_directory}` 44 | 45 | 46 | 47 | After making changes: 48 | 1. Explain what you changed and why 49 | 2. List any new environment variables added 50 | 3. Note any new dependencies required 51 | 4. Highlight any breaking changes or important considerations 52 | 5. Suggest any follow-up improvements if relevant 53 | 54 | 55 | 56 | The goal is to fulfill the user's request while maintaining stability and compatibility with the existing deployment.
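Prompts like the one above are routed to whichever model `config/models.yaml.template` (shown earlier in this repo) assigns to the task. The loader below is only a sketch of how that mapping might be read; it assumes PyYAML and a rendered `config/models.yaml`, and the repo's actual configuration code may differ:

```python
import yaml  # PyYAML


def load_model_config(path: str = "config/models.yaml") -> dict:
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)


def model_for(task: str, config: dict) -> tuple:
    """Return (model, fallback) for a flat task entry such as 'debugging'."""
    entry = config["models"][task]
    return entry["model"], entry.get("fallback")


config = load_model_config()
model, fallback = model_for("debugging", config)
print(f"debugging -> {model} (fallback: {fallback})")
```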
57 | -------------------------------------------------------------------------------- /resources/python/sources/environment_source/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "environment-source", 3 | "name": "Environment Source", 4 | "language": "Python", 5 | "tags": { 6 | "Pipeline Stage": ["Source"], 7 | "Type": ["Connectors"], 8 | "Category": ["Data streaming"] 9 | }, 10 | "shortDescription": "Consume data from a Kafka topic in another environment. Useful to mirror production data to dev environments.", 11 | "DefaultFile": "main.py", 12 | "EntryPoint": "dockerfile", 13 | "RunEntryPoint": "main.py", 14 | "IconFile": "icon.png", 15 | "Variables": [ 16 | { 17 | "Name": "topic", 18 | "Type": "EnvironmentVariable", 19 | "InputType": "OutputTopic", 20 | "Description": "The Quix topic that will be mirrored from the source environment.", 21 | "DefaultValue": "", 22 | "Required": false 23 | }, 24 | { 25 | "Name": "source_workspace_id", 26 | "Type": "EnvironmentVariable", 27 | "InputType": "FreeText", 28 | "Description": "Source workspace ID", 29 | "DefaultValue": "", 30 | "Required": true 31 | }, 32 | { 33 | "Name": "source_sdk_token", 34 | "Type": "EnvironmentVariable", 35 | "InputType": "Secret", 36 | "Description": "SDK token for source", 37 | "DefaultValue": "source_sdk_token_key", 38 | "Required": true 39 | }, 40 | { 41 | "Name": "consumer_group", 42 | "Type": "EnvironmentVariable", 43 | "InputType": "FreeText", 44 | "Description": "Kafka consumer group", 45 | "DefaultValue": "quix_environment_source", 46 | "Required": false 47 | }, 48 | { 49 | "Name": "auto_offset_reset", 50 | "Type": "EnvironmentVariable", 51 | "InputType": "FreeText", 52 | "Description": "Auto offset reset policy", 53 | "DefaultValue": "earliest", 54 | "Required": false 55 | } 56 | ], 57 | "DeploySettings": { 58 | "DeploymentType": "Service", 59 | "CpuMillicores": 200, 60 | "MemoryInMb": 500, 61 | "Replicas": 1, 62 | "PublicAccess": false, 63 | "ValidateConnection": true 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /resources/python/others/jupyterlab/Notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 8, 6 | "id": "5e598c31-3b11-405b-acdc-c6665378e9ec", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "topic=\"your-topic-name\"" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "eabc6a12-848f-4b30-a92b-ef49c85dcef7", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "from quixstreams import Application\n", 21 | "from quixstreams.sinks.core.list import ListSink\n", 22 | "\n", 23 | "import os\n", 24 | "\n", 25 | "\n", 26 | "# Setup necessary objects\n", 27 | "app = Application(\n", 28 | " consumer_group=\"topic-query-v1\",\n", 29 | " auto_offset_reset=\"earliest\"\n", 30 | ")\n", 31 | "input_topic = app.topic(name=topic)\n", 32 | "sdf = app.dataframe(topic=input_topic)\n", 33 | "\n", 34 | "list_sink = ListSink() # sink will be a list-like object\n", 35 | "sdf.sink(list_sink)\n", 36 | "\n", 37 | "app.run(timeout=3.0, count=100)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": null, 43 | "id": "5c3437f0-aa8a-41a8-b85a-62a79fe712da", 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "import pandas as pd\n", 48 | "df = pd.DataFrame(list_sink)\n", 49 | "df.head(10)" 50 | ] 51 | }, 
52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "id": "d7ee9398-28d7-4934-90ba-076ec25f1ef0", 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [] 59 | } 60 | ], 61 | "metadata": { 62 | "kernelspec": { 63 | "display_name": "Python 3 (ipykernel)", 64 | "language": "python", 65 | "name": "python3" 66 | }, 67 | "language_info": { 68 | "codemirror_mode": { 69 | "name": "ipython", 70 | "version": 3 71 | }, 72 | "file_extension": ".py", 73 | "mimetype": "text/x-python", 74 | "name": "python", 75 | "nbconvert_exporter": "python", 76 | "pygments_lexer": "ipython3", 77 | "version": "3.11.12" 78 | } 79 | }, 80 | "nbformat": 4, 81 | "nbformat_minor": 5 82 | } 83 | -------------------------------------------------------------------------------- /resources/python/sources/segment_webhook/main.py: -------------------------------------------------------------------------------- 1 | from quixstreams import Application 2 | from flask import Flask, request 3 | from datetime import datetime 4 | from waitress import serve 5 | import os 6 | import json 7 | import hmac 8 | import hashlib 9 | 10 | # Load environment variables (useful when working locally) 11 | from dotenv import load_dotenv 12 | load_dotenv() 13 | 14 | # Create a Quix Application; this manages the connection to the Quix platform 15 | quix_app = Application() 16 | 17 | # Create the producer; this is used to write data to the output topic 18 | producer = quix_app.get_producer() 19 | 20 | # Check the output topic is configured 21 | output_topic_name = os.getenv("output", "") 22 | if output_topic_name == "": 23 | raise ValueError("The 'output' environment variable is required") 24 | output_topic = quix_app.topic(output_topic_name) 25 | 26 | # create the flask app 27 | flask_app = Flask("Segment Webhook") 28 | 29 | # this endpoint is unauthenticated on its own; anyone could post anything to you!
30 | @flask_app.route("/webhook", methods=['POST']) 31 | def webhook(): 32 | 33 | # get the shared secret from environment variables 34 | secret = os.environ["shared_secret"] 35 | # convert to a byte array 36 | secret_bytes = bytearray(secret, "utf-8") 37 | 38 | # get the signature from the headers 39 | header_sig = request.headers['x-signature'] 40 | 41 | # compute a hash-based message authentication code (HMAC) 42 | hex_digest = hmac.new(secret_bytes, request.get_data(), hashlib.sha1).hexdigest() 43 | 44 | # compare the HMAC to the header signature provided by Segment 45 | if header_sig != hex_digest: 46 | # if they don't match, it's no bueno 47 | return "ERROR!", 401 48 | 49 | # if they do then publish to the topic 50 | producer.produce(topic=output_topic.name, 51 | key=str(request.json["type"]), 52 | value=json.dumps(request.json)) 53 | 54 | return "OK", 200 55 | 56 | 57 | print("CONNECTED!") 58 | 59 | # you can use flask_app.run for dev, but it's not secure, stable, or particularly efficient 60 | 61 | # use waitress instead for production 62 | serve(flask_app, host='0.0.0.0', port=80) -------------------------------------------------------------------------------- /resources/python/sources/snowplow_source/library.json: -------------------------------------------------------------------------------- 1 | { 2 | "libraryItemId": "snowplow-source", 3 | "name": "Snowplow Source", 4 | "language": "Python", 5 | "tags": { 6 | "Type": ["Connectors"], 7 | "Pipeline Stage": ["Source"], 8 | "Category": ["SQL DB"] 9 | }, 10 | "shortDescription": "Read data from Snowplow and publish it to a Kafka topic.", 11 | "longDescription": "Use this connector to connect to Snowplow and publish your data to a Kafka topic.
This connector is for an AWS Kinesis backed Snowplow installation", 12 | "DefaultFile": "quix_functions.py", 13 | "EntryPoint": "dockerfile", 14 | "RunEntryPoint": "main.py", 15 | "IconFile": "icon.png", 16 | "Variables": [ 17 | { 18 | "Name": "output", 19 | "Type": "EnvironmentVariable", 20 | "InputType": "OutputTopic", 21 | "Description": "This is the Quix Topic that will receive the stream", 22 | "DefaultValue": "snowplow-data", 23 | "Required": true 24 | }, 25 | { 26 | "Name": "aws_access_key_id", 27 | "Type": "EnvironmentVariable", 28 | "InputType": "FreeText", 29 | "Description": "AWS Access Key Id", 30 | "DefaultValue": "", 31 | "Required": true 32 | }, 33 | { 34 | "Name": "aws_secret_access_key", 35 | "Type": "EnvironmentVariable", 36 | "InputType": "Secret", 37 | "Description": "AWS Secret Access Key", 38 | "DefaultValue": "", 39 | "Required": true 40 | }, 41 | { 42 | "Name": "aws_region_name", 43 | "Type": "EnvironmentVariable", 44 | "InputType": "FreeText", 45 | "Description": "AWS Region Name", 46 | "DefaultValue": "", 47 | "Required": true 48 | }, 49 | { 50 | "Name": "aws_stream_name", 51 | "Type": "EnvironmentVariable", 52 | "InputType": "FreeText", 53 | "Description": "The name of the AWS stream you want to use", 54 | "DefaultValue": "", 55 | "Required": true 56 | } 57 | ], 58 | "DeploySettings": { 59 | "DeploymentType": "Job", 60 | "CpuMillicores": 200, 61 | "MemoryInMb": 200, 62 | "Replicas": 1, 63 | "PublicAccess": false, 64 | "ValidateConnection": true 65 | } 66 | } 67 | --------------------------------------------------------------------------------
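The postgres_cdc helper module above only defines connect_postgres, create_logical_slot, create_publication_on_table, and get_changes; it does not show how they fit together. Below is a minimal usage sketch, not a file from the repository: the slot, publication, and table names are made up, and it assumes the PG_* environment variables are set and the wal2json logical decoding plugin is installed on the Postgres server.

```python
# Illustrative sketch only (not part of the repository): polling Postgres CDC
# changes using the helpers from resources/python/sources/postgres_cdc/postgres_helper.py.
# Assumes PG_HOST/PG_PORT/PG_USER/PG_PASSWORD/PG_DATABASE are set and wal2json
# is installed on the server. All names below are hypothetical.
import json
import time

from postgres_helper import (
    connect_postgres,
    create_logical_slot,
    create_publication_on_table,
    get_changes,
)

SLOT_NAME = "example_slot"            # hypothetical slot name
PUBLICATION_NAME = "example_pub"      # hypothetical publication name
TABLE_NAME = "public.example_table"   # hypothetical table name

# One-off setup; both helpers tolerate re-runs by catching DuplicateObject.
create_logical_slot(SLOT_NAME)
create_publication_on_table(PUBLICATION_NAME, TABLE_NAME)

# Poll the replication slot and print whatever changes wal2json has captured.
conn = connect_postgres()
try:
    while True:
        for (data,) in get_changes(conn, SLOT_NAME):
            print(json.loads(data))  # each row's "data" column is a JSON string
        time.sleep(1)
finally:
    conn.close()
```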
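Similarly, the segment_webhook service verifies each request by recomputing an HMAC-SHA1 of the raw body with the shared secret and comparing it to the x-signature header. The sketch below shows how a local test client could produce a matching signature; it is illustrative only, the URL, secret, and payload are placeholders, and the requests library is assumed to be available (it is not part of the connector's requirements).

```python
# Illustrative test client only (not part of the repository): POST a signed
# payload to the segment_webhook endpoint defined above. The secret and URL
# are hypothetical; Segment signs the raw request body the same way.
import hashlib
import hmac
import json

import requests  # assumed to be installed for this ad-hoc test

SHARED_SECRET = "my-shared-secret"   # hypothetical value of the shared_secret env var
URL = "http://localhost:80/webhook"  # hypothetical address of the deployed service

body = json.dumps({"type": "track", "event": "Example Event"}).encode("utf-8")

# HMAC-SHA1 of the exact bytes that will be sent, hex-encoded, as the service expects.
signature = hmac.new(bytearray(SHARED_SECRET, "utf-8"), body, hashlib.sha1).hexdigest()

resp = requests.post(
    URL,
    data=body,  # send the same bytes that were signed
    headers={"Content-Type": "application/json", "x-signature": signature},
)
print(resp.status_code, resp.text)  # expect 200 OK when the signature matches
```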