├── .gitignore
├── requirements.txt
├── src
│   ├── __init__.py
│   ├── logging_setup.py
│   ├── main.py
│   ├── config.py
│   ├── state.py
│   ├── api.py
│   └── sync.py
├── .dockerignore
├── config.sample.json
├── .env
├── docker-compose.yml
├── Dockerfile
├── .github
│   └── workflows
│       └── Package Version.yaml
├── README.md
└── install.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | *.zip
2 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.32.3
2 |
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
1 | # intentionally empty
2 |
3 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | __pycache__/
3 | *.pyc
4 | *.pyo
5 | *.log
6 | *.db
7 | data/
8 | config/
9 | .vscode/
10 | .idea/
11 | .DS_Store
12 |
--------------------------------------------------------------------------------
/config.sample.json:
--------------------------------------------------------------------------------
1 | {
2 | "central_server": {
3 | "url": "http://host.docker.internal:PORT/WEBPATH",
4 | "username": "username",
5 | "password": "password"
6 | },
7 | "nodes": [
8 | {
9 | "url": "http://IP:PORT/WEBPATH",
10 |
11 | "username": "username",
12 | "password": "password"
13 | }
14 | ]
15 | }
16 |
--------------------------------------------------------------------------------
/.env:
--------------------------------------------------------------------------------
1 | # Interval (in minutes) between each sync cycle
2 | SYNC_INTERVAL_MINUTES=5
3 |
4 | # Network settings for APIManager
5 | NET_PARALLEL_NODE_CALLS=true # Enable parallel API calls to nodes
6 | NET_MAX_WORKERS=12 # Maximum number of parallel workers
7 | NET_REQUEST_TIMEOUT=15 # Request timeout in seconds
8 | NET_CONNECT_POOL_SIZE=100 # Connection pool size for HTTP requests
9 | NET_VALIDATE_TTL_SECONDS=180 # Session validation TTL in seconds
10 |
11 | # Database settings (SQLite PRAGMA)
12 | DB_WAL=1 # Enable Write-Ahead Logging (WAL) mode
13 | DB_SYNCHRONOUS=NORMAL # Synchronous mode: FULL | NORMAL | OFF
14 | DB_CACHE_SIZE_MB=64 # SQLite cache
15 |
--------------------------------------------------------------------------------
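
Each variable above overrides the matching `config.json` key at startup (the merge itself lives in `src/config.py` below). A minimal sketch of the precedence rule, with illustrative values:

```python
import os

def effective_int(env_name: str, config_value: int) -> int:
    # The environment wins when set and parseable; otherwise the config value stands.
    raw = os.getenv(env_name)
    if raw is None:
        return config_value
    try:
        return int(raw.strip())
    except ValueError:
        return config_value

# Illustration only: config.json says 8 workers, .env says 12.
os.environ["NET_MAX_WORKERS"] = "12"
print(effective_int("NET_MAX_WORKERS", 8))  # -> 12
```
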
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | dds-nodex:
3 | build: .
4 | image: dds-nodex:prod
5 | container_name: dds-nodex
6 | restart: unless-stopped
7 | environment:
8 | LOG_LEVEL: ${LOG_LEVEL:-INFO}
9 | SYNC_INTERVAL_MINUTES: ${SYNC_INTERVAL_MINUTES:-1}
10 |       NET_REQUEST_TIMEOUT: ${NET_REQUEST_TIMEOUT:-10}
11 | DATA_DIR: /app/data
12 | DB_FILE: /app/data/traffic_state.db
13 | ENABLE_FILE_LOG: ${ENABLE_FILE_LOG:-0}
14 | CONFIG_FILE: /app/config/config.json
15 | HEALTH_MAX_AGE: ${HEALTH_MAX_AGE:-180}
16 | volumes:
17 | - /var/lib/dds-nodex/data:/app/data
18 | - /var/lib/dds-nodex/config:/app/config:ro
19 | read_only: true
20 | tmpfs: [ "/tmp" ]
21 | security_opt: [ "no-new-privileges:true" ]
22 | cap_drop: [ "ALL" ]
23 | extra_hosts:
24 | - "host.docker.internal:host-gateway"
25 |
--------------------------------------------------------------------------------
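
Since `/app/data` is bind-mounted from `/var/lib/dds-nodex/data`, the same heartbeat the container's healthcheck uses can also be inspected from the host. A minimal sketch (the host path comes from the `volumes:` mapping above; the 180-second limit mirrors the `HEALTH_MAX_AGE` default):

```python
import os
import time

HEARTBEAT = "/var/lib/dds-nodex/data/.heartbeat"  # host side of the data volume
MAX_AGE = 180  # seconds, mirroring HEALTH_MAX_AGE

def heartbeat_fresh() -> bool:
    # Healthy when the heartbeat file exists and was touched recently.
    return os.path.exists(HEARTBEAT) and (time.time() - os.path.getmtime(HEARTBEAT)) < MAX_AGE

print("healthy" if heartbeat_fresh() else "stale or missing heartbeat")
```
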
/src/logging_setup.py:
--------------------------------------------------------------------------------
1 | import logging, os
2 | from logging.handlers import RotatingFileHandler
3 |
4 | def setup_logging(data_dir: str, level: str = "INFO"):
5 | log_level = getattr(logging, (level or "INFO").upper(), logging.INFO)
6 | logger = logging.getLogger()
7 | logger.setLevel(log_level)
8 |
9 | # Always log to stdout (container standard)
10 | sh = logging.StreamHandler()
11 | sh.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
12 | logger.handlers = [sh]
13 |
14 | # File logging only if needed
15 | if os.getenv("ENABLE_FILE_LOG", "0") == "1":
16 | os.makedirs(data_dir, exist_ok=True)
17 | log_path = os.path.join(data_dir, "sync.log")
18 | fh = RotatingFileHandler(
19 | filename=log_path, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
20 | )
21 | fh.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
22 | logger.addHandler(fh)
23 |
24 | return logger
25 |
--------------------------------------------------------------------------------
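
Usage is a single call at startup; with `ENABLE_FILE_LOG=1` the same records also land in `<data_dir>/sync.log`, rotated at 10 MB with 5 backups. A minimal sketch, assuming the package layout above and an illustrative data directory:

```python
import os
from src.logging_setup import setup_logging

os.environ["ENABLE_FILE_LOG"] = "1"          # opt in to the rotating file handler
logger = setup_logging("/tmp/nodex-data", "DEBUG")
logger.info("written to stdout and to /tmp/nodex-data/sync.log")
```
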
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.12-slim
2 |
3 | # Install minimal tools and tini for proper signal/zombie handling
4 | RUN apt-get update && apt-get install -y --no-install-recommends \
5 | ca-certificates tini \
6 | && rm -rf /var/lib/apt/lists/*
7 |
8 | # Create a non-root user for running the application
9 | RUN useradd -m -u 10001 app
10 | WORKDIR /app
11 |
12 | # Install Python dependencies
13 | COPY requirements.txt /app/
14 | RUN pip install --no-cache-dir -r /app/requirements.txt
15 |
16 | # Copy application source code
17 | COPY src/ /app/src/
18 |
19 | # Create config and data directories, set ownership to app user
20 | RUN mkdir -p /app/config /app/data && chown -R app:app /app
21 |
22 | # Set environment variables
23 | ENV PYTHONUNBUFFERED=1 \
24 | LOG_LEVEL=INFO \
25 | HEALTH_MAX_AGE=180 \
26 | CONFIG_FILE=/app/config/config.json \
27 | DATA_DIR=/app/data \
28 | DB_FILE=/app/data/traffic_state.db \
29 | ENABLE_FILE_LOG=0
30 |
31 | # Healthcheck: .heartbeat file must be newer than HEALTH_MAX_AGE seconds
32 | HEALTHCHECK --interval=30s --timeout=3s --retries=3 CMD \
33 | python -c "import os,sys,time; p=os.path.join(os.getenv('DATA_DIR','/app/data'),'.heartbeat'); mx=int(os.getenv('HEALTH_MAX_AGE','180')); sys.exit(0 if (os.path.exists(p) and (time.time()-os.path.getmtime(p) < mx)) else 1)"
34 |
35 | # Run securely as the app user
36 | USER app
37 | ENTRYPOINT ["/usr/bin/tini","--"]
38 | CMD ["python","-m","src.main"]
39 |
--------------------------------------------------------------------------------
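
The `HEALTHCHECK` one-liner above, unpacked into readable form for reference (same logic: exit 0 only while `.heartbeat` exists and is younger than `HEALTH_MAX_AGE` seconds):

```python
import os
import sys
import time

data_dir = os.getenv("DATA_DIR", "/app/data")
heartbeat = os.path.join(data_dir, ".heartbeat")
max_age = int(os.getenv("HEALTH_MAX_AGE", "180"))

# Docker treats exit code 0 as healthy, 1 as unhealthy.
fresh = os.path.exists(heartbeat) and (time.time() - os.path.getmtime(heartbeat)) < max_age
sys.exit(0 if fresh else 1)
```
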
/.github/workflows/Package Version.yaml:
--------------------------------------------------------------------------------
1 | name: Package Version
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | version:
7 | description: "Version folder to package (e.g. v1.3)"
8 | required: true
9 | type: string
10 |
11 | permissions:
12 | contents: write # برای ساخت/آپلود Release
13 |
14 | jobs:
15 | zip-and-release:
16 | runs-on: ubuntu-latest
17 |
18 | steps:
19 | - name: Checkout
20 | uses: actions/checkout@v4
21 | with:
22 | fetch-depth: 0
23 |
24 | - name: Validate folder structure
25 | run: |
26 | set -e
27 | VERSION="${{ github.event.inputs.version }}"
28 | test -d "$VERSION/dds-nodex-${VERSION}" || {
29 | echo "Expected folder: ${VERSION}/dds-nodex-${VERSION}/ not found."
30 | exit 1
31 | }
32 | echo "Ok: found ${VERSION}/dds-nodex-${VERSION}/"
33 |
34 | - name: Create ZIP with contents (no extra folder)
35 | run: |
36 | set -e
37 | VERSION="${{ github.event.inputs.version }}"
38 | cd "$VERSION/dds-nodex-${VERSION}"
39 | zip -r "../${VERSION}.zip" .
40 | ls -lah ..
41 |
42 | - name: Create or update GitHub Release and upload asset
43 | uses: softprops/action-gh-release@v2
44 | with:
45 | tag_name: ${{ github.event.inputs.version }}
46 | name: ${{ github.event.inputs.version }}
47 | draft: false
48 | prerelease: false
49 | files: |
50 | ${{ github.event.inputs.version }}/${{ github.event.inputs.version }}.zip
51 | env:
52 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
53 |
--------------------------------------------------------------------------------
/src/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import signal
4 | import shutil
5 | from .logging_setup import setup_logging
6 | from .config import ConfigManager
7 | from .state import TrafficStateManager
8 | from .api import APIManager
9 | from .sync import SyncManager
10 |
11 | HEARTBEAT_FILE = ".heartbeat"
12 |
13 | def migrate_db_if_needed(logger, new_db_path, legacy_candidates):
14 | """If the new DB does not exist and one of the legacy paths exists, copy it (with wal/shm)."""
15 | if os.path.exists(new_db_path):
16 | return
17 | for old in legacy_candidates:
18 | if old and os.path.exists(old):
19 | logger.info(f"Migrating legacy SQLite DB from {old} -> {new_db_path}")
20 | os.makedirs(os.path.dirname(new_db_path), exist_ok=True)
21 | shutil.copy2(old, new_db_path)
22 | for suffix in ("-wal", "-shm"):
23 | old_side, new_side = old + suffix, new_db_path + suffix
24 | if os.path.exists(old_side):
25 | shutil.copy2(old_side, new_side)
26 | logger.info("DB migration completed.")
27 | return
28 |
29 | def write_heartbeat(path):
30 | try:
31 | with open(path, "w", encoding="utf-8") as f:
32 | f.write(str(int(time.time())))
33 | except Exception as e:
34 | # Log to root logger (stdout)
35 | import logging
36 | logging.error(f"Failed to write heartbeat: {e}")
37 |
38 | def main():
39 | data_dir = os.getenv("DATA_DIR", "/app/data")
40 | os.makedirs(data_dir, exist_ok=True)
41 |
42 | logger = setup_logging(data_dir, os.getenv("LOG_LEVEL","INFO"))
43 | logger.info("Starting sync-worker v0.1 (dockerized + safe patches)")
44 |
45 | cfg_file = os.getenv("CONFIG_FILE", "/app/config/config.json")
46 | config_manager = ConfigManager(config_file=cfg_file)
47 |
48 | # DB path: can be overridden by ENV
49 | db_path = os.getenv("DB_FILE", os.path.join(data_dir, "traffic_state.db"))
50 | legacy_dbs = [
51 | "/app/traffic_state.db",
52 | "/app/src/traffic_state.db",
53 | ]
54 | migrate_db_if_needed(logger, db_path, legacy_dbs)
55 |
56 | traffic_state_manager = TrafficStateManager(
57 | db_file=db_path,
58 | db_opts=config_manager.db()
59 | )
60 | api_manager = APIManager(net_opts=config_manager.net())
61 | sync_manager = SyncManager(api_manager, config_manager, traffic_state_manager)
62 |
63 | # interval from config (or ENV override)
64 | interval_min_env = os.getenv("SYNC_INTERVAL_MINUTES")
65 | if interval_min_env is not None:
66 | try:
67 | interval_sec = max(1, int(interval_min_env)) * 60
68 | except Exception:
69 | interval_sec = max(1, int(config_manager.get_interval())) * 60
70 | else:
71 | interval_sec = max(1, int(config_manager.get_interval())) * 60
72 |
73 | stop = {"flag": False}
74 | def _graceful(signum, frame):
75 | logger.info(f"Received signal {signum}; shutting down gracefully...")
76 | stop["flag"] = True
77 | signal.signal(signal.SIGINT, _graceful)
78 | signal.signal(signal.SIGTERM, _graceful)
79 |
80 | hb_path = os.path.join(data_dir, HEARTBEAT_FILE)
81 |
82 | while not stop["flag"]:
83 | # v0.1: heartbeat is always updated (service health is independent of cycle success)
84 | write_heartbeat(hb_path)
85 | try:
86 | logger.info("Starting sync cycle")
87 | sync_manager.sync_inbounds_and_clients()
88 | sync_manager.sync_traffic()
89 | logger.info("Sync cycle completed successfully")
90 | except Exception as e:
91 | logger.error(f"Sync cycle failed: {e}")
92 | time.sleep(interval_sec)
93 |
94 | logger.info("Exited cleanly.")
95 |
96 | if __name__ == "__main__":
97 | main()
98 |
99 |
--------------------------------------------------------------------------------
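
One caveat in the loop above: `time.sleep(interval_sec)` runs to completion even after SIGTERM sets the stop flag, so shutdown can lag by up to a full interval. A hedged sketch of an interruptible wait (not part of the source; shown as a possible refinement):

```python
import time

def sleep_interruptible(total_seconds: int, stop: dict, step: float = 1.0) -> None:
    # Sleep in short slices so a signal-handler flag is noticed within ~step seconds.
    deadline = time.monotonic() + total_seconds
    while not stop["flag"] and time.monotonic() < deadline:
        time.sleep(min(step, max(0.0, deadline - time.monotonic())))

# In main(), `time.sleep(interval_sec)` could become:
# sleep_interruptible(interval_sec, stop)
```
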
/src/config.py:
--------------------------------------------------------------------------------
1 | # src/config.py
2 | import os
3 | import json
4 | import logging
5 |
6 | def _parse_bool(val, default=False):
7 | if val is None:
8 | return default
9 | v = str(val).strip().lower()
10 | return v in ("1", "true", "yes", "on")
11 |
12 | def _parse_int(val, default):
13 | if val is None:
14 | return default
15 | try:
16 | return int(str(val).strip())
17 | except Exception:
18 | return default
19 |
20 | class ConfigManager:
21 | # Used by main.py: config_file is passed in as an argument
22 | def __init__(self, config_file='config.json'):
23 | self.config_file = config_file
24 | self.config = self.load_config()
25 |
26 | def load_config(self):
27 | try:
28 | with open(self.config_file, 'r', encoding='utf-8') as f:
29 | config = json.load(f)
30 | if not config.get('central_server') or not config.get('nodes'):
31 | raise ValueError("Missing central_server or nodes in config")
32 |
33 | # --- Set default values if missing ---
34 | config.setdefault('sync_interval_minutes', 1)
35 | config.setdefault('net', {})
36 | config.setdefault('db', {})
37 | config['net'].setdefault('parallel_node_calls', True)
38 | config['net'].setdefault('max_workers', 8)
39 | config['net'].setdefault('request_timeout', 10)
40 | config['net'].setdefault('connect_pool_size', 50)
41 | # NEW: TTL for session validation
42 | config['net'].setdefault('validate_ttl_seconds', 60)
43 |
44 | config['db'].setdefault('wal', True)
45 | config['db'].setdefault('synchronous', 'NORMAL') # Options: FULL/NORMAL/OFF
46 | config['db'].setdefault('cache_size_mb', 20)
47 |
48 | # --- Override config values with environment variables ---
49 | # sync interval
50 | config['sync_interval_minutes'] = _parse_int(
51 | os.getenv("SYNC_INTERVAL_MINUTES"),
52 | config['sync_interval_minutes']
53 | )
54 |
55 | # network settings
56 | config['net']['parallel_node_calls'] = _parse_bool(
57 | os.getenv("NET_PARALLEL_NODE_CALLS"),
58 | config['net']['parallel_node_calls']
59 | )
60 | config['net']['max_workers'] = _parse_int(
61 | os.getenv("NET_MAX_WORKERS"),
62 | config['net']['max_workers']
63 | )
64 | config['net']['request_timeout'] = _parse_int(
65 | os.getenv("NET_REQUEST_TIMEOUT"),
66 | config['net']['request_timeout']
67 | )
68 | config['net']['connect_pool_size'] = _parse_int(
69 | os.getenv("NET_CONNECT_POOL_SIZE"),
70 | config['net']['connect_pool_size']
71 | )
72 | # NEW: TTL override from ENV
73 | config['net']['validate_ttl_seconds'] = _parse_int(
74 | os.getenv("NET_VALIDATE_TTL_SECONDS"),
75 | config['net']['validate_ttl_seconds']
76 | )
77 |
78 | # database settings
79 | db_wal_env = os.getenv("DB_WAL")
80 | if db_wal_env is not None:
81 | config['db']['wal'] = _parse_bool(db_wal_env, config['db']['wal'])
82 |
83 | db_sync_env = os.getenv("DB_SYNCHRONOUS")
84 | if db_sync_env is not None:
85 | sync_mode = str(db_sync_env).strip().upper()
86 | if sync_mode in ("FULL", "NORMAL", "OFF"):
87 | config['db']['synchronous'] = sync_mode
88 | else:
89 | logging.warning(f"Invalid DB_SYNCHRONOUS='{db_sync_env}', keeping '{config['db']['synchronous']}'")
90 |
91 | config['db']['cache_size_mb'] = _parse_int(
92 | os.getenv("DB_CACHE_SIZE_MB"),
93 | config['db']['cache_size_mb']
94 | )
95 |
96 | return config
97 |
98 | except FileNotFoundError:
99 | logging.error(f"Config file {self.config_file} not found")
100 | raise
101 | except json.JSONDecodeError:
102 | logging.error(f"Invalid JSON in {self.config_file}")
103 | raise
104 | except ValueError as e:
105 | logging.error(f"Config error: {e}")
106 | raise
107 |
108 | def get_central_server(self):
109 | return self.config.get('central_server', {})
110 |
111 | def get_nodes(self):
112 | return self.config.get('nodes', [])
113 |
114 | def get_interval(self):
115 | return self.config.get('sync_interval_minutes', 1)
116 |
117 | def net(self):
118 | return self.config.get('net', {})
119 |
120 | def db(self):
121 | return self.config.get('db', {})
122 |
--------------------------------------------------------------------------------
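
A short usage sketch: pointed at a copy of `config.sample.json`, `ConfigManager` returns the defaults merged with any `NET_*`/`DB_*` environment overrides (the path and override value here are illustrative):

```python
import os
from src.config import ConfigManager

os.environ["NET_REQUEST_TIMEOUT"] = "15"   # override the default of 10

cfg = ConfigManager(config_file="config.sample.json")
print(cfg.get_interval())                  # 1, unless SYNC_INTERVAL_MINUTES is set
print(cfg.net()["request_timeout"])        # -> 15, taken from the environment
print(cfg.db()["synchronous"])             # -> "NORMAL" by default
```
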
/README.md:
--------------------------------------------------------------------------------
1 | # Nodex - Unofficial version of the 3X-ui Node
2 |
3 |
4 | Full synchronization of inbounds and clients, plus traffic synchronization between servers.
5 | Lightweight, secure, dockerized, and configurable via a `.env` file.
6 |
7 |
8 | ## ⚡ Nodex
9 |
10 |
11 |
12 | Nodex is a lightweight, dockerized service that acts as a "node" for a central 3X-UI panel. It:
13 |
14 | - Syncs inbounds and clients from the **central server** to the **nodes**.
15 | - **Aggregates and synchronizes** client traffic usage across multiple nodes.
16 | - Works with the panel's official API (the `/panel/api/...` routes).
17 | - Stores state/traffic in SQLite (with WAL).
18 |
19 |
20 |
21 | ## 🍀 Features
22 |
23 |
24 |
25 | - **Full inbound sync** (create/update on nodes based on the central panel)
26 | - **Client sync** (add/remove/update to match the central panel)
27 | - **Traffic synchronization**: sums usage from the nodes and applies a unified counter
28 | - **Fully dockerized**: slim Python 3.12 image + `tini` + runs as a non-root user
29 | - **Secure by default**: `read_only`, `no-new-privileges`, `cap_drop: ALL`, `tmpfs` for `/tmp`
30 | - **Built-in healthcheck**: based on the `.heartbeat` file
31 | - **Simple configuration**: via `config.json` and `.env` variables
32 | - **Standard logging** (stdout) + **file logging** (optional, with `ENABLE_FILE_LOG=1`)
33 | - **Parallelized requests** to the nodes (tunable via `NET_*`)
34 |
35 |
36 |
37 | ## 📁 Layout and Default Paths
38 |
39 |
40 |
41 | - Data and database: `/var/lib/dds-nodex/data`
42 | - Read-only configuration: `/var/lib/dds-nodex/config`
43 | - Database file: `/var/lib/dds-nodex/data/traffic_state.db`
44 | - Config file: `/var/lib/dds-nodex/config/config.json`
45 |
46 | Inside the container:
47 |
48 | - `DATA_DIR=/app/data`
49 | - `DB_FILE=/app/data/traffic_state.db`
50 | - `CONFIG_FILE=/app/config/config.json`
51 |
52 |
53 |
54 | ## 🧩 Key Sample Files
55 |
56 | ### 1) Sample config (`config.sample.json`)
57 |
58 |
59 |
60 | Create a copy named `config.json` in the configuration directory:
61 |
62 |
63 |
64 | ```json
65 | {
66 | "central_server": {
67 | "url": "http://host.docker.internal:PORT/WEBPATH",
68 | "username": "username",
69 | "password": "password"
70 | },
71 | "nodes": [
72 | {
73 | "url": "http://IP:PORT/WEBPATH",
74 | "username": "username",
75 | "password": "password"
76 | }
77 | ]
78 | }
79 | ```
80 |
81 |
82 |
83 | **Note**: Always use SSL for your nodes; skipping HTTPS puts your security at risk. The panel URL usually looks like `http://IP:PORT` or `https://IP:PORT/panel`. Set `WEBPATH` to match your panel.
84 |
85 | ### 2) Environment variables (`.env`)
86 |
87 |
88 |
89 |
90 |
91 | ```env
92 | # Interval between sync cycles (minutes)
93 | SYNC_INTERVAL_MINUTES=1
94 |
95 |
96 | # Network/performance
97 | NET_PARALLEL_NODE_CALLS=true
98 | NET_MAX_WORKERS=12
99 | NET_REQUEST_TIMEOUT=15
100 | NET_CONNECT_POOL_SIZE=100
101 | NET_VALIDATE_TTL_SECONDS=180
102 |
103 | # SQLite settings
104 | DB_WAL=1
105 | DB_SYNCHRONOUS=NORMAL # FULL | NORMAL | OFF
106 | DB_CACHE_SIZE_MB=64
107 |
108 | # File logging (0 = disabled, 1 = enabled)
109 | ENABLE_FILE_LOG=0
110 |
111 | # Log level
112 | LOG_LEVEL=INFO
113 |
114 | # Service health (seconds): maximum age of .heartbeat
115 | HEALTH_MAX_AGE=180
116 | ```
117 |
118 |
119 |
120 | ### 3) Docker Compose
121 |
122 |
123 |
124 | Nodex runs with this Compose file (configuration summary):
125 |
126 |
127 |
128 | ```yaml
129 | services:
130 | dds-nodex:
131 | build: .
132 | image: dds-nodex:prod
133 | container_name: dds-nodex
134 | restart: unless-stopped
135 | environment:
136 | LOG_LEVEL: ${LOG_LEVEL:-INFO}
137 | SYNC_INTERVAL_MINUTES: ${SYNC_INTERVAL_MINUTES:-1}
138 |       NET_REQUEST_TIMEOUT: ${NET_REQUEST_TIMEOUT:-10}
139 | DATA_DIR: /app/data
140 | DB_FILE: /app/data/traffic_state.db
141 | ENABLE_FILE_LOG: ${ENABLE_FILE_LOG:-0}
142 | CONFIG_FILE: /app/config/config.json
143 | HEALTH_MAX_AGE: ${HEALTH_MAX_AGE:-180}
144 | volumes:
145 | - /var/lib/dds-nodex/data:/app/data
146 | - /var/lib/dds-nodex/config:/app/config:ro
147 | read_only: true
148 | tmpfs: ["/tmp"]
149 | security_opt: ["no-new-privileges:true"]
150 | cap_drop: ["ALL"]
151 | extra_hosts:
152 | - "host.docker.internal:host-gateway"
153 | ```
154 |
155 |
156 |
157 | ## 🔌 Getting Started
158 |
159 |
160 |
161 | ### Use this script to install any version
162 |
163 |
164 |
165 | ```bash
166 | curl -sSL https://raw.githubusercontent.com/azavaxhuman/Nodex/refs/heads/main/install.sh -o install.sh && chmod +x install.sh && ./install.sh
167 | ```
168 |
169 | #### After installation, you can bring up the menu at any time with the `dds-nodex` command.
170 |
171 | ## ❤️ Donate
172 |
173 | If Nodex has been useful to you, you can support the project:
174 |
175 | | Cryptocurrency | Wallet Address |
176 | | ----------------- | ------------------------------------------ |
177 | | USDT (BEP20) | 0xFA231ce9128AC097F70F5efcfFb3d918645e1Ca9 |
178 | | DogeCoin (DOGE) | DRXjceAoxBRzNsNgVR3GduPSau4xiv179y |
179 | | TRON (TRX-TRC20)  | TJWnK1fCcxwsemyYgYjebKnsBfofCFy3Pc          |
180 |
181 | ## 🧠 How It Works (High-level)
182 |
183 |
184 |
185 | 1. **Log in to the central panel and the nodes** (persistent sessions with a TTL)
186 | 2. **Fetch the inbound list from the central panel** → apply it to the nodes (add/update on change)
187 | 3. **Sync the clients inside each inbound** (create/delete/update)
188 | 4. **Traffic**: Nodex sums each client's usage across the nodes, keeps a single unified total per client, and syncs it to all nodes (a simplified sketch of this arithmetic follows the README).
189 | 5. **SQLite + WAL**: stores state/usage with thread-safe locking
190 | 6. **Healthcheck** based on the freshness of the `.heartbeat` file relative to `HEALTH_MAX_AGE`
191 |
192 |
193 |
194 | ## 🛡️ Security Notes
195 |
196 |
197 |
198 | - `read_only: true` + `no-new-privileges` + `cap_drop: ALL`
199 | - Config mounted read-only inside the container (`/app/config:ro`)
200 |
201 | **Recommendation**: Place Nodex behind an internal Docker network, or on a server in the same network/datacenter as the panels.
202 |
203 |
204 |
205 | ## 🔍 Troubleshooting and Logs
206 |
207 |
208 |
209 | ### Viewing logs:
210 |
211 |
212 |
213 | ```bash
214 | dds-nodex --logs
215 | ```
216 |
217 | Alternatively, run the `dds-nodex` command and pick the log-viewing option from the menu to watch the logs live.
218 |
219 |
220 |
221 | ### Enabling file logging:
222 |
223 | Set `ENABLE_FILE_LOG=1` in `.env`.
224 | Log path: `/var/lib/dds-nodex/data/sync.log`
225 |
226 | ### Common issues:
227 |
228 | - **401/403**: wrong username/password or `WEBPATH`.
229 | - **Timeout**: raise `NET_REQUEST_TIMEOUT` or check the connection between Nodex and the servers.
230 | - **Healthcheck fail**: `HEALTH_MAX_AGE` is too low, or the sync keeps failing (check the logs).
231 | - **DB lock**: check the disk space and the permissions of the `data` directory.
232 |
233 |
234 |
235 | ## ❓ FAQ
236 |
237 | ### Does Nodex modify the panel's core data?
238 |
239 | No; Nodex works through the API and applies changes at the level of the nodes' inbounds/clients.
240 |
241 | ### How many nodes are supported at once?
242 |
243 | Parallelism is enabled by default; use `NET_MAX_WORKERS` to scale it up or down to match your resources.
244 |
245 | ### What if I have an old DB?
246 |
247 | On startup, if an old DB exists at one of the legacy paths, Nodex migrates it to the new path (along with the wal/shm files).
248 |
249 | ## 📜 License
250 |
251 | This project is released under the license provided in the `LICENSE` file (if it is missing, please add the license of your choice).
252 |
253 | ## 🧾 Changelog
254 |
255 | ### v1.3
256 |
257 | - Optimized request parallelization
258 | - Healthcheck based on `.heartbeat`
259 | - Configurable SQLite PRAGMAs
260 | - Improved logging
261 |
--------------------------------------------------------------------------------
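
The traffic model described in step 4 of "How It Works" reduces to per-node deltas against a stored baseline, summed into one unified counter. A simplified sketch of that arithmetic (the real flow, including cycle resets and error handling, lives in `src/state.py` and `src/sync.py`):

```python
# Last (up, down) written to each node, plus the client's unified total so far.
baselines = {"node-a": (100, 400), "node-b": (50, 200)}
total_up, total_down = 150, 600

# Fresh readings from the nodes this cycle (illustrative numbers).
currents = {"node-a": (120, 450), "node-b": (55, 230)}

for server, (cur_up, cur_down) in currents.items():
    last_up, last_down = baselines[server]
    # Only growth since the last cycle counts; a counter reset would need extra care.
    total_up += max(0, cur_up - last_up)
    total_down += max(0, cur_down - last_down)
    baselines[server] = (cur_up, cur_down)  # new baseline for the next cycle

print(total_up, total_down)  # -> 175, 680
```
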
/src/state.py:
--------------------------------------------------------------------------------
1 | import sqlite3
2 | import threading
3 | import time
4 | import logging
5 |
6 | class TrafficStateManager:
7 | def __init__(self, db_file='traffic_state.db', db_opts=None):
8 | self.db_file = db_file
9 | self.lock = threading.Lock()
10 | self.conn = sqlite3.connect(self.db_file, check_same_thread=False, isolation_level=None)
11 | self.conn.execute("PRAGMA foreign_keys=ON;")
12 | # PRAGMAs
13 | if db_opts:
14 | if db_opts.get('wal', True):
15 | self.conn.execute("PRAGMA journal_mode=WAL;")
16 | sync_mode = db_opts.get('synchronous', 'NORMAL').upper()
17 | if sync_mode not in ('FULL', 'NORMAL', 'OFF'):
18 | sync_mode = 'NORMAL'
19 | self.conn.execute(f"PRAGMA synchronous={sync_mode};")
20 | cache_mb = int(db_opts.get('cache_size_mb', 20))
21 | self.conn.execute(f"PRAGMA cache_size=-{cache_mb * 1024};") # negative => KB
22 | self.conn.execute("PRAGMA temp_store=MEMORY;")
23 | self.init_db()
24 |
25 | def init_db(self):
26 | with self.lock, self.conn:
27 | c = self.conn.cursor()
28 |             # Per-user grand total for the current cycle
29 | c.execute('''
30 | CREATE TABLE IF NOT EXISTS client_totals (
31 | email TEXT PRIMARY KEY,
32 | total_up INTEGER NOT NULL DEFAULT 0,
33 | total_down INTEGER NOT NULL DEFAULT 0,
34 | cycle_started_at INTEGER
35 | )
36 | ''')
37 |             # Per-server baseline (the last total written to that server)
38 | c.execute('''
39 | CREATE TABLE IF NOT EXISTS server_counters (
40 | email TEXT NOT NULL,
41 | server_url TEXT NOT NULL,
42 | last_up INTEGER NOT NULL DEFAULT 0,
43 | last_down INTEGER NOT NULL DEFAULT 0,
44 | PRIMARY KEY (email, server_url)
45 | )
46 | ''')
47 |             # <<< NEW: each node's accumulated usage since the start of the current cycle >>>
48 | c.execute('''
49 | CREATE TABLE IF NOT EXISTS node_totals (
50 | email TEXT NOT NULL,
51 | server_url TEXT NOT NULL,
52 | up_total INTEGER NOT NULL DEFAULT 0,
53 | down_total INTEGER NOT NULL DEFAULT 0,
54 | PRIMARY KEY (email, server_url)
55 | )
56 | ''')
57 | c.execute("CREATE INDEX IF NOT EXISTS idx_node_totals_email ON node_totals(email)")
58 |
59 | # ---- total getters/setters ----
60 | def get_total(self, email):
61 | with self.lock:
62 | row = self.conn.execute(
63 | "SELECT total_up,total_down FROM client_totals WHERE email=?", (email,)
64 | ).fetchone()
65 | return (row[0], row[1]) if row else (0, 0)
66 |
67 | def set_total(self, email, up, down):
68 | # idempotent write; only write if changed
69 | with self.lock, self.conn:
70 | row = self.conn.execute(
71 | "SELECT total_up,total_down FROM client_totals WHERE email=?", (email,)
72 | ).fetchone()
73 | if row and row[0] == up and row[1] == down:
74 | return False # no change
75 | self.conn.execute("""
76 | INSERT INTO client_totals(email,total_up,total_down,cycle_started_at)
77 |                 VALUES(?,?,?,(SELECT cycle_started_at FROM client_totals WHERE email=?))
78 | ON CONFLICT(email) DO UPDATE
79 | SET total_up=excluded.total_up, total_down=excluded.total_down
80 | """, (email, up, down, email))
81 | return True
82 |
83 | def set_cycle_started_at(self, email, ts):
84 | with self.lock, self.conn:
85 | self.conn.execute("""
86 | INSERT INTO client_totals(email,total_up,total_down,cycle_started_at)
87 | VALUES(?,?,?,?)
88 | ON CONFLICT(email) DO UPDATE SET cycle_started_at=excluded.cycle_started_at
89 | """, (email, 0, 0, ts))
90 |
91 | # ---- per-server baseline getters/setters ----
92 | def get_last_counter(self, email, server_url):
93 | with self.lock:
94 | row = self.conn.execute("""
95 | SELECT last_up,last_down FROM server_counters
96 | WHERE email=? AND server_url=?
97 | """, (email, server_url)).fetchone()
98 | return (row[0], row[1]) if row else None
99 |
100 | def set_last_counter(self, email, server_url, up, down):
101 | with self.lock, self.conn:
102 | # only write if changed
103 | row = self.conn.execute("""
104 | SELECT last_up,last_down FROM server_counters
105 | WHERE email=? AND server_url=?
106 | """, (email, server_url)).fetchone()
107 | if row and row[0] == up and row[1] == down:
108 | return False
109 | self.conn.execute("""
110 | INSERT INTO server_counters(email,server_url,last_up,last_down)
111 | VALUES(?,?,?,?)
112 | ON CONFLICT(email,server_url) DO UPDATE
113 | SET last_up=excluded.last_up, last_down=excluded.last_down
114 | """, (email, server_url, up, down))
115 | return True
116 |
117 | def set_last_counters_batch(self, email, items):
118 | # items: Iterable[(server_url, up, down)]
119 | with self.lock, self.conn:
120 | self.conn.executemany("""
121 | INSERT INTO server_counters(email,server_url,last_up,last_down)
122 | VALUES(?,?,?,?)
123 | ON CONFLICT(email,server_url) DO UPDATE
124 | SET last_up=excluded.last_up,last_down=excluded.last_down
125 | """, [(email, srv, up, down) for (srv, up, down) in items])
126 |
127 |     # ---- per-node accumulation (new) ----
128 | def add_node_delta(self, email: str, server_url: str, du: int, dd: int) -> None:
129 | """انباشتن دلتاهای مصرف برای نود مشخص (از ابتدای سیکل جاری)."""
130 | if not du and not dd:
131 | return
132 | with self.lock, self.conn:
133 | self.conn.execute("""
134 | INSERT INTO node_totals(email, server_url, up_total, down_total)
135 | VALUES(?,?,?,?)
136 | ON CONFLICT(email, server_url) DO UPDATE SET
137 | up_total = node_totals.up_total + excluded.up_total,
138 | down_total= node_totals.down_total+ excluded.down_total
139 | """, (email, server_url, int(du or 0), int(dd or 0)))
140 |
141 | def reset_node_totals(self, email: str) -> None:
142 | """در شروع سیکل جدید، per-node مربوط به کاربر را صفر میکند."""
143 | with self.lock, self.conn:
144 | self.conn.execute("DELETE FROM node_totals WHERE email=?", (email,))
145 |
146 | def reset_cycle(self, email, currents_by_server, central_url):
147 | """
148 | شروع سیکل جدید:
149 | - total کاربر را برابر مقدار فعلی سرور مرکزی میگذارد
150 | - baseline تمام سرورها را به مقدار فعلیشان تنظیم میکند
151 | - و per-node را صفر میکند (node_totals DELETE)
152 | """
153 | with self.lock, self.conn:
154 | now_ts = int(time.time())
155 | cup, cdown = currents_by_server.get(central_url, (0, 0))
156 |             # Zero the per-node totals for this user
157 | self.conn.execute("DELETE FROM node_totals WHERE email=?", (email,))
158 |             # Record the total and the cycle start time
159 | self.conn.execute("""
160 | INSERT INTO client_totals(email,total_up,total_down,cycle_started_at)
161 | VALUES(?,?,?,?)
162 | ON CONFLICT(email) DO UPDATE
163 | SET total_up=excluded.total_up,total_down=excluded.total_down,cycle_started_at=excluded.cycle_started_at
164 | """, (email, cup, cdown, now_ts))
165 |             # Update every server's baseline
166 | self.conn.executemany("""
167 | INSERT INTO server_counters(email,server_url,last_up,last_down)
168 | VALUES(?,?,?,?)
169 | ON CONFLICT(email,server_url) DO UPDATE
170 | SET last_up=excluded.last_up,last_down=excluded.last_down
171 | """, [(email, srv, up, down) for srv, (up, down) in currents_by_server.items()])
172 | logging.info(f"Cycle reset for {email}: total set to central ({cup},{cdown}); baselines updated; node_totals cleared.")
173 |
--------------------------------------------------------------------------------
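
A short usage sketch of the state manager (the temp-file path and email are illustrative):

```python
from src.state import TrafficStateManager

sm = TrafficStateManager(
    db_file="/tmp/demo_traffic_state.db",
    db_opts={"wal": True, "synchronous": "NORMAL", "cache_size_mb": 8},
)

email = "user@example.com"
sm.set_last_counter(email, "http://node-a", 100, 400)  # baseline for node-a
sm.add_node_delta(email, "http://node-a", 20, 50)      # accumulate this cycle's delta
sm.set_total(email, 120, 450)                          # write the unified counter
print(sm.get_total(email))                             # -> (120, 450)
```
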
/src/api.py:
--------------------------------------------------------------------------------
1 | # src/api.py
2 | import os
3 | import time
4 | import json
5 | import logging
6 | import requests
7 | from urllib.parse import quote
8 |
9 | class APIManager:
10 | """
11 | APIManager main responsibilities:
12 | - Manages persistent sessions with TTL-based reuse
13 | - Handles request timeouts
14 | - URL-encodes sensitive fields like email and client_id
15 | """
16 |
17 | def __init__(self, net_opts=None):
18 | self.sessions = {} # Maps base_url to requests.Session
19 | self.net_opts = net_opts or {}
20 | self.timeout = int(self.net_opts.get("request_timeout", 10))
21 | # Tracks last successful validation timestamp for each base_url
22 | self._last_valid = {} # base_url -> timestamp
23 | self._validate_ttl = int(
24 | os.getenv("NET_VALIDATE_TTL_SECONDS", str(self.net_opts.get("validate_ttl_seconds", 60)))
25 | )
26 |
27 | # ---------------------- Session Management ----------------------
28 | def _get_session(self, base_url: str) -> requests.Session:
29 | """
30 | Returns a persistent session for the given base_url.
31 | Creates a new session if one does not exist.
32 | """
33 | base_url = base_url.rstrip("/")
34 | s = self.sessions.get(base_url)
35 | if s is None:
36 | s = requests.Session()
37 | s.headers.update({
38 | "User-Agent": "dds-sync-worker/0.1",
39 | "Accept": "application/json, text/plain, */*",
40 | })
41 | self.sessions[base_url] = s
42 | return s
43 |
44 | def _validate_session(self, base: str, s: requests.Session) -> bool:
45 | """
46 | Validates the session for the given base_url using TTL:
47 | - If last validation was within TTL, returns True.
48 | - Otherwise, performs a GET request to /panel/api/inbounds/list and checks for success.
49 | """
50 | now = time.time()
51 | ts = self._last_valid.get(base)
52 | if ts and (now - ts) < self._validate_ttl:
53 | return True
54 |
55 | try:
56 | r = s.get(f"{base}/panel/api/inbounds/list", timeout=self.timeout)
57 | if r.status_code != 200:
58 | return False
59 | jr = r.json()
60 | ok = bool(jr.get("success"))
61 | if ok:
62 | self._last_valid[base] = now
63 | return ok
64 | except Exception:
65 | return False
66 |
67 | # ---------------------- Authentication ----------------------
68 | def login(self, server: dict) -> requests.Session:
69 | """
70 | Logs in to the server and returns a session.
71 | If a valid session exists (within TTL), it is reused.
72 | Expects server dict with keys: url, username, password.
73 | """
74 | base = server["url"].rstrip("/")
75 | s = self._get_session(base)
76 |
77 | # Reuse session if still valid (no need to call /login)
78 | if self._validate_session(base, s):
79 | logging.info(f"Reusing session for {base}")
80 | return s
81 |
82 | payload = {"username": server.get("username", ""), "password": server.get("password", "")}
83 | try:
84 | r = s.post(f"{base}/login", json=payload, timeout=self.timeout)
85 | r.raise_for_status()
86 | jr = r.json()
87 | if jr.get("success"):
88 | self._last_valid[base] = time.time()
89 | logging.info(f"Logged in via /login for {base}")
90 | return s
91 | raise RuntimeError(f"Login failed: {jr.get('msg', 'unknown error')}")
92 | except Exception as e:
93 | logging.error(f"Login request error for {base}: {e}")
94 | raise
95 |
96 | # ---------------------- Inbounds Management ----------------------
97 | def get_inbounds(self, server: dict, session: requests.Session):
98 | """
99 | Retrieves the list of inbounds from the server.
100 | Returns an empty list on error.
101 | """
102 | base = server["url"].rstrip("/")
103 | s = self._get_session(base)
104 | try:
105 | r = s.get(f"{base}/panel/api/inbounds/list", timeout=self.timeout)
106 | r.raise_for_status()
107 | jr = r.json()
108 | return jr.get("obj") or []
109 | except Exception as e:
110 | logging.error(f"Error fetching inbounds from {base}: {e}")
111 | return []
112 |
113 | def add_inbound(self, server: dict, session: requests.Session, inbound: dict) -> None:
114 | """
115 | Adds a new inbound to the server.
116 | Logs an error if the operation fails.
117 | """
118 | base = server["url"].rstrip("/")
119 | s = self._get_session(base)
120 | try:
121 | r = s.post(f"{base}/panel/api/inbounds/add", json=inbound, timeout=self.timeout)
122 | r.raise_for_status()
123 | jr = r.json()
124 | if not jr.get("success"):
125 | logging.error(f"Failed to add inbound {inbound.get('id')} on {base}: {jr.get('msg', 'No message')}")
126 | except Exception as e:
127 | logging.error(f"Error adding inbound {inbound.get('id')} on {base}: {e}")
128 |
129 | def update_inbound(self, server: dict, session: requests.Session, inbound_id: int, inbound: dict) -> None:
130 | """
131 | Updates an existing inbound on the server.
132 | Logs an error if the operation fails.
133 | """
134 | base = server["url"].rstrip("/")
135 | s = self._get_session(base)
136 | try:
137 | r = s.post(f"{base}/panel/api/inbounds/update/{inbound_id}", json=inbound, timeout=self.timeout)
138 | r.raise_for_status()
139 | jr = r.json()
140 | if not jr.get("success"):
141 | logging.error(f"Failed to update inbound {inbound_id} on {base}: {jr.get('msg', 'No message')}")
142 | except Exception as e:
143 | logging.error(f"Error updating inbound {inbound_id} on {base}: {e}")
144 |
145 | def delete_inbound(self, server: dict, session: requests.Session, inbound_id: int) -> None:
146 | """
147 | Deletes an inbound from the server.
148 | Logs an error if the operation fails.
149 | """
150 | base = server["url"].rstrip("/")
151 | s = self._get_session(base)
152 | try:
153 | r = s.post(f"{base}/panel/api/inbounds/del/{inbound_id}", timeout=self.timeout)
154 | r.raise_for_status()
155 | jr = r.json()
156 | if not jr.get("success"):
157 | logging.error(f"Failed to delete inbound {inbound_id} on {base}: {jr.get('msg', 'No message')}")
158 | except Exception as e:
159 | logging.error(f"Error deleting inbound {inbound_id} on {base}: {e}")
160 |
161 | # ---------------------- Client Management ----------------------
162 | def add_client(self, server: dict, session: requests.Session, inbound_id: int, client: dict) -> None:
163 | """
164 | Adds a new client to the specified inbound.
165 | Logs an error if the operation fails.
166 | """
167 | base = server["url"].rstrip("/")
168 | s = self._get_session(base)
169 | payload = {"id": inbound_id, "settings": json.dumps({"clients": [client]})}
170 | try:
171 | r = s.post(f"{base}/panel/api/inbounds/addClient", json=payload, timeout=self.timeout)
172 | r.raise_for_status()
173 | jr = r.json()
174 | if not jr.get("success"):
175 | logging.error(f"Failed to add client {client.get('email')} on {base}: {jr.get('msg', 'No message')}")
176 | except Exception as e:
177 | logging.error(f"Error adding client {client.get('email')} on {base}: {e}")
178 |
179 | def update_client(self, server: dict, session: requests.Session, client_id, inbound_id: int, client: dict) -> None:
180 | """
181 | Updates an existing client for the specified inbound.
182 | Logs an error if the operation fails.
183 | """
184 | base = server["url"].rstrip("/")
185 | s = self._get_session(base)
186 | safe_id = quote(str(client_id), safe="")
187 | url = f"{base}/panel/api/inbounds/updateClient/{safe_id}"
188 | payload = {"id": inbound_id, "settings": json.dumps({"clients": [client]})}
189 | try:
190 | r = s.post(url, json=payload, timeout=self.timeout)
191 | r.raise_for_status()
192 | jr = r.json()
193 | if not jr.get("success"):
194 | logging.error(f"Failed to update client {client_id} on {base}: {jr.get('msg', 'No message')}")
195 | except Exception as e:
196 | logging.error(f"Error updating client {client_id} on {base}: {e}")
197 |
198 | def delete_client(self, server: dict, session: requests.Session, inbound_id: int, client_id) -> None:
199 | """
200 | Deletes a client from the specified inbound.
201 | Logs an error if the operation fails.
202 | """
203 | base = server["url"].rstrip("/")
204 | s = self._get_session(base)
205 | safe_id = quote(str(client_id), safe="")
206 | url = f"{base}/panel/api/inbounds/{inbound_id}/delClient/{safe_id}"
207 | try:
208 | r = s.post(url, timeout=self.timeout)
209 | r.raise_for_status()
210 | jr = r.json()
211 | if not jr.get("success"):
212 | logging.error(f"Failed to delete client {client_id} on {base}: {jr.get('msg', 'No message')}")
213 | except Exception as e:
214 | logging.error(f"Error deleting client {client_id} on {base}: {e}")
215 |
216 | # ---------------------- Traffic Management ----------------------
217 | def get_client_traffic(self, server: dict, session: requests.Session, email: str):
218 | """
219 | Retrieves upload and download traffic statistics for the specified client email.
220 | Returns (0, 0) on error.
221 | """
222 | base = server["url"].rstrip("/")
223 | s = self._get_session(base)
224 | safe_email = quote(email, safe="")
225 | url = f"{base}/panel/api/inbounds/getClientTraffics/{safe_email}"
226 | try:
227 | r = s.get(url, timeout=self.timeout)
228 | r.raise_for_status()
229 | jr = r.json()
230 | if jr.get("success"):
231 | obj = jr.get("obj") or {}
232 | up = int(obj.get("up", 0) or 0)
233 | down = int(obj.get("down", 0) or 0)
234 | return up, down
235 | return (0, 0)
236 | except Exception as e:
237 | logging.error(f"Error fetching traffic for {email} on {base}: {e}")
238 | return (0, 0)
239 |
240 | def update_client_traffic(self, server: dict, session: requests.Session, email: str, up: int, down: int) -> None:
241 | """
242 | Updates the traffic statistics for the specified client email.
243 | This endpoint may not be supported by all panels; errors are logged.
244 | """
245 | base = server["url"].rstrip("/")
246 | s = self._get_session(base)
247 | safe_email = quote(email, safe="")
248 | url = f"{base}/panel/api/inbounds/updateClientTraffic/{safe_email}"
249 | payload = {"upload": int(up), "download": int(down)}
250 | try:
251 | r = s.post(url, json=payload, timeout=self.timeout)
252 | r.raise_for_status()
253 | jr = r.json()
254 | if not jr.get("success"):
255 | logging.error(f"Failed to update traffic for {email} on {base}: {jr.get('msg', 'No message')}")
256 | except Exception as e:
257 | logging.error(f"Error updating traffic for {email} on {base}: {e}")
258 |
--------------------------------------------------------------------------------
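
A typical call sequence against one panel (the server dict matches the shape of `config.sample.json`; the URL and credentials are placeholders, so `login` would raise if no panel is actually reachable there):

```python
from src.api import APIManager

api = APIManager(net_opts={"request_timeout": 10, "validate_ttl_seconds": 60})
server = {"url": "http://IP:PORT/WEBPATH", "username": "username", "password": "password"}

session = api.login(server)                    # reuses a cached session within the TTL
inbounds = api.get_inbounds(server, session)   # [] on error, per the method contract
up, down = api.get_client_traffic(server, session, "user@example.com")
print(len(inbounds), up, down)
```
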
/install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # DDS-Nodex Manager (EN + Better UX) — final, IFS-safe, pretty menu + .env editor
3 | set -Eeuo pipefail
4 | IFS=$'\n\t'
5 |
6 | # ==================== Defaults ====================
7 | readonly ZIP_URL_DEFAULT="${ZIP_URL:-https://github.com/azavaxhuman/Nodex/releases/download/v1.4/v1.4.zip}"
8 | readonly APP_HOME_DEFAULT="${APP_HOME:-/opt/dds-nodex}"
9 | readonly DATA_DIR_DEFAULT="${DATA_DIR:-/var/lib/dds-nodex/data}"
10 | readonly CONFIG_DIR_DEFAULT="${CONFIG_DIR:-/var/lib/dds-nodex/config}"
11 | readonly UID_APP_DEFAULT="${UID_APP:-10001}"
12 | readonly GID_APP_DEFAULT="${GID_APP:-10001}"
13 | readonly BIN_PATH_DEFAULT="${BIN_PATH:-/usr/local/bin/dds-nodex}"
14 |
15 | COMPOSE_FILES=("docker-compose.yml" "compose.yml" "compose.yaml")
16 |
17 | # ==================== Mutable (via flags) ====================
18 | ZIP_URL="$ZIP_URL_DEFAULT"
19 | APP_HOME="$APP_HOME_DEFAULT"
20 | DATA_DIR="$DATA_DIR_DEFAULT"
21 | CONFIG_DIR="$CONFIG_DIR_DEFAULT"
22 | UID_APP="$UID_APP_DEFAULT"
23 | GID_APP="$GID_APP_DEFAULT"
24 | BIN_PATH="$BIN_PATH_DEFAULT"
25 | REQUIRED_FILES=("Dockerfile" "requirements.txt")
26 | NONINTERACTIVE=false
27 | SKIP_DOCKER_INSTALL=false
28 | COMPOSE_CMD=() # array, safe with IFS
29 |
30 | # ==================== UI ====================
31 | ce() { local c="$1"; shift || true; local code=0
32 | case "$c" in red)code=31;;green)code=32;;yellow)code=33;;blue)code=34;;magenta)code=35;;cyan)code=36;;bold)code=1;; *)code=0;; esac
33 | echo -e "\033[${code}m$*\033[0m"
34 | }
35 | ts() { date '+%Y-%m-%d %H:%M:%S'; }
36 | log() { ce "$1" "[$(ts)] $2"; }
37 | section(){ ce magenta "\n────────────────────────────────────────────────────"; ce bold " $1"; ce magenta "────────────────────────────────────────────────────\n"; }
38 | step(){ log blue "[STEP] $1"; }
39 | ok() { log green "[OK] $1"; }
40 | warn(){ log yellow "[WARN] $1"; }
41 | fatal(){ log red "[FATAL] $1"; exit "${2:-1}"; }
42 | info(){ log cyan "[INFO] $1"; }
43 | success(){ log bold "[SUCCESS] $1"; }
44 |
45 | pause(){ $NONINTERACTIVE && return 0; read -n1 -s -r -p "Press any key to continue..." _; echo; }
46 | confirm(){ $NONINTERACTIVE && return 0; read -p "$1 [y/N]: " -r; [[ "${REPLY:-}" =~ ^[Yy]$ ]]; }
47 |
48 | # ==================== Traps ====================
49 | cleanup(){ true; }
50 | on_err(){ fatal "Command failed. Check messages above." "$?"; }
51 | trap cleanup EXIT
52 | trap on_err ERR
53 |
54 | # ==================== Core ====================
55 | require_root(){ [[ "$(id -u)" -eq 0 ]] || fatal "Run as root (sudo)."; }
56 |
57 | detect_compose(){
58 | if command -v docker &>/dev/null && docker compose version &>/dev/null; then
59 | COMPOSE_CMD=(docker compose)
60 | elif command -v docker-compose &>/dev/null; then
61 | COMPOSE_CMD=(docker-compose)
62 | else
63 | COMPOSE_CMD=()
64 | fi
65 | }
66 |
67 | need_compose(){
68 | if ((${#COMPOSE_CMD[@]}==0)); then
69 | detect_compose
70 | ((${#COMPOSE_CMD[@]}==0)) && fatal "Docker Compose not available."
71 | fi
72 | }
73 |
74 | install_deps(){
75 | local deps=(curl unzip ca-certificates gnupg lsb-release)
76 | local miss=()
77 | for d in "${deps[@]}"; do command -v "$d" &>/dev/null || miss+=("$d"); done
78 | if ((${#miss[@]})); then
79 | step "Installing dependencies: ${miss[*]}"
80 | apt-get update -y
81 | DEBIAN_FRONTEND=noninteractive apt-get install -y "${miss[@]}"
82 | ok "Dependencies installed."
83 | else ok "All dependencies present."; fi
84 | }
85 |
86 | ensure_docker(){
87 | detect_compose
88 | if ! command -v docker &>/dev/null; then
89 | $SKIP_DOCKER_INSTALL && fatal "Docker not found and auto-install disabled."
90 | step "Installing Docker…"; curl -fsSL https://get.docker.com | sh; ok "Docker installed."
91 | fi
92 | detect_compose
93 | if ((${#COMPOSE_CMD[@]}==0)); then
94 | step "Installing Docker Compose plugin…"
95 | apt-get update -y || true; apt-get install -y docker-compose-plugin || true
96 | detect_compose
97 | ((${#COMPOSE_CMD[@]}==0)) && fatal "Docker Compose not available."
98 | fi
99 | ok "Docker/Compose ready."
100 | }
101 |
102 | create_user_group(){
103 | # group by GID
104 | local grp_name
105 | if getent group | awk -F: -v gid="$GID_APP" '$3==gid{found=1; print $1; exit} END{exit !found}'; then
106 | grp_name="$(getent group | awk -F: -v gid="$GID_APP" '$3==gid{print $1; exit}')"
107 | ok "Group with GID ${GID_APP} exists: ${grp_name}"
108 | else
109 | grp_name="appgrp"
110 | groupadd -g "${GID_APP}" "${grp_name}"
111 | ok "Group ${grp_name} created (GID ${GID_APP})."
112 | fi
113 |
114 | # user by UID
115 | local usr_name
116 | if getent passwd | awk -F: -v uid="$UID_APP" '$3==uid{found=1; print $1; exit} END{exit !found}'; then
117 | usr_name="$(getent passwd | awk -F: -v uid="$UID_APP" '$3==uid{print $1; exit}')"
118 | ok "User with UID ${UID_APP} exists: ${usr_name}"
119 | else
120 | usr_name="appusr"
121 | useradd -u "${UID_APP}" -g "${GID_APP}" -M -s /usr/sbin/nologin "${usr_name}"
122 | ok "User ${usr_name} created (UID ${UID_APP})."
123 | fi
124 | }
125 |
126 | create_dirs(){
127 | mkdir -p "${APP_HOME}" "${DATA_DIR}" "${CONFIG_DIR}"
128 | chown -R "${UID_APP}:${GID_APP}" "${DATA_DIR}" "${CONFIG_DIR}"
129 | # If app writes to APP_HOME, uncomment:
130 | # chown -R "${UID_APP}:${GID_APP}" "${APP_HOME}"
131 | }
132 |
133 | validate_files(){
134 | for f in "${REQUIRED_FILES[@]}"; do
135 | [[ -f "${APP_HOME}/$f" ]] || fatal "Required file missing: $f"
136 | ok "Found $f"
137 | done
138 | local found=false
139 | for c in "${COMPOSE_FILES[@]}"; do
140 | if [[ -f "${APP_HOME}/$c" ]]; then ok "Found $c"; found=true; break; fi
141 | done
142 | $found || fatal "No compose file found (checked: ${COMPOSE_FILES[*]})."
143 | }
144 |
145 | parse_zip(){
146 | case "$ZIP_URL" in
147 | file://*) ZIP_PATH="${ZIP_URL#file://}"; [[ -f "$ZIP_PATH" ]] || fatal "Zip not found: $ZIP_PATH" ;;
148 | http://*|https://*) ZIP_PATH="" ;;
149 | *) fatal "Unsupported ZIP_URL: $ZIP_URL" ;;
150 | esac
151 | }
152 |
153 | download_extract(){
154 | section "Download & Extract"
155 | parse_zip
156 | local tmp; tmp="$(mktemp --suffix=.zip)"
157 |
158 | if [[ -n "${ZIP_PATH:-}" ]]; then
159 | info "Copying local archive: $ZIP_PATH"
160 | cp -f "$ZIP_PATH" "$tmp"
161 | else
162 | info "Downloading: $ZIP_URL"
163 | curl -fSL --retry 3 --retry-delay 2 "$ZIP_URL" -o "$tmp"
164 | fi
165 | ok "Archive ready."
166 |
167 | info "Unpacking to ${APP_HOME}…"
168 | mkdir -p "${APP_HOME}"
169 | unzip -oq "$tmp" -d "${APP_HOME}"
170 | rm -f "$tmp"
171 | ok "Unpacked."
172 |
173 |   # --- Important: if the archive contains a single top-level directory, promote its contents into APP_HOME
174 | shopt -s nullglob dotglob
175 | local entries=("${APP_HOME}"/*)
176 | if (( ${#entries[@]} == 1 )) && [[ -d "${entries[0]}" ]]; then
177 | local top="${entries[0]}"
178 | info "Detected single top-level dir '$(basename "$top")' → promoting contents to ${APP_HOME}"
179 | mv "${top}/"* "${APP_HOME}/" 2>/dev/null || true
180 | rmdir "$top" 2>/dev/null || true
181 | ok "Contents promoted."
182 | fi
183 | shopt -u nullglob dotglob
184 | }
185 |
186 |
187 | setup_config(){
188 | section "Configuration"
189 | local sample="${APP_HOME}/config.sample.json"
190 | local cfg="${CONFIG_DIR}/config.json"
191 | if [[ -f "$cfg" ]]; then ok "config.json already exists."
192 | else
193 | if [[ -f "$sample" ]]; then cp -f "$sample" "$cfg"; ok "config.json created from sample."
194 | else warn "No config.sample.json; create ${cfg} manually."; fi
195 | fi
196 | }
197 |
198 | backup_config(){
199 | local cfg="${CONFIG_DIR}/config.json"
200 | [[ -f "$cfg" ]] || return 0
201 | local t; t="$(date +%Y%m%d-%H%M%S)"
202 | cp -f "$cfg" "${cfg}.bak-${t}"
203 | ok "Config backup: ${cfg}.bak-${t}"
204 | }
205 |
206 | # ======== NEW: .env editor ========
207 | backup_env(){
208 | local envf="${APP_HOME}/.env"
209 | [[ -f "$envf" ]] || return 0
210 | local t; t="$(date +%Y%m%d-%H%M%S)"
211 | cp -f "$envf" "${envf}.bak-${t}"
212 | ok ".env backup: ${envf}.bak-${t}"
213 | }
214 |
215 | edit_env(){
216 | section "Edit .env"
217 | local editor="${EDITOR:-nano}"
218 | local envf="${APP_HOME}/.env"
219 | if [[ ! -f "$envf" ]]; then
220 | touch "$envf"
221 | chown "${UID_APP}:${GID_APP}" "$envf" || true
222 | ok "Created empty ${envf}"
223 | fi
224 | backup_env
225 | info "Opening with ${editor}…"; "$editor" "$envf"; ok ".env saved."
226 | }
227 | # =================================
228 |
229 | compose_build_up(){
230 | section "Docker Compose Deploy"
231 | validate_files
232 | need_compose
233 | (cd "${APP_HOME}" && "${COMPOSE_CMD[@]}" build && ok "Built." && "${COMPOSE_CMD[@]}" up -d && ok "Service started.")
234 | }
235 |
236 | show_logs(){
237 | local lines="${1:-100}"
238 | section "Recent Logs"
239 | need_compose
240 | (cd "${APP_HOME}" && "${COMPOSE_CMD[@]}" logs --tail "$lines" --no-log-prefix) || warn "No logs."
241 | }
242 |
243 | tail_follow_logs(){
244 | section "Live Logs (press Ctrl+C to stop)"
245 | need_compose
246 | (cd "${APP_HOME}" && "${COMPOSE_CMD[@]}" logs -f --tail=50 --no-log-prefix) || warn "No logs."
247 | }
248 |
249 | edit_config(){
250 | section "Edit Config"
251 | local editor="${EDITOR:-nano}"
252 | local cfg="${CONFIG_DIR}/config.json"
253 | if [[ ! -f "$cfg" ]]; then
254 | local sample="${APP_HOME}/config.sample.json"
255 | [[ -f "$sample" ]] || fatal "config.sample.json missing; cannot create config.json."
256 | cp -f "$sample" "$cfg"; ok "config.json created."
257 | fi
258 | backup_config
259 | info "Opening with ${editor}…"; "$editor" "$cfg"; ok "Config saved."
260 | }
261 |
262 | safe_delete_principals(){
263 | local u_entry g_entry u_uid g_gid
264 | u_entry="$(getent passwd appusr || true)"
265 | g_entry="$(getent group appgrp || true)"
266 | if [[ -n "$u_entry" ]]; then
267 | u_uid="$(echo "$u_entry" | awk -F: '{print $3}')"
268 | [[ "$u_uid" == "$UID_APP" ]] && userdel appusr || true
269 | fi
270 | if [[ -n "$g_entry" ]]; then
271 | g_gid="$(echo "$g_entry" | awk -F: '{print $3}')"
272 | [[ "$g_gid" == "$GID_APP" ]] && groupdel appgrp || true
273 | fi
274 | }
275 |
276 | uninstall_stack(){
277 | section "Uninstall DDS-Nodex"
278 | if ! $NONINTERACTIVE; then
279 | confirm "Are you sure you want to uninstall and DELETE ALL DATA?" || { warn "Uninstall cancelled."; return; }
280 | read -p "Type DELETE to confirm: " -r; [[ "${REPLY:-}" == "DELETE" ]] || { warn "Uninstall aborted."; return; }
281 | else
282 | warn "Non-interactive uninstall: skipping confirmations (dangerous)."
283 | fi
284 |
285 | need_compose || true
286 | step "Stopping service…"; (cd "${APP_HOME}" && "${COMPOSE_CMD[@]}" down) || warn "Service not running."
287 | step "Removing files…"; rm -rf "${APP_HOME}" "${DATA_DIR}" "${CONFIG_DIR}"
288 | step "Removing app user/group…"; safe_delete_principals
289 | success "DDS-Nodex uninstalled."
290 | }
291 |
292 | service_status(){ section "Service Status"; need_compose; (cd "${APP_HOME}" && "${COMPOSE_CMD[@]}" ps) || warn "No status."; }
293 | disk_usage(){ section "Disk Usage"; du -sh "${APP_HOME}" "${DATA_DIR}" "${CONFIG_DIR}" 2>/dev/null || true; }
294 | list_containers(){ section "Docker Containers"; docker ps -a || true; }
295 | list_images(){ section "Docker Images"; docker images || true; }
296 | restart_service(){ section "Restart Service"; need_compose; (cd "${APP_HOME}" && "${COMPOSE_CMD[@]}" restart && ok "Restarted.") || warn "Service not found."; }
297 |
298 | register_cmd(){
299 | local bin_dir; bin_dir="$(dirname "$BIN_PATH")"
300 | mkdir -p "$bin_dir"
301 | [[ -f "$BIN_PATH" ]] && return 0
302 |
303 | local script_path; script_path="$(realpath "${BASH_SOURCE[0]}")"
304 |
305 | cat > "$BIN_PATH" < now_ms
68 |
69 | def _is_ended(self, c, now_ms):
70 | """Client has ended if expiryTime is in the past or negative."""
71 | exp = self._to_int(c.get('expiryTime'), 0)
72 | return (exp > 0 and exp <= now_ms) or exp < 0
73 |
74 | # -------------------------------
75 | # Inbounds & Clients synchronization
76 | # -------------------------------
77 | def sync_inbounds_and_clients(self):
78 | central = self.config_manager.get_central_server()
79 | nodes = self.config_manager.get_nodes()
80 |
81 | try:
82 | central_session = self.api_manager.login(central)
83 | central_inbounds = self.api_manager.get_inbounds(central, central_session)
84 | if not central_inbounds:
85 | logging.error("No inbounds retrieved from central server, skipping sync")
86 | return
87 | except Exception as e:
88 | logging.error(f"Failed to connect to central server: {e}")
89 | return
90 |
91 | # Parse central inbounds and extract client lists
92 | parsed_central = []
93 | for ib in central_inbounds:
94 | settings = {}
95 | try:
96 | settings = json.loads(ib.get('settings') or '{}') or {}
97 | except Exception:
98 | pass
99 | parsed_central.append((ib, settings.get('clients', [])))
100 |
101 | for node in nodes:
102 | try:
103 | node_session = self.api_manager.login(node)
104 | node_inbounds = self.api_manager.get_inbounds(node, node_session)
105 | node_inbound_map = {inbound['id']: inbound for inbound in node_inbounds}
106 |
107 | # Synchronize inbounds (central -> node)
108 | for central_inbound, _ in parsed_central:
109 | cid = central_inbound['id']
110 | if cid not in node_inbound_map:
111 | self.api_manager.add_inbound(node, node_session, central_inbound)
112 | else:
113 | self.api_manager.update_inbound(node, node_session, cid, central_inbound)
114 | node_inbound_map.pop(cid, None)
115 |
116 | # Remove inbounds that are not present on the central server
117 | for inbound_id in list(node_inbound_map.keys()):
118 | self.api_manager.delete_inbound(node, node_session, inbound_id)
119 |
120 | # Synchronize clients with SAFU-aware policy
121 | now_ms = self._now_ms()
122 |
123 | for central_inbound, c_clients in parsed_central:
124 | cid = central_inbound['id']
125 |
126 | # Get clients from node
127 | node_inbound = next((ni for ni in node_inbounds if ni['id'] == cid), None)
128 | n_clients = []
129 | if node_inbound:
130 | try:
131 | n_clients = (json.loads(node_inbound.get('settings') or '{}') or {}).get('clients', [])
132 | except Exception:
133 | n_clients = []
134 |
135 | protocol = (central_inbound.get('protocol') or '').lower()
136 |
137 | # Build protocol-aware client maps
138 | n_client_map = { self._client_key(cl, protocol): cl for cl in n_clients if self._client_key(cl, protocol) }
139 | c_client_map = { self._client_key(cl, protocol): cl for cl in c_clients if self._client_key(cl, protocol) }
140 |
141 | # --- 1) If central has fresh SAFU clients: push them directly to node, skip merging
142 | if any(self._is_safu_fresh(ccl) for ccl in c_clients):
143 | for k, ccl in c_client_map.items():
144 | if not self._is_safu_fresh(ccl):
145 | continue # Only process fresh SAFU clients
146 | # Push to node (restore from Ended to SAFU)
147 | if k in n_client_map:
148 | nid = self._client_id_for_api(n_client_map[k], protocol)
149 | # If exists on node, update
150 | if nid is not None:
151 | try:
152 | self.api_manager.update_client(node, node_session, nid, cid, ccl)
153 | except Exception as _e:
154 | logging.error(f"Failed to push SAFU from central to node for client {k}: {_e}")
155 | else:
156 | # If not on node, add
157 | try:
158 | self.api_manager.add_client(node, node_session, cid, ccl)
159 | except Exception as _e:
160 | logging.error(f"Failed to add SAFU client {k} to node: {_e}")
161 |
162 | # Continue to general PUSH phase (central -> node)
163 | # (In this case, merging from node to central is intentionally skipped)
164 |
165 | else:
166 | # --- 2) If central does not have fresh SAFU: only promote active start time from node to central if needed
167 | for k, ccl in c_client_map.items():
168 | ncl = n_client_map.get(k)
169 | if not ncl:
170 | continue
171 |
172 | central_exp = self._to_int(ccl.get('expiryTime'), 0)
173 | node_exp = self._to_int(ncl.get('expiryTime'), 0)
174 |
175 | central_started_active = central_exp > now_ms
176 | node_started_active = node_exp > now_ms
177 |
178 | should_promote = (not central_started_active) and node_started_active
179 | if should_promote:
180 | # Promote start time from node to central (minimum of positive values)
181 | merged = node_exp if central_exp <= 0 else min(central_exp, node_exp)
182 | if merged != central_exp and merged > now_ms:
183 | ccl['expiryTime'] = merged
184 |                             if ccl.get('startAfterFirstUse') is True:
185 | ccl['startAfterFirstUse'] = False
186 | try:
187 | client_id = self._client_id_for_api(ccl, protocol) or self._client_id_for_api(ncl, protocol)
188 | if client_id is None:
189 | logging.warning(f"[SAFU-MERGE] Missing clientId for protocol={protocol} key={k} on inbound {cid}; central update skipped.")
190 | else:
191 | self.api_manager.update_client(central, central_session, client_id, cid, ccl)
192 | logging.info(f"[SAFU-MERGE] expiryTime merged to central for client {k} (inbound {cid}): {central_exp} -> {merged}")
193 | except Exception as _e:
194 | logging.error(f"Failed to update central client {k} after SAFU merge: {_e}")
195 | # If node is Ended, do not promote to central
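    |                             # Worked example: central_exp <= 0 (countdown not started)
    |                             # and node_exp = now + 7d gives merged = node_exp, so central
    |                             # adopts the node's running deadline and startAfterFirstUse
    |                             # is cleared because the countdown has effectively begun.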
196 |
197 | # --- 3) Final PUSH: central version (after above policy) to node
198 | # Add or update clients
199 | for ccl in c_clients:
200 | k = self._client_key(ccl, protocol)
201 | if k in n_client_map:
202 | nid = self._client_id_for_api(n_client_map[k], protocol)
203 | try:
204 | self.api_manager.update_client(node, node_session, nid, cid, ccl)
205 | except Exception as _e:
206 | logging.error(f"Failed to update client {k} on node: {_e}")
207 | # Remove from deletion candidates
208 | n_client_map.pop(k, None)
209 | else:
210 | try:
211 | self.api_manager.add_client(node, node_session, cid, ccl)
212 | except Exception as _e:
213 | logging.error(f"Failed to add client {k} on node: {_e}")
214 |
215 | # Remove clients that are not present on central
216 | for k, ncl in list(n_client_map.items()):
217 | n_clid = self._client_id_for_api(ncl, protocol)
218 | if n_clid is not None:
219 | try:
220 | self.api_manager.delete_client(node, node_session, cid, n_clid)
221 | except Exception as _e:
222 | logging.error(f"Failed to delete extra client {k} on node: {_e}")
223 |
224 | except Exception as e:
225 | logging.error(f"Error syncing with node {node['url']}: {e}")
226 |
227 | # -------------------------------
228 | # Traffic synchronization (V2)
229 | # -------------------------------
230 | def _fetch_node_traffic_parallel(self, nodes_by_url, node_sessions, email):
231 | """Parallelize traffic reads (I/O-bound only). Writes remain serial."""
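    |         # Reads are fanned out because each get_client_traffic call is a blocking
    |         # HTTP round-trip; state writes stay on the caller's thread, presumably to
    |         # avoid cross-thread contention on the SQLite traffic-state database.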
232 | currents_by_server = {}
233 | futures = {}
234 | max_workers = min(len(node_sessions), self.config_manager.net().get('max_workers', 8))
235 | if max_workers <= 0:
236 | max_workers = 1
237 |
238 | with ThreadPoolExecutor(max_workers=max_workers) as ex:
239 | for srv_url, sess in node_sessions.items():
240 | node = nodes_by_url.get(srv_url)
241 | if not node or not sess:
242 | continue
243 | futures[ex.submit(self.api_manager.get_client_traffic, node, sess, email)] = srv_url
244 |
245 | for fut in as_completed(futures):
246 | srv_url = futures[fut]
247 | try:
248 | n_up, n_down = fut.result()
249 | currents_by_server[srv_url] = (n_up, n_down)
250 | except Exception as e:
251 | logging.error(f"Traffic fetch failed for {email} on {srv_url}: {e}")
252 |                     # Important: on a failed read, leave the baseline untouched; None marks this node to be skipped in the delta loop
253 | currents_by_server[srv_url] = None
254 |
255 | return currents_by_server
256 |
257 | def sync_traffic(self):
258 | central = self.config_manager.get_central_server()
259 | nodes = self.config_manager.get_nodes()
260 | net_opts = self.config_manager.net()
261 |
262 |         # Optional: cap on the delta accepted per interval, in bytes. 0 or negative disables it.
263 | delta_cap = int(net_opts.get('delta_max_bytes_per_interval', 0) or 0)
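    |         # Example: delta_max_bytes_per_interval=5_000_000_000 zeroes (rather than
    |         # truncates) any per-node delta claiming more than ~5 GB in one cycle.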
264 |
265 | # Login to central server
266 | try:
267 | central_sess = self.api_manager.login(central)
268 | except Exception as e:
269 | logging.error(f"Failed to connect to central server: {e}")
270 | return
271 |
272 | # Get client list from central server
273 | try:
274 | central_inbounds = self.api_manager.get_inbounds(central, central_sess)
275 | if not central_inbounds:
276 | logging.error("No inbounds retrieved from central server, skipping traffic sync")
277 | return
278 | except Exception as e:
279 | logging.error(f"Failed to get inbounds from central server: {e}")
280 | return
281 |
282 |         # Collect client emails from central server (from both clientStats and each inbound's settings JSON)
283 | client_emails = set()
284 | for inbound in central_inbounds:
285 | for client in inbound.get('clientStats') or []:
286 | if client and 'email' in client:
287 | client_emails.add(client['email'])
288 | try:
289 | s = json.loads(inbound.get('settings') or '{}') or {}
290 | for c in s.get('clients', []):
291 | e = c.get('email')
292 | if e:
293 | client_emails.add(e)
294 | except Exception:
295 | pass
296 |
297 |         # Log in to nodes (best-effort: a failed login is logged and that node is skipped this cycle)
298 | node_sessions = {}
299 | for node in nodes:
300 | try:
301 | node_sessions[node['url']] = self.api_manager.login(node)
302 | except Exception as e:
303 | logging.error(f"Failed to login node {node['url']}: {e}")
304 |
305 | nodes_by_url = {node['url']: node for node in nodes}
306 | parallel_reads = net_opts.get('parallel_node_calls', True)
307 |
308 | for email in client_emails:
309 | try:
310 | # 1) Read current traffic from all servers
311 | currents_by_server = {}
312 | c_up, c_down = self.api_manager.get_client_traffic(central, central_sess, email)
313 | currents_by_server[central['url']] = (c_up, c_down)
314 |
315 | if parallel_reads and node_sessions:
316 | currents_by_server.update(
317 | self._fetch_node_traffic_parallel(nodes_by_url, node_sessions, email)
318 | )
319 | else:
320 | for srv_url, sess in node_sessions.items():
321 | node = nodes_by_url.get(srv_url)
322 | if not node or not sess:
323 | continue
324 | try:
325 | n_up, n_down = self.api_manager.get_client_traffic(node, sess, email)
326 | currents_by_server[srv_url] = (n_up, n_down)
327 | except Exception as e:
328 | logging.error(f"Traffic fetch failed for {email} on {srv_url}: {e}")
329 |                             # Important: on a failed read, leave the baseline untouched; None marks this node to be skipped in the delta loop
330 | currents_by_server[srv_url] = None
331 |
332 |             # 2) Detect first observation of this user or a central counter reset
333 | last_central = self.traffic_state_manager.get_last_counter(email, central['url'])
334 | if last_central is None:
335 | # First observation of this user -> start cycle at central snapshot
336 | self.traffic_state_manager.reset_cycle(email, currents_by_server, central['url'])
337 | total_up, total_down = currents_by_server[central['url']]
338 |
339 |                 # Write total to central + nodes; then set each server's baseline = total (only where the write succeeded)
340 | try:
341 | self.api_manager.update_client_traffic(central, central_sess, email, total_up, total_down)
342 | self.traffic_state_manager.set_last_counter(email, central['url'], total_up, total_down)
343 | except Exception as e:
344 | logging.error(f"[INIT] Failed to write total to central for {email}: {e}")
345 |
346 | for srv_url, sess in node_sessions.items():
347 | node = nodes_by_url.get(srv_url)
348 | if node and sess:
349 | try:
350 | self.api_manager.update_client_traffic(node, sess, email, total_up, total_down)
351 | self.traffic_state_manager.set_last_counter(email, srv_url, total_up, total_down)
352 | except Exception as e:
353 | logging.error(f"[INIT] Failed to write total to node {srv_url} for {email}: {e}")
354 |
355 |                 # Persist the total in state as well so it survives restarts
356 | self.traffic_state_manager.set_total(email, total_up, total_down)
357 |
358 | logging.info(f"[INIT] {email}: total set to central current ({total_up},{total_down}); baselines initialized & aligned to total; node_totals cleared.")
359 | continue
360 |
361 | last_cu, last_cd = last_central
362 | # IMPORTANT: consider central reset only if BOTH counters dropped (real reset)
363 | central_reset = (c_up < last_cu) and (c_down < last_cd)
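    |             # e.g. last=(8_000, 90_000): cur=(8_500, 100) is NOT a reset (upload still
    |             # grew), while cur=(0, 0) after a panel wipe is treated as one.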
364 | if central_reset:
365 | # Start a new cycle (central reset) using current observations as baselines
366 | self.traffic_state_manager.reset_cycle(email, currents_by_server, central['url'])
367 | total_up, total_down = currents_by_server[central['url']]
368 |
369 |                 # Write total to central + nodes; then set each server's baseline = total (only where the write succeeded)
370 | try:
371 | self.api_manager.update_client_traffic(central, central_sess, email, total_up, total_down)
372 | self.traffic_state_manager.set_last_counter(email, central['url'], total_up, total_down)
373 | except Exception as e:
374 | logging.error(f"[CENTRAL RESET] Failed to write total to central for {email}: {e}")
375 |
376 | for srv_url, sess in node_sessions.items():
377 | node = nodes_by_url.get(srv_url)
378 | if node and sess:
379 | try:
380 | self.api_manager.update_client_traffic(node, sess, email, total_up, total_down)
381 | self.traffic_state_manager.set_last_counter(email, srv_url, total_up, total_down)
382 | except Exception as e:
383 | logging.error(f"[CENTRAL RESET] Failed to write total to node {srv_url} for {email}: {e}")
384 |
385 |                 # Persist the total as well
386 | self.traffic_state_manager.set_total(email, total_up, total_down)
387 |
388 | logging.warning(
389 | f"[CENTRAL RESET] {email}: total reset to central current ({total_up},{total_down}); baselines reinitialized & aligned; node_totals cleared."
390 | )
391 | continue
392 |
393 | # 3) If no central reset: calculate per-server deltas (Scenario 1..3)
394 | total_up, total_down = self.traffic_state_manager.get_total(email)
395 | added_up, added_down = 0, 0
396 |
397 | for srv_url, cur_pair in currents_by_server.items():
398 |                 # If reading this node failed, skip it this cycle and leave its baseline untouched
399 | if cur_pair is None:
400 | logging.warning(f"[SKIP NODE] {email} @ {srv_url}: traffic read failed; keeping previous baseline.")
401 | continue
402 |
403 | cur_up, cur_down = cur_pair
404 | last = self.traffic_state_manager.get_last_counter(email, srv_url)
405 | if last is None:
406 | # First observation from this server: baseline = current (delta 0)
407 | self.traffic_state_manager.set_last_counter(email, srv_url, cur_up, cur_down)
408 | continue
409 |
410 | last_up, last_down = last
411 |
412 | # Real reset on this node: BOTH directions dropped -> delta=0
413 | if (cur_up < last_up) and (cur_down < last_down):
414 | du = 0
415 | dd = 0
416 | logging.warning(
417 | f"[NODE COUNTER DROP] {email} @ {srv_url}: "
418 | f"last=({last_up},{last_down}) -> cur=({cur_up},{cur_down}); treat as reset (delta=0)."
419 | )
420 | else:
421 | # Safe component-wise delta (no negatives)
422 | du = max(0, cur_up - last_up)
423 | dd = max(0, cur_down - last_down)
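    |                     # e.g. last=(1_000, 5_000), cur=(1_250, 5_400) -> du=250, dd=400;
    |                     # a dip in only one direction clamps that side to 0 instead.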
424 |
425 |                     # Clamp abnormally large deltas (optional)
426 | if delta_cap > 0:
427 | if (du + dd) > delta_cap:
428 | logging.warning(f"[DELTA CLAMP] {email} @ {srv_url}: (du+dd)={(du+dd)} > cap={delta_cap}; clamped to 0 for this interval.")
429 | du = 0
430 | dd = 0
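    |                     # Zeroing (instead of capping) discards the suspect sample, and
    |                     # since the baseline still advances below, a counter glitch costs
    |                     # at most one interval of accounting.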
431 |
432 | # Always update per-node baseline to the current observation
433 | self.traffic_state_manager.set_last_counter(email, srv_url, cur_up, cur_down)
434 |
435 | # Accumulate only positive deltas
436 | if du > 0 or dd > 0:
437 | added_up += du
438 | added_down += dd
439 | self.traffic_state_manager.add_node_delta(email, srv_url, du, dd)
440 |
441 | # 4) Add deltas and save new total (only if changed)
442 | changed = False
443 | if added_up != 0 or added_down != 0:
444 | total_up += added_up
445 | total_down += added_down
446 | changed = self.traffic_state_manager.set_total(email, total_up, total_down)
447 |
448 |             # 5) Write total to central and nodes; then baseline = total on each server whose write succeeded
449 | if changed:
450 | # Central first
451 | central_written = False
452 | try:
453 | self.api_manager.update_client_traffic(central, central_sess, email, total_up, total_down)
454 | self.traffic_state_manager.set_last_counter(email, central['url'], total_up, total_down)
455 | central_written = True
456 | except Exception as e:
457 | logging.error(f"[WRITE] Failed to write total to central for {email}: {e}")
458 |
459 | # Nodes
460 | for srv_url, sess in node_sessions.items():
461 | node = nodes_by_url.get(srv_url)
462 | if not node or not sess:
463 | continue
464 | try:
465 | self.api_manager.update_client_traffic(node, sess, email, total_up, total_down)
466 |                         # Align the baseline only if the write succeeded
467 | self.traffic_state_manager.set_last_counter(email, srv_url, total_up, total_down)
468 | except Exception as e:
469 | logging.error(f"[WRITE] Failed to write total to node {srv_url} for {email}: {e}")
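    |                 # Aligning baseline = total after a successful write keeps the write
    |                 # itself from being read back as fresh delta next cycle; on failure
    |                 # the old baseline is kept so the next read still diffs against what
    |                 # the node actually reports.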
470 |
471 | logging.debug(f"[DELTA ADD] {email}: +({added_up},{added_down}) -> total=({total_up},{total_down})")
472 |
473 | except Exception as e:
474 | logging.error(f"Error syncing traffic for {email}: {e}")
475 |
--------------------------------------------------------------------------------