├── .gitignore ├── blocker_conf ├── .env.example ├── vector │ └── vector.toml └── docker-compose.yml ├── blocker ├── go.mod ├── internal │ ├── models │ │ └── models.go │ ├── config │ │ └── config.go │ ├── logger │ │ └── logger.go │ ├── services │ │ ├── command │ │ │ └── executor.go │ │ └── rabbitmq │ │ │ └── consumer.go │ ├── processor │ │ └── processor.go │ └── worker │ │ └── worker.go ├── go.sum ├── Dockerfile └── cmd │ └── blocker_worker │ └── main.go ├── observer ├── Dockerfile ├── go.mod ├── internal │ ├── models │ │ └── models.go │ ├── services │ │ ├── alerter │ │ │ └── alerter.go │ │ ├── publisher │ │ │ └── publisher.go │ │ └── storage │ │ │ └── redis.go │ ├── scripts │ │ └── add_and_check_ip.lua │ ├── api │ │ └── server.go │ ├── config │ │ └── config.go │ ├── monitor │ │ └── monitor.go │ └── processor │ │ └── processor.go ├── cmd │ └── observer_service │ │ └── main.go └── go.sum ├── observer_conf ├── vector.toml ├── nginx.conf ├── .env.example └── docker-compose.yml ├── nftables_example.conf ├── README.md └── LICENSE.md /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | -------------------------------------------------------------------------------- /blocker_conf/.env.example: -------------------------------------------------------------------------------- 1 | APP_PORT= 2 | RABBITMQ_URL=amqps:// 3 | SSL_CERT= -------------------------------------------------------------------------------- /blocker/go.mod: -------------------------------------------------------------------------------- 1 | module blocker-worker 2 | 3 | go 1.24.4 4 | 5 | require github.com/rabbitmq/amqp091-go v1.10.0 6 | -------------------------------------------------------------------------------- /blocker/internal/models/models.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | // BlockingPayload представляет структуру входящих сообщений из RabbitMQ. 
4 | type BlockingPayload struct { 5 | IPs []string `json:"ips"` 6 | Duration string `json:"duration"` 7 | } -------------------------------------------------------------------------------- /blocker/go.sum: -------------------------------------------------------------------------------- 1 | github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= 2 | github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= 3 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 4 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 5 | -------------------------------------------------------------------------------- /blocker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24.4-alpine AS builder 2 | 3 | WORKDIR /app 4 | 5 | COPY go.mod go.sum ./ 6 | RUN go mod download 7 | 8 | COPY . . 9 | 10 | RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-w -s" -o blocker-worker ./cmd/blocker_worker 11 | 12 | FROM alpine:3.22 13 | 14 | RUN apk --no-cache add nftables 15 | 16 | WORKDIR /app 17 | 18 | COPY --from=builder /app/blocker-worker . 19 | 20 | CMD ["/app/blocker-worker"] -------------------------------------------------------------------------------- /observer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24.4-alpine AS builder 2 | 3 | WORKDIR /app 4 | 5 | COPY go.mod go.sum ./ 6 | RUN go mod download 7 | 8 | COPY . . 9 | 10 | RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-w -s" -o observer_service ./cmd/observer_service 11 | 12 | FROM alpine:3.22 13 | 14 | WORKDIR /app 15 | 16 | COPY --from=builder /app/observer_service . 
17 | 18 | COPY --from=builder /app/internal/scripts/add_and_check_ip.lua ./internal/scripts/add_and_check_ip.lua 19 | 20 | CMD ["/app/observer_service"] -------------------------------------------------------------------------------- /blocker/cmd/blocker_worker/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "blocker-worker/internal/config" 5 | "blocker-worker/internal/logger" 6 | "blocker-worker/internal/processor" 7 | "blocker-worker/internal/services/command" 8 | "blocker-worker/internal/worker" 9 | ) 10 | 11 | func main() { 12 | // 1. Инициализация зависимостей 13 | l := logger.New() 14 | cfg := config.New() 15 | cmdExecutor := command.NewExecutor(l) 16 | msgProcessor := processor.NewMessageProcessor(l, cmdExecutor) 17 | 18 | // 2. Инициализация главного воркера 19 | appWorker := worker.New(l, cfg, msgProcessor) 20 | 21 | // 3. Запуск приложения 22 | appWorker.Run() 23 | } -------------------------------------------------------------------------------- /blocker/internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "os" 5 | "time" 6 | ) 7 | 8 | const ( 9 | defaultRabbitMQURL = "amqp://guest:guest@localhost/" 10 | reconnectDelay = 5 * time.Second 11 | ) 12 | 13 | // Config хранит конфигурацию приложения. 14 | type Config struct { 15 | RabbitMQURL string 16 | ReconnectDelay time.Duration 17 | } 18 | 19 | // New создает новый экземпляр Config из переменных окружения. 
20 | func New() *Config { 21 | rabbitmqURL := os.Getenv("RABBITMQ_URL") 22 | if rabbitmqURL == "" { 23 | rabbitmqURL = defaultRabbitMQURL 24 | } 25 | 26 | return &Config{ 27 | RabbitMQURL: rabbitmqURL, 28 | ReconnectDelay: reconnectDelay, 29 | } 30 | } -------------------------------------------------------------------------------- /observer_conf/vector.toml: -------------------------------------------------------------------------------- 1 | # Источник: принимаем данные по HTTP от Nginx. 2 | [sources.http_receiver] 3 | type = "http" 4 | # Слушаем на всех интерфейсах внутри Docker-сети на порту 8686 5 | address = "0.0.0.0:8686" 6 | # Указываем, что тело запроса - это JSON 7 | decoding.codec = "json" 8 | 9 | # Назначение: отправляем полученные данные в сервис-наблюдатель. 10 | [sinks.observer_service] 11 | type = "http" 12 | inputs = ["http_receiver"] 13 | # Используем имя сервиса 'observer' из docker-compose и его внутренний порт 14 | uri = "http://observer:9000/log-entry" 15 | method = "post" 16 | encoding.codec = "json" 17 | 18 | # Настройки пакетной отправки 19 | batch.max_events = 100 20 | batch.timeout_secs = 5 21 | 22 | -------------------------------------------------------------------------------- /blocker/internal/logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "log" 5 | "os" 6 | ) 7 | 8 | const logPrefix = "[BlockerWorker]" 9 | 10 | // Logger — это обертка для стандартного log.Logger для добавления префикса. 11 | type Logger struct { 12 | *log.Logger 13 | } 14 | 15 | // New создает новый экземпляр Logger. 16 | func New() *Logger { 17 | return &Logger{ 18 | Logger: log.New(os.Stdout, "", log.LstdFlags), 19 | } 20 | } 21 | 22 | // Info логирует информационное сообщение. 23 | func (l *Logger) Info(msg string) { 24 | l.Printf("INFO - %s - %s", logPrefix, msg) 25 | } 26 | 27 | // Error логирует сообщение об ошибке. 
28 | func (l *Logger) Error(msg string) { 29 | l.Printf("ERROR - %s - %s", logPrefix, msg) 30 | } 31 | 32 | // Warning логирует предупреждение. 33 | func (l *Logger) Warning(msg string) { 34 | l.Printf("WARNING - %s - %s", logPrefix, msg) 35 | } -------------------------------------------------------------------------------- /observer_conf/nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 443 ssl; 3 | http2 on; 4 | server_name HEAD_DOMAIN; 5 | 6 | ssl_certificate /etc/letsencrypt/live/HEAD_DOMAIN/fullchain.pem; 7 | ssl_certificate_key /etc/letsencrypt/live/HEAD_DOMAIN/privkey.pem; 8 | 9 | ssl_protocols TLSv1.2 TLSv1.3; 10 | ssl_ciphers 'TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384'; 11 | ssl_prefer_server_ciphers off; 12 | ssl_session_cache shared:SSL:10m; 13 | ssl_session_timeout 10m; 14 | 15 | location / { 16 | proxy_pass http://vector-aggregator:8686; 17 | 18 | proxy_set_header Host $host; 19 | proxy_set_header X-Real-IP $remote_addr; 20 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 21 | proxy_set_header X-Forwarded-Proto $scheme; 22 | } 23 | } 24 | 25 | -------------------------------------------------------------------------------- /blocker/internal/services/command/executor.go: -------------------------------------------------------------------------------- 1 | package command 2 | 3 | import ( 4 | "blocker-worker/internal/logger" 5 | "context" 6 | "fmt" 7 | "os/exec" 8 | "strings" 9 | ) 10 | 11 | // Executor отвечает за выполнение внешних команд. 12 | type Executor struct { 13 | logger *logger.Logger 14 | } 15 | 16 | // NewExecutor создает новый исполнитель команд. 17 | func NewExecutor(l *logger.Logger) *Executor { 18 | return &Executor{logger: l} 19 | } 20 | 21 | // RunNftCommand безопасно выполняет команду nftables. 
22 | func (e *Executor) RunNftCommand(ctx context.Context, args ...string) error { 23 | // Добавляем 'nft' в начало списка аргументов 24 | fullArgs := append([]string{"nft"}, args...) 25 | 26 | cmd := exec.CommandContext(ctx, fullArgs[0], fullArgs[1:]...) 27 | output, err := cmd.CombinedOutput() 28 | 29 | fullCommandStr := strings.Join(fullArgs, " ") 30 | if err != nil { 31 | e.logger.Error(fmt.Sprintf("Ошибка выполнения команды '%s': %s", fullCommandStr, string(output))) 32 | return err 33 | } 34 | 35 | e.logger.Info(fmt.Sprintf("Команда '%s' выполнена успешно.", fullCommandStr)) 36 | return nil 37 | } -------------------------------------------------------------------------------- /observer_conf/.env.example: -------------------------------------------------------------------------------- 1 | # Переменные для сервиса observer 2 | REDIS_URL=redis://redis:6379/0 3 | # Лимит ип адресов у юзера, например когда значение станет больше 12-ти, все замеченные ип адреса будут заблокированы 4 | MAX_IPS_PER_USER=12 5 | # Время жизни ип адреса у юзера (в секундах, по умолчанию 1 час) 6 | USER_IP_TTL_SECONDS=86400 7 | # Кулдан уведомления (например по вебхуку в тг), время в секундах 8 | ALERT_COOLDOWN_SECONDS=30 9 | # Задержка в секундах перед очисткой IP-адресов пользователя после блокировки 10 | CLEAR_IPS_DELAY_SECONDS=30 11 | 12 | # Укажите реальный URL вебхука вашего ТГ-бота 13 | ALERT_WEBHOOK_URL=https://:<ПОРТ>/webhook/alert 14 | # Через запятую, без пробелов 15 | EXCLUDED_USERS= 16 | # IP-адреса, которые никогда не будут заблокированы, перечисляются через запятую (тут указываем адрес нод), записывать желательно без пробелов 17 | EXCLUDED_IPS=8.8.8.8,1.1.1.1,192.168.1.1 18 | 19 | RABBIT_USER= 20 | RABBIT_PASSWD= 21 | RABBITMQ_URL= 22 | #EXAMPLE RABBITMQ_URL=amqp://user:password@rabbitmq:5672/ 23 | 24 | 25 | # Время блокировки (в минутах), передаётся в nftables 26 | BLOCK_DURATION=5m -------------------------------------------------------------------------------- 
/observer/go.mod: -------------------------------------------------------------------------------- 1 | module observer_service 2 | 3 | go 1.24.4 4 | 5 | require ( 6 | github.com/gin-gonic/gin v1.10.1 7 | github.com/rabbitmq/amqp091-go v1.10.0 8 | github.com/redis/go-redis/v9 v9.11.0 9 | ) 10 | 11 | require ( 12 | github.com/bytedance/sonic v1.11.6 // indirect 13 | github.com/bytedance/sonic/loader v0.1.1 // indirect 14 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 15 | github.com/cloudwego/base64x v0.1.4 // indirect 16 | github.com/cloudwego/iasm v0.2.0 // indirect 17 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 18 | github.com/gabriel-vasile/mimetype v1.4.3 // indirect 19 | github.com/gin-contrib/sse v0.1.0 // indirect 20 | github.com/go-playground/locales v0.14.1 // indirect 21 | github.com/go-playground/universal-translator v0.18.1 // indirect 22 | github.com/go-playground/validator/v10 v10.20.0 // indirect 23 | github.com/goccy/go-json v0.10.2 // indirect 24 | github.com/json-iterator/go v1.1.12 // indirect 25 | github.com/klauspost/cpuid/v2 v2.2.7 // indirect 26 | github.com/leodido/go-urn v1.4.0 // indirect 27 | github.com/mattn/go-isatty v0.0.20 // indirect 28 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 29 | github.com/modern-go/reflect2 v1.0.2 // indirect 30 | github.com/pelletier/go-toml/v2 v2.2.2 // indirect 31 | github.com/twitchyliquid64/golang-asm v0.15.1 // indirect 32 | github.com/ugorji/go/codec v1.2.12 // indirect 33 | golang.org/x/arch v0.8.0 // indirect 34 | golang.org/x/crypto v0.36.0 // indirect 35 | golang.org/x/net v0.38.0 // indirect 36 | golang.org/x/sys v0.31.0 // indirect 37 | golang.org/x/text v0.23.0 // indirect 38 | google.golang.org/protobuf v1.34.1 // indirect 39 | gopkg.in/yaml.v3 v3.0.1 // indirect 40 | ) 41 | -------------------------------------------------------------------------------- /blocker_conf/vector/vector.toml: 
-------------------------------------------------------------------------------- 1 | # Источник данных: указываем, откуда читать логи. 2 | # Мы будем читать access.log из директории, которую пробросим из remnanode. 3 | [sources.xray_access_logs] 4 | type = "file" 5 | # ВАЖНО: Путь внутри контейнера Vector. Мы пробросим /var/log/remnanode с хоста. 6 | include = ["/var/log/remnanode/access.log"] 7 | # Начинаем читать с конца файла, чтобы не обрабатывать старые записи при перезапуске 8 | read_from = "end" 9 | 10 | # Трансформация: парсим каждую строку лога, чтобы извлечь нужные данные. 11 | [transforms.parse_xray_log] 12 | type = "remap" 13 | inputs = ["xray_access_logs"] 14 | source = ''' 15 | # (tcp:)? означает, что группа "tcp:" может присутствовать 0 или 1 раз. 16 | pattern = r'from (tcp:)?(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):\d+.*? email: (?P<email>\S+)' 17 | 18 | parsed, err = parse_regex(.message, pattern) 19 | 20 | if err != null { 21 | log("Не удалось распарсить строку лога: " + err, level: "warn") 22 | abort 23 | } 24 | 25 | . = { 26 | "user_email": parsed.email, 27 | "source_ip": parsed.ip, 28 | "timestamp": to_string(now()) 29 | } 30 | ''' 31 | 32 | # Назначение: отправляем обработанные данные на наш центральный сервис-наблюдатель. 33 | [sinks.central_observer_api] 34 | type = "http" 35 | inputs = ["parse_xray_log"] 36 | # ВАЖНО: Указываем HTTPS и ваш домен!
37 | uri = "https://HEAD_DOMAIN:38213/" 38 | method = "post" 39 | encoding.codec = "json" 40 | compression = "gzip" 41 | 42 | [sinks.central_observer_api.batch] 43 | max_events = 100 44 | timeout_secs = 5 45 | 46 | [sinks.central_observer_api.request] 47 | retry_attempts = 5 48 | retry_backoff_secs = 2 49 | 50 | [sinks.central_observer_api.tls] -------------------------------------------------------------------------------- /observer/internal/models/models.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | // LogEntry представляет запись лога, поступающую в сервис. 4 | type LogEntry struct { 5 | UserEmail string `json:"user_email" binding:"required"` 6 | SourceIP string `json:"source_ip" binding:"required"` 7 | } 8 | 9 | // AlertPayload представляет данные для отправки в вебхук. 10 | type AlertPayload struct { 11 | UserIdentifier string `json:"user_identifier"` 12 | DetectedIPsCount int `json:"detected_ips_count"` 13 | Limit int `json:"limit"` 14 | AllUserIPs []string `json:"all_user_ips"` 15 | BlockDuration string `json:"block_duration"` 16 | ViolationType string `json:"violation_type"` 17 | } 18 | 19 | // UserIPStats содержит статистику по IP-адресам пользователя для мониторинга. 20 | type UserIPStats struct { 21 | Email string `json:"email"` 22 | IPCount int `json:"ip_count"` 23 | Limit int `json:"limit"` 24 | IPs []string `json:"ips"` 25 | IPsWithTTL []string `json:"ips_with_ttl"` 26 | MinTTLHours float64 `json:"min_ttl_hours"` 27 | MaxTTLHours float64 `json:"max_ttl_hours"` 28 | Status string `json:"status"` 29 | HasAlertCooldown bool `json:"has_alert_cooldown"` 30 | IsExcluded bool `json:"excluded"` 31 | IsDebug bool `json:"is_debug"` 32 | } 33 | 34 | // BlockMessage представляет сообщение для отправки в очередь на блокировку. 
35 | type BlockMessage struct { 36 | IPs []string `json:"ips"` 37 | Duration string `json:"duration"` 38 | } 39 | 40 | // CheckResult представляет результат выполнения Lua-скрипта. 41 | type CheckResult struct { 42 | StatusCode int64 43 | CurrentIPCount int64 44 | IsNewIP bool 45 | AllUserIPs []string 46 | } -------------------------------------------------------------------------------- /observer/internal/services/alerter/alerter.go: -------------------------------------------------------------------------------- 1 | package alerter 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "log" 8 | "net/http" 9 | "observer_service/internal/models" 10 | "time" 11 | ) 12 | 13 | // Notifier определяет интерфейс для отправки уведомлений. 14 | type Notifier interface { 15 | SendAlert(payload models.AlertPayload) error 16 | } 17 | 18 | // WebhookAlerter реализует Notifier для отправки вебхуков. 19 | type WebhookAlerter struct { 20 | client *http.Client 21 | url string 22 | } 23 | 24 | // NewWebhookAlerter создает новый экземпляр WebhookAlerter. 25 | func NewWebhookAlerter(url string) *WebhookAlerter { 26 | return &WebhookAlerter{ 27 | client: &http.Client{ 28 | Timeout: 15 * time.Second, 29 | }, 30 | url: url, 31 | } 32 | } 33 | 34 | // SendAlert отправляет уведомление на заданный URL. 
35 | func (a *WebhookAlerter) SendAlert(payload models.AlertPayload) error { 36 | if a.url == "" { 37 | log.Println("ALERT_WEBHOOK_URL не задан, вебхук не отправляется") 38 | return nil 39 | } 40 | 41 | jsonData, err := json.Marshal(payload) 42 | if err != nil { 43 | return fmt.Errorf("ошибка сериализации payload: %w", err) 44 | } 45 | 46 | log.Printf("Попытка отправить вебхук на URL: %s", a.url) 47 | resp, err := a.client.Post(a.url, "application/json", bytes.NewBuffer(jsonData)) 48 | if err != nil { 49 | return fmt.Errorf("сетевая ошибка при отправке вебхука: %w", err) 50 | } 51 | defer resp.Body.Close() 52 | 53 | var respBody bytes.Buffer 54 | _, _ = respBody.ReadFrom(resp.Body) 55 | 56 | log.Printf("Вебхук-уведомление для %s отправлен. Статус ответа: %d. Тело ответа: %s", 57 | payload.UserIdentifier, resp.StatusCode, respBody.String()) 58 | 59 | if resp.StatusCode >= 400 { 60 | return fmt.Errorf("сервер вебхука ответил ошибкой: %s", resp.Status) 61 | } 62 | 63 | return nil 64 | } -------------------------------------------------------------------------------- /blocker_conf/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | blocker-xray: 3 | container_name: blocker-xray 4 | hostname: blocker-xray 5 | image: quay.io/0fl01/blocker-xray-go:0.0.6 6 | restart: unless-stopped 7 | network_mode: host 8 | logging: 9 | driver: "json-file" 10 | options: 11 | max-size: "8m" 12 | max-file: "5" 13 | env_file: 14 | - .env 15 | cap_add: 16 | - NET_ADMIN 17 | - NET_RAW 18 | depends_on: 19 | - remnanode 20 | deploy: 21 | resources: 22 | limits: 23 | memory: 64M 24 | cpus: '0.25' 25 | reservations: 26 | memory: 32M 27 | cpus: '0.10' 28 | 29 | 30 | # Ниже "пример" настройки ноды под self steal, но вам главное требуется настроить логирование и vector согласно примеру ниже: 31 | remnanode: 32 | container_name: remnanode 33 | hostname: remnanode 34 | image: 0fl01/remnawave-node:0.0.1 35 | restart: unless-stopped 36 | 
network_mode: host 37 | logging: 38 | driver: "json-file" 39 | options: 40 | max-size: "8m" 41 | max-file: "5" 42 | env_file: 43 | - .env 44 | volumes: 45 | - /dev/shm:/dev/shm:rw 46 | - /var/log/remnanode:/var/log/remnanode # <------- Важный параметр, отсюда обсервер берёт метрики по клиентам 47 | 48 | vector: 49 | image: timberio/vector:0.48.0-alpine 50 | container_name: vector 51 | hostname: vector 52 | restart: unless-stopped 53 | network_mode: host 54 | command: ["--config", "/etc/vector/vector.toml"] 55 | depends_on: 56 | - remnanode 57 | volumes: 58 | - ./vector/vector.toml:/etc/vector/vector.toml:ro 59 | - /var/log/remnanode:/var/log/remnanode:ro 60 | logging: 61 | driver: "json-file" 62 | options: 63 | max-size: "8m" 64 | max-file: "3" 65 | deploy: 66 | resources: 67 | limits: 68 | memory: 128M 69 | cpus: '0.25' 70 | reservations: 71 | memory: 64M 72 | cpus: '0.10' 73 | -------------------------------------------------------------------------------- /blocker/internal/processor/processor.go: -------------------------------------------------------------------------------- 1 | package processor 2 | 3 | import ( 4 | "blocker-worker/internal/logger" 5 | "blocker-worker/internal/models" 6 | "blocker-worker/internal/services/command" 7 | "context" 8 | "encoding/json" 9 | "fmt" 10 | "regexp" 11 | "sync" 12 | ) 13 | 14 | var validDurationPattern = regexp.MustCompile(`^\d+[smhd]$`) 15 | 16 | // MessageProcessor инкапсулирует логику обработки одного сообщения RabbitMQ. 17 | type MessageProcessor struct { 18 | logger *logger.Logger 19 | executor *command.Executor 20 | } 21 | 22 | // NewMessageProcessor создает новый обработчик сообщений. 23 | func NewMessageProcessor(l *logger.Logger, exec *command.Executor) *MessageProcessor { 24 | return &MessageProcessor{ 25 | logger: l, 26 | executor: exec, 27 | } 28 | } 29 | 30 | // Process принимает тело сообщения и выполняет действие по блокировке. 
31 | func (p *MessageProcessor) Process(ctx context.Context, body []byte) error { 32 | var payload models.BlockingPayload 33 | if err := json.Unmarshal(body, &payload); err != nil { 34 | p.logger.Error(fmt.Sprintf("Не удалось декодировать JSON из сообщения: %s", string(body))) 35 | return err 36 | } 37 | 38 | if len(payload.IPs) == 0 { 39 | p.logger.Info("Сообщение не содержит IP-адресов для блокировки, пропускаем.") 40 | return nil 41 | } 42 | 43 | duration := payload.Duration 44 | if duration == "" { 45 | duration = "5m" 46 | } 47 | 48 | if !validDurationPattern.MatchString(duration) { 49 | err := fmt.Errorf("недопустимый формат duration, сообщение отклонено: '%s'", duration) 50 | p.logger.Error(err.Error()) 51 | return err 52 | } 53 | 54 | var wg sync.WaitGroup 55 | for _, ip := range payload.IPs { 56 | wg.Add(1) 57 | go func(ipAddress string) { 58 | defer wg.Done() 59 | err := p.executor.RunNftCommand(ctx, "add", "element", "inet", "firewall", "user_blacklist", "{", ipAddress, "timeout", duration, "}") 60 | if err != nil { 61 | p.logger.Error(fmt.Sprintf("Ошибка при обработке IP %s: %v", ipAddress, err)) 62 | } 63 | }(ip) 64 | } 65 | wg.Wait() 66 | return nil 67 | } -------------------------------------------------------------------------------- /observer/internal/scripts/add_and_check_ip.lua: -------------------------------------------------------------------------------- 1 | -- KEYS[1]: ключ множества IP пользователя (например, user_ips:email@example.com) 2 | -- KEYS[2]: ключ кулдауна алертов для пользователя (например, alert_sent:email@example.com) 3 | -- ARGV[1]: IP-адрес для добавления 4 | -- ARGV[2]: TTL для IP-адреса в секундах 5 | -- ARGV[3]: Лимит IP-адресов для пользователя 6 | -- ARGV[4]: TTL для кулдауна алертов в секундах 7 | 8 | -- Добавляем IP в множество. Если он уже там, команда ничего не сделает, но вернет 0. 9 | local isNewIp = redis.call('SADD', KEYS[1], ARGV[1]) 10 | 11 | -- Устанавливаем/обновляем TTL для конкретного IP-адреса. 
12 | -- Ключ для TTL формируется из ключа множества и самого IP. 13 | local ipTtlKey = 'ip_ttl:' .. string.sub(KEYS[1], 10) .. ':' .. ARGV[1] 14 | redis.call('SETEX', ipTtlKey, ARGV[2], '1') 15 | 16 | -- Обновляем TTL для самого множества IP-адресов пользователя. 17 | -- Это гарантирует, что множество будет автоматически удалено, если пользователь неактивен. 18 | redis.call('EXPIRE', KEYS[1], ARGV[2]) 19 | 20 | -- Получаем текущее количество IP в множестве. Это быстрая операция O(1). 21 | local currentIpCount = redis.call('SCARD', KEYS[1]) 22 | local ipLimit = tonumber(ARGV[3]) 23 | 24 | -- Проверяем, превышен ли лимит 25 | if currentIpCount > ipLimit then 26 | -- Лимит превышен. Проверяем, был ли уже отправлен алерт (существует ли ключ кулдауна). 27 | local alertSent = redis.call('EXISTS', KEYS[2]) 28 | if alertSent == 0 then 29 | -- Кулдауна нет. Устанавливаем его и возвращаем сигнал на блокировку. 30 | redis.call('SETEX', KEYS[2], ARGV[4], '1') 31 | -- Получаем все IP пользователя для отправки в сообщении о блокировке. 32 | local allIps = redis.call('SMEMBERS', KEYS[1]) 33 | -- Возвращаем статус 1 (блокировать) и список IP 34 | return {1, allIps} 35 | else 36 | -- Кулдаун уже есть. Ничего не делаем. 37 | -- Возвращаем статус 2 (лимит превышен, но алерт на кулдауне) 38 | return {2, currentIpCount} 39 | end 40 | end 41 | 42 | -- Лимит не превышен. Возвращаем статус 0 (все в порядке) и текущее количество IP. 
43 | return {0, currentIpCount, isNewIp} -------------------------------------------------------------------------------- /observer_conf/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | observer: 3 | container_name: observer 4 | image: quay.io/0fl01/observer-xray-go:0.0.19 5 | restart: unless-stopped 6 | expose: 7 | - "9000" 8 | env_file: 9 | - .env 10 | depends_on: 11 | rabbitmq: 12 | condition: service_healthy 13 | vector-aggregator: 14 | condition: service_started 15 | redis: 16 | condition: service_started 17 | networks: 18 | - observer-net 19 | logging: 20 | driver: json-file 21 | options: 22 | max-size: "8m" 23 | max-file: "3" 24 | 25 | rabbitmq: 26 | image: rabbitmq:4.1.2-management-alpine 27 | container_name: rabbimq-xray 28 | restart: unless-stopped 29 | ports: 30 | - "5672:5672" 31 | - "15672:15672" 32 | volumes: 33 | - rabbitmq-data:/var/lib/rabbitmq 34 | environment: 35 | - RABBITMQ_DEFAULT_USER=${RABBIT_USER} 36 | - RABBITMQ_DEFAULT_PASS=${RABBIT_PASSWD} 37 | networks: 38 | - observer-net 39 | healthcheck: 40 | test: ["CMD", "rabbitmq-diagnostics", "ping"] 41 | interval: 10s 42 | timeout: 5s 43 | retries: 5 44 | start_period: 20s 45 | 46 | vector-aggregator: 47 | image: timberio/vector:0.48.0-alpine 48 | container_name: vector-aggregator 49 | restart: unless-stopped 50 | volumes: 51 | - ./vector.toml:/etc/vector/vector.toml:ro 52 | expose: 53 | - "8686" 54 | command: ["--config", "/etc/vector/vector.toml"] 55 | networks: 56 | - observer-net 57 | logging: 58 | driver: json-file 59 | options: 60 | max-size: "8m" 61 | max-file: "3" 62 | 63 | nginx: 64 | image: nginx:mainline-alpine 65 | container_name: nginx-proxy 66 | restart: unless-stopped 67 | ports: 68 | - "38213:443" 69 | volumes: 70 | - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro 71 | - /etc/letsencrypt:/etc/letsencrypt:ro 72 | depends_on: 73 | - vector-aggregator 74 | networks: 75 | - observer-net 76 | logging: 77 | driver: 
json-file 78 | options: 79 | max-size: "8m" 80 | max-file: "3" 81 | 82 | redis: 83 | image: redis:8.2-m01-alpine3.22 84 | container_name: redis 85 | restart: unless-stopped 86 | expose: 87 | - "6379" 88 | volumes: 89 | - redis-data:/data 90 | networks: 91 | - observer-net 92 | logging: 93 | driver: json-file 94 | options: 95 | max-size: "8m" 96 | max-file: "3" 97 | 98 | networks: 99 | observer-net: 100 | driver: bridge 101 | 102 | volumes: 103 | redis-data: 104 | rabbitmq-data: 105 | -------------------------------------------------------------------------------- /observer/internal/api/server.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "net/http" 7 | "observer_service/internal/models" 8 | "observer_service/internal/processor" 9 | "observer_service/internal/services/publisher" 10 | "observer_service/internal/services/storage" 11 | "time" 12 | 13 | "github.com/gin-gonic/gin" 14 | ) 15 | 16 | type Server struct { 17 | router *gin.Engine 18 | processor *processor.LogProcessor 19 | storage storage.IPStorage 20 | publisher publisher.EventPublisher 21 | port string 22 | } 23 | 24 | func NewServer(port string, proc *processor.LogProcessor, storage storage.IPStorage, pub publisher.EventPublisher) *Server { 25 | gin.SetMode(gin.ReleaseMode) 26 | router := gin.Default() 27 | router.Use(gin.Logger()) 28 | router.Use(gin.Recovery()) 29 | 30 | s := &Server{ 31 | router: router, 32 | processor: proc, 33 | storage: storage, 34 | publisher: pub, 35 | port: port, 36 | } 37 | 38 | s.setupRoutes() 39 | return s 40 | } 41 | 42 | func (s *Server) GetRouter() *gin.Engine { 43 | return s.router 44 | } 45 | 46 | func (s *Server) setupRoutes() { 47 | s.router.POST("/log-entry", s.handleProcessLogEntries) 48 | s.router.GET("/health", s.handleHealthCheck) 49 | } 50 | 51 | func (s *Server) Run() error { 52 | return s.router.Run(":" + s.port) 53 | } 54 | 55 | func (s *Server) 
handleProcessLogEntries(c *gin.Context) { 56 | var entries []models.LogEntry 57 | if err := c.ShouldBindJSON(&entries); err != nil { 58 | c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) 59 | return 60 | } 61 | 62 | if err := s.processor.EnqueueEntries(entries); err != nil { 63 | log.Printf("Warning: log queue is full. Rejecting request for %d entries. Error: %v", len(entries), err) 64 | c.JSON(http.StatusServiceUnavailable, gin.H{ 65 | "error": "Service is temporarily overloaded. Please try again later.", 66 | }) 67 | return 68 | } 69 | 70 | c.JSON(http.StatusAccepted, gin.H{ 71 | "status": "accepted", 72 | "processed_entries": len(entries), 73 | }) 74 | } 75 | 76 | func (s *Server) handleHealthCheck(c *gin.Context) { 77 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 78 | defer cancel() 79 | 80 | status := http.StatusOK 81 | response := gin.H{ 82 | "redis_connection": "ok", 83 | "rabbitmq_connection": "ok", 84 | } 85 | 86 | // Check Redis 87 | if err := s.storage.Ping(ctx); err != nil { 88 | status = http.StatusServiceUnavailable 89 | response["redis_connection"] = "failed" 90 | } 91 | 92 | // Check RabbitMQ 93 | if err := s.publisher.Ping(); err != nil { 94 | status = http.StatusServiceUnavailable 95 | response["rabbitmq_connection"] = "failed" 96 | } 97 | 98 | if status == http.StatusOK { 99 | response["status"] = "ok" 100 | } else { 101 | response["status"] = "error" 102 | } 103 | 104 | c.JSON(status, response) 105 | } -------------------------------------------------------------------------------- /observer/cmd/observer_service/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "net/http" 7 | "os" 8 | "os/signal" 9 | "sync" 10 | "syscall" 11 | "time" 12 | 13 | "observer_service/internal/api" 14 | "observer_service/internal/config" 15 | "observer_service/internal/monitor" 16 | "observer_service/internal/processor" 17 | 
"observer_service/internal/services/alerter"
 18 | 	"observer_service/internal/services/publisher"
 19 | 	"observer_service/internal/services/storage"
 20 | )
 21 | 
 22 | func main() {
 23 | 	log.SetFlags(log.LstdFlags | log.Lshortfile)
 24 | 
 25 | 	cfg := config.New()
 26 | 
 27 | 	// Context used to signal background goroutines to shut down.
 28 | 	ctx, cancel := context.WithCancel(context.Background())
 29 | 	// WaitGroup used to wait for all background goroutines to finish.
 30 | 	var wg sync.WaitGroup
 31 | 
 32 | 	redisStore, err := storage.NewRedisStore(ctx, cfg.RedisURL, "internal/scripts/add_and_check_ip.lua")
 33 | 	if err != nil {
 34 | 		log.Fatalf("Критическая ошибка: не удалось подключиться к Redis: %v", err)
 35 | 	}
 36 | 	defer redisStore.Close()
 37 | 
 38 | 	rabbitPublisher, err := publisher.NewRabbitMQPublisher(cfg.RabbitMQURL, cfg.BlockingExchangeName)
 39 | 	if err != nil {
 40 | 		log.Fatalf("Критическая ошибка: не удалось подключиться к RabbitMQ: %v", err)
 41 | 	}
 42 | 	defer rabbitPublisher.Close()
 43 | 
 44 | 	webhookAlerter := alerter.NewWebhookAlerter(cfg.AlertWebhookURL)
 45 | 
 46 | 	logProcessor := processor.NewLogProcessor(redisStore, rabbitPublisher, webhookAlerter, cfg)
 47 | 	poolMonitor := monitor.NewPoolMonitor(redisStore, cfg)
 48 | 	apiServer := api.NewServer(cfg.Port, logProcessor, redisStore, rabbitPublisher)
 49 | 
 50 | 	// Tell the WaitGroup we will be waiting for three goroutines.
 51 | 	wg.Add(3)
 52 | 	go poolMonitor.Run(ctx, &wg)
 53 | 	go logProcessor.StartWorkerPool(ctx, &wg)
 54 | 	go logProcessor.StartSideEffectWorkerPool(ctx, &wg) // Start the side-effect worker pool.
 55 | 
 56 | 	srv := &http.Server{
 57 | 		Addr:    ":" + cfg.Port,
 58 | 		Handler: apiServer.GetRouter(), // Obtain the router from our api.Server.
 59 | 	}
 60 | 
 61 | 	go func() {
 62 | 		log.Printf("Сервер Observer Service запущен на порту %s", cfg.Port)
 63 | 		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
 64 | 			log.Fatalf("Ошибка запуска сервера: %v", err)
 65 | 		}
 66 | 	}()
 67 | 
 68 | 	quit := make(chan os.Signal, 1)
 69 | 	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
 70 | 	<-quit
 71 | 
 72 | 	log.Println("Получен сигнал завершения, начинаю остановку сервиса...")
 73 | 
 74 | 	shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second)
 75 | 	defer shutdownCancel()
 76 | 
 77 | 	if err := srv.Shutdown(shutdownCtx); err != nil {
 78 | 		log.Printf("Ошибка при остановке HTTP-сервера: %v", err)
 79 | 	} else {
 80 | 		log.Println("HTTP-сервер успешно остановлен.")
 81 | 	}
 82 | 
 83 | 	cancel()
 84 | 
 85 | 	log.Println("Ожидание завершения фоновых процессов...")
 86 | 	wg.Wait()
 87 | 
 88 | 	log.Println("Все фоновые процессы остановлены. Сервис успешно остановлен.")
 89 | }
--------------------------------------------------------------------------------
/blocker/internal/worker/worker.go:
--------------------------------------------------------------------------------
 1 | package worker
 2 | 
 3 | import (
 4 | 	"blocker-worker/internal/config"
 5 | 	"blocker-worker/internal/logger"
 6 | 	"blocker-worker/internal/processor"
 7 | 	"blocker-worker/internal/services/rabbitmq"
 8 | 	"context"
 9 | 	"fmt"
 10 | 	"os"
 11 | 	"os/signal"
 12 | 	"syscall"
 13 | 	"time"
 14 | 
 15 | 	amqp "github.com/rabbitmq/amqp091-go"
 16 | )
 17 | 
 18 | // Worker is the main application structure.
 19 | type Worker struct {
 20 | 	logger    *logger.Logger
 21 | 	cfg       *config.Config
 22 | 	processor *processor.MessageProcessor
 23 | 	ctx       context.Context
 24 | 	cancel    context.CancelFunc
 25 | }
 26 | 
 27 | // New creates a new Worker with its own cancellable root context.
 28 | func New(l *logger.Logger, cfg *config.Config, proc *processor.MessageProcessor) *Worker {
 29 | 	ctx, cancel := context.WithCancel(context.Background())
 30 | 	return &Worker{
 31 | 		logger:    l,
 32 | 		cfg:       cfg,
 33 | 		processor: proc,
 34 | 		ctx:       ctx,
 35 | 		cancel:    cancel,
 36 | 	}
 37 | }
 38 | 
 39 | // handleMessage is the callback function for the RabbitMQ consumer.
40 | func (w *Worker) handleMessage(ctx context.Context, msg amqp.Delivery) error {
 41 | 	err := w.processor.Process(ctx, msg.Body)
 42 | 	if err != nil {
 43 | 		w.logger.Error(fmt.Sprintf("Произошла ошибка при обработке сообщения: %v. Сообщение не будет подтверждено.", err))
 44 | 		if errNack := msg.Nack(false, false); errNack != nil { // Nack without requeue: the message is dropped rather than redelivered in a loop.
 45 | 			w.logger.Error(fmt.Sprintf("Ошибка при Nack сообщения: %v", errNack))
 46 | 		}
 47 | 		return nil // Processing errors are logged but not propagated, so the consumer keeps running.
 48 | 	}
 49 | 
 50 | 	if errAck := msg.Ack(false); errAck != nil {
 51 | 		w.logger.Error(fmt.Sprintf("Ошибка при Ack сообщения: %v", errAck))
 52 | 		return errAck
 53 | 	}
 54 | 	return nil
 55 | }
 56 | 
 57 | // Run starts the worker, handling graceful shutdown and RabbitMQ reconnects.
 58 | func (w *Worker) Run() {
 59 | 	w.logger.Info("Запуск Blocker Worker...")
 60 | 
 61 | 	sigChan := make(chan os.Signal, 1)
 62 | 	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
 63 | 	go func() {
 64 | 		<-sigChan
 65 | 		w.logger.Info("Получен сигнал завершения, начинаем остановку...")
 66 | 		w.cancel()
 67 | 	}()
 68 | 
 69 | 	for {
 70 | 		select {
 71 | 		case <-w.ctx.Done():
 72 | 			w.logger.Info("Воркер остановлен.")
 73 | 			return
 74 | 		default:
 75 | 			consumer := rabbitmq.NewConsumer(w.logger, w.cfg.RabbitMQURL)
 76 | 			if err := consumer.Connect(); err != nil {
 77 | 				w.logger.Warning(fmt.Sprintf("Не удалось подключиться к RabbitMQ: %v. Повторная попытка через %v...", err, w.cfg.ReconnectDelay))
 78 | 				w.waitOrExit()
 79 | 				continue
 80 | 			}
 81 | 
 82 | 			err := consumer.SetupAndConsume(w.ctx, w.handleMessage)
 83 | 			consumer.Close()
 84 | 
 85 | 			if err != nil {
 86 | 				w.logger.Warning(fmt.Sprintf("Соединение с RabbitMQ потеряно: %v. Повторная попытка через %v...", err, w.cfg.ReconnectDelay))
 87 | 			}
 88 | 
 89 | 			w.waitOrExit()
 90 | 		}
 91 | 	}
 92 | }
 93 | 
 94 | // waitOrExit pauses for the reconnect delay, but returns immediately if the context is cancelled.
95 | func (w *Worker) waitOrExit() {
 96 | 	select {
 97 | 	case <-w.ctx.Done():
 98 | 		return
 99 | 	case <-time.After(w.cfg.ReconnectDelay):
 100 | 	}
 101 | }
--------------------------------------------------------------------------------
/blocker/internal/services/rabbitmq/consumer.go:
--------------------------------------------------------------------------------
 1 | package rabbitmq
 2 | 
 3 | import (
 4 | 	"blocker-worker/internal/logger"
 5 | 	"context"
 6 | 	"fmt"
 7 | 	amqp "github.com/rabbitmq/amqp091-go"
 8 | )
 9 | 
 10 | const (
 11 | 	blockingExchangeName = "blocking_exchange"
 12 | )
 13 | 
 14 | // Consumer handles connecting to RabbitMQ and receiving messages.
 15 | type Consumer struct {
 16 | 	logger      *logger.Logger
 17 | 	conn        *amqp.Connection
 18 | 	channel     *amqp.Channel
 19 | 	rabbitMQURL string
 20 | }
 21 | 
 22 | // NewConsumer creates a new RabbitMQ consumer.
 23 | func NewConsumer(l *logger.Logger, url string) *Consumer {
 24 | 	return &Consumer{
 25 | 		logger:      l,
 26 | 		rabbitMQURL: url,
 27 | 	}
 28 | }
 29 | 
 30 | // Connect establishes a connection to RabbitMQ and opens a channel.
 31 | func (c *Consumer) Connect() error {
 32 | 	var err error
 33 | 	c.conn, err = amqp.Dial(c.rabbitMQURL)
 34 | 	if err != nil {
 35 | 		return fmt.Errorf("не удалось подключиться к RabbitMQ: %w", err)
 36 | 	}
 37 | 
 38 | 	c.channel, err = c.conn.Channel()
 39 | 	if err != nil {
 40 | 		c.conn.Close() // Clean up the connection if the channel could not be created.
 41 | 		return fmt.Errorf("не удалось открыть канал: %w", err)
 42 | 	}
 43 | 
 44 | 	c.logger.Info("Соединение с RabbitMQ успешно установлено.")
 45 | 	return nil
 46 | }
 47 | 
 48 | // SetupAndConsume declares the exchange and queue and starts consuming messages.
 49 | // It takes a handler callback used to process each delivery.
50 | func (c *Consumer) SetupAndConsume(ctx context.Context, handler func(context.Context, amqp.Delivery) error) error {
 51 | 	if err := c.channel.Qos(1, 0, false); err != nil { // Prefetch of 1: process one message at a time.
 52 | 		return fmt.Errorf("не удалось установить QoS: %w", err)
 53 | 	}
 54 | 
 55 | 	err := c.channel.ExchangeDeclare(
 56 | 		blockingExchangeName, "fanout", true, false, false, false, nil,
 57 | 	)
 58 | 	if err != nil {
 59 | 		return fmt.Errorf("не удалось объявить exchange: %w", err)
 60 | 	}
 61 | 
 62 | 	q, err := c.channel.QueueDeclare(
 63 | 		"", false, false, true, false, nil, // Server-named, non-durable, exclusive queue.
 64 | 	)
 65 | 	if err != nil {
 66 | 		return fmt.Errorf("не удалось объявить очередь: %w", err)
 67 | 	}
 68 | 
 69 | 	err = c.channel.QueueBind(
 70 | 		q.Name, "", blockingExchangeName, false, nil,
 71 | 	)
 72 | 	if err != nil {
 73 | 		return fmt.Errorf("не удалось привязать очередь: %w", err)
 74 | 	}
 75 | 
 76 | 	msgs, err := c.channel.Consume(
 77 | 		q.Name, "", false, false, false, false, nil,
 78 | 	)
 79 | 	if err != nil {
 80 | 		return fmt.Errorf("не удалось зарегистрировать потребителя: %w", err)
 81 | 	}
 82 | 
 83 | 	c.logger.Info(" [*] Воркер готов. Ожидание сообщений о блокировке...")
 84 | 
 85 | 	for {
 86 | 		select {
 87 | 		case <-ctx.Done():
 88 | 			c.logger.Info("Контекст отменен, прекращаем потребление сообщений.")
 89 | 			return nil
 90 | 		case msg, ok := <-msgs:
 91 | 			if !ok { // Delivery channel closed: the underlying connection was lost.
 92 | 				return fmt.Errorf("канал сообщений RabbitMQ был закрыт")
 93 | 			}
 94 | 			if err := handler(ctx, msg); err != nil {
 95 | 				c.logger.Error(fmt.Sprintf("Ошибка при вызове обработчика сообщения: %v", err))
 96 | 			}
 97 | 		}
 98 | 	}
 99 | }
 100 | 
 101 | // Close gracefully shuts down the channel and connection.
102 | func (c *Consumer) Close() {
 103 | 	if c.channel != nil {
 104 | 		c.channel.Close()
 105 | 	}
 106 | 	if c.conn != nil {
 107 | 		c.conn.Close()
 108 | 	}
 109 | 	c.logger.Info("Соединение с RabbitMQ закрыто.")
 110 | }
--------------------------------------------------------------------------------
/observer/internal/config/config.go:
--------------------------------------------------------------------------------
 1 | package config
 2 | 
 3 | import (
 4 | 	"log"
 5 | 	"os"
 6 | 	"strconv"
 7 | 	"strings"
 8 | 	"time"
 9 | )
 10 | 
 11 | // Config holds all application configuration.
 12 | type Config struct {
 13 | 	Port                        string
 14 | 	RedisURL                    string
 15 | 	RabbitMQURL                 string
 16 | 	MaxIPsPerUser               int
 17 | 	AlertWebhookURL             string
 18 | 	UserIPTTL                   time.Duration
 19 | 	AlertCooldown               time.Duration
 20 | 	ClearIPsDelay               time.Duration
 21 | 	BlockDuration               string
 22 | 	BlockingExchangeName        string
 23 | 	MonitoringInterval          time.Duration
 24 | 	DebugEmail                  string
 25 | 	DebugIPLimit                int
 26 | 	ExcludedUsers               map[string]bool
 27 | 	ExcludedIPs                 map[string]bool
 28 | 	WorkerPoolSize              int
 29 | 	LogChannelBufferSize        int
 30 | 	SideEffectWorkerPoolSize    int
 31 | 	SideEffectChannelBufferSize int
 32 | }
 33 | 
 34 | // New loads the configuration from environment variables.
35 | func New() *Config {
 36 | 	cfg := &Config{
 37 | 		Port:                        getEnv("PORT", "9000"),
 38 | 		RedisURL:                    getEnv("REDIS_URL", "redis://localhost:6379/0"),
 39 | 		RabbitMQURL:                 getEnv("RABBITMQ_URL", "amqp://guest:guest@localhost/"),
 40 | 		MaxIPsPerUser:               getEnvInt("MAX_IPS_PER_USER", 3),
 41 | 		AlertWebhookURL:             getEnv("ALERT_WEBHOOK_URL", ""),
 42 | 		UserIPTTL:                   time.Duration(getEnvInt("USER_IP_TTL_SECONDS", 24*60*60)) * time.Second,
 43 | 		AlertCooldown:               time.Duration(getEnvInt("ALERT_COOLDOWN_SECONDS", 60*60)) * time.Second,
 44 | 		ClearIPsDelay:               time.Duration(getEnvInt("CLEAR_IPS_DELAY_SECONDS", 30)) * time.Second,
 45 | 		BlockDuration:               getEnv("BLOCK_DURATION", "5m"),
 46 | 		BlockingExchangeName:        getEnv("BLOCKING_EXCHANGE_NAME", "blocking_exchange"),
 47 | 		MonitoringInterval:          time.Duration(getEnvInt("MONITORING_INTERVAL", 300)) * time.Second,
 48 | 		DebugEmail:                  getEnv("DEBUG_EMAIL", ""),
 49 | 		DebugIPLimit:                getEnvInt("DEBUG_IP_LIMIT", 1),
 50 | 		ExcludedUsers:               parseSet(getEnv("EXCLUDED_USERS", "")),
 51 | 		ExcludedIPs:                 parseSet(getEnv("EXCLUDED_IPS", "")),
 52 | 		WorkerPoolSize:              getEnvInt("WORKER_POOL_SIZE", 20),
 53 | 		LogChannelBufferSize:        getEnvInt("LOG_CHANNEL_BUFFER_SIZE", 100),
 54 | 		SideEffectWorkerPoolSize:    getEnvInt("SIDE_EFFECT_WORKER_POOL_SIZE", 10),
 55 | 		SideEffectChannelBufferSize: getEnvInt("SIDE_EFFECT_CHANNEL_BUFFER_SIZE", 50),
 56 | 	}
 57 | 
 58 | 	log.Printf("Конфигурация загружена. Порт: %s", cfg.Port)
 59 | 	log.Printf("Пул воркеров обработки логов: %d воркеров, размер буфера канала: %d", cfg.WorkerPoolSize, cfg.LogChannelBufferSize)
 60 | 	log.Printf("Пул воркеров побочных задач (алерты, очистка): %d воркеров, размер буфера канала: %d", cfg.SideEffectWorkerPoolSize, cfg.SideEffectChannelBufferSize)
 61 | 	if len(cfg.ExcludedUsers) > 0 {
 62 | 		log.Printf("Загружен список исключений: %d пользователей", len(cfg.ExcludedUsers))
 63 | 	}
 64 | 	if len(cfg.ExcludedIPs) > 0 {
 65 | 		log.Printf("Загружен список исключений IP-адресов: %d", len(cfg.ExcludedIPs))
 66 | 	}
 67 | 	if cfg.DebugEmail != "" {
 68 | 		log.Printf("Режим дебага включен для email: %s с лимитом IP: %d", cfg.DebugEmail, cfg.DebugIPLimit)
 69 | 	}
 70 | 
 71 | 	return cfg
 72 | }
 73 | 
 74 | func getEnv(key, defaultValue string) string { // getEnv returns the env var value, or defaultValue when it is unset or empty.
 75 | 	if value := os.Getenv(key); value != "" {
 76 | 		return value
 77 | 	}
 78 | 	return defaultValue
 79 | }
 80 | 
 81 | func getEnvInt(key string, defaultValue int) int { // getEnvInt parses the env var as int, falling back to defaultValue on absence or parse error.
 82 | 	if value := os.Getenv(key); value != "" {
 83 | 		if intValue, err := strconv.Atoi(value); err == nil {
 84 | 			return intValue
 85 | 		}
 86 | 	}
 87 | 	return defaultValue
 88 | }
 89 | 
 90 | func parseSet(value string) map[string]bool { // parseSet turns a comma-separated list into a set, trimming whitespace and skipping empty items.
 91 | 	set := make(map[string]bool)
 92 | 	if value == "" {
 93 | 		return set
 94 | 	}
 95 | 	items := strings.Split(value, ",")
 96 | 	for _, item := range items {
 97 | 		item = strings.TrimSpace(item)
 98 | 		if item != "" {
 99 | 			set[item] = true
 100 | 		}
 101 | 	}
 102 | 	return set
 103 | }
--------------------------------------------------------------------------------
/observer/internal/services/publisher/publisher.go:
--------------------------------------------------------------------------------
 1 | package publisher
 2 | 
 3 | import (
 4 | 	"encoding/json"
 5 | 	"errors"
 6 | 	"fmt"
 7 | 	"log"
 8 | 	"observer_service/internal/models"
 9 | 	"sync"
 10 | 	"time"
 11 | 
 12 | 	"github.com/rabbitmq/amqp091-go"
 13 | )
 14 | 
 15 | // EventPublisher defines the interface for publishing events.
16 | type EventPublisher interface {
 17 | 	PublishBlockMessage(ips []string, duration string) error
 18 | 	Close() error
 19 | 	Ping() error
 20 | }
 21 | 
 22 | // RabbitMQPublisher implements EventPublisher for RabbitMQ.
 23 | type RabbitMQPublisher struct {
 24 | 	conn         *amqp091.Connection
 25 | 	channel      *amqp091.Channel
 26 | 	exchangeName string
 27 | 	url          string
 28 | 	mux          sync.Mutex
 29 | 	maxRetries   int
 30 | 	retryDelay   time.Duration
 31 | }
 32 | 
 33 | // NewRabbitMQPublisher creates and configures a new RabbitMQ publisher.
 34 | func NewRabbitMQPublisher(url, exchangeName string) (*RabbitMQPublisher, error) {
 35 | 	p := &RabbitMQPublisher{
 36 | 		url:          url,
 37 | 		exchangeName: exchangeName,
 38 | 		maxRetries:   5,
 39 | 		retryDelay:   2 * time.Second,
 40 | 	}
 41 | 
 42 | 	if err := p.connectWithRetry(); err != nil {
 43 | 		return nil, fmt.Errorf("не удалось подключиться к RabbitMQ при инициализации: %w", err)
 44 | 	}
 45 | 
 46 | 	return p, nil
 47 | }
 48 | 
 49 | func (p *RabbitMQPublisher) connect() error { // connect dials RabbitMQ, opens a channel and declares the fanout exchange.
 50 | 	conn, err := amqp091.Dial(p.url)
 51 | 	if err != nil {
 52 | 		return fmt.Errorf("ошибка подключения к RabbitMQ: %w", err)
 53 | 	}
 54 | 
 55 | 	ch, err := conn.Channel()
 56 | 	if err != nil {
 57 | 		conn.Close()
 58 | 		return fmt.Errorf("ошибка создания канала RabbitMQ: %w", err)
 59 | 	}
 60 | 
 61 | 	err = ch.ExchangeDeclare(
 62 | 		p.exchangeName, // name
 63 | 		"fanout",       // type
 64 | 		true,           // durable
 65 | 		false,          // auto-deleted
 66 | 		false,          // internal
 67 | 		false,          // no-wait
 68 | 		nil,            // arguments
 69 | 	)
 70 | 	if err != nil {
 71 | 		ch.Close()
 72 | 		conn.Close()
 73 | 		return fmt.Errorf("ошибка создания exchange: %w", err)
 74 | 	}
 75 | 
 76 | 	p.conn = conn
 77 | 	p.channel = ch
 78 | 	log.Println("Успешное (пере)подключение к RabbitMQ и настройка канала.")
 79 | 	return nil
 80 | }
 81 | 
 82 | func (p *RabbitMQPublisher) connectWithRetry() error { // connectWithRetry retries connect up to maxRetries times with a fixed delay.
 83 | 	var err error
 84 | 	for i := 0; i < p.maxRetries; i++ {
 85 | 		err = p.connect()
 86 | 		if err == nil {
 87 | 			return nil
 88 | 		}
 89 | 		log.Printf("Не удалось подключиться к RabbitMQ (попытка %d/%d): %v. Повтор через %v...", i+1, p.maxRetries, err, p.retryDelay)
 90 | 		time.Sleep(p.retryDelay)
 91 | 	}
 92 | 	return fmt.Errorf("не удалось подключиться к RabbitMQ после %d попыток: %w", p.maxRetries, err)
 93 | }
 94 | 
 95 | // PublishBlockMessage publishes a blocking message with reconnect logic.
 96 | func (p *RabbitMQPublisher) PublishBlockMessage(ips []string, duration string) error {
 97 | 	p.mux.Lock()
 98 | 	defer p.mux.Unlock()
 99 | 
 100 | 	blockMsg := models.BlockMessage{
 101 | 		IPs:      ips,
 102 | 		Duration: duration,
 103 | 	}
 104 | 	body, err := json.Marshal(blockMsg)
 105 | 	if err != nil {
 106 | 		return fmt.Errorf("ошибка сериализации сообщения о блокировке: %w", err)
 107 | 	}
 108 | 
 109 | 	for i := 0; i < p.maxRetries; i++ {
 110 | 		if p.conn == nil || p.conn.IsClosed() {
 111 | 			log.Println("Соединение с RabbitMQ потеряно. Попытка переподключения...")
 112 | 			if err := p.connectWithRetry(); err != nil {
 113 | 				log.Printf("Не удалось восстановить соединение с RabbitMQ: %v", err)
 114 | 				time.Sleep(p.retryDelay)
 115 | 				continue
 116 | 			}
 117 | 		}
 118 | 
 119 | 		err = p.channel.Publish(
 120 | 			p.exchangeName,
 121 | 			"",
 122 | 			false,
 123 | 			false,
 124 | 			amqp091.Publishing{
 125 | 				ContentType:  "application/json",
 126 | 				Body:         body,
 127 | 				DeliveryMode: amqp091.Persistent,
 128 | 			},
 129 | 		)
 130 | 
 131 | 		if err == nil {
 132 | 			return nil // Success.
 133 | 		}
 134 | 
 135 | 		log.Printf("Ошибка публикации сообщения в RabbitMQ (попытка %d/%d): %v. Повтор...", i+1, p.maxRetries, err)
 136 | 		if p.conn != nil {
 137 | 			p.conn.Close() // Force-close so the next iteration re-establishes the connection.
 138 | 		}
 139 | 		time.Sleep(p.retryDelay)
 140 | 	}
 141 | 
 142 | 	return fmt.Errorf("критическая ошибка: не удалось опубликовать сообщение в RabbitMQ после %d попыток", p.maxRetries)
 143 | }
 144 | 
 145 | // Ping reports the current state of the RabbitMQ connection without attempting to reconnect.
146 | func (p *RabbitMQPublisher) Ping() error {
 147 | 	p.mux.Lock()
 148 | 	defer p.mux.Unlock()
 149 | 
 150 | 	if p.conn == nil || p.conn.IsClosed() {
 151 | 		return errors.New("rabbitmq connection is not active")
 152 | 	}
 153 | 	if p.channel == nil {
 154 | 		return errors.New("rabbitmq channel is not active")
 155 | 	}
 156 | 	return nil
 157 | }
 158 | 
 159 | // Close closes the RabbitMQ connection.
 160 | func (p *RabbitMQPublisher) Close() error {
 161 | 	if p.conn != nil && !p.conn.IsClosed() {
 162 | 		return p.conn.Close()
 163 | 	}
 164 | 	return nil
 165 | }
--------------------------------------------------------------------------------
/nftables_example.conf:
--------------------------------------------------------------------------------
 1 | #!/usr/sbin/nft -f
 2 | 
 3 | flush ruleset
 4 | 
 5 | # Port constants
 6 | define SSH_PORT = 22
 7 | define CONTROL_PORT = 2222
 8 | define MONITORING_PORT = 9100
 9 | define WEB_PORTS = { 80, 443 }
 10 | 
 11 | table inet firewall {
 12 | 
 13 |     # Dynamic blacklist for "DDoS sources"
 14 |     set ddos_blacklist {
 15 |         type ipv4_addr
 16 |         flags timeout
 17 |         timeout 5m
 18 |         size 8192
 19 |         comment "Dynamic blacklist for DDoS sources"
 20 |     }
 21 | 
 22 |     # Blocked IP addresses detected by the observer are added here
 23 |     set user_blacklist {
 24 |         type ipv4_addr
 25 |         flags timeout
 26 |         size 8192
 27 |         comment "Dynamic blacklist for subscription policy violators"
 28 |     }
 29 | 
 30 |     # Put the IP address of the remnawave panel here
 31 |     set control_plane_sources {
 32 |         type ipv4_addr
 33 |         elements = { IP_ADRESS }
 34 |     }
 35 | 
 36 |     # A metrics-collection host (e.g. prometheus/blackbox-exporter) can be added here
 37 |     set monitoring_sources {
 38 |         type ipv4_addr
 39 |         elements = { IP_ADRESS }
 40 |     }
 41 | 
 42 |     set tls_flood_sources {
 43 |         type ipv4_addr
 44 |         flags timeout
 45 |         timeout 15m
 46 |         size 4096
 47 |     }
 48 | 
 49 |     chain prerouting {
 50 |         type filter hook prerouting priority raw; policy accept;
 51 | 
 52 |         # Блокировка нарушителей (обнаруженных с помощью
observer) 53 | ip saddr @user_blacklist drop comment "Drop traffic from policy violators" 54 | 55 | ip6 version 6 drop comment "Block IPv6 completely" 56 | iif != lo ip saddr 127.0.0.0/8 drop comment "Block spoofed loopback from external" 57 | ip frag-off & 0x1fff != 0 drop comment "Drop fragmented packets" 58 | 59 | tcp flags & (fin|syn|rst|psh|ack|urg) == 0 drop comment "Drop NULL packets" 60 | tcp flags & (fin|syn|rst|psh|ack|urg) == fin|syn|rst|psh|ack|urg drop comment "Drop XMAS packets" 61 | tcp flags & (syn|rst) == syn|rst drop comment "Drop SYN+RST packets" 62 | tcp flags & (syn|fin) == syn|fin drop comment "Drop SYN+FIN packets" 63 | 64 | fib daddr type broadcast drop comment "Drop broadcast early" 65 | fib daddr type multicast drop comment "Drop multicast early" 66 | fib daddr type anycast drop comment "Drop anycast early" 67 | 68 | ip saddr @ddos_blacklist drop comment "Drop blacklisted source early" 69 | 70 | tcp dport $SSH_PORT tcp flags & (syn|ack) == syn limit rate 5/second burst 3 packets accept comment "SSH SYN flood limit" 71 | tcp dport $SSH_PORT tcp flags & (syn|ack) == syn add @ddos_blacklist { ip saddr timeout 5m } drop comment "Blacklist SSH flooders" 72 | 73 | ip protocol icmp icmp type echo-request limit rate 2/second burst 2 packets accept comment "Allow limited ping" 74 | ip protocol icmp icmp type echo-request add @ddos_blacklist { ip saddr timeout 5m } drop comment "Blacklist ping flooders" 75 | } 76 | 77 | chain forward { 78 | type filter hook forward priority filter; policy drop; 79 | ct state established,related accept comment "Allow established forward" 80 | drop comment "Drop forward" 81 | } 82 | 83 | chain output { 84 | type filter hook output priority filter; policy accept; 85 | } 86 | 87 | chain filter_input { 88 | type filter hook input priority filter; policy drop; 89 | 90 | iif lo accept comment "Allow loopback" 91 | ct state invalid drop comment "Drop invalid packets" 92 | ct state established,related accept comment "Allow 
established" 93 | 94 | ip saddr @ddos_blacklist drop comment "Drop known DDoS sources" 95 | 96 | # Connection limits 97 | tcp dport $WEB_PORTS ct count over 100 drop comment "Limit concurrent web connections" 98 | tcp dport $SSH_PORT ct count over 15 drop comment "Limit SSH connections" 99 | ct count over 100 drop comment "Limit total connections per IP" 100 | 101 | # SSH protection 102 | tcp dport $SSH_PORT ct state new meter ssh_meter { ip saddr limit rate 5/minute burst 3 packets } accept comment "SSH rate limit" 103 | tcp dport $SSH_PORT ct state new add @ddos_blacklist { ip saddr timeout 5m } drop comment "SSH flood → blacklist" 104 | 105 | # Control и monitoring 106 | ip saddr @control_plane_sources tcp dport $CONTROL_PORT ct state new accept comment "Control plane" 107 | ip saddr @monitoring_sources tcp dport $MONITORING_PORT ct state new accept comment "Monitoring" 108 | 109 | # TLS protection 110 | ip saddr @tls_flood_sources drop comment "Drop TLS flooded IPs" 111 | tcp dport 443 ct state new meter tls_meter { ip saddr limit rate 400/second burst 300 packets } accept comment "TLS connections" 112 | tcp dport 443 ct state new add @tls_flood_sources { ip saddr timeout 5m } drop comment "TLS flood → temp block" 113 | 114 | # HTTP для cert renewal 115 | tcp dport 80 ct state new meter cert_meter { ip saddr limit rate 5/minute burst 3 packets } accept comment "HTTP cert renewal" 116 | 117 | drop comment "Default drop" 118 | } 119 | } -------------------------------------------------------------------------------- /observer/internal/monitor/monitor.go: -------------------------------------------------------------------------------- 1 | package monitor 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "math" 8 | "observer_service/internal/config" 9 | "observer_service/internal/models" 10 | "observer_service/internal/services/storage" 11 | "sort" 12 | "strings" 13 | "sync" 14 | "time" 15 | ) 16 | 17 | // PoolMonitor выполняет периодический мониторинг пулов 
IP. 18 | type PoolMonitor struct { 19 | storage storage.IPStorage 20 | cfg *config.Config 21 | } 22 | 23 | // NewPoolMonitor создает новый экземпляр PoolMonitor. 24 | func NewPoolMonitor(s storage.IPStorage, cfg *config.Config) *PoolMonitor { 25 | return &PoolMonitor{ 26 | storage: s, 27 | cfg: cfg, 28 | } 29 | } 30 | 31 | // Run запускает бесконечный цикл мониторинга. 32 | func (m *PoolMonitor) Run(ctx context.Context, wg *sync.WaitGroup) { 33 | // Гарантируем вызов Done() при выходе из функции 34 | defer wg.Done() 35 | 36 | log.Printf("Мониторинг IP-пулов запущен с интервалом %v", m.cfg.MonitoringInterval) 37 | ticker := time.NewTicker(m.cfg.MonitoringInterval) 38 | defer ticker.Stop() 39 | 40 | for { 41 | select { 42 | case <-ticker.C: 43 | m.performMonitoring(context.Background()) 44 | case <-ctx.Done(): 45 | log.Println("Остановка мониторинга IP-пулов.") 46 | return 47 | } 48 | } 49 | } 50 | 51 | func (m *PoolMonitor) performMonitoring(ctx context.Context) { 52 | userEmails, err := m.storage.GetAllUserEmails(ctx) 53 | if err != nil { 54 | log.Printf("Ошибка мониторинга (GetAllUserEmails): %v", err) 55 | return 56 | } 57 | 58 | now := time.Now().Format("2006-01-02 15:04:05") 59 | if len(userEmails) == 0 { 60 | fmt.Printf("[%s] === IP POOLS MONITORING === НЕТ АКТИВНЫХ ПОЛЬЗОВАТЕЛЕЙ\n", now) 61 | return 62 | } 63 | 64 | fmt.Printf("\n[%s] === IP POOLS MONITORING START ===\n", now) 65 | defer fmt.Printf("[%s] === IP POOLS MONITORING END ===\n\n", time.Now().Format("2006-01-02 15:04:05")) 66 | 67 | var allStats []models.UserIPStats 68 | for _, email := range userEmails { 69 | stats, err := m.buildUserStats(ctx, email) 70 | if err != nil { 71 | log.Printf("Ошибка при сборе статистики для %s: %v", email, err) 72 | continue 73 | } 74 | if stats != nil { 75 | allStats = append(allStats, *stats) 76 | } 77 | } 78 | 79 | sort.Slice(allStats, func(i, j int) bool { 80 | return allStats[i].IPCount > allStats[j].IPCount 81 | }) 82 | 83 | m.printSummary(allStats) 84 | 
m.printTopUsers(allStats) 85 | m.printOverLimitUsers(allStats) 86 | } 87 | 88 | func (m *PoolMonitor) buildUserStats(ctx context.Context, email string) (*models.UserIPStats, error) { 89 | activeIPs, err := m.storage.GetUserActiveIPs(ctx, email) 90 | if err != nil { 91 | return nil, err 92 | } 93 | if len(activeIPs) == 0 { 94 | return nil, nil 95 | } 96 | 97 | userLimit := m.getUserIPLimit(email) 98 | ipCount := len(activeIPs) 99 | status := "NORMAL" 100 | if float64(ipCount) >= float64(userLimit)*0.8 { 101 | status = "NEAR_LIMIT" 102 | } 103 | if ipCount > userLimit { 104 | status = "OVER_LIMIT" 105 | } 106 | 107 | hasCooldown, _ := m.storage.HasAlertCooldown(ctx, email) 108 | 109 | var ips, ipsWithTTL []string 110 | var ttlValues []int 111 | for ip, ttl := range activeIPs { 112 | ips = append(ips, ip) 113 | ipsWithTTL = append(ipsWithTTL, fmt.Sprintf("%s(%.1fh)", ip, float64(ttl)/3600.0)) 114 | ttlValues = append(ttlValues, ttl) 115 | } 116 | sort.Strings(ips) 117 | sort.Strings(ipsWithTTL) 118 | 119 | minTTL, maxTTL := 0.0, 0.0 120 | if len(ttlValues) > 0 { 121 | sort.Ints(ttlValues) 122 | minTTL = float64(ttlValues[0]) / 3600.0 123 | maxTTL = float64(ttlValues[len(ttlValues)-1]) / 3600.0 124 | } 125 | 126 | return &models.UserIPStats{ 127 | Email: email, 128 | IPCount: ipCount, 129 | Limit: userLimit, 130 | IPs: ips, 131 | IPsWithTTL: ipsWithTTL, 132 | MinTTLHours: math.Round(minTTL*10) / 10, 133 | MaxTTLHours: math.Round(maxTTL*10) / 10, 134 | Status: status, 135 | HasAlertCooldown: hasCooldown, 136 | IsExcluded: m.cfg.ExcludedUsers[email], 137 | IsDebug: m.cfg.DebugEmail != "" && email == m.cfg.DebugEmail, 138 | }, nil 139 | } 140 | 141 | func (m *PoolMonitor) printSummary(stats []models.UserIPStats) { 142 | var total, nearLimit, overLimit, excluded, debug int 143 | total = len(stats) 144 | for _, s := range stats { 145 | if s.Status == "NEAR_LIMIT" { 146 | nearLimit++ 147 | } 148 | if s.Status == "OVER_LIMIT" { 149 | overLimit++ 150 | } 151 | if s.IsExcluded 
{ 152 | excluded++ 153 | } 154 | if s.IsDebug { 155 | debug++ 156 | } 157 | } 158 | fmt.Println("📊 ОБЩАЯ СТАТИСТИКА:") 159 | fmt.Printf(" 👥 Всего активных пользователей: %d\n", total) 160 | fmt.Printf(" ⚠️ Близко к лимиту: %d\n", nearLimit) 161 | fmt.Printf(" 🚨 Превышение лимита: %d\n", overLimit) 162 | fmt.Printf(" 🛡️ Исключенных пользователей: %d\n", excluded) 163 | if m.cfg.DebugEmail != "" { 164 | fmt.Printf(" 🐛 Debug пользователей: %d\n", debug) 165 | } 166 | } 167 | 168 | func (m *PoolMonitor) printTopUsers(stats []models.UserIPStats) { 169 | fmt.Println("\n📈 ТОП ПОЛЬЗОВАТЕЛИ ПО КОЛИЧЕСТВУ IP:") 170 | limit := 10 171 | if len(stats) < limit { 172 | limit = len(stats) 173 | } 174 | for i := 0; i < limit; i++ { 175 | user := stats[i] 176 | fmt.Printf(" %2d. %s %s%s\n", i+1, getStatusEmoji(user.Status), user.Email, getMarkers(user)) 177 | fmt.Printf(" IP: %d/%d | TTL: %.1f-%.1fh\n", user.IPCount, user.Limit, user.MinTTLHours, user.MaxTTLHours) 178 | fmt.Printf(" IPs: %s\n", strings.Join(user.IPsWithTTL, ", ")) 179 | } 180 | } 181 | 182 | func (m *PoolMonitor) printOverLimitUsers(stats []models.UserIPStats) { 183 | var overLimitUsers []models.UserIPStats 184 | for _, user := range stats { 185 | if user.Status == "OVER_LIMIT" { 186 | overLimitUsers = append(overLimitUsers, user) 187 | } 188 | } 189 | if len(overLimitUsers) > 0 { 190 | fmt.Println("\n🚨 ПОЛЬЗОВАТЕЛИ С ПРЕВЫШЕНИЕМ ЛИМИТА:") 191 | for _, user := range overLimitUsers { 192 | fmt.Printf(" • %s%s\n", user.Email, getMarkers(user)) 193 | fmt.Printf(" IP: %d/%d | TTL: %.1f-%.1fh\n", user.IPCount, user.Limit, user.MinTTLHours, user.MaxTTLHours) 194 | fmt.Printf(" IPs: %s\n", strings.Join(user.IPsWithTTL, ", ")) 195 | } 196 | } 197 | } 198 | 199 | func (m *PoolMonitor) getUserIPLimit(userEmail string) int { 200 | if m.cfg.DebugEmail != "" && userEmail == m.cfg.DebugEmail { 201 | return m.cfg.DebugIPLimit 202 | } 203 | return m.cfg.MaxIPsPerUser 204 | } 205 | 206 | func getStatusEmoji(status string) string { 
207 | switch status { 208 | case "NORMAL": 209 | return "✅" 210 | case "NEAR_LIMIT": 211 | return "⚠️" 212 | case "OVER_LIMIT": 213 | return "🚨" 214 | default: 215 | return "❓" 216 | } 217 | } 218 | 219 | func getMarkers(user models.UserIPStats) string { 220 | var markers []string 221 | if user.IsExcluded { 222 | markers = append(markers, "[EXCLUDED]") 223 | } 224 | if user.HasAlertCooldown { 225 | markers = append(markers, "[ALERT_COOLDOWN]") 226 | } 227 | if user.IsDebug { 228 | markers = append(markers, "[DEBUG]") 229 | } 230 | if len(markers) > 0 { 231 | return " " + strings.Join(markers, " ") 232 | } 233 | return "" 234 | } -------------------------------------------------------------------------------- /observer/internal/services/storage/redis.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "observer_service/internal/models" 8 | "os" 9 | "strings" 10 | "time" 11 | 12 | "github.com/redis/go-redis/v9" 13 | ) 14 | 15 | // Скрипт для атомарной очистки всех ключей пользователя. 16 | // Он получает все IP из множества пользователя, формирует список всех связанных ключей 17 | // (само множество и ключи TTL для каждого IP) и удаляет их одной командой DEL. 18 | // Это гарантирует, что никакие новые IP не "просочатся" между чтением и удалением. 19 | // 20 | // KEYS[1]: ключ множества IP пользователя 21 | // ARGV[1]: префикс для ключей TTL 22 | const clearUserIPsScript = ` 23 | local ips = redis.call('SMEMBERS', KEYS[1]) 24 | local keysToDelete = { KEYS[1] } 25 | 26 | for i, ip in ipairs(ips) do 27 | table.insert(keysToDelete, ARGV[1] .. ':' .. ip) 28 | end 29 | 30 | return redis.call('DEL', unpack(keysToDelete)) 31 | ` 32 | 33 | // IPStorage определяет интерфейс для работы с хранилищем IP-адресов. 
34 | type IPStorage interface {
 35 | 	CheckAndAddIP(ctx context.Context, email, ip string, limit int, ttl, cooldown time.Duration) (*models.CheckResult, error)
 36 | 	ClearUserIPs(ctx context.Context, email string) (int, error)
 37 | 	GetUserActiveIPs(ctx context.Context, userEmail string) (map[string]int, error)
 38 | 	GetAllUserEmails(ctx context.Context) ([]string, error)
 39 | 	HasAlertCooldown(ctx context.Context, userEmail string) (bool, error)
 40 | 	Ping(ctx context.Context) error
 41 | 	Close() error
 42 | }
 43 | 
 44 | // RedisStore implements IPStorage using Redis.
 45 | type RedisStore struct {
 46 | 	client            *redis.Client
 47 | 	addCheckScriptSHA string
 48 | 	clearScriptSHA    string
 49 | }
 50 | 
 51 | // NewRedisStore creates a new RedisStore instance.
 52 | func NewRedisStore(ctx context.Context, redisURL, scriptPath string) (*RedisStore, error) {
 53 | 	opt, err := redis.ParseURL(redisURL)
 54 | 	if err != nil {
 55 | 		return nil, fmt.Errorf("ошибка парсинга Redis URL: %w", err)
 56 | 	}
 57 | 	client := redis.NewClient(opt)
 58 | 
 59 | 	if err := client.Ping(ctx).Err(); err != nil {
 60 | 		return nil, fmt.Errorf("ошибка подключения к Redis: %w", err)
 61 | 	}
 62 | 
 63 | 	// Load the check-and-add IP script from the file at scriptPath.
 64 | 	addCheckScript, err := os.ReadFile(scriptPath)
 65 | 	if err != nil {
 66 | 		return nil, fmt.Errorf("ошибка чтения Lua-скрипта '%s': %w", scriptPath, err)
 67 | 	}
 68 | 	addCheckScriptSHA, err := client.ScriptLoad(ctx, string(addCheckScript)).Result()
 69 | 	if err != nil {
 70 | 		return nil, fmt.Errorf("ошибка загрузки Lua-скрипта (add/check) в Redis: %w", err)
 71 | 	}
 72 | 
 73 | 	// Load the atomic-clear script from the package-level constant.
 74 | 	clearScriptSHA, err := client.ScriptLoad(ctx, clearUserIPsScript).Result()
 75 | 	if err != nil {
 76 | 		return nil, fmt.Errorf("ошибка загрузки Lua-скрипта (clear) в Redis: %w", err)
 77 | 	}
 78 | 
 79 | 	log.Println("Успешное подключение к Redis и загрузка Lua-скриптов.")
 80 | 	return &RedisStore{
 81 | 		client:            client,
 82 | 		addCheckScriptSHA: addCheckScriptSHA,
 83 | 		clearScriptSHA:    clearScriptSHA,
 84 | 	}, nil
 85 | }
 86 | 
 87 | // CheckAndAddIP runs the Lua script to atomically check and add an IP.
 88 | func (s *RedisStore) CheckAndAddIP(ctx context.Context, email, ip string, limit int, ttl, cooldown time.Duration) (*models.CheckResult, error) {
 89 | 	userIPsSetKey := fmt.Sprintf("user_ips:%s", email)
 90 | 	alertSentKey := fmt.Sprintf("alert_sent:%s", email)
 91 | 
 92 | 	args := []interface{}{
 93 | 		ip,
 94 | 		int(ttl.Seconds()),
 95 | 		limit,
 96 | 		int(cooldown.Seconds()),
 97 | 	}
 98 | 
 99 | 	result, err := s.client.EvalSha(ctx, s.addCheckScriptSHA, []string{userIPsSetKey, alertSentKey}, args...).Result()
 100 | 	if err != nil {
 101 | 		return nil, fmt.Errorf("ошибка выполнения Lua-скрипта для %s: %w", email, err)
 102 | 	}
 103 | 
 104 | 	resSlice, ok := result.([]interface{})
 105 | 	if !ok || len(resSlice) < 1 {
 106 | 		return nil, fmt.Errorf("неожиданный результат от Lua-скрипта для %s", email)
 107 | 	}
 108 | 
 109 | 	statusCode, _ := resSlice[0].(int64)
 110 | 	checkResult := &models.CheckResult{StatusCode: statusCode}
 111 | 
 112 | 	switch statusCode {
 113 | 	case 0: // OK
 114 | 		checkResult.CurrentIPCount, _ = resSlice[1].(int64)
 115 | 		isNew, _ := resSlice[2].(int64)
 116 | 		checkResult.IsNewIP = isNew == 1
 117 | 	case 1: // Limit exceeded, block
 118 | 		ipInterfaces, _ := resSlice[1].([]interface{})
 119 | 		for _, ipInt := range ipInterfaces {
 120 | 			if ipStr, ok := ipInt.(string); ok {
 121 | 				checkResult.AllUserIPs = append(checkResult.AllUserIPs, ipStr)
 122 | 			}
 123 | 		}
 124 | 		checkResult.CurrentIPCount = int64(len(checkResult.AllUserIPs))
 125 | 	case 2: // Limit exceeded, on cooldown
 126 | 		checkResult.CurrentIPCount, _ = resSlice[1].(int64)
 127 | 	}
 128 | 
 129 | 	return checkResult, nil
 130 | }
 131 | 
 132 | // ClearUserIPs atomically deletes all keys related to a user via a Lua script.
133 | func (s *RedisStore) ClearUserIPs(ctx context.Context, email string) (int, error) { 134 | userIpsKey := fmt.Sprintf("user_ips:%s", email) 135 | ipTtlPrefix := fmt.Sprintf("ip_ttl:%s", email) 136 | 137 | // Выполняем Lua-скрипт, который атомарно получает список IP и удаляет все связанные ключи. 138 | deleted, err := s.client.EvalSha(ctx, s.clearScriptSHA, []string{userIpsKey}, ipTtlPrefix).Int64() 139 | if err != nil { 140 | // Скрипт DEL не должен возвращать redis.Nil, но на всякий случай проверяем. 141 | if err == redis.Nil { 142 | return 0, nil 143 | } 144 | return 0, fmt.Errorf("ошибка выполнения Lua-скрипта (clear) для %s: %w", email, err) 145 | } 146 | 147 | return int(deleted), nil 148 | } 149 | 150 | // GetUserActiveIPs возвращает активные IP пользователя с их TTL. 151 | func (s *RedisStore) GetUserActiveIPs(ctx context.Context, userEmail string) (map[string]int, error) { 152 | userIpsKey := fmt.Sprintf("user_ips:%s", userEmail) 153 | ips, err := s.client.SMembers(ctx, userIpsKey).Result() 154 | if err != nil { 155 | return nil, err 156 | } 157 | if len(ips) == 0 { 158 | return make(map[string]int), nil 159 | } 160 | 161 | activeIPs := make(map[string]int) 162 | pipe := s.client.Pipeline() 163 | ttlResults := make(map[string]*redis.DurationCmd) 164 | 165 | for _, ip := range ips { 166 | ipTtlKey := fmt.Sprintf("ip_ttl:%s:%s", userEmail, ip) 167 | ttlResults[ip] = pipe.TTL(ctx, ipTtlKey) 168 | } 169 | _, err = pipe.Exec(ctx) 170 | if err != nil && err != redis.Nil { 171 | return nil, err 172 | } 173 | 174 | for ip, cmd := range ttlResults { 175 | ttl, err := cmd.Result() 176 | if err != nil || ttl <= 0 { 177 | continue 178 | } 179 | activeIPs[ip] = int(ttl.Seconds()) 180 | } 181 | return activeIPs, nil 182 | } 183 | 184 | // GetAllUserEmails сканирует ключи Redis для получения всех username (email) пользователей. 
185 | func (s *RedisStore) GetAllUserEmails(ctx context.Context) ([]string, error) { 186 | var cursor uint64 187 | var emails []string 188 | for { 189 | var keys []string 190 | var err error 191 | keys, cursor, err = s.client.Scan(ctx, cursor, "user_ips:*", 500).Result() 192 | if err != nil { 193 | return nil, fmt.Errorf("ошибка при сканировании ключей (SCAN): %w", err) 194 | } 195 | for _, key := range keys { 196 | parts := strings.SplitN(key, ":", 2) 197 | if len(parts) == 2 { 198 | emails = append(emails, parts[1]) 199 | } 200 | } 201 | if cursor == 0 { 202 | break 203 | } 204 | } 205 | return emails, nil 206 | } 207 | 208 | // HasAlertCooldown проверяет наличие ключа кулдауна для пользователя. 209 | func (s *RedisStore) HasAlertCooldown(ctx context.Context, userEmail string) (bool, error) { 210 | alertCooldownKey := fmt.Sprintf("alert_sent:%s", userEmail) 211 | res, err := s.client.Exists(ctx, alertCooldownKey).Result() 212 | if err != nil { 213 | return false, err 214 | } 215 | return res > 0, nil 216 | } 217 | 218 | // Ping проверяет соединение с Redis. 219 | func (s *RedisStore) Ping(ctx context.Context) error { 220 | return s.client.Ping(ctx).Err() 221 | } 222 | 223 | // Close закрывает соединение с Redis. 224 | func (s *RedisStore) Close() error { 225 | return s.client.Close() 226 | } -------------------------------------------------------------------------------- /observer/internal/processor/processor.go: -------------------------------------------------------------------------------- 1 | package processor 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "log" 7 | "observer_service/internal/config" 8 | "observer_service/internal/models" 9 | "observer_service/internal/services/alerter" 10 | "observer_service/internal/services/publisher" 11 | "observer_service/internal/services/storage" 12 | "sync" 13 | "time" 14 | ) 15 | 16 | // LogProcessor обрабатывает входящие логи. 
17 | type LogProcessor struct { 18 | storage storage.IPStorage 19 | publisher publisher.EventPublisher 20 | alerter alerter.Notifier 21 | cfg *config.Config 22 | logChannel chan []models.LogEntry // Канал для получения пачек логов 23 | sideEffectChannel chan func() // Канал для побочных задач (алерты, очистка) 24 | } 25 | 26 | // NewLogProcessor создает новый экземпляр LogProcessor. 27 | func NewLogProcessor(s storage.IPStorage, p publisher.EventPublisher, a alerter.Notifier, cfg *config.Config) *LogProcessor { 28 | return &LogProcessor{ 29 | storage: s, 30 | publisher: p, 31 | alerter: a, 32 | cfg: cfg, 33 | logChannel: make(chan []models.LogEntry, cfg.LogChannelBufferSize), 34 | sideEffectChannel: make(chan func(), cfg.SideEffectChannelBufferSize), 35 | } 36 | } 37 | 38 | // StartWorkerPool запускает пул горутин-воркеров для обработки логов. 39 | func (p *LogProcessor) StartWorkerPool(ctx context.Context, mainWg *sync.WaitGroup) { 40 | defer mainWg.Done() 41 | 42 | var workerWg sync.WaitGroup 43 | log.Printf("Запуск пула воркеров обработки логов в количестве %d...", p.cfg.WorkerPoolSize) 44 | 45 | for i := 0; i < p.cfg.WorkerPoolSize; i++ { 46 | workerWg.Add(1) 47 | go func(workerID int) { 48 | defer workerWg.Done() 49 | log.Printf("Воркер обработки логов %d запущен", workerID) 50 | for entries := range p.logChannel { 51 | p.ProcessEntries(ctx, entries) 52 | } 53 | log.Printf("Воркер обработки логов %d останавливается.", workerID) 54 | }(i + 1) 55 | } 56 | 57 | <-ctx.Done() 58 | log.Println("Получен сигнал остановки для воркеров обработки логов. Закрываю канал...") 59 | close(p.logChannel) 60 | workerWg.Wait() 61 | log.Println("Все воркеры обработки логов успешно остановлены.") 62 | } 63 | 64 | // StartSideEffectWorkerPool запускает пул воркеров для выполнения побочных задач. 
65 | func (p *LogProcessor) StartSideEffectWorkerPool(ctx context.Context, mainWg *sync.WaitGroup) { 66 | defer mainWg.Done() 67 | 68 | var workerWg sync.WaitGroup 69 | log.Printf("Запуск пула воркеров побочных задач в количестве %d...", p.cfg.SideEffectWorkerPoolSize) 70 | 71 | for i := 0; i < p.cfg.SideEffectWorkerPoolSize; i++ { 72 | workerWg.Add(1) 73 | go func(workerID int) { 74 | defer workerWg.Done() 75 | log.Printf("Воркер побочных задач %d запущен", workerID) 76 | for task := range p.sideEffectChannel { 77 | // Проверяем, не был ли контекст отменен перед выполнением задачи 78 | select { 79 | case <-ctx.Done(): 80 | log.Printf("Воркер побочных задач %d пропустил задачу из-за отмены контекста.", workerID) 81 | default: 82 | task() 83 | } 84 | } 85 | log.Printf("Воркер побочных задач %d останавливается.", workerID) 86 | }(i + 1) 87 | } 88 | 89 | <-ctx.Done() 90 | log.Println("Получен сигнал остановки для воркеров побочных задач. Закрываю канал...") 91 | close(p.sideEffectChannel) 92 | workerWg.Wait() 93 | log.Println("Все воркеры побочных задач успешно остановлены.") 94 | } 95 | 96 | // EnqueueEntries добавляет пачку логов в очередь на обработку. 97 | func (p *LogProcessor) EnqueueEntries(entries []models.LogEntry) error { 98 | defer func() { 99 | if r := recover(); r != nil { 100 | log.Println("Попытка записи в закрытый канал логов. Сервис находится в процессе остановки.") 101 | } 102 | }() 103 | 104 | select { 105 | case p.logChannel <- entries: 106 | return nil 107 | default: 108 | return errors.New("log channel is full, rejecting new entries") 109 | } 110 | } 111 | 112 | // enqueueSideEffectTask добавляет побочную задачу в очередь на выполнение. 113 | func (p *LogProcessor) enqueueSideEffectTask(task func()) { 114 | defer func() { 115 | if r := recover(); r != nil { 116 | log.Println("Попытка записи в закрытый канал побочных задач. 
Сервис находится в процессе остановки.") 117 | } 118 | }() 119 | 120 | select { 121 | case p.sideEffectChannel <- task: 122 | // Задача успешно добавлена в очередь 123 | default: 124 | log.Println("Warning: очередь побочных задач заполнена. Задача отброшена.") 125 | } 126 | } 127 | 128 | // ProcessEntries обрабатывает пачку записей логов. 129 | func (p *LogProcessor) ProcessEntries(ctx context.Context, entries []models.LogEntry) { 130 | for _, entry := range entries { 131 | select { 132 | case <-ctx.Done(): 133 | log.Printf("Обработка пачки прервана из-за отмены контекста: %v", ctx.Err()) 134 | return 135 | default: 136 | p.processSingleEntry(ctx, entry) 137 | } 138 | } 139 | } 140 | 141 | func (p *LogProcessor) processSingleEntry(ctx context.Context, entry models.LogEntry) { 142 | if p.cfg.ExcludedUsers[entry.UserEmail] { 143 | return // Пользователь в списке исключений 144 | } 145 | 146 | userIPLimit := p.getUserIPLimit(entry.UserEmail) 147 | debugMarker := p.getDebugMarker(entry.UserEmail) 148 | 149 | res, err := p.storage.CheckAndAddIP(ctx, entry.UserEmail, entry.SourceIP, userIPLimit, p.cfg.UserIPTTL, p.cfg.AlertCooldown) 150 | if err != nil { 151 | // Проверяем, не была ли ошибка вызвана отменой контекста 152 | if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { 153 | log.Printf("Операция CheckAndAddIP отменена для %s: %v", entry.UserEmail, err) 154 | } else { 155 | log.Printf("Ошибка обработки записи для %s: %v", entry.UserEmail, err) 156 | } 157 | return 158 | } 159 | 160 | if res.StatusCode == 0 && res.IsNewIP { 161 | log.Printf("Новый IP для пользователя %s%s: %s. 
Всего IP: %d/%d", 162 | entry.UserEmail, debugMarker, entry.SourceIP, res.CurrentIPCount, userIPLimit) 163 | } 164 | 165 | if res.StatusCode == 1 { // Лимит превышен, нужна блокировка 166 | log.Printf("ПРЕВЫШЕНИЕ ЛИМИТА%s: Пользователь %s, IP-адресов: %d/%d", 167 | debugMarker, entry.UserEmail, res.CurrentIPCount, userIPLimit) 168 | 169 | ipsToBlock := p.filterExcludedIPs(res.AllUserIPs, entry.UserEmail) 170 | 171 | if len(ipsToBlock) > 0 { 172 | if err := p.publisher.PublishBlockMessage(ipsToBlock, p.cfg.BlockDuration); err != nil { 173 | log.Printf("Ошибка отправки сообщения о блокировке: %v", err) 174 | } else { 175 | log.Printf("Сообщение о блокировке %d IP-адресов для %s%s отправлено", len(ipsToBlock), entry.UserEmail, debugMarker) 176 | p.enqueueSideEffectTask(func() { 177 | p.scheduleIPsClear(ctx, entry.UserEmail) 178 | }) 179 | } 180 | } 181 | 182 | alertPayload := models.AlertPayload{ 183 | UserIdentifier: entry.UserEmail, 184 | DetectedIPsCount: int(res.CurrentIPCount), 185 | Limit: userIPLimit, 186 | AllUserIPs: res.AllUserIPs, 187 | BlockDuration: p.cfg.BlockDuration, 188 | ViolationType: "ip_limit_exceeded", 189 | } 190 | p.enqueueSideEffectTask(func() { 191 | if err := p.alerter.SendAlert(alertPayload); err != nil { 192 | log.Printf("Ошибка отправки вебхук-уведомления: %v", err) 193 | } 194 | }) 195 | } 196 | } 197 | 198 | func (p *LogProcessor) getUserIPLimit(userEmail string) int { 199 | if p.cfg.DebugEmail != "" && userEmail == p.cfg.DebugEmail { 200 | return p.cfg.DebugIPLimit 201 | } 202 | return p.cfg.MaxIPsPerUser 203 | } 204 | 205 | func (p *LogProcessor) getDebugMarker(userEmail string) string { 206 | if p.cfg.DebugEmail != "" && userEmail == p.cfg.DebugEmail { 207 | return " [DEBUG]" 208 | } 209 | return "" 210 | } 211 | 212 | func (p *LogProcessor) filterExcludedIPs(ips []string, email string) []string { 213 | var filtered []string 214 | for _, ip := range ips { 215 | if p.cfg.ExcludedIPs[ip] { 216 | log.Printf("IP-адрес %s для пользователя 
%s пропущен, так как находится в списке исключений.", ip, email) 217 | continue 218 | } 219 | filtered = append(filtered, ip) 220 | } 221 | return filtered 222 | } 223 | 224 | func (p *LogProcessor) scheduleIPsClear(ctx context.Context, userEmail string) { 225 | log.Printf("Планирование отложенной очистки IP для %s через %v.", userEmail, p.cfg.ClearIPsDelay) 226 | 227 | time.AfterFunc(p.cfg.ClearIPsDelay, func() { 228 | if ctx.Err() != nil { 229 | log.Printf("Отложенная очистка IP для %s отменена из-за остановки сервиса.", userEmail) 230 | return 231 | } 232 | 233 | opCtx, cancel := context.WithTimeout(ctx, 10*time.Second) 234 | defer cancel() 235 | 236 | cleared, err := p.storage.ClearUserIPs(opCtx, userEmail) 237 | if err != nil { 238 | if errors.Is(err, context.Canceled) { 239 | log.Printf("Отложенная очистка IP для %s отменена из-за остановки сервиса во время выполнения.", userEmail) 240 | } else if errors.Is(err, context.DeadlineExceeded) { 241 | log.Printf("Таймаут при отложенной очистке IP для %s.", userEmail) 242 | } else { 243 | log.Printf("Ошибка при отложенной очистке IP для %s: %v", userEmail, err) 244 | } 245 | return 246 | } 247 | log.Printf("Отложенная очистка IP для %s%s выполнена. 
Очищено ключей: %d", 248 | userEmail, p.getDebugMarker(userEmail), cleared) 249 | }) 250 | } -------------------------------------------------------------------------------- /observer/go.sum: -------------------------------------------------------------------------------- 1 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= 2 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 3 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= 4 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 5 | github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= 6 | github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= 7 | github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= 8 | github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= 9 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 10 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 11 | github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= 12 | github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= 13 | github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= 14 | github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= 15 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 16 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 17 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 18 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 19 | github.com/dgryski/go-rendezvous 
v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 20 | github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= 21 | github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= 22 | github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= 23 | github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= 24 | github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= 25 | github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= 26 | github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= 27 | github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= 28 | github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= 29 | github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= 30 | github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= 31 | github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= 32 | github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= 33 | github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= 34 | github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= 35 | github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= 36 | github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= 37 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 38 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 39 | github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 40 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 41 | github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= 42 | github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= 43 | github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= 44 | github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= 45 | github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= 46 | github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= 47 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 48 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 49 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 50 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 51 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 52 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 53 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 54 | github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= 55 | github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= 56 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 57 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 58 | github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= 59 | github.com/rabbitmq/amqp091-go v1.10.0/go.mod 
h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= 60 | github.com/redis/go-redis/v9 v9.11.0 h1:E3S08Gl/nJNn5vkxd2i78wZxWAPNZgUNTp8WIJUAiIs= 61 | github.com/redis/go-redis/v9 v9.11.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= 62 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 63 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 64 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 65 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 66 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 67 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 68 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 69 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 70 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 71 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 72 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 73 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 74 | github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= 75 | github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= 76 | github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= 77 | github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= 78 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 79 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 80 | golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod 
h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= 81 | golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= 82 | golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= 83 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 84 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= 85 | golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= 86 | golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 87 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 88 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 89 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 90 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 91 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 92 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 93 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= 94 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 95 | google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= 96 | google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= 97 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 98 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 99 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 100 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 101 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 102 | nullprogram.com/x/optparse v1.0.0/go.mod 
h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= 103 | rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= 104 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Remnawave Observer: Модуль для борьбы с совместным использованием подписок 2 | 3 | ![Go](https://img.shields.io/badge/Go-1.24-00ADD8?style=for-the-badge&logo=go) 4 | ![Docker](https://img.shields.io/badge/Docker-28.0-2496ED?style=for-the-badge&logo=docker) 5 | ![RabbitMQ](https://img.shields.io/badge/RabbitMQ-4.1-FF6600?style=for-the-badge&logo=rabbitmq) 6 | ![Redis](https://img.shields.io/badge/Redis-8.2-DC382D?style=for-the-badge&logo=redis) 7 | ![nftables](https://img.shields.io/badge/nftables-1.0.9-orange?style=for-the-badge) 8 | 9 | > [!IMPORTANT] 10 | > **AI-Generated Project / Proof of Concept** 11 | > 12 | > Этот проект является **Proof of Concept (PoC)** и был полностью разработан с использованием искусственного интеллекта. 13 | > 14 | > Весь исходный код, архитектурные решения и документация были сгенерированы моделью **LLM Gemini 2.5 Pro** (`Gemini 2.5 Pro Preview 06-05`). 15 | > Роль автора заключалась в проектировании архитектуры, системном дизайне (DevOps) и промпт-инжиниринге. Ручное написание кода на Go не производилось. 16 | > Используйте в продакшене с осторожностью и предварительным тестированием. 17 | 18 | ## Краткое описание 19 | 20 | **Remnawave Observer** — это внешний модуль для расширения функционала панели управления Remnawave. Его основная задача — автоматически обнаруживать и блокировать пользователей, которые делятся своей подпиской с другими людьми, передавая им доступ. Система отслеживает, с какого количества уникальных IP-адресов подключается каждый пользователь, и при превышении установленного лимита временно блокирует доступ для всех IP-адресов этого пользователя. 
21 | 22 | Это помогает защитить ваш сервис от несанкционированного использования и потери дохода. 23 | 24 | ## Проблема 25 | 26 | Когда пользователь покупает подписку на одного человека, а затем делится доступом с друзьями, семьей или продает его третьим лицам, сервис теряет потенциальных клиентов и, соответственно, доход. Ручное отслеживание таких нарушителей практически невозможно и требует огромных временных затрат. 27 | 28 | ## Решение 29 | 30 | Remnawave Observer автоматизирует этот процесс. Он работает в фоновом режиме, анализируя подключения пользователей к вашим серверам (нодам) и принимая меры в реальном времени. 31 | 32 | ## Как это работает? 33 | 34 | Система состоит из двух основных частей: центрального **сервера-наблюдателя (Observer)** и **агентов-блокировщиков (Blocker)**, установленных на каждой вашей ноде (сервере с Xray). 35 | 36 | Вот пошаговая схема работы: 37 | 38 | 1. **Сбор данных**: Пользователь подключается к одной из ваших `remnanode` (серверу с Xray). Xray записывает в лог-файл информацию о подключении: время, IP-адрес и `email` пользователя. 39 | 2. **Парсинг логов**: На каждой ноде работает легкий сервис `Vector`. Он непрерывно читает лог-файл Xray, извлекает из него только нужную информацию (`email` и `IP-адрес`) и отправляет эти данные на центральный сервер-наблюдатель. 40 | 3. **Анализ и подсчет**: Центральный сервер `Observer` получает данные от всех нод. Он использует базу данных `Redis` для ведения учета: для каждого пользователя (`email`) он хранит список уникальных IP-адресов, с которых были подключения. Система умная: даже если у вас 10 нод и пользователь переключается между ними, `Observer` не создаст дубликатов и будет вести единый счетчик IP для каждого пользователя. 41 | 4. **Принятие решения**: `Observer` постоянно сравнивает количество IP-адресов пользователя с установленным лимитом (например, 12 IP). 42 | 5. 
**Команда на блокировку**: Как только лимит превышен, `Observer` отправляет команду на блокировку через брокер сообщений `RabbitMQ`. Эта команда содержит список всех IP-адресов нарушителя. 43 | 6. **Исполнение блокировки**: Агент `Blocker` на *каждой* вашей ноде получает эту команду. Он немедленно добавляет все IP-адреса из команды в специальный "черный список" в `nftables` (современный файрвол в Linux). 44 | 7. **Результат**: `nftables` на всех нодах начинает блокировать любой трафик от этих IP-адресов на определенное время. Пользователь и те, с кем он поделился доступом, теряют подключение. 45 | 46 | ### Визуальная схема архитектуры 47 | 48 | ``` 49 | [ Пользователь 1 (IP: A) ] ----> [ Remnanode 1 ] 50 | | 51 | [ Пользователь 1 (IP: B) ] ----> [ Remnanode 2 ] 52 | | 53 | [ Пользователь 1 (IP: C) ] ----> [ Remnanode 3 ] 54 | | 55 | V 56 | +-----------------------------------------------------------------------------+ 57 | | На каждой Remnanode: | 58 | | 1. Xray (пишет в access.log) | 59 | | 2. Vector (парсит лог -> {email, ip}) ----> [ Nginx на сервере Observer ] | 60 | | 3. Blocker (слушает команды от RabbitMQ) | 61 | | 4. nftables (файрвол с черным списком `user_blacklist`) | 62 | +-----------------------------------------------------------------------------+ 63 | | 64 | V 65 | +-------------------------------------------------------------------------+ 66 | | Центральный сервер Observer: | 67 | | 1. Nginx (принимает данные от Vector'ов) | 68 | | 2. Vector Aggregator (передает данные в Observer Service) | 69 | | 3. Observer Service (главная логика) | 70 | | |-> [ Redis ] (хранит: user -> {IP1, IP2, ...}) | 71 | | | | 72 | | +-- (Если лимит превышен) --> [ RabbitMQ ] (отправляет команду) | 73 | | | | 74 | | 4. RabbitMQ (брокер сообщений) -------+--> (Команда всем Blocker'ам) | 75 | +-------------------------------------------------------------------------+ 76 | ``` 77 | 78 | ## Системные требования 79 | 80 | ### 1. 
Сервер для Observer (панель) 81 | 82 | * **ОС**: Debian 12 83 | * **RAM**: 2 GB 84 | * **CPU**: 1 vCPU 85 | 86 | ### 2. Сервер для Remnanode (каждая нода с Xray) 87 | 88 | * **ОС**: Debian 12 89 | * **RAM**: 1 GB 90 | * **CPU**: 1 vCPU 91 | * **Зависимости**: Установленный и включенный пакет `nftables`. 92 | 93 | ## Установка и настройка 94 | 95 | ### Шаг 1: Настройка сервера Observer (центральная панель) 96 | 97 | 1. Подключитесь к серверу, где будет работать Observer. 98 | 2. Установите `docker` и `docker-compose`. 99 | 3. Скопируйте папку `observer_conf` на сервер (например, через `scp` или `git clone`). 100 | ```bash 101 | # Пример, если вы клонировали весь репозиторий: 102 | cd *repo*/observer_conf 103 | ``` 104 | 4. Создайте и отредактируйте файл с переменными окружения: 105 | ```bash 106 | cp .env.example .env 107 | vim .env 108 | ``` 109 | Заполните переменные: 110 | * `MAX_IPS_PER_USER`: Рекомендуемое значение **12** или выше. Это позволяет пользователям без проблем переключаться между домашним Wi-Fi, мобильным интернетом (LTE), рабочим Wi-Fi и т.д. 111 | * `ALERT_WEBHOOK_URL`: URL для отправки уведомлений (например, в Telegram shop bot). 112 | * `EXCLUDED_USERS`: Username пользователей через запятую, которых не нужно проверять (например, `self,13_12143423`). 113 | * `EXCLUDED_IPS`: **ВАЖНО!** IP-адреса, которые никогда не будут заблокированы. Укажите здесь IP-адреса всех ваших нод и сервера Observer, чтобы избежать случайной блокировки (например, `8.8.8.8,1.1.1.1,192.168.1.1`). 
114 | * `BLOCK_DURATION`: Укажите время блокировки для пользователя в минутах (по умолчанию 5 минут), например: `1m` 115 | * `USER_IP_TTL_SECONDS`: Укажите TTL — как долго живёт «отпечаток» IP-адреса пользователя до его удаления из Redis (рекомендуется `86400`, т.е. 24 часа) 116 | * `CLEAR_IPS_DELAY_SECONDS`: Задержка перед удалением отпечатков IP-адресов пользователя из Redis после применения блокировки (рекомендуется `30` секунд) 117 | * `RABBIT_USER`, `RABBIT_PASSWD`: Создайте надежные логин и пароль для RabbitMQ. 118 | * `RABBITMQ_URL`: Сформируйте URL для **внутреннего** использования сервисом Observer. Пример: `amqp://myuser:mypassword@rabbitmq:5672/`. 119 | 120 | 5. Настройте Nginx. Вам понадобится домен и SSL-сертификат (например, от Let's Encrypt). 121 | ```bash 122 | vim nginx.conf 123 | ``` 124 | Замените все вхождения `HEAD_DOMAIN` на ваш реальный домен. Убедитесь, что SSL-сертификаты (`fullchain.pem` и `privkey.pem`) находятся по указанным путям `/etc/letsencrypt/live/HEAD_DOMAIN/`. 125 | 126 | 6. **Важно!** Для подключения удаленных нод к RabbitMQ необходимо использовать безопасное соединение. Убедитесь, что порт `5671` (стандартный для AMQPS) вашего сервера Observer доступен извне. Вам может потребоваться дополнительная настройка прокси или самого RabbitMQ для работы с SSL. 127 | 128 | 7. Запустите все сервисы: 129 | ```bash 130 | docker-compose up -d 131 | ``` 132 | 133 | ### Шаг 2: Настройка каждой Remnanode (ноды с Xray) 134 | 135 | На *каждом* сервере, где работает Xray, выполните следующие действия. 136 | 137 | #### 2.1. Настройка файрвола `nftables` 138 | 139 | Это **критически важный** шаг. `Blocker` жестко запрограммирован на работу с конкретным набором правил в `nftables`. 140 | 141 | **!! Убедитесь, что у вас отключены другие файрволы, такие как ufw или iptables !!** 142 | 143 | 1. Установите `nftables`: 144 | ```bash 145 | apt update && apt install nftables -y 146 | ``` 147 | 148 | 2.
Создайте и откройте файл конфигурации: 149 | ```bash 150 | vim /etc/nftables.conf 151 | ``` 152 | 153 | 3. Скопируйте в него всё содержимое из файла `nftables_example.conf`, который находится в корне репозитория. 154 | 155 | 4. **Отредактируйте файл `/etc/nftables.conf`**: 156 | * Найдите строку `define SSH_PORT = 22` и **обязательно** измените порт, если вы используете нестандартный SSH-порт для подключения к ноде. Например, если ваш SSH порт 6666, измените на `define SSH_PORT = 6666`. 157 | * Найдите секцию `set control_plane_sources` и в `elements = { IP_ADRESS }` впишите IP-адрес вашего сервера **Observer** и панели управления **Remnawave**. 158 | * Найдите секцию `set monitoring_sources` и в `elements = { IP_ADRESS }` впишите IP-адрес вашего сервера мониторинга, если он есть. 159 | 160 | 5. Примените правила и добавьте `nftables` в автозагрузку: 161 | ```bash 162 | # Применяем правила из файла 163 | nft -f /etc/nftables.conf 164 | 165 | # Включаем сервис и добавляем в автозагрузку, чтобы правила применялись после перезагрузки 166 | systemctl enable --now nftables 167 | ``` 168 | **Внимание!** `Blocker` будет добавлять IP-адреса в набор `set user_blacklist` в таблице `table inet firewall`. Не изменяйте эти имена, иначе блокировка работать не будет. 169 | 170 | #### 2.2. Интеграция Blocker и Vector в Remnanode 171 | 172 | Сервисы `blocker-xray` и `vector` должны быть добавлены в ваш существующий `docker-compose.yml` файл, который управляет `remnanode` (обычно находится в `/opt/remnanode/`). 173 | 174 | 1. Перейдите в рабочую директорию вашей ноды: 175 | ```bash 176 | cd /opt/remnanode/ 177 | ``` 178 | 179 | 2. Создайте файл `.env` для хранения учетных данных RabbitMQ: 180 | ```bash 181 | vim .env 182 | ``` 183 | Добавьте в него следующую строку, заменив значения на ваши: 184 | ``` 185 | # Используйте протокол amqps для безопасного соединения 186 | RABBITMQ_URL=amqps://ВАШ_RABBIT_USER:ВАШ_RABBIT_PASSWD@ВАШ_ДОМЕН_OBSERVER:5671/ 187 | ``` 188 | 189 | 3. 
Скопируйте конфигурацию для Vector в текущую директорию. Предполагается, что вы скачали репозиторий. 190 | ```bash 191 | # Скопируйте файл из скачанного репозитория в текущую папку 192 | cp /path/to/remnawave-observer-main/blocker_conf/vector/vector.toml ./vector.toml 193 | ``` 194 | 195 | 4. Отредактируйте скопированный файл `vector.toml`: 196 | ```bash 197 | vim vector.toml 198 | ``` 199 | Найдите строку `uri = "https://HEAD_DOMAIN:38213/"` и замените `HEAD_DOMAIN` на домен вашего сервера Observer. 200 | 201 | 5. Откройте ваш основной файл `docker-compose.yml` (например, `/opt/remnanode/docker-compose.yml`). 202 | 203 | 6. Добавьте сервисы `blocker-xray` и `vector` в конец этого файла. Убедитесь, что отступы соответствуют синтаксису YAML. 204 | 205 | ```yaml 206 | # ... ваш сервис remnanode и другие сервисы ... 207 | 208 | blocker-xray: 209 | container_name: blocker-xray 210 | hostname: blocker-xray 211 | image: quay.io/0fl01/blocker-xray-go:0.0.6 212 | restart: unless-stopped 213 | network_mode: host 214 | logging: 215 | driver: "json-file" 216 | options: 217 | max-size: "8m" 218 | max-file: "5" 219 | env_file: 220 | - .env 221 | cap_add: 222 | - NET_ADMIN 223 | - NET_RAW 224 | depends_on: 225 | - remnanode 226 | deploy: 227 | resources: 228 | limits: 229 | memory: 64M 230 | cpus: '0.25' 231 | reservations: 232 | memory: 32M 233 | cpus: '0.10' 234 | 235 | vector: 236 | image: timberio/vector:0.48.0-alpine 237 | container_name: vector 238 | hostname: vector 239 | restart: unless-stopped 240 | network_mode: host 241 | command: ["--config", "/etc/vector/vector.toml"] 242 | depends_on: 243 | - remnanode 244 | volumes: 245 | # Путь к файлу vector.toml, который вы создали на шаге 4 246 | - ./vector.toml:/etc/vector/vector.toml:ro 247 | # Путь к логам remnanode, должен совпадать с тем, что в сервисе remnanode 248 | - /var/log/remnanode:/var/log/remnanode:ro 249 | logging: 250 | driver: "json-file" 251 | options: 252 | max-size: "8m" 253 | max-file: "3" 254 | deploy: 
255 | resources: 256 | limits: 257 | memory: 128M 258 | cpus: '0.25' 259 | reservations: 260 | memory: 64M 261 | cpus: '0.10' 262 | ``` 263 | 264 | 7. Убедитесь, что ваш сервис `remnanode` в этом же `docker-compose.yml` имеет volume для логов, как в примере: `volumes: - /var/log/remnanode:/var/log/remnanode`. 265 | 266 | 8. Перезапустите все сервисы, чтобы применить изменения: 267 | ```bash 268 | docker-compose up -d 269 | ``` 270 | 271 | Повторите **Шаг 2** для всех ваших нод. 272 | 273 | ## Совместимость 274 | 275 | * **Debian 12**: Это основная и полностью поддерживаемая операционная система. Вся разработка и тестирование велись именно на ней. 276 | * **Ubuntu 24.04**: Теоретически, система должна работать, так как она использует Docker и стандартные утилиты Linux (такие как `nftables`). Однако полная совместимость не гарантируется, и могут возникнуть непредвиденные проблемы или различия в поведении. **Настоятельно рекомендуется использовать Debian 12 для стабильной и предсказуемой работы.** 277 | 278 | 279 | --- 280 | 281 | 282 | ### Дополнительные возможности: Уведомления в Telegram через Webhook 283 | 284 | Remnawave Observer не только блокирует нарушителей, но и может информировать вас об этих событиях в реальном времени через вебхуки. Это особенно полезно для интеграции с вашим Telegram-ботом, который, например, управляет продажей подписок. 285 | 286 | #### Как это работает? 287 | 288 | 1. Когда `Observer` обнаруживает, что пользователь превысил лимит IP-адресов, он немедленно отправляет `POST`-запрос на URL, который вы указали в конфигурации. 289 | 2. Этот запрос содержит JSON-тело со всей информацией о нарушении. 
290 | 291 | **Структура JSON-уведомления (`AlertPayload`):** 292 | 293 | ```json 294 | { 295 | "user_identifier": "54_217217281", 296 | "detected_ips_count": 13, 297 | "limit": 12, 298 | "all_user_ips": [ 299 | "1.1.1.1", 300 | "2.2.2.2", 301 | "3.3.3.3" 302 | ], 303 | "block_duration": "5m", 304 | "violation_type": "ip_limit_exceeded" 305 | } 306 | ``` 307 | 308 | * `user_identifier`: Идентификатор пользователя (его username). 309 | * `detected_ips_count`: Общее количество уникальных IP, обнаруженных у пользователя. 310 | * `limit`: Установленный лимит IP для этого пользователя. 311 | * `all_user_ips`: Список всех IP-адресов, которые были заблокированы. 312 | * `block_duration`: На какой срок была применена блокировка. 313 | * `violation_type`: Тип нарушения. 314 | 315 | #### Как подключить к вашему Telegram-боту? 316 | 317 | Предположим, у вас есть бот для продажи подписок. Вы можете научить его принимать эти вебхуки и реагировать на них. 318 | 319 | **Шаг 1: Создайте эндпоинт (endpoint) в вашем боте** 320 | 321 | Ваш бот должен "слушать" входящие POST-запросы по определенному адресу. Например, `https://bot.yourdomain.com/webhook/alert`. Этот адрес вы и укажете в `ALERT_WEBHOOK_URL`. 322 | 323 | * В коде вашего бота (на Python, Go, Node.js и т.д.) создайте обработчик для этого маршрута, который будет принимать `POST`-запросы. 324 | 325 | **Шаг 2: Настройте Observer** 326 | 327 | В файле `observer_conf/.env` на вашем сервере Observer укажите URL из предыдущего шага: 328 | ``` 329 | ALERT_WEBHOOK_URL=https://bot.yourdomain.com/webhook/alert 330 | ``` 331 | **Совет по безопасности:** Чтобы никто другой не мог отправлять фейковые уведомления, используйте секретный ключ в URL: 332 | `ALERT_WEBHOOK_URL=https://bot.yourdomain.com/webhook/alert/a1b2c3d4-e5f6-7890-g1h2-i3j4k5l6m7n8` 333 | 334 | **Шаг 3: Обработайте данные в боте** 335 | 336 | Когда бот получает вебхук, его код должен: 337 | 1. Прочитать и распарсить JSON-тело запроса. 338 | 2. 
Извлечь из него нужные данные (`user_identifier`, `detected_ips_count` и т.д.). 339 | 3. Сформировать сообщение для администратора и отправить его в нужный чат. 340 | 341 | **Пример сообщения, которое может сформировать ваш бот:** 342 | 343 | > 🚨 **Обнаружено нарушение!** 344 | > 345 | > **Пользователь:** `15_327832732` 346 | > **Превышен лимит IP-адресов:** **13 / 12** 347 | > 348 | > Все IP-адреса пользователя заблокированы на **5m**. 349 | 350 | **Шаг 4: (Опционально) Интеграция с логикой бота** 351 | 352 | Используя `user_identifier` (email), вы можете связать это событие с базой данных пользователей вашего бота и выполнить дополнительные действия: 353 | * Пометить пользователя в вашей системе как "нарушителя". 354 | * Отправить предупреждение самому пользователю через бота. 355 | --- 356 | 357 | ### Детальная настройка и принцип работы связки Nginx + Vector 358 | 359 | Чтобы система работала надежно и безопасно, данные от каждой `remnanode` к центральному серверу `Observer` должны передаваться по зашифрованному каналу. Для этого мы используем связку из Nginx (в качестве реверс-прокси с SSL) и Vector. 360 | 361 | **Общая схема потока данных:** 362 | 363 | `Vector (на Remnanode)` -> `Интернет (HTTPS)` -> `Nginx (на Observer)` -> `Vector-агрегатор (на Observer)` -> `Observer Service` 364 | 365 | #### На сервере Observer 366 | 367 | На центральном сервере Nginx выполняет две ключевые функции: 368 | 1. **Терминирование SSL**: Принимает зашифрованный трафик от нод, расшифровывает его с помощью вашего SSL-сертификата. 369 | 2. **Проксирование**: Передает уже расшифрованный, "чистый" HTTP-трафик на сервис `vector-aggregator`, который работает внутри Docker-сети и не доступен извне напрямую. 370 | 371 | ##### 1. 
Конфигурация docker compose (`observer_conf/docker-compose.yml`) 372 | 373 | ```yml 374 | services: 375 | observer-remna: 376 | container_name: observer-remna 377 | image: quay.io/0fl01/observer-xray-go:0.0.19 378 | restart: unless-stopped 379 | expose: 380 | - "9000" 381 | env_file: 382 | - .env 383 | depends_on: 384 | rabbitmq-obs: 385 | condition: service_healthy 386 | vector-aggregator: 387 | condition: service_started 388 | redis-obs: 389 | condition: service_started 390 | networks: 391 | - observer-net 392 | - remnawave-network 393 | logging: 394 | driver: json-file 395 | options: 396 | max-size: "8m" 397 | max-file: "3" 398 | 399 | rabbitmq-obs: 400 | image: rabbitmq:4.1.2-alpine 401 | container_name: rabbitmq-obs 402 | restart: unless-stopped 403 | expose: 404 | - "5672" 405 | - "15672" 406 | volumes: 407 | - rabbitmq-obs-data:/var/lib/rabbitmq 408 | environment: 409 | - RABBITMQ_DEFAULT_USER=chumba 410 | - RABBITMQ_DEFAULT_PASS=${RABBIT_PASSWD} 411 | networks: 412 | - observer-net 413 | healthcheck: 414 | test: ["CMD", "rabbitmq-diagnostics", "ping"] 415 | interval: 10s 416 | timeout: 5s 417 | retries: 5 418 | start_period: 20s 419 | 420 | vector-aggregator: 421 | image: timberio/vector:0.48.0-alpine 422 | container_name: vector-aggregator 423 | restart: unless-stopped 424 | volumes: 425 | - ./vector.toml:/etc/vector/vector.toml:ro 426 | expose: 427 | - "8686" 428 | command: ["--config", "/etc/vector/vector.toml"] 429 | networks: 430 | - observer-net 431 | logging: 432 | driver: json-file 433 | options: 434 | max-size: "8m" 435 | max-file: "3" 436 | 437 | nginx-obs: 438 | image: nginx:mainline-alpine 439 | container_name: nginx-obs 440 | restart: unless-stopped 441 | ports: 442 | - "38213:38213" # Vector HTTPS 443 | - "38214:38214" # RabbitMQ AMQP SSL 444 | volumes: 445 | - ./nginx.conf:/etc/nginx/nginx.conf:ro 446 | - /etc/letsencrypt:/etc/letsencrypt:ro 447 | depends_on: 448 | vector-aggregator: 449 | condition: service_started 450 | rabbitmq-obs: 451 | 
condition: service_healthy 452 | networks: 453 | - observer-net 454 | logging: 455 | driver: json-file 456 | options: 457 | max-size: "8m" 458 | max-file: "3" 459 | 460 | redis-obs: 461 | image: redis:8.2-m01-alpine3.22 462 | container_name: redis-obs 463 | restart: unless-stopped 464 | expose: 465 | - "6379" 466 | volumes: 467 | - redis-obs-data:/data 468 | networks: 469 | - observer-net 470 | logging: 471 | driver: json-file 472 | options: 473 | max-size: "8m" 474 | max-file: "3" 475 | 476 | networks: 477 | observer-net: 478 | driver: bridge 479 | remnawave-network: 480 | external: true 481 | 482 | volumes: 483 | redis-obs-data: 484 | rabbitmq-obs-data: 485 | ``` 486 | 487 | ##### 2. Конфигурация Nginx (`observer_conf/nginx.conf`) 488 | 489 | ```nginx 490 | user nginx; 491 | worker_processes auto; 492 | pid /var/run/nginx.pid; 493 | include /etc/nginx/modules-enabled/*.conf; 494 | 495 | events { 496 | worker_connections 1024; 497 | } 498 | 499 | # Stream блок для AMQP с SSL 500 | stream { 501 | # AMQP порт с SSL терминацией 502 | server { 503 | listen 38214 ssl; 504 | 505 | ssl_certificate /etc/letsencrypt/live/HEAD_DOMAIN/fullchain.pem; 506 | ssl_certificate_key /etc/letsencrypt/live/HEAD_DOMAIN/privkey.pem; 507 | ssl_protocols TLSv1.2 TLSv1.3; 508 | ssl_ciphers 'TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384'; 509 | ssl_prefer_server_ciphers off; 510 | ssl_session_cache shared:SSL_STREAM:10m; 511 | ssl_session_timeout 10m; 512 | ssl_handshake_timeout 10s; 513 | 514 | proxy_pass rabbitmq-obs:5672; 515 | proxy_timeout 300s; 516 | proxy_connect_timeout 5s; 517 | } 518 | } 519 | 520 | # HTTP блок для веб-интерфейсов 521 | http { 522 | include /etc/nginx/mime.types; 523 | default_type application/octet-stream; 524 | sendfile on; 525 | keepalive_timeout 65; 526 | 527 | # Логирование 528 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 529 | '$status 
$body_bytes_sent "$http_referer" ' 530 | '"$http_user_agent" "$http_x_forwarded_for"'; 531 | 532 | access_log /var/log/nginx/access.log main; 533 | error_log /var/log/nginx/error.log warn; 534 | 535 | # Vector на порту 38213 536 | server { 537 | listen 38213 ssl; 538 | http2 on; 539 | server_name HEAD_DOMAIN; 540 | 541 | ssl_certificate /etc/letsencrypt/live/HEAD_DOMAIN/fullchain.pem; 542 | ssl_certificate_key /etc/letsencrypt/live/HEAD_DOMAIN/privkey.pem; 543 | ssl_protocols TLSv1.2 TLSv1.3; 544 | ssl_ciphers 'TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384'; 545 | ssl_prefer_server_ciphers off; 546 | ssl_session_cache shared:SSL_HTTP:10m; 547 | ssl_session_timeout 10m; 548 | 549 | add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; 550 | 551 | location / { 552 | proxy_pass http://vector-aggregator:8686; 553 | proxy_set_header Host $host; 554 | proxy_set_header X-Real-IP $remote_addr; 555 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 556 | proxy_set_header X-Forwarded-Proto $scheme; 557 | proxy_redirect off; 558 | } 559 | } 560 | 561 | # RabbitMQ Management UI на порту 38215 562 | server { 563 | listen 38215 ssl; 564 | http2 on; 565 | server_name HEAD_DOMAIN; 566 | 567 | ssl_certificate /etc/letsencrypt/live/HEAD_DOMAIN/fullchain.pem; 568 | ssl_certificate_key /etc/letsencrypt/live/HEAD_DOMAIN/privkey.pem; 569 | ssl_protocols TLSv1.2 TLSv1.3; 570 | ssl_ciphers 'TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384'; 571 | ssl_prefer_server_ciphers off; 572 | ssl_session_cache shared:SSL_HTTP:10m; 573 | ssl_session_timeout 10m; 574 | 575 | add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; 576 | 577 | location / { 578 | proxy_pass http://rabbitmq-obs:15672; 579 | proxy_set_header Host $host; 580 | proxy_set_header X-Real-IP 
$remote_addr; 581 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 582 | proxy_set_header X-Forwarded-Proto $scheme; 583 | proxy_redirect off; 584 | 585 | # Поддержка WebSocket для RabbitMQ Management 586 | proxy_http_version 1.1; 587 | proxy_set_header Upgrade $http_upgrade; 588 | proxy_set_header Connection "upgrade"; 589 | proxy_cache_bypass $http_upgrade; 590 | } 591 | } 592 | } 593 | ``` 594 | 595 | ##### 3. Конфигурация Vector-агрегатора (`observer_conf/vector.toml`) 596 | 597 | Этот Vector работает как приемник данных от Nginx и передатчик для основного сервиса `observer`. 598 | 599 | ```toml 600 | # Источник: принимаем данные по HTTP от Nginx. 601 | [sources.http_receiver] 602 | type = "http" 603 | # Слушаем на всех интерфейсах ВНУТРИ Docker-сети на порту 8686 604 | # Именно сюда Nginx и отправляет трафик 605 | address = "0.0.0.0:8686" 606 | decoding.codec = "json" 607 | 608 | # Назначение: отправляем полученные данные в сервис-наблюдатель. 609 | [sinks.observer_service] 610 | type = "http" 611 | inputs = ["http_receiver"] 612 | # Используем имя сервиса 'observer' из docker-compose и его внутренний порт 613 | # Это уже внутренняя коммуникация между контейнерами 614 | uri = "http://observer:9000/log-entry" 615 | method = "post" 616 | encoding.codec = "json" 617 | 618 | batch.max_events = 100 619 | batch.timeout_secs = 5 620 | 621 | ``` 622 | 623 | #### На каждой ноде (Remnanode) 624 | 625 | На каждой ноде Vector работает как агент: он читает локальные логи и отправляет их на защищенный публичный адрес вашего сервера Observer. 626 | 627 | ##### 4. Конфигурация Vector-агента (`blocker_conf/vector/vector.toml`) 628 | 629 | ```toml 630 | # Источник данных: читаем access.log из директории remnanode. 631 | [sources.xray_access_logs] 632 | type = "file" 633 | include = ["/var/log/remnanode/access.log"] 634 | read_from = "end" 635 | 636 | # Трансформация: парсим каждую строку лога, чтобы извлечь email и IP. 
637 | [transforms.parse_xray_log] 638 | type = "remap" 639 | inputs = ["xray_access_logs"] 640 | source = ''' 641 | pattern = r'from (tcp:)?(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):\d+.*? email: (?P<email>\S+)' 642 | parsed, err = parse_regex(.message, pattern) 643 | if err != null { 644 | log("Не удалось распарсить строку лога: " + err, level: "warn") 645 | abort 646 | } 647 | . = { "user_email": parsed.email, "source_ip": parsed.ip, "timestamp": to_string(now()) } 648 | ''' 649 | 650 | # Назначение: отправляем обработанные данные на наш центральный сервер-наблюдатель. 651 | [sinks.central_observer_api] 652 | type = "http" 653 | inputs = ["parse_xray_log"] 654 | # ВАЖНО: Указываем HTTPS и ваш домен! 655 | # Vector на ноде обращается к публичному адресу сервера Observer. 656 | # Запрос сначала попадает на Nginx, который слушает порт 38213 с SSL (в docker-compose он проброшен как 38213:38213). 657 | # Поэтому здесь мы указываем внешний порт 38213. 658 | uri = "https://HEAD_DOMAIN:38213/" 659 | method = "post" 660 | encoding.codec = "json" 661 | compression = "gzip" 662 | 663 | [sinks.central_observer_api.batch] 664 | max_events = 100 665 | timeout_secs = 5 666 | 667 | [sinks.central_observer_api.request] 668 | retry_attempts = 5 669 | retry_backoff_secs = 2 670 | 671 | [sinks.central_observer_api.tls] 672 | ``` 673 | 674 | 675 | --- 676 | 677 | ## Связь с автором 678 | 679 | Если у вас возникли вопросы, предложения по улучшению или вы нашли ошибку в работе модуля, вы можете связаться со мной напрямую. 680 | 681 | Я открыт для обсуждения: 682 | - Вопросов по установке и настройке. 683 | - Предложений по новому функционалу. 684 | - Сообщений о багах и ошибках. 685 | - Вопросов сотрудничества. 
686 | 687 | [![Telegram](https://img.shields.io/badge/Telegram-OFL01-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/OFL01) 688 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | GNU AFFERO GENERAL PUBLIC LICENSE 2 | Version 3, 19 November 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU Affero General Public License is a free, copyleft license for 11 | software and other kinds of works, specifically designed to ensure 12 | cooperation with the community in the case of network server software. 13 | 14 | The licenses for most software and other practical works are designed 15 | to take away your freedom to share and change the works. By contrast, 16 | our General Public Licenses are intended to guarantee your freedom to 17 | share and change all versions of a program--to make sure it remains free 18 | software for all its users. 19 | 20 | When we speak of free software, we are referring to freedom, not 21 | price. Our General Public Licenses are designed to make sure that you 22 | have the freedom to distribute copies of free software (and charge for 23 | them if you wish), that you receive source code or can get it if you 24 | want it, that you can change the software or use pieces of it in new 25 | free programs, and that you know you can do these things. 26 | 27 | Developers that use our General Public Licenses protect your rights 28 | with two steps: (1) assert copyright on the software, and (2) offer 29 | you this License which gives you legal permission to copy, distribute 30 | and/or modify the software. 
31 | 32 | A secondary benefit of defending all users' freedom is that 33 | improvements made in alternate versions of the program, if they 34 | receive widespread use, become available for other developers to 35 | incorporate. Many developers of free software are heartened and 36 | encouraged by the resulting cooperation. However, in the case of 37 | software used on network servers, this result may fail to come about. 38 | The GNU General Public License permits making a modified version and 39 | letting the public access it on a server without ever releasing its 40 | source code to the public. 41 | 42 | The GNU Affero General Public License is designed specifically to 43 | ensure that, in such cases, the modified source code becomes available 44 | to the community. It requires the operator of a network server to 45 | provide the source code of the modified version running there to the 46 | users of that server. Therefore, public use of a modified version, on 47 | a publicly accessible server, gives the public access to the source 48 | code of the modified version. 49 | 50 | An older license, called the Affero General Public License and 51 | published by Affero, was designed to accomplish similar goals. This is 52 | a different license, not a version of the Affero GPL, but Affero has 53 | released a new version of the Affero GPL which permits relicensing under 54 | this license. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | TERMS AND CONDITIONS 60 | 61 | 0. Definitions. 62 | 63 | "This License" refers to version 3 of the GNU Affero General Public License. 64 | 65 | "Copyright" also means copyright-like laws that apply to other kinds of 66 | works, such as semiconductor masks. 67 | 68 | "The Program" refers to any copyrightable work licensed under this 69 | License. Each licensee is addressed as "you". "Licensees" and 70 | "recipients" may be individuals or organizations. 
71 | 72 | To "modify" a work means to copy from or adapt all or part of the work 73 | in a fashion requiring copyright permission, other than the making of an 74 | exact copy. The resulting work is called a "modified version" of the 75 | earlier work or a work "based on" the earlier work. 76 | 77 | A "covered work" means either the unmodified Program or a work based 78 | on the Program. 79 | 80 | To "propagate" a work means to do anything with it that, without 81 | permission, would make you directly or secondarily liable for 82 | infringement under applicable copyright law, except executing it on a 83 | computer or modifying a private copy. Propagation includes copying, 84 | distribution (with or without modification), making available to the 85 | public, and in some countries other activities as well. 86 | 87 | To "convey" a work means any kind of propagation that enables other 88 | parties to make or receive copies. Mere interaction with a user through 89 | a computer network, with no transfer of a copy, is not conveying. 90 | 91 | An interactive user interface displays "Appropriate Legal Notices" 92 | to the extent that it includes a convenient and prominently visible 93 | feature that (1) displays an appropriate copyright notice, and (2) 94 | tells the user that there is no warranty for the work (except to the 95 | extent that warranties are provided), that licensees may convey the 96 | work under this License, and how to view a copy of this License. If 97 | the interface presents a list of user commands or options, such as a 98 | menu, a prominent item in the list meets this criterion. 99 | 100 | 1. Source Code. 101 | 102 | The "source code" for a work means the preferred form of the work 103 | for making modifications to it. "Object code" means any non-source 104 | form of a work. 
105 | 106 | A "Standard Interface" means an interface that either is an official 107 | standard defined by a recognized standards body, or, in the case of 108 | interfaces specified for a particular programming language, one that 109 | is widely used among developers working in that language. 110 | 111 | The "System Libraries" of an executable work include anything, other 112 | than the work as a whole, that (a) is included in the normal form of 113 | packaging a Major Component, but which is not part of that Major 114 | Component, and (b) serves only to enable use of the work with that 115 | Major Component, or to implement a Standard Interface for which an 116 | implementation is available to the public in source code form. A 117 | "Major Component", in this context, means a major essential component 118 | (kernel, window system, and so on) of the specific operating system 119 | (if any) on which the executable work runs, or a compiler used to 120 | produce the work, or an object code interpreter used to run it. 121 | 122 | The "Corresponding Source" for a work in object code form means all 123 | the source code needed to generate, install, and (for an executable 124 | work) run the object code and to modify the work, including scripts to 125 | control those activities. However, it does not include the work's 126 | System Libraries, or general-purpose tools or generally available free 127 | programs which are used unmodified in performing those activities but 128 | which are not part of the work. For example, Corresponding Source 129 | includes interface definition files associated with source files for 130 | the work, and the source code for shared libraries and dynamically 131 | linked subprograms that the work is specifically designed to require, 132 | such as by intimate data communication or control flow between those 133 | subprograms and other parts of the work. 
134 | 135 | The Corresponding Source need not include anything that users 136 | can regenerate automatically from other parts of the Corresponding 137 | Source. 138 | 139 | The Corresponding Source for a work in source code form is that 140 | same work. 141 | 142 | 2. Basic Permissions. 143 | 144 | All rights granted under this License are granted for the term of 145 | copyright on the Program, and are irrevocable provided the stated 146 | conditions are met. This License explicitly affirms your unlimited 147 | permission to run the unmodified Program. The output from running a 148 | covered work is covered by this License only if the output, given its 149 | content, constitutes a covered work. This License acknowledges your 150 | rights of fair use or other equivalent, as provided by copyright law. 151 | 152 | You may make, run and propagate covered works that you do not 153 | convey, without conditions so long as your license otherwise remains 154 | in force. You may convey covered works to others for the sole purpose 155 | of having them make modifications exclusively for you, or provide you 156 | with facilities for running those works, provided that you comply with 157 | the terms of this License in conveying all material for which you do 158 | not control copyright. Those thus making or running the covered works 159 | for you must do so exclusively on your behalf, under your direction 160 | and control, on terms that prohibit them from making any copies of 161 | your copyrighted material outside their relationship with you. 162 | 163 | Conveying under any other circumstances is permitted solely under 164 | the conditions stated below. Sublicensing is not allowed; section 10 165 | makes it unnecessary. 166 | 167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
168 | 169 | No covered work shall be deemed part of an effective technological 170 | measure under any applicable law fulfilling obligations under article 171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 172 | similar laws prohibiting or restricting circumvention of such 173 | measures. 174 | 175 | When you convey a covered work, you waive any legal power to forbid 176 | circumvention of technological measures to the extent such circumvention 177 | is effected by exercising rights under this License with respect to 178 | the covered work, and you disclaim any intention to limit operation or 179 | modification of the work as a means of enforcing, against the work's 180 | users, your or third parties' legal rights to forbid circumvention of 181 | technological measures. 182 | 183 | 4. Conveying Verbatim Copies. 184 | 185 | You may convey verbatim copies of the Program's source code as you 186 | receive it, in any medium, provided that you conspicuously and 187 | appropriately publish on each copy an appropriate copyright notice; 188 | keep intact all notices stating that this License and any 189 | non-permissive terms added in accord with section 7 apply to the code; 190 | keep intact all notices of the absence of any warranty; and give all 191 | recipients a copy of this License along with the Program. 192 | 193 | You may charge any price or no price for each copy that you convey, 194 | and you may offer support or warranty protection for a fee. 195 | 196 | 5. Conveying Modified Source Versions. 197 | 198 | You may convey a work based on the Program, or the modifications to 199 | produce it from the Program, in the form of source code under the 200 | terms of section 4, provided that you also meet all of these conditions: 201 | 202 | a) The work must carry prominent notices stating that you modified 203 | it, and giving a relevant date. 
204 | 205 | b) The work must carry prominent notices stating that it is 206 | released under this License and any conditions added under section 207 | 7. This requirement modifies the requirement in section 4 to 208 | "keep intact all notices". 209 | 210 | c) You must license the entire work, as a whole, under this 211 | License to anyone who comes into possession of a copy. This 212 | License will therefore apply, along with any applicable section 7 213 | additional terms, to the whole of the work, and all its parts, 214 | regardless of how they are packaged. This License gives no 215 | permission to license the work in any other way, but it does not 216 | invalidate such permission if you have separately received it. 217 | 218 | d) If the work has interactive user interfaces, each must display 219 | Appropriate Legal Notices; however, if the Program has interactive 220 | interfaces that do not display Appropriate Legal Notices, your 221 | work need not make them do so. 222 | 223 | A compilation of a covered work with other separate and independent 224 | works, which are not by their nature extensions of the covered work, 225 | and which are not combined with it such as to form a larger program, 226 | in or on a volume of a storage or distribution medium, is called an 227 | "aggregate" if the compilation and its resulting copyright are not 228 | used to limit the access or legal rights of the compilation's users 229 | beyond what the individual works permit. Inclusion of a covered work 230 | in an aggregate does not cause this License to apply to the other 231 | parts of the aggregate. 232 | 233 | 6. Conveying Non-Source Forms. 
234 | 235 | You may convey a covered work in object code form under the terms 236 | of sections 4 and 5, provided that you also convey the 237 | machine-readable Corresponding Source under the terms of this License, 238 | in one of these ways: 239 | 240 | a) Convey the object code in, or embodied in, a physical product 241 | (including a physical distribution medium), accompanied by the 242 | Corresponding Source fixed on a durable physical medium 243 | customarily used for software interchange. 244 | 245 | b) Convey the object code in, or embodied in, a physical product 246 | (including a physical distribution medium), accompanied by a 247 | written offer, valid for at least three years and valid for as 248 | long as you offer spare parts or customer support for that product 249 | model, to give anyone who possesses the object code either (1) a 250 | copy of the Corresponding Source for all the software in the 251 | product that is covered by this License, on a durable physical 252 | medium customarily used for software interchange, for a price no 253 | more than your reasonable cost of physically performing this 254 | conveying of source, or (2) access to copy the 255 | Corresponding Source from a network server at no charge. 256 | 257 | c) Convey individual copies of the object code with a copy of the 258 | written offer to provide the Corresponding Source. This 259 | alternative is allowed only occasionally and noncommercially, and 260 | only if you received the object code with such an offer, in accord 261 | with subsection 6b. 262 | 263 | d) Convey the object code by offering access from a designated 264 | place (gratis or for a charge), and offer equivalent access to the 265 | Corresponding Source in the same way through the same place at no 266 | further charge. You need not require recipients to copy the 267 | Corresponding Source along with the object code. 
If the place to 268 | copy the object code is a network server, the Corresponding Source 269 | may be on a different server (operated by you or a third party) 270 | that supports equivalent copying facilities, provided you maintain 271 | clear directions next to the object code saying where to find the 272 | Corresponding Source. Regardless of what server hosts the 273 | Corresponding Source, you remain obligated to ensure that it is 274 | available for as long as needed to satisfy these requirements. 275 | 276 | e) Convey the object code using peer-to-peer transmission, provided 277 | you inform other peers where the object code and Corresponding 278 | Source of the work are being offered to the general public at no 279 | charge under subsection 6d. 280 | 281 | A separable portion of the object code, whose source code is excluded 282 | from the Corresponding Source as a System Library, need not be 283 | included in conveying the object code work. 284 | 285 | A "User Product" is either (1) a "consumer product", which means any 286 | tangible personal property which is normally used for personal, family, 287 | or household purposes, or (2) anything designed or sold for incorporation 288 | into a dwelling. In determining whether a product is a consumer product, 289 | doubtful cases shall be resolved in favor of coverage. For a particular 290 | product received by a particular user, "normally used" refers to a 291 | typical or common use of that class of product, regardless of the status 292 | of the particular user or of the way in which the particular user 293 | actually uses, or expects or is expected to use, the product. A product 294 | is a consumer product regardless of whether the product has substantial 295 | commercial, industrial or non-consumer uses, unless such uses represent 296 | the only significant mode of use of the product. 
297 | 298 | "Installation Information" for a User Product means any methods, 299 | procedures, authorization keys, or other information required to install 300 | and execute modified versions of a covered work in that User Product from 301 | a modified version of its Corresponding Source. The information must 302 | suffice to ensure that the continued functioning of the modified object 303 | code is in no case prevented or interfered with solely because 304 | modification has been made. 305 | 306 | If you convey an object code work under this section in, or with, or 307 | specifically for use in, a User Product, and the conveying occurs as 308 | part of a transaction in which the right of possession and use of the 309 | User Product is transferred to the recipient in perpetuity or for a 310 | fixed term (regardless of how the transaction is characterized), the 311 | Corresponding Source conveyed under this section must be accompanied 312 | by the Installation Information. But this requirement does not apply 313 | if neither you nor any third party retains the ability to install 314 | modified object code on the User Product (for example, the work has 315 | been installed in ROM). 316 | 317 | The requirement to provide Installation Information does not include a 318 | requirement to continue to provide support service, warranty, or updates 319 | for a work that has been modified or installed by the recipient, or for 320 | the User Product in which it has been modified or installed. Access to a 321 | network may be denied when the modification itself materially and 322 | adversely affects the operation of the network or violates the rules and 323 | protocols for communication across the network. 
324 | 325 | Corresponding Source conveyed, and Installation Information provided, 326 | in accord with this section must be in a format that is publicly 327 | documented (and with an implementation available to the public in 328 | source code form), and must require no special password or key for 329 | unpacking, reading or copying. 330 | 331 | 7. Additional Terms. 332 | 333 | "Additional permissions" are terms that supplement the terms of this 334 | License by making exceptions from one or more of its conditions. 335 | Additional permissions that are applicable to the entire Program shall 336 | be treated as though they were included in this License, to the extent 337 | that they are valid under applicable law. If additional permissions 338 | apply only to part of the Program, that part may be used separately 339 | under those permissions, but the entire Program remains governed by 340 | this License without regard to the additional permissions. 341 | 342 | When you convey a copy of a covered work, you may at your option 343 | remove any additional permissions from that copy, or from any part of 344 | it. (Additional permissions may be written to require their own 345 | removal in certain cases when you modify the work.) You may place 346 | additional permissions on material, added by you to a covered work, 347 | for which you have or can give appropriate copyright permission. 
348 | 349 | Notwithstanding any other provision of this License, for material you 350 | add to a covered work, you may (if authorized by the copyright holders of 351 | that material) supplement the terms of this License with terms: 352 | 353 | a) Disclaiming warranty or limiting liability differently from the 354 | terms of sections 15 and 16 of this License; or 355 | 356 | b) Requiring preservation of specified reasonable legal notices or 357 | author attributions in that material or in the Appropriate Legal 358 | Notices displayed by works containing it; or 359 | 360 | c) Prohibiting misrepresentation of the origin of that material, or 361 | requiring that modified versions of such material be marked in 362 | reasonable ways as different from the original version; or 363 | 364 | d) Limiting the use for publicity purposes of names of licensors or 365 | authors of the material; or 366 | 367 | e) Declining to grant rights under trademark law for use of some 368 | trade names, trademarks, or service marks; or 369 | 370 | f) Requiring indemnification of licensors and authors of that 371 | material by anyone who conveys the material (or modified versions of 372 | it) with contractual assumptions of liability to the recipient, for 373 | any liability that these contractual assumptions directly impose on 374 | those licensors and authors. 375 | 376 | All other non-permissive additional terms are considered "further 377 | restrictions" within the meaning of section 10. If the Program as you 378 | received it, or any part of it, contains a notice stating that it is 379 | governed by this License along with a term that is a further 380 | restriction, you may remove that term. 
If a license document contains 381 | a further restriction but permits relicensing or conveying under this 382 | License, you may add to a covered work material governed by the terms 383 | of that license document, provided that the further restriction does 384 | not survive such relicensing or conveying. 385 | 386 | If you add terms to a covered work in accord with this section, you 387 | must place, in the relevant source files, a statement of the 388 | additional terms that apply to those files, or a notice indicating 389 | where to find the applicable terms. 390 | 391 | Additional terms, permissive or non-permissive, may be stated in the 392 | form of a separately written license, or stated as exceptions; 393 | the above requirements apply either way. 394 | 395 | 8. Termination. 396 | 397 | You may not propagate or modify a covered work except as expressly 398 | provided under this License. Any attempt otherwise to propagate or 399 | modify it is void, and will automatically terminate your rights under 400 | this License (including any patent licenses granted under the third 401 | paragraph of section 11). 402 | 403 | However, if you cease all violation of this License, then your 404 | license from a particular copyright holder is reinstated (a) 405 | provisionally, unless and until the copyright holder explicitly and 406 | finally terminates your license, and (b) permanently, if the copyright 407 | holder fails to notify you of the violation by some reasonable means 408 | prior to 60 days after the cessation. 409 | 410 | Moreover, your license from a particular copyright holder is 411 | reinstated permanently if the copyright holder notifies you of the 412 | violation by some reasonable means, this is the first time you have 413 | received notice of violation of this License (for any work) from that 414 | copyright holder, and you cure the violation prior to 30 days after 415 | your receipt of the notice. 
416 | 417 | Termination of your rights under this section does not terminate the 418 | licenses of parties who have received copies or rights from you under 419 | this License. If your rights have been terminated and not permanently 420 | reinstated, you do not qualify to receive new licenses for the same 421 | material under section 10. 422 | 423 | 9. Acceptance Not Required for Having Copies. 424 | 425 | You are not required to accept this License in order to receive or 426 | run a copy of the Program. Ancillary propagation of a covered work 427 | occurring solely as a consequence of using peer-to-peer transmission 428 | to receive a copy likewise does not require acceptance. However, 429 | nothing other than this License grants you permission to propagate or 430 | modify any covered work. These actions infringe copyright if you do 431 | not accept this License. Therefore, by modifying or propagating a 432 | covered work, you indicate your acceptance of this License to do so. 433 | 434 | 10. Automatic Licensing of Downstream Recipients. 435 | 436 | Each time you convey a covered work, the recipient automatically 437 | receives a license from the original licensors, to run, modify and 438 | propagate that work, subject to this License. You are not responsible 439 | for enforcing compliance by third parties with this License. 440 | 441 | An "entity transaction" is a transaction transferring control of an 442 | organization, or substantially all assets of one, or subdividing an 443 | organization, or merging organizations. 
If propagation of a covered 444 | work results from an entity transaction, each party to that 445 | transaction who receives a copy of the work also receives whatever 446 | licenses to the work the party's predecessor in interest had or could 447 | give under the previous paragraph, plus a right to possession of the 448 | Corresponding Source of the work from the predecessor in interest, if 449 | the predecessor has it or can get it with reasonable efforts. 450 | 451 | You may not impose any further restrictions on the exercise of the 452 | rights granted or affirmed under this License. For example, you may 453 | not impose a license fee, royalty, or other charge for exercise of 454 | rights granted under this License, and you may not initiate litigation 455 | (including a cross-claim or counterclaim in a lawsuit) alleging that 456 | any patent claim is infringed by making, using, selling, offering for 457 | sale, or importing the Program or any portion of it. 458 | 459 | 11. Patents. 460 | 461 | A "contributor" is a copyright holder who authorizes use under this 462 | License of the Program or a work on which the Program is based. The 463 | work thus licensed is called the contributor's "contributor version". 464 | 465 | A contributor's "essential patent claims" are all patent claims 466 | owned or controlled by the contributor, whether already acquired or 467 | hereafter acquired, that would be infringed by some manner, permitted 468 | by this License, of making, using, or selling its contributor version, 469 | but do not include claims that would be infringed only as a 470 | consequence of further modification of the contributor version. For 471 | purposes of this definition, "control" includes the right to grant 472 | patent sublicenses in a manner consistent with the requirements of 473 | this License. 
474 | 475 | Each contributor grants you a non-exclusive, worldwide, royalty-free 476 | patent license under the contributor's essential patent claims, to 477 | make, use, sell, offer for sale, import and otherwise run, modify and 478 | propagate the contents of its contributor version. 479 | 480 | In the following three paragraphs, a "patent license" is any express 481 | agreement or commitment, however denominated, not to enforce a patent 482 | (such as an express permission to practice a patent or covenant not to 483 | sue for patent infringement). To "grant" such a patent license to a 484 | party means to make such an agreement or commitment not to enforce a 485 | patent against the party. 486 | 487 | If you convey a covered work, knowingly relying on a patent license, 488 | and the Corresponding Source of the work is not available for anyone 489 | to copy, free of charge and under the terms of this License, through a 490 | publicly available network server or other readily accessible means, 491 | then you must either (1) cause the Corresponding Source to be so 492 | available, or (2) arrange to deprive yourself of the benefit of the 493 | patent license for this particular work, or (3) arrange, in a manner 494 | consistent with the requirements of this License, to extend the patent 495 | license to downstream recipients. "Knowingly relying" means you have 496 | actual knowledge that, but for the patent license, your conveying the 497 | covered work in a country, or your recipient's use of the covered work 498 | in a country, would infringe one or more identifiable patents in that 499 | country that you have reason to believe are valid. 
500 | 501 | If, pursuant to or in connection with a single transaction or 502 | arrangement, you convey, or propagate by procuring conveyance of, a 503 | covered work, and grant a patent license to some of the parties 504 | receiving the covered work authorizing them to use, propagate, modify 505 | or convey a specific copy of the covered work, then the patent license 506 | you grant is automatically extended to all recipients of the covered 507 | work and works based on it. 508 | 509 | A patent license is "discriminatory" if it does not include within 510 | the scope of its coverage, prohibits the exercise of, or is 511 | conditioned on the non-exercise of one or more of the rights that are 512 | specifically granted under this License. You may not convey a covered 513 | work if you are a party to an arrangement with a third party that is 514 | in the business of distributing software, under which you make payment 515 | to the third party based on the extent of your activity of conveying 516 | the work, and under which the third party grants, to any of the 517 | parties who would receive the covered work from you, a discriminatory 518 | patent license (a) in connection with copies of the covered work 519 | conveyed by you (or copies made from those copies), or (b) primarily 520 | for and in connection with specific products or compilations that 521 | contain the covered work, unless you entered into that arrangement, 522 | or that patent license was granted, prior to 28 March 2007. 523 | 524 | Nothing in this License shall be construed as excluding or limiting 525 | any implied license or other defenses to infringement that may 526 | otherwise be available to you under applicable patent law. 527 | 528 | 12. No Surrender of Others' Freedom. 529 | 530 | If conditions are imposed on you (whether by court order, agreement or 531 | otherwise) that contradict the conditions of this License, they do not 532 | excuse you from the conditions of this License. 
If you cannot convey a 533 | covered work so as to satisfy simultaneously your obligations under this 534 | License and any other pertinent obligations, then as a consequence you may 535 | not convey it at all. For example, if you agree to terms that obligate you 536 | to collect a royalty for further conveying from those to whom you convey 537 | the Program, the only way you could satisfy both those terms and this 538 | License would be to refrain entirely from conveying the Program. 539 | 540 | 13. Remote Network Interaction; Use with the GNU General Public License. 541 | 542 | Notwithstanding any other provision of this License, if you modify the 543 | Program, your modified version must prominently offer all users 544 | interacting with it remotely through a computer network (if your version 545 | supports such interaction) an opportunity to receive the Corresponding 546 | Source of your version by providing access to the Corresponding Source 547 | from a network server at no charge, through some standard or customary 548 | means of facilitating copying of software. This Corresponding Source 549 | shall include the Corresponding Source for any work covered by version 3 550 | of the GNU General Public License that is incorporated pursuant to the 551 | following paragraph. 552 | 553 | Notwithstanding any other provision of this License, you have 554 | permission to link or combine any covered work with a work licensed 555 | under version 3 of the GNU General Public License into a single 556 | combined work, and to convey the resulting work. The terms of this 557 | License will continue to apply to the part which is the covered work, 558 | but the work with which it is combined will remain governed by version 559 | 3 of the GNU General Public License. 560 | 561 | 14. Revised Versions of this License. 562 | 563 | The Free Software Foundation may publish revised and/or new versions of 564 | the GNU Affero General Public License from time to time. 
Such new versions 565 | will be similar in spirit to the present version, but may differ in detail to 566 | address new problems or concerns. 567 | 568 | Each version is given a distinguishing version number. If the 569 | Program specifies that a certain numbered version of the GNU Affero General 570 | Public License "or any later version" applies to it, you have the 571 | option of following the terms and conditions either of that numbered 572 | version or of any later version published by the Free Software 573 | Foundation. If the Program does not specify a version number of the 574 | GNU Affero General Public License, you may choose any version ever published 575 | by the Free Software Foundation. 576 | 577 | If the Program specifies that a proxy can decide which future 578 | versions of the GNU Affero General Public License can be used, that proxy's 579 | public statement of acceptance of a version permanently authorizes you 580 | to choose that version for the Program. 581 | 582 | Later license versions may give you additional or different 583 | permissions. However, no additional obligations are imposed on any 584 | author or copyright holder as a result of your choosing to follow a 585 | later version. 586 | 587 | 15. Disclaimer of Warranty. 588 | 589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 597 | 598 | 16. Limitation of Liability. 
599 | 600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 608 | SUCH DAMAGES. 609 | 610 | 17. Interpretation of Sections 15 and 16. 611 | 612 | If the disclaimer of warranty and limitation of liability provided 613 | above cannot be given local legal effect according to their terms, 614 | reviewing courts shall apply local law that most closely approximates 615 | an absolute waiver of all civil liability in connection with the 616 | Program, unless a warranty or assumption of liability accompanies a 617 | copy of the Program in return for a fee. 618 | 619 | END OF TERMS AND CONDITIONS 620 | 621 | How to Apply These Terms to Your New Programs 622 | 623 | If you develop a new program, and you want it to be of the greatest 624 | possible use to the public, the best way to achieve this is to make it 625 | free software which everyone can redistribute and change under these terms. 626 | 627 | To do so, attach the following notices to the program. It is safest 628 | to attach them to the start of each source file to most effectively 629 | state the exclusion of warranty; and each file should have at least 630 | the "copyright" line and a pointer to where the full notice is found. 
631 | 632 | <one line to give the program's name and a brief idea of what it does.> 633 | Copyright (C) <year>  <name of author> 634 | 635 | This program is free software: you can redistribute it and/or modify 636 | it under the terms of the GNU Affero General Public License as published 637 | by the Free Software Foundation, either version 3 of the License, or 638 | (at your option) any later version. 639 | 640 | This program is distributed in the hope that it will be useful, 641 | but WITHOUT ANY WARRANTY; without even the implied warranty of 642 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 643 | GNU Affero General Public License for more details. 644 | 645 | You should have received a copy of the GNU Affero General Public License 646 | along with this program. If not, see <https://www.gnu.org/licenses/>. 647 | 648 | Also add information on how to contact you by electronic and paper mail. 649 | 650 | If your software can interact with users remotely through a computer 651 | network, you should also make sure that it provides a way for users to 652 | get its source. For example, if your program is a web application, its 653 | interface could display a "Source" link that leads users to an archive 654 | of the code. There are many ways you could offer source, and different 655 | solutions will be better for different programs; see section 13 for the 656 | specific requirements. 657 | 658 | You should also get your employer (if you work as a programmer) or school, 659 | if any, to sign a "copyright disclaimer" for the program, if necessary. 660 | For more information on this, and how to apply and follow the GNU AGPL, see 661 | <https://www.gnu.org/licenses/>. --------------------------------------------------------------------------------