├── .github
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── build-docs.yml
│ ├── commit-message-check.yml
│ ├── osv-scanner.yml
│ └── pr-check.yml
├── .gitignore
├── LICENSE
├── Makefile
├── NOTICE
├── README
├── agent.yml
├── config
├── config.go
└── generated.go
├── docs
├── .gitignore
├── Makefile
├── README.md
├── config.yaml
├── content.en
│ ├── _index.md
│ ├── docs
│ │ ├── configuration
│ │ │ ├── _index.md
│ │ │ └── processors
│ │ │ │ ├── _index.md
│ │ │ │ ├── es_cluster_health.md
│ │ │ │ ├── es_cluster_stats.md
│ │ │ │ ├── es_index_stats.md
│ │ │ │ ├── es_logs_processor.md
│ │ │ │ ├── es_node_stats.md
│ │ │ │ └── logs_processor.md
│ │ ├── getting-started
│ │ │ ├── _index.md
│ │ │ └── install.md
│ │ └── release-notes
│ │ │ └── _index.md
│ └── menu
│ │ └── index.md
├── content.zh
│ ├── _index.md
│ ├── docs
│ │ ├── configuration
│ │ │ ├── _index.md
│ │ │ └── processors
│ │ │ │ ├── _index.md
│ │ │ │ ├── es_cluster_health.md
│ │ │ │ ├── es_cluster_stats.md
│ │ │ │ ├── es_index_stats.md
│ │ │ │ ├── es_logs_processor.md
│ │ │ │ ├── es_node_stats.md
│ │ │ │ └── logs_processor.md
│ │ ├── getting-started
│ │ │ ├── _index.md
│ │ │ └── install.md
│ │ ├── release-notes
│ │ │ └── _index.md
│ │ └── resources
│ │ │ └── _index.md
│ └── menu
│ │ └── index.md
└── static
│ └── img
│ ├── logo-en.svg
│ └── logo-zh.svg
├── generated_metrics_tasks.go
├── lib
├── process
│ ├── discover.go
│ ├── discover_test.go
│ └── elastic.go
├── reader
│ ├── common
│ │ ├── bytes.go
│ │ ├── datetime.go
│ │ ├── dtfmt
│ │ │ ├── builder.go
│ │ │ ├── ctx.go
│ │ │ ├── doc.go
│ │ │ ├── dtfmt.go
│ │ │ ├── elems.go
│ │ │ ├── fields.go
│ │ │ ├── fmt.go
│ │ │ ├── prog.go
│ │ │ └── util.go
│ │ ├── match
│ │ │ ├── cmp.go
│ │ │ ├── compile.go
│ │ │ ├── matcher.go
│ │ │ ├── matcher_bench_test.go
│ │ │ ├── matcher_test.go
│ │ │ ├── matchers.go
│ │ │ └── optimize.go
│ │ └── streambuf
│ │ │ ├── ascii.go
│ │ │ ├── io.go
│ │ │ ├── net.go
│ │ │ └── streambuf.go
│ ├── harvester
│ │ ├── harvester.go
│ │ ├── harvester_config.go
│ │ └── harvester_test.go
│ ├── linenumber
│ │ ├── line_number.go
│ │ ├── line_number_config.go
│ │ └── line_reader.go
│ ├── message.go
│ ├── multiline
│ │ ├── counter.go
│ │ ├── message_buffer.go
│ │ ├── multiline.go
│ │ ├── multiline_config.go
│ │ ├── pattern.go
│ │ └── while.go
│ ├── reader.go
│ ├── readfile
│ │ ├── encode.go
│ │ ├── encoding
│ │ │ ├── encoding.go
│ │ │ ├── mixed.go
│ │ │ └── utf16.go
│ │ ├── limit.go
│ │ ├── line.go
│ │ ├── line_terminator.go
│ │ ├── metafields.go
│ │ ├── strip_newline.go
│ │ └── timeout.go
│ └── readjson
│ │ ├── docker_json.go
│ │ ├── docker_json_config.go
│ │ ├── json.go
│ │ └── json_config.go
└── util
│ ├── elastic.go
│ ├── file.go
│ └── network.go
├── main.go
└── plugin
├── api
├── discover.go
├── init.go
├── log.go
└── model.go
├── elastic
├── esinfo.go
├── esinfo_test.go
├── logging
│ ├── es_logs.go
│ └── es_logs_test.go
└── metric
│ ├── cluster_health
│ └── cluster_health.go
│ ├── cluster_stats
│ └── cluster_stats.go
│ ├── index_stats
│ └── index_stats.go
│ ├── metric.go
│ └── node_stats
│ ├── node_stats.go
│ └── node_stats_test.go
└── logs
├── file_detect.go
├── file_detect_unix.go
├── file_detect_windows.go
├── logs.go
└── store.go
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## What does this PR do
2 |
3 | ## Rationale for this change
4 |
5 | ## Standards checklist
6 |
7 | - [ ] The PR title is descriptive
8 | - [ ] The commit messages are [semantic](https://www.conventionalcommits.org/)
9 | - [ ] Necessary tests are added
10 | - [ ] Updated the release notes
11 | - [ ] Necessary documents have been added if this is a new feature
12 | - [ ] Performance tests checked, no obvious performance degradation
--------------------------------------------------------------------------------
/.github/workflows/build-docs.yml:
--------------------------------------------------------------------------------
1 | name: Build and Deploy Docs
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | - 'v*'
8 | tags:
9 | - 'v*'
10 |
11 | jobs:
12 | build-deploy-docs:
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: Checkout Product Repo
17 | uses: actions/checkout@v2
18 | with:
19 | fetch-depth: 0
20 |
21 | - name: Set Variables Based on Ref
22 | id: vars
23 | run: |
24 | PRODUCT_NAME=$(basename $(pwd)) # Get the directory name as the product name
25 | echo "PRODUCT_NAME=$PRODUCT_NAME" >> $GITHUB_ENV
26 | CURRENT_REF=${GITHUB_REF##*/}
27 | IS_SEMVER=false
28 | SEMVER_REGEX="^v([0-9]+)\.([0-9]+)\.([0-9]+)$"
29 |
30 | if [[ "${GITHUB_REF_TYPE}" == "branch" ]]; then
31 | if [[ "$CURRENT_REF" == "main" ]]; then
32 | echo "VERSION=main" >> $GITHUB_ENV
33 | echo "BRANCH=main" >> $GITHUB_ENV
34 | elif [[ "$CURRENT_REF" =~ $SEMVER_REGEX ]]; then
35 | IS_SEMVER=true
36 | echo "VERSION=$CURRENT_REF" >> $GITHUB_ENV
37 | echo "BRANCH=$CURRENT_REF" >> $GITHUB_ENV
38 | else
39 | echo "Branch '$CURRENT_REF' is not a valid semantic version. Skipping build."
40 | exit 0
41 | fi
42 | elif [[ "${GITHUB_REF_TYPE}" == "tag" ]]; then
43 | if [[ "$CURRENT_REF" =~ $SEMVER_REGEX ]]; then
44 | IS_SEMVER=true
45 | echo "VERSION=$CURRENT_REF" >> $GITHUB_ENV
46 | echo "BRANCH=main" >> $GITHUB_ENV # Set BRANCH to 'main' for tags
47 | else
48 | echo "Tag '$CURRENT_REF' is not a valid semantic version. Skipping build."
49 | exit 0
50 | fi
51 | fi
52 |
53 | # Gather branches and tags, filter for semantic versions, sort, remove duplicates
54 | VERSIONS=$(git for-each-ref refs/remotes/origin refs/tags --format="%(refname:short)" | \
55 | grep -E "v[0-9]+\.[0-9]+\.[0-9]+$" | awk -F'[v]' '{print "v"$2}' | sort -Vr | uniq | tr '\n' ',' | sed 's/,$//')
56 | echo "VERSIONS=main,$VERSIONS" >> $GITHUB_ENV
57 |
58 | - name: Install Hugo
59 | run: |
60 | wget https://github.com/gohugoio/hugo/releases/download/v0.79.1/hugo_extended_0.79.1_Linux-64bit.tar.gz
61 | tar -xzvf hugo_extended_0.79.1_Linux-64bit.tar.gz
62 | sudo mv hugo /usr/local/bin/
63 |
64 | - name: Checkout Docs Repo
65 | uses: actions/checkout@v2
66 | with:
67 | repository: infinilabs/docs
68 | path: docs-output
69 | token: ${{ secrets.DOCS_DEPLOYMENT_TOKEN }}
70 |
71 | - name: Build Documentation
72 | run: |
73 | (cd docs && OUTPUT=$(pwd)/../docs-output make docs-build docs-place-redirect)
74 |
75 | - name: Commit and Push Changes to Docs Repo
76 | working-directory: docs-output
77 | run: |
78 | git config user.name "GitHub Actions"
79 | git config user.email "actions@github.com"
80 |
81 | if [[ -n $(git status --porcelain) ]]; then
82 | git add .
83 | git commit -m "Rebuild $PRODUCT_NAME docs for version $VERSION"
84 | git push origin main
85 | else
86 | echo "No changes to commit."
87 | fi
88 |
89 | - name: Rebuild Docs for Latest Version (main), if not already on main
90 | run: |
91 | # Only rebuild the main branch docs if the current ref is not "main"
92 | if [[ "$CURRENT_REF" != "main" ]]; then
93 | echo "Switching to main branch and rebuilding docs for 'latest'"
94 |
95 | # Checkout the main branch of the product repo to rebuild docs for "latest"
96 | git checkout main
97 |
98 | # Ensure the latest changes are pulled
99 | git pull origin main
100 |
101 | # Build Docs for Main Branch (latest)
102 | (cd docs && OUTPUT=$(pwd)/../docs-output VERSION="main" BRANCH="main" make docs-build docs-place-redirect)
103 |
104 | # Commit and Push Latest Docs to Main
105 | cd docs-output
106 | git config user.name "GitHub Actions"
107 | git config user.email "actions@github.com"
108 |
109 | if [[ -n $(git status --porcelain) ]]; then
110 | git add .
111 | git commit -m "Rebuild $PRODUCT_NAME docs for main branch with latest version"
112 | git push origin main
113 | else
114 | echo "No changes to commit for main."
115 | fi
116 | else
117 | echo "Current ref is 'main', skipping rebuild for 'latest'."
118 | fi
119 | working-directory: ./ # Working in the product repo
120 |
--------------------------------------------------------------------------------
/.github/workflows/commit-message-check.yml:
--------------------------------------------------------------------------------
1 | name: 'commit-message-check'
2 | on:
3 | pull_request:
4 |
5 | jobs:
6 | check-commit-message:
7 | name: check-subject
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: check-subject-type
11 | uses: gsactions/commit-message-checker@v2
12 | with:
13 | checkAllCommitMessages: 'true' # optional: this checks all commits associated with a pull request
14 | excludeDescription: 'true' # optional: this excludes the description body of a pull request
15 | accessToken: ${{ secrets.GITHUB_TOKEN }}
16 | pattern: '^(change:|feat:|improve:|perf:|dep:|docs:|test:|ci:|style:|refactor:|fix:|fixdoc:|fixup:|merge|Merge|update|Update|bumpver:|chore:|build:) .+$'
17 | flags: 'gm'
18 | error: |
19 | Subject line has to contain a commit type, e.g.: "chore: blabla" or a merge commit e.g.: "merge xxx".
20 | Valid types are:
21 | change - API breaking change
22 | feat - API compatible new feature
23 | improve - Become better without functional changes
24 | perf - Performance improvement
25 | dep - dependency update
26 | docs - docs update
27 |         test          - test update
28 | ci - CI workflow update
29 | refactor - refactor without function change.
30 | fix - fix bug
31 | fixdoc - fix doc
32 | fixup - minor change: e.g., fix sth mentioned in a review.
33 | bumpver - Bump to a new version.
34 | chore - Nothing important.
35 | build - bot: dependabot.
--------------------------------------------------------------------------------
/.github/workflows/osv-scanner.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # A sample workflow which sets up periodic OSV-Scanner scanning for vulnerabilities,
7 | # in addition to a PR check which fails if new vulnerabilities are introduced.
8 | #
9 | # For more examples and options, including how to ignore specific vulnerabilities,
10 | # see https://google.github.io/osv-scanner/github-action/
11 |
12 | name: OSV-Scanner
13 |
14 | on:
15 | pull_request:
16 | branches: [ "main" ]
17 |
18 | permissions:
19 | # Required to upload SARIF file to CodeQL. See: https://github.com/github/codeql-action/issues/2117
20 | actions: read
21 | # Require writing security events to upload SARIF file to security tab
22 | security-events: write
23 | # Only need to read contents
24 | contents: read
25 |
26 | jobs:
27 | scan-pr:
28 | uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v1.9.1"
29 | with:
30 | # Example of specifying custom arguments
31 | scan-args: |-
32 | -r
33 | --skip-git
34 | ./
35 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | *.idea/**/*
3 | *.idea
4 | .idea/*
5 | .gradle/**/*
6 | .gradle/*
7 | out/**/*
8 | out/*
9 | build/**/*
10 | build/*
11 | gen/*
12 | */bin/*
13 | *.class
14 | *.iws
15 | *.ipr
16 | */.idea/**
17 | */.idea/**/**
18 | */.gradle/**
19 | */out/**
20 | */build/**
21 | */*.iml
22 | */gen/*
23 | */bin/**
24 | */*.class
25 | *.iws
26 | *.ipr
27 | */R.java
28 | gen/
29 | classes/
30 | bin/
31 | *.log
32 | */*/proguard_logs/**
33 | /private_test
34 | *.exe
35 | /log
36 | /out
37 | /data
38 | /bloomfilter.bin
39 | /bin
40 | /src/github.com
41 | /pkg
42 | /pkg/*
43 | /cluster
44 | .DS_Store
45 | /bin-run/
46 | /leveldb
47 | /dist
48 | vendor
49 | .git
50 | trash
51 | *.so
52 | .public
53 | generated_*.go
54 | config/generated.go
55 | config/generat*.go
56 | config/*.tpl
57 | config/*.yml
58 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | SHELL=/bin/bash
2 |
3 | # APP info
4 | APP_NAME := agent
5 | APP_VERSION := 1.0.0_SNAPSHOT
6 | APP_CONFIG := $(APP_NAME).yml
7 | APP_EOLDate ?= "2126-12-31T10:10:10Z"
8 | APP_STATIC_FOLDER := .public
9 | APP_STATIC_PACKAGE := public
10 | APP_UI_FOLDER := ui
11 | APP_PLUGIN_FOLDER := plugin
12 | GOMODULE := false
13 |
14 | include ../framework/Makefile
15 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/infinilabs/agent/be98dcfa65b6550de4a9defad736e3f6803ee6ce/NOTICE
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | Agent Listening:
2 | - Check with console every 60s
3 |   - Is there any configuration change? (templates\configuration\keystore\etc)
4 | - Is there any new service?(plugins\scripts\etc)
5 | - Pulling new configuration from server
6 | - Pulling new service from server
7 | - Restarting service
8 | - Report status to server
9 |
--------------------------------------------------------------------------------
/agent.yml:
--------------------------------------------------------------------------------
1 | env:
2 | API_BINDING: "0.0.0.0:2900"
3 |
4 | path.data: data
5 | path.logs: log
6 | path.configs: "config"
7 | configs.auto_reload: true
8 |
9 | resource_limit.cpu.max_num_of_cpus: 1
10 | resource_limit:
11 | memory:
12 |     max_in_bytes: 533708800 # ~509MB (NOTE: original comment said 50MB, but 50MB = 52428800 — confirm intended limit)
13 |
14 | task:
15 | max_concurrent_tasks: 3
16 |
17 | stats:
18 | include_storage_stats_in_api: false
19 |
20 | elastic:
21 | skip_init_metadata_on_start: true
22 | metadata_refresh:
23 | enabled: false
24 | health_check:
25 | enabled: true
26 | interval: 60s
27 | availability_check:
28 | enabled: false
29 | interval: 60s
30 |
31 | disk_queue:
32 | max_msg_size: 20485760
33 | max_bytes_per_file: 20485760
34 | max_used_bytes: 524288000
35 | retention.max_num_of_local_files: 1
36 | compress:
37 | idle_threshold: 1
38 | num_of_files_decompress_ahead: 0
39 | segment:
40 | enabled: true
41 |
42 | api:
43 | enabled: true
44 | network:
45 | binding: $[[env.API_BINDING]]
46 | # tls:
47 | # enabled: true
48 | # cert_file: /etc/ssl.crt
49 | # key_file: /etc/ssl.key
50 | # skip_insecure_verify: false
51 |
52 | agent:
53 |
54 | metrics:
55 | enabled: true
56 |
57 | configs:
58 | #for managed client's setting
59 | managed: true # managed by remote servers
60 | panic_on_config_error: false #ignore config error
61 | interval: "1s"
62 | servers: # config servers
63 | - "http://localhost:9000"
64 | max_backup_files: 5
65 | soft_delete: false
66 | # tls: #for mTLS connection with config servers
67 | # enabled: true
68 | # cert_file: /etc/ssl.crt
69 | # key_file: /etc/ssl.key
70 | # skip_insecure_verify: false
--------------------------------------------------------------------------------
/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
--------------------------------------------------------------------------------
/config/generated.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | const LastCommitLog = "N/A"
4 |
5 | const BuildDate = "N/A"
6 |
7 | const EOLDate = "N/A"
8 |
9 | const Version = "0.0.1-SNAPSHOT"
10 |
11 | const BuildNumber = "001"
12 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | /public/
2 | /resources/
3 | /themes/
4 | /config.bak
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | SHELL=/bin/bash
2 |
3 | # Basic info
4 | PRODUCT?= $(shell basename "$(shell cd .. && pwd)")
5 | BRANCH?= main
6 | VERSION?= $(shell [[ "$(BRANCH)" == "main" ]] && echo "main" || echo "$(BRANCH)")
7 | CURRENT_VERSION?= $(VERSION)
8 | VERSIONS?= "main"
9 | OUTPUT?= "/tmp/docs"
10 | THEME_FOLDER?= "themes/book"
11 | THEME_REPO?= "https://github.com/infinilabs/docs-theme.git"
12 | THEME_BRANCH?= "main"
13 |
14 | .PHONY: docs-build
15 |
16 | default: docs-build
17 |
18 | docs-init:
19 | 	@if [ ! -d $(THEME_FOLDER) ]; then echo "theme does not exist";(git clone -b $(THEME_BRANCH) $(THEME_REPO) $(THEME_FOLDER) ); fi
20 |
21 | docs-env:
22 | @echo "Debugging Variables:"
23 | @echo "PRODUCT: $(PRODUCT)"
24 | @echo "BRANCH: $(BRANCH)"
25 | @echo "VERSION: $(VERSION)"
26 | @echo "CURRENT_VERSION: $(CURRENT_VERSION)"
27 | @echo "VERSIONS: $(VERSIONS)"
28 | @echo "OUTPUT: $(OUTPUT)"
29 |
30 | docs-config: docs-init
31 | cp config.yaml config.bak
32 | # Detect OS and apply the appropriate sed command
33 | @if [ "$$(uname)" = "Darwin" ]; then \
34 | echo "Running on macOS"; \
35 | sed -i '' "s/BRANCH/$(VERSION)/g" config.yaml; \
36 | else \
37 | echo "Running on Linux"; \
38 | sed -i 's/BRANCH/$(VERSION)/g' config.yaml; \
39 | fi
40 |
41 | docs-build: docs-config
42 | hugo --minify --theme book --destination="$(OUTPUT)/$(PRODUCT)/$(VERSION)" \
43 | --baseURL="/$(PRODUCT)/$(VERSION)"
44 | @$(MAKE) docs-restore-generated-file
45 |
46 | docs-serve: docs-config
47 | hugo serve
48 | @$(MAKE) docs-restore-generated-file
49 |
50 | docs-place-redirect:
51 | echo "
REDIRECT TO THE LATEST_VERSION.
" > $(OUTPUT)/$(PRODUCT)/index.html
52 |
53 | docs-restore-generated-file:
54 | mv config.bak config.yaml
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docs/config.yaml:
--------------------------------------------------------------------------------
1 | # VERSIONS=latest,v1.0 hugo --minify --baseURL="/product/v1.0/" -d public/product/v1.0
2 |
3 | title: INFINI Agent
4 | theme: book
5 |
6 | # Book configuration
7 | disablePathToLower: true
8 | enableGitInfo: false
9 |
10 | outputs:
11 | home:
12 | - HTML
13 | - RSS
14 | - JSON
15 |
16 | # Needed for mermaid/katex shortcodes
17 | markup:
18 | goldmark:
19 | renderer:
20 | unsafe: true
21 | tableOfContents:
22 | startLevel: 1
23 |
24 | # Multi-lingual mode config
25 | # There are different options to translate files
26 | # See https://gohugo.io/content-management/multilingual/#translation-by-filename
27 | # And https://gohugo.io/content-management/multilingual/#translation-by-content-directory
28 | defaultContentLanguage: en
29 | languages:
30 | en:
31 | languageName: English
32 | contentDir: content.en
33 | weight: 3
34 | zh:
35 | languageName: 简体中文
36 | contentDir: content.zh
37 | weight: 4
38 |
39 | menu:
40 | before: []
41 | after:
42 | - name: "Github"
43 | url: "https://github.com/infinilabs/agent"
44 | weight: 10
45 |
46 | params:
47 | # (Optional, default light) Sets color theme: light, dark or auto.
48 | # Theme 'auto' switches between dark and light modes based on browser/os preferences
49 | BookTheme: "auto"
50 |
51 | # (Optional, default true) Controls table of contents visibility on right side of pages.
52 | # Start and end levels can be controlled with markup.tableOfContents setting.
53 | # You can also specify this parameter per page in front matter.
54 | BookToC: true
55 |
56 | # (Optional, default none) Set the path to a logo for the book. If the logo is
57 | # /static/logo.png then the path would be logo.png
58 | BookLogo: img/logo
59 |
60 | # (Optional, default none) Set leaf bundle to render as side menu
61 | # When not specified file structure and weights will be used
62 | # BookMenuBundle: /menu
63 |
64 | # (Optional, default docs) Specify root page to render child pages as menu.
65 | # Page is resoled by .GetPage function: https://gohugo.io/functions/getpage/
66 | # For backward compatibility you can set '*' to render all sections to menu. Acts same as '/'
67 | BookSection: docs
68 |
69 | # Set source repository location.
70 | # Used for 'Last Modified' and 'Edit this page' links.
71 | BookRepo: https://github.com/infinilabs/agent
72 |
73 | # Enable "Edit this page" links for 'doc' page type.
74 | # Disabled by default. Uncomment to enable. Requires 'BookRepo' param.
75 | # Edit path must point to root directory of repo.
76 | BookEditPath: edit/BRANCH/docs
77 |
78 | # Configure the date format used on the pages
79 | # - In git information
80 | # - In blog posts
81 | BookDateFormat: "January 2, 2006"
82 |
83 | # (Optional, default true) Enables search function with flexsearch,
84 | # Index is built on fly, therefore it might slowdown your website.
85 | # Configuration for indexing can be adjusted in i18n folder per language.
86 | BookSearch: false
87 |
88 | # (Optional, default true) Enables comments template on pages
89 | # By default partals/docs/comments.html includes Disqus template
90 | # See https://gohugo.io/content-management/comments/#configure-disqus
91 | # Can be overwritten by same param in page frontmatter
92 | BookComments: false
93 |
94 | # /!\ This is an experimental feature, might be removed or changed at any time
95 | # (Optional, experimental, default false) Enables portable links and link checks in markdown pages.
96 | # Portable links meant to work with text editors and let you write markdown without {{< relref >}} shortcode
97 | # Theme will print warning if page referenced in markdown does not exists.
98 | BookPortableLinks: true
99 |
100 | # /!\ This is an experimental feature, might be removed or changed at any time
101 | # (Optional, experimental, default false) Enables service worker that caches visited pages and resources for offline use.
102 | BookServiceWorker: false
--------------------------------------------------------------------------------
/docs/content.en/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 10
3 | title: INFINI Agent
4 | bookCollapseSection: true
5 | ---
6 |
7 | # INFINI Agent
8 |
9 | ## Introduction
10 |
11 | **INFINI Agent** is used for collecting and uploading the logs and metric information of Elasticsearch, Easysearch, and Opensearch clusters. It is managed through INFINI Console and supports mainstream operating systems and platforms. The installation package is lightweight and has no external dependencies, enabling quick and convenient installation.
12 |
13 | ## Features
14 |
15 | - Collect and upload health information of Elasticsearch and other clusters, as well as cluster stats, index stats, and node stats information.
16 | - Collect and upload the logs of instance nodes such as Elasticsearch.
17 | - Collect and upload host metric information.
18 |
19 | {{< button relref="../docs/getting-started/install/" >}}Getting Started Now{{< /button >}}
20 |
21 | ## Community
22 |
23 | Feel free to join the Discord server to discuss anything around this project:
24 |
25 | [Discord Server](https://discord.gg/4tKTMkkvVX)
26 |
27 | ## Who Is Using?
28 |
29 | If you are using INFINI Agent and feel it pretty good, please [let us know](https://discord.gg/4tKTMkkvVX). Thank you for your support.
--------------------------------------------------------------------------------
/docs/content.en/docs/configuration/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 20
3 | title: "References"
4 | bookCollapseSection: true
5 | ---
6 |
7 | # References
8 |
9 | - [env](https://docs.infinilabs.com/gateway/main/docs/references/config/#environment-variables)
10 | - [path](https://docs.infinilabs.com/gateway/main/docs/references/config/#path)
11 | - [log](https://docs.infinilabs.com/gateway/main/docs/references/config/#log)
12 | - [configs](https://docs.infinilabs.com/gateway/main/docs/references/config/#configs)
13 | - [api](https://docs.infinilabs.com/gateway/main/docs/references/config/#api)
14 | - [badger](https://docs.infinilabs.com/gateway/main/docs/references/config/#badger)
15 | - [disk_queue](https://docs.infinilabs.com/gateway/main/docs/references/config/#local-disk-queue)
16 | - [elasticsearch](https://docs.infinilabs.com/gateway/main/docs/references/elasticsearch/)
17 | - [resource_limit](https://docs.infinilabs.com/gateway/main/docs/references/config/#resource-limitations)
18 | - [metrics](https://docs.infinilabs.com/gateway/main/docs/references/config/#metrics)
19 | - [node](https://docs.infinilabs.com/gateway/main/docs/references/config/#node)
20 | - [processors](./processors/_index.md)
21 | - [other](https://docs.infinilabs.com/gateway/main/docs/references/config/#misc)
22 |
--------------------------------------------------------------------------------
/docs/content.en/docs/configuration/processors/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 20
3 | title: "Offline Processor"
4 | bookCollapseSection: true
5 | ---
6 |
7 | # Processor List
8 |
9 | ## Metric Collection
10 |
11 | - [es_cluster_health](./es_cluster_health.md)
12 | - [es_cluster_stats](./es_cluster_stats.md)
13 | - [es_index_stats](./es_index_stats.md)
14 | - [es_node_stats](./es_node_stats.md)
15 |
16 | ## Log Collection
17 |
18 | - [es_logs_processor](./es_logs_processor.md)
19 | - [logs_processor](./logs_processor.md)
20 |
21 | For more processor content, please see [processors](https://docs.infinilabs.com/gateway/main/docs/references/processors/).
--------------------------------------------------------------------------------
/docs/content.en/docs/configuration/processors/es_cluster_health.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_cluster_health
3 | ---
4 |
5 | # es_cluster_health
6 |
7 | ## Description
8 |
9 | Collect the cluster health metrics.
10 |
11 | ## Configuration Example
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_cluster_health
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_cluster_health:
21 | elasticsearch: default
22 | ```
23 |
24 | ## Parameter Description
25 |
26 | | Name | Type | Description |
27 | | --- | --- | --- |
28 | | elasticsearch | string | Cluster instance name (Please see [elasticsearch](https://docs.infinilabs.com/gateway/main/docs/references/elasticsearch/) `name` parameter) |
29 | | labels | map | Custom labels |
30 |
--------------------------------------------------------------------------------
/docs/content.en/docs/configuration/processors/es_cluster_stats.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_cluster_stats
3 | ---
4 |
5 | # es_cluster_stats
6 |
7 | ## Description
8 |
9 | Collect the cluster stats metrics.
10 |
11 | ## Configuration Example
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_cluster_stats
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_cluster_stats:
21 | elasticsearch: default
22 | ```
23 |
24 | ## Parameter Description
25 |
26 | | Name | Type | Description |
27 | | --- | --- | --- |
28 | | elasticsearch | string | Cluster instance name (Please see [elasticsearch](https://docs.infinilabs.com/gateway/main/docs/references/elasticsearch/) `name` parameter) |
29 | | labels | map | Custom labels |
--------------------------------------------------------------------------------
/docs/content.en/docs/configuration/processors/es_index_stats.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_index_stats
3 | ---
4 |
5 | # es_index_stats
6 |
7 | ## Description
8 |
9 | Collect the cluster index stats metrics.
10 |
11 | ## Configuration Example
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_index_stats
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_index_stats:
21 | elasticsearch: default
22 | ```
23 |
24 | ## Parameter Description
25 |
26 | | Name | Type | Description |
27 | | --- | --- | --- |
28 | | elasticsearch | string | Cluster instance name (Please see [elasticsearch](https://docs.infinilabs.com/gateway/main/docs/references/elasticsearch/) `name` parameter) |
29 | | all_index_stats | bool | Whether to enable the metric collection of all indexes, default is `true`. |
30 | | index_primary_stats | bool | Whether to enable the metric collection of index primary shards, default is `true`. |
31 | | labels | map | Custom labels |
--------------------------------------------------------------------------------
/docs/content.en/docs/configuration/processors/es_logs_processor.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_logs_processor
3 | ---
4 |
5 | # es_logs_processor
6 |
7 | ## Description
8 |
9 | Collect the cluster log files.
10 |
11 | ## Configuration Example
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_logs
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_logs_processor:
21 | elasticsearch: default
22 | logs_path: "/opt/dev-environment/ecloud/logs"
23 | queue_name: logs
24 | ```
25 |
26 | ## Parameter Description
27 |
28 | | Name | Type | Description |
29 | | --- | --- | --- |
30 | | queue_name | string | Log collection queue name |
31 | | elasticsearch | string | Cluster instance name (Please see [elasticsearch](https://docs.infinilabs.com/gateway/main/docs/references/elasticsearch/) `name` parameter) |
32 | | logs_path | string | Cluster log path |
33 | | labels | map | Custom labels |
--------------------------------------------------------------------------------
/docs/content.en/docs/configuration/processors/es_node_stats.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_node_stats
3 | ---
4 |
5 | # es_node_stats
6 |
7 | ## Description
8 |
9 | Collect the cluster node stats metrics.
10 |
11 | ## Configuration Example
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_node_stats
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_node_stats:
21 | elasticsearch: default
22 | ```
23 |
24 | ## Parameter Description
25 |
26 | | Name | Type | Description |
27 | | --- | --- | --- |
28 | | elasticsearch | string | Cluster instance name (Please see [elasticsearch](https://docs.infinilabs.com/gateway/main/docs/references/elasticsearch/) `name` parameter) |
29 | | level | string | Metric level, optional `cluster`, `indices`, `shards`, default is `shards`. |
30 | | labels | map | Custom labels |
--------------------------------------------------------------------------------
/docs/content.en/docs/configuration/processors/logs_processor.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: logs_processor
3 | ---
4 |
5 | # logs_processor
6 |
7 | ## Description
8 |
9 | Collect the local log files.
10 |
11 | ## Configuration Example
12 |
13 | Take the elasticsearch log files as an example.
14 |
15 | ```yaml
16 | pipeline:
17 | - name: log_collect
18 | auto_start: true
19 | keep_running: true
20 | retry_delay_in_ms: 3000
21 | processor:
22 | - logs_processor:
23 | queue_name: "logs"
24 | logs_path: "/opt/es/elasticsearch-7.7.1/logs"
25 | # metadata for all log items
26 | metadata:
27 | category: elasticsearch
28 | patterns:
29 | - pattern: ".*_server.json$"
30 | type: json
31 | metadata:
32 | name: server
33 | timestamp_fields: ["timestamp", "@timestamp"]
34 | remove_fields:
35 | [
36 | "type",
37 | "cluster.name",
38 | "cluster.uuid",
39 | "node.name",
40 | "node.id",
41 | "timestamp",
42 | "@timestamp",
43 | ]
44 | - pattern: "gc.log$"
45 | type: text
46 | metadata:
47 | name: gc
48 | timestamp_patterns:
49 | - "\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}:\\d{1,2}:\\d{1,2}.\\d{3}\\+\\d{4}"
50 | - "\\d{4}-\\d{1,2}-\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2},\\d{3}"
51 | - "\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}:\\d{1,2}:\\d{1,2},\\d{3}"
52 | - pattern: ".*.log$"
53 | type: multiline
54 | line_pattern: '^\['
55 | metadata:
56 | name: server
57 | timestamp_patterns:
58 | - "\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}:\\d{1,2}:\\d{1,2}.\\d{3}\\+\\d{4}"
59 | - "\\d{4}-\\d{1,2}-\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2},\\d{3}"
60 | - "\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}:\\d{1,2}:\\d{1,2},\\d{3}"
61 | ```
62 |
63 | ## Parameter Description
64 |
65 | | Name | Type | Description |
66 | | --- | --- | --- |
67 | | queue_name | string | Log files collection queue name |
68 | | logs_path | string | Log files path |
69 | | metadata | map | Configure the metadata for the log files |
70 | | patterns | object | Patterns configuration for log files |
71 | | patterns.pattern | string | Pattern for log files |
72 | | patterns.metadata | map | Configure the metadata for the log files which matched |
73 | | patterns.type | string | Log type, support `json`,`text`,`multiline` |
74 | | patterns.line_pattern | string | When the log type is multiline, the pattern for a new line |
75 | | patterns.remove_fields | []string | Fields that need to be removed (available when the log type is `json`) |
76 | | patterns.timestamp_fields | []string | Timestamp field (available when the log type is `json`) |
77 | | patterns.timestamp_patterns | []string | Timestamp pattern (available when the log type is `text` and `multiline` ) |
78 |
--------------------------------------------------------------------------------
/docs/content.en/docs/getting-started/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 10
3 | title: Getting Started
4 | bookCollapseSection: true
5 | ---
6 |
--------------------------------------------------------------------------------
/docs/content.en/docs/getting-started/install.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 10
3 | title: Installing the Agent
4 | asciinema: true
5 | ---
6 |
7 | # Installing the Agent
8 |
9 | INFINI Agent supports mainstream operating systems and platforms. The program package is small, with no extra external dependency, so the agent can be installed very rapidly.
10 |
11 | ## Downloading
12 |
13 | **Automatic install**
14 |
15 | ```bash
16 | curl -sSL http://get.infini.cloud | bash -s -- -p agent
17 | ```
18 |
19 | > The above script can automatically download the latest version of the corresponding platform's agent and extract it to /opt/agent
20 |
21 | > The optional parameters for the script are as follows:
22 |
23 | > _-v [version number](Default to use the latest version number)_
24 |
25 | > _-d [installation directory] (default installation to /opt/agent)_
26 |
27 | **Manual install**
28 |
29 | Select a package for downloading in the following URL based on your operating system and platform:
30 |
31 | [https://release.infinilabs.com/agent/](https://release.infinilabs.com/agent/)
32 |
33 | ## System Service
34 |
35 | To run the data platform of INFINI Agent as a background task, run the following commands:
36 |
37 | ```
38 | ➜ ./agent -service install
39 | Success
40 | ➜ ./agent -service start
41 | Success
42 | ```
43 |
44 | Unloading the service is simple. To unload the service, run the following commands:
45 |
46 | ```
47 | ➜ ./agent -service stop
48 | Success
49 | ➜ ./agent -service uninstall
50 | Success
51 | ```
52 |
--------------------------------------------------------------------------------
/docs/content.en/menu/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | headless: true
3 | ---
4 |
5 | - [**Documentation**]({{< relref "/docs/" >}})
6 |
--------------------------------------------------------------------------------
/docs/content.zh/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 10
3 | title: INFINI Agent
4 | bookCollapseSection: true
5 | ---
6 |
7 | # INFINI Agent
8 |
9 | ## 介绍
10 |
11 | **INFINI Agent** 负责采集和上传 Elasticsearch, Easysearch, Opensearch 集群的日志和指标信息,通过 INFINI Console 管理,支持主流操作系统和平台,安装包轻量且无任何外部依赖,可以快速方便地安装。
12 |
13 | ## 特性
14 |
15 | - 采集上传 Elasticsearch 等集群健康信息/集群 stats/索引 stats/节点 stats 信息
16 | - 采集上传 Elasticsearch 等实例节点日志
17 | - 采集主机指标信息
18 |
19 | {{< button relref="../docs/getting-started/install/" >}}即刻开始{{< /button >}}
20 |
21 | ## 社区
22 |
23 | [加入我们的 Discord](https://discord.gg/4tKTMkkvVX)
24 |
25 | ## 谁在用?
26 |
27 | 如果您正在使用 INFINI Agent,并且您觉得它还不错的话,请[告诉我们](https://discord.gg/4tKTMkkvVX),感谢您的支持。
--------------------------------------------------------------------------------
/docs/content.zh/docs/configuration/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 20
3 | title: 参数配置
4 | bookCollapseSection: true
5 | ---
6 |
7 | # 参数配置
8 |
9 | - [env](http://infinilabs.cn/docs/latest/gateway/references/config/#使用环境变量)
10 | - [path](http://infinilabs.cn/docs/latest/gateway/references/config/#path)
11 | - [log](http://infinilabs.cn/docs/latest/gateway/references/config/#log)
12 | - [configs](http://infinilabs.cn/docs/latest/gateway/references/config/#configs)
13 | - [api](http://infinilabs.cn/docs/latest/gateway/references/config/#api)
14 | - [badger](http://infinilabs.cn/docs/latest/gateway/references/config/#badger)
15 | - [disk_queue](http://infinilabs.cn/docs/latest/gateway/references/config/#本地磁盘队列)
16 | - [elasticsearch](https://infinilabs.cn/docs/latest/gateway/references/elasticsearch/)
17 | - [resource_limit](http://infinilabs.cn/docs/latest/gateway/references/config/#资源限制)
18 | - [metrics](http://infinilabs.cn/docs/latest/gateway/references/config/#metrics)
19 | - [node](http://infinilabs.cn/docs/latest/gateway/references/config/#node)
20 | - [processors](./processors/_index.md)
21 | - [其他参数](http://infinilabs.cn/docs/latest/gateway/references/config/#其它配置-1)
22 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/configuration/processors/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 20
3 | title: 离线处理器
4 | bookCollapseSection: true
5 | ---
6 |
7 | # 处理器列表
8 |
9 | ## 指标采集
10 |
11 | - [es_cluster_health](./es_cluster_health.md)
12 | - [es_cluster_stats](./es_cluster_stats.md)
13 | - [es_index_stats](./es_index_stats.md)
14 | - [es_node_stats](./es_node_stats.md)
15 |
16 | ## 日志采集
17 |
18 | - [es_logs_processor](./es_logs_processor.md)
19 | - [logs_processor](./logs_processor.md)
20 |
21 | 更多处理器内容,请查看 [processors](https://infinilabs.cn/docs/latest/gateway/references/processors/)
--------------------------------------------------------------------------------
/docs/content.zh/docs/configuration/processors/es_cluster_health.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_cluster_health
3 | ---
4 |
5 | # es_cluster_health
6 |
7 | ## 描述
8 |
9 | 采集集群健康指标。
10 |
11 | ## 配置示例
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_cluster_health
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_cluster_health:
21 | elasticsearch: default
22 | ```
23 |
24 | ## 参数说明
25 |
26 | | 名称 | 类型 | 说明 |
27 | | --- | --- | --- |
28 | | elasticsearch | string | 集群实例名称(请参考 [elasticsearch](https://infinilabs.cn/docs/latest/gateway/references/elasticsearch/) 的 `name` 参数) |
29 | | labels | map | 自定义标签 |
30 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/configuration/processors/es_cluster_stats.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_cluster_stats
3 | ---
4 |
5 | # es_cluster_stats
6 |
7 | ## 描述
8 |
9 | 采集集群 stats 指标。
10 |
11 | ## 配置示例
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_cluster_stats
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_cluster_stats:
21 | elasticsearch: default
22 | ```
23 |
24 | ## 参数说明
25 |
26 | | 名称 | 类型 | 说明 |
27 | | --- | --- | --- |
28 | | elasticsearch | string | 集群实例名称(请参考 [elasticsearch](https://infinilabs.cn/docs/latest/gateway/references/elasticsearch/) 的 `name` 参数) |
29 | | labels | map | 自定义标签 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/configuration/processors/es_index_stats.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_index_stats
3 | ---
4 |
5 | # es_index_stats
6 |
7 | ## 描述
8 |
9 | 采集集群索引 stats 指标。
10 |
11 | ## 配置示例
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_index_stats
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_index_stats:
21 | elasticsearch: default
22 | ```
23 |
24 | ## 参数说明
25 |
26 | | 名称 | 类型 | 说明 |
27 | | --- | --- | --- |
28 | | elasticsearch | string | 集群实例名称(请参考 [elasticsearch](https://infinilabs.cn/docs/latest/gateway/references/elasticsearch/) 的 `name` 参数) |
29 | | all_index_stats | bool | 是否开启所有索引指标的采集,默认 `true` |
30 | | index_primary_stats | bool | 是否开启索引主分片指标的采集,默认 `true` |
31 | | labels | map | 自定义标签 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/configuration/processors/es_logs_processor.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_logs_processor
3 | ---
4 |
5 | # es_logs_processor
6 |
7 | ## 描述
8 |
9 | 采集集群日志。
10 |
11 | ## 配置示例
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_logs
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_logs_processor:
21 | elasticsearch: default
22 | logs_path: "/opt/dev-environment/ecloud/logs"
23 | queue_name: logs
24 | ```
25 |
26 | ## 参数说明
27 |
28 | | 名称 | 类型 | 说明 |
29 | | --- | --- | --- |
30 | | queue_name | string | 日志采集队列名称 |
31 | | elasticsearch | string | 集群实例名称(请参考 [elasticsearch](https://infinilabs.cn/docs/latest/gateway/references/elasticsearch/) 的 `name` 参数) |
32 | | logs_path | string | 集群日志存储路径 |
33 | | labels | map | 自定义标签 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/configuration/processors/es_node_stats.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: es_node_stats
3 | ---
4 |
5 | # es_node_stats
6 |
7 | ## 描述
8 |
9 | 采集集群节点 stats 指标。
10 |
11 | ## 配置示例
12 |
13 | ```yaml
14 | pipeline:
15 | - name: collect_default_es_node_stats
16 | auto_start: true
17 | keep_running: true
18 | retry_delay_in_ms: 3000
19 | processor:
20 | - es_node_stats:
21 | elasticsearch: default
22 | ```
23 |
24 | ## 参数说明
25 |
26 | | 名称 | 类型 | 说明 |
27 | | --- | --- | --- |
28 | | elasticsearch | string | 集群实例名称(请参考 [elasticsearch](https://infinilabs.cn/docs/latest/gateway/references/elasticsearch/) 的 `name` 参数) |
29 | | level | string | 指标级别,可选 `cluster`,`indices`,`shards`,默认 `shards`。 |
30 | | labels | map | 自定义标签 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/configuration/processors/logs_processor.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: logs_processor
3 | ---
4 |
5 | # logs_processor
6 |
7 | ## 描述
8 |
9 | logs_processor 处理器可采集任意日志。
10 |
11 | ## 配置示例
12 |
13 | 以 elasticsearch 日志为例。
14 |
15 | ```yaml
16 | pipeline:
17 | - name: log_collect
18 | auto_start: true
19 | keep_running: true
20 | retry_delay_in_ms: 3000
21 | processor:
22 | - logs_processor:
23 | queue_name: "logs"
24 | logs_path: "/opt/es/elasticsearch-7.7.1/logs"
25 | # metadata for all log items
26 | metadata:
27 | category: elasticsearch
28 | patterns:
29 | - pattern: ".*_server.json$"
30 | type: json
31 | metadata:
32 | name: server
33 | timestamp_fields: ["timestamp", "@timestamp"]
34 | remove_fields:
35 | [
36 | "type",
37 | "cluster.name",
38 | "cluster.uuid",
39 | "node.name",
40 | "node.id",
41 | "timestamp",
42 | "@timestamp",
43 | ]
44 | - pattern: "gc.log$"
45 | type: text
46 | metadata:
47 | name: gc
48 | timestamp_patterns:
49 | - "\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}:\\d{1,2}:\\d{1,2}.\\d{3}\\+\\d{4}"
50 | - "\\d{4}-\\d{1,2}-\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2},\\d{3}"
51 | - "\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}:\\d{1,2}:\\d{1,2},\\d{3}"
52 | - pattern: ".*.log$"
53 | type: multiline
54 | line_pattern: '^\['
55 | metadata:
56 | name: server
57 | timestamp_patterns:
58 | - "\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}:\\d{1,2}:\\d{1,2}.\\d{3}\\+\\d{4}"
59 | - "\\d{4}-\\d{1,2}-\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2},\\d{3}"
60 | - "\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}:\\d{1,2}:\\d{1,2},\\d{3}"
61 | ```
62 |
63 | ## 参数说明
64 |
65 | | 名称 | 类型 | 说明 |
66 | | --- | --- | --- |
67 | | queue_name | string | 日志采集队列名称 |
68 | | logs_path | string | 日志路径 |
69 | | metadata | map | 指定日志的相关元数据 |
70 | | patterns | object | 具体日志文件的相关匹配配置,按顺序执行 |
71 | | patterns.pattern | string | 日志文件匹配规则 |
72 | | patterns.metadata | map | 日志文件相关元数据配置 |
73 | | patterns.type | string | 日志类型,支持 `json`,`text`,`multiline` |
74 | | patterns.line_pattern | string | 日志类型为 multiline 时,对于新的一行的匹配规则 |
75 | | patterns.remove_fields | []string | 日志文件中需要移除的字段(当日志类型为 json 时可用) |
76 | | patterns.timestamp_fields | []string | 日志数据中时间戳的字段名(当日志类型为 json 时可用) |
77 | | patterns.timestamp_patterns | []string | 日志数据中时间戳的匹配规则(当日志类型为 text,multiline 时可用) |
78 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/getting-started/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 10
3 | title: 入门指南
4 | bookCollapseSection: true
5 | ---
6 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/getting-started/install.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 10
3 | title: 下载安装
4 | asciinema: true
5 | ---
6 |
7 | # 安装 INFINI Agent
8 |
9 | INFINI Agent 支持主流的操作系统和平台,程序包很小,没有任何额外的外部依赖,安装起来应该是很快的 :)
10 |
11 | ## 下载安装
12 |
13 | **自动安装**
14 |
15 | ```bash
16 | curl -sSL http://get.infini.cloud | bash -s -- -p agent
17 | ```
18 |
19 | > 通过以上脚本可自动下载相应平台的 agent 最新版本并解压到/opt/agent
20 |
21 | > 脚本的可选参数如下:
22 |
23 | > _-v [版本号](默认采用最新版本号)_
24 |
25 | > _-d [安装目录](默认安装到/opt/agent)_
26 |
27 | **手动安装**
28 |
29 | 根据您所在的操作系统和平台选择下面相应的下载地址:
30 |
31 | [https://release.infinilabs.com/agent/](https://release.infinilabs.com/agent/)
32 |
33 | ## 配置服务后台运行
34 |
35 | 如果希望将 INFINI Agent 以后台服务任务的方式运行,如下:
36 |
37 | ```
38 | ➜ ./agent -service install
39 | Success
40 | ➜ ./agent -service start
41 | Success
42 | ```
43 |
44 | 卸载服务也很简单,如下:
45 |
46 | ```
47 | ➜ ./agent -service stop
48 | Success
49 | ➜ ./agent -service uninstall
50 | Success
51 | ```
52 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/release-notes/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 80
3 | title: "版本历史"
4 | ---
5 |
6 | # 版本发布日志
7 |
8 | 这里是 INFINI Agent 历史版本发布的相关说明。
9 |
10 |
11 | ## Latest (In development)
12 | ### ❌ Breaking changes
13 | ### 🚀 Features
14 | ### 🐛 Bug fix
15 | ### ✈️ Improvements
16 |
17 | ## 1.29.4 (2025-05-16)
18 | ### ❌ Breaking changes
19 | ### 🚀 Features
20 | ### 🐛 Bug fix
21 | - fix: 修复 `k8s` 环境下自动生成采集任务的 `endpoint`
22 | ### ✈️ Improvements
23 | - chore: 优化节点发现日志输出 (#31)
24 | - chore: 优化启动时进行磁盘容量检查,并进行异常处理 (#31)
25 | - chore: 增加默认配置关闭 `metadata_refresh` (#31)
26 | - 同步更新 [Framework v1.1.7](https://docs.infinilabs.com/framework/v1.1.7) 修复的一些已知问题
27 |
28 | ## 1.29.3 (2025-04-27)
29 | - 同步更新 [Framework v1.1.6](https://docs.infinilabs.com/framework/v1.1.6) 修复的一些已知问题
30 |
31 | ## 1.29.2 (2025-03-31)
32 | - 同步更新 [Framework v1.1.5](https://docs.infinilabs.com/framework/v1.1.5) 修复的一些已知问题
33 |
34 | ## 1.29.1 (2025-03-14)
35 |
36 | 同步更新 [Framework v1.1.4](https://docs.infinilabs.com/framework/v1.1.4) 修复的一些已知问题
37 |
38 |
39 | ## 1.29.0 (2025-02-27)
40 |
41 | ### Breaking changes
42 |
43 | ### Features
44 | - 支持实时查看 gzip 压缩的日志文件 (#22)
45 | - 采集日志级别适配 ECSJsonLayout 格式 (#24)
46 |
47 | ### Bug fix
48 |
49 | ### Improvements
50 | - 默认开启指标采集配置 (#23)
51 | - 同步更新 [Framework v1.1.3](https://docs.infinilabs.com/framework/v1.1.3) 修复的一些已知问题
52 |
53 | ## 1.28.2 (2025-02-15)
54 |
55 | ### Improvements
56 |
57 | - 添加了日志并优化了一些设置 (#17)
58 | - 修复了在 Docker 中使用不同用户进程时注册失败的问题 (#11)
59 | - 同步更新 [Framework v1.1.2](https://docs.infinilabs.com/framework/v1.1.2) 修复的一些已知问题
60 |
61 | ## 1.28.1 (2025-01-24)
62 |
63 | ### Improvements
64 |
65 | - 修复一个空指针判断的问题
66 | - 同步更新 Framework 修复的一些已知问题
67 |
68 | ## 1.28.0 (2025-01-11)
69 |
70 | ### Improvements
71 |
72 | - 同步更新 Framework 修复的一些已知问题
73 |
74 | ## 1.27.0 (2024-12-13)
75 |
76 | ### Improvements
77 |
78 | - 代码开源,统一采用 [Github 仓库](https://github.com/infinilabs/agent) 进行开发
79 | - 与 INFINI Console 统一版本号
80 | - 同步更新 Framework 修复的已知问题
81 | - 支持 K8S 环境指标采集
82 |
83 | ## 1.26.1 (2024-08-13)
84 |
85 | ### Improvements
86 |
87 | - 与 INFINI Console 统一版本号
88 | - 同步更新 Framework 修复的已知问题
89 |
90 | ## 1.26.0 (2024-06-07)
91 |
92 | ### Improvements
93 |
94 | - 与 INFINI Console 统一版本号
95 | - 同步更新 Framework 修复的已知问题
96 |
97 | ## 1.25.0 (2024-04-30)
98 |
99 | ### Improvements
100 |
101 | - 保持与 Console 相同版本号
102 | - 同步更新 Framework 修复的已知问题
103 |
104 | ## 1.24.0 (2024-04-15)
105 |
106 | ### Improvements
107 |
108 | - 保持与 Console 相同版本号
109 |
110 | ## 1.23.0 (2024-03-01)
111 |
112 | ### Bug fix
113 |
114 | - 修复删除实例队列后消费的 Offset 未重置问题
115 |
116 | ## 1.22.0 (2024-01-26)
117 |
118 | ### Improvements
119 |
120 | - 与 INFINI Console 统一版本号
121 |
122 | ## 0.7.1 (2023-12-01)
123 |
124 | ### Features
125 |
126 | - 添加 http processor
127 |
128 | ### Bug fix
129 |
130 | - 修复由 Framework Bug 造成连接数不释放、内存异常增长的问题
131 |
132 | ### Improvements
133 |
134 | - 进一步优化内存占用,降到 50M 以下
135 |
136 | ## 0.7.0 (2023-11-03)
137 |
138 | ### Breaking changes
139 |
140 | ### Features
141 |
142 | - 限制探针资源消耗,限制 CPU 的使用
143 | - 优化探针内存使用,10 倍降低
144 | - 支持集中配置管理,支持动态下发
145 | - 支持探针一键安装和自动注册
146 | - 优化节点指标采集,仅采集本节点指标
147 |
148 | ### Bug fix
149 |
150 | ### Improvements
151 |
152 | - 节点统计的重构
153 | - 删除无用的文件
154 | - 修复节点发现问题
155 | - 避免远程配置问题导致的 panic
156 | - 添加发现未知节点的 API
157 | - 重构节点发现逻辑
158 | - 根据新 API 进行重构
159 |
160 | ## 0.6.1 (2023-08-03)
161 |
162 | ### Bug fix
163 |
164 | - 修复发现节点进程信息时获取 ES 节点端口不对的问题
165 |
166 | ## 0.6.0 (2023-07-21)
167 |
168 | ### Improvements
169 |
170 | - 采集监控指标添加 cluster_uuid 信息
171 |
172 | ### Bug fix
173 |
174 | - 修复发现节点进程信息时获取不到最新集群配置的问题
175 |
176 | ## 0.5.1 (2023-06-30)
177 |
178 | ### Improvements
179 |
180 | - 优化查看节点日志文件性能
181 |
182 | ## 0.5.0 (2023-06-08)
183 |
184 | ### Features
185 |
186 | - 支持将 Agent 注册到 Console
187 | - 添加保存配置到动态加载目录接口
188 |
189 | ### Improvements
190 |
191 | - 优化自动发现 Easysearch 实例进程
192 | - 优化查看 Easysearch 实例日志相关 API
193 |
194 | ## 0.4.0 (2023-05-10)
195 |
196 | ### Features
197 |
198 | - 新增 `logs_processor` ,配置采集本地日志文件
199 |
200 | ### Breaking changes
201 |
202 | - `es_logs_processor` 调整日志字段
203 | - `created`重命名为`timestamp`
204 |   - 自动提取`payload.timestamp` `payload.@timestamp`字段到`timestamp`
205 | - `es_logs_processor` 删除 `enable`选项
206 |
207 | ## 0.3.0 (2023-04-14)
208 |
209 | ### Features
210 |
211 | - 新增 `es_cluster_health` 采集 Easysearch 集群健康信息
212 | - 新增 `es_cluster_stats` 采集 Easysearch 集群 stats
213 | - 新增 `es_index_stats` 采集 Easysearch 索引 stats
214 | - 新增 `es_node_stats` 采集 Easysearch 节点 stats
215 | - 新增 `es_logs_processor` 采集 Easysearch 日志
216 |
--------------------------------------------------------------------------------
/docs/content.zh/docs/resources/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | weight: 100
3 | title: "其它资源"
4 | ---
5 |
6 | # 其它资源
7 |
8 | 这里是一些和 Agent 有关的外部有用资源。
9 |
10 | ## 文章
11 |
12 | - [Console 纳管 Elasticsearch 9(二):结合 Agent 实现日志监控](https://infinilabs.cn/blog/2025/console-manage-es9-logs/) | 2025-04-27
13 |
14 | ## 视频
15 |
16 | - [INFINI Agent 的快速安装及使用](https://www.bilibili.com/video/BV1yX4y1q7H8) | 2023-06-27
17 |
--------------------------------------------------------------------------------
/docs/content.zh/menu/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | headless: true
3 | ---
4 |
5 | - [**Documentation**]({{< relref "/docs/" >}})
6 |
--------------------------------------------------------------------------------
/docs/static/img/logo-en.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/static/img/logo-zh.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/generated_metrics_tasks.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package main
6 |
7 | import (
8 | "bytes"
9 | "fmt"
10 | "infini.sh/agent/lib/util"
11 | "infini.sh/framework/core/global"
12 | "infini.sh/framework/core/keystore"
13 | "infini.sh/framework/core/kv"
14 | "infini.sh/framework/core/model"
15 | util2 "infini.sh/framework/core/util"
16 | "infini.sh/framework/lib/go-ucfg"
17 | "infini.sh/framework/modules/configs/config"
18 | "os"
19 | "text/template"
20 | )
21 |
22 | func generatedMetricsTasksConfig() error {
23 | alreadyGenerated, err := kv.GetValue("app", []byte("auto_generated_metrics_tasks"))
24 | if err != nil {
25 | return fmt.Errorf("get kv auto_generated_metrics_tasks error: %w", err)
26 | }
27 | if string(alreadyGenerated) == "true" {
28 | return nil
29 | }
30 | nodeLabels := global.Env().SystemConfig.NodeConfig.Labels
31 | var clusterID string
32 | if len(nodeLabels) > 0 {
33 | clusterID = nodeLabels["cluster_id"]
34 | }
35 |
36 | schema := os.Getenv("schema")
37 | port := os.Getenv("http.port")
38 |
39 | // 如果从环境变量中获取不到 则使用默认值
40 | if schema == "" {
41 | schema = "https" //k8s easysearch is always be https protocol
42 | }
43 | if port == "" {
44 | port = "9200" //k8s easysearch port is always 9200
45 | }
46 | endpoint := fmt.Sprintf("%s://%s:%s", schema, util2.LocalAddress, port)
47 | v, err := keystore.GetValue("agent_user")
48 | if err != nil {
49 | return fmt.Errorf("get agent_user error: %w", err)
50 | }
51 | username := string(v)
52 | v, err = keystore.GetValue("agent_passwd")
53 | if err != nil {
54 | return fmt.Errorf("get agent_passwd error: %w", err)
55 | }
56 | password := string(v)
57 | auth := &model.BasicAuth{
58 | Username: username,
59 | Password: ucfg.SecretString(password),
60 | }
61 | clusterInfo, err := util.GetClusterVersion(endpoint, auth)
62 | if err != nil {
63 | return fmt.Errorf("get cluster info error: %w", err)
64 | }
65 | nodeUUID, nodeInfo, err := util.GetLocalNodeInfo(endpoint, auth)
66 | if err != nil {
67 | return fmt.Errorf("get local node info error: %w", err)
68 | }
69 | nodeLogsPath := nodeInfo.GetPathLogs()
70 | taskTpl := `configs.template:
71 | - name: "{{.cluster_id}}_{{.node_uuid}}"
72 | path: "./config/task_config.tpl"
73 | variable:
74 | TASK_ID: "{{.cluster_id}}_{{.node_uuid}}"
75 | CLUSTER_ID: "{{.cluster_id}}"
76 | CLUSTER_UUID: "{{.cluster_uuid}}"
77 | NODE_UUID: "{{.node_uuid}}"
78 | CLUSTER_VERSION: "{{.cluster_version}}"
79 | CLUSTER_DISTRIBUTION: "{{.cluster_distribution}}"
80 | CLUSTER_ENDPOINT: ["{{.cluster_endpoint}}"]
81 | CLUSTER_USERNAME: "{{.username}}"
82 | CLUSTER_PASSWORD: "{{.password}}"
83 | CLUSTER_LEVEL_TASKS_ENABLED: false
84 | NODE_LEVEL_TASKS_ENABLED: true
85 | LOG_TASKS_ENABLED: true
86 | NODE_LOGS_PATH: "{{.node_logs_path}}"
87 | #MANAGED: false`
88 | tpl, err := template.New("metrics_tasks").Parse(taskTpl)
89 | if err != nil {
90 | return fmt.Errorf("parse template error: %w", err)
91 | }
92 | var buf bytes.Buffer
93 | err = tpl.Execute(&buf, map[string]interface{}{
94 | "cluster_id": clusterID,
95 | "node_uuid": nodeUUID,
96 | "cluster_version": clusterInfo.Version.Number,
97 | "cluster_distribution": clusterInfo.Version.Distribution,
98 | "cluster_uuid": clusterInfo.ClusterUUID,
99 | "cluster_endpoint": endpoint,
100 | "username": username,
101 | "password": password,
102 | "node_logs_path": nodeLogsPath,
103 | })
104 | if err != nil {
105 | return fmt.Errorf("execute template error: %w", err)
106 | }
107 | err = config.SaveConfigStr("generated_metrics_tasks.yml", buf.String())
108 | if err != nil {
109 | return fmt.Errorf("save config error: %w", err)
110 | }
111 | err = kv.AddValue("app", []byte("auto_generated_metrics_tasks"), []byte("true"))
112 | if err != nil {
113 | return fmt.Errorf("add kv auto_generated_metrics_tasks error: %w", err)
114 | }
115 | return nil
116 | }
117 |
--------------------------------------------------------------------------------
/lib/process/discover_test.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package process
6 |
7 | import (
8 | "fmt"
9 | "github.com/stretchr/testify/assert"
10 | "infini.sh/framework/core/model"
11 | "testing"
12 | )
13 |
14 | func TestDiscover(t *testing.T) {
15 | pinfos, err := DiscoverESProcessors(ElasticFilter)
16 | if err != nil {
17 | t.Fatal(err)
18 | }
19 | fmt.Println(pinfos)
20 | }
21 |
22 | func TestTryGetESClusterInfo(t *testing.T) {
23 | addr := model.ListenAddr{
24 | Port: 9206,
25 | IP: "*",
26 | }
27 | _, info, err := tryGetESClusterInfo(addr)
28 | fmt.Println(info, err)
29 | }
30 |
31 | func TestParsePathValue(t *testing.T) {
32 | cmdline := "/opt/es/elasticsearch-7.7.1/jdk.app/Contents/Home/bin/java -Xshare:auto -Des.networkaddress.cache.ttl=60 -Des.networkaddress.cache.negative.ttl=10 -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -XX:+ShowCodeDetailsInExceptionMessages -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dio.netty.allocator.numDirectArenas=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.locale.providers=SPI,COMPAT -Xms1g -Xmx1g -XX:+UseG1GC -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -Djava.io.tmpdir=/var/folders/f6/2gqtmknx4jn357m0vv8151lc0000gn/T/elasticsearch-12464305898562497433 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=data -XX:ErrorFile=logs/hs_err_pid%p.log -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m -XX:MaxDirectMemorySize=536870912 -Des.path.home=/opt/es/elasticsearch-7.7.1 -Des.path.conf=/opt/es/elasticsearch-7.7.1/config -Des.distribution.flavor=default -Des.distribution.type=tar -Des.bundled_jdk=true -cp /opt/es/elasticsearch-7.7.1/lib/* org.elasticsearch.bootstrap.Elasticsearch"
33 | p, _ := parsePathValue(cmdline, `\-Des\.path\.home=(.*?)\s+`)
34 | fmt.Println(p)
35 | }
36 |
37 | func TestElasticFilter(t *testing.T) {
38 | cmds := []string{
39 | "/opt/es/elasticsearch-8.3.3/jdk.app/Contents/Home/bin/java -Des.networkaddress.cache.ttl=60 -Des.networkaddress.cache.negative.ttl=10 -Djava.security.manager=allow -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Dlog4j2.formatMsgNoLookups=true -Djava.locale.providers=SPI,COMPAT --add-opens=java.base/java.io=ALL-UNNAMED -XX:+UseG1GC -Djava.io.tmpdir=/var/folders/f6/2gqtmknx4jn357m0vv8151lc0000gn/T/elasticsearch-734978348591728761 -XX:+HeapDumpOnOutOfMemoryError -XX:+ExitOnOutOfMemoryError -XX:HeapDumpPath=data -XX:ErrorFile=logs/hs_err_pid%p.log -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m -Xms8192m -Xmx8192m -XX:MaxDirectMemorySize=4294967296 -XX:InitiatingHeapOccupancyPercent=30 -XX:G1ReservePercent=25 -Des.distribution.type=tar --module-path /opt/es/elasticsearch-8.3.3/lib -m org.elasticsearch.server/org.elasticsearch.bootstrap.Elasticsearch",
40 | "/opt/opensearch/opensearch-1.0.0/jdk/bin/java -Xshare:auto -Dopensearch.networkaddress.cache.ttl=60 -Dopensearch.networkaddress.cache.negative.ttl=10 -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -XX:+ShowCodeDetailsInExceptionMessages -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dio.netty.allocator.numDirectArenas=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.locale.providers=SPI,COMPAT -Xms1g -Xmx1g -XX:+UseG1GC -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -Djava.io.tmpdir=/tmp/opensearch-2153174206831327614 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=data -XX:ErrorFile=logs/hs_err_pid%p.log -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m -Dclk.tck=100 -Djdk.attach.allowAttachSelf=true -Djava.security.policy=/opt/opensearch/opensearch-1.0.0/plugins/opensearch-performance-analyzer/pa_config/opensearch_security.policy -XX:MaxDirectMemorySize=536870912 -Dopensearch.path.home=/opt/opensearch/opensearch-1.0.0 -Dopensearch.path.conf=/opt/opensearch/opensearch-1.0.0/config -Dopensearch.distribution.type=tar -Dopensearch.bundled_jdk=true -cp /opt/opensearch/opensearch-1.0.0/lib/* org.opensearch.bootstrap.OpenSearch -d",
41 | "/opt/search/packages/jdk/15.0.1//bin/java -Xshare:auto -Des.networkaddress.cache.ttl=60 -Des.networkaddress.cache.negative.ttl=10 -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -XX:+ShowCodeDetailsInExceptionMessages -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dio.netty.allocator.numDirectArenas=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.locale.providers=SPI,COMPAT -Xms1g -Xmx1g -XX:+UseG1GC -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -Djava.io.tmpdir=/tmp/easysearch-1966601411600284833 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=data -XX:ErrorFile=logs/hs_err_pid%p.log -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m -XX:MaxDirectMemorySize=536870912 -Des.path.home=/opt/search/instances/easysearch-3node/ccr_node/easysearch-1.1.2-SNAPSHOT -Des.path.conf=/opt/search/instances/easysearch-3node/ccr_node/easysearch-1.1.2-SNAPSHOT/config -Des.distribution.flavor=oss -Des.distribution.type=tar -Des.bundled_jdk=false -cp /opt/search/instances/easysearch-3node/ccr_node/easysearch-1.1.2-SNAPSHOT/lib/* org.easysearch.bootstrap.Easysearch -d",
42 | }
43 | for _, cmd := range cmds {
44 | assert.Equal(t, true, ElasticFilter(cmd))
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/lib/process/elastic.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package process
6 |
7 | import (
8 | "context"
9 | "errors"
10 | "fmt"
11 | log "github.com/cihub/seelog"
12 | util2 "infini.sh/agent/lib/util"
13 | "infini.sh/framework/core/elastic"
14 | "infini.sh/framework/core/model"
15 | "infini.sh/framework/core/util"
16 | "net/http"
17 | "regexp"
18 | "strconv"
19 | "strings"
20 | "time"
21 | )
22 |
23 | func DiscoverESNodeFromEndpoint(endpoint string, auth *model.BasicAuth) (*elastic.LocalNodeInfo, error) {
24 | localNodeInfo := elastic.LocalNodeInfo{}
25 | var (
26 | nodeInfo *elastic.NodesInfo
27 | err error
28 | nodeID string
29 | )
30 |
31 | nodeID, nodeInfo, err = util2.GetLocalNodeInfo(endpoint, auth)
32 | if err != nil {
33 | return nil, fmt.Errorf("get node info error: %w", err)
34 | }
35 |
36 | clusterInfo, err := util2.GetClusterVersion(endpoint, auth)
37 | if err != nil {
38 | return nil, fmt.Errorf("get cluster info error: %w", err)
39 | }
40 |
41 | localNodeInfo.NodeInfo = nodeInfo
42 | localNodeInfo.ClusterInfo = clusterInfo
43 | localNodeInfo.NodeUUID = nodeID
44 |
45 | return &localNodeInfo, nil
46 | }
47 |
48 | func getNodeSchema(schema, pubAddr string, auth *model.BasicAuth) string {
49 | url := fmt.Sprintf("%s://%s", schema, pubAddr)
50 | _, err := util2.GetClusterVersion(url, auth)
51 | if err != nil {
52 | log.Debug(err)
53 | if schema == "http" {
54 | return "https"
55 | }
56 | return "http"
57 | }
58 | return schema
59 | }
60 |
61 | func getListenAddresses(boundAddresses []string) []model.ListenAddr {
62 | var listenAddresses []model.ListenAddr
63 | for _, boundAddr := range boundAddresses {
64 | if idx := strings.LastIndex(boundAddr, ":"); idx > -1 {
65 | addr := model.ListenAddr{
66 | IP: boundAddr[:idx],
67 | }
68 | if idx < len(boundAddr)-1 {
69 | addr.Port, _ = strconv.Atoi(boundAddr[idx+1:])
70 | }
71 | listenAddresses = append(listenAddresses, addr)
72 | }
73 | }
74 | return listenAddresses
75 | }
76 |
// DiscoverESNode scans local processes that match ElasticFilter, probes
// each process's listen addresses, and returns every node that could be
// identified (keyed by node UUID) plus the processes that answered 401
// and therefore need credentials before they can be identified.
//
// NOTE(review): the cfgs parameter is not used in this function body.
func DiscoverESNode(cfgs []elastic.ElasticsearchConfig) (*elastic.DiscoveryResult, error) {
	nodes := map[string]*elastic.LocalNodeInfo{}
	processInfos, err := DiscoverESProcessors(ElasticFilter)
	if err != nil {
		return nil, err
	}

	unknowProcess := []model.ProcessInfo{}
	findPIds := map[int]string{}
	for _, processInfo := range processInfos {
		//try connect
		for _, addr := range processInfo.ListenAddresses {
			endpoint, info, err := tryGetESClusterInfo(addr)
			if info != nil && info.ClusterUUID != "" {

				// This err shadows tryGetESClusterInfo's err above; the
				// ErrUnauthorized check further down sees the outer one.
				nodeID, nodeInfo, err := util2.GetLocalNodeInfo(endpoint, nil)
				if err != nil {
					log.Error(err)
					continue
				}

				// Only accept the node when the PID reported by the ES API
				// matches the local process being probed.
				if nodeInfo.Process.Id == processInfo.PID {
					localNodeInfo := elastic.LocalNodeInfo{}
					localNodeInfo.NodeInfo = nodeInfo
					localNodeInfo.ClusterInfo = info
					localNodeInfo.NodeUUID = nodeID
					nodes[localNodeInfo.NodeUUID] = &localNodeInfo
					findPIds[localNodeInfo.NodeInfo.Process.Id] = localNodeInfo.NodeUUID
				}
				// A reachable endpoint settles this process either way;
				// no need to try its remaining listen addresses.
				break
			}
			// Security-enabled node: remember the process so the caller can
			// ask for credentials, then stop probing its addresses.
			if err == ErrUnauthorized {
				unknowProcess = append(unknowProcess, processInfo)
				break
			}
		}
	}

	// Drop "unknown" processes that were identified via another address.
	newProcess := []model.ProcessInfo{}
	for _, process := range unknowProcess {
		if _, ok := findPIds[process.PID]; !ok {
			newProcess = append(newProcess, process)
		}
	}

	result := elastic.DiscoveryResult{
		Nodes:          nodes,
		UnknownProcess: newProcess,
	}

	return &result, nil
}
129 |
// ErrUnauthorized is returned by tryGetESClusterInfo when an endpoint
// answers with HTTP 401, i.e. the cluster requires credentials.
var ErrUnauthorized = errors.New(http.StatusText(http.StatusUnauthorized))
131 |
132 | func tryGetESClusterInfo(addr model.ListenAddr) (string, *elastic.ClusterInformation, error) {
133 | var ip = addr.IP
134 | if ip == "*" {
135 | _, ip, _, _ = util.GetPublishNetworkDeviceInfo(".*")
136 | }
137 | schemas := []string{"http", "https"}
138 | clusterInfo := &elastic.ClusterInformation{}
139 | var endpoint string
140 | for _, schema := range schemas {
141 |
142 | if util.ContainStr(ip, ":") && !util.PrefixStr(ip, "[") {
143 | ip = fmt.Sprintf("[%s]", ip)
144 | }
145 |
146 | endpoint = fmt.Sprintf("%s://%s:%d", schema, ip, addr.Port)
147 | ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
148 | defer cancel()
149 |
150 | req := &util.Request{
151 | Method: util.Verb_GET,
152 | Url: endpoint,
153 | Context: ctx,
154 | }
155 | result, err := util.ExecuteRequest(req)
156 | if err != nil {
157 | if !strings.Contains(err.Error(), "transport connection broken") && !strings.Contains(err.Error(), "EOF") {
158 | return endpoint, nil, err
159 | }
160 | log.Debug(err)
161 | continue
162 | }
163 | if result.StatusCode == http.StatusUnauthorized {
164 | return endpoint, nil, ErrUnauthorized
165 | }
166 |
167 | err = util.FromJSONBytes(result.Body, &clusterInfo)
168 | if err == nil {
169 | return endpoint, clusterInfo, err
170 | }
171 | }
172 | return endpoint, clusterInfo, nil
173 | }
174 |
175 | func parseNodeInfoFromCmdline(cmdline string) (pathHome, pathConfig string, err error) {
176 | pathHome, err = parsePathValue(cmdline, `\-Des\.path\.home=(.*?)\s+`)
177 | if err != nil {
178 | return
179 | }
180 | pathConfig, err = parsePathValue(cmdline, `\-Des\.path\.conf=(.*?)\s+`)
181 | return pathHome, pathConfig, err
182 | }
183 |
// parsePathValue applies the given regular expression to cmdline and
// returns the first capture group, or "" when there is no match. A
// malformed pattern yields a compile error.
func parsePathValue(cmdline string, regStr string) (string, error) {
	pattern, err := regexp.Compile(regStr)
	if err != nil {
		return "", err
	}
	if m := pattern.FindStringSubmatch(cmdline); len(m) > 1 {
		return m[1], nil
	}
	return "", nil
}
195 |
--------------------------------------------------------------------------------
/lib/reader/common/bytes.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package common
19 |
20 | import (
21 | "bytes"
22 | "crypto/rand"
23 | "encoding/binary"
24 | "errors"
25 | "fmt"
26 | "io"
27 | "unicode/utf16"
28 | "unicode/utf8"
29 | )
30 |
31 | const (
32 | // 0xd800-0xdc00 encodes the high 10 bits of a pair.
33 | // 0xdc00-0xe000 encodes the low 10 bits of a pair.
34 | // the value is those 20 bits plus 0x10000.
35 | surr1 = 0xd800
36 | surr2 = 0xdc00
37 | surr3 = 0xe000
38 | replacementChar = '\uFFFD' // Unicode replacement character
39 | )
40 |
41 | // Byte order utilities
42 |
// BytesNtohs decodes a big-endian (network order) uint16 from the first
// two bytes of b. Like the previous manual indexing, it panics when
// len(b) < 2.
func BytesNtohs(b []byte) uint16 {
	return binary.BigEndian.Uint16(b)
}
46 |
// BytesNtohl decodes a big-endian (network order) uint32 from the first
// four bytes of b. Panics when len(b) < 4, matching the previous manual
// indexing.
func BytesNtohl(b []byte) uint32 {
	return binary.BigEndian.Uint32(b)
}
51 |
// BytesHtohl decodes a little-endian uint32 from the first four bytes of
// b. Panics when len(b) < 4, matching the previous manual indexing.
func BytesHtohl(b []byte) uint32 {
	return binary.LittleEndian.Uint32(b)
}
56 |
// BytesNtohll decodes a big-endian (network order) uint64 from the first
// eight bytes of b. Panics when len(b) < 8, matching the previous manual
// indexing.
func BytesNtohll(b []byte) uint64 {
	return binary.BigEndian.Uint64(b)
}
63 |
// IPv4Ntoa renders a uint32 IPv4 address in dotted-quad notation,
// most-significant octet first.
func IPv4Ntoa(ip uint32) string {
	a := byte(ip >> 24)
	b := byte(ip >> 16)
	c := byte(ip >> 8)
	d := byte(ip)
	return fmt.Sprintf("%d.%d.%d.%d", a, b, c, d)
}
70 |
// ReadString returns the bytes of s up to (not including) the first NUL
// byte, as a string. It errors when s contains no NUL terminator.
func ReadString(s []byte) (string, error) {
	end := bytes.IndexByte(s, 0)
	if end == -1 {
		return "", errors.New("No string found")
	}
	return string(s[:end]), nil
}
81 |
// RandomBytes returns length cryptographically-secure random bytes, or an
// error if the system's randomness source fails.
func RandomBytes(length int) ([]byte, error) {
	buf := make([]byte, length)
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}
93 |
// UTF16ToUTF8Bytes converts the UTF-16 little-endian byte buffer in to
// UTF-8 and writes the result to out. Conversion stops at the first NUL
// code unit (null-terminated input). The input must contain an even
// number of bytes. Write errors from out are ignored.
func UTF16ToUTF8Bytes(in []byte, out io.Writer) error {
	if len(in)%2 != 0 {
		return fmt.Errorf("input buffer must have an even length (length=%d)", len(in))
	}

	var runeBuf [4]byte
	var v1, v2 uint16
	for i := 0; i < len(in); i += 2 {
		// Decode one little-endian UTF-16 code unit.
		v1 = uint16(in[i]) | uint16(in[i+1])<<8
		// Stop at null-terminator.
		if v1 == 0 {
			return nil
		}

		switch {
		case v1 < surr1, surr3 <= v1:
			// Code unit outside the surrogate range: it is a complete
			// code point, encode it directly.
			n := utf8.EncodeRune(runeBuf[:], rune(v1))
			out.Write(runeBuf[:n])
		case surr1 <= v1 && v1 < surr2 && len(in) > i+2:
			// High surrogate with at least one more code unit available.
			v2 = uint16(in[i+2]) | uint16(in[i+3])<<8
			if surr2 <= v2 && v2 < surr3 {
				// valid surrogate sequence
				r := utf16.DecodeRune(rune(v1), rune(v2))
				n := utf8.EncodeRune(runeBuf[:], r)
				out.Write(runeBuf[:n])
			}
			// NOTE(review): when v2 is not a valid low surrogate, both
			// units are silently dropped (no replacement char emitted),
			// unlike the default branch below — confirm this is intended.
			i += 2
		default:
			// invalid surrogate sequence
			n := utf8.EncodeRune(runeBuf[:], replacementChar)
			out.Write(runeBuf[:n])
		}
	}
	return nil
}
129 |
// StringToUTF16Bytes encodes in as UTF-16 little-endian bytes, without a
// NUL terminator.
func StringToUTF16Bytes(in string) []byte {
	encoded := utf16.Encode([]rune(in))
	var out bytes.Buffer
	// Writing fixed-size uint16 values into a bytes.Buffer cannot fail.
	binary.Write(&out, binary.LittleEndian, encoded)
	return out.Bytes()
}
136 |
--------------------------------------------------------------------------------
/lib/reader/common/datetime.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package common
19 |
20 | import (
21 | "encoding/binary"
22 | "encoding/json"
23 | "errors"
24 | "fmt"
25 | "hash"
26 | "infini.sh/agent/lib/reader/common/dtfmt"
27 | "time"
28 | )
29 |
const (
	// Supported timestamp precisions, in increasing resolution.
	millisecPrecision TimestampPrecision = iota + 1
	microsecPrecision
	nanosecPrecision

	// DefaultTimestampPrecision is used when no precision is configured.
	DefaultTimestampPrecision = millisecPrecision

	// Go reference-time layouts tried by ParseTime, in this order.
	tsLayoutMillis = "2006-01-02T15:04:05.000Z"
	tsLayoutMicros = "2006-01-02T15:04:05.000000Z"
	tsLayoutNanos  = "2006-01-02T15:04:05.000000000Z"

	// dtfmt pattern strings for formatting in UTC (literal 'Z' suffix)...
	millisecPrecisionFmt = "yyyy-MM-dd'T'HH:mm:ss.fff'Z'"
	microsecPrecisionFmt = "yyyy-MM-dd'T'HH:mm:ss.ffffff'Z'"
	nanosecPrecisionFmt  = "yyyy-MM-dd'T'HH:mm:ss.fffffffff'Z'"

	// ...and with the local zone offset designator instead.
	localMillisecPrecisionFmt = "yyyy-MM-dd'T'HH:mm:ss.fffz"
	localMicrosecPrecisionFmt = "yyyy-MM-dd'T'HH:mm:ss.ffffffz"
	localNanosecPrecisionFmt  = "yyyy-MM-dd'T'HH:mm:ss.fffffffffz"
)
49 |
// timestampFmt stores the format strings for both UTC and local
// form of a specific precision.
type timestampFmt struct {
	utc   string
	local string
}
56 |
var (
	// defaultParseFormats lists the layouts ParseTime tries, in order.
	defaultParseFormats = []string{
		tsLayoutMillis,
		tsLayoutMicros,
		tsLayoutNanos,
	}

	// precisions maps each precision to its UTC/local format pair.
	precisions = map[TimestampPrecision]timestampFmt{
		millisecPrecision: timestampFmt{utc: millisecPrecisionFmt, local: localMillisecPrecisionFmt},
		microsecPrecision: timestampFmt{utc: microsecPrecisionFmt, local: localMicrosecPrecisionFmt},
		nanosecPrecision:  timestampFmt{utc: nanosecPrecisionFmt, local: localNanosecPrecisionFmt},
	}

	// tsFmt is the selected timestamp format
	tsFmt = precisions[DefaultTimestampPrecision]
	// timeFormatter is a datetime formatter with the selected timestamp precision, in UTC.
	timeFormatter = dtfmt.MustNewFormatter(tsFmt.utc)
)
75 |
// Time is an abstraction for the time.Time type
type Time time.Time

// TimestampPrecision selects how many fractional-second digits are used
// when formatting (millisecond, microsecond or nanosecond).
type TimestampPrecision uint8

// TimestampConfig is the configuration wrapper for the timestamp
// precision; see TimestampPrecision.Unpack for accepted values.
type TimestampConfig struct {
	Precision TimestampPrecision `config:"precision"`
}
84 |
85 | func defaultTimestampConfig() TimestampConfig {
86 | return TimestampConfig{Precision: DefaultTimestampPrecision}
87 | }
88 |
89 | func (p *TimestampPrecision) Unpack(v string) error {
90 | switch v {
91 | case "millisecond", "":
92 | *p = millisecPrecision
93 | case "microsecond":
94 | *p = microsecPrecision
95 | case "nanosecond":
96 | *p = nanosecPrecision
97 | default:
98 | return fmt.Errorf("invalid timestamp precision %s, available options: millisecond, microsecond, nanosecond", v)
99 | }
100 | return nil
101 | }
102 |
103 | // TimestampFormat returns the datettime format string
104 | // with the configured timestamp precision. It can return
105 | // either the UTC format or the local one.
106 | func TimestampFormat(local bool) string {
107 | if local {
108 | return tsFmt.local
109 | }
110 | return tsFmt.utc
111 | }
112 |
113 | // MarshalJSON implements json.Marshaler interface.
114 | // The time is a quoted string in the JsTsLayout format.
115 | func (t Time) MarshalJSON() ([]byte, error) {
116 | str, _ := timeFormatter.Format(time.Time(t).UTC())
117 | return json.Marshal(str)
118 | }
119 |
120 | // UnmarshalJSON implements js.Unmarshaler interface.
121 | // The time is expected to be a quoted string in TsLayout
122 | // format.
123 | func (t *Time) UnmarshalJSON(data []byte) (err error) {
124 | if data[0] != []byte(`"`)[0] || data[len(data)-1] != []byte(`"`)[0] {
125 | return errors.New("not quoted")
126 | }
127 | *t, err = ParseTime(string(data[1 : len(data)-1]))
128 | return err
129 | }
130 |
131 | func (t Time) Hash32(h hash.Hash32) error {
132 | err := binary.Write(h, binary.LittleEndian, time.Time(t).UnixNano())
133 | return err
134 | }
135 |
136 | // ParseTime parses a time in the MillisTsLayout, then micros and finally nanos.
137 | func ParseTime(timespec string) (Time, error) {
138 | var err error
139 | var t time.Time
140 |
141 | for _, layout := range defaultParseFormats {
142 | t, err = time.Parse(layout, timespec)
143 | if err == nil {
144 | break
145 | }
146 | }
147 |
148 | return Time(t), err
149 | }
150 |
151 | func (t Time) String() string {
152 | str, _ := timeFormatter.Format(time.Time(t).UTC())
153 | return str
154 | }
155 |
156 | // MustParseTime is a convenience equivalent of the ParseTime function
157 | // that panics in case of errors.
158 | func MustParseTime(timespec string) Time {
159 | ts, err := ParseTime(timespec)
160 | if err != nil {
161 | panic(err)
162 | }
163 |
164 | return ts
165 | }
166 |
--------------------------------------------------------------------------------
/lib/reader/common/dtfmt/ctx.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package dtfmt
19 |
20 | import (
21 | "time"
22 | )
23 |
// ctx stores pre-computed time fields used by the formatter.
// initTime fills in only the field groups requested via ctxConfig.
type ctx struct {
	year    int
	month   time.Month
	day     int
	weekday time.Weekday
	yearday int
	isoWeek, isoYear int // ISO 8601 week number and week-based year

	hour, min, sec int
	nano           int

	tzOffset int // zone offset in seconds east of UTC (from time.Time.Zone)

	buf []byte // scratch buffer — presumably reused across Format calls; verify at call sites
}
40 |
// ctxConfig records which groups of time fields a compiled format program
// needs, so ctx.initTime computes only those.
type ctxConfig struct {
	date     bool
	clock    bool
	weekday  bool
	yearday  bool
	nano     bool
	iso      bool
	tzOffset bool
}
50 |
51 | func (c *ctx) initTime(config *ctxConfig, t time.Time) {
52 | if config.date {
53 | c.year, c.month, c.day = t.Date()
54 | }
55 | if config.clock {
56 | c.hour, c.min, c.sec = t.Clock()
57 | }
58 | if config.iso {
59 | c.isoYear, c.isoWeek = t.ISOWeek()
60 | }
61 |
62 | if config.nano {
63 | c.nano = t.Nanosecond()
64 | }
65 |
66 | if config.yearday {
67 | c.yearday = t.YearDay()
68 | }
69 |
70 | if config.weekday {
71 | c.weekday = t.Weekday()
72 | }
73 |
74 | if config.tzOffset {
75 | _, c.tzOffset = t.Zone()
76 | }
77 | }
78 |
// The enable* helpers below mark individual field groups as required in
// the ctxConfig; they are called from the elements' requires() hooks while
// a pattern is being compiled.

func (c *ctxConfig) enableDate() {
	c.date = true
}

func (c *ctxConfig) enableClock() {
	c.clock = true
}

func (c *ctxConfig) enableNano() {
	c.nano = true
}

func (c *ctxConfig) enableWeekday() {
	c.weekday = true
}

func (c *ctxConfig) enableYearday() {
	c.yearday = true
}

func (c *ctxConfig) enableISO() {
	c.iso = true
}

func (c *ctxConfig) enableTimeZoneOffset() {
	c.tzOffset = true
}
106 |
--------------------------------------------------------------------------------
/lib/reader/common/dtfmt/doc.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | // Package dtfmt provides time formatter support with pattern syntax mostly
19 | // similar to joda DateTimeFormat. The pattern syntax supported is a subset
20 | // (mostly compatible) with joda DateTimeFormat.
21 | //
22 | // Symbol Meaning Type Supported Examples
23 | // ------ ------- ------- --------- -------
24 | // G era text no AD
25 | // C century of era (>=0) number no 20
26 | // Y year of era (>=0) year yes 1996
27 | //
28 | // x weekyear year yes 1996
29 | // w week of weekyear number yes 27
30 | // e day of week number yes 2
31 | // E day of week text yes Tuesday; Tue
32 | //
33 | // y year year yes 1996
34 | // D day of year number yes 189
35 | // M month of year month yes July; Jul; 07
36 | // d day of month number yes 10
37 | //
38 | // a halfday of day text yes PM
39 | // K hour of halfday (0~11) number yes 0
40 | // h clockhour of halfday (1~12) number yes 12
41 | //
42 | // H hour of day (0~23) number yes 0
43 | // k clockhour of day (1~24) number yes 24
44 | // m minute of hour number yes 30
45 | // s second of minute number yes 55
46 | // S fraction of second nanoseconds yes 978000
47 | // f fraction of seconds nanoseconds yes 123456789
48 | // multiple of 3
49 | // z time zone text no Pacific Standard Time; PST
50 | // Z time zone offset/id zone no -0800; -08:00; America/Los_Angeles
51 | //
52 | // ' escape for text delimiter
53 | // '' single quote literal
54 | //
55 | // The format is based on pattern letter count. Any character not in the range
56 | // [a-z][A-Z] is interpreted as literal and copied into final string as is.
57 | // Arbitrary Literals can also be written using single quotes `'`
58 | //
59 | // Types: Notes:
60 | // ------ ------
61 | // text Use full form if number of letters is >= 4.
62 | // Otherwise a short form is used (if available).
63 | //
64 | // number Minimum number of digits depends on number of letters.
65 | // Shorter numbers are zero-padded.
66 | //
67 | // year mostly like number. If Pattern length is 2,
68 | // the year will be displayed as zero-based year
69 | // of the century (modulo 100)
70 | //
71 | // month If pattern length >= 3, formatting is according to
72 | // text type. Otherwise number type
73 | // formatting rules are applied.
74 | //
75 | // millis Not yet supported
76 | //
77 | // zone Not yet supported
78 | //
79 | // literal Literals are copied as is into formatted string
80 | package dtfmt
81 |
--------------------------------------------------------------------------------
/lib/reader/common/dtfmt/dtfmt.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package dtfmt
19 |
20 | import (
21 | "time"
22 | )
23 |
24 | // Format applies the format-pattern to the given timestamp.
25 | // Returns the formatted string or an error if pattern is invalid.
26 | func Format(t time.Time, pattern string) (string, error) {
27 | f, err := NewFormatter(pattern)
28 | if err != nil {
29 | return "", err
30 | }
31 | return f.Format(t)
32 | }
33 |
--------------------------------------------------------------------------------
/lib/reader/common/dtfmt/elems.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package dtfmt
19 |
20 | import (
21 | "errors"
22 | "fmt"
23 | "unicode/utf8"
24 | )
25 |
// element is a single parsed component of a format pattern. It declares
// which ctx field groups it needs (requires), how many output bytes it
// may produce (estimateSize), and compiles itself into prog instructions.
type element interface {
	requires(c *ctxConfig) error
	estimateSize() int
	compile() (prog, error)
}

// runeLiteral emits a single literal rune.
type runeLiteral struct {
	r rune
}

// stringLiteral emits a fixed byte string.
type stringLiteral struct {
	s []byte
}

// unpaddedNumber emits a numeric field without zero padding.
type unpaddedNumber struct {
	ft        fieldType
	maxDigits int
	signed    bool
}

// paddedNumber emits a numeric field zero-padded to minDigits; divExp and
// fractDigits drive the scaled/fractional opcode variants (see compile).
type paddedNumber struct {
	ft                                fieldType
	divExp                            int
	minDigits, maxDigits, fractDigits int
	signed                            bool
}

// textField emits a field's textual form (e.g. month or weekday name),
// optionally in its short (abbreviated) variant.
type textField struct {
	ft    fieldType
	short bool
}

// twoDigitYear emits a year as two digits (see the opTwoDigit opcode).
type twoDigitYear struct {
	ft fieldType
}

// paddingZeros emits a run of literal zero characters.
type paddingZeros struct {
	count int
}
65 |
// Literals need no time fields; their size is their own encoded length.
func (runeLiteral) requires(*ctxConfig) error { return nil }
func (runeLiteral) estimateSize() int         { return 1 }

func (stringLiteral) requires(*ctxConfig) error { return nil }
func (s stringLiteral) estimateSize() int       { return len(s.s) }

// Numeric elements require whatever ctx field groups their field type
// reads; see numRequires.
func (n unpaddedNumber) requires(c *ctxConfig) error {
	return numRequires(c, n.ft)
}

func (n unpaddedNumber) estimateSize() int {
	return numSize(n.maxDigits, n.signed)
}

func (n paddedNumber) requires(c *ctxConfig) error {
	return numRequires(c, n.ft)
}

func (n paddedNumber) estimateSize() int {
	return numSize(n.maxDigits, n.signed)
}

func (n twoDigitYear) requires(c *ctxConfig) error {
	return numRequires(c, n.ft)
}

func (twoDigitYear) estimateSize() int { return 2 }

// numSize reserves one extra output byte for a possible sign.
func numSize(digits int, signed bool) int {
	if signed {
		return digits + 1
	}
	return digits
}
100 |
// numRequires enables the ctx field group backing the numeric field type
// ft. Unrecognized field types are ignored. It always returns nil.
func numRequires(c *ctxConfig, ft fieldType) error {
	switch ft {
	case ftYear, ftMonthOfYear, ftDayOfMonth:
		c.enableDate()

	case ftWeekyear, ftWeekOfWeekyear:
		c.enableISO()

	case ftDayOfYear:
		c.enableYearday()

	case ftDayOfWeek:
		c.enableWeekday()

	case ftHalfdayOfDay,
		ftHourOfHalfday,
		ftClockhourOfHalfday,
		ftClockhourOfDay,
		ftHourOfDay,
		ftMinuteOfDay,
		ftMinuteOfHour,
		ftSecondOfDay,
		ftSecondOfMinute:
		c.enableClock()

	case ftNanoOfSecond:
		c.enableNano()
	}

	return nil
}
132 |
// requires enables the ctx field group needed to render the textual form
// of f.ft; only a few field types have a textual representation, any
// other type is an error.
func (f textField) requires(c *ctxConfig) error {
	switch f.ft {
	case ftHalfdayOfDay:
		c.enableClock()
	case ftMonthOfYear:
		c.enableDate()
	case ftDayOfWeek:
		c.enableWeekday()
	case ftTimeZoneOffset:
		c.enableTimeZoneOffset()
	default:
		return fmt.Errorf("time field %v not supported by text", f.ft)
	}
	return nil
}
148 |
// estimateSize returns the maximum rendered width of the text field.
func (f textField) estimateSize() int {
	switch f.ft {
	case ftHalfdayOfDay:
		return 2 // "AM" / "PM"
	case ftDayOfWeek:
		if f.short {
			return 3
		}
		return 9 // max(weekday) = len(Wednesday)
	case ftMonthOfYear:
		if f.short {
			return 6
		}
		return 9 // max(month) = len(September)
	case ftTimeZoneOffset:
		return 6 // "+hh:mm"
	default:
		return 0
	}
}
169 |
170 | func (r runeLiteral) compile() (prog, error) {
171 | switch utf8.RuneLen(r.r) {
172 | case -1:
173 | return prog{}, errors.New("invalid rune")
174 | }
175 |
176 | var tmp [8]byte
177 | l := utf8.EncodeRune(tmp[:], r.r)
178 | return makeCopy(tmp[:l])
179 | }
180 |
// compile emits a copy instruction for the literal bytes.
func (s stringLiteral) compile() (prog, error) {
	return makeCopy(s.s)
}

// compile emits the unpadded numeric-field instruction.
func (n unpaddedNumber) compile() (prog, error) {
	return makeProg(opNum, byte(n.ft))
}

// compile picks the numeric opcode variant matching the padding/scaling
// options: fractional digits, plain padding, or padding with a divisor.
func (n paddedNumber) compile() (prog, error) {
	switch {
	case n.fractDigits != 0:
		return makeProg(opExtNumFractPadded, byte(n.ft), byte(n.divExp), byte(n.maxDigits), byte(n.fractDigits))
	case n.divExp == 0:
		return makeProg(opNumPadded, byte(n.ft), byte(n.maxDigits))
	default:
		return makeProg(opExtNumPadded, byte(n.ft), byte(n.divExp), byte(n.maxDigits))
	}
}

// compile emits the two-digit year instruction.
func (n twoDigitYear) compile() (prog, error) {
	return makeProg(opTwoDigit, byte(n.ft))
}

// compile emits the short or long text-field instruction.
func (f textField) compile() (prog, error) {
	if f.short {
		return makeProg(opTextShort, byte(f.ft))
	}
	return makeProg(opTextLong, byte(f.ft))
}

// paddingZeros needs no time fields; it renders count literal zeros.
func (p paddingZeros) requires(c *ctxConfig) error { return nil }
func (p paddingZeros) estimateSize() int           { return p.count }
func (p paddingZeros) compile() (prog, error) {
	return makeProg(opZeros, byte(p.count))
}
216 |
--------------------------------------------------------------------------------
/lib/reader/common/dtfmt/fields.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package dtfmt
19 |
20 | import (
21 | "errors"
22 | )
23 |
// fieldType enumerates the time fields a format element can render.
type fieldType uint8

const (
	ftYear fieldType = iota
	ftDayOfYear
	ftMonthOfYear
	ftDayOfMonth
	ftWeekyear       // ISO week-based year
	ftWeekOfWeekyear // ISO week number
	ftDayOfWeek
	ftHalfdayOfDay       // AM/PM
	ftHourOfHalfday      // 0-11
	ftClockhourOfHalfday // 1-12
	ftClockhourOfDay     // 1-24
	ftHourOfDay          // 0-23
	ftMinuteOfDay
	ftMinuteOfHour
	ftSecondOfDay
	ftSecondOfMinute
	ftTimeZoneOffset
	ftNanoOfSecond
)
46 |
47 | func getIntField(ft fieldType, ctx *ctx) int {
48 | switch ft {
49 | case ftYear:
50 | return ctx.year
51 |
52 | case ftDayOfYear:
53 | return ctx.yearday
54 |
55 | case ftMonthOfYear:
56 | return int(ctx.month)
57 |
58 | case ftDayOfMonth:
59 | return ctx.day
60 |
61 | case ftWeekyear:
62 | return ctx.isoYear
63 |
64 | case ftWeekOfWeekyear:
65 | return ctx.isoWeek
66 |
67 | case ftDayOfWeek:
68 | return int(ctx.weekday)
69 |
70 | case ftHalfdayOfDay:
71 | if ctx.hour < 12 {
72 | return 0 // AM
73 | }
74 | return 1 // PM
75 |
76 | case ftHourOfHalfday:
77 | if ctx.hour < 12 {
78 | return ctx.hour
79 | }
80 | return ctx.hour - 12
81 |
82 | case ftClockhourOfHalfday:
83 | if ctx.hour < 12 {
84 | return ctx.hour + 1
85 | }
86 | return ctx.hour - 12 + 1
87 |
88 | case ftClockhourOfDay:
89 | return ctx.hour + 1
90 |
91 | case ftHourOfDay:
92 | return ctx.hour
93 |
94 | case ftMinuteOfDay:
95 | return ctx.hour*60 + ctx.min
96 |
97 | case ftMinuteOfHour:
98 | return ctx.min
99 |
100 | case ftSecondOfDay:
101 | return (ctx.hour*60+ctx.min)*60 + ctx.sec
102 |
103 | case ftSecondOfMinute:
104 | return ctx.sec
105 |
106 | case ftNanoOfSecond:
107 | return ctx.nano
108 | }
109 |
110 | return 0
111 |
112 | }
113 |
114 | func getTextField(ft fieldType, ctx *ctx) (string, error) {
115 | switch ft {
116 | case ftHalfdayOfDay:
117 | if ctx.hour < 12 {
118 | return "AM", nil
119 | }
120 | return "PM", nil
121 | case ftDayOfWeek:
122 | return ctx.weekday.String(), nil
123 | case ftMonthOfYear:
124 | return ctx.month.String(), nil
125 | case ftTimeZoneOffset:
126 | return tzOffsetString(ctx)
127 | default:
128 | return "", errors.New("no text field")
129 | }
130 | }
131 |
132 | func tzOffsetString(ctx *ctx) (string, error) {
133 | buf := make([]byte, 6)
134 |
135 | tzOffsetMinutes := ctx.tzOffset / 60 // convert to minutes
136 | if tzOffsetMinutes >= 0 {
137 | buf[0] = '+'
138 | } else {
139 | buf[0] = '-'
140 | tzOffsetMinutes = -tzOffsetMinutes
141 | }
142 |
143 | tzOffsetHours := tzOffsetMinutes / 60
144 | tzOffsetMinutes = tzOffsetMinutes % 60
145 | buf[1] = byte(tzOffsetHours/10) + '0'
146 | buf[2] = byte(tzOffsetHours%10) + '0'
147 | buf[3] = ':'
148 | buf[4] = byte(tzOffsetMinutes/10) + '0'
149 | buf[5] = byte(tzOffsetMinutes%10) + '0'
150 | return string(buf), nil
151 | }
152 |
153 | func getTextFieldShort(ft fieldType, ctx *ctx) (string, error) {
154 | switch ft {
155 | case ftHalfdayOfDay:
156 | if ctx.hour < 12 {
157 | return "AM", nil
158 | }
159 | return "PM", nil
160 | case ftDayOfWeek:
161 | return ctx.weekday.String()[:3], nil
162 | case ftMonthOfYear:
163 | return ctx.month.String()[:3], nil
164 | default:
165 | return "", errors.New("no text field")
166 | }
167 | }
168 |
--------------------------------------------------------------------------------
/lib/reader/common/dtfmt/prog.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package dtfmt
19 |
20 | import (
21 | "errors"
22 | )
23 |
// prog is a compiled date/time formatting program: a flat byte string of
// opcodes (see the op* constants below), each followed inline by its
// operand bytes. It is executed by eval.
type prog struct {
	p []byte
}
27 |
// Opcodes of the formatting byte code. The bracketed comments describe
// the full instruction layout: the opcode byte followed by its operand
// bytes in the program stream.
const (
	opNone byte = iota
	opCopy1 // copy next byte
	opCopy2 // copy next 2 bytes
	opCopy3 // copy next 3 bytes
	opCopy4 // copy next 4 bytes
	opCopyShort // [op, len, content[len]]
	opCopyLong // [op, len1, len, content[len1<<8 + len]]
	opNum // [op, ft]
	opNumPadded // [op, ft, digits]
	opExtNumPadded // [op, ft, divExp, digits]
	opExtNumFractPadded // [op, ft, divExp, digits, fractDigits]
	opZeros // [op, count]
	opTwoDigit // [op, ft]
	opTextShort // [op, ft]
	opTextLong // [op, ft]
)
45 |
// pow10Table caches the powers of ten 10^0 .. 10^9, used by the
// opExtNum* opcodes to scale field values by a decimal exponent.
var pow10Table [10]int

func init() {
	for i, x := 0, 1; i < len(pow10Table); i, x = i+1, x*10 {
		pow10Table[i] = x
	}
}
55 |
// eval executes the compiled formatting program, appending the rendered
// output to bytes and returning the extended buffer. ctx supplies the
// pre-computed date/time field values. The index i is the program
// counter; every opcode advances it past its own operand bytes.
func (p prog) eval(bytes []byte, ctx *ctx) ([]byte, error) {
	for i := 0; i < len(p.p); {
		op := p.p[i]
		i++

		switch op {
		case opNone:
			// no-op

		case opCopy1:
			bytes = append(bytes, p.p[i])
			i++
		case opCopy2:
			bytes = append(bytes, p.p[i], p.p[i+1])
			i += 2
		case opCopy3:
			bytes = append(bytes, p.p[i], p.p[i+1], p.p[i+2])
			i += 3
		case opCopy4:
			bytes = append(bytes, p.p[i], p.p[i+1], p.p[i+2], p.p[i+3])
			i += 4
		case opCopyShort:
			// one length byte, then that many literal bytes
			l := int(p.p[i])
			i++
			bytes = append(bytes, p.p[i:i+l]...)
			i += l
		case opCopyLong:
			// two length bytes (big endian), then the literal bytes
			l := int(p.p[i])<<8 | int(p.p[i+1])
			i += 2
			bytes = append(bytes, p.p[i:i+l]...)
			i += l
		case opNum:
			ft := fieldType(p.p[i])
			i++
			v := getIntField(ft, ctx)
			bytes = appendUnpadded(bytes, v)
		case opNumPadded:
			ft, digits := fieldType(p.p[i]), int(p.p[i+1])
			i += 2
			v := getIntField(ft, ctx)
			bytes = appendPadded(bytes, v, digits)
		case opExtNumPadded:
			// field value is divided by 10^divExp before padding
			ft, divExp, digits := fieldType(p.p[i]), int(p.p[i+1]), int(p.p[i+2])
			div := pow10Table[divExp]
			i += 3
			v := getIntField(ft, ctx)
			bytes = appendPadded(bytes, v/div, digits)
		case opExtNumFractPadded:
			ft, divExp, digits, fractDigits := fieldType(p.p[i]), int(p.p[i+1]), int(p.p[i+2]), int(p.p[i+3])
			div := pow10Table[divExp]
			i += 4
			v := getIntField(ft, ctx)
			bytes = appendFractPadded(bytes, v/div, digits, fractDigits)
		case opZeros:
			digits := int(p.p[i])
			i++
			for x := 0; x < digits; x++ {
				bytes = append(bytes, '0')
			}
		case opTwoDigit:
			// two-digit field, e.g. a 2-digit year (value modulo 100)
			ft := fieldType(p.p[i])
			i++
			v := getIntField(ft, ctx)
			bytes = appendPadded(bytes, v%100, 2)
		case opTextShort:
			ft := fieldType(p.p[i])
			i++
			s, err := getTextFieldShort(ft, ctx)
			if err != nil {
				return bytes, err
			}
			bytes = append(bytes, s...)
		case opTextLong:
			ft := fieldType(p.p[i])
			i++
			s, err := getTextField(ft, ctx)
			if err != nil {
				return bytes, err
			}
			bytes = append(bytes, s...)
		default:
			return bytes, errors.New("unknown opcode")
		}
	}

	return bytes, nil
}
142 |
143 | func makeProg(b ...byte) (prog, error) {
144 | return prog{b}, nil
145 | }
146 |
147 | func makeCopy(b []byte) (prog, error) {
148 | l := len(b)
149 | switch l {
150 | case 0:
151 | return prog{}, nil
152 | case 1:
153 | return makeProg(opCopy1, b[0])
154 | case 2:
155 | return makeProg(opCopy2, b[0], b[1])
156 | case 3:
157 | return makeProg(opCopy2, b[0], b[1], b[2])
158 | case 4:
159 | return makeProg(opCopy2, b[0], b[1], b[2], b[3])
160 | }
161 |
162 | if l < 256 {
163 | return prog{append([]byte{opCopyShort, byte(l)}, b...)}, nil
164 | }
165 | if l < (1 << 16) {
166 | l1 := byte(l >> 8)
167 | l2 := byte(l)
168 | return prog{append([]byte{opCopyLong, l1, l2}, b...)}, nil
169 | }
170 |
171 | return prog{}, errors.New("literal too long")
172 | }
173 |
--------------------------------------------------------------------------------
/lib/reader/common/dtfmt/util.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package dtfmt
19 |
20 | import (
21 | "strconv"
22 | )
23 |
// appendUnpadded appends the decimal string representation of i to bs
// and returns the extended buffer.
func appendUnpadded(bs []byte, i int) []byte {
	return append(bs, strconv.Itoa(i)...)
}
29 |
// appendPadded appends a number value as string to the buffer. The string will
// be prefixed with '0' in case the encoded string value takes less than
// 'digits' bytes. Negative values are encoded with a leading '-'; the
// zero padding then applies to the magnitude.
//
// for example:
//
//	appendPadded(..., 10, 5)    -> 00010
//	appendPadded(..., 12345, 5) -> 12345
//	appendPadded(..., -7, 3)    -> -007
func appendPadded(bs []byte, val, digits int) []byte {
	if val < 0 {
		bs = append(bs, '-')
		// Fix: was `val = -1`, which discarded the magnitude and emitted
		// a garbage byte for every negative value.
		val = -val
	}

	// compute number of initial padding zeroes
	var padDigits int
	switch {
	case val < 10:
		padDigits = digits - 1
	case val < 100:
		padDigits = digits - 2
	case val < 1000:
		padDigits = digits - 3
	case val < 10000:
		padDigits = digits - 4
	case val < 100000:
		padDigits = digits - 5
	case val < 1000000:
		padDigits = digits - 6
	case val < 10000000:
		padDigits = digits - 7
	case val < 100000000:
		padDigits = digits - 8
	case val < 1000000000:
		padDigits = digits - 9
	default:
		// count digits of values >= 1e9; the loop condition must be
		// `>= 10` (not `> 10`) or exact powers of ten are under-counted.
		padDigits = digits - 1
		for tmp := val; tmp >= 10; tmp = tmp / 10 {
			padDigits--
		}
	}
	for i := 0; i < padDigits; i++ {
		bs = append(bs, '0')
	}

	// encode value; single digits take the fast path
	if val < 10 {
		return append(bs, byte(val)+'0')
	}
	return strconv.AppendInt(bs, int64(val), 10)
}
81 |
// appendFractPadded appends a number value as string to the buffer.
// The string will be prefixed with '0' in case the value is smaller than
// a value that can be represented with 'digits'.
// Trailing zeroes at the end will be removed, such that only a multiple of fractSz
// digits will be printed. If the value is 0, a total of 'fractSz' zeros will
// be printed.
//
// for example:
//
//	appendFractPadded(..., 0, 9, 3)         -> "000"
//	appendFractPadded(..., 123000, 9, 3)    -> "000123"
//	appendFractPadded(..., 120000, 9, 3)    -> "000120"
//	appendFractPadded(..., 120000010, 9, 3) -> "000120010"
//	appendFractPadded(..., 123456789, 6, 3) -> "123456"
func appendFractPadded(bs []byte, val, digits, fractSz int) []byte {
	// degenerate cases: no fractional grouping requested, or the group
	// size covers all digits anyway -> plain zero-padded encoding
	if fractSz == 0 || digits <= fractSz {
		return appendPadded(bs, val, digits)
	}

	initalLen := len(bs)
	bs = appendPadded(bs, val, digits)

	// find and remove trailing zeroes, such that a multiple of fractSz is still
	// serialized

	// find index range of last digits in buffer, such that a multiple of fractSz
	// will be kept if the range of digits is removed.
	// invariant: 0 <= end - begin <= fractSz
	end := len(bs)
	digits = end - initalLen // actual number of digits emitted (may exceed the requested width)
	begin := initalLen + ((digits-1)/fractSz)*fractSz

	// remove trailing zeros, such that a multiple of fractSz digits will be
	// present in the final buffer. At minimum fractSz digits will always be
	// reported.
	for {
		if !allZero(bs[begin:end]) {
			break
		}

		// drop the all-zero group and step back one group
		digits -= (end - begin)
		end = begin
		begin -= fractSz

		if digits <= fractSz {
			break
		}
	}

	return bs[:end]
}
133 |
// allZero reports whether every byte in buf is the ASCII digit '0'.
// An empty (or nil) buffer counts as all zero.
func allZero(buf []byte) bool {
	for i := 0; i < len(buf); i++ {
		if buf[i] != '0' {
			return false
		}
	}
	return true
}
142 |
--------------------------------------------------------------------------------
/lib/reader/common/match/compile.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package match
19 |
20 | import (
21 | "regexp"
22 | "regexp/syntax"
23 | )
24 |
// compile lowers a parsed regular expression into a stringMatcher. It
// pattern-matches the AST against a set of common shapes (plain
// literals, alternations of literals, anchored prefixes, date-like
// number patterns, ...) and returns a cheap specialized matcher for
// each; anything unrecognized falls back to the stdlib regexp engine.
func compile(r *syntax.Regexp) (stringMatcher, error) {
	switch {
	case isSubstringLiteral(r):
		s := string(r.Rune)
		return &substringMatcher{s, []byte(s)}, nil

	case isExactLiteral(r):
		// anchored literal: Sub[1] is the literal between ^ and $
		s := string(r.Sub[1].Rune)
		return &equalsMatcher{s, []byte(s)}, nil

	case isAltLiterals(r):
		var literals [][]byte
		for _, sub := range r.Sub {
			literals = append(literals, []byte(string(sub.Rune)))
		}
		return &altSubstringMatcher{literals}, nil

	case isOneOfLiterals(r):
		var literals [][]byte
		for _, sub := range r.Sub[1].Sub {
			literals = append(literals, []byte(string(sub.Rune)))
		}
		return &oneOfMatcher{literals}, nil

	case isPrefixLiteral(r):
		s := []byte(string(r.Sub[1].Rune))
		return &prefixMatcher{s}, nil

	case isPrefixAltLiterals(r):
		var literals [][]byte
		for _, sub := range r.Sub[1].Sub {
			literals = append(literals, []byte(string(sub.Rune)))
		}
		return &altPrefixMatcher{literals}, nil

	case isPrefixNumDate(r):
		return compilePrefixNumDate(r)

	case isEmptyText(r):
		// typed nil pointers are returned on purpose: these matcher
		// types presumably implement stringMatcher on nil receivers
		// (they carry no state) — see matchers.go to confirm.
		var m *emptyStringMatcher
		return m, nil

	case isEmptyTextWithWhitespace(r):
		var m *emptyWhiteStringMatcher
		return m, nil

	case isAnyMatch(r):
		var m *matchAny
		return m, nil

	default:
		// fallback: hand the pattern to the standard regexp engine
		// (*regexp.Regexp satisfies stringMatcher)
		r, err := regexp.Compile(r.String())
		if err != nil {
			return nil, err
		}
		return r, nil
	}
}
84 |
// compilePrefixNumDate builds a prefixNumDate matcher from a pattern of
// the shape recognized by isPrefixNumDate: an anchor, an optional
// literal prefix, then alternating digit groups and literal separators,
// with an optional literal suffix. It records the digit-group lengths,
// the separators, and the minimal total input length.
func compilePrefixNumDate(r *syntax.Regexp) (stringMatcher, error) {
	m := &prefixNumDate{}

	// Sub[0] is the ^ anchor; start at the first payload element.
	i := 1
	if r.Sub[i].Op == syntax.OpLiteral {
		m.prefix = []byte(string(r.Sub[i].Rune))
		i++
	}

	// digit groups are either a single digit class or a concat of them;
	// the group length equals the number of concatenated elements
	digitLen := func(r *syntax.Regexp) int {
		if r.Op == syntax.OpConcat {
			return len(r.Sub)
		}
		return 1
	}

	var digits []int
	var seps [][]byte

	digits = append(digits, digitLen(r.Sub[i]))
	i++

	// alternate literal separator / digit group until the pattern ends
	for i < len(r.Sub) {
		lit := []byte(string(r.Sub[i].Rune))
		i++

		// capture literal suffix
		if i == len(r.Sub) {
			m.suffix = lit
			break
		}

		seps = append(seps, lit)
		digits = append(digits, digitLen(r.Sub[i]))
		i++
	}

	// minLen is the shortest input that could possibly match
	minLen := len(m.prefix) + len(m.suffix)
	for _, d := range digits {
		minLen += d
	}
	for _, sep := range seps {
		minLen += len(sep)
	}

	m.digits = digits
	m.seps = seps
	m.minLen = minLen

	return m, nil
}
136 |
--------------------------------------------------------------------------------
/lib/reader/common/match/matcher.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package match
19 |
20 | import "regexp/syntax"
21 |
// Matcher matches a pattern anywhere inside the input (substring
// semantics). It embeds the concrete stringMatcher chosen at compile
// time.
type Matcher struct {
	stringMatcher
}

// ExactMatcher matches a pattern against the whole input (the pattern
// is anchored with ^ and $ during compilation).
type ExactMatcher struct {
	stringMatcher
}

// stringMatcher is the internal matching interface implemented by both
// the optimized literal matchers and the *regexp.Regexp fallback.
type stringMatcher interface {
	// MatchString tries to find a matching substring.
	MatchString(s string) (matched bool)

	// Match tries to find a matching substring.
	Match(bs []byte) (matched bool)

	// String describes the matcher (mainly for debugging).
	String() string
}
40 |
41 | func MustCompile(pattern string) Matcher {
42 | m, err := Compile(pattern)
43 | if err != nil {
44 | panic(err)
45 | }
46 | return m
47 | }
48 |
49 | func MustCompileExact(pattern string) ExactMatcher {
50 | m, err := CompileExact(pattern)
51 | if err != nil {
52 | panic(err)
53 | }
54 | return m
55 | }
56 |
57 | // CompileString matches a substring only, the input is not interpreted as
58 | // regular expression
59 | func CompileString(in string) (Matcher, error) {
60 | if in == "" {
61 | return Matcher{(*emptyStringMatcher)(nil)}, nil
62 | }
63 | return Matcher{&substringMatcher{in, []byte(in)}}, nil
64 | }
65 |
66 | // Compile regular expression to string matcher. String matcher by default uses
67 | // regular expressions as provided by regexp library, but tries to optimize some
68 | // common cases, replacing expensive patterns with cheaper custom implementations
69 | // or removing terms not necessary for string matching.
70 | func Compile(pattern string) (Matcher, error) {
71 | regex, err := syntax.Parse(pattern, syntax.Perl)
72 | if err != nil {
73 | return Matcher{}, err
74 | }
75 |
76 | regex = optimize(regex).Simplify()
77 | m, err := compile(regex)
78 | return Matcher{m}, err
79 | }
80 |
// CompileExact compiles pattern into a matcher that must match the
// entire input: the parsed AST is wrapped with begin/end text anchors
// unless the pattern already carries them, then optimized and lowered
// like Compile.
func CompileExact(pattern string) (ExactMatcher, error) {
	regex, err := syntax.Parse(pattern, syntax.Perl)
	if err != nil {
		return ExactMatcher{}, err
	}

	regex = regex.Simplify()
	if regex.Op != syntax.OpConcat {
		// non-concat node: wrap it as `^ <pattern> $`
		regex = &syntax.Regexp{
			Op: syntax.OpConcat,
			Sub: []*syntax.Regexp{
				patBeginText,
				regex,
				patEndText,
			},
			Flags: regex.Flags,
		}
	} else {
		// concat node: splice in the anchors only where missing
		if !eqPrefixRegex(regex, patBeginText) {
			regex.Sub = append([]*syntax.Regexp{patBeginText}, regex.Sub...)
		}
		if !eqSuffixRegex(regex, patEndText) {
			regex.Sub = append(regex.Sub, patEndText)
		}
	}

	regex = optimize(regex).Simplify()
	m, err := compile(regex)
	return ExactMatcher{m}, err
}
111 |
112 | func (m *Matcher) Unpack(s string) error {
113 | tmp, err := Compile(s)
114 | if err != nil {
115 | return err
116 | }
117 |
118 | *m = tmp
119 | return nil
120 | }
121 |
// MatchAnyString succeeds if any string in the given array contains a match.
// Accepts []string or []interface{} (non-string elements are skipped).
func (m *Matcher) MatchAnyString(strs interface{}) bool {
	return matchAnyStrings(m.stringMatcher, strs)
}

// MatchAllStrings succeeds if all strings in the given array contain a match.
// Accepts []string or []interface{} (non-string elements are skipped).
func (m *Matcher) MatchAllStrings(strs interface{}) bool {
	return matchAllStrings(m.stringMatcher, strs)
}

// MatchAnyString succeeds if any string in the given array is an exact match.
func (m *ExactMatcher) MatchAnyString(strs interface{}) bool {
	return matchAnyStrings(m.stringMatcher, strs)
}

// MatchAllStrings succeeds if all strings in the given array are an exact match.
func (m *ExactMatcher) MatchAllStrings(strs interface{}) bool {
	return matchAllStrings(m.stringMatcher, strs)
}
141 |
142 | func (m *ExactMatcher) Unpack(s string) error {
143 | tmp, err := CompileExact(s)
144 | if err != nil {
145 | return err
146 | }
147 |
148 | *m = tmp
149 | return nil
150 | }
151 |
152 | func matchAnyStrings(m stringMatcher, strs interface{}) bool {
153 | switch v := strs.(type) {
154 | case []interface{}:
155 | for _, s := range v {
156 | if str, ok := s.(string); ok && m.MatchString(str) {
157 | return true
158 | }
159 | }
160 | case []string:
161 | for _, s := range v {
162 | if m.MatchString(s) {
163 | return true
164 | }
165 | }
166 | }
167 | return false
168 | }
169 |
170 | func matchAllStrings(m stringMatcher, strs interface{}) bool {
171 | switch v := strs.(type) {
172 | case []interface{}:
173 | for _, s := range v {
174 | if str, ok := s.(string); ok && !m.MatchString(str) {
175 | return false
176 | }
177 | }
178 | case []string:
179 | for _, s := range v {
180 | if !m.MatchString(s) {
181 | return false
182 | }
183 | }
184 | }
185 | return true
186 | }
187 |
--------------------------------------------------------------------------------
/lib/reader/common/streambuf/io.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package streambuf
19 |
20 | import (
21 | "io"
22 | "unicode/utf8"
23 | )
24 |
25 | func (b *Buffer) ioErr() error {
26 | err := b.Err()
27 | if err == ErrUnexpectedEOB || err == ErrNoMoreBytes {
28 | return io.EOF
29 | }
30 | return err
31 | }
32 |
33 | func (b *Buffer) ioBufferEndError() error {
34 | err := b.bufferEndError()
35 | if err == ErrUnexpectedEOB || err == ErrNoMoreBytes {
36 | return io.EOF
37 | }
38 | return err
39 | }
40 |
// ReadByte reads and returns next byte from the buffer.
// If no byte is available returns either ErrNoMoreBytes (if buffer allows
// adding more bytes) or io.EOF
func (b *Buffer) ReadByte() (byte, error) {
	if b.Failed() {
		return 0, b.ioErr()
	}
	if !b.Avail(1) {
		return 0, b.ioBufferEndError()
	}
	c := b.data[b.mark] // byte at the current read marker
	b.Advance(1)
	return c, nil
}
55 |
// UnreadByte un-reads the last byte returned by the most recent read
// operation, rewinding the read marker by one. Returns ErrOutOfRange
// when nothing has been consumed yet.
func (b *Buffer) UnreadByte() error {
	err := b.ioErr()
	if err != nil && err != io.EOF {
		return err
	}
	if b.mark == 0 {
		return ErrOutOfRange
	}

	// keep offset in sync when it sits on the read marker
	// (NOTE(review): exact offset semantics live in streambuf.go — confirm)
	if b.mark == b.offset {
		b.offset--
	}
	b.mark--
	b.available++
	return nil
}
73 |
74 | // WriteByte appends the byte c to the buffer if buffer is not fixed.
75 | func (b *Buffer) WriteByte(c byte) error {
76 | p := [1]byte{c}
77 | _, err := b.Write(p[:])
78 | return err
79 | }
80 |
// Read reads up to len(p) bytes into p if buffer is not in a failed state.
// Returns ErrNoMoreBytes or io.EOF (fixed buffer) if no bytes are available.
func (b *Buffer) Read(p []byte) (int, error) {
	if b.Failed() {
		return 0, b.ioErr()
	}
	if b.Len() == 0 {
		return 0, b.ioBufferEndError()
	}

	// copy as much unread data as fits into p and consume it
	tmp := b.Bytes()
	n := copy(p, tmp)
	b.Advance(n)
	return n, nil
}
96 |
// Write writes p to the buffer if buffer is not fixed. Returns the number of
// bytes written or ErrOperationNotAllowed if buffer is fixed.
func (b *Buffer) Write(p []byte) (int, error) {
	err := b.doAppend(p, false, -1)
	if err != nil {
		// report the buffer's sticky error mapped through ioErr
		// (NOTE(review): assumes doAppend records err on b — confirm)
		return 0, b.ioErr()
	}
	return len(p), nil
}
106 |
// ReadFrom reads data from r until error or io.EOF and appends it to the buffer.
// The amount of bytes read is returned plus any error except io.EOF.
func (b *Buffer) ReadFrom(r io.Reader) (int64, error) {
	err := b.err
	if err != nil && err != ErrNoMoreBytes {
		return 0, b.ioErr()
	}
	if b.fixed {
		// fixed buffers cannot grow
		return 0, ErrOperationNotAllowed
	}

	// read through a fixed-size stack chunk, appending each chunk
	var buf [4096]byte
	var total int64
	for {
		n, err := r.Read(buf[:])
		if err != nil {
			if err == io.EOF {
				break // io.EOF is not reported to the caller
			}
			return total, err
		}
		_, err = b.Write(buf[:n])
		if err != nil {
			return total, err
		}
		total += int64(n)
	}

	return total, nil
}
137 |
// ReadRune reads and returns the next UTF-8-encoded Unicode code point from the
// buffer. If no bytes are available, the error returned is ErrNoMoreBytes (if
// buffer supports adding more bytes) or io.EOF. If the bytes are an erroneous
// UTF-8 encoding, it consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (rune, int, error) {
	if b.err != nil {
		return 0, 0, b.ioErr()
	}
	if b.available == 0 {
		return 0, 0, b.ioBufferEndError()
	}

	// fast path: single-byte (ASCII) rune
	if c := b.data[b.mark]; c < utf8.RuneSelf {
		b.Advance(1)
		return rune(c), 1, nil
	}
	// multi-byte rune; DecodeRune yields (RuneError, 1) on bad encoding
	c, size := utf8.DecodeRune(b.data[b.mark:])
	b.Advance(size)
	return c, size, nil
}
158 |
// ReadAt reads bytes at off into p starting at the buffer its read marker.
// The read marker is not updated. If number of bytes returned is less len(p) or
// no bytes are available at off, io.EOF will be returned in err. If off is < 0,
// err is set to ErrOutOfRange.
func (b *Buffer) ReadAt(p []byte, off int64) (n int, err error) {
	if b.err != nil {
		return 0, b.ioErr()
	}

	if off < 0 {
		return 0, ErrOutOfRange
	}

	// off is relative to the read marker; translate to an absolute index
	off += int64(b.mark)
	if off >= int64(len(b.data)) {
		return 0, ErrOutOfRange
	}

	// clamp the read window to the end of the data, signalling io.EOF on
	// a short read
	end := off + int64(len(p))
	if end > int64(len(b.data)) {
		err = io.EOF
		end = int64(len(b.data))
	}
	copy(p, b.data[off:end])
	return int(end - off), err
}
185 |
// WriteAt writes the content of p at off starting at recent read marker
// (already consumed bytes). Returns number of bytes written n = len(p) and err
// is nil if off and off+len(p) are within bounds, else n=0 and err is set to
// ErrOutOfRange.
func (b *Buffer) WriteAt(p []byte, off int64) (n int, err error) {
	if b.err != nil {
		return 0, b.ioErr()
	}

	// guard against negative offsets and int overflow of the end index
	end := off + int64(b.mark) + int64(len(p))
	maxInt := int((^uint(0)) >> 1)
	if off < 0 || end > int64(maxInt) {
		return 0, ErrOutOfRange
	}

	// copy p into buffer; sliceAt resolves/grows the target window
	n = copy(b.sliceAt(int(off), len(p)), p)
	return n, nil
}
205 |
--------------------------------------------------------------------------------
/lib/reader/common/streambuf/net.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package streambuf
19 |
20 | import common "infini.sh/agent/lib/reader/common"
21 |
// read/write integers in network (big endian) byte order

// ReadNetUint8 parses an 8bit binary value from the Buffer, advancing
// the read marker by one byte.
func (b *Buffer) ReadNetUint8() (uint8, error) {
	if b.Failed() {
		return 0, b.err
	}
	// slice first; Advance validates availability and fails if empty
	tmp := b.data[b.mark:]
	if err := b.Advance(1); err != nil {
		return 0, err
	}
	value := tmp[0]
	return value, nil
}

// WriteNetUint8 appends an 8bit binary value to the Buffer.
func (b *Buffer) WriteNetUint8(u uint8) error {
	return b.Append([]byte{u})
}

// ReadNetUint8At parses an 8bit binary value from the buffer at index
// (relative to the read marker). Will not advance the read buffer.
func (b *Buffer) ReadNetUint8At(index int) (uint8, error) {
	if b.Failed() {
		return 0, b.err
	}
	if !b.Avail(1 + index) {
		return 0, b.bufferEndError()
	}
	return b.data[index+b.mark], nil
}

// WriteNetUint8At writes an 8bit binary value at index (relative to the
// read marker).
func (b *Buffer) WriteNetUint8At(u uint8, index int) error {
	if b.err != nil {
		return b.err
	}
	b.sliceAt(index, 1)[0] = u
	return nil
}
62 |
// ReadNetUint16 parses a 16bit binary value in network byte order from
// the Buffer (converted to host order), advancing the read marker.
func (b *Buffer) ReadNetUint16() (uint16, error) {
	if b.Failed() {
		return 0, b.err
	}
	// slice first; Advance validates that 2 bytes are available
	tmp := b.data[b.mark:]
	if err := b.Advance(2); err != nil {
		return 0, err
	}
	value := common.BytesNtohs(tmp)
	return value, nil
}

// WriteNetUint16 appends a 16bit binary value in network byte order to
// the buffer (written at the current append position b.available).
func (b *Buffer) WriteNetUint16(u uint16) error {
	return b.WriteNetUint16At(u, b.available)
}

// ReadNetUint16At parses a 16bit binary value from the buffer at index
// (relative to the read marker). Will not advance the read buffer.
func (b *Buffer) ReadNetUint16At(index int) (uint16, error) {
	if b.Failed() {
		return 0, b.err
	}
	if !b.Avail(2 + index) {
		return 0, b.bufferEndError()
	}
	return common.BytesNtohs(b.data[index+b.mark:]), nil
}

// WriteNetUint16At writes a 16bit binary value at index in network byte
// order (big endian) to the buffer.
func (b *Buffer) WriteNetUint16At(u uint16, index int) error {
	if b.err != nil {
		return b.err
	}
	tmp := b.sliceAt(index, 2)
	tmp[0] = uint8(u >> 8)
	tmp[1] = uint8(u)
	return nil
}
104 |
// ReadNetUint32 parses a 32bit binary value in network byte order from
// the Buffer (converted to host order), advancing the read marker.
func (b *Buffer) ReadNetUint32() (uint32, error) {
	if b.Failed() {
		return 0, b.err
	}
	// slice first; Advance validates that 4 bytes are available
	tmp := b.data[b.mark:]
	if err := b.Advance(4); err != nil {
		return 0, err
	}
	value := common.BytesNtohl(tmp)
	return value, nil
}

// WriteNetUint32 appends a 32bit binary value in network byte order to
// the buffer (written at the current append position b.available).
func (b *Buffer) WriteNetUint32(u uint32) error {
	return b.WriteNetUint32At(u, b.available)
}

// ReadNetUint32At parses a 32bit binary value from the buffer at index
// (relative to the read marker). Will not advance the read buffer.
func (b *Buffer) ReadNetUint32At(index int) (uint32, error) {
	if b.Failed() {
		return 0, b.err
	}
	if !b.Avail(4 + index) {
		return 0, b.bufferEndError()
	}
	return common.BytesNtohl(b.data[index+b.mark:]), nil
}

// WriteNetUint32At writes a 32bit binary value at index in network byte
// order (big endian) to the buffer.
func (b *Buffer) WriteNetUint32At(u uint32, index int) error {
	if b.err != nil {
		return b.err
	}
	tmp := b.sliceAt(index, 4)
	tmp[0] = uint8(u >> 24)
	tmp[1] = uint8(u >> 16)
	tmp[2] = uint8(u >> 8)
	tmp[3] = uint8(u)
	return nil
}
148 |
// ReadNetUint64 parses a 64bit binary value in network byte order from
// the Buffer (converted to host order), advancing the read marker.
func (b *Buffer) ReadNetUint64() (uint64, error) {
	if b.Failed() {
		return 0, b.err
	}
	// slice first; Advance validates that 8 bytes are available
	tmp := b.data[b.mark:]
	if err := b.Advance(8); err != nil {
		return 0, err
	}
	value := common.BytesNtohll(tmp)
	return value, nil
}

// WriteNetUint64 appends a 64bit binary value in network byte order to
// the buffer (written at the current append position b.available).
func (b *Buffer) WriteNetUint64(u uint64) error {
	return b.WriteNetUint64At(u, b.available)
}

// ReadNetUint64At parses a 64bit binary value from the buffer at index
// (relative to the read marker). Will not advance the read buffer.
func (b *Buffer) ReadNetUint64At(index int) (uint64, error) {
	if b.Failed() {
		return 0, b.err
	}
	if !b.Avail(8 + index) {
		return 0, b.bufferEndError()
	}
	return common.BytesNtohll(b.data[index+b.mark:]), nil
}

// WriteNetUint64At writes a 64bit binary value at index in network byte
// order (big endian) to the buffer.
func (b *Buffer) WriteNetUint64At(u uint64, index int) error {
	if b.err != nil {
		return b.err
	}
	tmp := b.sliceAt(index, 8)
	tmp[0] = uint8(u >> 56)
	tmp[1] = uint8(u >> 48)
	tmp[2] = uint8(u >> 40)
	tmp[3] = uint8(u >> 32)
	tmp[4] = uint8(u >> 24)
	tmp[5] = uint8(u >> 16)
	tmp[6] = uint8(u >> 8)
	tmp[7] = uint8(u)
	return nil
}
196 |
--------------------------------------------------------------------------------
/lib/reader/harvester/harvester.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package harvester
6 |
7 | import (
8 | "fmt"
9 | "io"
10 | "os"
11 |
12 | "infini.sh/agent/lib/reader"
13 | "infini.sh/agent/lib/reader/linenumber"
14 | "infini.sh/agent/lib/reader/multiline"
15 | "infini.sh/agent/lib/reader/readfile"
16 | "infini.sh/agent/lib/reader/readfile/encoding"
17 | "infini.sh/agent/lib/reader/readjson"
18 | "infini.sh/framework/core/errors"
19 | )
20 |
// Harvester reads a single log file from a byte offset, decoding its
// content to UTF-8 through the configured encoding. The reading
// pipeline itself is assembled later by NewJsonFileReader /
// NewLogFileReader.
type Harvester struct {
	reader reader.Reader // assembled pipeline; set by New*Reader, used by Close
	file   *os.File      // underlying file, opened read-only in NewHarvester
	config Config        // harvester options (defaults from defaultConfig)
	offset int64         // byte offset the file was seeked to on open

	encodingFactory encoding.EncodingFactory
	encoding        encoding.Encoding
}
30 |
31 | func NewHarvester(path string, offset int64) (*Harvester, error) {
32 | f, err := readOpen(path)
33 | if f == nil || err != nil {
34 | return nil, errors.Errorf("failed to open file(%s),%v", path, err)
35 | }
36 | _, err = f.Seek(offset, io.SeekStart)
37 | if err != nil {
38 | return nil, err
39 | }
40 | h := &Harvester{
41 | file: f,
42 | config: defaultConfig(),
43 | offset: offset,
44 | }
45 | encodingFactory, ok := encoding.FindEncoding(h.config.Encoding)
46 | if !ok || encodingFactory == nil {
47 | return nil, fmt.Errorf("unknown encoding('%v')", h.config.Encoding)
48 | }
49 | h.encodingFactory = encodingFactory
50 | h.encoding, err = h.encodingFactory(f)
51 | if err != nil {
52 | return nil, err
53 | }
54 | return h, nil
55 | }
56 |
57 | func readOpen(path string) (*os.File, error) {
58 | flag := os.O_RDONLY
59 | perm := os.FileMode(0)
60 | return os.OpenFile(path, flag, perm)
61 | }
62 |
// NewJsonFileReader assembles the reading pipeline for JSON log files:
// encoding/line splitting -> optional JSON decoding -> multiline joining
// (lines grouped by pattern) -> size limiting -> optional line-number
// tracking. The resulting reader is stored on h (used by Close) and
// returned.
func (h *Harvester) NewJsonFileReader(pattern string, showLineNumber bool) (reader.Reader, error) {
	var r reader.Reader
	var err error
	if h == nil || h.file == nil {
		return nil, fmt.Errorf("file is nil")
	}

	// Allow up to 4x MaxBytes before decoding; non-UTF-8 input may shrink
	// once converted to UTF-8.
	encReaderMaxBytes := h.config.MaxBytes * 4
	r, err = readfile.NewEncodeReader(h.file, readfile.Config{
		Codec:      h.encoding,
		BufferSize: h.config.BufferSize,
		MaxBytes:   encReaderMaxBytes,
		Terminator: h.config.LineTerminator,
	})
	if err != nil {
		return nil, err
	}

	// JSON decoding is only applied when configured.
	if h.config.JSON != nil {
		r = readjson.NewJSONReader(r, h.config.JSON)
	}

	//r = readfile.NewStripNewline(r, h.config.LineTerminator)

	// Join continuation lines into one event using the caller's pattern.
	h.config.Multiline = multiline.DefaultConfig(pattern)
	r, err = multiline.New(r, "", h.config.MaxBytes, h.config.Multiline)
	if err != nil {
		return nil, err
	}
	// Cap each produced message at MaxBytes.
	r = readfile.NewLimitReader(r, h.config.MaxBytes)
	if showLineNumber {
		h.config.LineNumber = linenumber.NewConfig(h.offset, h.file, io.SeekStart)
		h.reader = linenumber.NewLineNumberReader(r, h.config.LineNumber)
	} else {
		h.reader = r
	}
	return h.reader, nil
}
101 |
// NewLogFileReader assembles the reading pipeline for plain-text log
// files: encoding/line splitting -> multiline joining (lines grouped by
// pattern) -> size limiting -> optional line-number tracking. The
// resulting reader is stored on h (used by Close) and returned.
func (h *Harvester) NewLogFileReader(pattern string, showLineNumber bool) (reader.Reader, error) {
	var r reader.Reader
	var err error

	if h == nil || h.file == nil {
		return nil, fmt.Errorf("file is nil")
	}
	// Allow up to 4x MaxBytes before decoding; non-UTF-8 input may shrink
	// once converted to UTF-8.
	encReaderMaxBytes := h.config.MaxBytes * 4
	r, err = readfile.NewEncodeReader(h.file, readfile.Config{
		Codec:      h.encoding,
		BufferSize: h.config.BufferSize,
		MaxBytes:   encReaderMaxBytes,
		Terminator: h.config.LineTerminator,
	})
	if err != nil {
		return nil, err
	}

	//r = readfile.NewStripNewline(r, h.config.LineTerminator)

	// Join continuation lines into one event using the caller's pattern.
	h.config.Multiline = multiline.DefaultConfig(pattern)
	r, err = multiline.New(r, "", h.config.MaxBytes, h.config.Multiline)
	if err != nil {
		return nil, err
	}
	// Cap each produced message at MaxBytes.
	r = readfile.NewLimitReader(r, h.config.MaxBytes)
	if showLineNumber {
		h.config.LineNumber = linenumber.NewConfig(h.offset, h.file, io.SeekStart)
		h.reader = linenumber.NewLineNumberReader(r, h.config.LineNumber)
	} else {
		h.reader = r
	}
	return h.reader, nil
}
136 |
137 | func (h *Harvester) Close() error {
138 | err := h.reader.Close()
139 | if err != nil {
140 | return err
141 | }
142 | return nil
143 | }
144 |
--------------------------------------------------------------------------------
/lib/reader/harvester/harvester_config.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package harvester
6 |
7 | import (
8 | "infini.sh/agent/lib/reader/linenumber"
9 | "infini.sh/agent/lib/reader/multiline"
10 | "infini.sh/agent/lib/reader/readfile"
11 | "infini.sh/agent/lib/reader/readjson"
12 | )
13 |
// Binary size units: Byte = 1, then successive powers of 2^10
// (KiByte = 1024, MiByte = 1024^2, ...).
const (
	Byte = 1 << (iota * 10)
	KiByte
	MiByte
	GiByte
	TiByte
	PiByte
	EiByte
)
23 |
// Config holds the harvester options; zero values are filled in by
// defaultConfig.
type Config struct {
	Encoding       string                  `config:"encoding"`                          // codec name looked up via encoding.FindEncoding
	BufferSize     int                     `config:"harvester_buffer_size"`             // read buffer size in bytes
	MaxBytes       int                     `config:"max_bytes" validate:"min=0,nonzero"` // max size of a single produced message
	LineTerminator readfile.LineTerminator `config:"line_terminator"`
	JSON           *readjson.Config        `config:"json"`      // optional JSON decoding settings
	Multiline      *multiline.Config       `config:"multiline"` // set by New*Reader from the caller's pattern
	LineNumber     *linenumber.Config      // set by New*Reader when line numbers are requested
}
33 |
34 | func defaultConfig() Config {
35 | return Config{
36 | BufferSize: 16 * KiByte,
37 | MaxBytes: 10 * MiByte,
38 | LineTerminator: readfile.AutoLineTerminator,
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/lib/reader/linenumber/line_number.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package linenumber
6 |
7 | import (
8 | "bufio"
9 | "errors"
10 | "infini.sh/agent/lib/reader"
11 | "io"
12 | "os"
13 | )
14 |
// LineNumberReader wraps another reader and annotates each message with
// the line number(s) matching its file offset, by re-scanning the same
// file through a second, lazily opened handle.
type LineNumberReader struct {
	reader        reader.Reader  // wrapped source of messages
	cfg           *Config        // carries the starting offset and the file to scan
	currentOffset int64          // scan position (bytes) persisted across Next calls
	scanner       *bufio.Scanner // lazy line scanner over innerFile
	currentLine   int64          // scan position (lines) persisted across Next calls
	innerFile     *os.File       // separate read-only handle used for scanning
}
23 |
24 | func NewLineNumberReader(r reader.Reader, config *Config) *LineNumberReader {
25 | lineReader := &LineNumberReader{
26 | reader: r,
27 | cfg: config,
28 | }
29 | lineReader.currentOffset = config.Offset
30 | return lineReader
31 | }
32 |
// Next delegates to the wrapped reader, then walks the file with a
// separate scanner to map the current byte offset to line number(s).
// The scan position (currentOffset/currentLine) persists across calls
// so the file is only ever scanned forward once.
func (r *LineNumberReader) Next() (reader.Message, error) {
	message, err := r.reader.Next()
	if err != nil {
		return message, err
	}
	if r.cfg == nil {
		return message, errors.New("config can not be nil")
	}
	if r.cfg.file == nil {
		// No file to map offsets against; pass the message through.
		return message, nil
	}
	if r.scanner == nil {
		// Lazily open a second read-only handle on the same file so the
		// line scan does not disturb the main reader's position.
		fileName := r.cfg.file.Name()
		flag := os.O_RDONLY
		perm := os.FileMode(0)
		r.innerFile, err = os.OpenFile(fileName, flag, perm)
		if err != nil {
			return message, err
		}
		r.innerFile.Seek(0, io.SeekStart)
		r.scanner = bufio.NewScanner(r.innerFile)
	}

	var offset int64 = 0
	if r.currentLine > 0 {
		offset = r.currentOffset
	}
	line := r.currentLine
	var contentLen int64 = 0
	var content string
	// StripNewline (earlier in the pipeline) already removed each line's
	// terminator, so the per-line size difference is the terminator
	// itself; it is accounted for as 1 byte below rather than re-derived.
	// NOTE(review): assumes a 1-byte line terminator — confirm for CRLF input.
	for r.scanner.Scan() {
		// Scanner.Text() returns the line with the newline already stripped.
		content = r.scanner.Text()
		contentLen = int64(len([]byte(content)))
		offset += contentLen + 1
		line++
		// The scanned offset has passed the requested offset plus this
		// line's length: we overshot the target range, so rewind this
		// line's contribution and stop.
		//add 1 for the newline character
		if offset > (r.currentOffset + contentLen + 1) {
			offset -= contentLen + 1
			//skip prefix lines
			r.currentOffset = offset
			break
		}
		// The scanned offset has passed the requested offset: this is the
		// target position; record its line number.
		if offset > r.currentOffset {
			message.LineNumbers = append(message.LineNumbers, line)
			break
		}
	}
	r.currentOffset = offset
	r.currentLine = line
	message.Offset = offset
	return message, nil
}
89 |
90 | func (r *LineNumberReader) Close() error {
91 | r.innerFile.Close()
92 | return r.reader.Close()
93 | }
94 |
--------------------------------------------------------------------------------
/lib/reader/linenumber/line_number_config.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package linenumber
6 |
7 | import "os"
8 |
// Config carries what LineNumberReader needs to map byte offsets to
// line numbers.
type Config struct {
	Offset int64    // byte offset where reading starts
	file   *os.File // file whose name is reopened for scanning
	whence int      // io.SeekStart / io.SeekEnd
}
14 |
15 | func NewConfig(offset int64, f *os.File, whence int) *Config {
16 | return &Config{
17 | Offset: offset,
18 | file: f,
19 | whence: whence,
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/lib/reader/linenumber/line_reader.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package linenumber
6 |
7 | import (
8 | "bufio"
9 | "infini.sh/agent/lib/reader"
10 | "io"
11 | "os"
12 | )
13 |
// LinePlainTextReader reads a file line by line, skipping everything
// before startLine and reporting offsets/line numbers for what it emits.
type LinePlainTextReader struct {
	currentOffset int64          // byte offset after the last emitted line
	scanner       *bufio.Scanner // line scanner over innerFile
	currentLine   int64          // number of lines consumed so far
	innerFile     *os.File       // file handle owned by this reader
	startLine     int64          // first line number to emit (1-based)
}
21 |
22 | func NewLinePlainTextReader(filePath string, startLineNumber int64, whence int) (*LinePlainTextReader, error) {
23 | lineReader := &LinePlainTextReader{
24 | startLine: startLineNumber,
25 | }
26 | flag := os.O_RDONLY
27 | perm := os.FileMode(0)
28 | var err error
29 | lineReader.innerFile, err = os.OpenFile(filePath, flag, perm)
30 | if err != nil {
31 | return nil, err
32 | }
33 | lineReader.innerFile.Seek(0, whence)
34 | lineReader.scanner = bufio.NewScanner(lineReader.innerFile)
35 | return lineReader, nil
36 | }
37 |
38 | func (r *LinePlainTextReader) Next() (message reader.Message, err error) {
39 | var offset int64 = 0
40 | if r.currentLine > 0 {
41 | offset = r.currentOffset
42 | }
43 | line := r.currentLine
44 | var contentLen int64 = 0
45 | var content []byte
46 | for r.scanner.Scan() {
47 | content = r.scanner.Bytes()
48 | contentLen = int64(len(content))
49 | //add 1 for the newline character
50 | offset += contentLen + 1
51 | line++
52 | if line < r.startLine {
53 | //skip prefix lines
54 | r.currentOffset = offset
55 | continue
56 | }
57 | message.LineNumbers = append(message.LineNumbers, line)
58 | break
59 | }
60 | if len(content) == 0 || line < r.startLine {
61 | return message, io.EOF
62 | }
63 | r.currentOffset = offset
64 | r.currentLine = line
65 | message.Offset = offset
66 | message.Content = content
67 | return message, nil
68 | }
69 |
// Close releases the file handle opened by NewLinePlainTextReader.
func (r *LinePlainTextReader) Close() error {
	return r.innerFile.Close()
}
73 |
--------------------------------------------------------------------------------
/lib/reader/message.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package reader
6 |
7 | import (
8 | "infini.sh/framework/core/util"
9 | "time"
10 | )
11 |
// Message is the unit of data produced by a Reader: one (possibly
// multi-line) chunk of content plus bookkeeping metadata.
type Message struct {
	Ts          time.Time   // timestamp the content was read
	Content     []byte      `json:"content"`         // actual content read
	Bytes       int         `json:"bytes,omitempty"` // total number of bytes read to generate the message
	Fields      util.MapStr // optional fields that can be added by reader
	Meta        util.MapStr // deprecated
	LineNumbers []int64     `json:"line_numbers"` // line numbers of current content
	Offset      int64       `json:"offset"`       // content offset in file
}
21 |
22 | // IsEmpty returns true in case the message is empty
23 | // A message with only newline character is counted as an empty message
24 | func (m *Message) IsEmpty() bool {
25 | // If no Bytes were read, event is empty
26 | // For empty line Bytes is at least 1 because of the newline char
27 | if m.Bytes == 0 {
28 | return true
29 | }
30 |
31 | // Content length can be 0 because of JSON events. Content and Fields must be empty.
32 | if len(m.Content) == 0 && len(m.Fields) == 0 {
33 | return true
34 | }
35 |
36 | return false
37 | }
38 |
// AddFields merges the given fields into the message, lazily creating
// the Fields map on first use. A nil argument is a no-op (but an empty
// non-nil map still triggers the lazy allocation).
func (m *Message) AddFields(fields util.MapStr) {
	if fields == nil {
		return
	}

	if m.Fields == nil {
		m.Fields = util.MapStr{}
	}
	m.Fields.Update(fields)
}
50 |
// AddFlagsWithKey adds flags to the message's Fields under an arbitrary
// key (creating the Fields map and the keyed entry as needed). Passing
// no flags is a no-op.
func (m *Message) AddFlagsWithKey(key string, flags ...string) error {
	if len(flags) == 0 {
		return nil
	}

	if m.Fields == nil {
		m.Fields = util.MapStr{}
	}

	return util.AddTagsWithKey(m.Fields, key, flags)
}
64 |
--------------------------------------------------------------------------------
/lib/reader/multiline/counter.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package multiline
19 |
20 | import (
21 | "infini.sh/agent/lib/reader"
22 | "io"
23 | )
24 |
// counterReader joins a fixed number of lines into one multi-line event.
// It is a small state machine: the state field points at the method
// (readFirst / readNext / readFailed / readClosed) handling the next call.
type counterReader struct {
	reader     reader.Reader                            // line source
	state      func(*counterReader) (reader.Message, error) // current state handler
	linesCount int                                      // number of lines to collect per event
	msgBuffer  *messageBuffer                           // accumulates lines into one message
}
31 |
32 | func newMultilineCountReader(
33 | r reader.Reader,
34 | separator string,
35 | maxBytes int,
36 | config *Config,
37 | ) (reader.Reader, error) {
38 | maxLines := config.LinesCount
39 | if l := config.MaxLines; l != nil && 0 < *l {
40 | maxLines = *l
41 | }
42 |
43 | return &counterReader{
44 | reader: r,
45 | state: (*counterReader).readFirst,
46 | linesCount: config.LinesCount,
47 | msgBuffer: newMessageBuffer(maxBytes, maxLines, []byte(separator), config.SkipNewLine),
48 | }, nil
49 | }
50 |
// Next returns the next multi-line event by dispatching to the current
// state handler.
func (cr *counterReader) Next() (reader.Message, error) {
	return cr.state(cr)
}
55 |
// readFirst skips empty messages, starts a fresh buffered event with the
// first non-empty line and either finalizes immediately (when one line
// already satisfies the count) or transitions to readNext to collect
// the remaining lines.
func (cr *counterReader) readFirst() (reader.Message, error) {
	for {
		message, err := cr.reader.Next()
		if err != nil {
			return message, err
		}

		if message.Bytes == 0 {
			// nothing read; keep polling
			continue
		}

		cr.msgBuffer.startNewMessage(message)
		if cr.msgBuffer.processedLines == cr.linesCount {
			msg := cr.msgBuffer.finalize()
			return msg, nil
		}

		cr.setState((*counterReader).readNext)
		return cr.readNext()
	}
}
77 |
// readNext accumulates lines into the current buffered event until the
// configured line count is reached, then finalizes it and returns to
// readFirst. Errors from the underlying reader are handled so that any
// buffered content is delivered first and the error is replayed on the
// following call (via the readFailed state).
func (cr *counterReader) readNext() (reader.Message, error) {
	for {
		message, err := cr.reader.Next()
		if err != nil {
			// handle error without any bytes returned from reader
			if message.Bytes == 0 {
				// no lines buffered -> return error
				if cr.msgBuffer.isEmpty() {
					return reader.Message{}, err
				}

				// lines buffered, return multiline and error on next read
				msg := cr.msgBuffer.finalize()
				cr.msgBuffer.setErr(err)
				cr.setState((*counterReader).readFailed)
				return msg, nil
			}

			// handle error with some content being returned by reader and
			// line matching multiline criteria or no multiline started yet
			if cr.msgBuffer.isEmptyMessage() {
				cr.msgBuffer.addLine(message)

				// return multiline and error on next read
				msg := cr.msgBuffer.finalize()
				cr.msgBuffer.setErr(err)
				cr.setState((*counterReader).readFailed)
				return msg, nil
			}
			// NOTE(review): when err != nil, content was returned and the
			// buffer is non-empty, execution falls through: the line is
			// added below and the error itself is discarded — confirm this
			// matches the upstream libbeat behavior this was ported from.
		}

		// add line to current multiline event
		cr.msgBuffer.addLine(message)
		if cr.msgBuffer.processedLines == cr.linesCount {
			msg := cr.msgBuffer.finalize()
			cr.setState((*counterReader).readFirst)
			return msg, nil
		}
	}
}
118 |
119 | func (cr *counterReader) readFailed() (reader.Message, error) {
120 | err := cr.msgBuffer.err
121 | cr.msgBuffer.setErr(nil)
122 | cr.resetState()
123 | return reader.Message{}, err
124 | }
125 |
// resetState sets state of the reader to readFirst.
func (cr *counterReader) resetState() {
	cr.setState((*counterReader).readFirst)
}

// setState sets state to the given function; the next Next call
// dispatches to it.
func (cr *counterReader) setState(next func(cr *counterReader) (reader.Message, error)) {
	cr.state = next
}
135 |
// Close moves the state machine to readClosed (so further Next calls
// return io.EOF) and closes the underlying reader.
func (cr *counterReader) Close() error {
	cr.setState((*counterReader).readClosed)
	return cr.reader.Close()
}

// readClosed is the terminal state entered by Close.
func (cr *counterReader) readClosed() (reader.Message, error) {
	return reader.Message{}, io.EOF
}
144 |
--------------------------------------------------------------------------------
/lib/reader/multiline/message_buffer.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package multiline
19 |
20 | import "infini.sh/agent/lib/reader"
21 |
// messageBuffer accumulates individual lines into one multi-line
// message while enforcing byte and line limits.
type messageBuffer struct {
	maxBytes       int    // max bytes allowed in message.Content (<=0 disables the limit)
	maxLines       int    // max lines allowed in the message (<=0 disables the limit)
	separator      []byte // inserted between joined lines unless skipNewline is set
	skipNewline    bool
	last           []byte // content of the most recently processed line
	numLines       int    // lines actually stored in the message
	processedLines int    // lines seen, including ones dropped over the limits
	truncated      int    // bytes dropped due to the limits
	err            error  // last seen error
	message        reader.Message
}
34 |
35 | func newMessageBuffer(maxBytes, maxLines int, separator []byte, skipNewline bool) *messageBuffer {
36 | return &messageBuffer{
37 | maxBytes: maxBytes,
38 | maxLines: maxLines,
39 | separator: separator,
40 | skipNewline: skipNewline,
41 | message: reader.Message{},
42 | err: nil,
43 | }
44 | }
45 |
// startNewMessage discards any buffered state and begins a new message
// seeded with msg.
func (b *messageBuffer) startNewMessage(msg reader.Message) {
	b.clear()
	b.load(msg)
}

// load loads the reader with the given message. It is recommended to either
// run clear or finalize before.
func (b *messageBuffer) load(m reader.Message) {
	b.addLine(m)
	// Timestamp of first message is taken as overall timestamp
	b.message.Ts = m.Ts
	b.message.AddFields(m.Fields)
}
59 |
// clear resets all per-message buffer state (limits and separator are
// kept; only accumulation state is dropped).
func (b *messageBuffer) clear() {
	b.message = reader.Message{}
	b.last = nil
	b.numLines = 0
	b.processedLines = 0
	b.truncated = 0
	b.err = nil
}
69 |
70 | // addLine adds the read content to the message
71 | // The content is only added if maxBytes and maxLines is not exceed. In case one of the
72 | // two is exceeded, addLine keeps processing but does not add it to the content.
73 | func (b *messageBuffer) addLine(m reader.Message) {
74 | if m.Bytes <= 0 {
75 | return
76 | }
77 |
78 | sz := len(b.message.Content)
79 | addSeparator := len(b.message.Content) > 0 && len(b.separator) > 0 && !b.skipNewline
80 | if addSeparator {
81 | sz += len(b.separator)
82 | }
83 |
84 | space := b.maxBytes - sz
85 |
86 | maxBytesReached := (b.maxBytes <= 0 || space > 0)
87 | maxLinesReached := (b.maxLines <= 0 || b.numLines < b.maxLines)
88 |
89 | if maxBytesReached && maxLinesReached {
90 | if space < 0 || space > len(m.Content) {
91 | space = len(m.Content)
92 | }
93 |
94 | tmp := b.message.Content
95 | if addSeparator {
96 | tmp = append(tmp, b.separator...)
97 | }
98 | b.message.Content = append(tmp, m.Content[:space]...)
99 | b.numLines++
100 |
101 | // add number of truncated bytes to fields
102 | diff := len(m.Content) - space
103 | if diff > 0 {
104 | b.truncated += diff
105 | }
106 | } else {
107 | // increase the number of skipped bytes, if cannot add
108 | b.truncated += len(m.Content)
109 |
110 | }
111 | b.processedLines++
112 |
113 | b.last = m.Content
114 | b.message.Bytes += m.Bytes
115 | b.message.AddFields(m.Fields)
116 | }
117 |
// finalize returns the accumulated message (tagging it "truncated"
// and/or "multiline" in log.flags as appropriate) and resets the buffer
// for the next message. Tagging errors are intentionally ignored.
func (b *messageBuffer) finalize() reader.Message {
	if b.truncated > 0 {
		b.message.AddFlagsWithKey("log.flags", "truncated")
	}

	if b.numLines > 1 {
		b.message.AddFlagsWithKey("log.flags", "multiline")
	}

	// Copy message from existing content
	msg := b.message

	b.clear()
	return msg
}
134 |
// setErr stores an error to be delivered by a later state (see
// counterReader.readFailed).
func (b *messageBuffer) setErr(err error) {
	b.err = err
}

// isEmpty reports whether no line has been stored yet.
func (b *messageBuffer) isEmpty() bool {
	return b.numLines == 0
}

// isEmptyMessage reports whether no bytes have been accumulated at all
// (lines over the limits still add to Bytes, so this is stricter than isEmpty).
func (b *messageBuffer) isEmptyMessage() bool {
	return b.message.Bytes == 0
}
146 |
--------------------------------------------------------------------------------
/lib/reader/multiline/multiline.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package multiline
19 |
20 | import (
21 | "fmt"
22 | "infini.sh/agent/lib/reader"
23 | )
24 |
// New creates a new multi-line reader combining stream of
// line events into stream of multi-line events, dispatching on the
// configured aggregation mode (pattern, count or while_pattern).
func New(
	r reader.Reader,
	separator string,
	maxBytes int,
	config *Config,
) (reader.Reader, error) {
	switch config.Type {
	case patternMode:
		return newMultilinePatternReader(r, separator, maxBytes, config)
	case countMode:
		return newMultilineCountReader(r, separator, maxBytes, config)
	case whilePatternMode:
		return newMultilineWhilePatternReader(r, separator, maxBytes, config)
	default:
		return nil, fmt.Errorf("unknown multiline type %d", config.Type)
	}
}
44 |
--------------------------------------------------------------------------------
/lib/reader/multiline/multiline_config.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package multiline
19 |
20 | import (
21 | "errors"
22 | "fmt"
23 | "infini.sh/agent/lib/reader/common/match"
24 | "log"
25 | "time"
26 | )
27 |
// multilineType selects the aggregation strategy used by the multiline
// reader (see Unpack for the config-string mapping).
type multilineType uint8

const (
	patternMode multilineType = iota // join lines by regexp match
	countMode                        // join a fixed number of lines
	whilePatternMode                 // join lines while the pattern matches

	patternStr      = "pattern"
	countStr        = "count"
	whilePatternStr = "while_pattern"
)

var (
	// multilineTypes maps config strings to their multilineType.
	multilineTypes = map[string]multilineType{
		patternStr:      patternMode,
		countStr:        countMode,
		whilePatternStr: whilePatternMode,
	}

	ErrMissingPattern = errors.New("multiline.pattern cannot be empty when pattern based matching is selected")
	ErrMissingCount   = errors.New("multiline.count cannot be empty when count based aggregation is selected")
)
50 |
// Config holds the options of multiline readers.
type Config struct {
	Type multilineType `config:"type"` // pattern, count or while_pattern

	Negate       bool           `config:"negate"` // invert the pattern match (pattern mode)
	Match        string         `config:"match"`  // "after" or "before" (pattern mode)
	MaxLines     *int           `config:"max_lines"` // cap on lines joined into one event
	Pattern      *match.Matcher `config:"pattern"`
	Timeout      *time.Duration `config:"timeout" validate:"positive"`
	FlushPattern *match.Matcher `config:"flush_pattern"`

	LinesCount  int  `config:"count_lines" validate:"positive"` // lines per event (count mode)
	SkipNewLine bool `config:"skip_newline"`                    // don't insert a separator when joining
}
65 |
66 | // Validate validates the Config option for multiline reader.
67 | func (c *Config) Validate() error {
68 | if c.Type == patternMode {
69 | if c.Match != "after" && c.Match != "before" {
70 | return fmt.Errorf("unknown matcher type: %s", c.Match)
71 | }
72 | if c.Pattern == nil {
73 | return ErrMissingPattern
74 | }
75 | } else if c.Type == countMode {
76 | if c.LinesCount == 0 {
77 | return ErrMissingCount
78 | }
79 | } else if c.Type == whilePatternMode {
80 | if c.Pattern == nil {
81 | return ErrMissingPattern
82 | }
83 | } else {
84 | return fmt.Errorf("unknown multiline type %d", c.Type)
85 | }
86 | return nil
87 | }
88 |
89 | // Unpack selects the approriate aggregation method for creating multiline events.
90 | // If it is not configured pattern matching is chosen.
91 | func (m *multilineType) Unpack(value string) error {
92 | if value == "" {
93 | *m = patternMode
94 | return nil
95 | }
96 |
97 | s, ok := multilineTypes[value]
98 | if !ok {
99 | return fmt.Errorf("unknown multiline type: %s", value)
100 | }
101 | *m = s
102 | return nil
103 | }
104 |
// DefaultConfig builds the multiline configuration used by the
// harvester: pattern mode, negated, appending subsequent lines after a
// line that matches pattern.
// NOTE(review): a pattern that fails to compile is only logged; the
// returned Config then carries a zero-value Matcher — confirm callers
// can tolerate that.
func DefaultConfig(pattern string) *Config {
	match, err := match.Compile(pattern)
	if err != nil {
		log.Println(err)
	}
	return &Config{
		Type:    patternMode,
		Negate:  true,
		Match:   "after",
		Pattern: &match,
	}
}
117 |
--------------------------------------------------------------------------------
/lib/reader/reader.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package reader
6 |
7 | import "io"
8 |
// Reader produces Messages one at a time; implementations must release
// their resources when Close is called.
type Reader interface {
	io.Closer
	Next() (Message, error)
}
13 |
--------------------------------------------------------------------------------
/lib/reader/readfile/encode.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package readfile
19 |
20 | import (
21 | "bytes"
22 | "infini.sh/agent/lib/reader"
23 | "infini.sh/agent/lib/reader/readfile/encoding"
24 | "infini.sh/framework/core/util"
25 | "io"
26 |
27 | "time"
28 | )
29 |
// EncoderReader produces lines by reading from an io.Reader through a
// decoder that converts the reader's encoding to UTF-8.
type EncoderReader struct {
	reader *LineReader
}

// Config stores the configuration for the readers required to read
// a file line by line.
type Config struct {
	Codec      encoding.Encoding // source charset decoder
	BufferSize int               // read buffer size in bytes
	Terminator LineTerminator    // how lines are delimited
	MaxBytes   int               // max bytes per line before truncation
}
44 |
// NewEncodeReader creates a new encode reader from the input reader by
// applying the codec given in config.
func NewEncodeReader(r io.ReadCloser, config Config) (EncoderReader, error) {
	eReader, err := NewLineReader(r, config)
	return EncoderReader{eReader}, err
}
51 |
52 | // Next reads the next line from it's initial io.Reader
53 | // This converts a io.Reader to a reader.reader
54 | func (r EncoderReader) Next() (reader.Message, error) {
55 | c, sz, err := r.reader.Next()
56 | // Creating message object
57 | return reader.Message{
58 | Ts: time.Now(),
59 | Content: bytes.Trim(c, "\xef\xbb\xbf"),
60 | Bytes: sz,
61 | Fields: util.MapStr{},
62 | }, err
63 | }
64 |
// Close closes the underlying LineReader (and with it the wrapped
// io.ReadCloser).
func (r EncoderReader) Close() error {
	return r.reader.Close()
}
68 |
--------------------------------------------------------------------------------
/lib/reader/readfile/encoding/encoding.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package encoding
19 |
20 | import (
21 | "io"
22 | "strings"
23 |
24 | "golang.org/x/text/encoding"
25 | "golang.org/x/text/encoding/charmap"
26 | "golang.org/x/text/encoding/htmlindex"
27 | "golang.org/x/text/encoding/simplifiedchinese"
28 | )
29 |
// EncodingFactory constructs an Encoding for the given input stream.
type EncodingFactory func(io.Reader) (Encoding, error)

// Encoding is an alias for golang.org/x/text's encoding.Encoding.
type Encoding encoding.Encoding
33 |
34 | var encodings = map[string]EncodingFactory{
35 | // default
36 | "nop": Plain,
37 | "plain": Plain,
38 |
39 | // utf8 (validate input) - shadow htmlindex utf8 codecs not validating input
40 | "unicode-1-1-utf-8": utf8Encoding,
41 | "utf-8": utf8Encoding,
42 | "utf8": utf8Encoding,
43 |
44 | // simplified chinese
45 | "gbk": enc(simplifiedchinese.GBK), // shadow htmlindex using 'GB10830' for GBK
46 |
47 | // 8bit charmap encodings
48 | "iso8859-6e": enc(charmap.ISO8859_6E),
49 | "iso8859-6i": enc(charmap.ISO8859_6I),
50 | "iso8859-8e": enc(charmap.ISO8859_8E),
51 | "iso8859-8i": enc(charmap.ISO8859_8I),
52 |
53 | "iso8859-1": enc(charmap.ISO8859_1), // latin-1
54 | "iso8859-2": enc(charmap.ISO8859_2), // latin-2
55 | "iso8859-3": enc(charmap.ISO8859_3), // latin-3
56 | "iso8859-4": enc(charmap.ISO8859_4), // latin-4
57 | "iso8859-5": enc(charmap.ISO8859_5), // latin/cyrillic
58 | "iso8859-6": enc(charmap.ISO8859_6), // latin/arabic
59 | "iso8859-7": enc(charmap.ISO8859_7), // latin/greek
60 | "iso8859-8": enc(charmap.ISO8859_8), // latin/hebrew
61 | "iso8859-9": enc(charmap.ISO8859_9), // latin-5
62 | "iso8859-10": enc(charmap.ISO8859_10), // latin-6
63 | "iso8859-13": enc(charmap.ISO8859_13), // latin-7
64 | "iso8859-14": enc(charmap.ISO8859_14), // latin-8
65 | "iso8859-15": enc(charmap.ISO8859_15), // latin-9
66 | "iso8859-16": enc(charmap.ISO8859_16), // latin-10
67 |
68 | // ibm codepages
69 | "cp437": enc(charmap.CodePage437),
70 | "cp850": enc(charmap.CodePage850),
71 | "cp852": enc(charmap.CodePage852),
72 | "cp855": enc(charmap.CodePage855),
73 | "cp858": enc(charmap.CodePage858),
74 | "cp860": enc(charmap.CodePage860),
75 | "cp862": enc(charmap.CodePage862),
76 | "cp863": enc(charmap.CodePage863),
77 | "cp865": enc(charmap.CodePage865),
78 | "cp866": enc(charmap.CodePage866),
79 | "ebcdic-037": enc(charmap.CodePage037),
80 | "ebcdic-1040": enc(charmap.CodePage1140),
81 | "ebcdic-1047": enc(charmap.CodePage1047),
82 |
83 | // cyrillic
84 | "koi8r": enc(charmap.KOI8R),
85 | "koi8u": enc(charmap.KOI8U),
86 |
87 | // macintosh
88 | "macintosh": enc(charmap.Macintosh),
89 | "macintosh-cyrillic": enc(charmap.MacintoshCyrillic),
90 |
91 | // windows
92 | "windows1250": enc(charmap.Windows1250), // central and eastern european
93 | "windows1251": enc(charmap.Windows1251), // russian, serbian cyrillic
94 | "windows1252": enc(charmap.Windows1252), // legacy
95 | "windows1253": enc(charmap.Windows1253), // modern greek
96 | "windows1254": enc(charmap.Windows1254), // turkish
97 | "windows1255": enc(charmap.Windows1255), // hebrew
98 | "windows1256": enc(charmap.Windows1256), // arabic
99 | "windows1257": enc(charmap.Windows1257), // estonian, latvian, lithuanian
100 | "windows1258": enc(charmap.Windows1258), // vietnamese
101 | "windows874": enc(charmap.Windows874),
102 |
103 | // utf16 bom codecs (seekable data source required)
104 | "utf-16-bom": utf16BOMRequired,
105 | "utf-16be-bom": utf16BOMBigEndian,
106 | "utf-16le-bom": utf16BOMLittleEndian,
107 | }
108 |
// Plain is the no-op file encoding: read bytes are passed through
// without any transformation.
var Plain = enc(encoding.Nop)
111 |
// utf8Encoding copies input to output, substituting bytes that are not
// valid UTF-8 with the replacement character '\uFFFD' (see the mixed
// type for why the replacement *encoder* is used on the decode path).
//
// See: http://encoding.spec.whatwg.org/#replacement
var utf8Encoding = enc(mixed{})
117 |
118 | // FindEncoding searches for an EncodingFactoryby name.
119 | func FindEncoding(name string) (EncodingFactory, bool) {
120 | if name == "" {
121 | return Plain, true
122 | }
123 | d, ok := encodings[strings.ToLower(name)]
124 | if ok {
125 | return d, ok
126 | }
127 |
128 | codec, err := htmlindex.Get(name)
129 | if err != nil {
130 | return nil, false
131 | }
132 | return enc(codec), true
133 | }
134 |
135 | func enc(e Encoding) EncodingFactory {
136 | return func(io.Reader) (Encoding, error) {
137 | return e, nil
138 | }
139 | }
140 |
--------------------------------------------------------------------------------
/lib/reader/readfile/encoding/mixed.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package encoding
19 |
20 | import "golang.org/x/text/encoding"
21 |
// mixed encoder is a copy of encoding.Replacement.
// The difference between the two is that the Encoder's transformer is used
// for the Decoder as well. The reason is that while decoding UTF-8 we want
// the encoder's behaviour: copy everything through and replace invalid
// UTF-8 characters.
type mixed struct{}

// NewDecoder returns a decoder backed by the replacement encoder's
// transformer, so invalid input bytes are replaced instead of handled
// by the replacement decoder's semantics.
func (mixed) NewDecoder() *encoding.Decoder {
	return &encoding.Decoder{Transformer: encoding.Replacement.NewEncoder().Transformer}
}

// NewEncoder returns the standard replacement encoder.
func (mixed) NewEncoder() *encoding.Encoder {
	return &encoding.Encoder{Transformer: encoding.Replacement.NewEncoder().Transformer}
}
35 |
--------------------------------------------------------------------------------
/lib/reader/readfile/encoding/utf16.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package encoding
19 |
20 | import (
21 | "errors"
22 | "io"
23 | "os"
24 |
25 | "golang.org/x/text/encoding/unicode"
26 | "golang.org/x/text/transform"
27 | )
28 |
// endianness identifies the byte order of a UTF-16 data source.
type endianness int8

const (
	// unknownEndianness means no byte order has been determined
	// (when used as a fallback it means "BOM required").
	unknownEndianness endianness = iota
	bigEndian
	littleEndian
)
36 |
// ErrUnsupportedSourceTypeBOM is returned when a BOM based encoding is
// configured for a data source that cannot seek, since BOM detection
// needs to rewind the stream. (Fixes the "not support by" typo in the
// original message; the sentinel is compared by identity, not text.)
var ErrUnsupportedSourceTypeBOM = errors.New("source type not supported by BOM based encoding")
38 |
// utf16 BOM based encodings. Only seekable data sources are supported for
// the need to check the optional Byte Order Marker being available in data source
// before configuring the actual decoder and encoder.
var (
	// utf16BOMRequired fails with unicode.ErrMissingBOM when the stream
	// has no BOM, as no fallback byte order is specified.
	utf16BOMRequired = utf16BOM(unknownEndianness)

	// utf16BOMBigEndian treats the BOM as optional and falls back to
	// big endian when it is missing.
	utf16BOMBigEndian = utf16BOM(bigEndian)

	// utf16BOMLittleEndian treats the BOM as optional and falls back to
	// little endian when it is missing.
	utf16BOMLittleEndian = utf16BOM(littleEndian)
)
52 |
// utf16Map maps a determined byte order to its BOM-ignoring UTF-16 codec.
// unknownEndianness intentionally has no entry, so lookups for it fail.
var utf16Map = map[endianness]Encoding{
	bigEndian: unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM),
	littleEndian: unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM),
}
57 |
58 | func utf16BOM(e endianness) EncodingFactory {
59 | return func(in_ io.Reader) (Encoding, error) {
60 | in, ok := in_.(io.ReadSeeker)
61 | if !ok {
62 | return nil, ErrUnsupportedSourceTypeBOM
63 | }
64 |
65 | return utf16Seekable(in, e)
66 | }
67 | }
68 |
// utf16Seekable determines the UTF-16 byte order of in by reading the
// optional Byte Order Marker at the beginning of the stream, restoring
// the original read position when needed. If no BOM is present the given
// fallback endianness is used; if that is unknownEndianness too, the
// function fails with unicode.ErrMissingBOM.
//
// NOTE(review): os.SEEK_CUR / os.SEEK_SET have been deprecated in favor
// of io.SeekCurrent / io.SeekStart since Go 1.7; switching would allow
// dropping the os import from this file.
func utf16Seekable(in io.ReadSeeker, endianness endianness) (Encoding, error) {
	// remember file offset in case we have to back off
	offset, err := in.Seek(0, os.SEEK_CUR)
	if err != nil {
		return nil, err
	}

	// goto beginning of file
	keepOffset := offset == 0
	if _, err = in.Seek(0, os.SEEK_SET); err != nil {
		return nil, err
	}

	// read Byte Order Marker (BOM)
	var buf [2]byte
	n, err := in.Read(buf[:])
	if err != nil {
		// best-effort restore of the original position; the read error wins
		in.Seek(offset, os.SEEK_SET)
		return nil, err
	}
	if n < 2 {
		// fewer than 2 bytes available: cannot contain a BOM
		in.Seek(offset, os.SEEK_SET)
		return nil, transform.ErrShortSrc
	}

	// determine endianness from BOM (U+FEFF encoded big or little endian)
	inEndianness := unknownEndianness
	switch {
	case buf[0] == 0xfe && buf[1] == 0xff:
		inEndianness = bigEndian
	case buf[0] == 0xff && buf[1] == 0xfe:
		inEndianness = littleEndian
	}

	// restore offset if BOM is missing or this function was not
	// called with read pointer at beginning of file
	if !keepOffset || inEndianness == unknownEndianness {
		if _, err = in.Seek(offset, os.SEEK_SET); err != nil {
			return nil, err
		}
	}

	// choose encoding based on BOM
	if encoding, ok := utf16Map[inEndianness]; ok {
		return encoding, nil
	}

	// fall back to configured endianness
	if encoding, ok := utf16Map[endianness]; ok {
		return encoding, nil
	}

	// no encoding for configured endianness found => fail
	return nil, unicode.ErrMissingBOM
}
124 |
--------------------------------------------------------------------------------
/lib/reader/readfile/limit.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package readfile
19 |
20 | import (
21 | "fmt"
22 | "infini.sh/agent/lib/reader"
23 | )
24 |
// LimitReader sets an upper limit on line length. Lines longer
// than the max configured line length will be snapped short.
type LimitReader struct {
	reader reader.Reader
	maxBytes int // maximum content length in bytes; longer lines are truncated
}
31 |
// NewLimitReader creates a new reader limiting the line length to maxBytes.
func NewLimitReader(r reader.Reader, maxBytes int) *LimitReader {
	return &LimitReader{reader: r, maxBytes: maxBytes}
}
36 |
37 | // Next returns the next line.
38 | func (r *LimitReader) Next() (reader.Message, error) {
39 | message, err := r.reader.Next()
40 | if len(message.Content) > r.maxBytes {
41 | tmp := make([]byte, r.maxBytes)
42 | n := copy(tmp, message.Content)
43 | if n != r.maxBytes {
44 | return message, fmt.Errorf("unexpected number of bytes were copied, %d instead of limit %d", n, r.maxBytes)
45 | }
46 | message.Content = tmp
47 | message.AddFlagsWithKey("log.flags", "truncated")
48 | }
49 | return message, err
50 | }
51 |
// Close closes the underlying reader.
func (r *LimitReader) Close() error {
	return r.reader.Close()
}
55 |
--------------------------------------------------------------------------------
/lib/reader/readfile/line_terminator.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package readfile
19 |
20 | import "fmt"
21 |
// LineTerminator is the option storing the line terminator characters
// Supported newline reference: https://en.wikipedia.org/wiki/Newline#Unicode
type LineTerminator uint8

const (
	// InvalidTerminator is the invalid terminator (zero value; no map entry)
	InvalidTerminator LineTerminator = iota
	// AutoLineTerminator accepts both LF and CR+LF
	AutoLineTerminator
	// LineFeed is the unicode char LF
	LineFeed
	// VerticalTab is the unicode char VT
	VerticalTab
	// FormFeed is the unicode char FF
	FormFeed
	// CarriageReturn is the unicode char CR
	CarriageReturn
	// CarriageReturnLineFeed is the unicode chars CR+LF
	CarriageReturnLineFeed
	// NextLine is the unicode char NEL
	NextLine
	// LineSeparator is the unicode char LS
	LineSeparator
	// ParagraphSeparator is the unicode char PS
	ParagraphSeparator
	// NullTerminator is the null character (NUL, 0x00)
	NullTerminator
)
50 |
var (
	// lineTerminators maps the configuration string to its LineTerminator
	// value; used by LineTerminator.Unpack.
	lineTerminators = map[string]LineTerminator{
		"auto": AutoLineTerminator,
		"line_feed": LineFeed,
		"vertical_tab": VerticalTab,
		"form_feed": FormFeed,
		"carriage_return": CarriageReturn,
		"carriage_return_line_feed": CarriageReturnLineFeed,
		"next_line": NextLine,
		"line_separator": LineSeparator,
		"paragraph_separator": ParagraphSeparator,
		"null_terminator": NullTerminator,
	}

	// lineTerminatorCharacters maps each terminator to the byte sequence
	// matched at end of line. AutoLineTerminator maps to LF only; the
	// CR+LF case is handled separately (see StripNewline.autoLineEndingChars).
	// NOTE: NextLine is stored as the single byte 0x85, not the two-byte
	// UTF-8 encoding of U+0085, while LS/PS use their multi-byte UTF-8 forms.
	lineTerminatorCharacters = map[LineTerminator][]byte{
		AutoLineTerminator: []byte{'\u000A'},
		LineFeed: []byte{'\u000A'},
		VerticalTab: []byte{'\u000B'},
		FormFeed: []byte{'\u000C'},
		CarriageReturn: []byte{'\u000D'},
		CarriageReturnLineFeed: []byte("\u000D\u000A"),
		NextLine: []byte{'\u0085'},
		LineSeparator: []byte("\u2028"),
		ParagraphSeparator: []byte("\u2029"),
		NullTerminator: []byte{'\u0000'},
	}
)
78 |
79 | // Unpack unpacks the configuration from the config file
80 | func (l *LineTerminator) Unpack(option string) error {
81 | terminator, ok := lineTerminators[option]
82 | if !ok {
83 | return fmt.Errorf("invalid line terminator: %s", option)
84 | }
85 |
86 | *l = terminator
87 |
88 | return nil
89 | }
90 |
--------------------------------------------------------------------------------
/lib/reader/readfile/metafields.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package readfile
19 |
20 | import (
21 | "infini.sh/agent/lib/reader"
22 | "infini.sh/framework/core/util"
23 | )
24 |
// FileMetaReader wraps another reader and enriches each non-empty message
// with the source file path and the byte offset of the line within the file.
type FileMetaReader struct {
	reader reader.Reader
	path string // file path reported under "log.file.path"
	offset int64 // byte offset of the next message within the file
}
32 |
// NewFilemeta creates a reader that decorates messages from r with file
// metadata (path and byte offset). offset is the offset of the first
// message within the file.
func NewFilemeta(r reader.Reader, path string, offset int64) reader.Reader {
	return &FileMetaReader{r, path, offset}
}
38 |
39 | // Next reads the next line from it's initial io.Reader
40 | // This converts a io.Reader to a reader.reader
41 | func (r *FileMetaReader) Next() (reader.Message, error) {
42 | message, err := r.reader.Next()
43 |
44 | // if the message is empty, there is no need to enrich it with file metadata
45 | if message.IsEmpty() {
46 | r.offset += int64(message.Bytes)
47 | return message, err
48 | }
49 |
50 | message.Fields.DeepUpdate(util.MapStr{
51 | "log": util.MapStr{
52 | "offset": r.offset,
53 | "file": util.MapStr{
54 | "path": r.path,
55 | },
56 | },
57 | })
58 |
59 | r.offset += int64(message.Bytes)
60 |
61 | return message, err
62 | }
63 |
// Close closes the underlying reader.
func (r *FileMetaReader) Close() error {
	return r.reader.Close()
}
67 |
--------------------------------------------------------------------------------
/lib/reader/readfile/strip_newline.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package readfile
19 |
20 | import (
21 | "bytes"
22 | "infini.sh/agent/lib/reader"
23 | )
24 |
// StripNewline reader removes the last trailing newline characters from
// read lines.
type StripNewline struct {
	reader reader.Reader
	nl []byte // configured line terminator byte sequence
	lineEndingFunc func(*StripNewline, []byte) int // returns the number of terminator bytes to strip
}
32 |
33 | // New creates a new line reader stripping the last tailing newline.
34 | func NewStripNewline(r reader.Reader, terminator LineTerminator) *StripNewline {
35 | lineEndingFunc := (*StripNewline).lineEndingChars
36 | if terminator == AutoLineTerminator {
37 | lineEndingFunc = (*StripNewline).autoLineEndingChars
38 | }
39 |
40 | return &StripNewline{
41 | reader: r,
42 | nl: lineTerminatorCharacters[terminator],
43 | lineEndingFunc: lineEndingFunc,
44 | }
45 | }
46 |
47 | // Next returns the next line.
48 | func (p *StripNewline) Next() (reader.Message, error) {
49 | message, err := p.reader.Next()
50 | if err != nil {
51 | return message, err
52 | }
53 |
54 | L := message.Content
55 | message.Content = L[:len(L)-p.lineEndingFunc(p, L)]
56 |
57 | return message, err
58 | }
59 |
// isLine checks if the given byte array is a complete line, i.e. ends
// with the configured terminator byte sequence.
func (p *StripNewline) isLine(l []byte) bool {
	return bytes.HasSuffix(l, p.nl)
}
64 |
65 | func (p *StripNewline) lineEndingChars(l []byte) int {
66 | if !p.isLine(l) {
67 | return 0
68 | }
69 |
70 | return len(p.nl)
71 | }
72 |
73 | func (p *StripNewline) autoLineEndingChars(l []byte) int {
74 | if !p.isLine(l) {
75 | return 0
76 | }
77 |
78 | if len(l) > 1 && l[len(l)-2] == '\r' {
79 | return 2
80 | }
81 | return 1
82 | }
83 |
// Close closes the underlying reader.
func (p *StripNewline) Close() error {
	return p.reader.Close()
}
87 |
--------------------------------------------------------------------------------
/lib/reader/readfile/timeout.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package readfile
19 |
20 | import (
21 | "errors"
22 | "infini.sh/agent/lib/reader"
23 | "io"
24 | "time"
25 | )
26 |
var (
	// errTimeout is the default signal error returned by TimeoutReader
	// when no explicit signal error was configured.
	errTimeout = errors.New("timeout")
)
30 |
// TimeoutReader will signal some configurable timeout error if no
// new line can be returned in time.
type TimeoutReader struct {
	reader reader.Reader
	timeout time.Duration // maximum wait per Next call
	signal error // error returned to the caller on timeout
	running bool // whether the producer goroutine is currently active
	ch chan lineMessage // buffered (size 1) channel carrying produced lines
	done chan struct{} // closed by Close to stop producer and Next
}

// lineMessage couples a read message with the error (if any) from the
// underlying reader so both travel over a single channel.
type lineMessage struct {
	line reader.Message
	err error
}
46 |
47 | // NewTimeoutReader returns a new timeout reader from an input line reader.
48 | func NewTimeoutReader(reader reader.Reader, signal error, t time.Duration) *TimeoutReader {
49 | if signal == nil {
50 | signal = errTimeout
51 | }
52 |
53 | return &TimeoutReader{
54 | reader: reader,
55 | signal: signal,
56 | timeout: t,
57 | ch: make(chan lineMessage, 1),
58 | done: make(chan struct{}),
59 | }
60 | }
61 |
// Next returns the next line. If no line was returned before timeout, the
// configured timeout error is returned.
// For handling timeouts a goroutine is started for reading lines from the
// configured line reader. Only when the underlying reader returns an error
// will the goroutine be finished.
func (r *TimeoutReader) Next() (reader.Message, error) {
	// Lazily start (or restart after an error) the single producer goroutine.
	if !r.running {
		r.running = true
		go func() {
			for {
				message, err := r.reader.Next()
				select {
				case <-r.done:
					// Close was called: drop the message and exit.
					return
				case r.ch <- lineMessage{message, err}:
					if err != nil {
						// Stop after forwarding the first error; Next
						// resets r.running so a later call restarts us.
						return
					}
				}
			}
		}()
	}
	timer := time.NewTimer(r.timeout)
	select {
	case msg := <-r.ch:
		if msg.err != nil {
			// Producer exited on this error; allow a restart next call.
			r.running = false
		}
		timer.Stop()
		return msg.line, msg.err
	case <-timer.C:
		// No line arrived within the timeout window: signal the caller.
		return reader.Message{}, r.signal
	case <-r.done:
		return reader.Message{}, io.EOF
	}
}
98 |
// Close stops the producer goroutine by closing the done channel and
// closes the underlying reader.
//
// NOTE(review): a second Close call would panic on close(r.done) —
// confirm callers close at most once.
func (r *TimeoutReader) Close() error {
	close(r.done)

	return r.reader.Close()
}
104 |
--------------------------------------------------------------------------------
/lib/reader/readjson/docker_json_config.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package readjson
19 |
20 | import "fmt"
21 |
// ContainerFormat identifies the on-disk log format of a container runtime.
type ContainerFormat uint8

// Stream selects which container output stream(s) to read.
type Stream uint8

const (
	// container log formats; Auto detects the format per line
	Auto ContainerFormat = iota + 1
	CRI
	Docker
	JSONFile

	// NOTE: iota keeps counting across the whole const block, so All
	// starts at 5 (not 1); only distinctness matters for these values.
	All Stream = iota + 1
	Stdout
	Stderr
)
36 |
var (
	// containerFormats maps the configuration string to its
	// ContainerFormat value; used by ContainerFormat.Unpack.
	containerFormats = map[string]ContainerFormat{
		"auto": Auto,
		"cri": CRI,
		"docker": Docker,
		"json-file": JSONFile,
	}

	// containerStreams maps the configuration string to its Stream
	// value; used by Stream.Unpack and Stream.String.
	containerStreams = map[string]Stream{
		"all": All,
		"stdout": Stdout,
		"stderr": Stderr,
	}
)
51 |
// ContainerJSONConfig selects the stream(s) to read and the log format
// for container log files.
type ContainerJSONConfig struct {
	Stream Stream `config:"stream"`
	Format ContainerFormat `config:"format"`
}
56 |
57 | func DefaultContainerConfig() ContainerJSONConfig {
58 | return ContainerJSONConfig{
59 | Format: Auto,
60 | Stream: All,
61 | }
62 | }
63 |
64 | func (f *ContainerFormat) Unpack(v string) error {
65 | val, ok := containerFormats[v]
66 | if !ok {
67 | keys := make([]string, len(containerFormats))
68 | i := 0
69 | for k := range containerFormats {
70 | keys[i] = k
71 | i++
72 | }
73 | return fmt.Errorf("unknown container log format: %s, supported values: %+v", v, keys)
74 | }
75 | *f = val
76 | return nil
77 | }
78 |
79 | func (s *Stream) Unpack(v string) error {
80 | val, ok := containerStreams[v]
81 | if !ok {
82 | keys := make([]string, len(containerStreams))
83 | i := 0
84 | for k := range containerStreams {
85 | keys[i] = k
86 | i++
87 | }
88 | return fmt.Errorf("unknown streams: %s, supported values: %+v", v, keys)
89 | }
90 | *s = val
91 | return nil
92 | }
93 |
94 | func (s *Stream) String() string {
95 | for k, v := range containerStreams {
96 | if v == *s {
97 | return k
98 | }
99 | }
100 | return ""
101 | }
102 |
--------------------------------------------------------------------------------
/lib/reader/readjson/json_config.go:
--------------------------------------------------------------------------------
1 | // Licensed to Elasticsearch B.V. under one or more contributor
2 | // license agreements. See the NOTICE file distributed with
3 | // this work for additional information regarding copyright
4 | // ownership. Elasticsearch B.V. licenses this file to you under
5 | // the Apache License, Version 2.0 (the "License"); you may
6 | // not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | package readjson
19 |
// Config holds the options for the JSON reader.
type Config struct {
	// MessageKey names the JSON field to treat as the message text.
	MessageKey string `config:"message_key"`
	// DocumentID names the JSON field to use as the document id.
	DocumentID string `config:"document_id"`
	// KeysUnderRoot lifts decoded keys to the event root.
	KeysUnderRoot bool `config:"keys_under_root"`
	// OverwriteKeys lets decoded keys replace existing event fields.
	OverwriteKeys bool `config:"overwrite_keys"`
	// AddErrorKey records decoding errors in the event.
	AddErrorKey bool `config:"add_error_key"`
	// IgnoreDecodingError suppresses decoding errors.
	IgnoreDecodingError bool `config:"ignore_decoding_error"`
	// ExpandKeys expands dotted keys into nested objects.
	ExpandKeys bool `config:"expand_keys"`
}
30 |
// ParserConfig holds the options for the JSON parser: the embedded reader
// Config plus the source field to decode and the target to store results under.
type ParserConfig struct {
	Config `config:",inline"`
	Field string `config:"field"`
	Target string `config:"target"`
}
36 |
// Validate validates the Config option for JSON reader.
// All option combinations are currently accepted; the method exists to
// satisfy the config validation interface.
func (c *Config) Validate() error {
	return nil
}
41 |
--------------------------------------------------------------------------------
/lib/util/elastic.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package util
6 |
7 | import (
8 | "context"
9 | "fmt"
10 | "infini.sh/framework/core/elastic"
11 | "infini.sh/framework/core/model"
12 | "infini.sh/framework/core/util"
13 | "time"
14 | )
15 |
16 | func GetLocalNodeInfo(endpoint string, auth *model.BasicAuth) (string, *elastic.NodesInfo, error) {
17 | url := fmt.Sprintf("%s/_nodes/_local", endpoint)
18 | req := util.Request{
19 | Method: util.Verb_GET,
20 | Url: url,
21 | }
22 | if auth != nil {
23 | req.SetBasicAuth(auth.Username, auth.Password.Get())
24 | }
25 | ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
26 | defer cancel()
27 | req.Context = ctx
28 | resp, err := util.ExecuteRequest(&req)
29 |
30 | if err != nil {
31 | return "", nil, err
32 | }
33 | if resp.StatusCode != 200 {
34 | return "", nil, fmt.Errorf("%s", string(resp.Body))
35 | }
36 |
37 | node := elastic.NodesResponse{}
38 | err = util.FromJSONBytes(resp.Body, &node)
39 | if err != nil {
40 | return "", nil, err
41 | }
42 | for k, n := range node.Nodes {
43 | return k, &n, nil
44 | }
45 | return "", nil, fmt.Errorf("%s", "node not found")
46 | }
47 |
48 | func GetClusterVersion(endpoint string, auth *model.BasicAuth) (*elastic.ClusterInformation, error) {
49 | req := util.Request{
50 | Method: util.Verb_GET,
51 | Url: endpoint,
52 | }
53 | if auth != nil {
54 | req.SetBasicAuth(auth.Username, auth.Password.Get())
55 | }
56 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
57 | defer cancel()
58 | req.Context = ctx
59 | resp, err := util.ExecuteRequest(&req)
60 |
61 | if err != nil {
62 | return nil, err
63 | }
64 | if resp.StatusCode != 200 {
65 | return nil, fmt.Errorf("%s", string(resp.Body))
66 | }
67 |
68 | version := elastic.ClusterInformation{}
69 | err = util.FromJSONBytes(resp.Body, &version)
70 | if err != nil {
71 | return nil, err
72 | }
73 | return &version, nil
74 | }
75 |
--------------------------------------------------------------------------------
/lib/util/file.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package util
6 |
7 | import (
8 | "bufio"
9 | "compress/gzip"
10 | "fmt"
11 | "io"
12 | "os"
13 | "path/filepath"
14 | )
15 |
16 | func CountFileRows(filePath string) (int64, error) {
17 | file, err := os.Open(filePath)
18 | if err != nil {
19 | return 0, err
20 | }
21 | defer file.Close()
22 |
23 | var count int64 = 0
24 | scanner := bufio.NewScanner(file)
25 | for scanner.Scan() {
26 | count++
27 | }
28 | return count, nil
29 | }
30 |
31 | // ResolveSymlink Parse the target file path of the soft link
32 | func ResolveSymlink(link string) (string, error) {
33 | realPath, err := filepath.EvalSymlinks(link)
34 | if err != nil {
35 | return "", err
36 | }
37 | absPath, err := filepath.Abs(realPath)
38 | if err != nil {
39 | return "", err
40 | }
41 | return absPath, nil
42 | }
43 |
44 | // UnpackGzipFile extracts a .gz file and writes the output to a regular file.
45 | func UnpackGzipFile(gzFile, outputFile string) error {
46 | f, err := os.Open(gzFile)
47 | if err != nil {
48 | return fmt.Errorf("failed to open .gz file %s: %w", gzFile, err)
49 | }
50 | defer f.Close()
51 |
52 | // Create a gzip reader
53 | gzReader, err := gzip.NewReader(f)
54 | if err != nil {
55 | return fmt.Errorf("failed to create gzip reader with file %s: %w", gzFile, err)
56 | }
57 | defer gzReader.Close()
58 |
59 | // Create the output file
60 | outFile, err := os.Create(outputFile)
61 | if err != nil {
62 | return fmt.Errorf("failed to create output file %s: %w", outputFile, err)
63 | }
64 | defer outFile.Close()
65 |
66 | // Copy decompressed content to output file
67 | _, err = io.Copy(outFile, gzReader)
68 | if err != nil {
69 | return fmt.Errorf("failed to write to output file %s: %w", outputFile, err)
70 | }
71 | return nil
72 | }
73 |
--------------------------------------------------------------------------------
/lib/util/network.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package util
6 |
7 | import (
8 | "net"
9 | "strings"
10 | )
11 |
// GetClientIp returns an IP address bound to the network interface named
// by filter. IPv4 addresses are preferred and returned as soon as one is
// found; otherwise the last non-nil address is returned (IPv6 addresses
// keep the legacy "::"-suffix handling for backward compatibility).
// Returns an empty string when the interface has no usable address.
func GetClientIp(filter string) (string, error) {
	iface, err := net.InterfaceByName(filter)
	if err != nil {
		return "", err
	}
	addrs, err := iface.Addrs()
	if err != nil {
		return "", err
	}
	var ipStr string
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		default:
			// Fix: unknown address types previously left ip nil and
			// produced the literal string "<nil>".
			continue
		}
		if ip == nil {
			continue
		}
		// Prefer the first IPv4 address instead of silently returning
		// whatever address happened to come last.
		if v4 := ip.To4(); v4 != nil {
			return v4.String(), nil
		}
		ipStr = ip.String()
		// Legacy behavior kept for IPv6 fallback.
		// NOTE(review): splitting on "::" discards most of an IPv6
		// address — confirm callers really want this.
		if strings.Contains(ipStr, "::") {
			ipStr = strings.Split(ipStr, "::")[1]
		}
	}
	return ipStr, nil
}
39 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | /* ©INFINI, All Rights Reserved.
2 | * mail: contact#infini.ltd */
3 |
4 | package main
5 |
6 | import (
7 | "context"
8 | _ "expvar"
9 | log "github.com/cihub/seelog"
10 | "infini.sh/agent/config"
11 | _ "infini.sh/agent/plugin"
12 | api3 "infini.sh/agent/plugin/api"
13 | "infini.sh/framework"
14 | "infini.sh/framework/core/global"
15 | "infini.sh/framework/core/module"
16 | task2 "infini.sh/framework/core/task"
17 | "infini.sh/framework/core/util"
18 | "infini.sh/framework/modules/api"
19 | "infini.sh/framework/modules/elastic"
20 | "infini.sh/framework/modules/keystore"
21 | "infini.sh/framework/modules/metrics"
22 | "infini.sh/framework/modules/pipeline"
23 | queue2 "infini.sh/framework/modules/queue/disk_queue"
24 | stats2 "infini.sh/framework/modules/stats"
25 | "infini.sh/framework/modules/task"
26 | _ "infini.sh/framework/plugins/elastic/bulk_indexing"
27 | _ "infini.sh/framework/plugins/elastic/indexing_merge"
28 | _ "infini.sh/framework/plugins/http"
29 | _ "infini.sh/framework/plugins/queue/consumer"
30 | "infini.sh/framework/plugins/simple_kv"
31 | "os"
32 | "runtime"
33 | )
34 |
// main boots the INFINI agent. It constructs the framework application
// with build/version metadata, registers the core system modules and
// user plugins, starts them, and — when enabled — schedules a one-shot
// interval task that generates the agent's metrics-task configuration.
func main() {

	// ASCII-art banner shown on startup.
	terminalHeader := (" _ ___ __ __ _____ \n")
	terminalHeader += (" /_\\ / _ \\ /__\\/\\ \\ \\/__ \\\n")
	terminalHeader += (" //_\\\\ / /_\\//_\\ / \\/ / / /\\/\n")
	terminalHeader += ("/ _ \\/ /_\\\\//__/ /\\ / / / \n")
	terminalHeader += ("\\_/ \\_/\\____/\\__/\\_\\ \\/ \\/ \n\n")

	terminalFooter := ""

	app := framework.NewApp("agent", "A light-weight but powerful cloud agent.",
		util.TrimSpaces(config.Version), util.TrimSpaces(config.BuildNumber), util.TrimSpaces(config.LastCommitLog), util.TrimSpaces(config.BuildDate), util.TrimSpaces(config.EOLDate), terminalHeader, terminalFooter)

	app.Init(nil)

	defer app.Shutdown()

	if app.Setup(func() {

		//load core modules first
		module.RegisterSystemModule(&elastic.ElasticModule{})
		module.RegisterSystemModule(&stats2.SimpleStatsModule{})
		module.RegisterSystemModule(&simple_kv.SimpleKV{})
		module.RegisterSystemModule(&queue2.DiskQueue{})

		module.RegisterSystemModule(&api.APIModule{})
		module.RegisterSystemModule(&pipeline.PipeModule{})
		module.RegisterSystemModule(&task.TaskModule{})

		module.RegisterUserPlugin(&metrics.MetricsModule{})
		module.RegisterUserPlugin(&keystore.KeystoreModule{})

		// register the agent's HTTP endpoints
		api3.InitAPI()
	}, func() {
		// Recover from any panic raised while starting modules so the
		// process logs the cause and exits non-zero instead of crashing
		// with a bare stack trace.
		defer func() {
			if r := recover(); r != nil {
				var v string
				switch r.(type) {
				case error:
					v = r.(error).Error()
				case runtime.Error:
					v = r.(runtime.Error).Error()
				case string:
					v = r.(string)
				}
				log.Errorf("error on start module [%v]", v)
				log.Flush()
				os.Exit(1)
			}
		}()
		//start each module, with enabled provider
		module.Start()
		if global.Env().SystemConfig.Configs.AllowGeneratedMetricsTasks {
			taskID := util.GetUUID()
			// Retry every 20s until the metrics-task config generates
			// successfully, then the task deletes its own schedule.
			task2.RegisterScheduleTask(task2.ScheduleTask{
				ID:          taskID,
				Description: "generated metrics tasks for agent",
				Type:        "interval",
				Interval:    "20s",
				Task: func(ctx context.Context) {
					err := generatedMetricsTasksConfig()
					if err != nil {
						log.Error("error generating metrics tasks config: ", err)
						return
					}
					//clean up task after success
					task2.DeleteTask(taskID)
				},
			})
		}
		//if agent is mark as deleted, cleanup local configs

	}, nil) {
		app.Run()
	}

}
112 |
--------------------------------------------------------------------------------
/plugin/api/discover.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package api
6 |
7 | import (
8 | "fmt"
9 | "net/http"
10 |
11 | log "github.com/cihub/seelog"
12 | "infini.sh/agent/lib/process"
13 | httprouter "infini.sh/framework/core/api/router"
14 | "infini.sh/framework/core/config"
15 | "infini.sh/framework/core/elastic"
16 | "infini.sh/framework/core/env"
17 | "infini.sh/framework/core/global"
18 | "infini.sh/framework/core/util"
19 | )
20 |
21 | // local exists nodes, find new nodes in runtime
22 | func (handler *AgentAPI) getESNodes(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
23 | var configs []elastic.ElasticsearchConfig
24 | appCfg, err := getAppConfig()
25 | if err != nil {
26 | _, err = env.ParseConfig("elasticsearch", &configs)
27 | } else {
28 | _, err = env.ParseConfigSection(appCfg, "elasticsearch", &configs)
29 | }
30 | if err != nil {
31 | log.Errorf("try to load elasticsearch config error: %v", err)
32 | }
33 | for i := range configs {
34 | if configs[i].ID == "" {
35 | configs[i].ID = configs[i].Name
36 | }
37 | }
38 |
39 | //found local nodes
40 | result, err := process.DiscoverESNode(configs)
41 | if err != nil {
42 | log.Errorf("get local nodes error: %v", err)
43 | handler.WriteError(w, err.Error(), http.StatusInternalServerError)
44 | return
45 | }
46 |
47 | handler.WriteJSON(w, result, http.StatusOK)
48 | }
49 |
50 | func getAppConfig() (*config.Config, error) {
51 | configFile := global.Env().GetConfigFile()
52 | configDir := global.Env().GetConfigDir()
53 | parentCfg, err := config.LoadFile(configFile)
54 | if err != nil {
55 | return nil, fmt.Errorf("failed to load config file: %v, path: %s", err, configFile)
56 | }
57 | childCfg, err := config.LoadPath(configDir)
58 | if err != nil {
59 | return nil, fmt.Errorf("failed to load config dir: %v, path: %s", err, configDir)
60 | }
61 | err = parentCfg.Merge(childCfg)
62 | return parentCfg, nil
63 | }
64 |
65 | func (handler *AgentAPI) getESNodeInfo(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
66 | esConfig := elastic.ElasticsearchConfig{}
67 | err := handler.DecodeJSON(req, &esConfig)
68 | if err != nil {
69 | handler.WriteError(w, err.Error(), http.StatusInternalServerError)
70 | return
71 | }
72 |
73 | if global.Env().IsDebug {
74 | log.Debug("esConfig: ", util.MustToJSON(esConfig))
75 | }
76 |
77 | localNodeInfo, err := process.DiscoverESNodeFromEndpoint(esConfig.GetAnyEndpoint(), esConfig.BasicAuth)
78 | if err != nil {
79 | handler.WriteError(w, err.Error(), http.StatusInternalServerError)
80 | return
81 | }
82 |
83 | handler.WriteJSON(w, localNodeInfo, http.StatusOK)
84 | }
85 |
--------------------------------------------------------------------------------
/plugin/api/init.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package api
6 |
7 | import "infini.sh/framework/core/api"
8 |
// AgentAPI groups the agent's HTTP handlers; it embeds the framework's
// base handler for JSON decoding/encoding and error-response helpers.
type AgentAPI struct {
	api.Handler
}
12 |
// InitAPI registers the agent's HTTP endpoints with the framework router:
// node discovery, direct node info, and log-file listing/reading.
func InitAPI() {
	agentAPI := AgentAPI{}

	//discovery local nodes
	api.HandleAPIMethod(api.GET, "/elasticsearch/node/_discovery", agentAPI.getESNodes)
	api.HandleAPIMethod(api.POST, "/elasticsearch/node/_info", agentAPI.getESNodeInfo) //get node info by connect to this node
	api.HandleAPIMethod(api.POST, "/elasticsearch/logs/_list", agentAPI.getElasticLogFiles)
	api.HandleAPIMethod(api.POST, "/elasticsearch/logs/_read", agentAPI.readElasticLogFile)
}
22 |
--------------------------------------------------------------------------------
/plugin/api/log.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package api
6 |
7 | import (
8 | "fmt"
9 | log "github.com/cihub/seelog"
10 | "infini.sh/agent/lib/reader/linenumber"
11 | util2 "infini.sh/agent/lib/util"
12 | httprouter "infini.sh/framework/core/api/router"
13 | "infini.sh/framework/core/global"
14 | "infini.sh/framework/core/util"
15 | "io"
16 | "net/http"
17 | "os"
18 | "path"
19 | "path/filepath"
20 | "runtime"
21 | "strings"
22 | )
23 |
24 | func (handler *AgentAPI) getElasticLogFiles(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
25 | reqBody := GetElasticLogFilesReq{}
26 | handler.DecodeJSON(req, &reqBody)
27 | if reqBody.LogsPath == "" {
28 | handler.WriteError(w, "miss param logs_path", http.StatusInternalServerError)
29 | return
30 | }
31 |
32 | fileInfos, err := os.ReadDir(reqBody.LogsPath)
33 | if err != nil {
34 | log.Error(err)
35 | handler.WriteJSON(w, err.Error(), http.StatusInternalServerError)
36 | return
37 | }
38 | var files []util.MapStr
39 | for _, info := range fileInfos {
40 | if info.IsDir() {
41 | continue
42 | }
43 | fInfo, err := info.Info()
44 | if err != nil {
45 | log.Error(err)
46 | continue
47 | }
48 | filePath := path.Join(reqBody.LogsPath, info.Name())
49 | totalRows, err := util2.CountFileRows(filePath)
50 | if err != nil {
51 | log.Error(err)
52 | continue
53 | }
54 | files = append(files, util.MapStr{
55 | "name": fInfo.Name(),
56 | "size_in_bytes": fInfo.Size(),
57 | "modify_time": fInfo.ModTime(),
58 | "total_rows": totalRows,
59 | })
60 | }
61 |
62 | handler.WriteJSON(w, util.MapStr{
63 | "result": files,
64 | "success": true,
65 | }, http.StatusOK)
66 | }
67 |
// readElasticLogFile returns up to reqBody.Lines lines from a log file,
// starting at reqBody.StartLineNumber. Gzipped logs are unpacked once
// into a per-agent temp directory and read from there on later requests.
// The response reports EOF so callers can page through the file.
func (handler *AgentAPI) readElasticLogFile(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
	reqBody := ReadElasticLogFileReq{}
	err := handler.DecodeJSON(req, &reqBody)
	if err != nil {
		log.Error(err)
		handler.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	logFilePath := filepath.Join(reqBody.LogsPath, reqBody.FileName)
	if reqBody.StartLineNumber < 0 {
		reqBody.StartLineNumber = 0
	}
	if strings.HasSuffix(reqBody.FileName, ".gz") {
		// read gzip log file, and then unpack it to tmp file
		// NOTE(review): the unpack cache is keyed only by file name — a
		// changed .gz with the same name would be served stale; confirm
		// log-rotation semantics make this safe.
		tmpFilePath := filepath.Join(os.TempDir(), "agent", strings.TrimSuffix(reqBody.FileName, ".gz"))
		if !util.FileExists(tmpFilePath) {
			fileDir := filepath.Dir(tmpFilePath)
			if !util.FileExists(fileDir) {
				err = os.MkdirAll(fileDir, os.ModePerm)
				if err != nil {
					log.Error(err)
					handler.WriteJSON(w, err.Error(), http.StatusInternalServerError)
					return
				}
			}
			err = util2.UnpackGzipFile(logFilePath, tmpFilePath)
			if err != nil {
				log.Error(err)
				handler.WriteJSON(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		logFilePath = tmpFilePath
	}
	r, err := linenumber.NewLinePlainTextReader(logFilePath, reqBody.StartLineNumber, io.SeekStart)
	if err != nil {
		log.Error(err)
		handler.WriteJSON(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Always close the reader; outside debug mode also swallow panics so
	// a reader bug cannot take down the whole agent.
	defer func() {
		if !global.Env().IsDebug {
			// This `r` shadows the reader only inside the if-block; the
			// Close below still refers to the outer reader.
			if r := recover(); r != nil {
				var v string
				switch r.(type) {
				case error:
					v = r.(error).Error()
				case runtime.Error:
					v = r.(runtime.Error).Error()
				case string:
					v = r.(string)
				}
				log.Error("error on exit disk_queue,", v)
			}
		}
		if r != nil {
			r.Close()
		}
	}()

	var msgs []util.MapStr
	isEOF := false
	for i := 0; i < reqBody.Lines; i++ {
		msg, err := r.Next()
		if err != nil {
			if err == io.EOF {
				isEOF = true
				break
			} else {
				log.Error(err)
				handler.WriteError(w, fmt.Sprintf("read logs error: %v", err), http.StatusInternalServerError)
				return
			}
		}
		msgs = append(msgs, util.MapStr{
			"content":     string(msg.Content),
			"bytes":       msg.Bytes,
			"offset":      msg.Offset,
			"line_number": coverLineNumbers(msg.LineNumbers),
		})
	}
	handler.WriteJSON(w, util.MapStr{
		"result":  msgs,
		"success": true,
		"EOF":     isEOF,
	}, http.StatusOK)
}
157 |
// coverLineNumbers normalizes a slice of line numbers for JSON output:
// a single-element slice collapses to the bare number, while anything
// else (including empty) is returned unchanged.
func coverLineNumbers(numbers []int64) interface{} {
	if len(numbers) != 1 {
		return numbers
	}
	return numbers[0]
}
165 |
--------------------------------------------------------------------------------
/plugin/api/model.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package api
6 |
// GetElasticLogFilesReq is the request body for POST /elasticsearch/logs/_list.
type GetElasticLogFilesReq struct {
	// LogsPath is the directory whose log files should be listed.
	LogsPath string `json:"logs_path"`
}
10 |
// ReadElasticLogFileReq is the request body for POST /elasticsearch/logs/_read.
type ReadElasticLogFileReq struct {
	// LogsPath is the directory containing the log file.
	LogsPath string `json:"logs_path"`
	// FileName is the file to read; a ".gz" suffix triggers unpacking.
	FileName string `json:"file_name"`
	// Offset is a byte offset. NOTE(review): not referenced by the read
	// handler in this file — confirm whether it is still used.
	Offset int64 `json:"offset"`
	// Lines is the maximum number of lines to return.
	Lines int `json:"lines"`
	// StartLineNumber is the first line to read; negatives clamp to 0.
	StartLineNumber int64 `json:"start_line_number"`
}
18 |
--------------------------------------------------------------------------------
/plugin/elastic/esinfo_test.go:
--------------------------------------------------------------------------------
1 | package elastic
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "testing"
7 | )
8 |
// TestESInfo is currently disabled; it is kept as a placeholder for
// ES-info tests (the reason for the skip is not recorded here).
func TestESInfo(t *testing.T) {
	t.Skip()
}
12 |
13 | func readFile(fname string) {
14 | file, err := os.Open(fname)
15 | if err != nil {
16 | panic(err)
17 | }
18 | defer file.Close()
19 |
20 | buf := make([]byte, 62)
21 | stat, err := os.Stat(fname)
22 | start := stat.Size() - 62
23 | _, err = file.ReadAt(buf, start)
24 | if err == nil {
25 | fmt.Printf("%s\n", buf)
26 | }
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/plugin/elastic/logging/es_logs_test.go:
--------------------------------------------------------------------------------
1 | package logging
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
// TestFixLogPath verifies that fixLogPath converts the forward slashes
// of a Windows-style path into backslashes.
func TestFixLogPath(t *testing.T) {
	v := fixLogPath(`C:/logs/246`)
	assert.Equal(t, v, `C:\logs\246`)
}
13 |
--------------------------------------------------------------------------------
/plugin/elastic/metric/cluster_health/cluster_health.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package cluster_health
6 |
7 | import (
8 | "fmt"
9 | log "github.com/cihub/seelog"
10 | "infini.sh/framework/core/config"
11 | "infini.sh/framework/core/elastic"
12 | "infini.sh/framework/core/event"
13 | "infini.sh/framework/core/pipeline"
14 | "infini.sh/framework/core/util"
15 | "infini.sh/framework/modules/elastic/adapter"
16 | )
17 |
18 | const processorName = "es_cluster_health"
19 |
// init registers this processor with the pipeline framework under the
// name "es_cluster_health" so pipeline configs can reference it.
func init() {
	pipeline.RegisterProcessorPlugin(processorName, newProcessor)
}
23 |
24 | func newProcessor(c *config.Config) (pipeline.Processor, error) {
25 | cfg := Config{}
26 | if err := c.Unpack(&cfg); err != nil {
27 | log.Error(err)
28 | return nil, fmt.Errorf("failed to unpack the configuration of %s processor: %s", processorName, err)
29 | }
30 | processor := ClusterHealth{
31 | config: &cfg,
32 | }
33 | _, err := adapter.GetClusterUUID(processor.config.Elasticsearch)
34 | if err != nil {
35 | log.Error(" get cluster uuid error: ", err)
36 | }
37 | return &processor, nil
38 | }
39 |
// Config holds the settings of the es_cluster_health processor.
type Config struct {
	// Elasticsearch is the ID/name of the cluster registered with the framework.
	Elasticsearch string `config:"elasticsearch,omitempty"`
	// Labels are extra key/value pairs attached to every emitted event.
	Labels map[string]interface{} `config:"labels,omitempty"`
}
44 |
// ClusterHealth collects cluster-health snapshots for one configured cluster.
type ClusterHealth struct {
	config *Config
}
48 |
// Name returns the processor's registered name.
func (p *ClusterHealth) Name() string {
	return processorName
}
52 |
// Process looks up the configured cluster's metadata and collects one
// health snapshot for it.
func (p *ClusterHealth) Process(c *pipeline.Context) error {
	meta := elastic.GetMetadata(p.config.Elasticsearch)
	return p.Collect(p.config.Elasticsearch, meta)
}
57 |
58 | func (p *ClusterHealth) Collect(k string, v *elastic.ElasticsearchMetadata) error {
59 |
60 | log.Trace("collecting custer health metrics for :", k)
61 |
62 | client := elastic.GetClientNoPanic(k)
63 | if client == nil {
64 | return nil
65 | }
66 | var health *elastic.ClusterHealth
67 | var err error
68 | health, err = client.ClusterHealthSpecEndpoint(nil, v.Config.GetAnyEndpoint(), "")
69 | if err != nil {
70 | log.Error(v.Config.Name, " get cluster health error: ", err)
71 | return err
72 | }
73 |
74 | labels := util.MapStr{
75 | "cluster_id": v.Config.ID,
76 | "cluster_uuid": v.Config.ClusterUUID,
77 | }
78 | if len(p.config.Labels) > 0 {
79 | for k, v := range p.config.Labels {
80 | labels[k] = v
81 | }
82 | }
83 | item := event.Event{
84 | Metadata: event.EventMetadata{
85 | Category: "elasticsearch",
86 | Name: "cluster_health",
87 | Datatype: "snapshot",
88 | Labels: labels,
89 | },
90 | }
91 | item.Fields = util.MapStr{
92 | "elasticsearch": util.MapStr{
93 | "cluster_health": health,
94 | },
95 | }
96 | return event.Save(&item)
97 | }
98 |
--------------------------------------------------------------------------------
/plugin/elastic/metric/cluster_stats/cluster_stats.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package cluster_stats
6 |
7 | import (
8 | "fmt"
9 | log "github.com/cihub/seelog"
10 | "infini.sh/framework/core/config"
11 | "infini.sh/framework/core/elastic"
12 | "infini.sh/framework/core/event"
13 | "infini.sh/framework/core/pipeline"
14 | "infini.sh/framework/core/util"
15 | "infini.sh/framework/modules/elastic/adapter"
16 | )
17 |
18 | const processorName = "es_cluster_stats"
19 |
// init registers this processor with the pipeline framework under the
// name "es_cluster_stats" so pipeline configs can reference it.
func init() {
	pipeline.RegisterProcessorPlugin(processorName, newProcessor)
}
23 |
24 | func newProcessor(c *config.Config) (pipeline.Processor, error) {
25 | cfg := Config{}
26 | if err := c.Unpack(&cfg); err != nil {
27 | log.Error(err)
28 | return nil, fmt.Errorf("failed to unpack the configuration of %s processor: %s", processorName, err)
29 | }
30 | processor := ClusterStats{
31 | config: &cfg,
32 | }
33 | _, err := adapter.GetClusterUUID(processor.config.Elasticsearch)
34 | if err != nil {
35 | log.Error(" get cluster uuid error: ", err)
36 | }
37 | return &processor, nil
38 | }
39 |
// Config holds the settings of the es_cluster_stats processor.
type Config struct {
	// Elasticsearch is the ID/name of the cluster registered with the framework.
	Elasticsearch string `config:"elasticsearch,omitempty"`
	// Labels are extra key/value pairs attached to every emitted event.
	Labels map[string]interface{} `config:"labels,omitempty"`
}
44 |
// ClusterStats collects cluster-stats snapshots for one configured cluster.
type ClusterStats struct {
	config *Config
}
48 |
// Name returns the processor's registered name.
func (p *ClusterStats) Name() string {
	return processorName
}
52 |
// Process looks up the configured cluster's metadata and collects one
// stats snapshot for it.
func (p *ClusterStats) Process(c *pipeline.Context) error {
	meta := elastic.GetMetadata(p.config.Elasticsearch)
	return p.Collect(p.config.Elasticsearch, meta)
}
57 |
58 | func (p *ClusterStats) Collect(k string, v *elastic.ElasticsearchMetadata) error {
59 |
60 | log.Trace("collecting custer state metrics for :", k)
61 |
62 | client := elastic.GetClientNoPanic(k)
63 | if client == nil {
64 | return nil
65 | }
66 |
67 | var stats *elastic.ClusterStats
68 | var err error
69 | stats, err = client.GetClusterStatsSpecEndpoint(nil, "", v.Config.GetAnyEndpoint())
70 | if err != nil {
71 | log.Error(v.Config.Name, " get cluster stats error: ", err)
72 | return err
73 | }
74 | labels := util.MapStr{
75 | "cluster_id": v.Config.ID,
76 | "cluster_uuid": v.Config.ClusterUUID,
77 | }
78 | if len(p.config.Labels) > 0 {
79 | for k, v := range p.config.Labels {
80 | labels[k] = v
81 | }
82 | }
83 | item := event.Event{
84 | Metadata: event.EventMetadata{
85 | Category: "elasticsearch",
86 | Name: "cluster_stats",
87 | Datatype: "snapshot",
88 | Labels: labels,
89 | },
90 | }
91 |
92 | item.Fields = util.MapStr{
93 | "elasticsearch": util.MapStr{
94 | "cluster_stats": stats,
95 | },
96 | }
97 |
98 | return event.Save(&item)
99 | }
100 |
--------------------------------------------------------------------------------
/plugin/elastic/metric/index_stats/index_stats.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package index_stats
6 |
7 | import (
8 | "fmt"
9 | log "github.com/cihub/seelog"
10 | "infini.sh/framework/core/config"
11 | "infini.sh/framework/core/elastic"
12 | "infini.sh/framework/core/event"
13 | "infini.sh/framework/core/pipeline"
14 | "infini.sh/framework/core/util"
15 | "infini.sh/framework/modules/elastic/adapter"
16 | )
17 |
18 | const processorName = "es_index_stats"
19 |
// init registers this processor with the pipeline framework under the
// name "es_index_stats" so pipeline configs can reference it.
func init() {
	pipeline.RegisterProcessorPlugin(processorName, newProcessor)
}
23 |
24 | func newProcessor(c *config.Config) (pipeline.Processor, error) {
25 | cfg := Config{
26 | AllIndexStats: true,
27 | IndexPrimaryStats: true,
28 | IndexTotalStats: true,
29 | }
30 | if err := c.Unpack(&cfg); err != nil {
31 | log.Error(err)
32 | return nil, fmt.Errorf("failed to unpack the configuration of %s processor: %s", processorName, err)
33 | }
34 | processor := IndexStats{
35 | config: &cfg,
36 | }
37 | _, err := adapter.GetClusterUUID(processor.config.Elasticsearch)
38 | if err != nil {
39 | log.Error(" get cluster uuid error: ", err)
40 | }
41 | return &processor, nil
42 | }
43 |
// Config holds the settings of the es_index_stats processor.
type Config struct {
	// Elasticsearch is the ID/name of the cluster registered with the framework.
	Elasticsearch string `config:"elasticsearch,omitempty"`
	// AllIndexStats controls whether the aggregated "_all" snapshot is emitted.
	AllIndexStats bool `config:"all_index_stats,omitempty"`
	// IndexPrimaryStats controls the primaries section of each snapshot.
	IndexPrimaryStats bool `config:"index_primary_stats"`
	// IndexTotalStats controls the totals section of each snapshot.
	IndexTotalStats bool `config:"index_total_stats"`
	// Labels are extra key/value pairs attached to every emitted event.
	Labels map[string]interface{} `config:"labels,omitempty"`
}
51 |
// IndexStats collects per-index stats snapshots for one configured cluster.
type IndexStats struct {
	config *Config
}
55 |
// Name returns the processor's registered name.
func (p *IndexStats) Name() string {
	return processorName
}
59 |
// Process looks up the configured cluster's metadata and collects one
// round of index stats for it.
func (p *IndexStats) Process(c *pipeline.Context) error {
	meta := elastic.GetMetadata(p.config.Elasticsearch)
	return p.Collect(p.config.Elasticsearch, meta)
}
64 |
// Collect gathers index-level metrics for cluster k: cat-shards rows,
// overall index stats, and (when the cluster is reachable) per-index
// metadata; it then persists one event per index plus an optional "_all"
// summary.
func (p *IndexStats) Collect(k string, v *elastic.ElasticsearchMetadata) error {
	var (
		shards []elastic.CatShardResponse
		err    error
	)
	client := elastic.GetClientNoPanic(k)
	if client == nil {
		// No client registered for this cluster — nothing to collect.
		return nil
	}
	shards, err = client.CatShardsSpecEndpoint(v.Config.GetAnyEndpoint())
	if err != nil {
		return err
	}
	indexStats, err := client.GetStats()
	if err != nil {
		return err
	}

	if indexStats != nil {
		var indexInfos *map[string]elastic.IndexInfo
		shardInfos := map[string][]elastic.CatShardResponse{}

		if v.IsAvailable() {
			// Index metadata is optional enrichment; a failure here is
			// logged but does not abort the collection.
			indexInfos, err = client.GetIndices("")
			if err != nil {
				log.Error(v.Config.Name, " get indices info error: ", err)
			}

			// Group the cat-shards rows by index name.
			for _, item := range shards {
				if _, ok := shardInfos[item.Index]; !ok {
					shardInfos[item.Index] = []elastic.CatShardResponse{
						item,
					}
				} else {
					shardInfos[item.Index] = append(shardInfos[item.Index], item)
				}
			}
		}

		if p.config.AllIndexStats {
			p.SaveIndexStats(v.Config.ID, v.Config.ClusterUUID, "_all", "_all", indexStats.All.Primaries, indexStats.All.Total, nil, nil)
		}

		for x, y := range indexStats.Indices {
			var indexInfo elastic.IndexInfo
			var shardInfo []elastic.CatShardResponse
			if indexInfos != nil {
				indexInfo = (*indexInfos)[x]
			}
			if shardInfos != nil {
				shardInfo = shardInfos[x]
			}
			p.SaveIndexStats(v.Config.ID, v.Config.ClusterUUID, y.Uuid, x, y.Primaries, y.Total, &indexInfo, shardInfo)
		}

	}
	return nil
}
123 |
124 | func (p *IndexStats) SaveIndexStats(clusterId, clusterUUID, indexID, indexName string, primary, total elastic.IndexLevelStats, info *elastic.IndexInfo, shardInfo []elastic.CatShardResponse) {
125 | newIndexID := fmt.Sprintf("%s:%s", clusterId, indexName)
126 | if indexID == "_all" {
127 | newIndexID = indexID
128 | }
129 | labels := util.MapStr{
130 | "cluster_id": clusterId,
131 | "index_id": newIndexID,
132 | "index_uuid": indexID,
133 | "index_name": indexName,
134 | }
135 | if clusterUUID != "" {
136 | labels["cluster_uuid"] = clusterUUID
137 | }
138 | if len(p.config.Labels) > 0 {
139 | for k, v := range p.config.Labels {
140 | labels[k] = v
141 | }
142 | }
143 | item := event.Event{
144 | Metadata: event.EventMetadata{
145 | Category: "elasticsearch",
146 | Name: "index_stats",
147 | Datatype: "snapshot",
148 | Labels: labels,
149 | },
150 | }
151 |
152 | mtr := util.MapStr{}
153 | if p.config.IndexPrimaryStats {
154 | mtr["primaries"] = primary
155 | mtr["total"] = total
156 | mtr["index_info"] = info
157 | mtr["shard_info"] = shardInfo
158 | }
159 |
160 | item.Fields = util.MapStr{
161 | "elasticsearch": util.MapStr{
162 | "index_stats": mtr,
163 | },
164 | }
165 |
166 | event.Save(&item)
167 | }
168 |
--------------------------------------------------------------------------------
/plugin/elastic/metric/metric.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package metric
6 |
7 | import (
8 | _ "infini.sh/agent/plugin/elastic/metric/node_stats"
9 | )
10 |
--------------------------------------------------------------------------------
/plugin/logs/file_detect.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package logs
6 |
7 | import (
8 | "context"
9 | "os"
10 | "path/filepath"
11 |
12 | log "github.com/cihub/seelog"
13 | "infini.sh/agent/lib/util"
14 | )
15 |
// Operation is the kind of file-system change reported by the detector.
type Operation uint8

const (
	OpDone     Operation = iota // scan finished; no more events follow
	OpCreate                    // file is new, or differs from the stored state
	OpWrite                     // file grew since the stored state
	OpTruncate                  // file shrank (or same size with newer mtime)
)
24 |
// FSEvent describes one file-system change detected during a scan.
type FSEvent struct {
	Pattern *Pattern // matched pattern
	Path    string   // file path (symlinks already resolved by the detector)
	Offset  int64    // read offset; the detector always emits -1
	Op      Operation
	Info    os.FileInfo
	State   FileState // previously persisted state of the file, if any
}
33 |
34 | func NewFileDetector(rootPath string, patterns []*Pattern) *FileDetector {
35 | return &FileDetector{
36 | root: rootPath,
37 | patterns: patterns,
38 | events: make(chan FSEvent),
39 | }
40 | }
41 |
// FileDetector scans a directory tree for files matching a set of
// patterns and reports changes relative to previously stored file states.
type FileDetector struct {
	root     string     // root directory to walk
	patterns []*Pattern // filename patterns to match against
	// prev appears unused in this file — NOTE(review): confirm before removing.
	prev   map[string]os.FileInfo
	events chan FSEvent // unbuffered stream of detected events
}
48 |
// Detect walks the root directory once and emits an FSEvent for every
// regular file whose name matches one of the configured patterns. A
// terminating done event is always sent (via defer) — even when there
// are no patterns or the walk fails — so the consumer knows the scan is
// complete.
func (w *FileDetector) Detect(ctx context.Context) {
	defer func() {
		w.events <- doneEvent()
	}()

	if len(w.patterns) == 0 {
		return
	}
	err := filepath.Walk(w.root, func(path string, info os.FileInfo, err error) error {
		// Abort the walk as soon as the context is cancelled.
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if info == nil {
			log.Warnf("missing file info for path [%s]", path)
			return nil
		}
		if info.IsDir() {
			return nil
		}
		// First matching pattern wins: each file is reported at most once.
		for _, pattern := range w.patterns {
			if !pattern.patternRegex.MatchString(info.Name()) {
				continue
			}
			w.judgeEvent(ctx, path, info, pattern)
			break
		}
		return nil
	})
	if err != nil {
		log.Errorf("failed to walk logs under [%s], err: %v", w.root, err)
	}
}
81 |
// judgeEvent compares the file at path with its previously persisted
// state and pushes the corresponding FSEvent (create, truncate or
// write). Symlinks are resolved to their target first. Every channel
// send is guarded by ctx.Done() so a cancelled scan cannot block here.
func (w *FileDetector) judgeEvent(ctx context.Context, path string, info os.FileInfo, pattern *Pattern) {
	if info.Mode()&os.ModeSymlink != 0 {
		realPath, err := util.ResolveSymlink(path)
		if err != nil {
			log.Error(err)
			return
		}
		info, err = os.Lstat(realPath)
		if err != nil {
			log.Error(err)
			return
		}
		path = realPath
	}
	// No stored state, a lookup error, or a different underlying file
	// (e.g. rotated/replaced with the same name) all count as "created".
	preState, err := GetFileState(path)
	isSameFile := w.IsSameFile(preState, info, path)
	if err != nil || preState == (FileState{}) || !isSameFile {
		select {
		case <-ctx.Done():
			return
		case w.events <- createEvent(path, info, pattern, preState):
		}
		return
	}

	if preState.ModTime.UnixNano() != info.ModTime().UnixNano() {
		//mod time changed, if pre info has same size or bigger => truncate
		if preState.Size >= info.Size() {
			select {
			case <-ctx.Done():
				return
			case w.events <- truncateEvent(path, info, pattern, preState):
			}
		} else {
			select {
			case <-ctx.Done():
				return
			case w.events <- writeEvent(path, info, pattern, preState):
			}
		}
	}
}
124 |
// Event blocks until the next FSEvent produced by a running Detect and
// returns it.
func (w *FileDetector) Event() FSEvent {
	return <-w.events
}
128 |
129 | func createEvent(path string, fi os.FileInfo, pattern *Pattern, state FileState) FSEvent {
130 | return FSEvent{pattern, path, -1, OpCreate, fi, state}
131 | }
132 |
133 | func writeEvent(path string, fi os.FileInfo, pattern *Pattern, state FileState) FSEvent {
134 | return FSEvent{pattern, path, -1, OpWrite, fi, state}
135 | }
136 |
137 | func truncateEvent(path string, fi os.FileInfo, pattern *Pattern, state FileState) FSEvent {
138 | return FSEvent{pattern, path, -1, OpTruncate, fi, state}
139 | }
140 |
141 | func doneEvent() FSEvent {
142 | return FSEvent{nil, "", -1, OpDone, nil, FileState{}}
143 | }
144 |
--------------------------------------------------------------------------------
/plugin/logs/file_detect_unix.go:
--------------------------------------------------------------------------------
1 | //go:build aix || darwin || dragonfly || freebsd || (js && wasm) || linux || netbsd || openbsd || solaris
2 |
3 | /* Copyright © INFINI Ltd. All rights reserved.
4 | * Web: https://infinilabs.com
5 | * Email: hello#infini.ltd */
6 |
7 | package logs
8 |
9 | import (
10 | "fmt"
11 | "os"
12 | "syscall"
13 | )
14 |
15 | // IsSameFile whether preState's file info and current file info describe the same file
16 | func (w *FileDetector) IsSameFile(preState FileState, currentInfo os.FileInfo, path string) bool {
17 | if preState == (FileState{}) {
18 | return false
19 | }
20 | preStateMap, ok := preState.Sys.(map[string]interface{})
21 | if !ok {
22 | return false
23 | }
24 | devF64, ok := preStateMap["Dev"].(float64)
25 | if !ok {
26 | return false
27 | }
28 | inoF64, ok := preStateMap["Ino"].(float64)
29 | if !ok {
30 | return false
31 | }
32 | dev := int32(devF64)
33 | ino := uint64(inoF64)
34 | current := currentInfo.Sys().(*syscall.Stat_t)
35 | if current == nil {
36 | return false
37 | }
38 | return uint64(dev) == uint64(current.Dev) && ino == current.Ino
39 | }
40 |
41 | func LoadFileID(fi os.FileInfo, path string) (map[string]interface{}, error) {
42 | st, ok := fi.Sys().(*syscall.Stat_t)
43 | if !ok {
44 | return nil, fmt.Errorf("failed to cast file info sys to stat_t")
45 | }
46 | id := map[string]interface{}{
47 | "Dev": st.Dev,
48 | "Ino": st.Ino,
49 | }
50 | return id, nil
51 | }
52 |
--------------------------------------------------------------------------------
/plugin/logs/file_detect_windows.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI Ltd. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package logs
6 |
7 | import (
8 | "os"
9 | "syscall"
10 | )
11 |
12 | // IsSameFile whether preState's file info and current file info describe the same file
13 | func (w *FileDetector) IsSameFile(preState FileState, currentInfo os.FileInfo, path string) bool {
14 | if preState == (FileState{}) {
15 | return false
16 | }
17 | preStateMap, ok := preState.Sys.(map[string]interface{})
18 | if !ok {
19 | return false
20 | }
21 | vol, ok := preStateMap["VolumeSerialNumber"].(float64)
22 | if !ok {
23 | return false
24 | }
25 | idxhi, ok := preStateMap["FileIndexHigh"].(float64)
26 | if !ok {
27 | return false
28 | }
29 | idxlo, ok := preStateMap["FileIndexLow"].(float64)
30 | if !ok {
31 | return false
32 | }
33 | fstate, err := LoadFileID(currentInfo, path)
34 | if err != nil {
35 | return false
36 | }
37 |
38 | return fstate["VolumeSerialNumber"] == uint32(vol) && fstate["FileIndexHigh"] == uint32(idxhi) && fstate["FileIndexLow"] == uint32(idxlo)
39 | }
40 |
41 | func LoadFileID(fi os.FileInfo, path string) (map[string]interface{}, error) {
42 | pathp, err := syscall.UTF16PtrFromString(path)
43 | if err != nil {
44 | return nil, err
45 | }
46 |
47 | attrs := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS | syscall.FILE_FLAG_OPEN_REPARSE_POINT)
48 |
49 | h, err := syscall.CreateFile(pathp, 0, 0, nil, syscall.OPEN_EXISTING, attrs, 0)
50 | if err != nil {
51 | return nil, err
52 | }
53 | defer syscall.CloseHandle(h)
54 | var i syscall.ByHandleFileInformation
55 | err = syscall.GetFileInformationByHandle(h, &i)
56 | if err != nil {
57 | return nil, err
58 | }
59 | return map[string]interface{}{
60 | "VolumeSerialNumber": i.VolumeSerialNumber,
61 | "FileIndexHigh": i.FileIndexHigh,
62 | "FileIndexLow": i.FileIndexLow,
63 | }, nil
64 | }
65 |
--------------------------------------------------------------------------------
/plugin/logs/store.go:
--------------------------------------------------------------------------------
1 | /* Copyright © INFINI LTD. All rights reserved.
2 | * Web: https://infinilabs.com
3 | * Email: hello#infini.ltd */
4 |
5 | package logs
6 |
7 | import (
8 | "encoding/json"
9 | "time"
10 |
11 | log "github.com/cihub/seelog"
12 | "infini.sh/framework/core/event"
13 | "infini.sh/framework/core/kv"
14 | "infini.sh/framework/core/util"
15 | )
16 |
const (
	// KVLogfileStateBucket is the KV-store bucket that persists per-file
	// harvest state (FileState), keyed by file path.
	KVLogfileStateBucket = "log_state_bucket"
)
20 |
// FileState is the persisted harvest state of a single log file. It is
// serialized to JSON and stored in KVLogfileStateBucket keyed by file path
// (see SaveFileState / GetFileState).
type FileState struct {
	Name    string    `json:"name"`
	Size    int64     `json:"size"`
	ModTime time.Time `json:"mod_time"`
	Path    string    `json:"path"`
	Offset  int64     `json:"offset"`
	// Sys holds the platform-specific file identity produced by LoadFileID.
	// After a JSON round-trip it decodes as map[string]interface{} with
	// float64 values, which is the shape FileDetector.IsSameFile expects.
	Sys any `json:"sys"`
}
29 |
30 | func SaveFileState(path string, source FileState) {
31 | err := kv.AddValue(KVLogfileStateBucket, []byte(path), util.MustToJSONBytes(source))
32 | if err != nil {
33 | log.Error(err)
34 | }
35 | }
36 |
37 | func GetFileState(path string) (FileState, error) {
38 | ret, err := kv.GetValue(KVLogfileStateBucket, []byte(path))
39 | if err != nil {
40 | return FileState{}, err
41 | }
42 | var state FileState
43 | err = json.Unmarshal(ret, &state)
44 | if err != nil {
45 | return FileState{}, err
46 | }
47 | return state, nil
48 | }
49 |
// LogEvent is a single collected log entry as shipped to the backend:
// agent identity, event metadata, the log payload itself, and an optional
// timestamp. The elastic_mapping tags drive the Elasticsearch index mapping.
type LogEvent struct {
	AgentMeta *event.AgentMeta `json:"agent" elastic_mapping:"agent: { type: object }"`
	Meta      util.MapStr      `json:"metadata" elastic_mapping:"metadata: { type: object }"`
	Fields    util.MapStr      `json:"payload" elastic_mapping:"payload: { type: object }"`
	Timestamp string           `json:"timestamp,omitempty" elastic_mapping:"timestamp: { type: date }"`
}
56 |
// Cluster identifies an Elasticsearch cluster by name, ID, and UUID.
type Cluster struct {
	Name string `json:"name"`
	ID   string `json:"id"`
	UUID string `json:"uuid"`
}
62 |
// Node identifies a node by name, ID, and port.
type Node struct {
	Name string `json:"name"`
	ID   string `json:"id"`
	Port int    `json:"port"`
}
68 |
// File records a log file's path and the byte offset read so far.
type File struct {
	Path   string `json:"path"`
	Offset int64  `json:"offset"`
}
73 |
--------------------------------------------------------------------------------