├── lombok.config ├── NOTICE ├── .gitignore ├── src ├── main │ └── java │ │ └── com │ │ └── aws │ │ └── greengrass │ │ └── logmanager │ │ ├── model │ │ ├── EventType.java │ │ ├── ComponentType.java │ │ ├── LogFileInformation.java │ │ ├── CloudWatchAttemptLogFileInformation.java │ │ ├── ComponentLogFileInformation.java │ │ ├── ComponentLogConfiguration.java │ │ ├── CloudWatchAttempt.java │ │ ├── CloudWatchAttemptLogInformation.java │ │ ├── ProcessingFiles.java │ │ ├── LogFile.java │ │ └── LogFileGroup.java │ │ ├── exceptions │ │ └── InvalidLogGroupException.java │ │ ├── services │ │ └── DiskSpaceManagementService.java │ │ ├── util │ │ ├── CloudWatchClientFactory.java │ │ ├── SdkClientWrapper.java │ │ └── ConfigUtil.java │ │ ├── PositionTrackingBufferedReader.java │ │ └── CloudWatchLogsUploader.java ├── integrationtests │ ├── resources │ │ └── com │ │ │ └── aws │ │ │ └── greengrass │ │ │ └── integrationtests │ │ │ └── logmanager │ │ │ ├── doNotDeleteFilesAfterUpload.yaml │ │ │ ├── smallPeriodicIntervalSystemComponentConfig.yaml │ │ │ ├── smallPeriodicIntervalOnlyReqUserComponentConfig.yaml │ │ │ ├── smallSpaceManagementPeriodicIntervalConfig.yaml │ │ │ ├── smallPeriodicIntervalUserComponentConfigNoMultiline.yaml │ │ │ ├── smallPeriodicIntervalUserComponentConfig.yaml │ │ │ └── configsDifferentFromDefaults.yaml │ └── java │ │ └── com │ │ └── aws │ │ └── greengrass │ │ └── integrationtests │ │ └── logmanager │ │ ├── util │ │ └── LogFileHelper.java │ │ └── SpaceManagementTest.java └── test │ ├── resources │ └── com │ │ └── aws │ │ └── greengrass │ │ └── logmanager │ │ ├── testlogs1.log │ │ └── testlogs2.log │ └── java │ └── com │ └── aws │ └── greengrass │ └── logmanager │ ├── util │ ├── TestUtils.java │ └── ConfigUtilTest.java │ ├── model │ ├── ProcessingFilesTest.java │ ├── LogFileGroupTest.java │ └── LogFileTest.java │ └── services │ └── DiskSpaceManagementServiceTest.java ├── .github ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── feature-request.md │ └── bug.md ├── 
PULL_REQUEST_TEMPLATE.md ├── workflows │ ├── codeql.yml │ ├── uat.yaml │ ├── maven.yml │ └── externalPR.yml └── scripts │ └── cover2cover.py ├── CODE_OF_CONDUCT.md ├── gdk-config.json ├── recipe.yaml ├── uat ├── testing-features │ ├── src │ │ └── main │ │ │ ├── resources │ │ │ └── greengrass │ │ │ │ └── recipes │ │ │ │ └── recipe.yaml │ │ │ └── java │ │ │ └── com │ │ │ └── aws │ │ │ └── greengrass │ │ │ ├── steps │ │ │ ├── NetworkSteps.java │ │ │ └── FileSteps.java │ │ │ ├── resources │ │ │ ├── CloudWatchLogsModule.java │ │ │ ├── CloudWatchLogStreamSpecModel.java │ │ │ ├── CloudWatchLogStreamModel.java │ │ │ └── CloudWatchLogsLifecycle.java │ │ │ └── NetworkUtils.java │ └── pom.xml ├── custom-components │ ├── src │ │ ├── main │ │ │ ├── java │ │ │ │ └── com │ │ │ │ │ └── aws │ │ │ │ │ └── greengrass │ │ │ │ │ ├── Main.java │ │ │ │ │ └── artifacts │ │ │ │ │ └── LogGenerator.java │ │ │ └── resources │ │ │ │ └── recipes │ │ │ │ └── LogGenerator.yaml │ │ └── test │ │ │ └── java │ │ │ └── com │ │ │ └── aws │ │ │ └── greengrass │ │ │ └── LogGeneratorTest.java │ └── pom.xml ├── codebuild │ └── uat_linux_buildspec.yaml ├── README.md └── pom.xml ├── CHANGELOG.md ├── codestyle ├── findbugs-exclude.xml ├── IntelliJ.xml ├── pmd-eg-tests-ruleset.xml └── pmd-eg-ruleset.xml ├── CONTRIBUTING.md ├── README.md └── LICENSE /lombok.config: -------------------------------------------------------------------------------- 1 | lombok.addLombokGeneratedAnnotation = true 2 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | AWS Greengrass Log Manager 2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | SPDX-License-Identifier: Apache-2.0 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | greengrass-build 2 | target 3 | .DS_Store 4 | ._* 5 | *.class 6 | **/*.jar 7 | *~ 8 | *.metrics 9 | .idea 10 | coverage 11 | *.iml 12 | bin 13 | greengrass*.log 14 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/model/EventType.java: -------------------------------------------------------------------------------- 1 | package com.aws.greengrass.logmanager.model; 2 | 3 | public enum EventType { 4 | ALL_COMPONENTS_PROCESSED 5 | } 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: General Support - AWS re:Post 4 | url: https://repost.aws/ 5 | about: Please ask and answer questions here. 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/model/ComponentType.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.logmanager.model; 7 | 8 | public enum ComponentType { 9 | GreengrassSystemComponent, UserComponent 10 | } 11 | -------------------------------------------------------------------------------- /gdk-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "component" :{ 3 | "aws.greengrass.LogManager": { 4 | "author": "AWS", 5 | "version": "NEXT_PATCH", 6 | "build": { 7 | "build_system": "maven" 8 | }, 9 | "publish": { 10 | "bucket": "BUCKETNAME", 11 | "region": "REGION" 12 | } 13 | } 14 | }, 15 | "gdk_version": "1.1.0" 16 | } 17 | -------------------------------------------------------------------------------- /recipe.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: '2020-01-25' 3 | ComponentName: '{COMPONENT_NAME}' 4 | ComponentDescription: AWS Greengrass Log Manager 5 | ComponentPublisher: AWS 6 | ComponentVersion: '{COMPONENT_VERSION}' 7 | ComponentType: aws.greengrass.plugin 8 | Manifests: 9 | - Artifacts: 10 | - URI: s3://gg-dev-artifacts-$stage/$componentName/$version/LogManager.jar 11 | -------------------------------------------------------------------------------- /uat/testing-features/src/main/resources/greengrass/recipes/recipe.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: '2020-01-25' 3 | ComponentName: 'aws.greengrass.LogManager' 4 | ComponentDescription: AWS Greengrass Log Manager 5 | ComponentPublisher: AWS 6 | ComponentVersion: '2.4.0' 7 | ComponentType: aws.greengrass.plugin 8 | Manifests: 9 | - Artifacts: 10 | - URI: "classpath:/greengrass/artifacts/aws.greengrass.LogManager.jar" 11 | Permission: 12 | Read: ALL 13 | Execute: ALL -------------------------------------------------------------------------------- 
/src/main/java/com/aws/greengrass/logmanager/exceptions/InvalidLogGroupException.java: -------------------------------------------------------------------------------- 1 | package com.aws.greengrass.logmanager.exceptions; 2 | 3 | public class InvalidLogGroupException extends Exception { 4 | // custom serialVersionUID for class extends Serializable class 5 | private static final long serialVersionUID = 456; 6 | 7 | public InvalidLogGroupException(String s) { 8 | super(s); 9 | } 10 | 11 | public InvalidLogGroupException(String s, Exception e) { 12 | super(s, e); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/model/LogFileInformation.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.logmanager.model; 7 | 8 | import lombok.Builder; 9 | import lombok.Getter; 10 | import lombok.Value; 11 | 12 | 13 | @Builder 14 | @Value 15 | @Getter 16 | public class LogFileInformation { 17 | private LogFile logFile; 18 | private long startPosition; 19 | private String fileHash; 20 | } 21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **Issue #, if available:** 2 | 3 | **Description of changes:** 4 | 5 | **Why is this change necessary:** 6 | 7 | **How was this change tested:** 8 | 9 | **Any additional information or context required to review the change:** 10 | 11 | **Checklist:** 12 | - [ ] Updated the README if applicable 13 | - [ ] Updated or added new unit tests 14 | - [ ] Updated or added new integration tests 15 | - [ ] Updated or added new end-to-end tests 16 | 17 | By submitting this pull request, I confirm that you can use, modify, 
copy, and redistribute this contribution, under the terms of your choice. 18 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## v2.1.0 4 | 5 | ### Bug fixes and improvements 6 | 7 | * Use defaults for logFileDirectoryPath and logFileRegex that work for Greengrass components that print to standard output (stdout) and standard error (stderr). 8 | * Correctly route traffic through a configured network proxy when uploading logs to CloudWatch Logs. 9 | * Correctly handle colon characters (:) in log stream names. CloudWatch Logs log stream names don't support colons. 10 | * Simplify log stream names by removing thing group names from the log stream. 11 | * Remove an error log message that prints during normal behavior. 12 | -------------------------------------------------------------------------------- /uat/custom-components/src/main/java/com/aws/greengrass/Main.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass; 7 | 8 | import java.util.function.Consumer; 9 | 10 | public final class Main { 11 | private Main(){} 12 | public static void main(String[] args) throws ClassNotFoundException, IllegalAccessException, 13 | InstantiationException { 14 | ((Consumer) Class.forName("com.aws.greengrass.artifacts." 
+ System.getProperty("componentName")) 15 | .newInstance()).accept(args); 16 | } 17 | } -------------------------------------------------------------------------------- /src/integrationtests/resources/com/aws/greengrass/integrationtests/logmanager/doNotDeleteFilesAfterUpload.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | aws.greengrass.LogManager: 4 | configuration: 5 | periodicUploadIntervalSec: 10 6 | logsUploaderConfiguration: 7 | componentLogsConfigurationMap: 8 | UserComponentA: 9 | logFileRegex: '^integTestRandomLogFiles.log\w*' 10 | logFileDirectoryPath: '{{logFileDirectoryPath}}' 11 | deleteLogFileAfterCloudUpload: 'false' 12 | 13 | main: 14 | lifecycle: 15 | install: 16 | all: echo All installed 17 | dependencies: 18 | - aws.greengrass.LogManager 19 | -------------------------------------------------------------------------------- /src/integrationtests/resources/com/aws/greengrass/integrationtests/logmanager/smallPeriodicIntervalSystemComponentConfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | aws.greengrass.LogManager: 4 | configuration: 5 | periodicUploadIntervalSec: 10 6 | logsUploaderConfiguration: 7 | systemLogsConfiguration: 8 | uploadToCloudWatch: true 9 | minimumLogLevel: 'INFO' 10 | diskSpaceLimit: '25' 11 | diskSpaceLimitUnit: 'MB' 12 | deleteLogFileAfterCloudUpload: 'true' 13 | multiLineStartPattern: "[^\\s]" 14 | main: 15 | lifecycle: 16 | install: 17 | all: echo All installed 18 | dependencies: 19 | - aws.greengrass.LogManager 20 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F680 Feature Request" 3 | about: Request a new feature 4 | title: "(module name): short issue description" 5 | labels: feature-request, needs-triage 6 | 
--- 7 | 8 | **Feature Description** 9 | A short description of the feature you are proposing. 10 | 11 | **Use Case** 12 | Why do you need this feature? 13 | 14 | **Proposed Solution** 15 | Please include prototype/workaround/sketch/reference implementation 16 | 17 | **Other** 18 | Add detailed explanation, stacktraces, related issues, links for us to have context, etc 19 | 20 | 21 | * [ ] :wave: I may be able to implement this feature request 22 | * [ ] :warning: This feature might incur a breaking change 23 | -------------------------------------------------------------------------------- /src/integrationtests/resources/com/aws/greengrass/integrationtests/logmanager/smallPeriodicIntervalOnlyReqUserComponentConfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | aws.greengrass.LogManager: 4 | configuration: 5 | periodicUploadIntervalSec: 10 6 | logsUploaderConfiguration: 7 | componentLogsConfiguration: 8 | - componentName: 'UserComponentB' 9 | minimumLogLevel: 'INFO' 10 | diskSpaceLimit: '25' 11 | diskSpaceLimitUnit: 'MB' 12 | deleteLogFileAfterCloudUpload: 'true' 13 | multiLineStartPattern: "[^\\s]" 14 | main: 15 | lifecycle: 16 | install: 17 | all: echo All installed 18 | dependencies: 19 | - aws.greengrass.LogManager 20 | -------------------------------------------------------------------------------- /codestyle/findbugs-exclude.xml: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /src/integrationtests/resources/com/aws/greengrass/integrationtests/logmanager/smallSpaceManagementPeriodicIntervalConfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | aws.greengrass.LogManager: 4 | configuration: 5 | logsUploaderConfiguration: 6 | 
componentLogsConfigurationMap: 7 | UserComponentA: 8 | logFileRegex: '^integTestRandomLogFiles.log\w*' 9 | logFileDirectoryPath: '{{logFileDirectoryPath}}' 10 | minimumLogLevel: 'INFO' 11 | diskSpaceLimit: '105' 12 | diskSpaceLimitUnit: 'KB' 13 | deleteLogFileAfterCloudUpload: 'true' 14 | periodicUploadIntervalSec: 1 15 | main: 16 | lifecycle: 17 | install: echo All installed 18 | dependencies: 19 | - aws.greengrass.LogManager 20 | -------------------------------------------------------------------------------- /src/integrationtests/resources/com/aws/greengrass/integrationtests/logmanager/smallPeriodicIntervalUserComponentConfigNoMultiline.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | aws.greengrass.LogManager: 4 | configuration: 5 | periodicUploadIntervalSec: 10 6 | logsUploaderConfiguration: 7 | componentLogsConfiguration: 8 | - componentName: 'UserComponentA' 9 | logFileRegex: '^integTestRandomLogFiles.log\w*' 10 | logFileDirectoryPath: '{{logFileDirectoryPath}}' 11 | minimumLogLevel: 'INFO' 12 | diskSpaceLimit: '25' 13 | diskSpaceLimitUnit: 'MB' 14 | deleteLogFileAfterCloudUpload: 'true' 15 | main: 16 | lifecycle: 17 | install: 18 | all: echo All installed 19 | dependencies: 20 | - aws.greengrass.LogManager 21 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/model/CloudWatchAttemptLogFileInformation.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.logmanager.model; 7 | 8 | import lombok.Builder; 9 | import lombok.Data; 10 | import lombok.Getter; 11 | import lombok.Setter; 12 | 13 | @Builder 14 | @Data 15 | @Getter 16 | @Setter 17 | public class CloudWatchAttemptLogFileInformation { 18 | private long startPosition; 19 | private long bytesRead; 20 | private long lastModifiedTime; 21 | //TODO: this fileHash added here is only for passing the tests for the current small PR, it will be removed and 22 | // passed in another way, when the feature is fully enbaled. 23 | private String fileHash; 24 | } 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F41B Bug Report" 3 | about: Create a report to help us improve 4 | title: "(module name): short issue description" 5 | labels: bug, needs-triage 6 | --- 7 | 8 | **Describe the bug** 9 | A clear and concise description of what the bug is. 10 | 11 | **To Reproduce** 12 | Steps to reproduce the behavior. If possible, provide a minimal amount of code that causes the bug. 13 | 14 | **Expected behavior** 15 | A clear and concise description of what you expected to happen. 16 | 17 | **Actual behavior** 18 | Tell us what actually happened. 19 | 20 | **Environment** 21 | - OS: [e.g. Ubuntu 20.04] 22 | - JDK version: 23 | - Nucleus version: 24 | - Log Manager version: 25 | 26 | **Additional context** 27 | Add any other context about the problem here. 28 | 29 | E.g. what is the impact of the bug? 
30 | -------------------------------------------------------------------------------- /src/integrationtests/resources/com/aws/greengrass/integrationtests/logmanager/smallPeriodicIntervalUserComponentConfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | aws.greengrass.LogManager: 4 | configuration: 5 | periodicUploadIntervalSec: 10 6 | updateToTlogIntervalSec: 20 7 | logsUploaderConfiguration: 8 | componentLogsConfiguration: 9 | - componentName: 'UserComponentA' 10 | logFileRegex: '^integTestRandomLogFiles.log\w*' 11 | logFileDirectoryPath: '{{logFileDirectoryPath}}' 12 | minimumLogLevel: 'INFO' 13 | diskSpaceLimit: '25' 14 | diskSpaceLimitUnit: 'MB' 15 | deleteLogFileAfterCloudUpload: 'true' 16 | multiLineStartPattern: "[^\\s]" 17 | main: 18 | lifecycle: 19 | install: 20 | all: echo All installed 21 | dependencies: 22 | - aws.greengrass.LogManager 23 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/model/ComponentLogFileInformation.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.logmanager.model; 7 | 8 | import lombok.Builder; 9 | import lombok.Getter; 10 | import lombok.Value; 11 | import org.slf4j.event.Level; 12 | 13 | import java.util.ArrayList; 14 | import java.util.List; 15 | import java.util.regex.Pattern; 16 | 17 | @Builder 18 | @Value 19 | @Getter 20 | public class ComponentLogFileInformation { 21 | @Builder.Default 22 | private List logFileInformationList = new ArrayList<>(); 23 | private String name; 24 | private Pattern multiLineStartPattern; 25 | private Level desiredLogLevel; 26 | private ComponentType componentType; 27 | private LogFileGroup logFileGroup; 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/model/ComponentLogConfiguration.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.logmanager.model; 7 | 8 | import lombok.Builder; 9 | import lombok.Data; 10 | import lombok.Getter; 11 | import org.slf4j.event.Level; 12 | 13 | import java.nio.file.Path; 14 | import java.util.regex.Pattern; 15 | 16 | @Builder 17 | @Data 18 | @Getter 19 | public class ComponentLogConfiguration { 20 | private Pattern fileNameRegex; 21 | private Path directoryPath; 22 | private String name; 23 | private Pattern multiLineStartPattern; 24 | @Builder.Default 25 | private Level minimumLogLevel = Level.INFO; 26 | private Long diskSpaceLimit; 27 | private boolean deleteLogFileAfterCloudUpload; 28 | private boolean uploadToCloudWatch; 29 | private ComponentType componentType; 30 | } 31 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | pull_request: 5 | branches: [ "*" ] 6 | schedule: 7 | - cron: "34 1 * * 5" 8 | 9 | jobs: 10 | analyze: 11 | name: Analyze 12 | runs-on: ubuntu-latest 13 | permissions: 14 | actions: read 15 | contents: read 16 | security-events: write 17 | 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | language: [ java ] 22 | 23 | steps: 24 | - name: Checkout 25 | uses: actions/checkout@v4 26 | 27 | - name: Initialize CodeQL 28 | uses: github/codeql-action/init@v2 29 | with: 30 | languages: ${{ matrix.language }} 31 | queries: +security-and-quality 32 | 33 | - name: Autobuild 34 | uses: github/codeql-action/autobuild@v2 35 | 36 | - name: Perform CodeQL Analysis 37 | uses: github/codeql-action/analyze@v2 38 | with: 39 | category: "/language:${{ matrix.language }}" 40 | -------------------------------------------------------------------------------- /uat/codebuild/uat_linux_buildspec.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: Apache-2.0 4 | # 5 | 6 | version: 0.2 7 | phases: 8 | install: 9 | runtime-versions: 10 | java: corretto11 11 | build: 12 | commands: 13 | - curl -s https://d2s8p88vqu9w66.cloudfront.net/releases/greengrass-nucleus-latest.zip > /tmp/greengrass-nucleus-latest.zip 14 | - mvn -U -ntp verify -DskipTests=true 15 | - mvn -U -ntp clean verify -f uat/pom.xml 16 | - java -Dggc.archive=/tmp/greengrass-nucleus-latest.zip 17 | -Dtags='LogManager&!unstable' -Dggc.install.root=$CODEBUILD_SRC_DIR -Dggc.log.level=INFO -Daws.region=us-west-2 18 | -jar uat/testing-features/target/greengrass-log-manager-testing-features.jar 19 | 20 | artifacts: 21 | files: 22 | - 'testResults/**/*' 23 | name: 'LogManagerUatLinuxLogs.zip' 24 | 25 | reports: 26 | uat-reports: 27 | files: 28 | - "TEST-greengrass-results.xml" 29 | file-format: "JUNITXML" 30 | -------------------------------------------------------------------------------- /src/integrationtests/resources/com/aws/greengrass/integrationtests/logmanager/configsDifferentFromDefaults.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | aws.greengrass.LogManager: 4 | configuration: 5 | periodicUploadIntervalSec: 60 6 | logsUploaderConfiguration: 7 | componentLogsConfigurationMap: 8 | UserComponentA: 9 | multiLineStartPattern: '^\\d.*$' 10 | logFileRegex: '^integTestRandomLogFiles.log\w*' 11 | logFileDirectoryPath: '{{logFileDirectoryPath}}' 12 | minimumLogLevel: 'TRACE' 13 | diskSpaceLimit: '10' 14 | diskSpaceLimitUnit: 'GB' 15 | deleteLogFileAfterCloudUpload: 'true' 16 | systemLogsConfiguration: 17 | uploadToCloudWatch: 'true' 18 | minimumLogLevel: 'TRACE' 19 | diskSpaceLimit: '25' 20 | diskSpaceLimitUnit: 'MB' 21 | deleteLogFileAfterCloudUpload: 'true' 22 | main: 23 | lifecycle: 24 | install: 25 | all: echo All installed 26 | dependencies: 27 | - aws.greengrass.LogManager 
-------------------------------------------------------------------------------- /uat/testing-features/src/main/java/com/aws/greengrass/steps/NetworkSteps.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.steps; 7 | 8 | import com.aws.greengrass.NetworkUtils; 9 | import io.cucumber.java.Before; 10 | import io.cucumber.java.en.Given; 11 | import io.cucumber.java.en.When; 12 | 13 | import java.io.IOException; 14 | 15 | public class NetworkSteps { 16 | 17 | @Before 18 | public void connectToNetwork() throws IOException, InterruptedException { 19 | NetworkUtils.recoverNetwork(); 20 | } 21 | 22 | @Given("device network connectivity is {word}") 23 | @When("I set device network connectivity to {word}") 24 | public void setDeviceNetwork(final String connectivity) throws IOException, InterruptedException { 25 | if ("offline".equals(connectivity.toLowerCase())) { 26 | NetworkUtils.disconnectNetwork(); 27 | } else { 28 | NetworkUtils.recoverNetwork(); 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/model/CloudWatchAttempt.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.logmanager.model; 7 | 8 | import lombok.Data; 9 | import lombok.Getter; 10 | import lombok.NoArgsConstructor; 11 | import lombok.Setter; 12 | 13 | import java.util.HashSet; 14 | import java.util.Map; 15 | import java.util.Set; 16 | 17 | @NoArgsConstructor 18 | @Getter 19 | @Data 20 | @Setter 21 | public class CloudWatchAttempt { 22 | // TODO: Need to implement retry mechanism. 
23 | protected static final int MAX_RETRIES = 5; 24 | //TODO: Check if we can consolidate logStreamUploadedMap here. 25 | private Map logStreamsToLogEventsMap; 26 | private String logGroupName; 27 | 28 | /** 29 | * This will be used in the uploader to keep track of which log groups and log streams in an attempt have been 30 | * successfully uploaded to cloud. 31 | */ 32 | private Set logStreamUploadedSet = new HashSet<>(); 33 | } 34 | -------------------------------------------------------------------------------- /uat/custom-components/src/main/resources/recipes/LogGenerator.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: Apache-2.0 4 | # 5 | 6 | --- 7 | RecipeFormatVersion: '2020-01-25' 8 | ComponentName: LogGenerator 9 | ComponentVersion: '0.0.0' 10 | ComponentDescription: Component to generate log files with random log messages. Log files rotate with different name. 11 | ComponentPublisher: AWS 12 | ComponentConfiguration: 13 | DefaultConfiguration: 14 | "LogFileName": "testlogs" 15 | "FileSize": 5 16 | "FileSizeUnit": "KB" 17 | "WriteFrequencyMs": 100 18 | "NumberOfLogLines": 20 19 | "LogsDirectory": "" 20 | Manifests: 21 | - Artifacts: 22 | - URI: classpath:/local-store/artifacts/custom-components.jar 23 | Lifecycle: 24 | Run: >- 25 | java -Dlog.level=INFO -DcomponentName="LogGenerator" -jar {artifacts:path}/custom-components.jar 26 | "{configuration:/LogFileName}" "{configuration:/FileSize}" "{configuration:/FileSizeUnit}" 27 | "{configuration:/WriteFrequencyMs}" 28 | "{configuration:/NumberOfLogLines}" "{configuration:/LogsDirectory}" 29 | -------------------------------------------------------------------------------- /.github/workflows/uat.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | # SPDX-License-Identifier: Apache-2.0 4 | # 5 | name: OTF UATS 6 | 7 | on: 8 | push: 9 | branches: 'main' 10 | pull_request: 11 | branches: 'main' 12 | 13 | env: 14 | AWS_REGION: "us-west-2" 15 | CODE_BUILD_PROJECT_LINUX: "LogManagerUatCodeBuildLinux" 16 | AWS_ROLE_TO_ASSUME: "arn:aws:iam::686385081908:role/aws-greengrass-log-manager-codebuild-uat-role-linux" 17 | 18 | jobs: 19 | uat-linux: 20 | permissions: 21 | id-token: write 22 | contents: read 23 | runs-on: ${{ matrix.os }} 24 | strategy: 25 | matrix: 26 | os: [ ubuntu-latest ] 27 | steps: 28 | - name: configure aws credentials 29 | uses: aws-actions/configure-aws-credentials@v4 30 | with: 31 | role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} 32 | role-session-name: logManagerCI 33 | aws-region: ${{ env.AWS_REGION }} 34 | - name: Run UAT on linux 35 | uses: aws-actions/aws-codebuild-run-build@v1 36 | with: 37 | project-name: ${{ env.CODE_BUILD_PROJECT_LINUX }} 38 | buildspec-override: uat/codebuild/uat_linux_buildspec.yaml 39 | -------------------------------------------------------------------------------- /src/test/resources/com/aws/greengrass/logmanager/testlogs1.log: -------------------------------------------------------------------------------- 1 | {"contexts":{"component":"demo","device":"asdf"},"eventType":"th1-event","level":null,"loggerName":"com.aws.greengrass.logging.examples.LoggerDemo","message":"test th1 info","timestamp":1608292800000} 2 | {"contexts":{"component":"demo","device":"asdf"},"eventType":"th2-event","level":"INFO","loggerName":"com.aws.greengrass.logging.examples.LoggerDemo","message":"test th2 info","timestamp":1608292800000} 3 | {"contexts":{"component":"th1-override","device":"asdf"},"level":"DEBUG","loggerName":"com.aws.greengrass.logging.examples.LoggerDemo","message":"test th1 debug","timestamp":1608292800000} 4 | {"contexts":{"component":"demo","device":"asdf"},"level":"INFO","loggerName":"com.aws.greengrass.logging.examples.LoggerDemo","message":"test main 
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.resources;

import com.aws.greengrass.testing.modules.AbstractAWSResourceModule;
import com.aws.greengrass.testing.modules.model.AWSResourcesContext;
import com.google.auto.service.AutoService;
import com.google.inject.Module;
import com.google.inject.Provides;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;

import javax.inject.Singleton;

/**
 * Guice module, discovered at runtime through {@code @AutoService(Module.class)},
 * that supplies the singleton CloudWatch Logs client used by the UAT resource
 * lifecycles.
 */
// NOTE(review): the generic type arguments on AbstractAWSResourceModule appear to
// have been lost during extraction — confirm against the upstream source.
@AutoService(Module.class)
public class CloudWatchLogsModule extends AbstractAWSResourceModule {

    /**
     * Provides the shared CloudWatch Logs client for the test run.
     *
     * @param provider          credentials provider used to sign requests
     * @param context           test-run context supplying the target AWS region
     * @param httpClientBuilder Apache HTTP client builder shared by the test framework
     * @return a configured {@link CloudWatchLogsClient}
     */
    @Provides
    @Singleton
    @Override
    protected CloudWatchLogsClient providesClient(
            AwsCredentialsProvider provider,
            AWSResourcesContext context,
            ApacheHttpClient.Builder httpClientBuilder) {
        return CloudWatchLogsClient.builder()
                .credentialsProvider(provider)
                .region(context.region())
                .httpClientBuilder(httpClientBuilder)
                .build();
    }
}
3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.resources; 7 | 8 | import com.aws.greengrass.logging.api.Logger; 9 | import com.aws.greengrass.logging.impl.LogManager; 10 | import com.aws.greengrass.testing.api.model.TestingModel; 11 | import com.aws.greengrass.testing.resources.AWSResource; 12 | import org.immutables.value.Value; 13 | import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; 14 | import software.amazon.awssdk.services.cloudwatchlogs.model.DeleteLogStreamRequest; 15 | import software.amazon.awssdk.services.cloudwatchlogs.model.ResourceNotFoundException; 16 | 17 | @TestingModel 18 | @Value.Immutable 19 | public interface CloudWatchLogStreamModel extends AWSResource { 20 | Logger logger = LogManager.getLogger(CloudWatchLogStream.class); 21 | 22 | String groupName(); 23 | 24 | String streamName(); 25 | 26 | 27 | 28 | @Override 29 | default void remove(CloudWatchLogsClient client) { 30 | DeleteLogStreamRequest request = 31 | DeleteLogStreamRequest.builder().logGroupName(groupName()).logStreamName(streamName()).build(); 32 | 33 | try { 34 | client.deleteLogStream(request); 35 | } catch (ResourceNotFoundException notFound) { 36 | logger.atDebug().kv("streamName", streamName()).cause(notFound).log("Failed to delete stream"); 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/services/DiskSpaceManagementService.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.logmanager.services;

import com.aws.greengrass.logmanager.model.LogFile;
import com.aws.greengrass.logmanager.model.LogFileGroup;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;


public class DiskSpaceManagementService {

    /**
     * Deletes already-processed log files until the log group is back below its
     * configured disk space usage limit.
     *
     * @param group a log file group
     * @return hashes of the files that were deleted; an empty list when the group
     *         has no configured limit or nothing had to be deleted
     */
    public List<String> freeDiskSpace(LogFileGroup group) {
        if (!group.getMaxBytes().isPresent()) {
            // No limit configured for this group: nothing to enforce. Return an
            // empty list rather than null so callers can iterate unconditionally.
            return Collections.emptyList();
        }

        long bytesDeleted = 0;
        // Only the bytes above the limit need to be reclaimed; clamp at zero when
        // the group is already under its limit.
        long minimumBytesToBeDeleted = Math.max(group.totalSizeInBytes() - group.getMaxBytes().get(), 0);
        // Only files that have already been processed (uploaded) are safe to delete.
        List<LogFile> deletableFiles = group.getProcessedLogFiles();
        List<String> deletedHashes = new ArrayList<>();

        for (LogFile logFile : deletableFiles) {
            if (bytesDeleted >= minimumBytesToBeDeleted) {
                break;
            }
            // Capture the size before removal; length() reports 0 once the file is gone.
            long fileSize = logFile.length();

            if (group.remove(logFile)) {
                deletedHashes.add(logFile.hashString());
                bytesDeleted += fileSize;
            }
        }

        return deletedHashes;
    }
}
String logFileName = "localtest"; 19 | String logWriteFreqMs = "100"; 20 | String totalLogNumbers = "50"; 21 | String fileSizeBytes = "1024"; 22 | String fileSizeUnit = "KB"; 23 | String componentName = "com.aws.greengrass.artifacts.LogGenerator"; 24 | String activeFileName = logFileName + ".log"; 25 | 26 | @Test 27 | void GIVEN_request_THEN_log_file_Created() 28 | throws ClassNotFoundException, IllegalAccessException, InstantiationException { 29 | 30 | String[] args = {logFileName, fileSizeBytes, fileSizeUnit, logWriteFreqMs, 31 | totalLogNumbers, tempPath.toString()}; 32 | ((Consumer) Class.forName(componentName).newInstance()).accept(args); 33 | 34 | // check if log file is created 35 | String[] pathnames = tempPath.toFile().list(); 36 | assertTrue(pathnames.length >= 1); 37 | assertTrue(Arrays.asList(pathnames).contains(activeFileName)); 38 | assertTrue(tempPath.resolve(activeFileName).toFile().length() > 0); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /uat/testing-features/src/main/java/com/aws/greengrass/NetworkUtils.java: -------------------------------------------------------------------------------- 1 | package com.aws.greengrass; 2 | 3 | import java.io.IOException; 4 | import java.util.concurrent.TimeUnit; 5 | 6 | // TODO: Use dynamic dispatch to call the methods below. Currently this class only support linux. 
package com.aws.greengrass;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

// TODO: Use dynamic dispatch to call the methods below. Currently this class only supports Linux.
// this should eventually be integrated as part of OTF and removed from here

/**
 * Helpers that simulate network loss/recovery for UATs by inserting or removing
 * iptables REJECT rules on the ports Greengrass uses.
 */
public class NetworkUtils {
    protected static final String[] NETWORK_PORTS = {"443", "8888", "8889"};
    private static final String ENABLE_OPTION = "--insert";
    private static final String DISABLE_OPTION = "--delete";
    // Placeholders: iptables option, port — repeated for the OUTPUT and INPUT rules.
    private static final String IPTABLE_COMMAND_STR = "sudo iptables %s OUTPUT -p tcp --dport %s -j REJECT && " +
            "sudo iptables %s INPUT -p tcp --sport %s -j REJECT";

    /**
     * Blocks inbound and outbound TCP traffic on the test ports.
     *
     * @throws IOException          if an iptables process cannot be started
     * @throws InterruptedException if interrupted while waiting for a command
     */
    public static void disconnectNetwork() throws InterruptedException, IOException {
        modifyInterfaceUpDownPolicy(ENABLE_OPTION, NETWORK_PORTS);
    }

    /**
     * Removes the REJECT rules installed by {@link #disconnectNetwork()}.
     *
     * @throws IOException          if an iptables process cannot be started
     * @throws InterruptedException if interrupted while waiting for a command
     */
    public static void recoverNetwork() throws InterruptedException, IOException {
        modifyInterfaceUpDownPolicy(DISABLE_OPTION, NETWORK_PORTS);
    }

    // Runs the iptables command once per port with the given --insert/--delete option.
    // The previous `iptableCommandString` and `eventName` parameters were removed:
    // the former always received the constant and the latter was never used.
    private static void modifyInterfaceUpDownPolicy(String option, String... ports)
            throws InterruptedException, IOException {
        for (String port : ports) {
            // Bound each invocation to 2s; a timeout leaves the command running in the
            // background (best-effort behavior preserved from the original).
            new ProcessBuilder().command("sh", "-c", String.format(IPTABLE_COMMAND_STR, option, port, option, port))
                    .start().waitFor(2, TimeUnit.SECONDS);
        }
    }
}
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.logmanager.model;

import lombok.Builder;
import lombok.Data;
import software.amazon.awssdk.services.cloudwatchlogs.model.InputLogEvent;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;

/**
 * Accumulates the log events and per-file bookkeeping for one component within a
 * single CloudWatch upload attempt.
 */
// @Data already bundles @Getter/@Setter, so the previously redundant annotations
// (and their imports) have been dropped.
@Builder
@Data
public class CloudWatchAttemptLogInformation {
    /** Orders events by timestamp, as required by the CloudWatch PutLogEvents API. */
    public static final Comparator<InputLogEvent> EVENT_COMPARATOR =
            Comparator.comparing(InputLogEvent::timestamp);

    // Priority queue keeps insertion cheap; see getSortedLogEvents() for ordering.
    @Builder.Default
    private Queue<InputLogEvent> logEvents = new PriorityQueue<>(EVENT_COMPARATOR);

    // NOTE(review): keys appear to identify the source log file (e.g. its hash) —
    // confirm against callers before relying on this.
    @Builder.Default
    private Map<String, CloudWatchAttemptLogFileInformation> attemptLogFileInformationMap = new HashMap<>();

    private String componentName;
    private LogFileGroup logFileGroup;

    /**
     * Get the log events in chronological order.
     *
     * @return sorted events
     */
    public List<InputLogEvent> getSortedLogEvents() {
        // Sort by timestamp because CloudWatch requires that the logs are in
        // chronological order. Iterating (or copying) a PriorityQueue does NOT
        // yield sorted order, so we must sort explicitly.
        ArrayList<InputLogEvent> sorted = new ArrayList<>(logEvents);
        sorted.sort(EVENT_COMPARATOR);
        return sorted;
    }
}
3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.resources; 7 | 8 | import com.aws.greengrass.testing.resources.AWSResourceLifecycle; 9 | import com.aws.greengrass.testing.resources.AbstractAWSResourceLifecycle; 10 | import com.google.auto.service.AutoService; 11 | import software.amazon.awssdk.core.pagination.sync.SdkIterable; 12 | import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; 13 | import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest; 14 | import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream; 15 | 16 | import javax.inject.Inject; 17 | 18 | @AutoService(AWSResourceLifecycle.class) 19 | public class CloudWatchLogsLifecycle extends AbstractAWSResourceLifecycle { 20 | 21 | @Inject 22 | public CloudWatchLogsLifecycle(CloudWatchLogsClient client) { 23 | super(client, CloudWatchLogStreamSpec.class); 24 | } 25 | 26 | /** 27 | * Retrieves the streams for a given CloudWatch log group if there are any. 28 | * @param groupName name of the CloudWatch group 29 | * @param logStreamNamePattern name of the log CloudWatch log stream 30 | */ 31 | public SdkIterable findStream(String groupName, String logStreamNamePattern) { 32 | DescribeLogStreamsRequest request = DescribeLogStreamsRequest.builder() 33 | .logGroupName(groupName) 34 | .logStreamNamePrefix(logStreamNamePattern) 35 | .descending(true) 36 | .build(); 37 | 38 | return client.describeLogStreamsPaginator(request).logStreams(); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /uat/README.md: -------------------------------------------------------------------------------- 1 | ## Log Manager User Acceptance Tests 2 | User Acceptance Tests for Log Manager run using `aws-greengrass-testing-standalone` as a library. 
They execute E2E 3 | tests which will spin up an instance of Greengrass on your device and execute different sets of tests, by installing 4 | the `aws.greengrass.LogManager` component. 5 | 6 | ## Running UATs locally 7 | 8 | Ensure credentials are available by setting them in environment variables. On Unix-based systems: 9 | 10 | ```bash 11 | export AWS_ACCESS_KEY_ID= 12 | export AWS_SECRET_ACCESS_KEY= 13 | ``` 14 | 15 | On Windows PowerShell: 16 | 17 | ```bash 18 | $Env:AWS_ACCESS_KEY_ID= 19 | $Env:AWS_SECRET_ACCESS_KEY= 20 | ``` 21 | 22 | For UATs to run you will need to package your entire application along with `aws-greengrass-testing-standalone` into 23 | an uber jar. To do so, run (from the root of the project): 24 | 25 | ``` 26 | mvn -U -ntp clean verify -f uat/pom.xml 27 | ``` 28 | 29 | Note: Every time you make changes to the codebase you will have to rebuild the uber jar for those changes to be present 30 | on the final artifact. 31 | 32 | Finally, download the zip containing the latest version of the Nucleus, which will be used to provision Greengrass for 33 | the UATs. 34 | 35 | ```bash 36 | curl -s https://d2s8p88vqu9w66.cloudfront.net/releases/greengrass-nucleus-latest.zip > greengrass-nucleus-latest.zip 37 | ``` 38 | 39 | Execute the UATs by running the following command from the root of the project. 
40 | 41 | ``` 42 | sudo java -Dggc.archive= -Dtest.log.path= -Dtags=LogManager -jar uat/target/greengrass-log-manager-uat-artifact.jar 43 | ``` 44 | 45 | Command arguments: 46 | 47 | Dggc.archive - path to the nucleus zip that was downloaded 48 | Dtest.log.path - path where you would like the test results to be stored -------------------------------------------------------------------------------- /.github/workflows/maven.yml: -------------------------------------------------------------------------------- 1 | name: Java CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: '*' 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | with: 16 | fetch-depth: 0 17 | - uses: wagoid/commitlint-github-action@v6 18 | - name: Set up JDK 1.8 19 | uses: actions/setup-java@v4 20 | with: 21 | distribution: corretto 22 | java-version: 8 23 | cache: maven 24 | - run: rm -rf /tmp/* 25 | continue-on-error: true 26 | - name: Build with Maven 27 | env: 28 | AWS_REGION: us-west-2 29 | run: mvn -ntp -U clean verify 30 | - name: Upload Failed Test Report 31 | uses: actions/upload-artifact@v4 32 | if: failure() 33 | with: 34 | name: Failed Test Report 35 | path: target/surefire-reports 36 | - name: Upload Coverage 37 | uses: actions/upload-artifact@v4 38 | if: always() 39 | with: 40 | name: Coverage Report 41 | path: target/jacoco-report 42 | - name: Upload unit test coverage to Codecov 43 | uses: codecov/codecov-action@v3 44 | with: 45 | directory: target/jacoco-report 46 | files: jacoco.xml 47 | flags: unit 48 | - name: Upload integration test coverage to Codecov 49 | uses: codecov/codecov-action@v3 50 | with: 51 | directory: target/jacoco-report/jacoco-it 52 | files: jacoco.xml 53 | flags: integration 54 | - name: Convert Jacoco unit test report to Cobertura 55 | run: python3 .github/scripts/cover2cover.py target/jacoco-report/jacoco.xml src/main/java > target/jacoco-report/cobertura.xml 56 | - name: Convert 
Jacoco integ test report to Cobertura 57 | run: python3 .github/scripts/cover2cover.py target/jacoco-report/jacoco-it/jacoco.xml src/main/java > target/jacoco-report/cobertura-it.xml 58 | - name: Save PR number 59 | env: 60 | PR_NUMBER: ${{ github.event.number }} 61 | PR_SHA: ${{ github.event.pull_request.head.sha }} 62 | run: | 63 | mkdir -p ./pr/jacoco-report 64 | echo "$PR_NUMBER" | tr -cd '0-9' > ./pr/NR 65 | echo "$PR_SHA" | tr -cd 'a-fA-F0-9' > ./pr/SHA 66 | 67 | cp target/jacoco-report/cobertura.xml ./pr/jacoco-report/cobertura.xml 68 | cp target/jacoco-report/cobertura-it.xml ./pr/jacoco-report/cobertura-it.xml 69 | if: github.event_name == 'pull_request' 70 | - name: Upload files 71 | uses: actions/upload-artifact@v4 72 | with: 73 | name: pr 74 | path: pr/ 75 | if: github.event_name == 'pull_request' 76 | -------------------------------------------------------------------------------- /src/test/resources/com/aws/greengrass/logmanager/testlogs2.log: -------------------------------------------------------------------------------- 1 | 2020-12-17T22:58:52.686Z [INFO] (pool-2-thread-13) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-awaiting-start. waiting for dependencies to start. {serviceName=main, currentState=INSTALLED} 2 | 2020-12-17T22:58:52.688Z [INFO] (ComponentWithManyChildProcesses-lifecycle) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-report-state. {serviceName=ComponentWithManyChildProcesses, currentState=NEW, newState=INSTALLED} 3 | 2020-12-17T22:58:52.688Z [INFO] (ComponentWithManyChildProcesses-lifecycle) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-set-state. {serviceName=ComponentWithManyChildProcesses, currentState=NEW, newState=INSTALLED} 4 | 2020-12-17T22:58:52.689Z [INFO] (pool-2-thread-9) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-awaiting-start. waiting for dependencies to start. 
{serviceName=ComponentWithManyChildProcesses, currentState=INSTALLED} 5 | 2020-12-17T22:58:52.690Z [INFO] (pool-2-thread-9) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-starting. {serviceName=ComponentWithManyChildProcesses, currentState=INSTALLED} 6 | 2020-12-17T22:58:52.690Z [INFO] (pool-2-thread-9) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-report-state. {serviceName=ComponentWithManyChildProcesses, currentState=INSTALLED, newState=STARTING} 7 | 2020-12-17T22:58:52.691Z [INFO] (ComponentWithManyChildProcesses-lifecycle) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-set-state. {serviceName=ComponentWithManyChildProcesses, currentState=INSTALLED, newState=STARTING} 8 | 2020-12-17T14:58:52.692+08:00 [INFO] (pool-2-thread-9) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-report-state. {serviceName=ComponentWithManyChildProcesses, currentState=STARTING, newState=FINISHED} 9 | 2020-12-17T22:58:52.693Z [INFO] (pool-2-thread-9) com.aws.greengrass.lifecyclemanager.GenericExternalService: generic-service-finished. Nothing done. {serviceName=ComponentWithManyChildProcesses, currentState=STARTING} 10 | 2020-12-17T22:58:52.693Z [INFO] (ComponentWithManyChildProcesses-lifecycle) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-report-state. {serviceName=ComponentWithManyChildProcesses, currentState=STARTING, newState=STOPPING} 11 | 2020-12-17T22:58:52.694Z [INFO] (ComponentWithManyChildProcesses-lifecycle) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-set-state. {serviceName=ComponentWithManyChildProcesses, currentState=STARTING, newState=FINISHED} 12 | 2020-12-17T22:58:52.695Z [INFO] (ComponentWithManyChildProcesses-lifecycle) com.aws.greengrass.lifecyclemanager.GenericExternalService: service-set-state. 
package com.aws.greengrass.logmanager.util;

import com.aws.greengrass.logmanager.model.LogFile;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Random;

/**
 * Static helpers for unit tests: random content generation, file creation, and
 * simulated log rotation.
 */
public final class TestUtils {
    private static final Random rnd = new Random();

    private TestUtils() { }

    /**
     * Builds a random alphanumeric string of exactly {@code bytesNeeded}
     * characters (every character used is a single byte in UTF-8).
     */
    public static String givenAStringOfSize(int bytesNeeded) {
        StringBuilder testStrings = new StringBuilder(Math.max(bytesNeeded, 0));
        String testChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqestuvwxyz0123456789";
        while (testStrings.length() < bytesNeeded) {
            testStrings.append(testChars.charAt(rnd.nextInt(testChars.length())));
        }
        return testStrings.toString();
    }

    /** Writes {@code byteArray} to {@code file}, replacing any existing content. */
    public static void writeFile(File file, byte[] byteArray) throws IOException {
        try (OutputStream fileOutputStream = Files.newOutputStream(file.toPath())) {
            fileOutputStream.write(byteArray);
        }
    }

    /** Creates a file at {@code filePath} containing {@code content} encoded as UTF-8. */
    public static File createFileWithContent(Path filePath, String content) throws IOException {
        File file = new File(filePath.toUri());
        writeFile(file, content.getBytes(StandardCharsets.UTF_8));
        return file;
    }

    /** Creates a {@link LogFile} at {@code uri} filled with {@code bytesNeeded} random bytes. */
    public static LogFile createLogFileWithSize(URI uri, int bytesNeeded) throws IOException {
        LogFile file = new LogFile(uri);
        writeFile(file, givenAStringOfSize(bytesNeeded).getBytes(StandardCharsets.UTF_8));
        return file;
    }

    /**
     * Reads the whole file as text. Line separators are intentionally dropped:
     * lines are concatenated without a delimiter, matching how fixture content
     * is generated by {@link #givenAStringOfSize(int)}.
     */
    @SuppressWarnings("PMD.AssignmentInOperand")
    public static String readFileContent(File file) throws IOException {
        StringBuilder content = new StringBuilder();

        try (BufferedReader br = new BufferedReader(new InputStreamReader(Files.newInputStream(file.toPath())))) {
            String line;
            while ((line = br.readLine()) != null) {
                content.append(line);
            }
        }
        return content.toString();
    }

    /**
     * Simulates log rotation: each file is renamed to {@code <active>.<n>} and a
     * fresh, empty active file is created at the original path.
     *
     * @param files files to rotate; the first is treated as the active file
     * @return the newly created, empty active file
     * @throws IOException if any rename or the active-file creation fails
     *                     (File.renameTo's return value was previously ignored,
     *                     letting failures corrupt fixtures silently)
     */
    public static File rotateFilesByRenamingThem(File... files) throws IOException {
        if (files.length == 0) {
            throw new IllegalArgumentException("At least one file is required to rotate");
        }
        // Create new active file at the path of the current active (first) file.
        String activeFilePath = files[0].getAbsolutePath();

        for (int i = files.length - 1; i >= 0; i--) {
            File current = files[i];
            // Rename via a new File object so the instances in the caller's array keep
            // their original paths — closer to what happens in a real rotation.
            File toModify = new File(activeFilePath + "." + (i + 1));
            // Files.move throws on failure, unlike the previously ignored File.renameTo.
            Files.move(current.toPath(), toModify.toPath());
        }

        File activeFile = new File(activeFilePath);
        // Files.createFile throws if the path still exists, surfacing rotation bugs.
        Files.createFile(activeFile.toPath());

        return activeFile;
    }
}
5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 
38 | 39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information, see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 
60 | -------------------------------------------------------------------------------- /.github/workflows/externalPR.yml: -------------------------------------------------------------------------------- 1 | name: External PR 2 | 3 | on: 4 | workflow_run: 5 | workflows: ["Java CI"] 6 | types: 7 | - completed 8 | 9 | permissions: 10 | pull-requests: write 11 | 12 | jobs: 13 | comment: 14 | runs-on: ubuntu-latest 15 | if: ${{ github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' }} 16 | name: Comment 17 | steps: 18 | - uses: actions/checkout@v4 19 | with: 20 | fetch-depth: 0 21 | - name: 'Download artifact' 22 | uses: actions/github-script@v7 23 | with: 24 | script: | 25 | var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ 26 | owner: context.repo.owner, 27 | repo: context.repo.repo, 28 | run_id: ${{github.event.workflow_run.id }}, 29 | }); 30 | var matchArtifact = artifacts.data.artifacts.filter((artifact) => { 31 | return artifact.name == "pr" 32 | })[0]; 33 | var download = await github.rest.actions.downloadArtifact({ 34 | owner: context.repo.owner, 35 | repo: context.repo.repo, 36 | artifact_id: matchArtifact.id, 37 | archive_format: 'zip', 38 | }); 39 | var fs = require('fs'); 40 | fs.writeFileSync('${{github.workspace}}/pr.zip', Buffer.from(download.data)); 41 | - run: unzip pr.zip 42 | - name: outputs 43 | run: |- 44 | echo "PR=$(cat NR)" >> $GITHUB_ENV 45 | echo "SHA=$(cat SHA)" >> $GITHUB_ENV 46 | - name: cobertura-report-unit-test 47 | uses: 5monkeys/cobertura-action@v14 48 | continue-on-error: true 49 | with: 50 | # The GITHUB_TOKEN for this repo 51 | repo_token: ${{ github.token }} 52 | # Path to the cobertura file. 53 | path: jacoco-report/cobertura.xml 54 | # If files with 100% should be skipped from report. 55 | skip_covered: false 56 | # Minimum allowed coverage percentage as an integer. 57 | minimum_coverage: 65 58 | # Show line rate as specific column. 
59 | show_line: true 60 | # Show branch rate as specific column. 61 | show_branch: true 62 | # Use class names instead of the filename 63 | show_class_names: true 64 | # Use a unique name for the report and comment 65 | report_name: Unit Tests Coverage Report 66 | pull_request_number: ${{ env.PR }} 67 | - name: cobertura-report-integration-test 68 | uses: 5monkeys/cobertura-action@v14 69 | continue-on-error: true 70 | with: 71 | # The GITHUB_TOKEN for this repo 72 | repo_token: ${{ github.token }} 73 | # Path to the cobertura file. 74 | path: jacoco-report/cobertura-it.xml 75 | # If files with 100% should be skipped from report. 76 | skip_covered: false 77 | # Minimum allowed coverage percentage as an integer. 78 | minimum_coverage: 58 79 | # Show line rate as specific column. 80 | show_line: true 81 | # Show branch rate as specific column. 82 | show_branch: true 83 | # Use class names instead of the filename 84 | show_class_names: true 85 | # Use a unique name for the report and comment 86 | report_name: Integration Tests Coverage Report 87 | pull_request_number: ${{ env.PR }} 88 | -------------------------------------------------------------------------------- /uat/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 8 | 9 | 11 | 4.0.0 12 | com.aws.greengrass 13 | log-manager-uat 14 | 1.0-SNAPSHOT 15 | pom 16 | 17 | 18 | custom-components 19 | 20 | 21 | testing-features 22 | 23 | 24 | 25 | 8 26 | 8 27 | 28 | 29 | 30 | 31 | org.apache.maven.plugins 32 | maven-pmd-plugin 33 | 3.13.0 34 | 35 | 36 | 0 37 | true 38 | 39 | ../codestyle/pmd-eg-ruleset.xml 40 | ../codestyle/pmd-eg-tests-ruleset.xml 41 | 42 | true 43 | ${skipTests} 44 | 45 | 46 | 47 | test 48 | 49 | check 50 | 51 | 52 | 53 | 54 | 55 | maven-checkstyle-plugin 56 | 3.1.0 57 | 58 | 59 | com.puppycrawl.tools 60 | checkstyle 61 | 8.29 62 | 63 | 64 | 65 | true 66 | ../codestyle/checkstyle.xml 67 | warning 68 | 0 69 | ${skipTests} 70 | 71 | 72 | 73 | validate 74 | 
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.logmanager.util;

import com.aws.greengrass.deployment.DeviceConfiguration;
import com.aws.greengrass.tes.LazyCredentialProvider;
import com.aws.greengrass.util.Coerce;
import com.aws.greengrass.util.ProxyUtils;
import lombok.Getter;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.core.retry.backoff.BackoffStrategy;
import software.amazon.awssdk.core.retry.conditions.OrRetryCondition;
import software.amazon.awssdk.core.retry.conditions.RetryCondition;
import software.amazon.awssdk.core.retry.conditions.RetryOnExceptionsCondition;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.LimitExceededException;
import software.amazon.awssdk.services.iam.model.ServiceFailureException;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Supplier;
import javax.inject.Inject;

/**
 * Builds and wraps the CloudWatch Logs client used to upload logs. The client is
 * configured with the device's region and proxy settings, and with a retry policy
 * that retries additional service exceptions on top of the SDK defaults.
 */
@Getter
public class CloudWatchClientFactory {
    // Generic type parameters below were restored; they had been stripped to raw
    // types during extraction (e.g. "Set>" remnant in the original text).
    private final Supplier<CloudWatchLogsClient> clientFactory;
    private final SdkClientWrapper<CloudWatchLogsClient> wrapper;
    private final Region region;
    private final LazyCredentialProvider credentialsProvider;
    //TODO: Handle fips
    //private static String CLOUD_WATCH_FIPS_HOST = "logs-fips.%s.amazonaws.com";

    // Exceptions worth retrying in addition to the SDK's default retryable set.
    private static final Set<Class<? extends Exception>> retryableCWLogsExceptions =
            new HashSet<>(Arrays.asList(LimitExceededException.class, ServiceFailureException.class));

    private static final RetryCondition retryCondition = OrRetryCondition
            .create(RetryCondition.defaultRetryCondition(),
                    RetryOnExceptionsCondition.create(retryableCWLogsExceptions));

    private static final RetryPolicy retryPolicy =
            RetryPolicy.builder().numRetries(5).backoffStrategy(BackoffStrategy.defaultStrategy())
                    .retryCondition(retryCondition).build();

    /**
     * Constructor.
     *
     * @param deviceConfiguration device configuration
     * @param credentialsProvider credential provider from TES
     */
    @Inject
    public CloudWatchClientFactory(DeviceConfiguration deviceConfiguration,
                                   LazyCredentialProvider credentialsProvider) {
        this.region = Region.of(Coerce.toString(deviceConfiguration.getAWSRegion()));
        this.credentialsProvider = credentialsProvider;
        this.clientFactory = this::createClient;
        this.wrapper = new SdkClientWrapper<>(clientFactory);
    }

    // Creates a fresh client honoring device proxy settings and the custom retry policy.
    private CloudWatchLogsClient createClient() {
        return CloudWatchLogsClient.builder()
                .credentialsProvider(credentialsProvider)
                .httpClient(ProxyUtils.getSdkHttpClient())
                .overrideConfiguration(ClientOverrideConfiguration.builder()
                        .retryPolicy(retryPolicy)
                        .build())
                .region(region)
                .build();
    }
}
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.logmanager.util;

import com.aws.greengrass.config.Topics;
import com.aws.greengrass.config.UpdateBehaviorTree;
import com.aws.greengrass.dependency.Context;
import com.aws.greengrass.util.Utils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;

import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

import static org.junit.jupiter.api.Assertions.assertEquals;

/**
 * Verifies that {@link ConfigUtil#updateFromMapWhenChanged} only emits config change
 * events when a value actually differs, unlike plain Topics.updateFromMap which also
 * fires timestamp-only updates.
 */
class ConfigUtilTest {
    private final Context context = new Context();

    @AfterEach()
    void after() {
        context.shutdown();
    }

    @Test
    public void update_from_map_updates_when_changes_exist() {
        Topics root = Topics.of(context, "a", null);
        // Counts every event published under root; used below to detect spurious updates.
        AtomicInteger callbackCount = new AtomicInteger();
        root.subscribe((w, n) -> {
            callbackCount.incrementAndGet();
        });

        Map map1 = Utils.immutableMap("B", 1, "C", 2);
        long now = System.currentTimeMillis();
        // Baseline using the stock updateFromMap: 5 events observed
        // (subscription + creation/update events for B and C).
        root.updateFromMap(map1, new UpdateBehaviorTree(UpdateBehaviorTree.UpdateBehavior.REPLACE, now));
        context.waitForPublishQueueToClear();

        assertEquals(5, callbackCount.get());

        // Re-applying the identical map through ConfigUtil must be a no-op.
        ConfigUtil.updateFromMapWhenChanged(root, map1,
                new UpdateBehaviorTree(UpdateBehaviorTree.UpdateBehavior.REPLACE, now));

        // Nothing should have changed
        context.waitForPublishQueueToClear();
        assertEquals(5, callbackCount.get());

        Map map2 = Utils.immutableMap("C", 1);
        ConfigUtil.updateFromMapWhenChanged(root, map2,
                new UpdateBehaviorTree(UpdateBehaviorTree.UpdateBehavior.REPLACE, now));

        // 2 events to remove B and update the value of C
        context.waitForPublishQueueToClear();
        assertEquals(7, callbackCount.get());

        // Try pushing timestamp forward; same values, so no events should fire.
        ConfigUtil.updateFromMapWhenChanged(root, map2,
                new UpdateBehaviorTree(UpdateBehaviorTree.UpdateBehavior.REPLACE, now+10));

        // Nothing should have changed
        context.waitForPublishQueueToClear();
        assertEquals(7, callbackCount.get());

        // Add in some nesting: C changes from a leaf Topic to a container Topics node.
        Map map3 = Utils.immutableMap("C", Utils.immutableMap("A", 2));
        ConfigUtil.updateFromMapWhenChanged(root, map3,
                new UpdateBehaviorTree(UpdateBehaviorTree.UpdateBehavior.REPLACE, now+10));

        // 4 events more: remove C, add C as Topics, add A, update A
        context.waitForPublishQueueToClear();
        assertEquals(11, callbackCount.get());

        ConfigUtil.updateFromMapWhenChanged(root, map3,
                new UpdateBehaviorTree(UpdateBehaviorTree.UpdateBehavior.REPLACE, now+20));

        // Nothing should have changed
        context.waitForPublishQueueToClear();
        assertEquals(11, callbackCount.get());

        // Same structure, different leaf value for the nested A.
        Map map4 = Utils.immutableMap("C", Utils.immutableMap("A", 1));
        ConfigUtil.updateFromMapWhenChanged(root, map4,
                new UpdateBehaviorTree(UpdateBehaviorTree.UpdateBehavior.REPLACE, now+20));

        // A changed
        context.waitForPublishQueueToClear();
        assertEquals(12, callbackCount.get());
    }
}
package com.aws.greengrass.logmanager.util;

import com.aws.greengrass.logging.api.Logger;
import com.aws.greengrass.logging.impl.LogManager;
import lombok.Getter;
import lombok.Setter;
import org.apache.http.NoHttpResponseException;
import software.amazon.awssdk.core.SdkClient;
import software.amazon.awssdk.core.exception.SdkClientException;

import java.net.SocketException;
import java.util.function.Function;
import java.util.function.Supplier;

/**
 * Wraps an SDK client and transparently recreates it when an operation fails with a
 * connection-level error (e.g. a stale connection that was reset by the peer).
 *
 * @param <T> concrete {@link SdkClient} type being wrapped
 */
public final class SdkClientWrapper<T extends SdkClient> {
    private static final Logger logger = LogManager.getLogger(SdkClientWrapper.class);
    @Getter
    // Setter only for unit testing purpose
    @Setter
    private volatile T client;
    private final Supplier<T> clientFactory;

    public SdkClientWrapper(Supplier<T> clientFactory) {
        this.clientFactory = clientFactory;
        this.client = clientFactory.get();
    }

    /**
     * Executes the given operation on the client, handling potential SDK client exceptions.
     *
     * <p>This method applies the provided operation to the client. If an {@link SdkClientException}
     * occurs and the client needs refreshing (as determined by {@link #shouldRefreshClient(SdkClientException)}),
     * it will attempt to refresh the client and retry the operation once.</p>
     *
     * @param <R> The return type of the operation
     * @param operation A function that takes the client of type T and returns a result of type R
     * @return The result of the operation
     * @throws SdkClientException If the operation fails and the client cannot be refreshed or fails after refresh
     * @throws RuntimeException If an unexpected error occurs during execution
     */
    public <R> R execute(final Function<T, R> operation) {
        try {
            return operation.apply(client);
        } catch (SdkClientException e) {
            if (shouldRefreshClient(e)) {
                logger.atDebug().log("Client needs refresh due to: {}", e.getMessage());
                try {
                    refreshClient();
                    return operation.apply(client);
                } catch (SdkClientException retryException) {
                    logger.atError().log("Failed to execute operation after client refresh", retryException);
                    throw retryException;
                }
            }
            logger.atError().log("SDK client operation failed", e);
            throw e;
        }
    }

    /** Closes the current client (best effort) and replaces it with a freshly built one. */
    private void refreshClient() {
        synchronized (this) {
            if (client != null) {
                try {
                    client.close();
                } catch (SdkClientException e) {
                    logger.atError().log("Error closing client: " + e.getMessage());
                }
            }
            // Creates new client when refresh needed
            client = clientFactory.get();
        }
    }

    /**
     * Walks the exception's cause chain looking for connection-level failures that a new
     * client would recover from.
     *
     * @param e exception thrown by an SDK operation
     * @return true when the client should be rebuilt and the operation retried
     */
    private boolean shouldRefreshClient(SdkClientException e) {
        Throwable cause = e;
        while (cause != null) {
            // BUG FIX: previously written as "connection reset".contains(message), which is
            // only true when the whole message happens to be a substring of the literal.
            // The intent is to detect messages that CONTAIN "connection reset".
            if (cause instanceof SocketException && cause.getMessage() != null
                    && cause.getMessage().toLowerCase().contains("connection reset")) {
                return true;
            }
            if (cause instanceof NoHttpResponseException) {
                return true;
            }
            // Add other conditions that should trigger a client refresh here
            cause = cause.getCause();
        }
        return false;
    }
}
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.integrationtests.logmanager.util;

import com.aws.greengrass.logging.api.Logger;
import com.aws.greengrass.logging.impl.LogManager;
import com.aws.greengrass.logging.impl.config.LogStore;
import com.aws.greengrass.logging.impl.config.model.LogConfigUpdate;

import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

/**
 * Test helper for creating log files filled with random content for the
 * log-manager integration tests.
 */
public final class LogFileHelper {
    public static final int DEFAULT_FILE_SIZE = 10_240;
    public static final int DEFAULT_LOG_LINE_IN_FILE = 10;

    private LogFileHelper() { }

    /**
     * Appends a single line of data (plus a line separator) to the given file,
     * creating it if it does not exist.
     *
     * @param data     line content to append
     * @param filePath file to append to
     * @throws IOException if the file cannot be opened or written
     */
    public static void addDataToFile(String data, Path filePath) throws IOException {
        try (BufferedWriter writer = Files.newBufferedWriter(filePath, StandardOpenOption.APPEND,
                StandardOpenOption.CREATE)) {
            writer.write(data + System.lineSeparator());
        }
    }

    /**
     * Generates {@value #DEFAULT_LOG_LINE_IN_FILE} random alphanumeric strings of
     * 1024 characters each.
     *
     * @return list of generated messages
     */
    public static List<String> generateRandomMessages() {
        List<String> msgs = new ArrayList<>(DEFAULT_LOG_LINE_IN_FILE);
        // Hoisted out of the loop: the bounds are constant and a single Random
        // instance is cheaper (and better seeded) than one per iteration.
        int leftLimit = 48; // numeral '0'
        int rightLimit = 122; // letter 'z'
        int targetStringLength = 1024;
        Random random = new Random();
        for (int i = 0; i < DEFAULT_LOG_LINE_IN_FILE; i++) {
            // Filter keeps only [0-9A-Za-z] code points from the sampled range.
            String generatedString = random.ints(leftLimit, rightLimit + 1)
                    .filter(s -> (s <= 57 || s >= 65) && (s <= 90 || s >= 97))
                    .limit(targetStringLength)
                    .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append)
                    .toString();
            msgs.add(generatedString);
        }
        return msgs;
    }

    /**
     * Creates a uniquely named temp file in the given directory and fills it with
     * random messages.
     *
     * @param tempDirectoryPath directory to create the file in
     * @param fileNamePrefix    temp file name prefix
     * @param fileNameSuffix    temp file name suffix (extension)
     * @return the created file
     * @throws IOException if the file cannot be created or written
     */
    public static File createTempFileAndWriteData(Path tempDirectoryPath, String fileNamePrefix, String fileNameSuffix)
            throws IOException {
        Path filePath = Files.createTempFile(tempDirectoryPath, fileNamePrefix, fileNameSuffix);
        File file = filePath.toFile();
        List<String> randomMessages = generateRandomMessages();
        for (String message : randomMessages) {
            addDataToFile(message, file.toPath());
        }
        return file;
    }

    /**
     * Writes random messages, an error with a stack trace, and a trailing info line
     * through the Greengrass logging framework into the given directory.
     *
     * @param tempDirectoryPath directory the logger writes into
     * @param fileNamePrefix    logger and file name prefix
     * @throws IOException declared for call-site symmetry with the other helpers
     */
    public static void writeExampleLogs(Path tempDirectoryPath, String fileNamePrefix)
            throws IOException {
        Logger l = LogManager.getLogger(fileNamePrefix,
                LogConfigUpdate.builder().fileName(fileNamePrefix)
                        .outputDirectory(tempDirectoryPath.toString()).outputType(LogStore.FILE).build());
        List<String> randomMessages = generateRandomMessages();
        for (String message : randomMessages) {
            l.info(message);
        }
        l.error("this is an error", new RuntimeException("known"));
        l.info("after error");
    }

    /**
     * Creates (if needed) a file named {@code fileName + ".log"} in the directory and
     * appends random messages to it.
     *
     * @param tempDirectoryPath directory for the file
     * @param fileName          base name without the .log extension
     * @throws IOException if the file cannot be created or written
     */
    public static void createFileAndWriteData(Path tempDirectoryPath, String fileName)
            throws IOException {
        Path filePath = tempDirectoryPath.resolve(fileName + ".log");
        if (!Files.exists(filePath)) {
            Files.createFile(filePath);
        }
        File file = filePath.toFile();
        List<String> randomMessages = generateRandomMessages();
        for (String message : randomMessages) {
            addDataToFile(message, file.toPath());
        }
    }
}
component collects and optionally uploads logs from Greengrass core devices to Amazon CloudWatch Logs.
You can configure system logs and logs for each component. Log Manager is an optional internal Greengrass service that runs in the same JVM as the
[Greengrass nucleus](https://github.com/aws/aws-greengrass-nucleus).

Log Manager has two major features: **Logs Uploader** and **Disk Space Management**

**Logs Uploader** --
It is responsible for uploading logs from the device, for Greengrass as well as non-Greengrass components, to CloudWatch.
Since customers can use either the Greengrass Logging and Metrics service framework or any other framework to log, the
logs uploader needs to be smart in order to handle these different formats of logs.
The logs uploader should be able to handle any network disruptions or device reboots. The logs uploader should smartly
manage the log rotation for different logging frameworks and upload the logs on a "best effort" basis.

Customers can add each component's configuration describing where the log files are located and how they are rotated. The
logs uploader will then perform a k-way merge and upload the logs to CloudWatch in batches. After merging the different
log files, the logs uploader will create the log groups and log streams as needed before pushing all the log events to
CloudWatch.

**Disk Space Management** --
This feature is responsible for managing the space taken by the logs on the device. Customers can configure the log manager
to delete log files after all the logs from them have been uploaded to CloudWatch. Customers can also configure
the log manager to manage the disk space taken by the log files on the disk. The log manager will try to keep the logs below
the threshold specified by the customer.
29 | 30 | ## FAQ 31 | 32 | ## Sample Configuration 33 | **YAML example** 34 | ``` 35 | Manifests: 36 | - Dependencies: 37 | aws.greengrass.LogManager 38 | - aws.greengrass.LogManager: 39 | Configuration: 40 | logsUploaderConfiguration: 41 | componentLogsConfigurationMap: 42 | : 43 | logFileRegex: '\\w*.log' 44 | logFileDirectoryPath: '/path/to/logs/directory/' 45 | minimumLogLevel: 'INFO' 46 | diskSpaceLimit: '25' 47 | diskSpaceLimitUnit: 'MB' 48 | deleteLogFileAfterCloudUpload: true 49 | systemLogsConfiguration: 50 | uploadToCloudWatch: true 51 | minimumLogLevel: 'INFO' 52 | diskSpaceLimit: '25' 53 | diskSpaceLimitUnit: 'MB' 54 | deleteLogFileAfterCloudUpload: true 55 | 56 | ``` 57 | 58 | **JSON example** 59 | ``` 60 | { 61 | "logsUploaderConfiguration":{ 62 | "systemLogsConfiguration":{ 63 | "uploadToCloudWatch":true, 64 | "minimumLogLevel":"INFO", 65 | "diskSpaceLimit":25, 66 | "diskSpaceLimitUnit":"MB", 67 | "deleteLogFileAfterCloudUpload":true 68 | }, 69 | "componentLogsConfigurationMap": { 70 | "": { 71 | "minimumLogLevel":"INFO", 72 | "logFileDirectoryPath":"/path/to/logs/directory/", 73 | "logFileRegex":"\\w*.log", 74 | "diskSpaceLimit":25, 75 | "diskSpaceLimitUnit":"MB", 76 | "deleteLogFileAfterCloudUpload":true 77 | } 78 | } 79 | }, 80 | "periodicUploadIntervalSec":600 81 | } 82 | ``` 83 | ## Security 84 | 85 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 86 | 87 | ## License 88 | 89 | This project is licensed under the Apache-2.0 License. 
90 | 91 | -------------------------------------------------------------------------------- /codestyle/pmd-eg-tests-ruleset.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 7 | 8 | 12 | 13 | Custom Rules 14 | 15 | 16 | .* 17 | .*Test.* 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/util/ConfigUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | package com.aws.greengrass.logmanager.util; 7 | 8 | import com.aws.greengrass.config.CaseInsensitiveString; 9 | import com.aws.greengrass.config.Node; 10 | import com.aws.greengrass.config.Topic; 11 | import com.aws.greengrass.config.Topics; 12 | import com.aws.greengrass.config.UnsupportedInputTypeException; 13 | import com.aws.greengrass.config.UpdateBehaviorTree; 14 | import com.aws.greengrass.logging.api.Logger; 15 | import com.aws.greengrass.logging.impl.LogManager; 16 | import edu.umd.cs.findbugs.annotations.NonNull; 17 | 18 | import java.util.HashSet; 19 | import java.util.Map; 20 | import java.util.Objects; 21 | import java.util.Set; 22 | 23 | public final class ConfigUtil { 24 | private static final Logger logger = LogManager.getLogger(ConfigUtil.class); 25 | 26 | private ConfigUtil() { 27 | } 28 | 29 | /** 30 | * Same as topics.updateFromMap, but only makes the update when the value actually changes, skipping any unnecessary 31 | * timestampUpdated events. 
     * Ideally this code would exist in Topics, but it isn't, so we need to do this in order to
     * maintain compatibility.
     *
     * @param topics    Topics to update with values from the map
     * @param newValues the new value to apply
     * @param ubt       update behavior tree
     */
    public static void updateFromMapWhenChanged(Topics topics, Map newValues, UpdateBehaviorTree ubt) {
        // Start by assuming every existing child is stale; children that also appear in
        // newValues are removed from this set as they are visited below.
        Set childrenToRemove = new HashSet<>(topics.children.keySet());

        newValues.forEach((okey, value) -> {
            CaseInsensitiveString key = new CaseInsensitiveString(okey);
            childrenToRemove.remove(key);
            updateChild(topics, key, value, ubt);
        });

        // Anything left over exists in the config but not in the new map.
        childrenToRemove.forEach(childName -> {
            UpdateBehaviorTree childMergeBehavior = ubt.getChildBehavior(childName.toString());

            // remove the existing child if its merge behavior is REPLACE
            if (childMergeBehavior.getBehavior() == UpdateBehaviorTree.UpdateBehavior.REPLACE) {
                topics.remove(topics.children.get(childName));
            }
        });
    }

    /**
     * Applies one key/value pair from the incoming map onto the child node of {@code t},
     * converting between leaf (Topic) and container (Topics) nodes when the value's shape
     * changed, and skipping the write entirely when the leaf value is already equal.
     *
     * @param t             parent container node
     * @param key           child key being updated
     * @param value         new value; a Map means the child must be a container node
     * @param mergeBehavior behavior tree governing this subtree's merge semantics
     */
    private static void updateChild(Topics t, CaseInsensitiveString key, Object value,
                                    @NonNull UpdateBehaviorTree mergeBehavior) {
        UpdateBehaviorTree childMergeBehavior = mergeBehavior.getChildBehavior(key.toString());

        Node existingChild = t.children.get(key);
        // if new node is a container node
        if (value instanceof Map) {
            // if existing child is a container node
            if (existingChild == null || existingChild instanceof Topics) {
                updateFromMapWhenChanged(t.createInteriorChild(key.toString()), (Map) value, childMergeBehavior);
            } else {
                // Existing child is a leaf; replace it with a container and recurse.
                t.remove(existingChild);
                Topics newNode = t.createInteriorChild(key.toString(), mergeBehavior.getTimestampToUse());
                updateFromMapWhenChanged(newNode, (Map) value, childMergeBehavior);
            }
        // if new node is a leaf node
        } else {
            try {
                if (existingChild == null || existingChild instanceof Topic) {
                    Topic node = t.createLeafChild(key.toString());
                    // Only write when the value actually differs — this is the whole point
                    // of this utility: avoid timestamp-only update events.
                    if (!Objects.equals(node.getOnce(), value)) {
                        node.withValueChecked(childMergeBehavior.getTimestampToUse(), value);
                    }
                } else {
                    // Existing child is a container; replace it with a leaf.
                    t.remove(existingChild);
                    Topic newNode = t.createLeafChild(key.toString());
                    newNode.withValueChecked(childMergeBehavior.getTimestampToUse(), value);
                }
            } catch (UnsupportedInputTypeException e) {
                // createLeafChild/withValueChecked only reject unsupported value types,
                // which the callers never pass — hence "should never fail".
                logger.error("Should never fail in updateChild", e);
            }
        }
    }
}
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.artifacts;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.rolling.RollingFileAppender;
import ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy;
import ch.qos.logback.core.util.FileSize;
import org.slf4j.helpers.BasicMDCAdapter;
import software.amazon.awssdk.aws.greengrass.model.InvalidArgumentsError;

import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

/*
This component rotates files with different name. Namely, the file is rotated and renamed by adding the timestamp and
some seq number to differ from others. The rolling policy is LogBack.TimeBasedRollingPolicy,
more information at https://logback.qos.ch/manual/appenders.html
*/
public class LogGenerator implements Consumer<String[]> {
    private static final String rotationNamePattern = "_%d{yyyy-MM-dd_HH-mm}_%i";
    private String logFileName;
    private int writeFreqMs;
    private int fileSizeBytes;
    private int numberOfLogs;
    private String logDirectory;

    /**
     * Entry point. Expects args: [fileName, fileSize, fileSizeUnit(KB|MB|GB),
     * writeFrequencyMs, numberOfLogs, logDirectory].
     */
    @Override
    public void accept(String[] args) {
        init(args);

        try {
            generateLogs();
        } catch (Exception e) {
            // BUG FIX: previously the exception was swallowed silently before exiting,
            // making failed runs indistinguishable from successful ones in the UAT logs.
            // Exit code deliberately stays 0 (interruption during component shutdown is
            // a normal path); the stack trace now at least makes failures visible.
            e.printStackTrace();
            System.exit(0);
        }
    }

    /** Parses and validates the positional CLI arguments into fields. */
    private void init(String[] args) {
        logFileName = args[0];
        fileSizeBytes = getFileSizeInBytes(Double.parseDouble(args[1]), args[2]);
        writeFreqMs = Integer.parseInt(args[3]);
        numberOfLogs = Integer.parseInt(args[4]);
        logDirectory = args[5];

        if (logDirectory.isEmpty()) {
            throw new InvalidArgumentsError("LogDirectory is required");
        }
    }

    /** Writes numberOfLogs sequential log lines, sleeping writeFreqMs between lines. */
    private void generateLogs() throws InterruptedException {
        Logger logger = configureLogger();

        for (int i = 1; i <= numberOfLogs; i++) {
            String logLine = String.format("(seq: %d)", i);
            logger.info(logLine); // INFO LogGenerator (seq: 1)
            TimeUnit.MILLISECONDS.sleep(writeFreqMs);
        }
    }

    /** Builds a standalone logback logger with a size-and-time based rolling file appender. */
    private Logger configureLogger() {
        LoggerContext loggerContext = new LoggerContext();
        // Must set an MDC adapter for 1.3.8+. https://github.com/qos-ch/logback/issues/709
        loggerContext.setMDCAdapter(new BasicMDCAdapter());
        Logger logger = loggerContext.getLogger("LogGenerator");

        // appender: output destination
        RollingFileAppender<ILoggingEvent> appender = new RollingFileAppender<>();
        appender.setFile(logDirectory + "/" + logFileName + ".log");
        appender.setAppend(true);
        appender.setContext(loggerContext);

        // rolling policy: rotate on size, renaming with timestamp + sequence number
        SizeAndTimeBasedRollingPolicy<ILoggingEvent> rollingPolicy = new SizeAndTimeBasedRollingPolicy<>();
        rollingPolicy.setMaxFileSize(new FileSize(fileSizeBytes));
        rollingPolicy.setFileNamePattern(logDirectory + "/" + logFileName + rotationNamePattern + ".log");
        rollingPolicy.setParent(appender);
        rollingPolicy.setContext(loggerContext);
        appender.setRollingPolicy(rollingPolicy);
        rollingPolicy.start();

        // encoder
        PatternLayoutEncoder encoder = new PatternLayoutEncoder();
        encoder.setPattern("%d{yyyy-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n");
        encoder.setContext(loggerContext);
        encoder.start();

        // attach encoder to appender, then start
        appender.setEncoder(encoder);
        appender.start();

        // set logger
        logger.addAppender(appender);
        logger.setLevel(Level.INFO);

        return logger;
    }

    /**
     * Converts a size expressed in KB/MB/GB into bytes.
     *
     * @param fileSize     numeric size
     * @param fileSizeUnit one of "KB", "MB", "GB"
     * @return size in bytes, truncated to int
     * @throws UnsupportedOperationException for any other unit string
     */
    private int getFileSizeInBytes(double fileSize, String fileSizeUnit) {
        // Return directly per case; the old version used a local variable that
        // shadowed the fileSizeBytes field, which was confusing.
        switch (fileSizeUnit) {
            case "KB":
                return (int) (fileSize * FileSize.KB_COEFFICIENT);
            case "MB":
                return (int) (fileSize * FileSize.MB_COEFFICIENT);
            case "GB":
                return (int) (fileSize * FileSize.GB_COEFFICIENT);
            default:
                throw new UnsupportedOperationException("Unsupported file size unit");
        }
    }
}
package com.aws.greengrass.logmanager.model;

import com.aws.greengrass.logmanager.LogManagerService;
import com.aws.greengrass.testcommons.testutilities.GGExtension;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.MockedStatic;
import org.mockito.ScopedMock;
import org.mockito.junit.jupiter.MockitoExtension;

import java.time.Clock;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mockStatic;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;


/**
 * Tests for {@link ProcessingFiles}: serialization to a map, most-recently-used
 * tracking, and time-based eviction of stale entries.
 */
@ExtendWith({MockitoExtension.class, GGExtension.class})
public class ProcessingFilesTest {
    private Optional<MockedStatic<Clock>> clockMock;

    @BeforeEach
    void setup() {
        clockMock = Optional.empty();
    }

    @AfterEach
    void cleanup() {
        this.clockMock.ifPresent(ScopedMock::close);
    }

    /** Pins Clock.systemUTC() so code under test observes the given epoch millis as "now". */
    @SuppressWarnings("PMD.CloseResource")
    private void mockInstant(long expected) {
        this.clockMock.ifPresent(ScopedMock::close);
        Clock spyClock = spy(Clock.class);
        MockedStatic<Clock> clockMock = mockStatic(Clock.class);
        clockMock.when(Clock::systemUTC).thenReturn(spyClock);
        when(spyClock.instant()).thenReturn(Instant.ofEpochMilli(expected));
        this.clockMock = Optional.of(clockMock);
    }

    @Test
    void GIVEN_filledProcessingFiles_WHEN_toMap_returnsMapRepresentation() {
        // Given
        ProcessingFiles processingFiles = new ProcessingFiles(5);
        LogManagerService.CurrentProcessingFileInformation fileInformationOne =
                LogManagerService.CurrentProcessingFileInformation.builder()
                        .fileName("test.log")
                        .fileHash("kj35435")
                        .startPosition(1000)
                        .lastModifiedTime(Instant.now().toEpochMilli())
                        .build();
        processingFiles.put(fileInformationOne);
        LogManagerService.CurrentProcessingFileInformation fileInformationTwo =
                LogManagerService.CurrentProcessingFileInformation.builder()
                        .fileName("test_2023.log")
                        .fileHash("54321")
                        .startPosition(1000)
                        .lastModifiedTime(Instant.now().toEpochMilli())
                        .build();
        processingFiles.put(fileInformationTwo);


        // Then — plain map construction instead of the double-brace anti-pattern,
        // which creates an anonymous subclass holding a reference to the test instance.
        Map<String, Object> expected = new HashMap<>();
        expected.put("kj35435", fileInformationOne.convertToMapOfObjects());
        expected.put("54321", fileInformationTwo.convertToMapOfObjects());
        assertEquals(expected, processingFiles.toMap());
    }

    @Test
    void GIVEN_filledProcessingFiles_WHEN_getMostRecentlyUsed_THEN_mostRecentValueReturned() {
        // Given
        ProcessingFiles processingFiles = new ProcessingFiles(60);

        processingFiles.put(LogManagerService.CurrentProcessingFileInformation.builder()
                .fileName("test.log")
                .fileHash("12345")
                .startPosition(1000)
                .lastModifiedTime(Instant.now().toEpochMilli())
                .build());
        processingFiles.put(LogManagerService.CurrentProcessingFileInformation.builder()
                .fileName("test_2023.log")
                .fileHash("54321")
                .startPosition(1000)
                .lastModifiedTime(Instant.now().toEpochMilli())
                .build());

        // Then — fixed argument order: assertEquals(expected, actual)
        assertEquals("54321", processingFiles.getMostRecentlyUsed().getFileHash());
    }

    @Test
    void GIVEN_processingFilesWithValue_WHEN_newValueAdded_THEN_stateEntriesGetCleared() {

        // Hold entries for a max of 1 day, unless accessed before that time
        ProcessingFiles processingFiles = new ProcessingFiles(60 * 60 * 24);
        Instant now = Instant.now();

        // 2 days ago
        mockInstant(now.minusSeconds(60 * 60 * 24 * 2).toEpochMilli());

        processingFiles.put(LogManagerService.CurrentProcessingFileInformation.builder()
                .fileName("test.log")
                .fileHash("12345")
                .startPosition(1000)
                .lastModifiedTime(Instant.now().toEpochMilli())
                .build());

        // Reset time back to now
        mockInstant(now.toEpochMilli());

        processingFiles.put(LogManagerService.CurrentProcessingFileInformation.builder()
                .fileName("test_2023.log")
                .fileHash("54321")
                .startPosition(1000)
                .lastModifiedTime(now.minusSeconds(60 * 60 * 24 * 2).toEpochMilli())
                .build());

        // Then — fixed argument order: assertEquals(expected, actual).
        // The stale entry from 2 days ago must have been evicted by the second put.
        assertEquals(1, processingFiles.size());
    }
}
java.util.Map; 16 | import java.util.Set; 17 | 18 | 19 | /** 20 | * Cache that holds information about files being processed. It will remove entries that have not been accessed in the 21 | * last maxInactiveTimeSeconds 22 | */ 23 | public class ProcessingFiles { 24 | private final Map cache; 25 | private final int maxInactiveTimeSeconds; 26 | private Node mostRecentlyUsed; 27 | private static final Logger logger = LogManager.getLogger(ProcessingFiles.class); 28 | 29 | 30 | 31 | /** 32 | * Creates an instance of the ProcessingFiles. 33 | * 34 | * @param maxInactiveTimeSeconds - the maximum amount of seconds an entry can be in the cache without being 35 | * accessed 36 | */ 37 | public ProcessingFiles(int maxInactiveTimeSeconds) { 38 | this.cache = new HashMap<>(); 39 | this.maxInactiveTimeSeconds = maxInactiveTimeSeconds; 40 | } 41 | 42 | /** 43 | * Stores the currently processing file information only if the entry does not exist. 44 | * 45 | * @param value - currently processing file information 46 | */ 47 | public void putIfAbsent(LogManagerService.CurrentProcessingFileInformation value) { 48 | if (cache.get(value.getFileHash()) == null) { 49 | put(value); 50 | } 51 | } 52 | 53 | /** 54 | * Stored the currently processing file information. 55 | * 56 | * @param value - currently processing file information 57 | */ 58 | public void put(LogManagerService.CurrentProcessingFileInformation value) { 59 | this.mostRecentlyUsed = Node.builder().lastAccessed(value.getLastAccessedTime()).info(value).build(); 60 | cache.put(value.getFileHash(), this.mostRecentlyUsed); 61 | evictStaleEntries(); 62 | } 63 | 64 | /** 65 | * Removes an entry from the cache for a provided file hash. 66 | * 67 | * @param fileHash - A file hash. 
68 | */ 69 | public void remove(String fileHash) { 70 | if (fileHash == null) { 71 | return; 72 | } 73 | 74 | if (cache.remove(fileHash) != null) { 75 | logger.atDebug().kv("hash", fileHash).log("Evicted file from cache"); 76 | } 77 | } 78 | 79 | /** 80 | * Removes a list of entries from the cache. 81 | * 82 | * @param deletedHashes - A list of file hashes to remove 83 | */ 84 | public void remove(List deletedHashes) { 85 | if (deletedHashes != null) { 86 | deletedHashes.forEach(this::remove); 87 | } 88 | } 89 | 90 | /** 91 | * Returns a currently processing file information for a file hash. 92 | * 93 | * @param fileHash - A file hash 94 | */ 95 | public LogManagerService.CurrentProcessingFileInformation get(String fileHash) { 96 | Node node = this.cache.get(fileHash); 97 | 98 | if (node != null) { 99 | node.setLastAccessed(Instant.now().toEpochMilli()); 100 | node.getInfo().setLastAccessedTime(node.getLastAccessed()); 101 | mostRecentlyUsed = node; 102 | return node.getInfo(); 103 | } 104 | 105 | return null; 106 | } 107 | 108 | public int size() { 109 | return cache.size(); 110 | } 111 | 112 | private void evictStaleEntries() { 113 | // TODO: This could be improved by additionally storing the nodes on a Heap 114 | Iterator> it = cache.entrySet().iterator(); 115 | Set toRemove = new HashSet<>(); 116 | Instant deadline = Instant.now().minusSeconds(maxInactiveTimeSeconds); 117 | 118 | while (it.hasNext()) { 119 | Node node = it.next().getValue(); 120 | Instant lastAccessed = Instant.ofEpochMilli(node.getLastAccessed()); 121 | 122 | if (lastAccessed.isBefore(deadline)) { 123 | toRemove.add(node.info.getFileHash()); 124 | } 125 | } 126 | 127 | toRemove.forEach(cache::remove); 128 | } 129 | 130 | 131 | /** 132 | * Converts the objects stored in the cache into a map. Used serialize the processing files to be stored 133 | * on the runtime config. 
134 | */ 135 | public Map toMap() { 136 | HashMap map = new HashMap<>(); 137 | 138 | cache.forEach((key, value) -> { 139 | map.put(key, value.getInfo().convertToMapOfObjects()); 140 | }); 141 | 142 | return map; 143 | } 144 | 145 | /** 146 | * Returns the most recently used item. 147 | */ 148 | public LogManagerService.CurrentProcessingFileInformation getMostRecentlyUsed() { 149 | if (mostRecentlyUsed != null) { 150 | return mostRecentlyUsed.getInfo(); 151 | } 152 | 153 | return null; 154 | } 155 | 156 | @Builder 157 | @Getter 158 | @Setter 159 | private static class Node { 160 | private long lastAccessed; 161 | private LogManagerService.CurrentProcessingFileInformation info; 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/test/java/com/aws/greengrass/logmanager/model/LogFileGroupTest.java: -------------------------------------------------------------------------------- 1 | package com.aws.greengrass.logmanager.model; 2 | 3 | import com.aws.greengrass.logmanager.exceptions.InvalidLogGroupException; 4 | import com.aws.greengrass.testcommons.testutilities.GGExtension; 5 | import org.junit.jupiter.api.BeforeEach; 6 | import org.junit.jupiter.api.Test; 7 | import org.junit.jupiter.api.extension.ExtendWith; 8 | import org.junit.jupiter.api.io.TempDir; 9 | import org.mockito.junit.jupiter.MockitoExtension; 10 | 11 | import java.io.File; 12 | import java.io.IOException; 13 | import java.nio.charset.StandardCharsets; 14 | import java.nio.file.Path; 15 | import java.time.Instant; 16 | import java.util.Arrays; 17 | import java.util.List; 18 | import java.util.concurrent.TimeUnit; 19 | import java.util.regex.Pattern; 20 | 21 | import static com.aws.greengrass.logmanager.util.TestUtils.createLogFileWithSize; 22 | import static com.aws.greengrass.logmanager.util.TestUtils.givenAStringOfSize; 23 | import static com.aws.greengrass.logmanager.util.TestUtils.readFileContent; 24 | import static 
/**
 * Unit tests for {@link LogFileGroup}. The tests create real files on disk and rely on their
 * lastModified timestamps to establish ordering, which is why short sleeps are inserted between
 * file creations.
 */
@ExtendWith({MockitoExtension.class, GGExtension.class})
public class LogFileGroupTest {
    // Shared directory holding the log files under test; static, so it is cleared before each test.
    @TempDir
    static Path directoryPath;
    // Work directory handed to LogFileGroup.create for its internal bookkeeping.
    @TempDir
    private Path workDir;

    @BeforeEach
    void setup() {
        // directoryPath is a static @TempDir, so files left by a previous test must be deleted manually.
        Arrays.stream(directoryPath.toFile().listFiles()).forEach(File::delete);
    }


    /**
     * Creates a log file of the requested size inside the shared test directory.
     *
     * @param name     file name to create
     * @param byteSize size of the generated content in bytes
     * @return the created log file
     * @throws InterruptedException if the spacing sleep is interrupted
     * @throws IOException          if the file cannot be written
     */
    public LogFile arrangeLogFile(String name, int byteSize) throws InterruptedException, IOException {
        LogFile file = createLogFileWithSize(directoryPath.resolve(name).toUri(), byteSize);
        // Wait to avoid file's lastModified to be the same if called to fast
        TimeUnit.MILLISECONDS.sleep(100);
        return file;
    }

    @Test
    void GIVEN_log_files_THEN_find_the_active_file() throws IOException, InterruptedException,
            InvalidLogGroupException {
        LogFile file = new LogFile(directoryPath.resolve("greengrass_test.log_1").toUri());
        byte[] bytesArray = givenAStringOfSize(1024).getBytes(StandardCharsets.UTF_8);
        writeFile(file, bytesArray);

        //Intentionally sleep lazily here to differ the creation time of two files.
        TimeUnit.SECONDS.sleep(1);

        LogFile file2 = new LogFile(directoryPath.resolve("greengrass_test.log_2").toUri());
        byte[] bytesArray2 = givenAStringOfSize(1024).getBytes(StandardCharsets.UTF_8);
        writeFile(file2, bytesArray2);

        Pattern pattern = Pattern.compile("^greengrass_test.log\\w*$");
        ComponentLogConfiguration compLogInfo = ComponentLogConfiguration.builder()
                .directoryPath(directoryPath)
                .fileNameRegex(pattern).name("greengrass_test").build();
        Instant instant = Instant.EPOCH;
        LogFileGroup logFileGroup = LogFileGroup.create(compLogInfo, instant, workDir);

        // The most recently modified file in the group is considered the active one.
        assertEquals(2, logFileGroup.getLogFiles().size());
        assertFalse(logFileGroup.isActiveFile(file));
        assertTrue(logFileGroup.isActiveFile(file2));
    }


    @Test
    void GIVEN_fileWithNotEnoughBytes_WHEN_logGroupCreated_THEN_itIsExcluded() throws IOException,
            InvalidLogGroupException {
        // Given
        File rotatedFile = createLogFileWithSize(directoryPath.resolve("test.log.1").toUri(), 2048);
        createLogFileWithSize(directoryPath.resolve("test.log").toUri(), 500); // active file

        // When
        Pattern pattern = Pattern.compile("test.log\\w*");
        ComponentLogConfiguration compLogInfo = ComponentLogConfiguration.builder()
                .directoryPath(directoryPath)
                .fileNameRegex(pattern).name("greengrass_test").build();
        Instant instant = Instant.EPOCH;
        LogFileGroup logFileGroup = LogFileGroup.create(compLogInfo, instant, workDir);

        // Then - the small active file is excluded; only the rotated file remains in the group.
        assertEquals(1, logFileGroup.getLogFiles().size());
        LogFile logFile = logFileGroup.getLogFiles().get(0);
        assertEquals(readFileContent(rotatedFile), readFileContent(logFile));
    }

    @Test
    void GIVEN_last_processedTime_THEN_it_returns_the_unprocessed_files_correctly() throws IOException,
            InterruptedException, InvalidLogGroupException {
        // Given - four files created oldest (test.log.3) to newest (test.log). Files modified at or
        // before bLogFile's timestamp count as already processed.
        LogFile cLogFile = arrangeLogFile("test.log.3", 2048);
        LogFile bLogFile = arrangeLogFile("test.log.2", 2048);
        LogFile aLogFile = arrangeLogFile("test.log.1", 2048);
        LogFile activeFile = arrangeLogFile("test.log", 1024);
        Instant lastProcessedFileInstant = Instant.ofEpochMilli(bLogFile.lastModified());

        // When
        ComponentLogConfiguration config = ComponentLogConfiguration.builder()
                .directoryPath(directoryPath)
                .fileNameRegex(Pattern.compile("test.log\\w*")).name("greengrass_test")
                .build();
        LogFileGroup group = LogFileGroup.create(config, lastProcessedFileInstant, workDir);

        // Then - the two older files are reported processed, the two newer ones unprocessed.
        List<LogFile> processed = group.getProcessedLogFiles();
        assertEquals(2, processed.size());
        assertTrue(processed.stream().anyMatch(f -> f.getName().equals(cLogFile.getName())));
        assertTrue(processed.stream().anyMatch(f -> f.getName().equals(bLogFile.getName())));

        List<LogFile> unprocessed = group.getLogFiles();
        assertEquals(2, unprocessed.size());
        assertTrue(unprocessed.stream().anyMatch(f -> f.getName().equals(aLogFile.getName())));
        assertTrue(unprocessed.stream().anyMatch(f -> f.getName().equals(activeFile.getName())));
    }
}
# SPDX-License-Identifier: Apache-2.0

import sys
import xml.etree.ElementTree as ET
import re
import os.path

# Example Cobertura attributes this script emits:
# branch-rate="0.0" complexity="0.0" line-rate="1.0"
# branch="true" hits="1" number="86"

def find_lines(j_package, filename):
    """Return all <line> elements for a given source file in a package."""
    lines = list()
    sourcefiles = j_package.findall("sourcefile")
    for sourcefile in sourcefiles:
        if sourcefile.attrib.get("name") == os.path.basename(filename):
            lines = lines + sourcefile.findall("line")
    return lines

def line_is_after(jm, start_line):
    """True when the method element jm starts strictly after start_line."""
    return int(jm.attrib.get('line', 0)) > start_line

def method_lines(jmethod, jmethods, jlines):
    """Filter the lines from the given set of jlines that apply to the given jmethod."""
    start_line = int(jmethod.attrib.get('line', 0))
    larger = list(int(jm.attrib.get('line', 0)) for jm in jmethods if line_is_after(jm, start_line))
    # A method ends where the next method starts; the last method runs to a sentinel line number.
    end_line = min(larger) if len(larger) else 99999999

    for jline in jlines:
        if start_line <= int(jline.attrib['nr']) < end_line:
            yield jline

def convert_lines(j_lines, into):
    """Convert the JaCoCo <line> elements into Cobertura <line> elements, add them under the given element."""
    c_lines = ET.SubElement(into, 'lines')
    for jline in j_lines:
        mb = int(jline.attrib['mb'])  # missed branches
        cb = int(jline.attrib['cb'])  # covered branches
        ci = int(jline.attrib['ci'])  # covered instructions

        cline = ET.SubElement(c_lines, 'line')
        cline.set('number', jline.attrib['nr'])
        cline.set('hits', '1' if ci > 0 else '0')  # Probably not true but no way to know from JaCoCo XML file

        if mb + cb > 0:
            percentage = str(int(100 * (float(cb) / (float(cb) + float(mb))))) + '%'
            cline.set('branch', 'true')
            cline.set('condition-coverage', percentage + ' (' + str(cb) + '/' + str(cb + mb) + ')')

            cond = ET.SubElement(ET.SubElement(cline, 'conditions'), 'condition')
            cond.set('number', '0')
            cond.set('type', 'jump')
            cond.set('coverage', percentage)
        else:
            cline.set('branch', 'false')

def guess_filename(path_to_class):
    """Map a JaCoCo class path (possibly with $Inner suffixes) to its source file name."""
    m = re.match('([^$]*)', path_to_class)
    return (m.group(1) if m else path_to_class) + '.java'

def add_counters(source, target):
    """Copy the line/branch/complexity counters from a JaCoCo element onto a Cobertura element."""
    target.set('line-rate', counter(source, 'LINE'))
    target.set('branch-rate', counter(source, 'BRANCH'))
    target.set('complexity', counter(source, 'COMPLEXITY', sum))

def fraction(covered, missed):
    """Covered ratio; 0.0 when there is nothing to count (guards against ZeroDivisionError)."""
    total = covered + missed
    return covered / total if total else 0.0

def sum(covered, missed):
    """Total count; deliberately shadows the builtin so counter() can take either operation."""
    return covered + missed

def counter(source, type, operation=fraction):
    """Return str(operation(covered, missed)) for the JaCoCo counter of the given type, or '0.0' if absent."""
    cs = source.findall('counter')
    c = next((ct for ct in cs if ct.attrib.get('type') == type), None)

    if c is not None:
        covered = float(c.attrib['covered'])
        missed = float(c.attrib['missed'])

        return str(operation(covered, missed))
    else:
        return '0.0'

def convert_method(j_method, j_lines):
    """Convert one JaCoCo <method> element into a Cobertura <method> element."""
    c_method = ET.Element('method')
    c_method.set('name', j_method.attrib['name'])
    c_method.set('signature', j_method.attrib['desc'])

    add_counters(j_method, c_method)
    convert_lines(j_lines, c_method)

    return c_method

def convert_class(j_class, j_package):
    """Convert one JaCoCo <class> element into a Cobertura <class> element."""
    c_class = ET.Element('class')
    c_class.set('name', j_class.attrib['name'].replace('/', '.'))
    c_class.set('filename', guess_filename(j_class.attrib['name']))

    all_j_lines = list(find_lines(j_package, c_class.attrib['filename']))

    c_methods = ET.SubElement(c_class, 'methods')
    all_j_methods = list(j_class.findall('method'))
    for j_method in all_j_methods:
        j_method_lines = method_lines(j_method, all_j_methods, all_j_lines)
        c_methods.append(convert_method(j_method, j_method_lines))

    add_counters(j_class, c_class)
    convert_lines(all_j_lines, c_class)

    return c_class

def convert_package(j_package):
    """Convert one JaCoCo <package> element into a Cobertura <package> element."""
    c_package = ET.Element('package')
    c_package.attrib['name'] = j_package.attrib['name'].replace('/', '.')

    c_classes = ET.SubElement(c_package, 'classes')
    for j_class in j_package.findall('class'):
        # Only output the class if it has methods to be covered
        if j_class.findall('method'):
            c_classes.append(convert_class(j_class, j_package))

    add_counters(j_package, c_package)

    return c_package

def convert_root(source, target, source_roots):
    """Build the Cobertura <coverage> root from the JaCoCo <report> root."""
    # JaCoCo stores the session start in epoch millis; Cobertura expects integer epoch seconds.
    # Floor division is required: under Python 3 the previous '/ 1000' produced a float string.
    target.set('timestamp', str(int(source.find('sessioninfo').attrib['start']) // 1000))

    sources = ET.SubElement(target, 'sources')
    for s in source_roots:
        ET.SubElement(sources, 'source').text = s

    packages = ET.SubElement(target, 'packages')
    for package in source.findall('package'):
        packages.append(convert_package(package))

    add_counters(source, target)

def jacoco2cobertura(filename, source_roots):
    """Read a JaCoCo XML report (or stdin when filename is '-') and print the Cobertura XML."""
    if filename == '-':
        root = ET.fromstring(sys.stdin.read())
    else:
        tree = ET.parse(filename)
        root = tree.getroot()

    into = ET.Element('coverage')
    convert_root(root, into, source_roots)
    print('<?xml version="1.0" ?>')
    print(ET.tostring(into, encoding='unicode'))

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: cover2cover.py FILENAME [SOURCE_ROOTS]")
        sys.exit(1)

    filename = sys.argv[1]
    # Explicit list default (the old "'.'" string only worked because iterating it yields '.').
    source_roots = sys.argv[2:] if 2 < len(sys.argv) else ['.']

    jacoco2cobertura(filename, source_roots)
/**
 * Tests for {@link DiskSpaceManagementService}. Log files are created on disk with spaced
 * lastModified timestamps so the service's deletion order (oldest, already-processed files first)
 * is deterministic.
 */
@ExtendWith({MockitoExtension.class, GGExtension.class})
class DiskSpaceManagementServiceTest extends GGServiceTestUtil {
    // Directory the component's log files are written into.
    @TempDir
    private Path directoryPath;

    // Work directory handed to LogFileGroup.create for its internal bookkeeping.
    @TempDir
    private Path workDirPath;

    /**
     * Builds a LogFileGroup over the test directory.
     *
     * @param pattern                  file-name pattern selecting the group's files
     * @param lastProcessedFileInstant files modified at or before this instant count as processed
     * @param diskSpaceLimitBytes      maximum bytes the group may occupy on disk
     * @return the created group
     * @throws InvalidLogGroupException if the group cannot be created
     */
    public LogFileGroup arrangeLogGroup(Pattern pattern, Instant lastProcessedFileInstant, long diskSpaceLimitBytes)
            throws InvalidLogGroupException {
        ComponentLogConfiguration config = ComponentLogConfiguration.builder()
                .directoryPath(directoryPath)
                .fileNameRegex(pattern).name("greengrass_test")
                .diskSpaceLimit(diskSpaceLimitBytes)
                .build();
        return LogFileGroup.create(config, lastProcessedFileInstant, workDirPath);
    }

    /**
     * Creates a log file of the requested size inside the test directory.
     *
     * @param name     file name to create
     * @param byteSize size of the generated content in bytes
     * @return the created log file
     * @throws InterruptedException if the spacing sleep is interrupted
     * @throws IOException          if the file cannot be written
     */
    public LogFile arrangeLogFile(String name, int byteSize) throws InterruptedException, IOException {
        LogFile file = createLogFileWithSize(directoryPath.resolve(name).toUri(), byteSize);
        // Wait to avoid file's lastModified to be the same if called to fast
        TimeUnit.MILLISECONDS.sleep(100);
        return file;
    }

    @Test
    void GIVEN_log_files_WHEN_max_disk_usage_exceeded_THEN_files_removed() throws IOException, InvalidLogGroupException,
            InterruptedException {
        // Given - 3072 bytes on disk against a 1024-byte limit; the rotated file is already processed.
        LogFile aLogFile = arrangeLogFile("test.log.1", 2048);
        arrangeLogFile("test.log", 1024);
        Instant lastProcessedInstant = Instant.ofEpochMilli(aLogFile.lastModified());

        LogFileGroup group = arrangeLogGroup(
                Pattern.compile("test.log\\w*"), lastProcessedInstant, 1024L);

        // When
        DiskSpaceManagementService service = new DiskSpaceManagementService();
        service.freeDiskSpace(group);

        // Then - the processed rotated file was deleted to honor the limit.
        assertEquals(1, group.getLogFiles().size());
        assertTrue(Files.notExists(aLogFile.toPath()));
    }

    @Test
    void GIVEN_log_files_WHEN_max_disk_usage_exceeded_THEN_only_unprocessed_files_are_deletable()
            throws IOException, InvalidLogGroupException, InterruptedException {
        // Given - four files created oldest (test.log.3) to newest (test.log); files up to
        // bLogFile's timestamp are processed and therefore eligible for deletion.
        LogFile cLogFile = arrangeLogFile("test.log.3", 2048);
        LogFile bLogFile = arrangeLogFile("test.log.2", 2048);
        LogFile aLogFile = arrangeLogFile("test.log.1", 2048);
        LogFile activeFile = arrangeLogFile("test.log", 1024);
        Instant lastProcessedFileInstant = Instant.ofEpochMilli(bLogFile.lastModified());
        LogFileGroup group = arrangeLogGroup(Pattern.compile("test.log\\w*"), lastProcessedFileInstant, 1024L);

        // When
        DiskSpaceManagementService service = new DiskSpaceManagementService();
        service.freeDiskSpace(group);

        // Then - only the processed files were deleted; unprocessed and active files survive even
        // though the group still exceeds the limit.
        assertEquals(2, group.getLogFiles().size());
        assertTrue(Files.notExists(cLogFile.toPath()));
        assertTrue(Files.notExists(bLogFile.toPath()));
        assertTrue(Files.exists(aLogFile.toPath()));
        assertTrue(Files.exists(activeFile.toPath()));
    }

    @Test
    void GIVEN_log_files_WHEN_max_disk_usage_exceeded_THEN_active_file_is_not_removed()
            throws IOException, InvalidLogGroupException, InterruptedException {
        // Given - a zero-byte limit that every file exceeds; the only file present is the active one.
        LogFile activeFile = arrangeLogFile("test.log", 1024);
        Instant lastProcessedFileInstant = Instant.EPOCH;
        LogFileGroup group = arrangeLogGroup(Pattern.compile("test.log\\w*"), lastProcessedFileInstant, 0L);

        // When
        DiskSpaceManagementService service = new DiskSpaceManagementService();
        service.freeDiskSpace(group);

        // Then - the active file is never deleted, regardless of the limit.
        assertEquals(1, group.getLogFiles().size());
        assertTrue(Files.exists(activeFile.toPath()));
    }

    @Test
    void GIVEN_file_was_delete_by_externally_WHEN_freeing_space_THEN_it_does_not_fail()
            throws IOException, InvalidLogGroupException, InterruptedException {
        // Given - a processed file that disappears after the group snapshot was taken.
        LogFile aLogFile = arrangeLogFile("test.log.1", 2048);
        arrangeLogFile("test.log", 1024);
        Instant lastProcessedFileInstant = Instant.ofEpochMilli(aLogFile.lastModified());
        LogFileGroup group = arrangeLogGroup(Pattern.compile("test.log\\w*"), lastProcessedFileInstant, 0L);

        aLogFile.delete(); // Deleted externally
        assertTrue(Files.notExists(aLogFile.toPath()));

        // When - freeing space must tolerate the already-missing file.
        DiskSpaceManagementService service = new DiskSpaceManagementService();
        service.freeDiskSpace(group);

        // Then
        assertEquals(1, group.getLogFiles().size());
    }

    @Test
    void GIVEN_file_rotates_WHEN_freeing_space_THEN_it_deletes_the_correct_file()
            throws IOException, InvalidLogGroupException, InterruptedException {
        // Given - a processed rotated file and an active file, snapshotted before rotation.
        LogFile aLogFile = arrangeLogFile("test.log.1", 2048);
        LogFile prevActive = arrangeLogFile("test.log", 1024);
        Instant lastProcessedFileInstant = Instant.ofEpochMilli(aLogFile.lastModified());
        LogFileGroup group = arrangeLogGroup(Pattern.compile("test.log\\w*"), lastProcessedFileInstant, 0L);

        // When
        File newActive = rotateFilesByRenamingThem(prevActive, aLogFile); // Files rotate before diskSpace runs
        assertTrue(Files.exists(directoryPath.resolve("test.log.2")));

        DiskSpaceManagementService service = new DiskSpaceManagementService();
        service.freeDiskSpace(group);

        // Then - the processed content (now under test.log.2) was deleted; the renamed files that
        // hold unprocessed content survive.
        assertEquals(directoryPath.resolve("test.log"), newActive.toPath());
        assertEquals(1, group.getLogFiles().size());
        assertTrue(Files.notExists(directoryPath.resolve("test.log.2")));
        assertTrue(Files.exists(directoryPath.resolve("test.log.1")));
        assertTrue(Files.exists(directoryPath.resolve("test.log")));
    }
}
@SuppressWarnings({"checkstyle:MemberName", "checkstyle:JavadocParagraph",
        "checkstyle:JavadocMethod", "checkstyle:VariableDeclarationUsageDistance",
        "PMD.NullAssignment", "PMD.UselessParentheses", "PMD.AvoidBranchingStatementAsLastInLoop"})
public class PositionTrackingBufferedReader extends Reader {
    public static final String NOT_IMPLEMENTED = "Not implemented";
    // Running count of characters consumed so far. NOTE(review): this counts chars, not bytes; the two
    // only match when every character in the underlying stream encodes to a single byte — confirm for
    // multi-byte encodings.
    private long position = 0;

    // Underlying reader; set to null once close() has been called.
    private Reader in;

    // Internal character buffer, the number of valid chars in it, and the index of the next char to read.
    private char[] cb;
    private int nChars;
    private int nextChar;

    /** If the next character is a line feed, skip it. */
    private boolean skipLF = false;
    /** The skipLF flag when the mark was set. */

    private static int defaultCharBufferSize = 8192;
    private static int defaultExpectedLineLength = 80;

    /**
     * Creates a buffering character-input stream that uses an input buffer of
     * the specified size.
     *
     * @param in A Reader
     * @param sz Input-buffer size
     *
     * @exception IllegalArgumentException If {@code sz <= 0}
     */
    public PositionTrackingBufferedReader(Reader in, int sz) {
        super(in);
        if (sz <= 0) {
            throw new IllegalArgumentException("Buffer size <= 0");
        }
        this.in = in;
        cb = new char[sz];
        nextChar = nChars = 0;
    }

    /**
     * Creates a buffering character-input stream that uses a default-sized
     * input buffer.
     *
     * @param in A Reader
     */
    public PositionTrackingBufferedReader(Reader in) {
        this(in, defaultCharBufferSize);
    }

    /** Checks to make sure that the stream has not been closed. */
    private void ensureOpen() throws IOException {
        if (in == null) {
            throw new IOException("Stream closed");
        }
    }

    @Override
    public int read() throws IOException {
        throw new UnsupportedOperationException(NOT_IMPLEMENTED);
    }

    @Override
    public int read(char[] cbuf, int off, int len) throws IOException {
        throw new UnsupportedOperationException(NOT_IMPLEMENTED);
    }

    /**
     * Refills the internal buffer from the underlying reader, retrying zero-length reads. On EOF
     * (read returns -1) the buffer indices are intentionally left unchanged so that readLine()
     * observes nextChar >= nChars and reports end-of-stream.
     */
    private void fill() throws IOException {
        int n;
        do {
            n = in.read(cb, 0, cb.length);
        } while (n == 0);
        if (n > 0) {
            nChars = n;
            nextChar = 0;
        }
    }

    /**
     * Reads a line of text. A line is considered to be terminated by any one
     * of a line feed ('\n'), a carriage return ('\r'), or a carriage return
     * followed immediately by a linefeed.
     *
     * @return A String containing the contents of the line, not including
     *         any line-termination characters, or null if the end of the
     *         stream has been reached
     *
     * @exception IOException If an I/O error occurs
     *
     * @see java.nio.file.Files#readAllLines
     */
    public String readLine() throws IOException {
        StringBuilder s = null;
        int startChar;

        synchronized (lock) {
            ensureOpen();
            boolean omitLF = skipLF;

            while (true) {
                if (nextChar >= nChars) {
                    fill();
                }
                if (nextChar >= nChars) { /* EOF */
                    // fill() made no progress: return any partial line accumulated so far, else null.
                    if (s != null && s.length() > 0) {
                        return s.toString();
                    } else {
                        return null;
                    }
                }
                boolean eol = false;
                char c = 0;
                int i;

                /* Skip a leftover '\n', if necessary */
                // Note: no position++ here — the '\n' of a CRLF pair was already counted when the
                // '\r' was seen below.
                if (omitLF && (cb[nextChar] == '\n')) {
                    nextChar++;
                }
                skipLF = false;
                omitLF = false;

                // Scan the buffered chars for a line terminator, counting each char consumed.
                for (i = nextChar; i < nChars; i++) {
                    c = cb[i];
                    // Greengrass-added position tracking
                    position++;
                    if ((c == '\n') || (c == '\r')) {
                        eol = true;
                        break;
                    }
                }

                startChar = nextChar;
                nextChar = i;

                if (eol) {
                    String str;
                    if (s == null) {
                        // Common case: the whole line fit in one buffer pass — no StringBuilder needed.
                        str = new String(cb, startChar, i - startChar);
                    } else {
                        s.append(cb, startChar, i - startChar);
                        str = s.toString();
                    }
                    nextChar++;
                    if (c == '\r') {
                        skipLF = true;
                        // Greengrass-added position tracking
                        // Counts the '\n' expected to follow the '\r' now (the skip branch above does
                        // not increment). NOTE(review): a bare '\r' terminator with no following '\n'
                        // would be over-counted by one — confirm inputs always use LF or CRLF endings.
                        position++;
                    }
                    return str;
                }

                // No terminator in this buffer: stash the fragment and loop to refill.
                if (s == null) {
                    s = new StringBuilder(defaultExpectedLineLength);
                }
                s.append(cb, startChar, i - startChar);
            }
        }
    }

    @Override
    public long skip(long n) throws IOException {
        throw new UnsupportedOperationException(NOT_IMPLEMENTED);
    }

    @Override
    public boolean ready() throws IOException {
        throw new UnsupportedOperationException(NOT_IMPLEMENTED);
    }

    @Override
    public boolean markSupported() {
        return false;
    }

    @Override
    public void mark(int readAheadLimit) throws IOException {
        throw new UnsupportedOperationException(NOT_IMPLEMENTED);
    }

    @Override
    public void reset() throws IOException {
        throw new UnsupportedOperationException(NOT_IMPLEMENTED);
    }

    @Override
    public void close() throws IOException {
        synchronized (lock) {
            if (in == null) {
                return;
            }
            try {
                in.close();
            } finally {
                // Null out references so subsequent calls fail fast in ensureOpen() and the buffer
                // can be garbage-collected.
                in = null;
                cb = null;
            }
        }
    }

    /** Returns the number of characters consumed so far (plus any offset set via setInitialPosition). */
    public long position() {
        return position;
    }

    /**
     * Set the starting position counter. This doesn't actually change anything in the stream, ie. this method will
     * not skip x bytes.
     *
     * @param startPosition the initial position to begin the counter.
     */
    public void setInitialPosition(long startPosition) {
        position = startPosition;
    }
}
ScenarioContext scenarioContext; 46 | 47 | 48 | /** 49 | * Arranges some log files with content on the /logs folder for a component 50 | * to simulate a devices where logs have already bee written. 51 | * 52 | * @param platform number of log files to write. 53 | * @param testContext name of the component. 54 | * @param scenarioContext name of the component. 55 | */ 56 | @Inject 57 | public FileSteps(Platform platform, TestContext testContext, ScenarioContext scenarioContext) { 58 | this.platform = platform; 59 | this.testContext = testContext; 60 | this.scenarioContext = scenarioContext; 61 | } 62 | 63 | private static List generateRandomMessages(int n, int length) { 64 | List msgs = new ArrayList<>(); 65 | for (int i = 0; i < n; i++) { 66 | // TODO: Improves this as this is not how the logger writes the logs 67 | msgs.add(RANDOM_STRING_GENERATOR.generate(length)); 68 | } 69 | return msgs; 70 | } 71 | 72 | private static List getComponentLogFiles(String componentName, Path logsDirectory) { 73 | return Arrays.stream(logsDirectory.toFile().listFiles()) 74 | .filter(File::isFile) 75 | .filter(file -> file.getName() 76 | .startsWith(componentName)) 77 | .sorted(Comparator.comparingLong(File::lastModified)) 78 | .collect(Collectors.toList()); 79 | } 80 | 81 | /** 82 | * Arranges some log files with content on the /logs folder for a component 83 | * to simulate a devices where logs have already bee written. 84 | * 85 | * @param numFiles number of log files to write. 86 | * @param componentName name of the component. 87 | * @throws IOException thrown when file fails to be written. 
88 | */ 89 | @And("{int} temporary rotated log files for component {word} have been created") 90 | public void arrangeComponentLogFiles(int numFiles, String componentName) throws IOException { 91 | Path logsDirectory = testContext.installRoot().resolve("logs"); 92 | LOGGER.info("Writing {} log files into {}", numFiles, logsDirectory.toString()); 93 | if (!platform.files().exists(logsDirectory)) { 94 | throw new IllegalStateException("No logs directory"); 95 | } 96 | scenarioContext.put(componentName + "LogDirectory", logsDirectory.toString()); 97 | String filePrefix = "greengrass"; 98 | if (!Objects.equals("aws.greengrass.Nucleus", componentName)) { 99 | filePrefix = componentName; 100 | } 101 | String fileName = ""; 102 | for (int i = 0; i < numFiles; i++) { 103 | fileName = String.format("%s_%s.log", filePrefix, UUID.randomUUID()); 104 | createFileAndWriteData(logsDirectory, fileName, false); 105 | } 106 | } 107 | 108 | @Given("I create a log directory for component called {word}") 109 | public void arrangeLogDirectory(String directoryAlias) { 110 | Path logsDirectory = testContext.installRoot().resolve("logs"); 111 | File componentLogsDirectory = new File(logsDirectory.toFile().getAbsolutePath() + "/" + UUID.randomUUID()); 112 | componentLogsDirectory.mkdirs(); 113 | LOGGER.info("Log directory alias {} referencing {}", directoryAlias, componentLogsDirectory.getAbsolutePath()); 114 | scenarioContext.put(directoryAlias, componentLogsDirectory.getAbsolutePath()); 115 | } 116 | 117 | private void createFileAndWriteData(Path tempDirectoryPath, String fileNamePrefix, boolean isTemp) throws 118 | IOException { 119 | Path filePath; 120 | if (isTemp) { 121 | filePath = Files.createTempFile(tempDirectoryPath, fileNamePrefix, ""); 122 | } else { 123 | filePath = Files.createFile(tempDirectoryPath.resolve(fileNamePrefix)); 124 | } 125 | File file = filePath.toFile(); 126 | List randomMessages = generateRandomMessages(10, 1024); 127 | for (String messageBytes : 
randomMessages) { 128 | addDataToFile(messageBytes, file.toPath()); 129 | } 130 | } 131 | 132 | private void addDataToFile(String data, Path filePath) throws IOException { 133 | try (BufferedWriter writer = Files.newBufferedWriter(filePath, StandardOpenOption.APPEND)) { 134 | writer.write(data + "\r\n"); 135 | } 136 | } 137 | 138 | /** 139 | * Arranges some log files with content on the /logs folder for a component 140 | * to simulate a devices where logs have already bee written. 141 | * 142 | * @param componentName name of the component. 143 | * @param nfiles number of log files to write. 144 | */ 145 | @Then("I verify that {int} log files for component {word} are still available") 146 | public void verifyRotatedFilesAvailable(int nfiles, String componentName) { 147 | Path logsDirectory = testContext.installRoot().resolve("logs"); 148 | if (!platform.files().exists(logsDirectory)) { 149 | throw new IllegalStateException("No logs directory"); 150 | } 151 | List componentFiles = getComponentLogFiles(componentName, logsDirectory); 152 | assertEquals(nfiles, componentFiles.size()); 153 | } 154 | 155 | /** 156 | * Arranges some log files with content on the /logs folder for a component 157 | * to have already bee written simulate a devices where logs. 158 | * 159 | * @param componentName name of the component. 
160 | */ 161 | @And("I verify the rotated files are deleted and that the active log file is present for component {word} on directory {word}") 162 | public void verifyActiveFile(String componentName, String directoryAlias) { 163 | Path logsDirectory = Paths.get(scenarioContext.get(directoryAlias)); 164 | 165 | if (!platform.files().exists(logsDirectory)) { 166 | throw new IllegalStateException("No logs directory"); 167 | } 168 | List sortedFileList = getComponentLogFiles(componentName, logsDirectory); 169 | assertEquals(1, sortedFileList.size()); 170 | } 171 | } -------------------------------------------------------------------------------- /uat/testing-features/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 8 | 9 | 11 | 4.0.0 12 | com.aws.greengrass 13 | log-manager-testing-features 14 | 1.0-SNAPSHOT 15 | 16 | 1.8 17 | 1.8 18 | 2.17.254 19 | false 20 | 21 | 22 | 23 | greengrass-common 24 | greengrass common 25 | 26 | https://d2jrmugq4soldf.cloudfront.net/snapshots 27 | 28 | 29 | 30 | 31 | com.aws.greengrass 32 | aws-greengrass-testing-standalone 33 | 1.2.0-SNAPSHOT 34 | compile 35 | 36 | 37 | software.amazon.awssdk 38 | cloudwatch 39 | ${aws.sdk.version} 40 | 41 | 42 | software.amazon.awssdk 43 | cloudwatchlogs 44 | ${aws.sdk.version} 45 | 46 | 47 | org.projectlombok 48 | lombok 49 | 1.18.22 50 | provided 51 | 52 | 53 | com.google.auto.service 54 | auto-service 55 | 1.0.1 56 | 57 | 58 | org.immutables 59 | value 60 | 2.9.2 61 | provided 62 | 63 | 64 | org.apache.commons 65 | commons-text 66 | 1.10.0 67 | 68 | 69 | com.aws.greengrass 70 | logging 71 | 2.1.0-SNAPSHOT 72 | 73 | 74 | org.junit.jupiter 75 | junit-jupiter-engine 76 | 5.6.2 77 | 78 | 79 | 80 | 81 | 82 | org.apache.maven.plugins 83 | maven-antrun-plugin 84 | 3.0.0 85 | 86 | 87 | copy-artifact-to-classpath 88 | process-classes 89 | 90 | run 91 | 92 | 93 | 94 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | maven-resources-plugin 103 | 2.6 104 | 105 | 106 | 
package com.aws.greengrass.logmanager.model;

import com.aws.greengrass.logmanager.util.TestUtils;
import com.aws.greengrass.testcommons.testutilities.GGExtension;
import com.aws.greengrass.util.Utils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.junit.jupiter.MockitoExtension;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

import static com.aws.greengrass.logmanager.model.LogFile.HASH_VALUE_OF_EMPTY_STRING;
import static com.aws.greengrass.logmanager.util.TestUtils.givenAStringOfSize;
import static com.aws.greengrass.logmanager.util.TestUtils.readFileContent;
import static com.aws.greengrass.logmanager.util.TestUtils.rotateFilesByRenamingThem;
import static com.aws.greengrass.logmanager.util.TestUtils.writeFile;
import static com.aws.greengrass.util.Digest.calculate;
import static org.junit.jupiter.api.Assertions.assertEquals;


/**
 * Tests for {@link LogFile}: head-of-file hashing semantics and hardlink-based
 * rotation tracking/deletion.
 */
@ExtendWith({MockitoExtension.class, GGExtension.class})
public class LogFileTest {

    @TempDir
    static Path directoryPath;
    // Must match LogFile.bytesNeeded: the number of head bytes used for hashing.
    private final static int DEFAULT_BYTES_FOR_DIGEST_NUM = 1024;

    @BeforeEach
    void setup() {
        // The temp dir is class-scoped; clear leftovers between tests.
        Arrays.stream(directoryPath.toFile().listFiles()).forEach(File::delete);
    }

    @Test
    void GIVEN_empty_file_WHEN_calculate_file_hash_THEN_we_get_empty_hash() throws IOException {
        LogFile file = new LogFile(directoryPath.resolve("greengrass_test.log").toUri());
        byte[] bytesArray = givenAStringOfSize(0).getBytes(StandardCharsets.UTF_8);
        writeFile(file, bytesArray);
        String fileHash = file.hashString();
        // JUnit assertEquals takes (expected, actual).
        assertEquals(HASH_VALUE_OF_EMPTY_STRING, fileHash);
    }

    @Test
    void GIVEN_log_file_with_less_than_target_lines_in_one_line_WHEN_calculate_file_hash_THEN_we_get_empty_hash()
            throws IOException {
        LogFile file = new LogFile(directoryPath.resolve("greengrass_test.log").toUri());
        // A single line shorter than the digest window and without a newline is not hashable yet.
        byte[] bytesArray = givenAStringOfSize(DEFAULT_BYTES_FOR_DIGEST_NUM - 100).getBytes(StandardCharsets.UTF_8);
        writeFile(file, bytesArray);
        String fileHash = file.hashString();
        assertEquals(HASH_VALUE_OF_EMPTY_STRING, fileHash);
    }

    @Test
    void GIVEN_log_file_with_equal_to_target_lines_in_one_line_WHEN_calculate_file_hash_THEN_we_get_hash()
            throws IOException, NoSuchAlgorithmException {
        LogFile file = new LogFile(directoryPath.resolve("greengrass_test.log").toUri());
        byte[] bytesArray = givenAStringOfSize(DEFAULT_BYTES_FOR_DIGEST_NUM).getBytes(StandardCharsets.UTF_8);
        writeFile(file, bytesArray);
        String fileHash = file.hashString();
        String msg = new String(bytesArray);
        assertEquals(calculate(msg), fileHash);
    }

    @Test
    void GIVEN_log_file_with_more_than_target_lines_in_one_line_WHEN_calculate_file_hash_THEN_we_get_hash()
            throws IOException, NoSuchAlgorithmException {
        LogFile file = new LogFile(directoryPath.resolve("greengrass_test.log").toUri());
        byte[] bytesArray = givenAStringOfSize(DEFAULT_BYTES_FOR_DIGEST_NUM + 100).getBytes(StandardCharsets.UTF_8);
        writeFile(file, bytesArray);
        String fileHash = file.hashString();
        // Only the first DEFAULT_BYTES_FOR_DIGEST_NUM bytes are hashed.
        String msg = new String(bytesArray, 0, DEFAULT_BYTES_FOR_DIGEST_NUM);
        assertEquals(calculate(msg), fileHash);
    }

    @Test
    void GIVEN_log_file_with_less_than_target_lines_but_two_lines_WHEN_calculate_file_hash_THEN_we_get_hash()
            throws IOException, NoSuchAlgorithmException {
        LogFile file = new LogFile(directoryPath.resolve("greengrass_test.log").toUri());
        // create a string as an entire line; a complete line is hashable even under the byte target
        StringBuilder builder = new StringBuilder();
        builder.append(givenAStringOfSize(DEFAULT_BYTES_FOR_DIGEST_NUM - 100)).append(System.lineSeparator());
        writeFile(file, builder.toString().getBytes(StandardCharsets.UTF_8));
        String fileHash = file.hashString();
        assertEquals(calculate(builder.toString()), fileHash);
    }

    @Test
    void GIVEN_log_file_with_more_than_target_lines_but_two_lines_WHEN_calculate_file_hash_THEN_we_get_hash()
            throws IOException, NoSuchAlgorithmException {
        LogFile file = new LogFile(directoryPath.resolve("greengrass_test.log").toUri());
        // create a string as an entire line; only the first complete line contributes to the hash
        StringBuilder builder = new StringBuilder();
        builder.append(givenAStringOfSize(DEFAULT_BYTES_FOR_DIGEST_NUM - 100)).append(System.lineSeparator());
        String expectedHash = calculate(builder.toString());
        builder.append(givenAStringOfSize(100));
        writeFile(file, builder.toString().getBytes(StandardCharsets.UTF_8));
        String fileHash = file.hashString();
        assertEquals(expectedHash, fileHash);
    }

    @Test
    void GIVEN_logFileTrackingHardlink_WHEN_trackedFileRotates_THEN_itReadsTheCorrectContents() throws IOException {
        // Given

        Path testPath = directoryPath.resolve("itDeletesTheOriginalFile");
        Utils.createPaths(testPath.resolve("hardlinks"));

        File file = TestUtils.createFileWithContent(
                testPath.resolve("test.log"), "rotated");
        LogFile logFile = LogFile.of(file, testPath.resolve("hardlinks"));

        // When

        rotateFilesByRenamingThem(new File[]{file});

        // Assert: the hardlink still reads the pre-rotation contents.

        assertEquals("rotated", readFileContent(logFile));
    }

    @Test
    void GIVEN_logFileTrackingHardlink_WHEN_trackedFileRotates_THEN_itDeletesTheOriginalFile() throws IOException {
        // Given

        Utils.createPaths(directoryPath.resolve("hardlinks"));
        File file = TestUtils.createFileWithContent(
                directoryPath.resolve("test.log"), "rotated");
        LogFile logFile = LogFile.of(file, directoryPath.resolve("hardlinks"));

        // When

        rotateFilesByRenamingThem(new File[]{file});
        logFile.delete();

        // Assert: only the new (rotated-in) file at the original path remains.

        File[] directoryFiles = Arrays.stream(directoryPath.toFile().listFiles())
                .filter(File::isFile)
                .toArray(File[]::new);
        assertEquals(1, directoryFiles.length);
        File remainingFile = directoryFiles[0];
        assertEquals(file.toPath(), remainingFile.toPath());
        Utils.deleteFileRecursively(directoryPath.resolve("hardlinks").toFile());
    }

    /**
     * This scenario happens if the customer has a setup where they are logging files on a different volume and the
     * rotation policy rotates logs reusing existing rotated file names.
     */
    @Test
    void GIVEN_logFileTrackingRegularFile_WHEN_trackedFileRotates_THEN_itDeletesTheWrongFile() throws IOException {
        // Given

        File file = TestUtils.createFileWithContent(
                directoryPath.resolve("test.log"), "rotated");
        LogFile logFile = LogFile.of(file);

        // When

        rotateFilesByRenamingThem(new File[]{file});
        logFile.delete();

        // Assert: without a hardlink, delete() removes the file now at the original
        // path, leaving the rotated copy behind — the documented wrong-file behavior.

        File[] directoryFiles = Arrays.stream(directoryPath.toFile().listFiles())
                .filter(File::isFile)
                .toArray(File[]::new);
        assertEquals(1, directoryFiles.length);

        File remainingFile = directoryFiles[0];
        assertEquals(directoryPath.resolve("test.log.1"), remainingFile.toPath());
    }
}
serialVersionUID for class extends Serializable class 28 | private static final long serialVersionUID = 123; 29 | private static final Logger logger = LogManager.getLogger(LogManagerService.class); 30 | public static final int bytesNeeded = 1024; 31 | public static final String HASH_VALUE_OF_EMPTY_STRING = ""; 32 | private final String sourcePath; 33 | private String hash = HASH_VALUE_OF_EMPTY_STRING; 34 | 35 | public LogFile(Path sourcePath, Path hardLinkPath) { 36 | super(hardLinkPath.toString()); 37 | this.sourcePath = sourcePath.toString(); 38 | } 39 | 40 | public LogFile(URI uri) { 41 | super(uri); 42 | this.sourcePath = uri.getPath(); 43 | } 44 | 45 | /** 46 | * Convert the file to LogFile. 47 | * 48 | * @param file The file to be converted. 49 | */ 50 | public static LogFile of(File file) { 51 | return new LogFile(file.toURI()); 52 | } 53 | 54 | /** 55 | * Convert the file to LogFile. 56 | * 57 | * @param sourceFile The file to be converted. 58 | * @param hardLinkDirectory path where the hardlink should be stored 59 | * @throws IOException if can't find the source path 60 | */ 61 | public static LogFile of(File sourceFile, Path hardLinkDirectory) throws IOException { 62 | // Why do we need this? - Hardlinks can't get the path of their source file. We need to keep track of which path 63 | // it was originally created with and in case the file on that path changes we need to scan the directory to 64 | // check for the inode that matches the hardlink to get the correct path. 65 | Path destinationPath = hardLinkDirectory.resolve(sourceFile.getName()); 66 | Path sourcePath = sourceFile.toPath(); 67 | Files.createLink(destinationPath, sourcePath); 68 | return new LogFile(sourcePath, destinationPath); 69 | } 70 | 71 | /** 72 | * Read target bytes from the file. 73 | * 74 | * @return read byte array. 
75 | */ 76 | private String readBytesToString() { 77 | byte[] bytesReadArray = new byte[bytesNeeded]; 78 | int bytesRead; 79 | try (InputStream r = Files.newInputStream(this.toPath())) { 80 | bytesRead = r.read(bytesReadArray); 81 | String bytesReadString = new String(bytesReadArray, StandardCharsets.UTF_8); 82 | // if there is an entire line before 1KB, we hash the line; Otherwise, we hash 1KB to prevent super long 83 | // single line. 84 | if (bytesReadString.indexOf('\n') > -1) { 85 | return bytesReadString.substring(0, bytesReadString.indexOf('\n') + 1); 86 | } 87 | if (bytesRead >= bytesNeeded) { 88 | return bytesReadString; 89 | } 90 | } catch (FileNotFoundException e) { 91 | // The file may be deleted as expected. 92 | logger.atDebug().cause(e).log("The file {} does not exist", this.getAbsolutePath()); 93 | } catch (ClosedByInterruptException e) { 94 | Thread.currentThread().interrupt(); 95 | logger.atDebug().log("Interrupted while getting log file hash"); 96 | } catch (IOException e) { 97 | // File may not exist 98 | logger.atError().cause(e).log("Unable to read file {}", this.getAbsolutePath()); 99 | } 100 | return ""; 101 | } 102 | 103 | /** 104 | * Get the hash of the logfile with target lines. 105 | * 106 | * @return the calculated hash value of the logfile, empty string if not enough lines for digest. 
107 | */ 108 | public String hashString() { 109 | if (!Objects.equals(this.hash, HASH_VALUE_OF_EMPTY_STRING)) { 110 | return this.hash; 111 | } 112 | 113 | if (!this.exists()) { 114 | return this.hash; 115 | } 116 | 117 | try { 118 | String stringToHash = readBytesToString(); 119 | if (!stringToHash.isEmpty()) { 120 | this.hash = calculate(stringToHash); 121 | } 122 | } catch (NoSuchAlgorithmException e) { 123 | logger.atError().cause(e).log("The digest algorithm is invalid"); 124 | } 125 | 126 | return this.hash; 127 | } 128 | 129 | public boolean isEmpty() { 130 | return Utils.isEmpty(this.hashString()); 131 | } 132 | 133 | /** 134 | * Determines if a file has rotated by comparing the current file on the sourcePath against the hardlink 135 | * path. This returns false if the file that the hardlink is tracking is not the same it was 136 | * originally created with. 137 | */ 138 | public boolean hasRotated() { 139 | try { 140 | return !Files.isSameFile(Paths.get(sourcePath), toPath()); 141 | } catch (IOException e) { 142 | return true; 143 | } 144 | } 145 | 146 | /** 147 | * Deletes the hardlink (if there is one) and the file being tracked. 
148 | */ 149 | @Override 150 | public boolean delete() { 151 | if (Objects.equals(toPath(), Paths.get(sourcePath))) { 152 | return super.delete(); 153 | } else { 154 | return deleteHardLinkWithSourceFile(); 155 | } 156 | } 157 | 158 | private boolean deleteHardLinkWithSourceFile() { 159 | Path source = Paths.get(sourcePath); 160 | 161 | if (this.hasRotated()) { 162 | // We can't get the source path from a hard link, so we will try to find the tracked file and delete it if 163 | // it is possible 164 | try { 165 | Optional hardLinkTargetPath = getHardLinkSourcePath(source); 166 | 167 | if (hardLinkTargetPath.isPresent()) { 168 | Files.deleteIfExists(hardLinkTargetPath.get()); 169 | } 170 | } catch (IOException e) { 171 | logger.atWarn().cause(e).kv("hardlink", toPath()).kv("originalSource", sourcePath) 172 | .log("Unable to delete hardlink source path"); 173 | } 174 | } else { 175 | try { 176 | Files.deleteIfExists(source); 177 | } catch (IOException e) { 178 | logger.atWarn().cause(e).kv("sourcePath", sourcePath).log("Unable to delete file"); 179 | } 180 | } 181 | 182 | return super.delete(); // delete hardlink 183 | } 184 | 185 | private Optional getHardLinkSourcePath(Path sourcePath) throws IOException { 186 | Path sourceDir = sourcePath.getParent(); 187 | 188 | if (sourceDir == null) { 189 | return Optional.empty(); 190 | } 191 | 192 | File[] files = sourceDir.toFile().listFiles(); 193 | 194 | if (files == null) { 195 | return Optional.empty(); 196 | } 197 | 198 | for (File file : files) { 199 | if (Files.isSameFile(file.toPath(), toPath())) { 200 | return Optional.of(file.toPath()); 201 | } 202 | } 203 | 204 | return Optional.empty(); 205 | } 206 | 207 | 208 | /** 209 | * Returns the source path of the file original file. 210 | * 211 | * @deprecated do not use in versions greater than 2.2. 
It is just being used so upgrade-downgrade still works 212 | */ 213 | @Deprecated 214 | public String getSourcePath() { 215 | return sourcePath; 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
-------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/model/LogFileGroup.java: -------------------------------------------------------------------------------- 1 | package com.aws.greengrass.logmanager.model; 2 | 3 | import com.aws.greengrass.logging.api.Logger; 4 | import com.aws.greengrass.logging.impl.LogManager; 5 | import com.aws.greengrass.logmanager.exceptions.InvalidLogGroupException; 6 | import com.aws.greengrass.util.Utils; 7 | import lombok.Getter; 8 | 9 | import java.io.File; 10 | import java.io.IOException; 11 | import java.net.URI; 12 | import java.nio.file.Path; 13 | import java.time.Instant; 14 | import java.util.ArrayList; 15 | import java.util.Arrays; 16 | import java.util.Collections; 17 | import java.util.Comparator; 18 | import java.util.HashMap; 19 | import java.util.List; 20 | import java.util.Map; 21 | import java.util.Optional; 22 | import java.util.concurrent.ConcurrentHashMap; 23 | import java.util.regex.Pattern; 24 | import java.util.stream.Collectors; 25 | 26 | import static com.aws.greengrass.logmanager.model.LogFile.HASH_VALUE_OF_EMPTY_STRING; 27 | 28 | 29 | public final class LogFileGroup { 30 | private static final Logger logger = LogManager.getLogger(LogFileGroup.class); 31 | private final boolean isUsingHardlinks; 32 | @Getter 33 | private final Optional maxBytes; 34 | private final Instant lastProcessed; 35 | private final List logFiles; 36 | private final Map fileHashToLogFile; 37 | @Getter 38 | private final Pattern filePattern; 39 | 40 | 41 | /** 42 | * Returns a list of log files that have already been processed. 
43 | */ 44 | public List getProcessedLogFiles() { 45 | return this.logFiles.stream() 46 | // Greater than or equal comparison means lastProcessed is afterOrEqual to the file.lastModified 47 | .filter(file -> this.lastProcessed.compareTo(Instant.ofEpochMilli(file.lastModified())) >= 0) 48 | .filter(file -> !this.isActiveFile(file)) 49 | .collect(Collectors.toList()); 50 | } 51 | 52 | /** 53 | * Returns a list of log files that have not yet bee processed. 54 | */ 55 | public List getLogFiles() { 56 | return this.logFiles.stream() 57 | .filter(file -> this.lastProcessed.isBefore(Instant.ofEpochMilli(file.lastModified()))) 58 | .collect(Collectors.toList()); 59 | } 60 | 61 | private LogFileGroup( 62 | List files, 63 | Pattern filePattern, 64 | Map fileHashToLogFile, 65 | boolean isUsingHardlinks, 66 | Instant lastProcessed, 67 | Optional maxBytes 68 | ) { 69 | this.logFiles = files; 70 | this.filePattern = filePattern; 71 | this.fileHashToLogFile = fileHashToLogFile; 72 | this.isUsingHardlinks = isUsingHardlinks; 73 | this.lastProcessed = lastProcessed; 74 | this.maxBytes = maxBytes; 75 | } 76 | 77 | /** 78 | * Create a list of Logfiles that are sorted based on lastModified time. 79 | * 80 | * @param componentLogConfiguration component log configuration 81 | * @param lastProcessed the saved updated time of the last uploaded log of a component. 82 | * @param workDir component work directory 83 | * @return list of logFile. 84 | * @throws InvalidLogGroupException the exception if this is not a valid directory. 
85 | */ 86 | public static LogFileGroup create(ComponentLogConfiguration componentLogConfiguration, 87 | Instant lastProcessed, Path workDir) 88 | throws InvalidLogGroupException { 89 | URI directoryURI = componentLogConfiguration.getDirectoryPath().toUri(); 90 | File folder = new File(directoryURI); 91 | 92 | // Setup directories 93 | 94 | if (!folder.isDirectory()) { 95 | throw new InvalidLogGroupException(String.format("%s must be a directory", directoryURI)); 96 | } 97 | 98 | String componentName = componentLogConfiguration.getName(); 99 | Path componentHardlinksDirectory = workDir.resolve(componentName); 100 | 101 | // TODO: Potential TOCTOU race condition if 2 threads or even the same thread creates a log group 102 | // at different points in time. Files might get deleted beforehand. CAREFUL how we use this for now 103 | try { 104 | Utils.deleteFileRecursively(componentHardlinksDirectory.toFile()); 105 | Utils.createPaths(componentHardlinksDirectory); 106 | } catch (IOException e) { 107 | throw new InvalidLogGroupException( 108 | String.format("%s failed to create hard link directory", componentHardlinksDirectory), e); 109 | } 110 | 111 | // Get component files 112 | 113 | File[] files = folder.listFiles(); 114 | Pattern filePattern = componentLogConfiguration.getFileNameRegex(); 115 | 116 | if (files == null || files.length == 0) { 117 | logger.atDebug().kv("component", componentName) 118 | .kv("directory", directoryURI) 119 | .log("No component logs are found in the directory"); 120 | return new LogFileGroup( 121 | Collections.emptyList(), filePattern, new HashMap<>(), false, 122 | lastProcessed,Optional.empty()); 123 | } 124 | 125 | boolean isUsingHardlinks = true; 126 | List logFiles; 127 | 128 | files = Arrays.stream(files) 129 | .filter(File::isFile) 130 | .filter(file -> filePattern.matcher(file.getName()).find()) 131 | .toArray(File[]::new); 132 | 133 | // Convert files into log files 134 | 135 | try { 136 | logFiles = convertToLogFiles(files, 
componentHardlinksDirectory); 137 | } catch (IOException e) { 138 | logger.atDebug().cause(e).log("Failed to create hardlinks for files. Falling back to using regular " 139 | + " files"); 140 | isUsingHardlinks = false; 141 | logFiles = convertToLogFiles(files); 142 | } 143 | 144 | // Filter out files that can't be processed because they have no hash. 1. Empty 2. bytes < 1024 145 | 146 | logFiles = logFiles.stream() 147 | .filter(logFile -> !logFile.hashString().equals(HASH_VALUE_OF_EMPTY_STRING)) 148 | .collect(Collectors.toList()); 149 | 150 | 151 | // Cache the logFiles by hash 152 | 153 | Map fileHashToLogFileMap = new ConcurrentHashMap<>(); 154 | logFiles.forEach(logFile -> { 155 | fileHashToLogFileMap.put(logFile.hashString(), logFile); 156 | }); 157 | 158 | Optional maxBytes = Optional.ofNullable(componentLogConfiguration.getDiskSpaceLimit()); 159 | return new LogFileGroup(logFiles, filePattern, fileHashToLogFileMap, isUsingHardlinks, lastProcessed, maxBytes); 160 | } 161 | 162 | /** 163 | * Transform the files into log files that track the file through a hardlink that is created on the 164 | * hardLinkDirectory. 165 | * Files created this way can be tracked regardless of whether the underlying file rotates. 166 | * 167 | * @param files - A array of files 168 | * @param hardLinkDirectory - A Path to the hardlink directory. 
Must be on the same volume the files are being 169 | * created 170 | * @throws IOException - If it fails to create the hard link 171 | */ 172 | private static List convertToLogFiles(File[] files, Path hardLinkDirectory) throws IOException { 173 | List logFiles = new ArrayList<>(files.length); 174 | 175 | // TODO: We have to add the rotation detection mechanism here otherwise there is a chance that while we are 176 | // looping and creating the hardlinks the files gets rotated so the path that 177 | 178 | for (File file : files) { 179 | logFiles.add(LogFile.of(file, hardLinkDirectory)); 180 | } 181 | 182 | logFiles.sort(Comparator.comparingLong(LogFile::lastModified)); 183 | 184 | return logFiles; 185 | } 186 | 187 | /** 188 | * Transform the files into log files that point to the path of the file that is being passed in. It behaves the 189 | * same 190 | * as a java File. It is not resilient to file rotations. Meaning that if the underlying file rotates it will 191 | * get a reference to a different file than the one it was originally created with. 192 | * 193 | * @param files - An array of files 194 | */ 195 | private static List convertToLogFiles(File... files) { 196 | List logFiles = new ArrayList<>(files.length); 197 | 198 | for (File file : files) { 199 | logFiles.add(LogFile.of(file)); 200 | } 201 | 202 | logFiles.sort(Comparator.comparingLong(LogFile::lastModified)); 203 | logFiles.remove(logFiles.size() - 1); // remove the active file 204 | 205 | return logFiles; 206 | } 207 | 208 | /** 209 | * Get the LogFile object from the fileHash. 210 | * 211 | * @param fileHash the fileHash obtained from uploader. 212 | * @return the logFile. 213 | */ 214 | public LogFile getFile(String fileHash) { 215 | return fileHashToLogFile.get(fileHash); 216 | } 217 | 218 | /** 219 | * Returns the size in bytes of all the contents being tracked on by the log group. 
220 | */ 221 | public long totalSizeInBytes() { 222 | long bytes = 0; 223 | for (LogFile log : logFiles) { 224 | bytes += log.length(); 225 | } 226 | return bytes; 227 | } 228 | 229 | /** 230 | * Validate if the logFile is the active of one logFileGroup. 231 | * 232 | * @param file the target file. 233 | * @return boolean. 234 | */ 235 | public boolean isActiveFile(LogFile file) { 236 | if (!isUsingHardlinks) { 237 | return false; 238 | } 239 | 240 | if (logFiles.isEmpty()) { 241 | return false; 242 | } 243 | 244 | LogFile activeFile = logFiles.get(logFiles.size() - 1); 245 | 246 | if (activeFile.hasRotated()) { 247 | return false; 248 | } 249 | 250 | return file.hashString().equals(activeFile.hashString()); 251 | } 252 | 253 | /** 254 | * Deletes a log file and stops tacking it. 255 | * 256 | * @param logFile - A Log File 257 | */ 258 | public boolean remove(LogFile logFile) { 259 | // Safely delete the file 260 | boolean result = logFile.delete(); 261 | 262 | if (result) { 263 | logger.atInfo().log("Successfully deleted file: {}", logFile.getSourcePath()); 264 | 265 | // Stop tracking the file 266 | logFiles.remove(this.fileHashToLogFile.get(logFile.hashString())); 267 | this.fileHashToLogFile.remove(logFile.hashString()); 268 | } 269 | 270 | return result; 271 | } 272 | } 273 | -------------------------------------------------------------------------------- /src/main/java/com/aws/greengrass/logmanager/CloudWatchLogsUploader.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.logmanager;

import com.aws.greengrass.logging.api.Logger;
import com.aws.greengrass.logging.impl.LogManager;
import com.aws.greengrass.logmanager.model.CloudWatchAttempt;
import com.aws.greengrass.logmanager.util.CloudWatchClientFactory;
import com.aws.greengrass.logmanager.util.SdkClientWrapper;
import lombok.Getter;
import lombok.Setter;
import software.amazon.awssdk.awscore.exception.AwsServiceException;
import software.amazon.awssdk.core.exception.SdkException;
import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.CloudWatchLogsException;
import software.amazon.awssdk.services.cloudwatchlogs.model.CreateLogGroupRequest;
import software.amazon.awssdk.services.cloudwatchlogs.model.CreateLogStreamRequest;
import software.amazon.awssdk.services.cloudwatchlogs.model.DataAlreadyAcceptedException;
import software.amazon.awssdk.services.cloudwatchlogs.model.InputLogEvent;
import software.amazon.awssdk.services.cloudwatchlogs.model.InvalidSequenceTokenException;
import software.amazon.awssdk.services.cloudwatchlogs.model.PutLogEventsRequest;
import software.amazon.awssdk.services.cloudwatchlogs.model.PutLogEventsResponse;
import software.amazon.awssdk.services.cloudwatchlogs.model.ResourceAlreadyExistsException;
import software.amazon.awssdk.services.cloudwatchlogs.model.ResourceNotFoundException;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import javax.inject.Inject;

/**
 * Uploads batches of log events to CloudWatch Logs, creating the destination log group and
 * log stream on demand, tracking PutLogEvents sequence tokens per stream, and notifying
 * registered listeners of the outcome of each {@link CloudWatchAttempt}.
 */
public class CloudWatchLogsUploader {
    private final Logger logger = LogManager.getLogger(CloudWatchLogsUploader.class);
    // Listeners keyed by subscriber name; each is invoked with the attempt after every upload() call.
    private final Map<String, Consumer<CloudWatchAttempt>> listeners = new ConcurrentHashMap<>();
    // NOTE(review): this field is only written through the generated setter and never read here —
    // presumably kept for test injection; confirm before removing.
    @Setter
    private CloudWatchLogsClient cloudWatchLogsClient;
    // Getter only for unit testing purpose
    @Getter
    private final SdkClientWrapper<CloudWatchLogsClient> cloudWatchWrapper;
    private static final int MAX_RETRIES = 3;

    // logGroup -> logStream -> savedSequenceToken
    final Map<String, Map<String, String>> logGroupsToSequenceTokensMap = new ConcurrentHashMap<>();

    @Inject
    public CloudWatchLogsUploader(CloudWatchClientFactory cloudWatchClientFactory) {
        this.cloudWatchWrapper = cloudWatchClientFactory.getWrapper();
    }

    /**
     * Uploads the input log events for each stream within the CloudWatchAttempt. It will create the log group/stream
     * if necessary.
     * After successfully uploading a log stream to cloudwatch, it will add the necessary information so that the log
     * manager can persist that information.
     *
     * @param attempt  {@link CloudWatchAttempt}
     * @param tryCount The upload try count.
     */
    public void upload(CloudWatchAttempt attempt, int tryCount) {
        try {
            attempt.getLogStreamsToLogEventsMap().forEach((streamName, attemptLogInformation) -> {
                boolean success = uploadLogs(attempt.getLogGroupName(), streamName,
                        attemptLogInformation.getSortedLogEvents(), tryCount);
                if (success) {
                    attempt.getLogStreamUploadedSet().add(streamName);
                }
            });
        } catch (SdkException e) {
            logger.atError().cause(e).log("Unable to upload logs for log group {}", attempt.getLogGroupName());
        }
        // Listeners are always notified, even on failure, so callers can persist partial progress.
        listeners.values().forEach(consumer -> consumer.accept(attempt));
    }

    /**
     * Register a listener to get cloud watch attempt status.
     *
     * @param callback The callback function to invoke.
     * @param name     The unique name for the service subscribing.
     */
    public void registerAttemptStatus(String name, Consumer<CloudWatchAttempt> callback) {
        listeners.putIfAbsent(name, callback);
    }

    /**
     * Unregister a listener to get cloud watch attempt status.
     *
     * @param name The unique name for the service subscribing.
     */
    public void unregisterAttemptStatus(String name) {
        listeners.remove(name);
    }

    /**
     * Uploads logs to CloudWatch.
     *
     * @param logEvents     The log events to upload to CloudWatch.
     * @param logGroupName  The log group name to upload the logs to.
     * @param logStreamName The log steam name to upload the logs to.
     * @param tryCount      The upload try count.
     * @return true when the events were accepted (or there was nothing to upload), false otherwise.
     */
    private boolean uploadLogs(String logGroupName, String logStreamName, List<InputLogEvent> logEvents,
                               int tryCount) {
        if (tryCount > MAX_RETRIES) {
            logger.atError().log("Unable to upload {} logs to {}-{} as max retry ({}) times reached",
                    logEvents.size(), logGroupName, logStreamName, MAX_RETRIES);
            return false;
        }
        // If there are no logs available to upload, then return true so that we don't read those files again.
        // This can occur if the log files read have logs below the desired log level.
        // By returning true, we will ensure that we won't read those log files again.
        if (logEvents.isEmpty()) {
            return true;
        }
        logger.atTrace().log("Uploading {} logs to {}-{}", logEvents.size(), logGroupName, logStreamName);

        // Look up the saved sequence token for this stream, if any. A plain read is all that is
        // needed here; the previous computeIfPresent-based lookup locked map entries just to read.
        String sequenceToken = null;
        Map<String, String> streamToSequenceTokenMap = logGroupsToSequenceTokensMap.get(logGroupName);
        if (streamToSequenceTokenMap != null) {
            sequenceToken = streamToSequenceTokenMap.get(logStreamName);
        }

        PutLogEventsRequest request = PutLogEventsRequest.builder()
                .overrideConfiguration(builder ->
                        // provide the log-format header of json/emf
                        builder.headers(
                                Collections.singletonMap("x-amzn-logs-format", Collections.singletonList("json/emf"))
                        )
                )
                .logEvents(logEvents)
                .logGroupName(logGroupName)
                .logStreamName(logStreamName)
                .sequenceToken(sequenceToken)
                .build();

        try {
            PutLogEventsResponse putLogEventsResponse = cloudWatchWrapper
                    .execute(client -> client.putLogEvents(request));
            if (putLogEventsResponse.nextSequenceToken() != null) {
                addNextSequenceToken(logGroupName, logStreamName, putLogEventsResponse.nextSequenceToken());
            }
            if (putLogEventsResponse.rejectedLogEventsInfo() != null) {
                logger.atError().log("Log events rejected by CloudWatch {}",
                        putLogEventsResponse.rejectedLogEventsInfo());
            }
            return true;
        } catch (InvalidSequenceTokenException e) {
            // Get correct token using describe
            if (tryCount < MAX_RETRIES) {
                logger.atInfo().log("Invalid token while uploading logs to {}-{}. Retrying with the expected sequence "
                        + "token in CloudWatch response.", logGroupName, logStreamName);
            } else {
                logger.atError().log("Invalid token while uploading logs to {}-{} with max retry ({}) times "
                                + "reached.",
                        logGroupName, logStreamName, tryCount);
            }
            addNextSequenceToken(logGroupName, logStreamName, e.expectedSequenceToken());
            // TODO: better do the retry mechanism? Maybe need to have a scheduled task to handle this.
            return uploadLogs(logGroupName, logStreamName, logEvents, tryCount + 1);
        } catch (DataAlreadyAcceptedException e) {
            // Don't do anything since the data already exists.
            addNextSequenceToken(logGroupName, logStreamName, e.expectedSequenceToken());
            return true;
        } catch (ResourceNotFoundException e) {
            // Handle no log group/log stream
            logger.atInfo().log("Unable to find log group- {} or log stream - {}. Creating them now.",
                    logGroupName, logStreamName);
            createNewLogGroup(logGroupName);
            createNewLogStream(logGroupName, logStreamName);
            return uploadLogs(logGroupName, logStreamName, logEvents, tryCount + 1);
        } catch (AwsServiceException e) {
            // Back off for some time and then retry
            logger.atError().cause(e).log("Unable to upload {} logs to {}-{}", logEvents.size(), logGroupName,
                    logStreamName);
        }
        return false;
    }

    /**
     * Creates the log group on CloudWatch.
     *
     * @param logGroupName The log group name.
     */
    private void createNewLogGroup(String logGroupName) {
        logger.atDebug().log("Creating log group {}", logGroupName);
        CreateLogGroupRequest request = CreateLogGroupRequest.builder().logGroupName(logGroupName).build();
        try {
            this.cloudWatchWrapper.execute(client -> client.createLogGroup(request));
        } catch (ResourceAlreadyExistsException e) {
            // Don't do anything if the resource already exists.
        } catch (CloudWatchLogsException e) {
            logger.atError().cause(e).log("Unable to create log group {}.", logGroupName);
            throw e;
        }
    }

    /**
     * Creates the log stream within the log group.
     *
     * @param logGroupName  The log group name.
     * @param logStreamName The log stream name.
     */
    private void createNewLogStream(String logGroupName, String logStreamName) {
        logger.atDebug().log("Creating log stream {} for group {}", logStreamName, logGroupName);
        CreateLogStreamRequest request = CreateLogStreamRequest.builder()
                .logGroupName(logGroupName)
                .logStreamName(logStreamName)
                .build();
        try {
            this.cloudWatchWrapper.execute(client -> client.createLogStream(request));
        } catch (ResourceAlreadyExistsException e) {
            // Don't do anything if the resource already exists.
        } catch (CloudWatchLogsException e) {
            logger.atError().cause(e).log("Unable to create log stream {} for group {}.", logStreamName, logGroupName);
            throw e;
        }
    }

    /**
     * Keeping this package-private for unit tests.
     *
     * @param logGroupName      The CloudWatch log group
     * @param logStreamName     The CloudWatch log stream within the log group
     * @param nextSequenceToken The next token to be associated to the PutEvents request for the log group and stream.
     */
    void addNextSequenceToken(String logGroupName, String logStreamName, String nextSequenceToken) {
        // TODO: clean up old streams/tokens. Maybe allow a max of 5 streams for each log group.
        logGroupsToSequenceTokensMap.computeIfAbsent(logGroupName, key -> new ConcurrentHashMap<>())
                .put(logStreamName, nextSequenceToken);
    }
}
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

package com.aws.greengrass.integrationtests.logmanager;

import ch.qos.logback.core.util.FileSize;
import com.aws.greengrass.config.Topics;
import com.aws.greengrass.config.UpdateBehaviorTree;
import com.aws.greengrass.dependency.State;
import com.aws.greengrass.deployment.DeviceConfiguration;
import com.aws.greengrass.deployment.exceptions.DeviceConfigurationException;
import com.aws.greengrass.integrationtests.BaseITCase;
import com.aws.greengrass.lifecyclemanager.Kernel;
import com.aws.greengrass.logmanager.LogManagerService;
import com.aws.greengrass.logmanager.exceptions.InvalidLogGroupException;
import com.aws.greengrass.logmanager.model.ComponentLogConfiguration;
import com.aws.greengrass.testcommons.testutilities.GGExtension;
import com.aws.greengrass.util.exceptions.TLSAuthException;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import software.amazon.awssdk.crt.CrtRuntimeException;
import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
import software.amazon.awssdk.services.cloudwatchlogs.model.PutLogEventsRequest;
import software.amazon.awssdk.services.cloudwatchlogs.model.PutLogEventsResponse;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.time.Instant;
import java.time.format.DateTimeParseException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;

import static com.aws.greengrass.componentmanager.KernelConfigResolver.CONFIGURATION_CONFIG_KEY;
import static com.aws.greengrass.integrationtests.logmanager.util.LogFileHelper.createTempFileAndWriteData;
import static com.aws.greengrass.logmanager.LogManagerService.COMPONENT_LOGS_CONFIG_MAP_TOPIC_NAME;
import static com.aws.greengrass.logmanager.LogManagerService.DELETE_LOG_FILES_AFTER_UPLOAD_CONFIG_TOPIC_NAME;
import static com.aws.greengrass.logmanager.LogManagerService.LOGS_UPLOADER_CONFIGURATION_TOPIC;
import static com.aws.greengrass.testcommons.testutilities.ExceptionLogProtector.ignoreExceptionOfType;
import static com.github.grantwest.eventually.EventuallyLambdaMatcher.eventuallyEval;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.lenient;

/**
 * Integration tests for the log manager's disk space management: verifies that log files
 * beyond the configured disk space limit get deleted, and that deletion stops when the
 * configuration is removed.
 */
@ExtendWith({GGExtension.class, MockitoExtension.class})
class SpaceManagementTest extends BaseITCase {
    private static Kernel kernel;
    private static DeviceConfiguration deviceConfiguration;
    private LogManagerService logManagerService;
    private Path tempDirectoryPath;

    private static final String componentName = "UserComponentA";

    @Mock
    private CloudWatchLogsClient cloudWatchLogsClient;

    @BeforeEach
    void beforeEach(ExtensionContext context) {
        // Exceptions expected during kernel startup/shutdown in an offline test environment.
        ignoreExceptionOfType(context, InterruptedException.class);
        ignoreExceptionOfType(context, TLSAuthException.class);
        ignoreExceptionOfType(context, NoSuchFileException.class);
        ignoreExceptionOfType(context, DateTimeParseException.class);
        ignoreExceptionOfType(context, CrtRuntimeException.class);
    }

    @AfterEach
    void afterEach() {
        kernel.shutdown();
    }

    /**
     * Writes {@code numberOfFiles} log files matching {@code pattern} into {@code path}.
     *
     * @return the total number of bytes written across all files.
     */
    private long writeNLogFiles(Path path, int numberOfFiles, String pattern) throws IOException {
        long totalLength = 0;

        for (int i = 0; i < numberOfFiles; i++) {
            File file = createTempFileAndWriteData(path, pattern, "");
            totalLength += file.length();
        }

        return totalLength;
    }

    /** Sums the sizes of all files currently present in the configured log directory. */
    private long getTotalLogFilesBytesFor(ComponentLogConfiguration logConfiguration) throws InvalidLogGroupException {
        long bytes = 0;
        URI directoryURI = logConfiguration.getDirectoryPath().toUri();
        File folder = new File(directoryURI);
        File[] files = folder.listFiles();

        if (files == null) {
            return bytes;
        }

        for (File log : files) {
            bytes += log.length();
        }

        return bytes;
    }

    /** Polls for up to 60 seconds until the log directory's total size drops to at most {@code bytes}. */
    private void assertLogFileSizeEventuallyBelowBytes(ComponentLogConfiguration logConfiguration, long bytes) {
        assertThat(String.format("log group size should eventually be less than %s bytes", bytes), () -> {
            try {
                long logFilesSize = getTotalLogFilesBytesFor(logConfiguration);
                return logFilesSize <= bytes;
            } catch (InvalidLogGroupException e) {
                return false;
            }
        }, eventuallyEval(is(true), Duration.ofSeconds(60)));
    }

    /** Asserts the directory's total size stays constant for 20 seconds (nothing is being deleted). */
    private void assertLogFilesAreNotDeleted(ComponentLogConfiguration logConfiguration) throws
            InvalidLogGroupException, InterruptedException {
        long originalBytes = getTotalLogFilesBytesFor(logConfiguration);
        Instant deadline = Instant.now().plusSeconds(20);

        while (Instant.now().isBefore(deadline)) {
            long currentBytes = getTotalLogFilesBytesFor(logConfiguration);
            assertEquals(originalBytes, currentBytes);
            Thread.sleep(500);
        }
    }


    /**
     * Boots a kernel configured from smallSpaceManagementPeriodicIntervalConfig.yaml with the log
     * directory templated to {@code storeDirectory}, waits for the log manager to reach RUNNING,
     * and injects the mocked CloudWatch client.
     */
    void setupKernel(Path storeDirectory)
            throws InterruptedException, URISyntaxException, IOException, DeviceConfigurationException {
        lenient().when(cloudWatchLogsClient.putLogEvents(any(PutLogEventsRequest.class)))
                .thenReturn(PutLogEventsResponse.builder().nextSequenceToken("nextToken").build());
        System.setProperty("root", tempRootDir.toAbsolutePath().toString());
        CountDownLatch logManagerRunning = new CountDownLatch(1);

        kernel = new Kernel();

        Path testRecipePath = Paths.get(LogManagerTest.class
                .getResource("smallSpaceManagementPeriodicIntervalConfig.yaml").toURI());
        String content = new String(Files.readAllBytes(testRecipePath), StandardCharsets.UTF_8);
        content = content.replaceAll("\\{\\{logFileDirectoryPath}}", storeDirectory.toAbsolutePath().toString());

        Path tempConfigPath = Files.createTempDirectory(tempRootDir, "config")
                .resolve("smallSpaceManagementPeriodicIntervalConfig.yaml");
        Files.write(tempConfigPath, content.getBytes(StandardCharsets.UTF_8));

        kernel.parseArgs("-i", tempConfigPath.toAbsolutePath().toString());
        kernel.getContext().addGlobalStateChangeListener((service, oldState, newState) -> {
            if (service.getName().equals(LogManagerService.LOGS_UPLOADER_SERVICE_TOPICS)
                    && newState.equals(State.RUNNING)) {
                logManagerRunning.countDown();
                logManagerService = (LogManagerService) service;
            }
        });
        deviceConfiguration = new DeviceConfiguration(kernel, "ThingName", "xxxxxx-ats.iot.us-east-1.amazonaws.com",
                "xxxxxx.credentials.iot.us-east-1.amazonaws.com", "privKeyFilePath", "certFilePath", "caFilePath",
                "us-east-1", "roleAliasName");
        kernel.getContext().put(DeviceConfiguration.class, deviceConfiguration);
        // set required instances from context
        kernel.launch();
        assertTrue(logManagerRunning.await(10, TimeUnit.SECONDS));
        logManagerService.getUploader().getCloudWatchWrapper().setClient(cloudWatchLogsClient);
    }

    @Test
    void GIVEN_user_component_config_with_space_management_WHEN_space_exceeds_THEN_excess_log_files_are_deleted()
            throws Exception {
        // Given

        lenient().when(cloudWatchLogsClient.putLogEvents(any(PutLogEventsRequest.class)))
                .thenReturn(PutLogEventsResponse.builder().nextSequenceToken("nextToken").build());

        tempDirectoryPath = Files.createDirectory(tempRootDir.resolve("IntegrationTestsTemporaryLogFiles"));
        // This method configures the LogManager to get logs with the pattern ^integTestRandomLogFiles.log\w* inside
        // then tempDirectoryPath with a diskSpaceLimit of 105kb
        setupKernel(tempDirectoryPath);

        // When

        // The total size will be 150kb (each log file written is 10kb * 15) given the max space is 105 kb, then we
        // expect it to delete 5 files for the total log size to be under 105kb
        writeNLogFiles(tempDirectoryPath, 15, "integTestRandomLogFiles.log_");


        // Then

        ComponentLogConfiguration compLogInfo = ComponentLogConfiguration.builder()
                .directoryPath(tempDirectoryPath)
                .fileNameRegex(Pattern.compile("^integTestRandomLogFiles.log\\w*"))
                .name("IntegrationTestsTemporaryLogFiles")
                .build();
        assertLogFileSizeEventuallyBelowBytes(compLogInfo, 105 * FileSize.KB_COEFFICIENT);
    }

    @Test
    void GIVEN_diskSpaceManagementConfigured_WHEN_configurationRemoved_THEN_logFilesStopBeingDeleted() throws Exception {
        // Given
        tempDirectoryPath = Files.createDirectory(tempRootDir.resolve("IntegrationTestsTemporaryLogFiles"));
        setupKernel(tempDirectoryPath); // starts the LM with the smallSpaceManagementPeriodicIntervalConfig.yaml config

        long totalBytes = writeNLogFiles(tempDirectoryPath, 15, "integTestRandomLogFiles.log_");
        long totalKb = totalBytes / FileSize.KB_COEFFICIENT;
        assertTrue(totalKb > 105); // 105 Kb is the amount on the configuration

        // Then - wait for the files to be deleted

        ComponentLogConfiguration compLogInfo = ComponentLogConfiguration.builder()
                .directoryPath(tempDirectoryPath)
                .fileNameRegex(Pattern.compile("^integTestRandomLogFiles.log\\w*"))
                .name("IntegrationTestsTemporaryLogFiles")
                .build();
        assertLogFileSizeEventuallyBelowBytes(compLogInfo, 105 * FileSize.KB_COEFFICIENT);

        // When - replace the component configuration, dropping the disk space limit

        Topics topics = kernel.locate(LogManagerService.LOGS_UPLOADER_SERVICE_TOPICS).getConfig();
        // Explicit typed maps instead of double-brace initialization (which creates anonymous
        // classes holding a reference to the enclosing test instance).
        Map<String, Object> componentAConfig = new HashMap<>();
        componentAConfig.put(DELETE_LOG_FILES_AFTER_UPLOAD_CONFIG_TOPIC_NAME, "false");
        Map<String, Object> componentConfig = new HashMap<>();
        componentConfig.put(componentName, componentAConfig);
        UpdateBehaviorTree behaviour =
                new UpdateBehaviorTree(UpdateBehaviorTree.UpdateBehavior.REPLACE, System.currentTimeMillis());
        topics.lookupTopics(CONFIGURATION_CONFIG_KEY, LOGS_UPLOADER_CONFIGURATION_TOPIC,
                        COMPONENT_LOGS_CONFIG_MAP_TOPIC_NAME)
                .updateFromMap(componentConfig, behaviour);
        kernel.getContext().waitForPublishQueueToClear();

        // When - write more files so the directory exceeds the old limit again

        writeNLogFiles(tempDirectoryPath, 5, "integTestRandomLogFiles.log_");
        // BUG FIX: previously multiplied by KB_COEFFICIENT (bytes * 1024), which made the
        // assertion below vacuously true; convert bytes to KB by dividing, as done above.
        long kbInDisk = getTotalLogFilesBytesFor(compLogInfo) / FileSize.KB_COEFFICIENT;
        assertTrue(kbInDisk > 105); // 105 Kb is the amount on the configuration

        // Then
        assertLogFilesAreNotDeleted(compLogInfo);
    }
}