├── .github ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── issue-bug.yml │ └── issue-enhance.yml ├── PULL_REQUEST_TEMPLATE.md └── TEMPLATE-README.md ├── .gitignore ├── CODEOWNERS ├── LICENSE ├── NOTICE ├── README.md ├── chug ├── chug.go ├── chug_suite_test.go ├── chug_test.go └── match_log_entry_test.go ├── docs └── usage.md ├── go.mod ├── go.sum ├── handler.go ├── handler_test.go ├── internal └── truncate │ ├── package.go │ ├── truncate.go │ ├── truncate_suite_test.go │ └── truncate_test.go ├── json_redacter.go ├── json_redacter_test.go ├── lager_suite_test.go ├── lagerctx ├── context.go ├── context_test.go └── lagerctx_suite_test.go ├── lagerflags ├── LICENSE ├── NOTICE ├── README.md ├── integration │ ├── integration_suite_test.go │ ├── main.go │ └── package.go ├── lagerflags.go ├── lagerflags_suite_test.go ├── lagerflags_test.go ├── redact_secrets.go ├── redact_secrets_test.go ├── timeformat.go └── timeformat_test.go ├── lagertest └── test_sink.go ├── logger.go ├── logger_test.go ├── models.go ├── reconfigurable_sink.go ├── reconfigurable_sink_test.go ├── redacting_sink.go ├── redacting_sink_test.go ├── scripts ├── create-docker-container.bash ├── docker │ └── test.bash └── test-in-docker.bash ├── slog_sink.go ├── slog_sink_test.go ├── tools.go ├── truncating_sink.go ├── truncating_sink_test.go ├── writer_sink.go └── writer_sink_test.go /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contributor License Agreement 2 | --------------- 3 | 4 | Follow these steps to make a contribution to any of our open source repositories: 5 | 6 | 1. Ensure that you have completed our CLA Agreement for [individuals](https://www.cloudfoundry.org/wp-content/uploads/2015/07/CFF_Individual_CLA.pdf) or [corporations](https://www.cloudfoundry.org/wp-content/uploads/2015/07/CFF_Corporate_CLA.pdf). 7 | 8 | 1. 
Set your name and email (these should match the information on your submitted CLA) 9 | ``` 10 | git config --global user.name "Firstname Lastname" 11 | git config --global user.email "your_email@example.com" 12 | ``` 13 | 14 | 1. All contributions must be sent using GitHub pull requests as they create a nice audit trail and structured approach. 15 | 16 | The originating github user has to either have a github id on-file with the list of approved users that have signed 17 | the CLA or they can be a public "member" of a GitHub organization for a group that has signed the corporate CLA. 18 | This enables the corporations to manage their users themselves instead of having to tell us when someone joins/leaves an organization. By removing a user from an organization's GitHub account, their new contributions are no longer approved because they are no longer covered under a CLA. 19 | 20 | If a contribution is deemed to be covered by an existing CLA, then it is analyzed for engineering quality and product 21 | fit before merging it. 22 | 23 | If a contribution is not covered by the CLA, then the automated CLA system notifies the submitter politely that we 24 | cannot identify their CLA and ask them to sign either an individual or corporate CLA. This happens automatically as a 25 | comment on pull requests. 26 | 27 | When the project receives a new CLA, it is recorded in the project records, the CLA is added to the database for the 28 | automated system's use, then we manually mark the Pull Request as having a CLA on-file. 
 29 | 30 | 31 | Initial Setup 32 | --------------- 33 | - Install docker 34 | 35 | - Add required directories 36 | 37 | ```bash 38 | # create parent directory 39 | mkdir -p ~/workspace 40 | cd ~/workspace 41 | 42 | # clone ci 43 | git clone https://github.com/cloudfoundry/wg-app-platform-runtime-ci.git 44 | 45 | # clone repo 46 | git clone https://github.com/cloudfoundry/lager.git --recursive 47 | cd lager 48 | ``` 49 | 50 | Running Tests 51 | --------------- 52 | 53 | 54 | - `./scripts/create-docker-container.bash`: This will create a docker container with appropriate mounts. 55 | - `./scripts/test-in-docker.bash`: Create docker container and run all tests and setup in a single script. 56 | - `./scripts/test-in-docker.bash <package> <sub-package>`: For running tests under a specific package and/or sub-package 57 | 58 | > [!TIP] 59 | > If running tests for this repo requires a DB flavor, the above scripts will default to mysql DB. Set DB environment variable for alternate DBs. Valid Options: mysql-8.0(or mysql),mysql-5.7,postgres 60 | 61 | When inside docker container: 62 | 63 | - `/repo/scripts/docker/test.bash`: This will run all tests in this repo 64 | - `/repo/scripts/docker/test.bash <package>`: This will only run a package's tests 65 | - `/repo/scripts/docker/test.bash <package> <sub-package>`: This will only run sub-package tests for package 66 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: CloudFoundry slack 4 | url: https://cloudfoundry.slack.com 5 | about: For help or questions about this component, you can reach the maintainers on Slack 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue-bug.yml: -------------------------------------------------------------------------------- 1 | name: Bug 2 | description: Report a defect, such as a bug or 
regression. 3 | title: "Start the title with a verb (e.g. Change header styles). Use the imperative mood in the title (e.g. Fix, not Fixed or Fixes header styles)" 4 | labels: 5 | - bug 6 | body: 7 | - type: textarea 8 | id: current 9 | attributes: 10 | label: Current behavior 11 | validations: 12 | required: true 13 | - type: markdown 14 | id: current_md 15 | attributes: 16 | value: | 17 | - Explain, in detail, what the current state of the world is 18 | - Include code snippets, log output, and analysis as necessary to explain the whole problem 19 | - Include links to logs, GitHub issues, slack conversations, etc.. to tell us where the problem came from 20 | - Steps to reproduce 21 | - type: textarea 22 | id: desired 23 | attributes: 24 | label: Desired behavior 25 | validations: 26 | required: true 27 | - type: markdown 28 | id: desired_md 29 | attributes: 30 | value: | 31 | - Describe how the problem should be fixed 32 | - Does this require a new bosh release? 33 | - Does it require configuration changes in cf-deployment? 34 | - Do we need to have a special release note? 35 | - Do we need to update repo documentation? 36 | - type: input 37 | id: version 38 | attributes: 39 | label: Affected Version 40 | description: Please enter the version 41 | validations: 42 | required: true 43 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue-enhance.yml: -------------------------------------------------------------------------------- 1 | name: Enhance 2 | description: Propose an enhancement or new feature. 3 | title: "Start the title with a verb (e.g. Change header styles). Use the imperative mood in the title (e.g. 
Fix, not Fixed or Fixes header styles)" 4 | labels: 5 | - enhancement 6 | body: 7 | - type: textarea 8 | id: change 9 | attributes: 10 | label: Proposed Change 11 | validations: 12 | required: true 13 | - type: markdown 14 | id: change_md 15 | attributes: 16 | value: | 17 | Briefly explain why this feature is necessary in the following format 18 | 19 | **As a** *developer/operator/whatever* 20 | **I want** *this ability to do X* 21 | **So that** *I can do Y* 22 | 23 | - Provide details of where this request is coming from including links, GitHub Issues, etc.. 24 | - Provide details of prior work (if applicable) including links to commits, github issues, etc... 25 | - type: textarea 26 | id: acceptance 27 | attributes: 28 | label: Acceptance criteria 29 | validations: 30 | required: true 31 | - type: markdown 32 | id: acceptance_md 33 | attributes: 34 | value: | 35 | Detail the exact work that is required to accept this story in the following format 36 | 37 | **Scenario:** *describe scenario* 38 | **Given** *I have some sort of configuration* 39 | **When** *I do X* 40 | **And** *do Y* 41 | **Then** *I see the desired behavior* 42 | 43 | - type: textarea 44 | id: related 45 | attributes: 46 | label: Related links 47 | description: Please list related links for this issue 48 | placeholder: | 49 | - [ ] code.cloudfoundry.org/bbs for links 50 | - [x] cloudfoundry/rep#123 for issues/prs 51 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | - [ ] Read the [Contributing document](../blob/-/.github/CONTRIBUTING.md). 2 | 3 | Summary 4 | --------------- 5 | 10 | 11 | 12 | Backward Compatibility 13 | --------------- 14 | Breaking Change? 
**Yes/No** 15 | 22 | -------------------------------------------------------------------------------- /.github/TEMPLATE-README.md: -------------------------------------------------------------------------------- 1 | 2 | > [!IMPORTANT] 3 | > Content in this directory is managed by the CI task `sync-dot-github-dir`. 4 | 5 | Changing templates 6 | --------------- 7 | These templates are synced from [these shared templates](https://github.com/cloudfoundry/wg-app-platform-runtime-ci/tree/main/shared/github). 8 | Each pipeline will contain a `sync-dot-github-dir-*` job for updating the content of these files. 9 | If you would like to modify these, please change them in the shared group. 10 | It's also possible to override the templates on pipeline's parent directory by introducing a custom 11 | template in `$PARENT_TEMPLATE_DIR/github/FILENAME` or `$PARENT_TEMPLATE_DIR/github/REPO_NAME/FILENAME` in CI repo 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Builds 2 | bin 3 | 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # IntelliJ 12 | .idea 13 | 14 | # Dependencies 15 | vendor 16 | 17 | # macOS 18 | .DS_Store 19 | 20 | # Vim files 21 | [._]*.s[a-v][a-z] 22 | !*.svg # comment out if you don't need vector files 23 | [._]*.sw[a-p] 24 | [._]s[a-rt-v][a-z] 25 | [._]ss[a-gi-z] 26 | [._]sw[a-p] 27 | Session.vim 28 | Sessionx.vim 29 | .netrwhist 30 | *~ 31 | tags 32 | [._]*.un~ 33 | 34 | # Test binary, built with `go test -c` 35 | *.test 36 | 37 | # Output of the go coverage tool, specifically when used with LiteIDE 38 | *.out 39 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @cloudfoundry/wg-app-runtime-platform-diego-approvers 2 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. 2 | 3 | This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | 17 | This project may include a number of subcomponents with separate 18 | copyright notices and license terms. 
Your use of these subcomponents 19 | is subject to the terms and conditions of each subcomponent's license, 20 | as noted in the LICENSE file. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # lager 2 | 3 | [![Go Report 4 | Card](https://goreportcard.com/badge/code.cloudfoundry.org/lager/v3)](https://goreportcard.com/report/code.cloudfoundry.org/lager/v3) 5 | [![Go 6 | Reference](https://pkg.go.dev/badge/code.cloudfoundry.org/lager.svg)](https://pkg.go.dev/code.cloudfoundry.org/lager/v3) 7 | 8 | Lager is a logging library for go 9 | 10 | > \[!NOTE\] 11 | > 12 | > This repository should be imported as 13 | > `code.cloudfoundry.org/lager/v3`. 14 | 15 | # Docs 16 | 17 | - [Usage](./docs/usage.md) 18 | 19 | # Contributing 20 | 21 | See the [Contributing.md](./.github/CONTRIBUTING.md) for more 22 | information on how to contribute. 23 | 24 | # Working Group Charter 25 | 26 | This repository is maintained by [App Runtime 27 | Platform](https://github.com/cloudfoundry/community/blob/main/toc/working-groups/app-runtime-platform.md) 28 | under `Diego` area. 29 | 30 | > \[!IMPORTANT\] 31 | > 32 | > Content in this file is managed by the [CI task 33 | > `sync-readme`](https://github.com/cloudfoundry/wg-app-platform-runtime-ci/blob/main/shared/tasks/sync-readme/metadata.yml) 34 | > and is generated by CI following a convention. 
35 | -------------------------------------------------------------------------------- /chug/chug.go: -------------------------------------------------------------------------------- 1 | package chug 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "strconv" 11 | "strings" 12 | "time" 13 | 14 | "code.cloudfoundry.org/lager/v3" 15 | ) 16 | 17 | type Entry struct { 18 | IsLager bool 19 | Raw []byte 20 | Log LogEntry 21 | } 22 | 23 | type LogEntry struct { 24 | Timestamp time.Time 25 | LogLevel lager.LogLevel 26 | 27 | Source string 28 | Message string 29 | Session string 30 | 31 | Error error 32 | Trace string 33 | 34 | Data lager.Data 35 | } 36 | 37 | func toTimestamp(d string) (time.Time, error) { 38 | f, err := strconv.ParseFloat(d, 64) 39 | if err == nil { 40 | return time.Unix(0, int64(f*1e9)), nil 41 | } 42 | return time.Parse(time.RFC3339Nano, d) 43 | } 44 | 45 | // temporarily duplicated to make refactoring in small steps possible 46 | type prettyFormat struct { 47 | Timestamp string `json:"timestamp"` 48 | Level string `json:"level"` 49 | LogLevel lager.LogLevel `json:"log_level"` 50 | Source string `json:"source"` 51 | Message string `json:"message"` 52 | Data lager.Data `json:"data"` 53 | Error error `json:"-"` 54 | } 55 | 56 | func Chug(reader io.Reader, out chan<- Entry) { 57 | scanner := bufio.NewReader(reader) 58 | for { 59 | line, err := scanner.ReadBytes('\n') 60 | if len(line) > 0 { 61 | out <- entry(bytes.TrimSuffix(line, []byte{'\n'})) 62 | } 63 | if err != nil { 64 | break 65 | } 66 | } 67 | close(out) 68 | } 69 | 70 | func entry(raw []byte) (entry Entry) { 71 | copiedBytes := make([]byte, len(raw)) 72 | copy(copiedBytes, raw) 73 | entry = Entry{ 74 | IsLager: false, 75 | Raw: copiedBytes, 76 | } 77 | 78 | rawString := string(raw) 79 | idx := strings.Index(rawString, "{") 80 | if idx == -1 { 81 | return 82 | } 83 | 84 | var prettyLog prettyFormat 85 | decoder := 
json.NewDecoder(strings.NewReader(rawString[idx:])) 86 | err := decoder.Decode(&prettyLog) 87 | if err != nil { 88 | return 89 | } 90 | 91 | entry.Log, entry.IsLager = convertPrettyLog(prettyLog) 92 | 93 | return 94 | } 95 | 96 | func convertPrettyLog(lagerLog prettyFormat) (LogEntry, bool) { 97 | trace, err := traceFromData(lagerLog.Data) 98 | if err != nil { 99 | return LogEntry{}, false 100 | } 101 | 102 | session, err := sessionFromData(lagerLog.Data) 103 | if err != nil { 104 | return LogEntry{}, false 105 | } 106 | 107 | logLevel := lagerLog.LogLevel 108 | if lagerLog.Level != "" { 109 | logLevel, err = lager.LogLevelFromString(lagerLog.Level) 110 | if err != nil { 111 | return LogEntry{}, false 112 | } 113 | } 114 | 115 | var logErr error 116 | if logLevel == lager.ERROR || logLevel == lager.FATAL { 117 | logErr, err = errorFromData(lagerLog.Data) 118 | if err != nil { 119 | return LogEntry{}, false 120 | } 121 | } 122 | 123 | timestamp, err := toTimestamp(lagerLog.Timestamp) 124 | if err != nil { 125 | return LogEntry{}, false 126 | } 127 | 128 | return LogEntry{ 129 | Timestamp: timestamp, 130 | LogLevel: logLevel, 131 | Source: lagerLog.Source, 132 | Message: lagerLog.Message, 133 | Session: session, 134 | 135 | Error: logErr, 136 | Trace: trace, 137 | 138 | Data: lagerLog.Data, 139 | }, true 140 | } 141 | 142 | func traceFromData(data lager.Data) (string, error) { 143 | trace, ok := data["trace"] 144 | if ok { 145 | traceString, ok := trace.(string) 146 | if !ok { 147 | return "", fmt.Errorf("unable to convert trace: %v", trace) 148 | } 149 | delete(data, "trace") 150 | return traceString, nil 151 | } 152 | return "", nil 153 | } 154 | 155 | func sessionFromData(data lager.Data) (string, error) { 156 | session, ok := data["session"] 157 | if ok { 158 | sessionString, ok := session.(string) 159 | if !ok { 160 | return "", fmt.Errorf("unable to convert session: %v", session) 161 | } 162 | delete(data, "session") 163 | return sessionString, nil 164 | } 165 
| return "", nil 166 | } 167 | 168 | func errorFromData(data lager.Data) (error, error) { 169 | err, ok := data["error"] 170 | if ok { 171 | errorString, ok := err.(string) 172 | if !ok { 173 | return nil, fmt.Errorf("unable to convert error: %v", err) 174 | } 175 | delete(data, "error") 176 | return errors.New(errorString), nil 177 | } 178 | return nil, nil 179 | } 180 | -------------------------------------------------------------------------------- /chug/chug_suite_test.go: -------------------------------------------------------------------------------- 1 | package chug_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo/v2" 5 | . "github.com/onsi/gomega" 6 | 7 | "testing" 8 | ) 9 | 10 | func TestChug(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Chug Suite") 13 | } 14 | -------------------------------------------------------------------------------- /chug/chug_test.go: -------------------------------------------------------------------------------- 1 | package chug_test 2 | 3 | import ( 4 | "code.cloudfoundry.org/lager/v3/chug" 5 | "errors" 6 | "io" 7 | "time" 8 | 9 | "code.cloudfoundry.org/lager/v3" 10 | . "github.com/onsi/ginkgo/v2" 11 | . 
"github.com/onsi/gomega" 12 | ) 13 | 14 | var _ = Describe("Chug", func() { 15 | var ( 16 | logger lager.Logger 17 | stream chan chug.Entry 18 | pipeReader *io.PipeReader 19 | pipeWriter *io.PipeWriter 20 | ) 21 | 22 | BeforeEach(func() { 23 | pipeReader, pipeWriter = io.Pipe() 24 | logger = lager.NewLogger("chug-test") 25 | logger.RegisterSink(lager.NewWriterSink(pipeWriter, lager.DEBUG)) 26 | stream = make(chan chug.Entry, 100) 27 | }) 28 | 29 | JustBeforeEach(func() { 30 | go chug.Chug(pipeReader, stream) 31 | }) 32 | 33 | AfterEach(func() { 34 | pipeWriter.Close() 35 | Eventually(stream).Should(BeClosed()) 36 | }) 37 | 38 | Context("when fed a stream of well-formed lager messages", func() { 39 | It("should return parsed lager messages", func() { 40 | data := lager.Data{"some-float": 3.0, "some-string": "foo"} 41 | logger.Debug("chug", data) 42 | logger.Info("again", data) 43 | 44 | entry := <-stream 45 | Expect(entry.IsLager).To(BeTrue()) 46 | Expect(entry.Log).To(MatchLogEntry(chug.LogEntry{ 47 | LogLevel: lager.DEBUG, 48 | Source: "chug-test", 49 | Message: "chug-test.chug", 50 | Data: data, 51 | })) 52 | 53 | entry = <-stream 54 | Expect(entry.IsLager).To(BeTrue()) 55 | Expect(entry.Log).To(MatchLogEntry(chug.LogEntry{ 56 | LogLevel: lager.INFO, 57 | Source: "chug-test", 58 | Message: "chug-test.again", 59 | Data: data, 60 | })) 61 | 62 | }) 63 | 64 | It("should parse the timestamp", func() { 65 | logger.Debug("chug") 66 | entry := <-stream 67 | Expect(entry.Log.Timestamp).To(BeTemporally("~", time.Now(), time.Second)) 68 | }) 69 | 70 | Context("when parsing an error message", func() { 71 | It("should include the error", func() { 72 | data := lager.Data{"some-float": 3.0, "some-string": "foo"} 73 | logger.Error("chug", errors.New("some-error"), data) 74 | Expect((<-stream).Log).To(MatchLogEntry(chug.LogEntry{ 75 | LogLevel: lager.ERROR, 76 | Source: "chug-test", 77 | Message: "chug-test.chug", 78 | Error: errors.New("some-error"), 79 | Data: 
lager.Data{"some-float": 3.0, "some-string": "foo"}, 80 | })) 81 | 82 | }) 83 | }) 84 | 85 | Context("when parsing an info message with an error", func() { 86 | It("should not take the error out of the data map", func() { 87 | data := lager.Data{"some-float": 3.0, "some-string": "foo", "error": "some-error"} 88 | logger.Info("chug", data) 89 | Expect((<-stream).Log).To(MatchLogEntry(chug.LogEntry{ 90 | LogLevel: lager.INFO, 91 | Source: "chug-test", 92 | Message: "chug-test.chug", 93 | Error: nil, 94 | Data: lager.Data{"some-float": 3.0, "some-string": "foo", "error": "some-error"}, 95 | })) 96 | 97 | }) 98 | }) 99 | 100 | Context("when multiple sessions have been established", func() { 101 | It("should build up the task array appropriately", func() { 102 | firstSession := logger.Session("first-session") 103 | firstSession.Info("encabulate") 104 | nestedSession := firstSession.Session("nested-session-1") 105 | nestedSession.Info("baconize") 106 | firstSession.Info("remodulate") 107 | nestedSession.Info("ergonomize") 108 | nestedSession = firstSession.Session("nested-session-2") 109 | nestedSession.Info("modernify") 110 | 111 | Expect((<-stream).Log).To(MatchLogEntry(chug.LogEntry{ 112 | LogLevel: lager.INFO, 113 | Source: "chug-test", 114 | Message: "chug-test.first-session.encabulate", 115 | Session: "1", 116 | Data: lager.Data{}, 117 | })) 118 | 119 | Expect((<-stream).Log).To(MatchLogEntry(chug.LogEntry{ 120 | LogLevel: lager.INFO, 121 | Source: "chug-test", 122 | Message: "chug-test.first-session.nested-session-1.baconize", 123 | Session: "1.1", 124 | Data: lager.Data{}, 125 | })) 126 | 127 | Expect((<-stream).Log).To(MatchLogEntry(chug.LogEntry{ 128 | LogLevel: lager.INFO, 129 | Source: "chug-test", 130 | Message: "chug-test.first-session.remodulate", 131 | Session: "1", 132 | Data: lager.Data{}, 133 | })) 134 | 135 | Expect((<-stream).Log).To(MatchLogEntry(chug.LogEntry{ 136 | LogLevel: lager.INFO, 137 | Source: "chug-test", 138 | Message: 
"chug-test.first-session.nested-session-1.ergonomize", 139 | Session: "1.1", 140 | Data: lager.Data{}, 141 | })) 142 | 143 | Expect((<-stream).Log).To(MatchLogEntry(chug.LogEntry{ 144 | LogLevel: lager.INFO, 145 | Source: "chug-test", 146 | Message: "chug-test.first-session.nested-session-2.modernify", 147 | Session: "1.2", 148 | Data: lager.Data{}, 149 | })) 150 | 151 | }) 152 | }) 153 | 154 | Context("when the input is formatted with human readable timestamps", func() { 155 | BeforeEach(func() { 156 | logger = lager.NewLogger("chug-test") 157 | logger.RegisterSink(lager.NewPrettySink(pipeWriter, lager.DEBUG)) 158 | }) 159 | 160 | It("should return parsed lager messages", func() { 161 | data := lager.Data{"some-float": 3.0, "some-string": "foo"} 162 | logger.Debug("chug", data) 163 | logger.Info("again", data) 164 | 165 | entry := <-stream 166 | Expect(entry.IsLager).To(BeTrue()) 167 | Expect(entry.Log).To(MatchLogEntry(chug.LogEntry{ 168 | LogLevel: lager.DEBUG, 169 | Source: "chug-test", 170 | Message: "chug-test.chug", 171 | Data: data, 172 | })) 173 | 174 | entry = <-stream 175 | Expect(entry.IsLager).To(BeTrue()) 176 | Expect(entry.Log).To(MatchLogEntry(chug.LogEntry{ 177 | LogLevel: lager.INFO, 178 | Source: "chug-test", 179 | Message: "chug-test.again", 180 | Data: data, 181 | })) 182 | }) 183 | }) 184 | }) 185 | 186 | Context("handling lager JSON that is surrounded by non-JSON", func() { 187 | var input []byte 188 | var entry chug.Entry 189 | 190 | JustBeforeEach(func() { 191 | input = []byte(`[some-component][e]{"timestamp":"1407102779.028711081","source":"chug-test","message":"chug-test.chug","log_level":0,"data":{"some-float":3,"some-string":"foo"}}...some trailing stuff`) 192 | _, err := pipeWriter.Write(input) 193 | Expect(err).NotTo(HaveOccurred()) 194 | _, err = pipeWriter.Write([]byte("\n")) 195 | Expect(err).NotTo(HaveOccurred()) 196 | 197 | Eventually(stream).Should(Receive(&entry)) 198 | }) 199 | 200 | It("should be a lager message", func() { 
201 | Expect(entry.IsLager).To(BeTrue()) 202 | }) 203 | 204 | It("should contain all the data in Raw", func() { 205 | Expect(entry.Raw).To(Equal(input)) 206 | }) 207 | 208 | It("should succesfully parse the lager message", func() { 209 | Expect(entry.Log.Source).To(Equal("chug-test")) 210 | }) 211 | }) 212 | 213 | Context("handling malformed/non-lager data", func() { 214 | var input []byte 215 | var entry chug.Entry 216 | 217 | JustBeforeEach(func() { 218 | _, err := pipeWriter.Write(input) 219 | Expect(err).NotTo(HaveOccurred()) 220 | _, err = pipeWriter.Write([]byte("\n")) 221 | Expect(err).NotTo(HaveOccurred()) 222 | 223 | Eventually(stream).Should(Receive(&entry)) 224 | }) 225 | 226 | Context("when fed a stream of malformed lager messages", func() { 227 | Context("when the timestamp is invalid", func() { 228 | BeforeEach(func() { 229 | input = []byte(`{"timestamp":"tomorrow","source":"chug-test","message":"chug-test.chug","log_level":3,"data":{"some-float":3,"some-string":"foo","error":7}}`) 230 | }) 231 | 232 | itReturnsRawData(entry, input) 233 | }) 234 | 235 | Context("when the error does not parse", func() { 236 | BeforeEach(func() { 237 | input = []byte(`{"timestamp":"1407102779.028711081","source":"chug-test","message":"chug-test.chug","log_level":3,"data":{"some-float":3,"some-string":"foo","error":7}}`) 238 | }) 239 | 240 | itReturnsRawData(entry, input) 241 | }) 242 | 243 | Context("when the trace does not parse", func() { 244 | BeforeEach(func() { 245 | input = []byte(`{"timestamp":"1407102779.028711081","source":"chug-test","message":"chug-test.chug","log_level":3,"data":{"some-float":3,"some-string":"foo","trace":7}}`) 246 | }) 247 | 248 | itReturnsRawData(entry, input) 249 | }) 250 | 251 | Context("when the session does not parse", func() { 252 | BeforeEach(func() { 253 | input = []byte(`{"timestamp":"1407102779.028711081","source":"chug-test","message":"chug-test.chug","log_level":3,"data":{"some-float":3,"some-string":"foo","session":7}}`) 254 | 
}) 255 | 256 | itReturnsRawData(entry, input) 257 | }) 258 | }) 259 | 260 | Context("When fed JSON that is not a lager message at all", func() { 261 | BeforeEach(func() { 262 | input = []byte(`{"source":"chattanooga"}`) 263 | }) 264 | 265 | itReturnsRawData(entry, input) 266 | }) 267 | 268 | Context("When fed none-JSON that is not a lager message at all", func() { 269 | BeforeEach(func() { 270 | input = []byte(`ß`) 271 | }) 272 | 273 | itReturnsRawData(entry, input) 274 | }) 275 | }) 276 | 277 | Context("when writing is complete", func() { 278 | var input []byte 279 | var entry chug.Entry 280 | 281 | BeforeEach(func() { 282 | input = []byte("hello") 283 | }) 284 | 285 | JustBeforeEach(func() { 286 | _, err := pipeWriter.Write(input) 287 | Expect(err).NotTo(HaveOccurred()) 288 | _, err = pipeWriter.Write([]byte("\n")) 289 | Expect(err).NotTo(HaveOccurred()) 290 | Expect(pipeWriter.Close()).To(Succeed()) 291 | 292 | Eventually(stream).Should(Receive(&entry)) 293 | }) 294 | 295 | itReturnsRawData(entry, input) 296 | 297 | It("returns no more messages", func() { 298 | Consistently(stream).ShouldNot(Receive()) 299 | }) 300 | }) 301 | }) 302 | 303 | func itReturnsRawData(entry chug.Entry, input []byte) { 304 | It("returns raw data", func() { 305 | Expect(entry.IsLager).To(BeFalse()) 306 | Expect(entry.Log).To(BeZero()) 307 | Expect(entry.Raw).To(Equal(input)) 308 | }) 309 | } 310 | -------------------------------------------------------------------------------- /chug/match_log_entry_test.go: -------------------------------------------------------------------------------- 1 | package chug_test 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | 7 | "code.cloudfoundry.org/lager/v3/chug" 8 | "github.com/onsi/gomega/format" 9 | "github.com/onsi/gomega/types" 10 | ) 11 | 12 | func MatchLogEntry(entry chug.LogEntry) types.GomegaMatcher { 13 | return &logEntryMatcher{entry} 14 | } 15 | 16 | type logEntryMatcher struct { 17 | entry chug.LogEntry 18 | } 19 | 20 | func (m 
*logEntryMatcher) Match(actual interface{}) (success bool, err error) { 21 | actualEntry, ok := actual.(chug.LogEntry) 22 | if !ok { 23 | return false, fmt.Errorf("MatchLogEntry must be passed a chug.LogEntry. Got:\n%s", format.Object(actual, 1)) 24 | } 25 | 26 | return reflect.DeepEqual(m.entry.Error, actualEntry.Error) && 27 | m.entry.LogLevel == actualEntry.LogLevel && 28 | m.entry.Source == actualEntry.Source && 29 | m.entry.Message == actualEntry.Message && 30 | m.entry.Session == actualEntry.Session && 31 | m.entry.Trace == actualEntry.Trace && 32 | reflect.DeepEqual(m.entry.Data, actualEntry.Data), nil 33 | } 34 | 35 | func (m *logEntryMatcher) FailureMessage(actual interface{}) (message string) { 36 | return format.Message(actual, "to equal", m.entry) 37 | } 38 | 39 | func (m *logEntryMatcher) NegatedFailureMessage(actual interface{}) (message string) { 40 | return format.Message(actual, "not to equal", m.entry) 41 | } 42 | -------------------------------------------------------------------------------- /docs/usage.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Usage 3 | expires_at: never 4 | tags: [diego-release, lager] 5 | --- 6 | 7 | ## Usage 8 | 9 | Instantiate a logger with the name of your component. 10 | 11 | ```go 12 | import ( 13 | "code.cloudfoundry.org/lager/v3" 14 | ) 15 | 16 | logger := lager.NewLogger("my-app") 17 | ``` 18 | 19 | ### Lager and [`log/slog`](https://pkg.go.dev/log/slog) 20 | Lager was written long before Go 1.21 introduced structured logging in the standard library. 21 | There are some wrapper functions for interoperability between Lager and `slog`, 22 | which are only available when using Go 1.21 and higher. 23 | 24 | Lager can be used as an [`slog.Handler`](https://pkg.go.dev/log/slog#Handler) using the `NewHandler()` function: 25 | 26 | ```go 27 | func codeThatAcceptsSlog(l *slog.Logger) { ... 
} 28 | 29 | lagerLogger := lager.NewLogger("my-lager-logger") 30 | 31 | codeThatAcceptsSlog(slog.New(lager.NewHandler(lagerLogger))) 32 | ``` 33 | 34 | An `slog.Logger` can be used as a Lager `Sink` using the `NewSlogSink()` function: 35 | ```go 36 | var *slog.Logger l = codeThatReturnsSlog() 37 | 38 | lagerLogger := lager.NewLogger("my-lager-logger") 39 | 40 | lagerLogger.RegisterSink(lager.NewSlogSink(l)) 41 | ``` 42 | 43 | ### Sinks 44 | 45 | Lager can write logs to a variety of destinations. You can specify the destinations 46 | using Lager sinks: 47 | 48 | To write to an arbitrary `Writer` object: 49 | 50 | ```go 51 | logger.RegisterSink(lager.NewWriterSink(myWriter, lager.INFO)) 52 | ``` 53 | 54 | ### Emitting logs 55 | 56 | Lager supports the usual level-based logging, with an optional argument for arbitrary key-value data. 57 | 58 | ```go 59 | logger.Info("doing-stuff", lager.Data{ 60 | "informative": true, 61 | }) 62 | ``` 63 | 64 | output: 65 | ```json 66 | { "source": "my-app", "message": "doing-stuff", "data": { "informative": true }, "timestamp": 1232345, "log_level": 1 } 67 | ``` 68 | 69 | Error messages also take an `Error` object: 70 | 71 | ```go 72 | logger.Error("failed-to-do-stuff", errors.New("Something went wrong")) 73 | ``` 74 | 75 | output: 76 | ```json 77 | { "source": "my-app", "message": "failed-to-do-stuff", "data": { "error": "Something went wrong" }, "timestamp": 1232345, "log_level": 1 } 78 | ``` 79 | 80 | ### Sessions 81 | 82 | You can avoid repetition of contextual data using 'Sessions': 83 | 84 | ```go 85 | 86 | contextualLogger := logger.Session("my-task", lager.Data{ 87 | "request-id": 5, 88 | }) 89 | 90 | contextualLogger.Info("my-action") 91 | ``` 92 | 93 | output: 94 | 95 | ```json 96 | { "source": "my-app", "message": "my-task.my-action", "data": { "request-id": 5 }, "timestamp": 1232345, "log_level": 1 } 97 | ``` 98 | 99 | -------------------------------------------------------------------------------- /go.mod: 
-------------------------------------------------------------------------------- 1 | module code.cloudfoundry.org/lager/v3 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.23.6 6 | 7 | require ( 8 | github.com/onsi/ginkgo/v2 v2.23.4 9 | github.com/onsi/gomega v1.37.0 10 | github.com/openzipkin/zipkin-go v0.4.3 11 | ) 12 | 13 | require ( 14 | github.com/go-logr/logr v1.4.2 // indirect 15 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 16 | github.com/google/go-cmp v0.7.0 // indirect 17 | github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect 18 | go.uber.org/automaxprocs v1.6.0 // indirect 19 | golang.org/x/net v0.40.0 // indirect 20 | golang.org/x/sys v0.33.0 // indirect 21 | golang.org/x/text v0.25.0 // indirect 22 | golang.org/x/tools v0.33.0 // indirect 23 | gopkg.in/yaml.v3 v3.0.1 // indirect 24 | ) 25 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 4 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 5 | github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= 6 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 7 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 8 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 9 | github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4= 10 | github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= 11 | github.com/kr/pretty v0.1.0 
h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 12 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 13 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 14 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 15 | github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= 16 | github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= 17 | github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= 18 | github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= 19 | github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= 20 | github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= 21 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 22 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 23 | github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= 24 | github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= 25 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 26 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 27 | go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= 28 | go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= 29 | golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= 30 | golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= 31 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= 32 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 33 | golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= 34 | 
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= 35 | golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= 36 | golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= 37 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 38 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 39 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 40 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= 41 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 42 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 43 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 44 | -------------------------------------------------------------------------------- /handler.go: -------------------------------------------------------------------------------- 1 | //go:build go1.21 2 | 3 | package lager 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | "log/slog" 9 | ) 10 | 11 | // NewHandler wraps the logger as a slog.Handler 12 | // The supplied Logger must be a lager.logger 13 | // type created by lager.NewLogger(), otherwise 14 | // it panics. 
15 | // 16 | // Note the following log level conversions: 17 | // 18 | // slog.LevelDebug -> lager.DEBUG 19 | // slog.LevelError -> lager.ERROR 20 | // slog.LevelError -> lager.FATAL 21 | // default -> lager.INFO 22 | func NewHandler(l Logger) slog.Handler { 23 | switch ll := l.(type) { 24 | case *logger: 25 | return &handler{logger: ll} 26 | default: 27 | panic("lager.Logger must be an instance of lager.logger") 28 | } 29 | } 30 | 31 | // Type decorator is used to decorate the attributes with groups and more attributes 32 | type decorator func(map[string]any) map[string]any 33 | 34 | // Type handler is a slog.Handler that wraps a lager logger. 35 | // It uses the logger concrete type rather than the Logger interface 36 | // because it uses methods not available on the interface. 37 | type handler struct { 38 | logger *logger 39 | decorators []decorator 40 | } 41 | 42 | // Enabled always returns true 43 | func (h *handler) Enabled(_ context.Context, _ slog.Level) bool { 44 | return true 45 | } 46 | 47 | // Handle converts a slog.Record into a lager.LogFormat and passes it to every Sink 48 | func (h *handler) Handle(_ context.Context, r slog.Record) error { 49 | log := LogFormat{ 50 | time: r.Time, 51 | Timestamp: formatTimestamp(r.Time), 52 | Source: h.logger.component, 53 | Message: fmt.Sprintf("%s.%s", h.logger.task, r.Message), 54 | LogLevel: toLogLevel(r.Level), 55 | Data: h.logger.baseData(h.decorate(attrFromRecord(r))), 56 | } 57 | 58 | for _, sink := range h.logger.sinks { 59 | sink.Log(log) 60 | } 61 | 62 | return nil 63 | } 64 | 65 | // WithAttrs returns a new slog.Handler which always adds the specified attributes 66 | func (h *handler) WithAttrs(attrs []slog.Attr) slog.Handler { 67 | return &handler{ 68 | logger: h.logger, 69 | decorators: append(h.decorators, attrDecorator(attrs)), 70 | } 71 | } 72 | 73 | // WithGroup returns a new slog.Handler which always logs attributes in the specified group 74 | func (h *handler) WithGroup(name string) slog.Handler 
{ 75 | return &handler{ 76 | logger: h.logger, 77 | decorators: append(h.decorators, groupDecorator(name)), 78 | } 79 | } 80 | 81 | // decorate will decorate a body using the decorators that have been defined 82 | func (h *handler) decorate(body map[string]any) map[string]any { 83 | for i := len(h.decorators) - 1; i >= 0; i-- { // reverse iteration 84 | body = h.decorators[i](body) 85 | } 86 | return body 87 | } 88 | 89 | // attrDecorator returns a decorator for the specified attributes 90 | func attrDecorator(attrs []slog.Attr) decorator { 91 | return func(body map[string]any) map[string]any { 92 | if body == nil { 93 | body = make(map[string]any) 94 | } 95 | processAttrs(attrs, body) 96 | return body 97 | } 98 | } 99 | 100 | // groupDecorator returns a decorator for the specified group name 101 | func groupDecorator(group string) decorator { 102 | return func(body map[string]any) map[string]any { 103 | switch len(body) { 104 | case 0: 105 | return nil 106 | default: 107 | return map[string]any{group: body} 108 | } 109 | } 110 | } 111 | 112 | // attrFromRecord extracts and processes the attributes from a record 113 | func attrFromRecord(r slog.Record) map[string]any { 114 | if r.NumAttrs() == 0 { 115 | return nil 116 | } 117 | 118 | body := make(map[string]any, r.NumAttrs()) 119 | r.Attrs(func(attr slog.Attr) bool { 120 | processAttr(attr, body) 121 | return true 122 | }) 123 | 124 | return body 125 | } 126 | 127 | // processAttrs calls processAttr() for each attribute 128 | func processAttrs(attrs []slog.Attr, target map[string]any) { 129 | for _, attr := range attrs { 130 | processAttr(attr, target) 131 | } 132 | } 133 | 134 | // processAttr adds the attribute to the target with appropriate transformations 135 | func processAttr(attr slog.Attr, target map[string]any) { 136 | rv := attr.Value.Resolve() 137 | 138 | switch { 139 | case rv.Kind() == slog.KindGroup && attr.Key != "": 140 | nt := make(map[string]any) 141 | processAttrs(attr.Value.Group(), nt) 142 | 
target[attr.Key] = nt 143 | case rv.Kind() == slog.KindGroup && attr.Key == "": 144 | processAttrs(attr.Value.Group(), target) 145 | case attr.Key == "": 146 | // skip 147 | default: 148 | if rvAsError, isError := rv.Any().(error); isError { 149 | target[attr.Key] = rvAsError.Error() 150 | } else { 151 | target[attr.Key] = rv.Any() 152 | } 153 | } 154 | } 155 | 156 | // toLogLevel converts from slog levels to lager levels 157 | func toLogLevel(l slog.Level) LogLevel { 158 | switch l { 159 | case slog.LevelDebug: 160 | return DEBUG 161 | case slog.LevelError, slog.LevelWarn: 162 | return ERROR 163 | default: 164 | return INFO 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /handler_test.go: -------------------------------------------------------------------------------- 1 | //go:build go1.21 2 | 3 | package lager_test 4 | 5 | import ( 6 | "code.cloudfoundry.org/lager/v3" 7 | "code.cloudfoundry.org/lager/v3/lagertest" 8 | "fmt" 9 | . "github.com/onsi/ginkgo/v2" 10 | . "github.com/onsi/gomega" 11 | . 
"github.com/onsi/gomega/gstruct" 12 | "log/slog" 13 | "strconv" 14 | "strings" 15 | "testing/slogtest" 16 | "time" 17 | ) 18 | 19 | var _ = Describe("NewHandler", func() { 20 | var ( 21 | s *lagertest.TestSink 22 | l lager.Logger 23 | h slog.Handler 24 | ) 25 | 26 | BeforeEach(func() { 27 | s = lagertest.NewTestSink() 28 | l = lager.NewLogger("test") 29 | l.RegisterSink(s) 30 | 31 | h = lager.NewHandler(l) 32 | }) 33 | 34 | It("logs a message", func() { 35 | slog.New(h).Info("foo", "bar", "baz") 36 | logs := s.Logs() 37 | Expect(logs).To(ConsistOf(MatchFields(IgnoreExtras, Fields{ 38 | "Source": Equal("test"), 39 | "Message": Equal("test.foo"), 40 | "Data": SatisfyAll( 41 | HaveLen(1), 42 | HaveKeyWithValue("bar", "baz"), 43 | ), 44 | "LogLevel": Equal(lager.INFO), 45 | }))) 46 | }) 47 | 48 | It("logs a debug message", func() { 49 | slog.New(h).Debug("foo", "bar", 3, slog.Int("baz", 42)) 50 | logs := s.Logs() 51 | Expect(logs).To(ConsistOf(MatchFields(IgnoreExtras, Fields{ 52 | "Source": Equal("test"), 53 | "Message": Equal("test.foo"), 54 | "Data": SatisfyAll( 55 | HaveLen(2), 56 | HaveKeyWithValue("bar", float64(3)), 57 | HaveKeyWithValue("baz", float64(42)), 58 | ), 59 | "LogLevel": Equal(lager.DEBUG), 60 | }))) 61 | }) 62 | 63 | It("logs an error message", func() { 64 | slog.New(h).Error("foo", "error", fmt.Errorf("boom")) 65 | logs := s.Logs() 66 | Expect(logs).To(ConsistOf(MatchFields(IgnoreExtras, Fields{ 67 | "Source": Equal("test"), 68 | "Message": Equal("test.foo"), 69 | "Data": SatisfyAll( 70 | HaveLen(1), 71 | HaveKeyWithValue("error", "boom"), 72 | ), 73 | "LogLevel": Equal(lager.ERROR), 74 | }))) 75 | }) 76 | 77 | It("behaves like a slog.NewHandler", func() { 78 | results := func() (result []map[string]any) { 79 | for _, l := range s.Logs() { 80 | d := l.Data 81 | 82 | t := parseTimestamp(l.Timestamp) 83 | if !t.IsZero() { 84 | d["time"] = t 85 | } 86 | 87 | d["level"] = l.LogLevel 88 | d["msg"] = strings.TrimPrefix(l.Message, "test.") 89 | result = 
append(result, d) 90 | } 91 | return result 92 | } 93 | 94 | Expect(slogtest.TestHandler(h, results)).To(Succeed()) 95 | }) 96 | }) 97 | 98 | // parseTimestamp turns a lager timestamp back into a time.Time 99 | // with a special case for the formatting of time.Time{} because 100 | // there is a test that check time.Time{} is ignored as a time value 101 | func parseTimestamp(input string) time.Time { 102 | GinkgoHelper() 103 | 104 | // This is what time.Time{} gets formatted as 105 | if input == "-6795364578.871345520" { 106 | return time.Time{} 107 | } 108 | 109 | f64, err := strconv.ParseFloat(input, 64) 110 | Expect(err).NotTo(HaveOccurred()) 111 | 112 | secs := int64(f64) 113 | nanos := int64((f64 - float64(secs)) * 1e9) 114 | return time.Unix(secs, nanos) 115 | } 116 | -------------------------------------------------------------------------------- /internal/truncate/package.go: -------------------------------------------------------------------------------- 1 | package truncate // import "code.cloudfoundry.org/lager/v3/internal/truncate" 2 | -------------------------------------------------------------------------------- /internal/truncate/truncate.go: -------------------------------------------------------------------------------- 1 | package truncate 2 | 3 | import ( 4 | "reflect" 5 | ) 6 | 7 | // Value recursively walks through the value provided by `v` and truncates 8 | // any strings longer than `maxLength`. 
9 | // Example: 10 | // 11 | // type foobar struct{A string; B string} 12 | // truncate.Value(foobar{A:"foo",B:"bar"}, 20) == foobar{A:"foo",B:"bar"} 13 | // truncate.Value(foobar{A:strings.Repeat("a", 25),B:"bar"}, 20) == foobar{A:"aaaaaaaa-(truncated)",B:"bar"} 14 | func Value(v interface{}, maxLength int) interface{} { 15 | rv := reflect.ValueOf(v) 16 | tv := truncateValue(rv, maxLength) 17 | if rv != tv { 18 | return tv.Interface() 19 | } 20 | return v 21 | } 22 | 23 | func truncateValue(rv reflect.Value, maxLength int) reflect.Value { 24 | if maxLength <= 0 { 25 | return rv 26 | } 27 | 28 | switch rv.Kind() { 29 | case reflect.Interface: 30 | return truncateInterface(rv, maxLength) 31 | case reflect.Ptr: 32 | return truncatePtr(rv, maxLength) 33 | case reflect.Struct: 34 | return truncateStruct(rv, maxLength) 35 | case reflect.Map: 36 | return truncateMap(rv, maxLength) 37 | case reflect.Array: 38 | return truncateArray(rv, maxLength) 39 | case reflect.Slice: 40 | return truncateSlice(rv, maxLength) 41 | case reflect.String: 42 | return truncateString(rv, maxLength) 43 | } 44 | return rv 45 | } 46 | 47 | func truncateInterface(rv reflect.Value, maxLength int) reflect.Value { 48 | tv := truncateValue(rv.Elem(), maxLength) 49 | if tv != rv.Elem() { 50 | return tv 51 | } 52 | return rv 53 | } 54 | 55 | func truncatePtr(rv reflect.Value, maxLength int) reflect.Value { 56 | tv := truncateValue(rv.Elem(), maxLength) 57 | if rv.Elem() != tv { 58 | tvp := reflect.New(rv.Elem().Type()) 59 | tvp.Elem().Set(tv) 60 | return tvp 61 | } 62 | return rv 63 | } 64 | 65 | func truncateStruct(rv reflect.Value, maxLength int) reflect.Value { 66 | numFields := rv.NumField() 67 | fields := make([]reflect.Value, numFields) 68 | changed := false 69 | for i := 0; i < numFields; i++ { 70 | fv := rv.Field(i) 71 | tv := truncateValue(fv, maxLength) 72 | if fv != tv { 73 | changed = true 74 | } 75 | fields[i] = tv 76 | } 77 | if changed { 78 | nv := reflect.New(rv.Type()).Elem() 79 | for 
i, fv := range fields { 80 | nv.Field(i).Set(fv) 81 | } 82 | return nv 83 | } 84 | return rv 85 | } 86 | 87 | func truncateMap(rv reflect.Value, maxLength int) reflect.Value { 88 | keys := rv.MapKeys() 89 | truncatedMap := make(map[reflect.Value]reflect.Value) 90 | changed := false 91 | for _, key := range keys { 92 | mapV := rv.MapIndex(key) 93 | tv := truncateValue(mapV, maxLength) 94 | if mapV != tv { 95 | changed = true 96 | } 97 | truncatedMap[key] = tv 98 | } 99 | if changed { 100 | nv := reflect.MakeMap(rv.Type()) 101 | for k, v := range truncatedMap { 102 | nv.SetMapIndex(k, v) 103 | } 104 | return nv 105 | } 106 | return rv 107 | 108 | } 109 | 110 | func truncateArray(rv reflect.Value, maxLength int) reflect.Value { 111 | return truncateList(rv, maxLength, func(size int) reflect.Value { 112 | arrayType := reflect.ArrayOf(size, rv.Index(0).Type()) 113 | return reflect.New(arrayType).Elem() 114 | }) 115 | } 116 | 117 | func truncateSlice(rv reflect.Value, maxLength int) reflect.Value { 118 | return truncateList(rv, maxLength, func(size int) reflect.Value { 119 | return reflect.MakeSlice(rv.Type(), size, size) 120 | }) 121 | } 122 | 123 | func truncateList(rv reflect.Value, maxLength int, newList func(size int) reflect.Value) reflect.Value { 124 | size := rv.Len() 125 | truncatedValues := make([]reflect.Value, size) 126 | changed := false 127 | for i := 0; i < size; i++ { 128 | elemV := rv.Index(i) 129 | tv := truncateValue(elemV, maxLength) 130 | if elemV != tv { 131 | changed = true 132 | } 133 | truncatedValues[i] = tv 134 | } 135 | if changed { 136 | nv := newList(size) 137 | for i, v := range truncatedValues { 138 | nv.Index(i).Set(v) 139 | } 140 | return nv 141 | } 142 | return rv 143 | } 144 | 145 | func truncateString(rv reflect.Value, maxLength int) reflect.Value { 146 | s := String(rv.String(), maxLength) 147 | if s != rv.String() { 148 | return reflect.ValueOf(s) 149 | } 150 | return rv 151 | 152 | } 153 | 154 | const truncated = "-(truncated)" 155 
| const lenTruncated = len(truncated) 156 | 157 | // String truncates long strings from the middle, but leaves strings shorter 158 | // than `maxLength` untouched. 159 | // If the string is shorter than the string "-(truncated)" and the string 160 | // exceeds `maxLength`, the output will not be truncated. 161 | // Example: 162 | // 163 | // truncate.String(strings.Repeat("a", 25), 20) == "aaaaaaaa-(truncated)" 164 | // truncate.String("foobar", 20) == "foobar" 165 | // truncate.String("foobar", 5) == "foobar" 166 | func String(s string, maxLength int) string { 167 | if maxLength <= 0 || len(s) < lenTruncated || len(s) <= maxLength { 168 | return s 169 | } 170 | 171 | strBytes := []byte(s) 172 | truncatedBytes := []byte(truncated) 173 | prefixLength := maxLength - lenTruncated 174 | prefix := strBytes[0:prefixLength] 175 | return string(append(prefix, truncatedBytes...)) 176 | } 177 | -------------------------------------------------------------------------------- /internal/truncate/truncate_suite_test.go: -------------------------------------------------------------------------------- 1 | package truncate_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | func TestTruncate(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Truncate Suite") 13 | } 14 | -------------------------------------------------------------------------------- /internal/truncate/truncate_test.go: -------------------------------------------------------------------------------- 1 | package truncate_test 2 | 3 | import ( 4 | "unsafe" 5 | 6 | "code.cloudfoundry.org/lager/v3/internal/truncate" 7 | . "github.com/onsi/ginkgo/v2" 8 | . 
"github.com/onsi/gomega" 9 | ) 10 | 11 | const longString = "aaaaaaaaaaaaaaaaaaaaaaaaa" 12 | const expectedTruncatedString = "aaaaaaaa-(truncated)" 13 | 14 | var _ = Describe("Truncate", func() { 15 | Describe("String", func() { 16 | It("does not truncate at all if maxLength is 0", func() { 17 | Expect(truncate.String(longString, 0)).To(Equal(longString)) 18 | }) 19 | It("does not truncate strings that are equal to the length limit", func() { 20 | Expect(truncate.String("foobar", 6)).To(Equal("foobar")) 21 | }) 22 | It("does not truncate strings that are under the length limit", func() { 23 | Expect(truncate.String("foobar", 10)).To(Equal("foobar")) 24 | }) 25 | It("does not truncate any strings under 12 characters long even if the string exceeds maxLength", func() { 26 | Expect(truncate.String("foobar", 5)).To(Equal("foobar")) 27 | }) 28 | It("truncates the end of long strings that are above the length limit to the length limit", func() { 29 | Expect(truncate.String(longString, 20)).To(Equal(expectedTruncatedString)) 30 | }) 31 | Describe("boundary conditions", func() { 32 | It("truncates when the length limit is exactly 12", func() { 33 | Expect(truncate.String(longString, 12)).To(Equal("-(truncated)")) 34 | }) 35 | It("truncates when the length limit is exactly 12+1", func() { 36 | Expect(truncate.String(longString, 13)).To(Equal("a-(truncated)")) 37 | }) 38 | }) 39 | }) 40 | Describe("Value", func() { 41 | Context("strings", func() { 42 | It("returns a new truncated string", func() { 43 | v := longString 44 | outV := truncate.Value(v, 20) 45 | Expect(outV.(string)).To(Equal(expectedTruncatedString)) 46 | }) 47 | It("leaves the reflect.Value of untruncated strings untouched", func() { 48 | v := "foobar" 49 | outV := truncate.Value(v, 20) 50 | Expect(outV).To(BeIdenticalTo(v)) 51 | }) 52 | }) 53 | Context("non-container values", func() { 54 | It("leaves values with a type that could not possibly contain a string untouched", func() { 55 | v := 64 56 | outV := 
truncate.Value(v, 20) 57 | Expect(outV).To(BeIdenticalTo(v)) 58 | }) 59 | }) 60 | Context("structs", func() { 61 | type dummyStruct struct { 62 | A string 63 | B string 64 | } 65 | type nestedStruct struct { 66 | A dummyStruct 67 | B string 68 | } 69 | It("truncates long strings inside of structs", func() { 70 | v := dummyStruct{A: "foobar", B: longString} 71 | outV := truncate.Value(v, 20) 72 | Expect(outV.(dummyStruct)).To(Equal(dummyStruct{A: "foobar", B: expectedTruncatedString})) 73 | }) 74 | It("leaves structs without long strings untouched", func() { 75 | v := dummyStruct{A: "foobar", B: "barbaz"} 76 | outV := truncate.Value(v, 20) 77 | Expect(outV).To(BeIdenticalTo(v)) 78 | }) 79 | It("truncates long strings inside of nested structs", func() { 80 | v := nestedStruct{A: dummyStruct{A: "foobar", B: longString}, B: "barbaz"} 81 | outV := truncate.Value(v, 20) 82 | Expect(outV.(nestedStruct)).To(Equal(nestedStruct{A: dummyStruct{A: "foobar", B: expectedTruncatedString}, B: "barbaz"})) 83 | }) 84 | It("leaves nested structs without long strings untouched", func() { 85 | v := nestedStruct{A: dummyStruct{A: "foobar", B: "bazbaq"}, B: "barbaz"} 86 | outV := truncate.Value(v, 20) 87 | Expect(outV).To(BeIdenticalTo(v)) 88 | }) 89 | }) 90 | Context("pointers", func() { 91 | Context("of strings", func() { 92 | It("truncates long strings referenced by pointers", func() { 93 | s := longString 94 | v := &s 95 | origPtr := uintptr(unsafe.Pointer(v)) 96 | outV := truncate.Value(v, 20) 97 | tsp := outV.(*string) 98 | Expect(uintptr(unsafe.Pointer(tsp))).ToNot(Equal(origPtr)) 99 | Expect(*tsp).To(Equal(expectedTruncatedString)) 100 | }) 101 | It("returns the same pointer if there are no strings truncated", func() { 102 | s := "foobar" 103 | v := &s 104 | origPtr := uintptr(unsafe.Pointer(v)) 105 | outV := truncate.Value(v, 20) 106 | tsp := outV.(*string) 107 | Expect(uintptr(unsafe.Pointer(tsp))).To(Equal(origPtr)) 108 | }) 109 | }) 110 | Context("of structs", func() { 111 | 
type dummyStruct struct { 112 | A string 113 | B string 114 | } 115 | 116 | It("truncates long strings in structs referenced by pointers", func() { 117 | v := &dummyStruct{A: "foobar", B: longString} 118 | origPtr := uintptr(unsafe.Pointer(v)) 119 | outV := truncate.Value(v, 20) 120 | tsp := outV.(*dummyStruct) 121 | Expect(uintptr(unsafe.Pointer(tsp))).ToNot(Equal(origPtr)) 122 | Expect(tsp).To(Equal(&dummyStruct{A: "foobar", B: expectedTruncatedString})) 123 | }) 124 | It("returns the same pointer if there are no strings truncated", func() { 125 | v := &dummyStruct{A: "foobar", B: "barbaz"} 126 | origPtr := uintptr(unsafe.Pointer(v)) 127 | outV := truncate.Value(v, 20) 128 | tsp := outV.(*dummyStruct) 129 | Expect(uintptr(unsafe.Pointer(tsp))).To(Equal(origPtr)) 130 | }) 131 | 132 | }) 133 | }) 134 | Context("maps", func() { 135 | type dummyStruct struct { 136 | A string 137 | B string 138 | } 139 | It("truncates long strings inside of maps", func() { 140 | v := map[string]interface{}{ 141 | "struct": dummyStruct{A: "foobar", B: longString}, 142 | "foo": longString, 143 | } 144 | outV := truncate.Value(v, 20) 145 | Expect(v).ToNot(Equal(outV)) 146 | Expect(outV.(map[string]interface{})).To(Equal(map[string]interface{}{ 147 | "struct": dummyStruct{A: "foobar", B: expectedTruncatedString}, 148 | "foo": expectedTruncatedString, 149 | })) 150 | }) 151 | It("leaves maps without long strings untouched", func() { 152 | v := map[string]interface{}{ 153 | "struct": dummyStruct{A: "foobar", B: "bar"}, 154 | "foo": "bar", 155 | } 156 | outV := truncate.Value(v, 20) 157 | Expect(outV).To(Equal(v)) 158 | }) 159 | }) 160 | Context("arrays", func() { 161 | It("truncates long strings inside of slices", func() { 162 | v := [2]string{"foobar", longString} 163 | outV := truncate.Value(v, 20) 164 | Expect(v).ToNot(Equal(outV)) 165 | Expect(outV.([2]string)).To(Equal([2]string{"foobar", expectedTruncatedString})) 166 | }) 167 | It("leaves arrays without long strings untouched", 
func() { 168 | v := [2]string{"foobar", "bazbaq"} 169 | outV := truncate.Value(v, 20) 170 | Expect(outV).To(Equal(v)) 171 | }) 172 | }) 173 | Context("slices", func() { 174 | It("truncates long strings inside of slices", func() { 175 | v := []string{"foobar", longString} 176 | outV := truncate.Value(v, 20) 177 | Expect(v).ToNot(Equal(outV)) 178 | Expect(outV.([]string)).To(Equal([]string{"foobar", expectedTruncatedString})) 179 | }) 180 | It("leaves slices without long strings untouched", func() { 181 | v := []string{"foobar", "bazbaq"} 182 | outV := truncate.Value(v, 20) 183 | Expect(outV).To(Equal(v)) 184 | }) 185 | }) 186 | }) 187 | }) 188 | -------------------------------------------------------------------------------- /json_redacter.go: -------------------------------------------------------------------------------- 1 | package lager 2 | 3 | import ( 4 | "encoding/json" 5 | "regexp" 6 | ) 7 | 8 | const awsAccessKeyIDPattern = `AKIA[A-Z0-9]{16}` 9 | const awsSecretAccessKeyPattern = `KEY["']?\s*(?::|=>|=)\s*["']?[A-Z0-9/\+=]{40}["']?` 10 | const cryptMD5Pattern = `\$1\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{22}` 11 | const cryptSHA256Pattern = `\$5\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{43}` 12 | const cryptSHA512Pattern = `\$6\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{86}` 13 | const privateKeyHeaderPattern = `-----BEGIN(.*)PRIVATE KEY-----` 14 | 15 | type JSONRedacter struct { 16 | keyMatchers []*regexp.Regexp 17 | valueMatchers []*regexp.Regexp 18 | } 19 | 20 | func NewJSONRedacter(keyPatterns []string, valuePatterns []string) (*JSONRedacter, error) { 21 | if keyPatterns == nil { 22 | keyPatterns = []string{"[Pp]wd", "[Pp]ass"} 23 | } 24 | if valuePatterns == nil { 25 | valuePatterns = DefaultValuePatterns() 26 | } 27 | ret := &JSONRedacter{} 28 | for _, v := range keyPatterns { 29 | r, err := regexp.Compile(v) 30 | if err != nil { 31 | return nil, err 32 | } 33 | ret.keyMatchers = append(ret.keyMatchers, r) 34 | } 35 | for _, v := range valuePatterns { 36 | r, err := regexp.Compile(v) 
		if err != nil {
			return nil, err
		}
		ret.valueMatchers = append(ret.valueMatchers, r)
	}
	return ret, nil
}

// Redact unmarshals the given JSON document, redacts any values whose keys
// match a key pattern or whose string values match a value pattern, and
// returns the re-marshalled result. Invalid input JSON is routed through
// handleError rather than returned verbatim.
func (r JSONRedacter) Redact(data []byte) []byte {
	var jsonBlob interface{}
	err := json.Unmarshal(data, &jsonBlob)
	if err != nil {
		return handleError(err)
	}
	r.redactValue(&jsonBlob)

	data, err = json.Marshal(jsonBlob)
	if err != nil {
		return handleError(err)
	}

	return data
}

// redactValue walks a decoded JSON value in place: arrays and objects are
// recursed into, and a string matching any value pattern is replaced with
// "*REDACTED*". The (possibly updated) value is also returned so callers
// can assign it back directly.
func (r JSONRedacter) redactValue(data *interface{}) interface{} {
	if data == nil {
		return data
	}

	if a, ok := (*data).([]interface{}); ok {
		r.redactArray(&a)
	} else if m, ok := (*data).(map[string]interface{}); ok {
		r.redactObject(&m)
	} else if s, ok := (*data).(string); ok {
		for _, m := range r.valueMatchers {
			if m.MatchString(s) {
				(*data) = "*REDACTED*"
				break
			}
		}
	}
	return (*data)
}

// redactArray applies redactValue to every element of the array, mutating
// the elements in place.
func (r JSONRedacter) redactArray(data *[]interface{}) {
	for i := range *data {
		r.redactValue(&((*data)[i]))
	}
}

// redactObject replaces the value of any key matching a key pattern with
// "*REDACTED*"; values whose keys did not match are redacted recursively
// by value instead.
func (r JSONRedacter) redactObject(data *map[string]interface{}) {
	for k, v := range *data {
		for _, m := range r.keyMatchers {
			if m.MatchString(k) {
				(*data)[k] = "*REDACTED*"
				break
			}
		}
		// Only recurse if the key matchers did not already redact this entry.
		if (*data)[k] != "*REDACTED*" {
			(*data)[k] = r.redactValue(&v)
		}
	}
}

// handleError converts a JSON serialisation failure into a loggable JSON
// document describing the error; any other error is treated as a
// programmer error and panics.
func handleError(err error) []byte {
	var content []byte
	if _, ok := err.(*json.UnsupportedTypeError); ok {
		data := map[string]interface{}{"lager serialisation error": err.Error()}
		content, err = json.Marshal(data)
	}
	if err != nil {
		panic(err)
	}
	return content
}

// DefaultValuePatterns returns the default regular expressions used to
// recognise secret values: AWS access/secret keys, crypt-format password
// hashes (MD5, SHA256, SHA512) and PEM private key headers.
func DefaultValuePatterns() []string {
	return []string{awsAccessKeyIDPattern, awsSecretAccessKeyPattern, cryptMD5Pattern, cryptSHA256Pattern, cryptSHA512Pattern, privateKeyHeaderPattern}
}
-------------------------------------------------------------------------------- /json_redacter_test.go: -------------------------------------------------------------------------------- 1 | package lager_test 2 | 3 | import ( 4 | "code.cloudfoundry.org/lager/v3" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | var _ = Describe("JSON Redacter", func() { 11 | var ( 12 | resp []byte 13 | err error 14 | jsonRedacter *lager.JSONRedacter 15 | ) 16 | 17 | BeforeEach(func() { 18 | jsonRedacter, err = lager.NewJSONRedacter(nil, []string{`amazonkey`, `AKIA[A-Z0-9]{16}`}) 19 | Expect(err).NotTo(HaveOccurred()) 20 | }) 21 | 22 | Context("when called with normal (non-secret) json", func() { 23 | BeforeEach(func() { 24 | resp = jsonRedacter.Redact([]byte(`{"foo":"bar"}`)) 25 | }) 26 | It("should return the same text", func() { 27 | Expect(resp).To(Equal([]byte(`{"foo":"bar"}`))) 28 | }) 29 | }) 30 | 31 | Context("when called with secrets inside the json", func() { 32 | BeforeEach(func() { 33 | resp = jsonRedacter.Redact([]byte(`{"foo":"fooval","password":"secret!","something":"AKIA1234567890123456"}`)) 34 | }) 35 | 36 | It("should redact the secrets", func() { 37 | Expect(resp).To(Equal([]byte(`{"foo":"fooval","password":"*REDACTED*","something":"*REDACTED*"}`))) 38 | }) 39 | }) 40 | 41 | Context("when a password flag is specified", func() { 42 | BeforeEach(func() { 43 | resp = jsonRedacter.Redact([]byte(`{"abcPwd":"abcd","password":"secret!","userpass":"fooval"}`)) 44 | }) 45 | 46 | It("should redact the secrets", func() { 47 | Expect(resp).To(Equal([]byte(`{"abcPwd":"*REDACTED*","password":"*REDACTED*","userpass":"*REDACTED*"}`))) 48 | }) 49 | }) 50 | 51 | Context("when called with an array of objects with a secret", func() { 52 | BeforeEach(func() { 53 | resp = jsonRedacter.Redact([]byte(`[{"foo":"fooval","password":"secret!","something":"amazonkey"}]`)) 54 | }) 55 | 56 | It("should redact the secrets", func() { 57 | 
Expect(resp).To(Equal([]byte(`[{"foo":"fooval","password":"*REDACTED*","something":"*REDACTED*"}]`))) 58 | }) 59 | }) 60 | 61 | Context("when called with a private key inside an array of strings", func() { 62 | BeforeEach(func() { 63 | resp = jsonRedacter.Redact([]byte(`["foo", "secret!", "amazonkey"]`)) 64 | }) 65 | 66 | It("should redact the amazonkey", func() { 67 | Expect(resp).To(Equal([]byte(`["foo","secret!","*REDACTED*"]`))) 68 | }) 69 | }) 70 | 71 | Context("when called with a private key inside a nested object", func() { 72 | BeforeEach(func() { 73 | resp = jsonRedacter.Redact([]byte(`{"foo":"fooval", "secret_stuff": {"password": "secret!"}}`)) 74 | }) 75 | 76 | It("should redact the amazonkey", func() { 77 | Expect(resp).To(Equal([]byte(`{"foo":"fooval","secret_stuff":{"password":"*REDACTED*"}}`))) 78 | }) 79 | }) 80 | 81 | It("DefaultValuePatterns returns the default set of value patterns", func() { 82 | Expect(lager.DefaultValuePatterns()).To(ContainElement(`AKIA[A-Z0-9]{16}`)) 83 | Expect(lager.DefaultValuePatterns()).To(ContainElement(`KEY["']?\s*(?::|=>|=)\s*["']?[A-Z0-9/\+=]{40}["']?`)) 84 | Expect(lager.DefaultValuePatterns()).To(ContainElement(`\$1\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{22}`)) 85 | Expect(lager.DefaultValuePatterns()).To(ContainElement(`\$5\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{43}`)) 86 | Expect(lager.DefaultValuePatterns()).To(ContainElement(`\$6\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{86}`)) 87 | Expect(lager.DefaultValuePatterns()).To(ContainElement(`-----BEGIN(.*)PRIVATE KEY-----`)) 88 | }) 89 | }) 90 | -------------------------------------------------------------------------------- /lager_suite_test.go: -------------------------------------------------------------------------------- 1 | package lager_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo/v2" 5 | . 
"github.com/onsi/gomega" 6 | "runtime" 7 | 8 | "testing" 9 | ) 10 | 11 | const MaxThreads = 100 12 | 13 | var _ = BeforeSuite(func() { 14 | runtime.GOMAXPROCS(MaxThreads) 15 | }) 16 | 17 | func TestLager(t *testing.T) { 18 | RegisterFailHandler(Fail) 19 | RunSpecs(t, "Lager Suite") 20 | } 21 | -------------------------------------------------------------------------------- /lagerctx/context.go: -------------------------------------------------------------------------------- 1 | // Package lagerctx provides convenience when using Lager with the context 2 | // feature of the standard library. 3 | package lagerctx 4 | 5 | import ( 6 | "context" 7 | "net/http" 8 | 9 | "code.cloudfoundry.org/lager/v3" 10 | ) 11 | 12 | // NewContext returns a derived context containing the logger. 13 | func NewContext(parent context.Context, logger lager.Logger) context.Context { 14 | return context.WithValue(parent, contextKey{}, logger) 15 | } 16 | 17 | // FromContext returns the logger contained in the context, or an inert logger 18 | // that will not log anything. 19 | func FromContext(ctx context.Context) lager.Logger { 20 | l, ok := ctx.Value(contextKey{}).(lager.Logger) 21 | if !ok { 22 | return &discardLogger{} 23 | } 24 | 25 | return l 26 | } 27 | 28 | // WithSession returns a new logger that has, for convenience, had a new 29 | // session created on it. 30 | func WithSession(ctx context.Context, task string, data ...lager.Data) lager.Logger { 31 | return FromContext(ctx).Session(task, data...) 32 | } 33 | 34 | // WithData returns a new logger that has, for convenience, had new data added 35 | // to on it. 36 | func WithData(ctx context.Context, data lager.Data) lager.Logger { 37 | return FromContext(ctx).WithData(data) 38 | } 39 | 40 | // contextKey is used to retrieve the logger from the context. 41 | type contextKey struct{} 42 | 43 | // discardLogger is an inert logger. 
type discardLogger struct{}

// Every logging method is a deliberate no-op so that callers of
// FromContext can log unconditionally even when no logger was stored
// in the context.
func (*discardLogger) Debug(string, ...lager.Data)        {}
func (*discardLogger) Info(string, ...lager.Data)         {}
func (*discardLogger) Error(string, error, ...lager.Data) {}
func (*discardLogger) Fatal(string, error, ...lager.Data) {}
func (*discardLogger) RegisterSink(lager.Sink)            {}
func (*discardLogger) SessionName() string                { return "" }

// Session, WithData and WithTraceInfo return the receiver itself: the
// logger holds no state, so every derived logger is equally inert.
func (d *discardLogger) Session(string, ...lager.Data) lager.Logger { return d }
func (d *discardLogger) WithData(lager.Data) lager.Logger           { return d }
func (d *discardLogger) WithTraceInfo(*http.Request) lager.Logger   { return d }
-------------------------------------------------------------------------------- /lagerctx/context_test.go: --------------------------------------------------------------------------------
package lagerctx_test

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gbytes"

	"code.cloudfoundry.org/lager/v3"
	"code.cloudfoundry.org/lager/v3/lagerctx"
	"code.cloudfoundry.org/lager/v3/lagertest"
)

var _ = Describe("Lager Context", func() {
	It("can store loggers inside contexts", func() {
		l := lagertest.NewTestLogger("lagerctx")
		ctx := lagerctx.NewContext(context.Background(), l)

		logger := lagerctx.FromContext(ctx)
		logger.Info("from-a-context")

		Expect(l.LogMessages()).To(HaveLen(1))
	})

	It("can add a session to the logger in the context", func() {
		l := lagertest.NewTestLogger("lagerctx")
		ctx := lagerctx.NewContext(context.Background(), l)

		logger := lagerctx.WithSession(ctx, "new-session", lager.Data{
			"bespoke-data": "",
		})
		logger.Info("from-a-context")

		Expect(l).To(gbytes.Say("new-session"))
		Expect(l).To(gbytes.Say("bespoke-data"))
	})

	It("can add data to the logger in the context", func() {
		l :=
lagertest.NewTestLogger("lagerctx") 41 | ctx := lagerctx.NewContext(context.Background(), l) 42 | 43 | logger := lagerctx.WithData(ctx, lager.Data{ 44 | "bespoke-data": "", 45 | }) 46 | logger.Info("from-a-context") 47 | 48 | Expect(l).To(gbytes.Say("bespoke-data")) 49 | }) 50 | 51 | It("will be fine if there is no logger in the context", func() { 52 | logger := lagerctx.FromContext(context.Background()) 53 | logger.Info("from-a-context") 54 | }) 55 | }) 56 | -------------------------------------------------------------------------------- /lagerctx/lagerctx_suite_test.go: -------------------------------------------------------------------------------- 1 | package lagerctx_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo/v2" 5 | . "github.com/onsi/gomega" 6 | 7 | "testing" 8 | ) 9 | 10 | func TestLagerctx(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Lagerctx Suite") 13 | } 14 | -------------------------------------------------------------------------------- /lagerflags/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /lagerflags/NOTICE: -------------------------------------------------------------------------------- 1 | CF Lager 2 | 3 | Copyright (c) 2014-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------- /lagerflags/README.md: --------------------------------------------------------------------------------
lagerflags
========

**Note**: This package should be imported as `code.cloudfoundry.org/lager/v3/lagerflags` (matching the import path used in the example below).

This library provides a flag called `logLevel`. The logger returned by
`lagerflags.New()` will use the value of that flag to determine the log level.

To use, simply import this package in your `main.go` and call `lagerflags.New(COMPONENT_NAME)` to get a logger.
10 | 11 | For example: 12 | 13 | ```golang 14 | package main 15 | 16 | import ( 17 | "flag" 18 | "fmt" 19 | 20 | "code.cloudfoundry.org/lager/v3/lagerflags" 21 | "code.cloudfoundry.org/lager/v3" 22 | ) 23 | 24 | func main() { 25 | lagerflags.AddFlags(flag.CommandLine) 26 | 27 | flag.Parse() 28 | 29 | logger, reconfigurableSink := lagerflags.New("my-component") 30 | logger.Info("starting") 31 | 32 | // Display the current minimum log level 33 | fmt.Printf("Current log level is ") 34 | switch reconfigurableSink.GetMinLevel() { 35 | case lager.DEBUG: 36 | fmt.Println("debug") 37 | case lager.INFO: 38 | fmt.Println("info") 39 | case lager.ERROR: 40 | fmt.Println("error") 41 | case lager.FATAL: 42 | fmt.Println("fatal") 43 | } 44 | 45 | // Change the minimum log level dynamically 46 | reconfigurableSink.SetMinLevel(lager.ERROR) 47 | logger.Debug("will-not-log") 48 | } 49 | ``` 50 | 51 | Running the program above as `go run main.go --logLevel debug` will generate the following output: 52 | 53 | ``` 54 | {"timestamp":"1464388983.540486336","source":"my-component","message":"my-component.starting","log_level":1,"data":{}} 55 | Current log level is debug 56 | ``` 57 | -------------------------------------------------------------------------------- /lagerflags/integration/integration_suite_test.go: -------------------------------------------------------------------------------- 1 | package main_test 2 | 3 | import ( 4 | "os/exec" 5 | "time" 6 | 7 | . "github.com/onsi/ginkgo/v2" 8 | . 
"github.com/onsi/gomega" 9 | "github.com/onsi/gomega/gexec" 10 | 11 | "testing" 12 | ) 13 | 14 | var testBinary string 15 | 16 | func TestIntegration(t *testing.T) { 17 | RegisterFailHandler(Fail) 18 | RunSpecs(t, "Integration Suite") 19 | } 20 | 21 | var _ = BeforeSuite(func() { 22 | var err error 23 | testBinary, err = gexec.Build("code.cloudfoundry.org/lager/v3/lagerflags/integration", "-race") 24 | Expect(err).NotTo(HaveOccurred()) 25 | }) 26 | 27 | var _ = AfterSuite(func() { 28 | gexec.CleanupBuildArtifacts() 29 | }) 30 | 31 | var _ = Describe("CF-Lager", func() { 32 | It("provides flags", func() { 33 | session, err := gexec.Start(exec.Command(testBinary, "--help"), GinkgoWriter, GinkgoWriter) 34 | Expect(err).NotTo(HaveOccurred()) 35 | session.Wait(3 * time.Second) 36 | Expect(session.Err.Contents()).To(ContainSubstring("-logLevel")) 37 | }) 38 | 39 | It("pipes output to stdout", func() { 40 | session, err := gexec.Start(exec.Command(testBinary), GinkgoWriter, GinkgoWriter) 41 | Expect(err).NotTo(HaveOccurred()) 42 | session.Wait() 43 | 44 | Expect(session.Out.Contents()).To(ContainSubstring("info")) 45 | }) 46 | 47 | It("defaults to the info log level", func() { 48 | session, err := gexec.Start(exec.Command(testBinary), GinkgoWriter, GinkgoWriter) 49 | Expect(err).NotTo(HaveOccurred()) 50 | session.Wait() 51 | 52 | Expect(session.Out.Contents()).NotTo(ContainSubstring("debug")) 53 | Expect(session.Out.Contents()).To(ContainSubstring("info")) 54 | Expect(session.Out.Contents()).To(ContainSubstring("error")) 55 | Expect(session.Out.Contents()).To(ContainSubstring("fatal")) 56 | }) 57 | 58 | It("honors the passed-in log level", func() { 59 | session, err := gexec.Start(exec.Command(testBinary, "-logLevel=debug"), GinkgoWriter, GinkgoWriter) 60 | Expect(err).NotTo(HaveOccurred()) 61 | session.Wait() 62 | 63 | Expect(session.Out.Contents()).To(ContainSubstring("debug")) 64 | Expect(session.Out.Contents()).To(ContainSubstring("info")) 65 | 
Expect(session.Out.Contents()).To(ContainSubstring("error")) 66 | Expect(session.Out.Contents()).To(ContainSubstring("fatal")) 67 | }) 68 | }) 69 | -------------------------------------------------------------------------------- /lagerflags/integration/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | 7 | "code.cloudfoundry.org/lager/v3" 8 | "code.cloudfoundry.org/lager/v3/lagerflags" 9 | ) 10 | 11 | func main() { 12 | lagerflags.AddFlags(flag.CommandLine) 13 | flag.Parse() 14 | 15 | logger, _ := lagerflags.New("cf-lager-integration") 16 | 17 | logger.Debug("component-does-action", lager.Data{"debug-detail": "foo"}) 18 | logger.Info("another-component-action", lager.Data{"info-detail": "bar"}) 19 | logger.Error("component-failed-something", errors.New("error"), lager.Data{"error-detail": "baz"}) 20 | logger.Fatal("component-failed-badly", errors.New("fatal"), lager.Data{"fatal-detail": "quux"}) 21 | } 22 | -------------------------------------------------------------------------------- /lagerflags/integration/package.go: -------------------------------------------------------------------------------- 1 | package main // import "code.cloudfoundry.org/lager/v3/lagerflags/integration" 2 | -------------------------------------------------------------------------------- /lagerflags/lagerflags.go: -------------------------------------------------------------------------------- 1 | package lagerflags 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | 8 | "code.cloudfoundry.org/lager/v3" 9 | ) 10 | 11 | const ( 12 | DEBUG = "debug" 13 | INFO = "info" 14 | ERROR = "error" 15 | FATAL = "fatal" 16 | ) 17 | 18 | type LagerConfig struct { 19 | LogLevel string `json:"log_level,omitempty"` 20 | RedactSecrets bool `json:"redact_secrets,omitempty"` 21 | RedactPatterns []string `json:"redact_patterns,omitempty"` 22 | TimeFormat TimeFormat `json:"time_format"` 23 | 
MaxDataStringLength int `json:"max_data_string_length"` 24 | } 25 | 26 | func DefaultLagerConfig() LagerConfig { 27 | return LagerConfig{ 28 | LogLevel: string(INFO), 29 | RedactSecrets: false, 30 | RedactPatterns: nil, 31 | TimeFormat: FormatUnixEpoch, 32 | MaxDataStringLength: 0, 33 | } 34 | } 35 | 36 | var minLogLevel string 37 | var redactSecrets bool 38 | var redactPatterns RedactPatterns 39 | var timeFormat TimeFormat 40 | 41 | func AddFlags(flagSet *flag.FlagSet) { 42 | flagSet.StringVar( 43 | &minLogLevel, 44 | "logLevel", 45 | string(INFO), 46 | "log level: debug, info, error or fatal", 47 | ) 48 | flagSet.BoolVar( 49 | &redactSecrets, 50 | "redactSecrets", 51 | false, 52 | "use a redacting log sink to scrub sensitive values from data being logged", 53 | ) 54 | flagSet.Var( 55 | &redactPatterns, 56 | "redactPatterns", 57 | "Regex patterns to use to determine sensitive values for redaction", 58 | ) 59 | flagSet.Var( 60 | &timeFormat, 61 | "timeFormat", 62 | `Format for timestamp in component logs. 
Valid values are "unix-epoch" and "rfc3339".`, 63 | ) 64 | } 65 | 66 | func ConfigFromFlags() LagerConfig { 67 | return LagerConfig{ 68 | LogLevel: minLogLevel, 69 | RedactSecrets: redactSecrets, 70 | RedactPatterns: redactPatterns, 71 | TimeFormat: timeFormat, 72 | } 73 | } 74 | 75 | func New(component string) (lager.Logger, *lager.ReconfigurableSink) { 76 | return NewFromConfig(component, ConfigFromFlags()) 77 | } 78 | 79 | func NewFromSink(component string, sink lager.Sink) (lager.Logger, *lager.ReconfigurableSink) { 80 | return newLogger(component, minLogLevel, sink) 81 | } 82 | 83 | func NewFromConfig(component string, config LagerConfig) (lager.Logger, *lager.ReconfigurableSink) { 84 | var sink lager.Sink 85 | 86 | if config.TimeFormat == FormatRFC3339 { 87 | sink = lager.NewPrettySink(os.Stdout, lager.DEBUG) 88 | } else { 89 | sink = lager.NewWriterSink(os.Stdout, lager.DEBUG) 90 | } 91 | 92 | if config.RedactSecrets { 93 | var err error 94 | sink, err = lager.NewRedactingSink(sink, nil, config.RedactPatterns) 95 | if err != nil { 96 | panic(err) 97 | } 98 | } 99 | 100 | if config.MaxDataStringLength > 0 { 101 | sink = lager.NewTruncatingSink(sink, config.MaxDataStringLength) 102 | } 103 | 104 | return newLogger(component, config.LogLevel, sink) 105 | } 106 | 107 | func newLogger(component, minLogLevel string, inSink lager.Sink) (lager.Logger, *lager.ReconfigurableSink) { 108 | var minLagerLogLevel lager.LogLevel 109 | 110 | switch minLogLevel { 111 | case DEBUG: 112 | minLagerLogLevel = lager.DEBUG 113 | case INFO: 114 | minLagerLogLevel = lager.INFO 115 | case ERROR: 116 | minLagerLogLevel = lager.ERROR 117 | case FATAL: 118 | minLagerLogLevel = lager.FATAL 119 | default: 120 | panic(fmt.Errorf("unknown log level: %s", minLogLevel)) 121 | } 122 | 123 | logger := lager.NewLogger(component) 124 | 125 | sink := lager.NewReconfigurableSink(inSink, minLagerLogLevel) 126 | logger.RegisterSink(sink) 127 | 128 | return logger, sink 129 | } 130 | 
-------------------------------------------------------------------------------- /lagerflags/lagerflags_suite_test.go: -------------------------------------------------------------------------------- 1 | package lagerflags_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo/v2" 5 | . "github.com/onsi/gomega" 6 | 7 | "testing" 8 | ) 9 | 10 | func TestLager(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Lager Flags Suite") 13 | } 14 | -------------------------------------------------------------------------------- /lagerflags/lagerflags_test.go: -------------------------------------------------------------------------------- 1 | package lagerflags_test 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | "io" 7 | "os" 8 | "strings" 9 | 10 | . "github.com/onsi/ginkgo/v2" 11 | . "github.com/onsi/gomega" 12 | "github.com/onsi/gomega/gbytes" 13 | 14 | "code.cloudfoundry.org/lager/v3" 15 | "code.cloudfoundry.org/lager/v3/lagerflags" 16 | ) 17 | 18 | // TODO: Allow sink output to be redirected to a dependency injected 19 | // io.Writer 20 | func replaceStdoutWithBuf() (*gbytes.Buffer, *os.File) { 21 | buf := gbytes.NewBuffer() 22 | readPipe, writePipe, _ := os.Pipe() 23 | origStdout := os.Stdout 24 | os.Stdout = writePipe 25 | go func() { 26 | p := make([]byte, 20) 27 | for { 28 | n, err := readPipe.Read(p) 29 | if err == io.EOF { 30 | break 31 | } 32 | 33 | _, err = buf.Write(p[:n]) 34 | if err != nil { 35 | return 36 | } 37 | } 38 | }() 39 | return buf, origStdout 40 | } 41 | 42 | var _ = Describe("Lagerflags", func() { 43 | Context("when parsing flags", func() { 44 | var flagSet *flag.FlagSet 45 | 46 | BeforeEach(func() { 47 | flagSet = flag.NewFlagSet("test", flag.ContinueOnError) 48 | flagSet.SetOutput(io.Discard) 49 | lagerflags.AddFlags(flagSet) 50 | }) 51 | 52 | Describe("ConfigFromFlags", func() { 53 | It("creates the correct Lager config from parsed flags", func() { 54 | err := flagSet.Parse([]string{"-logLevel", "debug", "-redactSecrets", 
"-timeFormat", "unix-epoch"}) 55 | Expect(err).NotTo(HaveOccurred()) 56 | 57 | c := lagerflags.ConfigFromFlags() 58 | Expect(c).To(Equal(lagerflags.LagerConfig{ 59 | LogLevel: string(lagerflags.DEBUG), 60 | RedactSecrets: true, 61 | TimeFormat: lagerflags.FormatUnixEpoch, 62 | MaxDataStringLength: 0, 63 | })) 64 | }) 65 | }) 66 | 67 | Describe("New", func() { 68 | It("creates a logger that respects the log level from parsed flags", func() { 69 | err := flagSet.Parse([]string{"-logLevel", "error"}) 70 | Expect(err).NotTo(HaveOccurred()) 71 | 72 | buf, origStdout := replaceStdoutWithBuf() 73 | defer func() { 74 | os.Stdout = origStdout 75 | }() 76 | 77 | logger, _ := lagerflags.New("test") 78 | logger.Info("hello") 79 | Consistently(buf).ShouldNot(gbytes.Say("hello")) 80 | logger.Error("foo", errors.New("kaboom")) 81 | Eventually(buf).Should(gbytes.Say("kaboom")) 82 | }) 83 | 84 | It("creates a logger that respects the time format settings from parsed flags", func() { 85 | err := flagSet.Parse([]string{"-timeFormat", "rfc3339"}) 86 | Expect(err).NotTo(HaveOccurred()) 87 | 88 | buf, origStdout := replaceStdoutWithBuf() 89 | defer func() { 90 | os.Stdout = origStdout 91 | }() 92 | 93 | logger, _ := lagerflags.New("test") 94 | logger.Info("hello") 95 | Eventually(buf).Should(gbytes.Say(`"timestamp":"(\d+)-(\d+)-(\d+)[Tt](\d+):(\d+):(\d+).(\d+)Z`)) 96 | }) 97 | }) 98 | }) 99 | 100 | Describe("NewFromConfig", func() { 101 | It("creates a logger that respects the log level", func() { 102 | buf, origStdout := replaceStdoutWithBuf() 103 | defer func() { 104 | os.Stdout = origStdout 105 | }() 106 | 107 | logger, _ := lagerflags.NewFromConfig("test", lagerflags.LagerConfig{ 108 | LogLevel: lagerflags.ERROR, 109 | }) 110 | 111 | logger.Info("hello") 112 | Consistently(buf).ShouldNot(gbytes.Say("hello")) 113 | logger.Error("foo", errors.New("kaboom")) 114 | Eventually(buf).Should(gbytes.Say("kaboom")) 115 | }) 116 | 117 | It("creates a logger that respects the time format 
settings", func() { 118 | buf, origStdout := replaceStdoutWithBuf() 119 | defer func() { 120 | os.Stdout = origStdout 121 | }() 122 | 123 | logger, _ := lagerflags.NewFromConfig("test", lagerflags.LagerConfig{ 124 | LogLevel: lagerflags.INFO, 125 | TimeFormat: lagerflags.FormatRFC3339, 126 | }) 127 | 128 | logger.Info("hello") 129 | Eventually(buf).Should(gbytes.Say(`"timestamp":"(\d+)-(\d+)-(\d+)[Tt](\d+):(\d+):(\d+).(\d+)Z`)) 130 | }) 131 | 132 | It("creates a logger that redacts secrets", func() { 133 | buf, origStdout := replaceStdoutWithBuf() 134 | defer func() { 135 | os.Stdout = origStdout 136 | }() 137 | 138 | logger, _ := lagerflags.NewFromConfig("test", lagerflags.LagerConfig{ 139 | LogLevel: lagerflags.INFO, 140 | RedactSecrets: true, 141 | }) 142 | 143 | logger.Info("hello", lager.Data{"password": "data"}) 144 | Eventually(buf).Should(gbytes.Say(`"password":"\*REDACTED\*"`)) 145 | }) 146 | 147 | It("creates a logger that redacts secrets using the supplied redaction regex", func() { 148 | buf, origStdout := replaceStdoutWithBuf() 149 | defer func() { 150 | os.Stdout = origStdout 151 | }() 152 | 153 | logger, _ := lagerflags.NewFromConfig("test", lagerflags.LagerConfig{ 154 | LogLevel: lagerflags.INFO, 155 | RedactSecrets: true, 156 | RedactPatterns: []string{"bar"}, 157 | }) 158 | 159 | logger.Info("hello", lager.Data{"foo": "bar"}) 160 | Eventually(buf).Should(gbytes.Say(`"foo":"\*REDACTED\*"`)) 161 | }) 162 | 163 | It("creates a logger that truncates long strings", func() { 164 | buf, origStdout := replaceStdoutWithBuf() 165 | defer func() { 166 | os.Stdout = origStdout 167 | }() 168 | 169 | logger, _ := lagerflags.NewFromConfig("test", lagerflags.LagerConfig{ 170 | LogLevel: lagerflags.INFO, 171 | MaxDataStringLength: 20, 172 | }) 173 | 174 | logger.Info("hello", lager.Data{"password": strings.Repeat("a", 25)}) 175 | Eventually(buf).Should(gbytes.Say(`"password":"aaaaaaaa-\(truncated\)"`)) 176 | }) 177 | 178 | It("panics if the log level is unknown", 
// RedactPatterns accumulates regular-expression patterns supplied via a
// repeated command-line flag. It implements the flag.Value interface.
type RedactPatterns []string

// String renders the collected patterns as a single comma-separated list.
func (p *RedactPatterns) String() string {
	var out strings.Builder
	for i, pattern := range *p {
		if i > 0 {
			out.WriteByte(',')
		}
		out.WriteString(pattern)
	}
	return out.String()
}

// Set appends one pattern per flag occurrence; it never fails.
func (p *RedactPatterns) Set(value string) error {
	*p = append(*p, value)
	return nil
}
// TimeFormat selects how log timestamps are rendered.
type TimeFormat int

const (
	FormatUnixEpoch TimeFormat = iota // fractional seconds since the Unix epoch
	FormatRFC3339                     // RFC 3339 timestamps
)

// MarshalJSON encodes the TimeFormat as its quoted string name and
// rejects values outside the known range.
func (t TimeFormat) MarshalJSON() ([]byte, error) {
	if FormatUnixEpoch <= t && t <= FormatRFC3339 {
		return []byte(`"` + t.String() + `"`), nil
	}
	return nil, fmt.Errorf("invalid TimeFormat: %d", t)
}

// Get returns the TimeFormat value itself.
// NOTE(review): flag.Getter's method is Get() with no arguments, so this
// signature does not actually satisfy that interface — confirm intent
// before changing the exported signature.
func (t TimeFormat) Get(s string) interface{} { return t }

// Set implements the flag.Value interface, accepting both the string
// names ("unix-epoch", "rfc3339") and their numeric forms ("0", "1").
func (t *TimeFormat) Set(s string) error {
	switch s {
	case "unix-epoch", "0":
		*t = FormatUnixEpoch
	case "rfc3339", "1":
		*t = FormatRFC3339
	default:
		return errors.New(`invalid TimeFormat: "` + s + `"`)
	}
	return nil
}

// UnmarshalJSON accepts either the quoted string names or the bare
// integer forms; JSON "null" leaves the value unchanged.
func (t *TimeFormat) UnmarshalJSON(data []byte) error {
	if string(data) == "null" {
		return nil
	}
	// unquote
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1]
	}
	return t.Set(string(data))
}

// String returns the flag-compatible name of the format, or "invalid"
// for out-of-range values.
func (t TimeFormat) String() string {
	switch t {
	case FormatUnixEpoch:
		return "unix-epoch"
	case FormatRFC3339:
		return "rfc3339"
	}
	return "invalid"
}
"github.com/onsi/gomega" 12 | ) 13 | 14 | var _ = Describe("TimeFormat", func() { 15 | const InvalidFormat = lagerflags.TimeFormat(123456) 16 | 17 | It("MarshalJSON", func() { 18 | b, err := json.Marshal(lagerflags.FormatUnixEpoch) 19 | Expect(err).NotTo(HaveOccurred()) 20 | Expect(b).To(MatchJSON(`"unix-epoch"`)) 21 | 22 | b, err = json.Marshal(lagerflags.FormatRFC3339) 23 | Expect(err).NotTo(HaveOccurred()) 24 | Expect(b).To(MatchJSON(`"rfc3339"`)) 25 | 26 | _, err = json.Marshal(InvalidFormat) 27 | Expect(err).To(HaveOccurred()) 28 | }) 29 | 30 | It("UnmarshalJSON", func() { 31 | var testCases = []struct { 32 | Format lagerflags.TimeFormat 33 | Data string 34 | Valid bool 35 | }{ 36 | { 37 | Format: lagerflags.FormatUnixEpoch, 38 | Data: `"unix-epoch"`, 39 | Valid: true, 40 | }, 41 | { 42 | Format: lagerflags.FormatRFC3339, 43 | Data: `"rfc3339"`, 44 | Valid: true, 45 | }, 46 | // integer values 47 | { 48 | Format: lagerflags.FormatUnixEpoch, 49 | Data: "0", 50 | Valid: true, 51 | }, 52 | { 53 | Format: lagerflags.FormatRFC3339, 54 | Data: "1", 55 | Valid: true, 56 | }, 57 | // invalid 58 | { 59 | Format: InvalidFormat, 60 | Data: "", 61 | Valid: false, 62 | }, 63 | { 64 | Format: lagerflags.FormatRFC3339, 65 | Data: `"RFC3339"`, 66 | Valid: false, 67 | }, 68 | } 69 | for _, test := range testCases { 70 | var tf lagerflags.TimeFormat 71 | err := json.Unmarshal([]byte(test.Data), &tf) 72 | if !test.Valid { 73 | Expect(err).To(HaveOccurred()) 74 | continue 75 | } 76 | Expect(err).NotTo(HaveOccurred()) 77 | Expect(tf).To(Equal(test.Format)) 78 | } 79 | }) 80 | 81 | Context("TimeFormat FlagSet", func() { 82 | var flagSet *flag.FlagSet 83 | var timeFormat lagerflags.TimeFormat 84 | 85 | BeforeEach(func() { 86 | timeFormat = InvalidFormat 87 | flagSet = flag.NewFlagSet("test", flag.ContinueOnError) 88 | flagSet.Usage = func() {} 89 | flagSet.SetOutput(nopWriter{}) 90 | flagSet.Var( 91 | &timeFormat, 92 | "timeFormat", 93 | `Format for timestamp in component logs. 
Valid values are "unix-epoch" and "rfc3339".`, 94 | ) 95 | }) 96 | 97 | testValidTimeFormatFlag := func(expected lagerflags.TimeFormat, argument string) { 98 | Expect(flagSet.Parse([]string{"-timeFormat", argument})).To(Succeed()) 99 | Expect(timeFormat).To(Equal(expected), 100 | fmt.Sprintf("Valid TimeFormat flag (expect: %q): %q", expected, argument)) 101 | } 102 | 103 | testInvalidTimeFormatFlag := func(argument string) { 104 | Expect(flagSet.Parse([]string{"-timeFormat", argument})).ToNot(Succeed(), 105 | fmt.Sprintf("Invalid TimeFormat flag: %q", argument)) 106 | } 107 | 108 | It("parses valid flags", func() { 109 | testValidTimeFormatFlag(lagerflags.FormatUnixEpoch, "unix-epoch") 110 | testValidTimeFormatFlag(lagerflags.FormatUnixEpoch, "0") 111 | testValidTimeFormatFlag(lagerflags.FormatRFC3339, "rfc3339") 112 | testValidTimeFormatFlag(lagerflags.FormatRFC3339, "1") 113 | }) 114 | 115 | It("errors when the flag is invalid", func() { 116 | testInvalidTimeFormatFlag("UNIX-EPOCH") 117 | testInvalidTimeFormatFlag("RFC3339") 118 | testInvalidTimeFormatFlag("") 119 | testInvalidTimeFormatFlag(strconv.Itoa(int(InvalidFormat))) 120 | }) 121 | }) 122 | }) 123 | 124 | type nopWriter struct{} 125 | 126 | func (nopWriter) Write(p []byte) (int, error) { return len(p), nil } 127 | -------------------------------------------------------------------------------- /lagertest/test_sink.go: -------------------------------------------------------------------------------- 1 | package lagertest 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "io" 8 | "sync" 9 | 10 | "github.com/onsi/ginkgo/v2" 11 | "github.com/onsi/gomega/gbytes" 12 | 13 | "code.cloudfoundry.org/lager/v3" 14 | "code.cloudfoundry.org/lager/v3/lagerctx" 15 | ) 16 | 17 | type TestLogger struct { 18 | lager.Logger 19 | *TestSink 20 | } 21 | 22 | type TestSink struct { 23 | writeLock *sync.Mutex 24 | lager.Sink 25 | buffer *gbytes.Buffer 26 | Errors []error 27 | } 28 | 29 | func 
NewTestLogger(component string) *TestLogger { 30 | logger := lager.NewLogger(component) 31 | 32 | testSink := NewTestSink() 33 | logger.RegisterSink(testSink) 34 | logger.RegisterSink(lager.NewWriterSink(ginkgo.GinkgoWriter, lager.DEBUG)) 35 | 36 | return &TestLogger{logger, testSink} 37 | } 38 | 39 | func NewContext(parent context.Context, name string) context.Context { 40 | return lagerctx.NewContext(parent, NewTestLogger(name)) 41 | } 42 | 43 | func NewTestSink() *TestSink { 44 | buffer := gbytes.NewBuffer() 45 | 46 | return &TestSink{ 47 | writeLock: new(sync.Mutex), 48 | Sink: lager.NewWriterSink(buffer, lager.DEBUG), 49 | buffer: buffer, 50 | } 51 | } 52 | 53 | func (s *TestSink) Buffer() *gbytes.Buffer { 54 | return s.buffer 55 | } 56 | 57 | func (s *TestSink) Logs() []lager.LogFormat { 58 | logs := []lager.LogFormat{} 59 | 60 | decoder := json.NewDecoder(bytes.NewBuffer(s.buffer.Contents())) 61 | for { 62 | var log lager.LogFormat 63 | if err := decoder.Decode(&log); err == io.EOF { 64 | return logs 65 | } else if err != nil { 66 | panic(err) 67 | } 68 | logs = append(logs, log) 69 | } 70 | } 71 | 72 | func (s *TestSink) LogMessages() []string { 73 | logs := s.Logs() 74 | messages := make([]string, 0, len(logs)) 75 | for _, log := range logs { 76 | messages = append(messages, log.Message) 77 | } 78 | return messages 79 | } 80 | 81 | func (s *TestSink) Log(log lager.LogFormat) { 82 | s.writeLock.Lock() 83 | defer s.writeLock.Unlock() 84 | 85 | if log.Error != nil { 86 | s.Errors = append(s.Errors, log.Error) 87 | } 88 | s.Sink.Log(log) 89 | } 90 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | package lager 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "runtime" 7 | "strings" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/openzipkin/zipkin-go/idgenerator" 12 | "github.com/openzipkin/zipkin-go/model" 13 | ) 14 | 15 | const ( 
16 | StackTraceBufferSize = 1024 * 100 17 | RequestIdHeader = "X-Vcap-Request-Id" 18 | ) 19 | 20 | type Logger interface { 21 | RegisterSink(Sink) 22 | Session(task string, data ...Data) Logger 23 | SessionName() string 24 | Debug(action string, data ...Data) 25 | Info(action string, data ...Data) 26 | Error(action string, err error, data ...Data) 27 | Fatal(action string, err error, data ...Data) 28 | WithData(Data) Logger 29 | WithTraceInfo(*http.Request) Logger 30 | } 31 | 32 | type logger struct { 33 | component string 34 | task string 35 | sinks []Sink 36 | sessionID string 37 | nextSession uint32 38 | data Data 39 | idGenerator idgenerator.IDGenerator 40 | } 41 | 42 | func NewLogger(component string) Logger { 43 | return &logger{ 44 | component: component, 45 | task: component, 46 | sinks: []Sink{}, 47 | data: Data{}, 48 | idGenerator: idgenerator.NewRandom128(), 49 | } 50 | } 51 | 52 | func (l *logger) RegisterSink(sink Sink) { 53 | l.sinks = append(l.sinks, sink) 54 | } 55 | 56 | func (l *logger) SessionName() string { 57 | return l.task 58 | } 59 | 60 | func (l *logger) Session(task string, data ...Data) Logger { 61 | sid := atomic.AddUint32(&l.nextSession, 1) 62 | 63 | var sessionIDstr string 64 | 65 | if l.sessionID != "" { 66 | sessionIDstr = fmt.Sprintf("%s.%d", l.sessionID, sid) 67 | } else { 68 | sessionIDstr = fmt.Sprintf("%d", sid) 69 | } 70 | 71 | return &logger{ 72 | component: l.component, 73 | task: fmt.Sprintf("%s.%s", l.task, task), 74 | sinks: l.sinks, 75 | sessionID: sessionIDstr, 76 | data: l.baseData(data...), 77 | idGenerator: l.idGenerator, 78 | } 79 | } 80 | 81 | func (l *logger) WithData(data Data) Logger { 82 | return &logger{ 83 | component: l.component, 84 | task: l.task, 85 | sinks: l.sinks, 86 | sessionID: l.sessionID, 87 | data: l.baseData(data), 88 | idGenerator: l.idGenerator, 89 | } 90 | } 91 | 92 | func (l *logger) WithTraceInfo(req *http.Request) Logger { 93 | traceIDHeader := req.Header.Get(RequestIdHeader) 94 | if 
traceIDHeader == "" { 95 | return l.WithData(nil) 96 | } 97 | traceHex := strings.Replace(traceIDHeader, "-", "", -1) 98 | traceID, err := model.TraceIDFromHex(traceHex) 99 | if err != nil { 100 | return l.WithData(nil) 101 | } 102 | 103 | spanID := l.idGenerator.SpanID(model.TraceID{}) 104 | return l.WithData(Data{"trace-id": traceID.String(), "span-id": spanID.String()}) 105 | } 106 | 107 | func (l *logger) Debug(action string, data ...Data) { 108 | t := time.Now().UTC() 109 | log := LogFormat{ 110 | time: t, 111 | Timestamp: formatTimestamp(t), 112 | Source: l.component, 113 | Message: fmt.Sprintf("%s.%s", l.task, action), 114 | LogLevel: DEBUG, 115 | Data: l.baseData(data...), 116 | } 117 | 118 | for _, sink := range l.sinks { 119 | sink.Log(log) 120 | } 121 | } 122 | 123 | func (l *logger) Info(action string, data ...Data) { 124 | t := time.Now().UTC() 125 | log := LogFormat{ 126 | time: t, 127 | Timestamp: formatTimestamp(t), 128 | Source: l.component, 129 | Message: fmt.Sprintf("%s.%s", l.task, action), 130 | LogLevel: INFO, 131 | Data: l.baseData(data...), 132 | } 133 | 134 | for _, sink := range l.sinks { 135 | sink.Log(log) 136 | } 137 | } 138 | 139 | func (l *logger) Error(action string, err error, data ...Data) { 140 | logData := l.baseData(data...) 141 | 142 | if err != nil { 143 | logData["error"] = err.Error() 144 | } 145 | 146 | t := time.Now().UTC() 147 | log := LogFormat{ 148 | time: t, 149 | Timestamp: formatTimestamp(t), 150 | Source: l.component, 151 | Message: fmt.Sprintf("%s.%s", l.task, action), 152 | LogLevel: ERROR, 153 | Data: logData, 154 | Error: err, 155 | } 156 | 157 | for _, sink := range l.sinks { 158 | sink.Log(log) 159 | } 160 | } 161 | 162 | func (l *logger) Fatal(action string, err error, data ...Data) { 163 | logData := l.baseData(data...) 
164 | 165 | stackTrace := make([]byte, StackTraceBufferSize) 166 | stackSize := runtime.Stack(stackTrace, false) 167 | stackTrace = stackTrace[:stackSize] 168 | 169 | if err != nil { 170 | logData["error"] = err.Error() 171 | } 172 | 173 | logData["trace"] = string(stackTrace) 174 | 175 | t := time.Now().UTC() 176 | log := LogFormat{ 177 | time: t, 178 | Timestamp: formatTimestamp(t), 179 | Source: l.component, 180 | Message: fmt.Sprintf("%s.%s", l.task, action), 181 | LogLevel: FATAL, 182 | Data: logData, 183 | Error: err, 184 | } 185 | 186 | for _, sink := range l.sinks { 187 | sink.Log(log) 188 | } 189 | 190 | panic(err) 191 | } 192 | 193 | func (l *logger) baseData(givenData ...Data) Data { 194 | data := Data{} 195 | 196 | for k, v := range l.data { 197 | data[k] = v 198 | } 199 | 200 | if len(givenData) > 0 { 201 | for _, dataArg := range givenData { 202 | for key, val := range dataArg { 203 | data[key] = val 204 | } 205 | } 206 | } 207 | 208 | if l.sessionID != "" { 209 | data["session"] = l.sessionID 210 | } 211 | 212 | return data 213 | } 214 | 215 | func formatTimestamp(t time.Time) string { 216 | return fmt.Sprintf("%.9f", float64(t.UnixNano())/1e9) 217 | } 218 | -------------------------------------------------------------------------------- /logger_test.go: -------------------------------------------------------------------------------- 1 | package lager_test 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net/http" 7 | "runtime" 8 | "strconv" 9 | "time" 10 | 11 | "code.cloudfoundry.org/lager/v3" 12 | "code.cloudfoundry.org/lager/v3/lagertest" 13 | 14 | . "github.com/onsi/ginkgo/v2" 15 | . 
"github.com/onsi/gomega" 16 | ) 17 | 18 | var _ = Describe("Logger", func() { 19 | var logger lager.Logger 20 | var testSink *lagertest.TestSink 21 | 22 | var component = "my-component" 23 | var action = "my-action" 24 | var logData = lager.Data{ 25 | "foo": "bar", 26 | "a-number": 7, 27 | } 28 | var anotherLogData = lager.Data{ 29 | "baz": "quux", 30 | "b-number": 43, 31 | } 32 | 33 | BeforeEach(func() { 34 | logger = lager.NewLogger(component) 35 | testSink = lagertest.NewTestSink() 36 | logger.RegisterSink(testSink) 37 | }) 38 | 39 | var TestCommonLogFeatures = func(level lager.LogLevel) { 40 | var log lager.LogFormat 41 | 42 | BeforeEach(func() { 43 | log = testSink.Logs()[0] 44 | }) 45 | 46 | It("writes a log to the sink", func() { 47 | Expect(testSink.Logs()).To(HaveLen(1)) 48 | }) 49 | 50 | It("records the source component", func() { 51 | Expect(log.Source).To(Equal(component)) 52 | }) 53 | 54 | It("outputs a properly-formatted message", func() { 55 | Expect(log.Message).To(Equal(fmt.Sprintf("%s.%s", component, action))) 56 | }) 57 | 58 | It("has a timestamp", func() { 59 | expectedTime := float64(time.Now().UnixNano()) / 1e9 60 | parsedTimestamp, err := strconv.ParseFloat(log.Timestamp, 64) 61 | Expect(err).NotTo(HaveOccurred()) 62 | Expect(parsedTimestamp).To(BeNumerically("~", expectedTime, 1.0)) 63 | }) 64 | 65 | It("sets the proper output level", func() { 66 | Expect(log.LogLevel).To(Equal(level)) 67 | }) 68 | } 69 | 70 | var TestLogData = func() { 71 | var log lager.LogFormat 72 | 73 | BeforeEach(func() { 74 | log = testSink.Logs()[0] 75 | }) 76 | 77 | It("data contains custom user data", func() { 78 | Expect(log.Data["foo"]).To(Equal("bar")) 79 | Expect(log.Data["a-number"]).To(BeNumerically("==", 7)) 80 | Expect(log.Data["baz"]).To(Equal("quux")) 81 | Expect(log.Data["b-number"]).To(BeNumerically("==", 43)) 82 | }) 83 | } 84 | 85 | Describe("Session", func() { 86 | var session lager.Logger 87 | 88 | BeforeEach(func() { 89 | session = 
logger.Session("sub-action") 90 | }) 91 | 92 | Describe("the returned logger", func() { 93 | JustBeforeEach(func() { 94 | session.Debug("some-debug-action", lager.Data{"level": "debug"}) 95 | session.Info("some-info-action", lager.Data{"level": "info"}) 96 | session.Error("some-error-action", errors.New("oh no!"), lager.Data{"level": "error"}) 97 | 98 | defer func() { 99 | recover() //nolint:errcheck 100 | }() 101 | 102 | session.Fatal("some-fatal-action", errors.New("oh no!"), lager.Data{"level": "fatal"}) 103 | }) 104 | 105 | It("logs with a shared session id in the data", func() { 106 | Expect(testSink.Logs()[0].Data["session"]).To(Equal("1")) 107 | Expect(testSink.Logs()[1].Data["session"]).To(Equal("1")) 108 | Expect(testSink.Logs()[2].Data["session"]).To(Equal("1")) 109 | Expect(testSink.Logs()[3].Data["session"]).To(Equal("1")) 110 | }) 111 | 112 | It("logs with the task added to the message", func() { 113 | Expect(testSink.Logs()[0].Message).To(Equal("my-component.sub-action.some-debug-action")) 114 | Expect(testSink.Logs()[1].Message).To(Equal("my-component.sub-action.some-info-action")) 115 | Expect(testSink.Logs()[2].Message).To(Equal("my-component.sub-action.some-error-action")) 116 | Expect(testSink.Logs()[3].Message).To(Equal("my-component.sub-action.some-fatal-action")) 117 | }) 118 | 119 | It("logs with the original data", func() { 120 | Expect(testSink.Logs()[0].Data["level"]).To(Equal("debug")) 121 | Expect(testSink.Logs()[1].Data["level"]).To(Equal("info")) 122 | Expect(testSink.Logs()[2].Data["level"]).To(Equal("error")) 123 | Expect(testSink.Logs()[3].Data["level"]).To(Equal("fatal")) 124 | }) 125 | 126 | Context("with data", func() { 127 | BeforeEach(func() { 128 | session = logger.Session("sub-action", lager.Data{"foo": "bar"}) 129 | }) 130 | 131 | It("logs with the data added to the message", func() { 132 | Expect(testSink.Logs()[0].Data["foo"]).To(Equal("bar")) 133 | Expect(testSink.Logs()[1].Data["foo"]).To(Equal("bar")) 134 | 
Expect(testSink.Logs()[2].Data["foo"]).To(Equal("bar")) 135 | Expect(testSink.Logs()[3].Data["foo"]).To(Equal("bar")) 136 | }) 137 | 138 | It("keeps the original data", func() { 139 | Expect(testSink.Logs()[0].Data["level"]).To(Equal("debug")) 140 | Expect(testSink.Logs()[1].Data["level"]).To(Equal("info")) 141 | Expect(testSink.Logs()[2].Data["level"]).To(Equal("error")) 142 | Expect(testSink.Logs()[3].Data["level"]).To(Equal("fatal")) 143 | }) 144 | }) 145 | 146 | Context("with another session", func() { 147 | BeforeEach(func() { 148 | session = logger.Session("next-sub-action") 149 | }) 150 | 151 | It("logs with a shared session id in the data", func() { 152 | Expect(testSink.Logs()[0].Data["session"]).To(Equal("2")) 153 | Expect(testSink.Logs()[1].Data["session"]).To(Equal("2")) 154 | Expect(testSink.Logs()[2].Data["session"]).To(Equal("2")) 155 | Expect(testSink.Logs()[3].Data["session"]).To(Equal("2")) 156 | }) 157 | 158 | It("logs with the task added to the message", func() { 159 | Expect(testSink.Logs()[0].Message).To(Equal("my-component.next-sub-action.some-debug-action")) 160 | Expect(testSink.Logs()[1].Message).To(Equal("my-component.next-sub-action.some-info-action")) 161 | Expect(testSink.Logs()[2].Message).To(Equal("my-component.next-sub-action.some-error-action")) 162 | Expect(testSink.Logs()[3].Message).To(Equal("my-component.next-sub-action.some-fatal-action")) 163 | }) 164 | }) 165 | 166 | Describe("WithData", func() { 167 | BeforeEach(func() { 168 | session = logger.WithData(lager.Data{"foo": "bar"}) 169 | }) 170 | 171 | It("returns a new logger with the given data", func() { 172 | Expect(testSink.Logs()[0].Data["foo"]).To(Equal("bar")) 173 | Expect(testSink.Logs()[1].Data["foo"]).To(Equal("bar")) 174 | Expect(testSink.Logs()[2].Data["foo"]).To(Equal("bar")) 175 | Expect(testSink.Logs()[3].Data["foo"]).To(Equal("bar")) 176 | }) 177 | 178 | It("does not append to the logger's task", func() { 179 | 
Expect(testSink.Logs()[0].Message).To(Equal("my-component.some-debug-action")) 180 | }) 181 | }) 182 | 183 | Context("with a nested session", func() { 184 | BeforeEach(func() { 185 | session = session.Session("sub-sub-action") 186 | }) 187 | 188 | It("logs with a shared session id in the data", func() { 189 | Expect(testSink.Logs()[0].Data["session"]).To(Equal("1.1")) 190 | Expect(testSink.Logs()[1].Data["session"]).To(Equal("1.1")) 191 | Expect(testSink.Logs()[2].Data["session"]).To(Equal("1.1")) 192 | Expect(testSink.Logs()[3].Data["session"]).To(Equal("1.1")) 193 | }) 194 | 195 | It("logs with the task added to the message", func() { 196 | Expect(testSink.Logs()[0].Message).To(Equal("my-component.sub-action.sub-sub-action.some-debug-action")) 197 | Expect(testSink.Logs()[1].Message).To(Equal("my-component.sub-action.sub-sub-action.some-info-action")) 198 | Expect(testSink.Logs()[2].Message).To(Equal("my-component.sub-action.sub-sub-action.some-error-action")) 199 | Expect(testSink.Logs()[3].Message).To(Equal("my-component.sub-action.sub-sub-action.some-fatal-action")) 200 | }) 201 | }) 202 | }) 203 | }) 204 | 205 | Describe("Debug", func() { 206 | Context("with log data", func() { 207 | BeforeEach(func() { 208 | logger.Debug(action, logData, anotherLogData) 209 | }) 210 | 211 | TestCommonLogFeatures(lager.DEBUG) 212 | TestLogData() 213 | }) 214 | 215 | Context("with no log data", func() { 216 | BeforeEach(func() { 217 | logger.Debug(action) 218 | }) 219 | 220 | TestCommonLogFeatures(lager.DEBUG) 221 | }) 222 | }) 223 | 224 | Describe("Info", func() { 225 | Context("with log data", func() { 226 | BeforeEach(func() { 227 | logger.Info(action, logData, anotherLogData) 228 | }) 229 | 230 | TestCommonLogFeatures(lager.INFO) 231 | TestLogData() 232 | }) 233 | 234 | Context("with no log data", func() { 235 | BeforeEach(func() { 236 | logger.Info(action) 237 | }) 238 | 239 | TestCommonLogFeatures(lager.INFO) 240 | }) 241 | }) 242 | 243 | Describe("Error", func() { 244 | 
var err = errors.New("oh noes!") 245 | Context("with log data", func() { 246 | BeforeEach(func() { 247 | logger.Error(action, err, logData, anotherLogData) 248 | }) 249 | 250 | TestCommonLogFeatures(lager.ERROR) 251 | TestLogData() 252 | 253 | It("data contains error message", func() { 254 | Expect(testSink.Logs()[0].Data["error"]).To(Equal(err.Error())) 255 | }) 256 | 257 | It("retains the original error values", func() { 258 | Expect(testSink.Errors).To(Equal([]error{err})) 259 | }) 260 | }) 261 | 262 | Context("with no log data", func() { 263 | BeforeEach(func() { 264 | logger.Error(action, err) 265 | }) 266 | 267 | TestCommonLogFeatures(lager.ERROR) 268 | 269 | It("data contains error message", func() { 270 | Expect(testSink.Logs()[0].Data["error"]).To(Equal(err.Error())) 271 | }) 272 | 273 | It("retains the original error values", func() { 274 | Expect(testSink.Errors).To(Equal([]error{err})) 275 | }) 276 | }) 277 | 278 | Context("with no error", func() { 279 | BeforeEach(func() { 280 | logger.Error(action, nil) 281 | }) 282 | 283 | TestCommonLogFeatures(lager.ERROR) 284 | 285 | It("does not contain the error message", func() { 286 | Expect(testSink.Logs()[0].Data).NotTo(HaveKey("error")) 287 | }) 288 | }) 289 | }) 290 | 291 | Describe("Fatal", func() { 292 | var err = errors.New("oh noes!") 293 | var fatalErr interface{} 294 | 295 | Context("with log data", func() { 296 | BeforeEach(func() { 297 | defer func() { 298 | fatalErr = recover() 299 | }() 300 | 301 | logger.Fatal(action, err, logData, anotherLogData) 302 | }) 303 | 304 | TestCommonLogFeatures(lager.FATAL) 305 | TestLogData() 306 | 307 | It("data contains error message", func() { 308 | Expect(testSink.Logs()[0].Data["error"]).To(Equal(err.Error())) 309 | }) 310 | 311 | It("data contains stack trace", func() { 312 | Expect(testSink.Logs()[0].Data["trace"]).NotTo(BeEmpty()) 313 | }) 314 | 315 | It("panics with the provided error", func() { 316 | Expect(fatalErr).To(Equal(err)) 317 | }) 318 | 319 | 
It("retains the original error values", func() { 320 | Expect(testSink.Errors).To(Equal([]error{err})) 321 | }) 322 | }) 323 | 324 | Context("with no log data", func() { 325 | BeforeEach(func() { 326 | defer func() { 327 | fatalErr = recover() 328 | }() 329 | 330 | logger.Fatal(action, err) 331 | }) 332 | 333 | TestCommonLogFeatures(lager.FATAL) 334 | 335 | It("data contains error message", func() { 336 | Expect(testSink.Logs()[0].Data["error"]).To(Equal(err.Error())) 337 | }) 338 | 339 | It("data contains stack trace", func() { 340 | Expect(testSink.Logs()[0].Data["trace"]).NotTo(BeEmpty()) 341 | }) 342 | 343 | It("panics with the provided error", func() { 344 | Expect(fatalErr).To(Equal(err)) 345 | }) 346 | 347 | It("retains the original error values", func() { 348 | Expect(testSink.Errors).To(Equal([]error{err})) 349 | }) 350 | }) 351 | 352 | Context("with no error", func() { 353 | BeforeEach(func() { 354 | defer func() { 355 | fatalErr = recover() 356 | }() 357 | 358 | logger.Fatal(action, nil) 359 | }) 360 | 361 | TestCommonLogFeatures(lager.FATAL) 362 | 363 | It("does not contain the error message", func() { 364 | Expect(testSink.Logs()[0].Data).NotTo(HaveKey("error")) 365 | }) 366 | 367 | It("data contains stack trace", func() { 368 | Expect(testSink.Logs()[0].Data["trace"]).NotTo(BeEmpty()) 369 | }) 370 | 371 | It("panics with the provided error (i.e. 
nil)", func() { 372 | Expect(fatalErr).To(MatchError(&runtime.PanicNilError{})) 373 | }) 374 | }) 375 | 376 | }) 377 | 378 | Describe("WithTraceInfo", func() { 379 | var req *http.Request 380 | 381 | BeforeEach(func() { 382 | var err error 383 | req, err = http.NewRequest("GET", "/foo", nil) 384 | Expect(err).ToNot(HaveOccurred()) 385 | }) 386 | 387 | Context("when request does not contain trace id", func() { 388 | It("does not set trace and span id", func() { 389 | logger = logger.WithTraceInfo(req) 390 | logger.Info("test-log") 391 | 392 | log := testSink.Logs()[0] 393 | 394 | Expect(log.Data).To(BeEmpty()) 395 | Expect(log.Data).To(BeEmpty()) 396 | }) 397 | }) 398 | 399 | Context("when request contains trace id", func() { 400 | It("sets trace and span id", func() { 401 | req.Header.Set("X-Vcap-Request-Id", "7f461654-74d1-1ee5-8367-77d85df2cdab") 402 | 403 | logger = logger.WithTraceInfo(req) 404 | logger.Info("test-log") 405 | 406 | log := testSink.Logs()[0] 407 | 408 | Expect(log.Data["trace-id"]).To(Equal("7f46165474d11ee5836777d85df2cdab")) 409 | Expect(log.Data["span-id"]).NotTo(BeEmpty()) 410 | }) 411 | 412 | It("generates new span id", func() { 413 | req.Header.Set("X-Vcap-Request-Id", "7f461654-74d1-1ee5-8367-77d85df2cdab") 414 | 415 | logger = logger.WithTraceInfo(req) 416 | logger.Info("test-log") 417 | 418 | log1 := testSink.Logs()[0] 419 | 420 | Expect(log1.Data["trace-id"]).To(Equal("7f46165474d11ee5836777d85df2cdab")) 421 | Expect(log1.Data["span-id"]).NotTo(BeEmpty()) 422 | 423 | logger = logger.WithTraceInfo(req) 424 | logger.Info("test-log") 425 | 426 | log2 := testSink.Logs()[1] 427 | 428 | Expect(log2.Data["trace-id"]).To(Equal("7f46165474d11ee5836777d85df2cdab")) 429 | Expect(log2.Data["span-id"]).NotTo(BeEmpty()) 430 | Expect(log2.Data["span-id"]).NotTo(Equal(log1.Data["span-id"])) 431 | }) 432 | }) 433 | 434 | Context("when request contains invalid trace id", func() { 435 | It("does not set trace and span id", func() { 436 | 
req.Header.Set("X-Vcap-Request-Id", "invalid-request-id") 437 | 438 | logger = logger.WithTraceInfo(req) 439 | logger.Info("test-log") 440 | 441 | log := testSink.Logs()[0] 442 | 443 | Expect(log.Data).To(BeEmpty()) 444 | Expect(log.Data).To(BeEmpty()) 445 | }) 446 | }) 447 | }) 448 | }) 449 | -------------------------------------------------------------------------------- /models.go: -------------------------------------------------------------------------------- 1 | package lager 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | "time" 9 | ) 10 | 11 | type LogLevel int 12 | 13 | const ( 14 | DEBUG LogLevel = iota 15 | INFO 16 | ERROR 17 | FATAL 18 | ) 19 | 20 | var logLevelStr = [...]string{ 21 | DEBUG: "debug", 22 | INFO: "info", 23 | ERROR: "error", 24 | FATAL: "fatal", 25 | } 26 | 27 | func (l LogLevel) String() string { 28 | if DEBUG <= l && l <= FATAL { 29 | return logLevelStr[l] 30 | } 31 | return "invalid" 32 | } 33 | 34 | func LogLevelFromString(s string) (LogLevel, error) { 35 | for k, v := range logLevelStr { 36 | if v == s { 37 | return LogLevel(k), nil 38 | } 39 | } 40 | return -1, fmt.Errorf("invalid log level: %s", s) 41 | } 42 | 43 | type Data map[string]interface{} 44 | 45 | type rfc3339Time time.Time 46 | 47 | const rfc3339Nano = "2006-01-02T15:04:05.000000000Z07:00" 48 | 49 | func (t rfc3339Time) MarshalJSON() ([]byte, error) { 50 | // Use AppendFormat to avoid slower string operations, instead we only 51 | // operate on a byte slice 52 | // Avoid creating a new copy of t with a cast, instead use type conversion 53 | stamp := append((time.Time)(t).UTC().AppendFormat([]byte{'"'}, rfc3339Nano), '"') 54 | return stamp, nil 55 | } 56 | 57 | func (t *rfc3339Time) UnmarshalJSON(data []byte) error { 58 | return (*time.Time)(t).UnmarshalJSON(data) 59 | } 60 | 61 | type LogFormat struct { 62 | Timestamp string `json:"timestamp"` 63 | Source string `json:"source"` 64 | Message string `json:"message"` 65 | LogLevel LogLevel 
`json:"log_level"`
	Data      Data     `json:"data"`
	Error     error    `json:"-"`
	// time is the event time captured by the logger; it is not serialised
	// directly but lets toPrettyJSON skip re-parsing the Timestamp string.
	time time.Time
}

// ToJSON marshals the entry as lager's machine-readable JSON format.
// If marshalling fails (typically because Data holds an unserialisable
// value), Data is replaced by a description of the failure via
// dataForJSONMarhallingError and marshalling is retried; a second failure
// panics, since it would indicate a bug in lager itself rather than in
// caller-supplied data.
func (log LogFormat) ToJSON() []byte {
	content, err := json.Marshal(log)
	if err != nil {
		log.Data = dataForJSONMarhallingError(err, log.Data)
		content, err = json.Marshal(log)
		if err != nil {
			panic(err)
		}
	}
	return content
}

// prettyLogFormat is the wire format emitted by the pretty sink: an
// RFC3339 timestamp and a textual level instead of the epoch string and
// numeric level used by LogFormat.
type prettyLogFormat struct {
	Timestamp rfc3339Time `json:"timestamp"`
	Level     string      `json:"level"`
	Source    string      `json:"source"`
	Message   string      `json:"message"`
	Data      Data        `json:"data"`
	Error     error       `json:"-"`
}

// toPrettyJSON marshals the entry in the human-readable format. The event
// time is taken from the internal time field when set, otherwise recovered
// from the epoch-seconds Timestamp string. Marshalling failures are handled
// exactly as in ToJSON.
func (log LogFormat) toPrettyJSON() []byte {
	t := log.time
	if t.IsZero() {
		t = parseTimestamp(log.Timestamp)
	}

	prettyLog := prettyLogFormat{
		Timestamp: rfc3339Time(t),
		Level:     log.LogLevel.String(),
		Source:    log.Source,
		Message:   log.Message,
		Data:      log.Data,
		Error:     log.Error,
	}

	content, err := json.Marshal(prettyLog)

	if err != nil {
		prettyLog.Data = dataForJSONMarhallingError(err, prettyLog.Data)
		content, err = json.Marshal(prettyLog)
		if err != nil {
			panic(err)
		}
	}

	return content
}

// dataForJSONMarhallingError builds a replacement Data map describing why
// the original data could not be marshalled, together with a %#v dump of
// that data. Recognised json failures are filed under the key
// "lager serialisation error"; anything else under "unknown_error".
// NOTE(review): the misspelling "Marhalling" is kept — the name is
// referenced by other sinks in this package.
func dataForJSONMarhallingError(err error, data Data) Data {
	_, ok1 := err.(*json.UnsupportedTypeError)
	_, ok2 := err.(*json.MarshalerError)
	errKey := "unknown_error"
	if ok1 || ok2 {
		errKey = "lager serialisation error"
	}

	return map[string]interface{}{
		errKey:      err.Error(),
		"data_dump": fmt.Sprintf("%#v", data),
	}
}

// parseTimestamp interprets s as "<seconds>.<nanoseconds>" since the Unix
// epoch. Empty, malformed, or negative input falls back to time.Now().
func parseTimestamp(s string) time.Time {
	if s == "" {
		return time.Now()
	}
	n := strings.IndexByte(s, '.')
	if n <= 0 || n == len(s)-1 {
		return time.Now()
	}
	sec, err := strconv.ParseInt(s[:n], 10, 64)
	if err != nil || sec < 0 {
		return
time.Now() 145 | } 146 | nsec, err := strconv.ParseInt(s[n+1:], 10, 64) 147 | if err != nil || nsec < 0 { 148 | return time.Now() 149 | } 150 | return time.Unix(sec, nsec) 151 | } 152 | -------------------------------------------------------------------------------- /reconfigurable_sink.go: -------------------------------------------------------------------------------- 1 | package lager 2 | 3 | import ( 4 | "sync/atomic" 5 | ) 6 | 7 | type ReconfigurableSink struct { 8 | sink Sink 9 | 10 | minLogLevel int32 11 | } 12 | 13 | func NewReconfigurableSink(sink Sink, initialMinLogLevel LogLevel) *ReconfigurableSink { 14 | return &ReconfigurableSink{ 15 | sink: sink, 16 | 17 | minLogLevel: int32(initialMinLogLevel), 18 | } 19 | } 20 | 21 | func (sink *ReconfigurableSink) Log(log LogFormat) { 22 | minLogLevel := LogLevel(atomic.LoadInt32(&sink.minLogLevel)) 23 | 24 | if log.LogLevel < minLogLevel { 25 | return 26 | } 27 | 28 | sink.sink.Log(log) 29 | } 30 | 31 | func (sink *ReconfigurableSink) SetMinLevel(level LogLevel) { 32 | atomic.StoreInt32(&sink.minLogLevel, int32(level)) 33 | } 34 | 35 | func (sink *ReconfigurableSink) GetMinLevel() LogLevel { 36 | return LogLevel(atomic.LoadInt32(&sink.minLogLevel)) 37 | } 38 | -------------------------------------------------------------------------------- /reconfigurable_sink_test.go: -------------------------------------------------------------------------------- 1 | package lager_test 2 | 3 | import ( 4 | "code.cloudfoundry.org/lager/v3" 5 | "code.cloudfoundry.org/lager/v3/lagertest" 6 | . "github.com/onsi/ginkgo/v2" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | var _ = Describe("ReconfigurableSink", func() { 11 | var ( 12 | testSink *lagertest.TestSink 13 | 14 | sink *lager.ReconfigurableSink 15 | ) 16 | 17 | BeforeEach(func() { 18 | testSink = lagertest.NewTestSink() 19 | 20 | sink = lager.NewReconfigurableSink(testSink, lager.INFO) 21 | }) 22 | 23 | It("returns the current level", func() { 24 | Expect(sink.GetMinLevel()).To(Equal(lager.INFO)) 25 | }) 26 | 27 | Context("when logging above the minimum log level", func() { 28 | var log lager.LogFormat 29 | 30 | BeforeEach(func() { 31 | log = lager.LogFormat{LogLevel: lager.INFO, Message: "hello world"} 32 | sink.Log(log) 33 | }) 34 | 35 | It("writes to the given sink", func() { 36 | Expect(testSink.Buffer().Contents()).To(MatchJSON(log.ToJSON())) 37 | }) 38 | }) 39 | 40 | Context("when logging below the minimum log level", func() { 41 | BeforeEach(func() { 42 | sink.Log(lager.LogFormat{LogLevel: lager.DEBUG, Message: "hello world"}) 43 | }) 44 | 45 | It("does not write to the given writer", func() { 46 | Expect(testSink.Buffer().Contents()).To(BeEmpty()) 47 | }) 48 | }) 49 | 50 | Context("when reconfigured to a new log level", func() { 51 | BeforeEach(func() { 52 | sink.SetMinLevel(lager.DEBUG) 53 | }) 54 | 55 | It("writes logs above the new log level", func() { 56 | log := lager.LogFormat{LogLevel: lager.DEBUG, Message: "hello world"} 57 | sink.Log(log) 58 | Expect(testSink.Buffer().Contents()).To(MatchJSON(log.ToJSON())) 59 | }) 60 | 61 | It("returns the newly updated level", func() { 62 | Expect(sink.GetMinLevel()).To(Equal(lager.DEBUG)) 63 | }) 64 | }) 65 | }) 66 | -------------------------------------------------------------------------------- /redacting_sink.go: -------------------------------------------------------------------------------- 1 | package lager 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | type redactingSink struct { 8 | sink Sink 9 | jsonRedacter *JSONRedacter 10 | } 11 | 12 | // NewRedactingSink creates a 
sink that redacts sensitive information from the
// data field. The old behavior of NewRedactingWriterSink (which was removed
// in v2) can be obtained using the following code:
//
//	redactingSink, err := NewRedactingSink(
//		NewWriterSink(writer, minLogLevel),
//		keyPatterns,
//		valuePatterns,
//	)
//
//	if err != nil {
//		return nil, err
//	}
//
//	return NewReconfigurableSink(
//		redactingSink,
//		minLogLevel,
//	), nil
//
// It returns an error only when the key/value patterns fail to compile into
// a JSONRedacter.
func NewRedactingSink(sink Sink, keyPatterns []string, valuePatterns []string) (Sink, error) {
	jsonRedacter, err := NewJSONRedacter(keyPatterns, valuePatterns)
	if err != nil {
		return nil, err
	}

	return &redactingSink{
		sink:         sink,
		jsonRedacter: jsonRedacter,
	}, nil
}

// Log round-trips the entry's Data through JSON so the redacter can scrub
// it: marshal, redact, unmarshal back into log.Data, then forward the entry
// to the wrapped sink. A Data map that cannot be marshalled is first
// replaced via dataForJSONMarhallingError; a panic below means either that
// replacement map failed to marshal or the redacter produced invalid JSON —
// both lager bugs, not caller errors.
func (sink *redactingSink) Log(log LogFormat) {
	rawJSON, err := json.Marshal(log.Data)
	if err != nil {
		log.Data = dataForJSONMarhallingError(err, log.Data)

		rawJSON, err = json.Marshal(log.Data)
		if err != nil {
			panic(err)
		}
	}

	redactedJSON := sink.jsonRedacter.Redact(rawJSON)

	err = json.Unmarshal(redactedJSON, &log.Data)
	if err != nil {
		panic(err)
	}

	sink.sink.Log(log)
}
--------------------------------------------------------------------------------
/redacting_sink_test.go:
--------------------------------------------------------------------------------
package lager_test

import (
	"encoding/json"

	"code.cloudfoundry.org/lager/v3"
	"code.cloudfoundry.org/lager/v3/lagertest"
	. "github.com/onsi/ginkgo/v2"
	.
"github.com/onsi/gomega" 10 | ) 11 | 12 | var _ = Describe("RedactingSink", func() { 13 | var ( 14 | sink lager.Sink 15 | testSink *lagertest.TestSink 16 | ) 17 | 18 | BeforeEach(func() { 19 | testSink = lagertest.NewTestSink() 20 | 21 | var err error 22 | sink, err = lager.NewRedactingSink(testSink, nil, nil) 23 | Expect(err).NotTo(HaveOccurred()) 24 | }) 25 | 26 | Context("when given a valid set of data", func() { 27 | BeforeEach(func() { 28 | sink.Log(lager.LogFormat{ 29 | LogLevel: lager.INFO, 30 | Message: "hello world", 31 | Data: lager.Data{"password": "abcd"}, 32 | }) 33 | }) 34 | 35 | It("writes to the given sink", func() { 36 | Expect(testSink.Buffer().Contents()).To(MatchJSON(`{"timestamp":"","log_level":1,"source":"","message":"hello world","data":{"password":"*REDACTED*"}}`)) 37 | }) 38 | }) 39 | 40 | Context("when an unserializable data object is passed in", func() { 41 | BeforeEach(func() { 42 | sink.Log(lager.LogFormat{ 43 | LogLevel: lager.INFO, 44 | Message: "hello world", Data: map[string]interface{}{ 45 | "some_key": func() {}, 46 | }, 47 | }) 48 | }) 49 | 50 | It("logs the serialization error", func() { 51 | message := map[string]interface{}{} 52 | 53 | err := json.Unmarshal(testSink.Buffer().Contents(), &message) 54 | Expect(err).NotTo(HaveOccurred()) 55 | 56 | Expect(message["message"]).To(Equal("hello world")) 57 | Expect(message["log_level"]).To(Equal(float64(1))) 58 | Expect(message["data"].(map[string]interface{})["lager serialisation error"]).To(Equal("json: unsupported type: func()")) 59 | Expect(message["data"].(map[string]interface{})["data_dump"]).ToNot(BeEmpty()) 60 | }) 61 | }) 62 | }) 63 | -------------------------------------------------------------------------------- /scripts/create-docker-container.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | THIS_FILE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 7 | 
CI="${THIS_FILE_DIR}/../../wg-app-platform-runtime-ci"
. "$CI/shared/helpers/git-helpers.bash"
REPO_NAME=$(git_get_remote_name)
REPO_PATH="${THIS_FILE_DIR}/../"
unset THIS_FILE_DIR

IMAGE="cloudfoundry/tas-runtime-build"
CONTAINER_NAME="$REPO_NAME-docker-container"

# Default to an interactive terminal when no docker-run flags were supplied.
if [[ -z "${*}" ]]; then
  ARGS="-it"
else
  ARGS="${*}"
fi

docker pull "${IMAGE}"
# Quoted so an empty or space-containing name cannot expand into a
# different docker command line (was previously unquoted).
docker rm -f "$CONTAINER_NAME"
# ${ARGS} is intentionally left unquoted below: it may carry several flags
# that must undergo word splitting.
docker run -it \
  --env "REPO_NAME=$REPO_NAME" \
  --env "REPO_PATH=/repo" \
  --rm \
  --name "$CONTAINER_NAME" \
  -v "${REPO_PATH}:/repo" \
  -v "${CI}:/ci" \
  ${ARGS} \
  "${IMAGE}" \
  /bin/bash

--------------------------------------------------------------------------------
/scripts/docker/test.bash:
--------------------------------------------------------------------------------
#!/bin/bash

set -eu
set -o pipefail

. "/ci/shared/helpers/git-helpers.bash"

# test runs the CI bin-test task for one package (and optional sub-package).
# NOTE(review): the name shadows the `test` shell builtin; kept as-is since
# callers below invoke it by this name.
function test() {
  local package="${1:?Provide a package}"
  local sub_package="${2:-}"

  export DIR=${package}
  . <(/ci/shared/helpers/extract-default-params-for-task.bash /ci/shared/tasks/run-bin-test/linux.yml)

  export GOFLAGS="-buildvcs=false"
  /ci/shared/tasks/run-bin-test/task.bash "${sub_package}"
}

pushd / > /dev/null
if [[ -n "${1:-}" ]]; then
  test "${1}" "${2:-}"
else
  test "."
fi
popd > /dev/null
--------------------------------------------------------------------------------
/scripts/test-in-docker.bash:
--------------------------------------------------------------------------------
#!/bin/bash

set -eu

THIS_FILE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
CI="${THIS_FILE_DIR}/../../wg-app-platform-runtime-ci"
.
"$CI/shared/helpers/git-helpers.bash" 8 | REPO_NAME=$(git_get_remote_name) 9 | 10 | CONTAINER_NAME="$REPO_NAME-docker-container" 11 | 12 | "${THIS_FILE_DIR}/create-docker-container.bash" -d 13 | 14 | docker exec $CONTAINER_NAME '/repo/scripts/docker/test.bash' "$@" 15 | -------------------------------------------------------------------------------- /slog_sink.go: -------------------------------------------------------------------------------- 1 | //go:build go1.21 2 | 3 | package lager 4 | 5 | import ( 6 | "context" 7 | "log/slog" 8 | ) 9 | 10 | // Type slogSink wraps an slog.Logger as a Sink 11 | type slogSink struct { 12 | logger *slog.Logger 13 | } 14 | 15 | // NewSlogSink wraps a slog.Logger as a lager Sink 16 | // This allows code using slog to integrate with code that uses lager 17 | // Note the following log level conversions: 18 | // 19 | // lager.DEBUG -> slog.LevelDebug 20 | // lager.ERROR -> slog.LevelError 21 | // lager.FATAL -> slog.LevelError 22 | // default -> slog.LevelInfo 23 | func NewSlogSink(l *slog.Logger) Sink { 24 | return &slogSink{logger: l} 25 | } 26 | 27 | // Log exists to implement the lager.Sink interface. 28 | func (l *slogSink) Log(f LogFormat) { 29 | // For lager.Error() and lager.Fatal() the error (and stacktrace) are already in f.Data 30 | r := slog.NewRecord(f.time, toSlogLevel(f.LogLevel), f.Message, 0) 31 | r.AddAttrs(toAttr(f.Data)...) 
// By calling the handler directly we can pass through the original
	// timestamp, whereas calling a method on the logger would generate a
	// new timestamp. The error is deliberately dropped: lager sinks are
	// best effort (see the Sink interface).
	l.logger.Handler().Handle(context.Background(), r) //nolint:errcheck
}

// toAttr converts a lager.Data map into a []slog.Attr, one attribute per
// key. It returns nil for empty (or nil) data so no slice is allocated.
func toAttr(d Data) []slog.Attr {
	l := len(d)
	if l == 0 {
		return nil
	}

	attr := make([]slog.Attr, 0, l)
	for k, v := range d {
		attr = append(attr, slog.Any(k, v))
	}

	return attr
}

// toSlogLevel converts lager log levels to slog levels. FATAL maps to
// slog.LevelError because slog has no fatal level; INFO and any
// unrecognised value map to slog.LevelInfo.
func toSlogLevel(l LogLevel) slog.Level {
	switch l {
	case DEBUG:
		return slog.LevelDebug
	case ERROR, FATAL:
		return slog.LevelError
	default:
		return slog.LevelInfo
	}
}
--------------------------------------------------------------------------------
/slog_sink_test.go:
--------------------------------------------------------------------------------
//go:build go1.21

package lager_test

import (
	"bytes"
	"code.cloudfoundry.org/lager/v3"
	"encoding/json"
	"fmt"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	.
"github.com/onsi/gomega/gstruct" 13 | "log/slog" 14 | ) 15 | 16 | var _ = Describe("NewSlogSink", func() { 17 | var ( 18 | buf bytes.Buffer 19 | logger lager.Logger 20 | ) 21 | 22 | matchTimestamp := MatchRegexp(`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{5,9}Z$`) 23 | 24 | parsedLogMessage := func() (receiver map[string]any) { 25 | Expect(json.Unmarshal(buf.Bytes(), &receiver)).To(Succeed()) 26 | return 27 | } 28 | 29 | BeforeEach(func() { 30 | buf = bytes.Buffer{} 31 | logger = lager.NewLogger("fake-component") 32 | logger.RegisterSink(lager.NewSlogSink(slog.New(slog.NewJSONHandler(&buf, nil)))) 33 | }) 34 | 35 | It("logs Info()", func() { 36 | logger.Info("fake-info", lager.Data{"foo": "bar"}) 37 | 38 | Expect(parsedLogMessage()).To(MatchAllKeys(Keys{ 39 | "time": matchTimestamp, 40 | "level": Equal("INFO"), 41 | "msg": Equal("fake-component.fake-info"), 42 | "foo": Equal("bar"), 43 | })) 44 | }) 45 | 46 | It("logs Debug()", func() { 47 | logger.Debug("fake-debug", lager.Data{"foo": "bar"}) 48 | 49 | Expect(parsedLogMessage()).To(MatchAllKeys(Keys{ 50 | "time": matchTimestamp, 51 | "level": Equal("DEBUG"), 52 | "msg": Equal("fake-component.fake-debug"), 53 | "foo": Equal("bar"), 54 | })) 55 | }) 56 | 57 | It("logs Error()", func() { 58 | logger.Error("fake-error", fmt.Errorf("boom"), lager.Data{"foo": "bar"}) 59 | 60 | Expect(parsedLogMessage()).To(MatchAllKeys(Keys{ 61 | "time": matchTimestamp, 62 | "error": Equal("boom"), 63 | "level": Equal("ERROR"), 64 | "msg": Equal("fake-component.fake-error"), 65 | "foo": Equal("bar"), 66 | })) 67 | }) 68 | 69 | It("logs Fatal()", func() { 70 | Expect(func() { 71 | logger.Fatal("fake-fatal", fmt.Errorf("boom"), lager.Data{"foo": "bar"}) 72 | }).To(Panic()) 73 | 74 | Expect(parsedLogMessage()).To(MatchAllKeys(Keys{ 75 | "time": matchTimestamp, 76 | "error": Equal("boom"), 77 | "level": Equal("ERROR"), 78 | "msg": Equal("fake-component.fake-fatal"), 79 | "foo": Equal("bar"), 80 | "trace": 
ContainSubstring(`code.cloudfoundry.org/lager/v3.(*logger).Fatal`), 81 | })) 82 | }) 83 | }) 84 | -------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | // +build tools 3 | 4 | package lager 5 | 6 | import ( 7 | _ "github.com/onsi/ginkgo/v2/ginkgo" 8 | ) 9 | -------------------------------------------------------------------------------- /truncating_sink.go: -------------------------------------------------------------------------------- 1 | package lager 2 | 3 | import "code.cloudfoundry.org/lager/v3/internal/truncate" 4 | 5 | type truncatingSink struct { 6 | sink Sink 7 | maxDataStringLength int 8 | } 9 | 10 | // NewTruncatingSink returns a sink that truncates strings longer than the max 11 | // data string length 12 | // Example: 13 | // 14 | // writerSink := lager.NewWriterSink(os.Stdout, lager.INFO) 15 | // sink := lager.NewTruncatingSink(testSink, 20) 16 | // logger := lager.NewLogger("test") 17 | // logger.RegisterSink(sink) 18 | // logger.Info("message", lager.Data{"A": strings.Repeat("a", 25)}) 19 | func NewTruncatingSink(sink Sink, maxDataStringLength int) Sink { 20 | return &truncatingSink{ 21 | sink: sink, 22 | maxDataStringLength: maxDataStringLength, 23 | } 24 | } 25 | 26 | func (sink *truncatingSink) Log(log LogFormat) { 27 | truncatedData := Data{} 28 | for k, v := range log.Data { 29 | truncatedData[k] = truncate.Value(v, sink.maxDataStringLength) 30 | } 31 | log.Data = truncatedData 32 | sink.sink.Log(log) 33 | } 34 | -------------------------------------------------------------------------------- /truncating_sink_test.go: -------------------------------------------------------------------------------- 1 | package lager_test 2 | 3 | import ( 4 | "code.cloudfoundry.org/lager/v3" 5 | "code.cloudfoundry.org/lager/v3/lagertest" 6 | . "github.com/onsi/ginkgo/v2" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | const longString = "aaaaaaaaaaaaaaaaaaaaaaaaa" 11 | 12 | var _ = Describe("TruncatingSink", func() { 13 | var ( 14 | sink lager.Sink 15 | testSink *lagertest.TestSink 16 | ) 17 | 18 | type dummyStruct struct { 19 | A string 20 | } 21 | 22 | BeforeEach(func() { 23 | testSink = lagertest.NewTestSink() 24 | 25 | sink = lager.NewTruncatingSink(testSink, 20) 26 | }) 27 | Context("when given data that has only short strings", func() { 28 | BeforeEach(func() { 29 | sink.Log(lager.LogFormat{ 30 | LogLevel: lager.INFO, 31 | Message: "hello world", 32 | Data: lager.Data{"foo": "bar", "dummy": dummyStruct{A: "abcd"}}, 33 | }) 34 | }) 35 | It("writes the data to the given sink without truncating any strings", func() { 36 | Expect(testSink.Buffer().Contents()).To( 37 | MatchJSON(`{"timestamp":"","log_level":1,"source":"","message":"hello world","data":{"foo":"bar","dummy":{"A":"abcd"}}}`), 38 | ) 39 | }) 40 | }) 41 | Context("when given data that includes strings that exceed the configured max length", func() { 42 | BeforeEach(func() { 43 | sink.Log(lager.LogFormat{ 44 | LogLevel: lager.INFO, 45 | Message: "hello world", 46 | Data: lager.Data{"foo": longString, "dummy": dummyStruct{A: longString}}, 47 | }) 48 | }) 49 | It("truncates the data and writes to the given sink", func() { 50 | Expect(testSink.Buffer().Contents()).To( 51 | MatchJSON(`{"timestamp":"","log_level":1,"source":"","message":"hello world","data":{"foo":"aaaaaaaa-(truncated)","dummy":{"A":"aaaaaaaa-(truncated)"}}}`), 52 | ) 53 | }) 54 | }) 55 | }) 56 | -------------------------------------------------------------------------------- /writer_sink.go: -------------------------------------------------------------------------------- 1 | package lager 2 | 3 | import ( 4 | "io" 5 | "sync" 6 | ) 7 | 8 | // A Sink represents a write destination for a Logger. It provides 9 | // a thread-safe interface for writing logs 10 | type Sink interface { 11 | //Log to the sink. 
Best effort -- no need to worry about errors. 12 | Log(LogFormat) 13 | } 14 | 15 | type writerSink struct { 16 | writer io.Writer 17 | minLogLevel LogLevel 18 | writeL *sync.Mutex 19 | } 20 | 21 | func NewWriterSink(writer io.Writer, minLogLevel LogLevel) Sink { 22 | return &writerSink{ 23 | writer: writer, 24 | minLogLevel: minLogLevel, 25 | writeL: new(sync.Mutex), 26 | } 27 | } 28 | 29 | func (sink *writerSink) Log(log LogFormat) { 30 | if log.LogLevel < sink.minLogLevel { 31 | return 32 | } 33 | 34 | // Convert to json outside of critical section to minimize time spent holding lock 35 | message := append(log.ToJSON(), '\n') 36 | 37 | sink.writeL.Lock() 38 | sink.writer.Write(message) //nolint:errcheck 39 | sink.writeL.Unlock() 40 | } 41 | 42 | type prettySink struct { 43 | writer io.Writer 44 | minLogLevel LogLevel 45 | writeL sync.Mutex 46 | } 47 | 48 | func NewPrettySink(writer io.Writer, minLogLevel LogLevel) Sink { 49 | return &prettySink{ 50 | writer: writer, 51 | minLogLevel: minLogLevel, 52 | } 53 | } 54 | 55 | func (sink *prettySink) Log(log LogFormat) { 56 | if log.LogLevel < sink.minLogLevel { 57 | return 58 | } 59 | 60 | // Convert to json outside of critical section to minimize time spent holding lock 61 | message := append(log.toPrettyJSON(), '\n') 62 | 63 | sink.writeL.Lock() 64 | sink.writer.Write(message) //nolint:errcheck 65 | sink.writeL.Unlock() 66 | } 67 | -------------------------------------------------------------------------------- /writer_sink_test.go: -------------------------------------------------------------------------------- 1 | package lager_test 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "strconv" 10 | "strings" 11 | "sync" 12 | "time" 13 | 14 | "code.cloudfoundry.org/lager/v3" 15 | "code.cloudfoundry.org/lager/v3/chug" 16 | 17 | . "github.com/onsi/ginkgo/v2" 18 | . 
"github.com/onsi/gomega" 19 | "github.com/onsi/gomega/gbytes" 20 | ) 21 | 22 | var _ = Describe("WriterSink", func() { 23 | 24 | var sink lager.Sink 25 | var writer *copyWriter 26 | 27 | BeforeEach(func() { 28 | writer = NewCopyWriter() 29 | sink = lager.NewWriterSink(writer, lager.INFO) 30 | }) 31 | 32 | Context("when logging above the minimum log level", func() { 33 | BeforeEach(func() { 34 | sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: "hello world"}) 35 | }) 36 | 37 | It("writes to the given writer", func() { 38 | Expect(writer.Copy()).To(MatchJSON(`{"message":"hello world","log_level":1,"timestamp":"","source":"","data":null}`)) 39 | }) 40 | }) 41 | 42 | Context("when a unserializable object is passed into data", func() { 43 | BeforeEach(func() { 44 | sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: "hello world", Data: map[string]interface{}{"some_key": func() {}}}) 45 | }) 46 | 47 | It("logs the serialization error", func() { 48 | message := map[string]interface{}{} 49 | err := json.Unmarshal(writer.Copy(), &message) 50 | Expect(err).NotTo(HaveOccurred()) 51 | Expect(message["message"]).To(Equal("hello world")) 52 | Expect(message["log_level"]).To(Equal(float64(1))) 53 | Expect(message["data"].(map[string]interface{})["lager serialisation error"]).To(Equal("json: unsupported type: func()")) 54 | Expect(message["data"].(map[string]interface{})["data_dump"]).ToNot(BeEmpty()) 55 | }) 56 | }) 57 | 58 | Context("when logging below the minimum log level", func() { 59 | BeforeEach(func() { 60 | sink.Log(lager.LogFormat{LogLevel: lager.DEBUG, Message: "hello world"}) 61 | }) 62 | 63 | It("does not write to the given writer", func() { 64 | Expect(writer.Copy()).To(Equal([]byte{})) 65 | }) 66 | }) 67 | 68 | Context("when logging from multiple threads", func() { 69 | var content = "abcdefg " 70 | 71 | BeforeEach(func() { 72 | wg := new(sync.WaitGroup) 73 | for i := 0; i < MaxThreads; i++ { 74 | wg.Add(1) 75 | go func() { 76 | 
sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: content}) 77 | wg.Done() 78 | }() 79 | } 80 | wg.Wait() 81 | }) 82 | 83 | It("writes to the given writer", func() { 84 | lines := strings.Split(string(writer.Copy()), "\n") 85 | for _, line := range lines { 86 | if line == "" { 87 | continue 88 | } 89 | Expect(line).To(MatchJSON(fmt.Sprintf(`{"message":"%s","log_level":1,"timestamp":"","source":"","data":null}`, content))) 90 | } 91 | }) 92 | }) 93 | 94 | Context("when using a buffered writer", func() { 95 | var ( 96 | colWriter *collectingWriter 97 | bufferSize int 98 | ) 99 | 100 | BeforeEach(func() { 101 | colWriter = &collectingWriter{} 102 | }) 103 | 104 | JustBeforeEach(func() { 105 | bufWriter := bufio.NewWriterSize(colWriter, bufferSize) 106 | sink = lager.NewWriterSink(bufWriter, lager.INFO) 107 | }) 108 | 109 | Context("and the message has length exactly equal to the buffer size", func() { 110 | var message lager.LogFormat 111 | 112 | BeforeEach(func() { 113 | message = lager.LogFormat{LogLevel: lager.INFO, Message: "hello"} 114 | bufferSize = len(message.ToJSON()) 115 | }) 116 | 117 | It("does not write messages starting with a new line", func() { 118 | sink.Log(message) 119 | sink.Log(message) 120 | Expect(len(colWriter.writes)).To(Equal(2)) 121 | Expect(colWriter.writes[0]).NotTo(HavePrefix("\n")) 122 | Expect(colWriter.writes[1]).NotTo(HavePrefix("\n")) 123 | }) 124 | }) 125 | }) 126 | }) 127 | 128 | var _ = Describe("PrettyPrintWriter", func() { 129 | const MaxThreads = 100 130 | 131 | var buf *bytes.Buffer 132 | var sink lager.Sink 133 | var message lager.LogFormat 134 | 135 | BeforeEach(func() { 136 | message = lager.LogFormat{} 137 | buf = new(bytes.Buffer) 138 | sink = lager.NewPrettySink(buf, lager.INFO) 139 | }) 140 | 141 | It("renders in order: timestamp (in UTC), level, source, message and data fields", func() { 142 | expectedTime := time.Unix(0, 0) 143 | sink.Log(lager.LogFormat{ 144 | LogLevel: lager.INFO, 145 | Timestamp: 
formatTimestamp(expectedTime), 146 | }) 147 | logBuf := gbytes.BufferWithBytes(buf.Bytes()) 148 | Expect(logBuf).To(gbytes.Say(`{`)) 149 | Expect(logBuf).To(gbytes.Say(`"timestamp":"1970-01-01T00:00:00.000000000Z",`)) 150 | Expect(logBuf).To(gbytes.Say(`"level":"info",`)) 151 | Expect(logBuf).To(gbytes.Say(`"source":"",`)) 152 | Expect(logBuf).To(gbytes.Say(`"message":"",`)) 153 | Expect(logBuf).To(gbytes.Say(`"data":null`)) 154 | Expect(logBuf).To(gbytes.Say(`}`)) 155 | }) 156 | 157 | It("always prints the time stamp with 9 decimal places", func() { 158 | expectedTime := time.Unix(0, 123000000) 159 | sink.Log(lager.LogFormat{ 160 | LogLevel: lager.INFO, 161 | Timestamp: formatTimestamp(expectedTime), 162 | }) 163 | logBuf := gbytes.BufferWithBytes(buf.Bytes()) 164 | Expect(logBuf).To(gbytes.Say(`"timestamp":"1970-01-01T00:00:00.123000000Z",`)) 165 | }) 166 | 167 | Context("when the internal time field of the provided log is zero", func() { 168 | testTimestamp := func(expected time.Time) { 169 | expected = expected.UTC() 170 | Expect(json.Unmarshal(buf.Bytes(), &message)).To(Succeed()) 171 | timestamp, err := time.Parse(time.RFC3339Nano, message.Timestamp) 172 | Expect(err).NotTo(HaveOccurred()) 173 | Expect(timestamp).To(BeTemporally("~", expected, time.Minute)) 174 | } 175 | 176 | Context("and the unix epoch is set", func() { 177 | It("parses the timestamp", func() { 178 | expectedTime := time.Now().Add(time.Hour) 179 | sink.Log(lager.LogFormat{ 180 | LogLevel: lager.INFO, 181 | Timestamp: formatTimestamp(expectedTime), 182 | }) 183 | testTimestamp(expectedTime) 184 | }) 185 | }) 186 | 187 | Context("the unix epoch is empty or invalid", func() { 188 | var invalidTimestamps = []string{ 189 | "", 190 | "invalid", 191 | ".123", 192 | "123.", 193 | "123.456.", 194 | "123.456.789", 195 | strconv.FormatInt(time.Now().Unix(), 10), // invalid - missing "." 
196 | strconv.FormatInt(-time.Now().Unix(), 10) + ".0", // negative 197 | time.Now().Format(time.RFC3339), 198 | time.Now().Format(time.RFC3339Nano), 199 | } 200 | 201 | It("uses the current time", func() { 202 | for _, ts := range invalidTimestamps { 203 | buf.Reset() 204 | sink.Log(lager.LogFormat{ 205 | Timestamp: ts, 206 | LogLevel: lager.INFO, 207 | }) 208 | testTimestamp(time.Now()) 209 | } 210 | }) 211 | }) 212 | }) 213 | 214 | Context("when logging at or above the minimum log level", func() { 215 | BeforeEach(func() { 216 | sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: "hello world"}) 217 | }) 218 | 219 | It("writes to the given writer", func() { 220 | log := firstLogEntry(buf) 221 | Expect(log.LogLevel).To(Equal(lager.INFO)) 222 | Expect(log.Message).To(Equal("hello world")) 223 | }) 224 | }) 225 | 226 | Context("when a unserializable object is passed into data", func() { 227 | BeforeEach(func() { 228 | invalid := lager.LogFormat{ 229 | LogLevel: lager.INFO, 230 | Message: "hello world", 231 | Data: lager.Data{"nope": func() {}}, 232 | } 233 | sink.Log(invalid) 234 | }) 235 | 236 | It("logs the serialization error", func() { 237 | log := firstLogEntry(buf) 238 | Expect(log.Message).To(Equal("hello world")) 239 | Expect(log.LogLevel).To(Equal(lager.INFO)) 240 | Expect(log.Data["lager serialisation error"]).To(Equal("json: unsupported type: func()")) 241 | Expect(log.Data["data_dump"]).ToNot(BeEmpty()) 242 | }) 243 | }) 244 | 245 | Context("when logging below the minimum log level", func() { 246 | BeforeEach(func() { 247 | sink.Log(lager.LogFormat{LogLevel: lager.DEBUG, Message: "hello world"}) 248 | }) 249 | 250 | It("does not write to the given writer", func() { 251 | Expect(buf).To(Equal(bytes.NewBuffer(nil))) 252 | }) 253 | }) 254 | 255 | Context("when logging from multiple threads", func() { 256 | var content = "abcdefg " 257 | 258 | BeforeEach(func() { 259 | wg := new(sync.WaitGroup) 260 | for i := 0; i < MaxThreads; i++ { 261 | wg.Add(1) 262 
| go func() { 263 | sink.Log(lager.LogFormat{LogLevel: lager.INFO, Message: content}) 264 | wg.Done() 265 | }() 266 | } 267 | wg.Wait() 268 | }) 269 | 270 | It("writes to the given writer", func() { 271 | logs := logEntries(buf) 272 | for _, log := range logs { 273 | Expect(log.LogLevel).To(Equal(lager.INFO)) 274 | Expect(log.Message).To(Equal(content)) 275 | } 276 | }) 277 | }) 278 | 279 | Context("when using a buffered writer", func() { 280 | var ( 281 | colWriter *collectingWriter 282 | bufferSize int 283 | ) 284 | 285 | BeforeEach(func() { 286 | colWriter = &collectingWriter{} 287 | }) 288 | 289 | JustBeforeEach(func() { 290 | bufWriter := bufio.NewWriterSize(colWriter, bufferSize) 291 | sink = lager.NewPrettySink(bufWriter, lager.INFO) 292 | }) 293 | 294 | Context("and the message has length exactly equal to the buffer size", func() { 295 | var message lager.LogFormat 296 | 297 | BeforeEach(func() { 298 | message = lager.LogFormat{LogLevel: lager.INFO, Message: "hello"} 299 | bufferSize = len(message.ToJSON()) 300 | }) 301 | 302 | It("does not write messages starting with a new line", func() { 303 | sink.Log(message) 304 | sink.Log(message) 305 | Expect(len(colWriter.writes)).To(Equal(2)) 306 | Expect(colWriter.writes[0]).NotTo(HavePrefix("\n")) 307 | Expect(colWriter.writes[1]).NotTo(HavePrefix("\n")) 308 | }) 309 | }) 310 | }) 311 | }) 312 | 313 | // copyWriter is an INTENTIONALLY UNSAFE writer. Use it to test code that 314 | // should be handling thread safety. 315 | type copyWriter struct { 316 | contents []byte 317 | lock *sync.RWMutex 318 | } 319 | 320 | func NewCopyWriter() *copyWriter { 321 | return ©Writer{ 322 | contents: []byte{}, 323 | lock: new(sync.RWMutex), 324 | } 325 | } 326 | 327 | // no, we really mean RLock on write. 328 | func (writer *copyWriter) Write(p []byte) (n int, err error) { 329 | writer.lock.RLock() 330 | defer writer.lock.RUnlock() 331 | 332 | writer.contents = append(writer.contents, p...) 
333 | return len(p), nil 334 | } 335 | 336 | func (writer *copyWriter) Copy() []byte { 337 | writer.lock.Lock() 338 | defer writer.lock.Unlock() 339 | 340 | contents := make([]byte, len(writer.contents)) 341 | copy(contents, writer.contents) 342 | return contents 343 | } 344 | 345 | type collectingWriter struct { 346 | writes []string 347 | } 348 | 349 | func (w *collectingWriter) Write(p []byte) (n int, err error) { 350 | w.writes = append(w.writes, string(p)) 351 | return len(p), nil 352 | } 353 | 354 | // duplicate of logger.go's formatTimestamp() function 355 | func formatTimestamp(t time.Time) string { 356 | return fmt.Sprintf("%.9f", float64(t.UnixNano())/1e9) 357 | } 358 | 359 | func firstLogEntry(r io.Reader) chug.LogEntry { 360 | entries := logEntries(r) 361 | Expect(len(entries)).To(BeNumerically(">", 0)) 362 | return entries[0] 363 | } 364 | 365 | func logEntries(r io.Reader) []chug.LogEntry { 366 | stream := make(chan chug.Entry, 42) 367 | go chug.Chug(r, stream) 368 | entries := []chug.LogEntry{} 369 | for entry := range stream { 370 | if entry.IsLager { 371 | entries = append(entries, entry.Log) 372 | } 373 | } 374 | return entries 375 | } 376 | --------------------------------------------------------------------------------