├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── build.sbt
├── img
│   ├── netty-jaeger.png
│   └── netty-metrics.png
├── project
│   ├── build.properties
│   └── plugins.sbt
├── src
│   ├── main
│   │   ├── java
│   │   │   └── kamon
│   │   │       └── netty
│   │   │           └── util
│   │   │               └── QueueWrapperAdapter.java
│   │   ├── resources
│   │   │   ├── META-INF
│   │   │   │   └── aop.xml
│   │   │   └── reference.conf
│   │   └── scala
│   │       └── kamon
│   │           └── netty
│   │               ├── Metrics.scala
│   │               ├── Netty.scala
│   │               ├── instrumentation
│   │               │   ├── ChannelInstrumentation.scala
│   │               │   ├── EpollEventLoopInstrumentation.scala
│   │               │   ├── HttpClientInstrumentation.scala
│   │               │   ├── HttpServerInstrumentation.scala
│   │               │   ├── NioEventLoopInstrumentation.scala
│   │               │   ├── ServerBootstrapInstrumentation.scala
│   │               │   └── package.scala
│   │               └── util
│   │                   ├── EventLoopUtils.scala
│   │                   ├── Latency.scala
│   │                   └── MonitoredQueue.scala
│   └── test
│       └── scala
│           └── kamon
│               ├── netty
│               │   ├── Clients.scala
│               │   ├── NettyHTTPTracingSpec.scala
│               │   ├── NettyMetricsSpec.scala
│               │   └── Servers.scala
│               └── testkit
│                   ├── Reconfigure.scala
│                   └── TestSpanReporter.scala
└── version.sbt
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | *.log
3 | .history
4 | *.sc
5 | .pygments-cache
6 | .DS_Store
7 |
8 | # sbt specific
9 | dist/*
10 | target/
11 | lib_managed/
12 | src_managed/
13 | project/boot/
14 | project/plugins/project/
15 | .ensime
16 | .ensime_cache
17 |
18 | # Scala-IDE specific
19 | .scala_dependencies
20 | .idea
21 | .idea_modules
22 |
23 | # Intellij
24 | .idea/
25 | *.iml
26 | *.iws
27 |
28 | # Eclipse
29 | .project
30 | .settings
31 | .classpath
32 | .cache
33 | .cache-main
34 | .cache-tests
35 | bin/
36 |
37 | _site
38 |
39 | # Ignore Play! working directory #
40 | db
41 | eclipse
42 | lib
43 | log
44 | logs
45 | modules
46 | precompiled
47 | project/project
48 | project/target
49 | target
50 | tmp
51 | test-result
52 | server.pid
53 | *.iml
54 | *.eml
55 |
56 | # Default sigar library provision location.
57 | native/
58 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: scala
2 | script:
3 | - sbt test
4 | scala:
5 | - 2.12.2
6 | jdk:
7 | - oraclejdk8
8 | before_script:
9 | - mkdir $TRAVIS_BUILD_DIR/tmp
10 | - export SBT_OPTS="-Djava.io.tmpdir=$TRAVIS_BUILD_DIR/tmp"
11 | sudo: false
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kamon Netty
2 | [![Build Status](https://travis-ci.org/kamon-io/kamon-netty.svg?branch=master)](https://travis-ci.org/kamon-io/kamon-netty)
3 | [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/kamon-io/Kamon?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
4 | [![Maven Central](https://maven-badges.herokuapp.com/maven-central/io.kamon/kamon-netty_2.12/badge.svg)](https://maven-badges.herokuapp.com/maven-central/io.kamon/kamon-netty_2.12)
5 |
6 |
7 | ### Getting Started
8 |
9 | The `kamon-netty` module ships with bytecode instrumentation that brings automatic traces and metrics to your
10 | Netty-based applications and libraries.
11 |
12 |
13 | The kamon-netty module requires you to start your application with the AspectJ Weaver agent. Kamon will warn you
14 | at startup if you fail to do so (see the example at the end of this README for one way to attach it).
15 |
16 | Kamon Netty is currently available for Scala 2.11 and 2.12.
17 |
18 | Supported releases and dependencies are shown below.
19 |
20 | | kamon | status | jdk | scala
21 | |:------:|:------:|:----:|------------------
22 | | 1.0.0 | experimental | 1.8+ | 2.11, 2.12
23 |
24 | To get started, add the following dependency to your `build.sbt` or `pom.xml`
25 | file:
26 |
27 | ```scala
28 | libraryDependencies += "io.kamon" %% "kamon-netty" % "1.0.0"
29 | ```
30 |
31 | ```xml
32 | <dependency>
33 | <groupId>io.kamon</groupId>
34 | <artifactId>kamon-netty_2.12</artifactId>
35 | <version>1.0.0</version>
36 | </dependency>
37 | ```
38 |
39 | ### Documentation
40 |
41 | ### Event Loop Metrics ###
42 |
43 | The metrics that you will get for an __EventLoop__ are:
44 |
45 | * __registered-channels__: The number of registered Channels.
46 | * __task-processing-time__: A histogram that tracks, in nanoseconds, how long the last processing run of all pending tasks took.
47 | * __task-queue-size__: The number of tasks that are pending processing.
48 | * __task-waiting-time__: A histogram that tracks how long tasks wait in the queue before being processed.
49 |
50 | ![netty-metrics](img/netty-metrics.png)
51 |
52 | ### Traces ###
53 |
54 | ![netty-jaeger](img/netty-jaeger.png)
55 |
56 |
57 |
58 |
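59 | ### Enabling the AspectJ Weaver
60 |
61 | As noted in Getting Started, the instrumentation only activates when your application runs with the AspectJ Weaver
62 | agent attached. One way to do this from sbt is the `sbt-javaagent` plugin, mirroring what this project's own
63 | `build.sbt` does (the versions below are illustrative; use the ones that match your build):
64 |
65 | ```scala
66 | // project/plugins.sbt
67 | addSbtPlugin("com.lightbend.sbt" % "sbt-javaagent" % "0.1.3")
68 |
69 | // build.sbt
70 | enablePlugins(JavaAgent)
71 | javaAgents += "org.aspectj" % "aspectjweaver" % "1.8.10" % "compile;test;runtime"
72 | ```
73 |
74 | Alternatively, pass the agent directly to the JVM with `-javaagent:/path/to/aspectjweaver.jar`.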
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 | /* =========================================================================================
2 | * Copyright © 2013-2017 the kamon project
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
5 | * except in compliance with the License. You may obtain a copy of the License at
6 | *
7 | * http://www.apache.org/licenses/LICENSE-2.0
8 | *
9 | * Unless required by applicable law or agreed to in writing, software distributed under the
10 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
11 | * either express or implied. See the License for the specific language governing permissions
12 | * and limitations under the License.
13 | * =========================================================================================
14 | */
15 |
16 | val kamonCore = "io.kamon" %% "kamon-core" % "1.0.0-RC1"
17 | val kamonTestkit = "io.kamon" %% "kamon-testkit" % "1.0.0-RC1"
18 |
19 | val netty = "io.netty" % "netty-all" % "4.0.51.Final"
20 | val nettyNative = "io.netty" % "netty-transport-native-epoll" % "4.0.51.Final" classifier "linux-x86_64"
21 | val logback = "ch.qos.logback" % "logback-classic" % "1.0.13"
22 |
23 |
24 | lazy val root = (project in file("."))
25 | .settings(Seq(
26 | name := "kamon-netty",
27 | scalaVersion := "2.12.3",
28 | crossScalaVersions := Seq("2.11.8", "2.12.3")))
29 | .enablePlugins(JavaAgent)
30 | .settings(isSnapshot := true)
31 | .settings(resolvers += Resolver.bintrayRepo("kamon-io", "snapshots"))
32 | .settings(javaAgents += "org.aspectj" % "aspectjweaver" % "1.8.10" % "compile;test;runtime")
33 | .settings(
34 | libraryDependencies ++=
35 | compileScope(kamonCore) ++
36 | providedScope(netty, nettyNative) ++
37 | testScope(scalatest, kamonTestkit, logbackClassic, logback))
38 |
39 |
40 |
--------------------------------------------------------------------------------
/img/netty-jaeger.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kamon-io/kamon-netty/b3f184afd8b0c3afc889a4ad8a56db7b780e5422/img/netty-jaeger.png
--------------------------------------------------------------------------------
/img/netty-metrics.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kamon-io/kamon-netty/b3f184afd8b0c3afc889a4ad8a56db7b780e5422/img/netty-metrics.png
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=0.13.13
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | lazy val root: Project = project.in(file(".")).dependsOn(latestSbtUmbrella)
2 | lazy val latestSbtUmbrella = uri("git://github.com/kamon-io/kamon-sbt-umbrella.git")
3 |
4 | addSbtPlugin("com.lightbend.sbt" % "sbt-javaagent" % "0.1.3")
--------------------------------------------------------------------------------
/src/main/java/kamon/netty/util/QueueWrapperAdapter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 |
18 | package kamon.netty.util;
19 |
20 | import java.util.Collection;
21 | import java.util.Iterator;
22 | import java.util.Queue;
23 |
24 | public class QueueWrapperAdapter<E> implements Queue<E> {
25 |
26 | private final Queue<E> underlying;
27 |
28 | public QueueWrapperAdapter(Queue<E> underlying) {
29 | this.underlying = underlying;
30 | }
31 |
32 | @Override
33 | public int size() {
34 | return underlying.size();
35 | }
36 |
37 | @Override
38 | public boolean isEmpty() {
39 | return underlying.isEmpty();
40 | }
41 |
42 | @Override
43 | public boolean contains(Object o) {
44 | return underlying.contains(o);
45 | }
46 |
47 | @Override
48 | public Iterator<E> iterator() {
49 | return underlying.iterator();
50 | }
51 |
52 | @Override
53 | public Object[] toArray() {
54 | return underlying.toArray();
55 | }
56 |
57 | @Override
58 | public <T> T[] toArray(T[] a) {
59 | return underlying.toArray(a);
60 | }
61 |
62 | @Override
63 | public boolean add(E e) {
64 | return underlying.add(e);
65 | }
66 |
67 | @Override
68 | public boolean remove(Object o) {
69 | return underlying.remove(o);
70 | }
71 |
72 | @Override
73 | public boolean containsAll(Collection<?> c) {
74 | return underlying.containsAll(c);
75 | }
76 |
77 | @Override
78 | public boolean addAll(Collection<? extends E> c) {
79 | return underlying.addAll(c);
80 | }
81 |
82 | @Override
83 | public boolean removeAll(Collection<?> c) {
84 | return underlying.removeAll(c);
85 | }
86 |
87 | @Override
88 | public boolean retainAll(Collection<?> c) {
89 | return underlying.retainAll(c);
90 | }
91 |
92 | @Override
93 | public void clear() {
94 | underlying.clear();
95 | }
96 |
97 | @Override
98 | public boolean offer(E e) {
99 | return underlying.offer(e);
100 | }
101 |
102 | @Override
103 | public E remove() {
104 | return underlying.remove();
105 | }
106 |
107 | @Override
108 | public E poll() {
109 | return underlying.poll();
110 | }
111 |
112 | @Override
113 | public E element() {
114 | return underlying.element();
115 | }
116 |
117 | @Override
118 | public E peek() {
119 | return underlying.peek();
120 | }
121 | }
122 |
--------------------------------------------------------------------------------
/src/main/resources/META-INF/aop.xml:
--------------------------------------------------------------------------------
1 | <!DOCTYPE aspectj PUBLIC "-//AspectJ//DTD//EN" "http://www.eclipse.org/aspectj/dtd/aspectj.dtd">
2 | <aspectj>
3 |   <aspects>
4 |     <aspect name="kamon.netty.instrumentation.ServerBootstrapInstrumentation"/>
5 |     <aspect name="kamon.netty.instrumentation.EventLoopMixin"/>
6 |     <aspect name="kamon.netty.instrumentation.NioEventLoopInstrumentation"/>
7 |     <aspect name="kamon.netty.instrumentation.EpollEventLoopInstrumentation"/>
8 |     <aspect name="kamon.netty.instrumentation.ChannelInstrumentation"/>
9 |     <aspect name="kamon.netty.instrumentation.HttpServerInstrumentation"/>
10 |     <aspect name="kamon.netty.instrumentation.HttpClientInstrumentation"/>
11 |   </aspects>
12 |
13 |   <weaver>
14 |     <include within="io.netty..*"/>
15 |     <include within="kamon.netty..*"/>
16 |   </weaver>
17 | </aspectj>
18 |
--------------------------------------------------------------------------------
/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | # =================================== #
2 | # kamon-netty reference configuration #
3 | # =================================== #
4 |
5 | kamon {
6 | netty {
7 | # Fully qualified name of the implementation of kamon.netty.NameGenerator that will be used for assigning names
8 | # to traces.
9 | name-generator = kamon.netty.DefaultNameGenerator
10 | }
11 | }
--------------------------------------------------------------------------------
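The `name-generator` setting in reference.conf above can point at any implementation of the `kamon.netty.NameGenerator` trait (defined in `Netty.scala` below); the module instantiates it reflectively at startup and on reconfiguration, so the class needs a no-argument constructor. A minimal sketch of a custom generator, with a purely illustrative class name and naming scheme:

```scala
package com.example

import io.netty.handler.codec.http.HttpRequest
import kamon.netty.NameGenerator

// Names every server and client operation after the HTTP method only, e.g. "get" or "post".
class MethodOnlyNameGenerator extends NameGenerator {
  override def generateOperationName(request: HttpRequest): String =
    request.getMethod.name().toLowerCase

  override def generateHttpClientOperationName(request: HttpRequest): String =
    request.getMethod.name().toLowerCase
}
```

To use it, set `kamon.netty.name-generator = "com.example.MethodOnlyNameGenerator"` in your application.conf.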
/src/main/scala/kamon/netty/Metrics.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty
18 |
19 | import kamon.Kamon
20 | import kamon.metric.MeasurementUnit._
21 | import kamon.metric._
22 |
23 |
24 | object Metrics {
25 |
26 | /**
27 | * Metrics for Netty Event Loops:
28 | *
29 | * - registered-channels: The number of registered Channels.
30 | * - task-processing-time: The number of nanoseconds the last processing of all tasks took.
31 | * - task-queue-size: The number of tasks that are pending for processing.
32 | * - task-waiting-time: The waiting time in the queue.
33 | */
34 | val registeredChannelsMetric = Kamon.minMaxCounter("netty.event-loop.registered-channels")
35 | val taskProcessingTimeMetric = Kamon.histogram("netty.event-loop.task-processing-time", time.nanoseconds)
36 | val taskQueueSizeMetric = Kamon.minMaxCounter("netty.event-loop.task-queue-size")
37 | val taskWaitingTimeMetric = Kamon.histogram("netty.event-loop.task-waiting-time", time.nanoseconds)
38 |
39 |
40 | def forEventLoop(name: String): EventLoopMetrics = {
41 | val eventLoopTags = Map("name" -> name)
42 | EventLoopMetrics(
43 | eventLoopTags,
44 | registeredChannelsMetric.refine(eventLoopTags),
45 | taskProcessingTimeMetric.refine(eventLoopTags),
46 | taskQueueSizeMetric.refine(eventLoopTags),
47 | taskWaitingTimeMetric.refine(eventLoopTags)
48 | )
49 | }
50 |
51 | case class EventLoopMetrics(tags: Map[String, String],
52 | registeredChannels: MinMaxCounter,
53 | taskProcessingTime: Histogram,
54 | taskQueueSize: MinMaxCounter,
55 | taskWaitingTime: Histogram) {
56 |
57 | def cleanup(): Unit = {
58 | registeredChannelsMetric.remove(tags)
59 | taskProcessingTimeMetric.remove(tags)
60 | taskQueueSizeMetric.remove(tags)
61 | taskWaitingTimeMetric.remove(tags)
62 | }
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/Netty.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty
18 |
19 | import java.net.URI
20 |
21 | import com.typesafe.config.Config
22 | import io.netty.handler.codec.http.HttpRequest
23 | import kamon.util.DynamicAccess
24 | import kamon.{Kamon, OnReconfigureHook}
25 |
26 | object Netty {
27 | private var nameGenerator: NameGenerator = new DefaultNameGenerator()
28 |
29 | loadConfiguration(Kamon.config())
30 |
31 | def generateOperationName(request: HttpRequest): String =
32 | nameGenerator.generateOperationName(request)
33 |
34 | def generateHttpClientOperationName(request: HttpRequest): String =
35 | nameGenerator.generateHttpClientOperationName(request)
36 |
37 | Kamon.onReconfigure(new OnReconfigureHook {
38 | override def onReconfigure(newConfig: Config): Unit =
39 | Netty.loadConfiguration(newConfig)
40 | })
41 |
42 | private def loadConfiguration(config: Config): Unit = synchronized {
43 | val dynamic = new DynamicAccess(getClass.getClassLoader)
44 | val nameGeneratorFQCN = config.getString("kamon.netty.name-generator")
45 | nameGenerator = dynamic.createInstanceFor[NameGenerator](nameGeneratorFQCN, Nil).get
46 | }
47 | }
48 |
49 | trait NameGenerator {
50 | def generateOperationName(request: HttpRequest): String
51 | def generateHttpClientOperationName(request: HttpRequest): String
52 | }
53 |
54 | class DefaultNameGenerator extends NameGenerator {
55 |
56 | import java.util.Locale
57 |
58 | import scala.collection.concurrent.TrieMap
59 |
60 | private val localCache = TrieMap.empty[String, String]
61 | private val normalizePattern = """\$([^<]+)<[^>]+>""".r
62 |
63 | override def generateHttpClientOperationName(request: HttpRequest): String = {
64 | val uri = new URI(request.getUri)
65 | s"${uri.getAuthority}${uri.getPath}"
66 | }
67 |
68 | override def generateOperationName(request: HttpRequest): String = {
69 | localCache.getOrElseUpdate(s"${request.getMethod.name()}${request.getUri}", {
70 | // Convert paths of form GET /foo/bar/$paramname/blah to foo.bar.paramname.blah.get
71 | val uri = new URI(request.getUri)
72 | val p = normalizePattern.replaceAllIn(uri.getPath, "$1").replace('/', '.').dropWhile(_ == '.')
73 | val normalisedPath = {
74 | if (p.lastOption.exists(_ != '.')) s"$p."
75 | else p
76 | }
77 | s"$normalisedPath${request.getMethod.name().toLowerCase(Locale.ENGLISH)}"
78 | })
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/instrumentation/ChannelInstrumentation.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.instrumentation
18 |
19 | import kamon.Kamon
20 | import kamon.context.Context
21 | import org.aspectj.lang.annotation._
22 |
23 | import scala.beans.BeanProperty
24 |
25 |
26 | trait ChannelContextAware {
27 | @volatile var startTime: Long = 0
28 | @volatile @BeanProperty var context:Context = Kamon.currentContext()
29 | }
30 |
31 | trait RequestContextAware {
32 | @volatile @BeanProperty var context:Context = Kamon.currentContext()
33 | }
34 |
35 | @Aspect
36 | class ChannelInstrumentation {
37 | @DeclareMixin("io.netty.channel.Channel+")
38 | def mixinChannelToContextAware: ChannelContextAware = new ChannelContextAware{}
39 |
40 | @DeclareMixin("io.netty.handler.codec.http.HttpMessage+")
41 | def mixinRequestToContextAware: RequestContextAware = new RequestContextAware{}
42 |
43 | @After("execution(io.netty.handler.codec.http.HttpMessage+.new(..)) && this(request)")
44 | def afterCreation(request: RequestContextAware): Unit = {
45 | // Force traceContext initialization.
46 | request.getContext()
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/instrumentation/EpollEventLoopInstrumentation.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.instrumentation
18 |
19 | import java.util
20 |
21 | import io.netty.channel.{Channel, EventLoop}
22 | import io.netty.util.concurrent.EventExecutor
23 | import kamon.netty.Metrics
24 | import kamon.netty.util.MonitoredQueue
25 | import org.aspectj.lang.ProceedingJoinPoint
26 | import org.aspectj.lang.annotation._
27 |
28 | @Aspect
29 | class EpollEventLoopInstrumentation {
30 | import kamon.netty.util.EventLoopUtils._
31 |
32 | @After("execution(* io.netty.channel.epoll.EpollEventLoop.add(..)) && this(eventLoop)")
33 | def onAdd(eventLoop: EventExecutor): Unit =
34 | Metrics.forEventLoop(name(eventLoop)).registeredChannels.increment()
35 |
36 | @After("execution(* io.netty.channel.epoll.EpollEventLoop.remove(..)) && args(channel) && this(eventLoop)")
37 | def onRemove(eventLoop: EventExecutor, channel:Channel): Unit = {
38 | if(channel.isOpen)
39 | Metrics.forEventLoop(name(eventLoop)).registeredChannels.decrement()
40 | }
41 |
42 | @Around("execution(* io.netty.channel.epoll.EpollEventLoop.newTaskQueue(..)) && this(eventLoop)")
43 | def onNewTaskQueue(pjp: ProceedingJoinPoint, eventLoop: EventLoop): Any = {
44 | val queue = pjp.proceed().asInstanceOf[util.Queue[Runnable]]
45 | MonitoredQueue(eventLoop, queue)
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/instrumentation/HttpClientInstrumentation.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.instrumentation
18 |
19 | import java.util
20 |
21 | import io.netty.channel.ChannelHandlerContext
22 | import io.netty.handler.codec.http.HttpRequest
23 | import kamon.Kamon
24 | import kamon.netty.Netty
25 | import kamon.trace._
26 | import org.aspectj.lang.ProceedingJoinPoint
27 | import org.aspectj.lang.annotation._
28 |
29 | @Aspect
30 | class HttpClientInstrumentation {
31 |
32 | @Pointcut("execution(* io.netty.handler.codec.http.HttpClientCodec.Decoder.decode(..))")
33 | def decoderPointcut():Unit = {}
34 |
35 | @Pointcut("execution(* io.netty.handler.codec.http.HttpClientCodec.Encoder.encode(..))")
36 | def encoderPointcut():Unit = {}
37 |
38 | @Around("encoderPointcut() && args(ctx, request, out)")
39 | def onEncodeRequest(pjp: ProceedingJoinPoint, ctx: ChannelHandlerContext, request: HttpRequest, out: util.List[AnyRef]): AnyRef = {
40 | val currentContext = request.getContext()
41 | val clientSpan = currentContext.get(Span.ContextKey)
42 |
43 | if (clientSpan.isEmpty()) pjp.proceed()
44 | else {
45 | val clientRequestSpan = Kamon.buildSpan(Netty.generateHttpClientOperationName(request))
46 | .asChildOf(clientSpan)
47 | .withTag("span.kind", "client")
48 | .withTag("component", "netty")
49 | .withTag("http.method", request.getMethod.name())
50 | .withTag("http.url", request.getUri)
51 | .start()
52 |
53 | val newContext = currentContext.withKey(Span.ContextKey, clientRequestSpan)
54 |
55 | ctx.channel().toContextAware().setContext(newContext)
56 |
57 | pjp.proceed(Array(ctx, encodeContext(newContext, request), out))
58 | }
59 | }
60 |
61 | @After("decoderPointcut() && args(ctx, *, out)")
62 | def onDecodeResponse(ctx: ChannelHandlerContext, out: java.util.List[AnyRef]): Unit = {
63 | if (out.size() > 0 && out.get(0).isHttpResponse()) {
64 | val clientSpan = ctx.channel().getContext().get(Span.ContextKey)
65 | clientSpan.finish()
66 | }
67 | }
68 |
69 | @AfterThrowing("decoderPointcut() && args(ctx, *, *)")
70 | def onDecodeError(ctx: ChannelHandlerContext): Unit = {
71 | val clientSpan = ctx.channel().getContext().get(Span.ContextKey)
72 | clientSpan.addTag("error", value = true).finish()
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/instrumentation/HttpServerInstrumentation.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.instrumentation
18 |
19 | import io.netty.channel.ChannelHandlerContext
20 | import io.netty.handler.codec.http.HttpResponse
21 | import kamon.Kamon
22 | import kamon.netty.Netty
23 | import kamon.trace.Span
24 | import org.aspectj.lang.annotation.{After, Aspect, Before}
25 |
26 | @Aspect
27 | class HttpServerInstrumentation {
28 |
29 | @After("execution(* io.netty.handler.codec.http.HttpObjectDecoder+.decode(..)) && args(ctx, *, out)")
30 | def onDecodeRequest(ctx: ChannelHandlerContext, out:java.util.List[AnyRef]): Unit = {
31 | if (out.size() > 0 && out.get(0).isHttpRequest()) {
32 | val request = out.get(0).toHttpRequest()
33 | val channel = ctx.channel().toContextAware()
34 | val incomingContext = decodeContext(request)
35 | val serverSpan = Kamon.buildSpan(Netty.generateOperationName(request))
36 | .asChildOf(incomingContext.get(Span.ContextKey))
37 | .withStartTimestamp(channel.startTime)
38 | .withTag("span.kind", "server")
39 | .withTag("component", "netty")
40 | .withTag("http.method", request.getMethod.name())
41 | .withTag("http.url", request.getUri)
42 | .start()
43 |
44 | channel.setContext(incomingContext.withKey(Span.ContextKey, serverSpan))
45 | }
46 | }
47 |
48 | @Before("execution(* io.netty.handler.codec.http.HttpObjectEncoder+.encode(..)) && args(ctx, response, *)")
49 | def onEncodeResponse(ctx: ChannelHandlerContext, response:HttpResponse): Unit = {
50 | val serverSpan = ctx.channel().getContext().get(Span.ContextKey)
51 | if(isError(response.getStatus.code()))
52 | serverSpan.addTag("error", value = true)
53 | serverSpan.finish()
54 | }
55 | }
56 |
57 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/instrumentation/NioEventLoopInstrumentation.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.instrumentation
18 |
19 | import java.util
20 |
21 | import io.netty.channel.nio.NioEventLoop
22 | import io.netty.channel.{ChannelFuture, ChannelFutureListener}
23 | import kamon.metric.MinMaxCounter
24 | import kamon.netty.Metrics
25 | import kamon.netty.util.EventLoopUtils._
26 | import kamon.netty.util.MonitoredQueue
27 | import org.aspectj.lang.ProceedingJoinPoint
28 | import org.aspectj.lang.annotation._
29 |
30 | @Aspect
31 | class NioEventLoopInstrumentation {
32 |
33 | @Around("execution(* io.netty.channel.SingleThreadEventLoop.register(..)) && this(eventLoop)")
34 | def onRegister(pjp: ProceedingJoinPoint, eventLoop: NioEventLoop): Any = {
35 | val future = pjp.proceed().asInstanceOf[ChannelFuture]
36 | val registeredChannels = Metrics.forEventLoop(name(eventLoop)).registeredChannels
37 |
38 | if (future.isSuccess) registeredChannels.increment()
39 | else future.addListener(registeredChannelListener(registeredChannels))
40 | future
41 | }
42 |
43 | @Before("execution(* io.netty.channel.nio.NioEventLoop.cancel(..)) && this(eventLoop)")
44 | def onCancel(eventLoop: NioEventLoop): Unit = {
45 | val registeredChannels = Metrics.forEventLoop(name(eventLoop)).registeredChannels
46 | registeredChannels.decrement()
47 | }
48 |
49 | val registeredChannelListener: MinMaxCounter => ChannelFutureListener = registeredChannels => new ChannelFutureListener() {
50 | override def operationComplete(future: ChannelFuture): Unit = {
51 | if(future.isSuccess) {
52 | registeredChannels.increment()
53 | }
54 | }
55 | }
56 |
57 | @Around("execution(* io.netty.channel.nio.NioEventLoop.newTaskQueue(..)) && this(eventLoop)")
58 | def onNewTaskQueue(pjp: ProceedingJoinPoint, eventLoop: NioEventLoop): Any = {
59 | val queue = pjp.proceed().asInstanceOf[util.Queue[Runnable]]
60 | MonitoredQueue(eventLoop, queue)
61 | }
62 | }
63 |
64 |
65 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/instrumentation/ServerBootstrapInstrumentation.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.instrumentation
18 |
19 | import io.netty.channel.{Channel, ChannelHandler, ChannelHandlerContext, ChannelInboundHandlerAdapter}
20 | import kamon.util.Clock
21 | import org.aspectj.lang.annotation._
22 |
23 | @Aspect
24 | class ServerBootstrapInstrumentation {
25 |
26 | import ServerBootstrapInstrumentation._
27 |
28 | @Before("execution(* io.netty.bootstrap.ServerBootstrap.group(..)) && args(bossGroup, workerGroup)")
29 | def onNewServerBootstrap(bossGroup:NamedEventLoopGroup, workerGroup:NamedEventLoopGroup):Unit = {
30 | if(bossGroup == workerGroup) {
31 | bossGroup.name = BossGroupName
32 | workerGroup.name = BossGroupName
33 | } else {
34 | bossGroup.name = BossGroupName
35 | workerGroup.name = WorkerGroupName
36 | }
37 | }
38 |
39 | @After("execution(* io.netty.bootstrap.ServerBootstrap.ServerBootstrapAcceptor.channelRead(..)) && args(ctx, child)")
40 | def onChannelRead(ctx: ChannelHandlerContext, child: Channel):Unit = {
41 | val pipeline = child.pipeline()
42 | if(pipeline.get(KamonHandler) == null)
43 | pipeline.addFirst(KamonHandler, new KamonHandler())
44 | }
45 | }
46 |
47 | object ServerBootstrapInstrumentation {
48 | val BossGroupName = "boss-group"
49 | val WorkerGroupName = "worker-group"
50 | val KamonHandler = "kamon-handler"
51 |
52 | @ChannelHandler.Sharable
53 | private class KamonHandler extends ChannelInboundHandlerAdapter {
54 | override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
55 | ctx.channel().toContextAware().startTime = Clock.microTimestamp()
56 | super.channelRead(ctx, msg)
57 | }
58 | }
59 | }
60 |
61 | @Aspect
62 | class EventLoopMixin {
63 | @DeclareMixin("io.netty.channel.EventLoopGroup+")
64 | def mixinEventLoopGroupWithNamedEventLoopGroup: NamedEventLoopGroup = new NamedEventLoopGroup {}
65 | }
66 |
67 | trait NamedEventLoopGroup {
68 | var name:String = _
69 | }
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/instrumentation/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty
18 |
19 |
20 | import io.netty.handler.codec.http.{HttpRequest, HttpResponse}
21 | import kamon.Kamon
22 | import kamon.context.{Context, TextMap}
23 |
24 | package object instrumentation {
25 |
26 | implicit class ChannelSyntax(val channel: io.netty.channel.Channel) extends AnyVal {
27 | def toContextAware(): ChannelContextAware =
28 | channel.asInstanceOf[ChannelContextAware]
29 |
30 | def getContext(): Context =
31 | channel.toContextAware().getContext
32 | }
33 |
34 | implicit class RequestSyntax(val request: HttpRequest) extends AnyVal {
35 | def toContextAware(): RequestContextAware =
36 | request.asInstanceOf[RequestContextAware]
37 |
38 | def getContext(): Context =
39 | request.toContextAware().getContext
40 | }
41 |
42 | implicit class HttpSyntax(val obj: AnyRef) extends AnyVal {
43 | def toHttpRequest(): HttpRequest =
44 | obj.asInstanceOf[HttpRequest]
45 |
46 | def isHttpRequest(): Boolean =
47 | obj.isInstanceOf[HttpRequest]
48 |
49 | def toHttpResponse(): HttpResponse =
50 | obj.asInstanceOf[HttpResponse]
51 |
52 | def isHttpResponse(): Boolean =
53 | obj.isInstanceOf[HttpResponse]
54 | }
55 |
56 | def isError(statusCode: Int): Boolean =
57 | statusCode >= 500 && statusCode < 600
58 |
59 | def encodeContext(ctx:Context, request:HttpRequest): HttpRequest = {
60 | val textMap = Kamon.contextCodec().HttpHeaders.encode(ctx)
61 | textMap.values.foreach { case (key, value) => request.headers().add(key, value) }
62 | request
63 | }
64 |
65 | def decodeContext(request: HttpRequest): Context = {
66 | val headersTextMap = readOnlyTextMapFromHeaders(request)
67 | Kamon.contextCodec().HttpHeaders.decode(headersTextMap)
68 | }
69 |
70 | private def readOnlyTextMapFromHeaders(request: HttpRequest): TextMap = new TextMap {
71 | import scala.collection.JavaConverters._
72 |
73 | private val headersMap = request.headers().iterator().asScala.map { h => h.getKey -> h.getValue }.toMap
74 |
75 | override def values: Iterator[(String, String)] = headersMap.iterator
76 | override def get(key: String): Option[String] = headersMap.get(key)
77 | override def put(key: String, value: String): Unit = {}
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/util/EventLoopUtils.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.util
18 |
19 | import io.netty.util.concurrent.EventExecutor
20 | import kamon.netty.instrumentation.NamedEventLoopGroup
21 |
22 | object EventLoopUtils {
23 | def name(eventLoop: EventExecutor): String = {
24 | val sanitize:String => String = str => str.replaceAll("(.)(\\p{Upper})", "$1-$2").toLowerCase()
25 | s"${eventLoop.parent().asInstanceOf[NamedEventLoopGroup].name}-${sanitize(eventLoop.getClass.getSimpleName)}"
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/util/Latency.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.util
18 |
19 | import kamon.metric.Histogram
20 | import kamon.util.Clock
21 |
22 |
23 | object Latency {
24 | def measure[A](histogram: Histogram)(thunk: ⇒ A): A = {
25 | val start = Clock.relativeNanoTimestamp()
26 | try thunk finally {
27 | val latency = Clock.relativeNanoTimestamp() - start
28 | histogram.record(latency)
29 | }
30 | }
31 | }
--------------------------------------------------------------------------------
/src/main/scala/kamon/netty/util/MonitoredQueue.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty.util
18 |
19 | import java.util
20 |
21 | import io.netty.channel.EventLoop
22 | import kamon.Kamon
23 | import kamon.context.{Context, HasContext}
24 | import kamon.netty.Metrics
25 | import kamon.netty.Metrics.EventLoopMetrics
26 | import kamon.netty.util.EventLoopUtils.name
27 | import kamon.util.Clock
28 |
29 | class MonitoredQueue(eventLoop:EventLoop, underlying:util.Queue[Runnable]) extends QueueWrapperAdapter[Runnable](underlying) {
30 |
31 | import MonitoredQueue._
32 |
33 | implicit lazy val eventLoopMetrics: EventLoopMetrics = Metrics.forEventLoop(name(eventLoop))
34 |
35 | override def add(runnable: Runnable): Boolean = {
36 | eventLoopMetrics.taskQueueSize.increment()
37 | underlying.add(new TimedTask(runnable))
38 | }
39 |
40 | override def offer(runnable: Runnable): Boolean = {
41 | eventLoopMetrics.taskQueueSize.increment()
42 | underlying.offer(new TimedTask(runnable))
43 | }
44 |
45 | override def remove(): Runnable = {
46 | val runnable = underlying.remove()
47 | eventLoopMetrics.taskQueueSize.decrement()
48 | eventLoopMetrics.taskWaitingTime.record(timeInQueue(runnable))
49 | runnable
50 | }
51 |
52 | override def poll(): Runnable = {
53 | val runnable = underlying.poll()
54 |
55 | if(runnable != null) {
56 | eventLoopMetrics.taskQueueSize.decrement()
57 | eventLoopMetrics.taskWaitingTime.record(timeInQueue(runnable))
58 | }
59 | runnable
60 | }
61 | }
62 |
63 | object MonitoredQueue {
64 | def apply(eventLoop: EventLoop, underlying: util.Queue[Runnable]): MonitoredQueue =
65 | new MonitoredQueue(eventLoop, underlying)
66 |
67 | def timeInQueue(runnable: Runnable):Long =
68 | runnable.asInstanceOf[TimedTask].timeInQueue
69 |
70 | }
71 |
72 | private[this] class TimedTask(underlying:Runnable)(implicit metrics: EventLoopMetrics) extends Runnable with HasContext {
73 | val startTime:Long = Clock.relativeNanoTimestamp()
74 | val context: Context = Kamon.currentContext()
75 |
76 | override def run(): Unit =
77 | Kamon.withContext(context) {
78 | Latency.measure(metrics.taskProcessingTime)(underlying.run())
79 | }
80 |
81 | def timeInQueue: Long =
82 | Clock.relativeNanoTimestamp() - startTime
83 | }
84 |
--------------------------------------------------------------------------------
/src/test/scala/kamon/netty/Clients.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty
18 |
19 | import java.net.InetSocketAddress
20 | import java.util.concurrent.{LinkedBlockingQueue, TimeUnit}
21 |
22 | import io.netty.bootstrap.Bootstrap
23 | import io.netty.buffer.Unpooled
24 | import io.netty.channel.nio.NioEventLoopGroup
25 | import io.netty.channel.socket.SocketChannel
26 | import io.netty.channel.socket.nio.NioSocketChannel
27 | import io.netty.channel.{Channel, ChannelHandlerContext, ChannelInboundHandlerAdapter, ChannelInitializer}
28 | import io.netty.handler.codec.http._
29 | import io.netty.util.CharsetUtil
30 |
31 | class NioEventLoopBasedClient(port: Int) {
32 |
33 | private val clientMessagesReceived = new LinkedBlockingQueue[AnyRef]()
34 | private val group = new NioEventLoopGroup(1)
35 | private val b = new Bootstrap
36 |
37 | b.group(group)
38 | .channel(classOf[NioSocketChannel])
39 | .handler(new HttpClientInitializer(clientMessagesReceived))
40 |
41 | val channel: Channel = b.connect(new InetSocketAddress(port)).sync.channel
42 |
43 | def close(): Unit = {
44 | channel.close
45 | group.shutdownGracefully()
46 | }
47 |
48 | def execute(request: DefaultFullHttpRequest, timeoutMillis: Long = 2000): FullHttpResponse = {
49 | val future = channel.write(request)
50 | channel.flush
51 | future.await(timeoutMillis)
52 | response()
53 | }
54 |
55 | def executeWithContent(request: DefaultHttpRequest, content: Seq[HttpContent], timeoutMillis: Long = 2000): FullHttpResponse = {
56 | val allFutures = (request +: content).map(channel.write)
57 | channel.flush
58 | allFutures.foreach(_.await(timeoutMillis))
59 | response()
60 | }
61 |
62 | def get(path: String): DefaultFullHttpRequest = {
63 | val request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, path)
64 | HttpHeaders.setContentLength(request, 0)
65 | request
66 | }
67 |
68 | def postWithChunks(path: String, chunks: String*): (DefaultHttpRequest, Seq[DefaultHttpContent]) = {
69 | val request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, path)
70 | HttpHeaders.setTransferEncodingChunked(request)
71 | val httpChunks = chunks.map(chunk => new DefaultHttpContent(Unpooled.copiedBuffer(chunk, CharsetUtil.UTF_8)))
72 | (request, httpChunks :+ new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER))
73 | }
74 |
75 | private def response(): FullHttpResponse =
76 | clientMessagesReceived.poll(2, TimeUnit.SECONDS).asInstanceOf[FullHttpResponse]
77 | }
78 |
79 | object NioEventLoopBasedClient {
80 | def apply(bindAddress: Int): NioEventLoopBasedClient = new NioEventLoopBasedClient(bindAddress)
81 | }
82 |
83 | object Clients {
84 | def withNioClient[A](bindAddress:Int = 9001)(thunk: NioEventLoopBasedClient => A): A = {
85 | val client = new NioEventLoopBasedClient(bindAddress)
86 | try thunk(client) finally client.close()
87 | }
88 | }
89 |
90 | private class HttpClientInitializer(received:java.util.Queue[AnyRef]) extends ChannelInitializer[SocketChannel] {
91 | override def initChannel(ch: SocketChannel): Unit = {
92 | val p = ch.pipeline
93 | p.addLast(new HttpClientCodec)
94 | p.addLast(new HttpObjectAggregator(1024))
95 | p.addLast(new HttpClientHandler(received))
96 | }
97 | }
98 |
99 | private class HttpClientHandler(received:java.util.Queue[AnyRef]) extends ChannelInboundHandlerAdapter {
100 | override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
101 | received.add(msg)
102 | }
103 | }
104 |
105 |
106 |
--------------------------------------------------------------------------------
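A minimal usage sketch of the client helpers above, assuming an HTTP server is already listening on the chosen port (the specs below arrange that via Servers.scala); the object name is illustrative:

    import kamon.netty.Clients.withNioClient

    object ClientUsageSketch extends App {
      // Issue a plain GET and a chunked POST through the NioEventLoopBasedClient.
      withNioClient(9001) { client =>
        val getResponse = client.execute(client.get("/route?param=123"))
        println(getResponse.getStatus)

        val (postRequest, chunks) = client.postWithChunks("/fetch-in-chunks", "test 1", "test 2")
        val postResponse = client.executeWithContent(postRequest, chunks)
        println(postResponse.getStatus)
      }
    }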
/src/test/scala/kamon/netty/NettyHTTPTracingSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty
18 |
19 | import kamon.Kamon
20 | import kamon.context.Context
21 | import kamon.netty.Clients.withNioClient
22 | import kamon.netty.Servers.withNioServer
23 | import kamon.testkit.{MetricInspection, Reconfigure, TestSpanReporter}
24 | import kamon.trace.Span
25 | import kamon.trace.Span.TagValue
26 | import kamon.util.Registration
27 | import org.scalatest._
28 | import org.scalatest.concurrent.Eventually
29 | import org.scalatest.time.SpanSugar._
30 |
31 | class NettyHTTPTracingSpec extends WordSpec with Matchers with MetricInspection with Eventually
32 | with Reconfigure with BeforeAndAfterAll with OptionValues {
33 |
34 | "The Netty HTTP span propagation" should {
35 | "propagate the span from the client to the server" in {
36 | withNioServer() { port =>
37 | withNioClient(port) { httpClient =>
38 | val clientSpan = Kamon.buildSpan("test-span").start()
39 | Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
40 | val httpGet = httpClient.get(s"http://localhost:$port/route?param=123")
41 | httpClient.execute(httpGet)
42 |
43 | eventually(timeout(2 seconds)) {
44 | val serverFinishedSpan = reporter.nextSpan().value
45 | val clientFinishedSpan = reporter.nextSpan().value
46 |
47 | serverFinishedSpan.operationName shouldBe "route.get"
48 | serverFinishedSpan.tags should contain ("span.kind" -> TagValue.String("server"))
49 |
50 | clientFinishedSpan.operationName shouldBe s"localhost:$port/route"
51 | clientFinishedSpan.tags should contain ("span.kind" -> TagValue.String("client"))
52 |
53 | serverFinishedSpan.context.traceID shouldBe clientFinishedSpan.context.traceID
54 | serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
55 |
56 | reporter.nextSpan() shouldBe empty
57 | }
58 | }
59 | }
60 | }
61 | }
62 |
63 | "contain a span error when an internal server error(500) occurs" in {
64 | withNioServer() { port =>
65 | withNioClient(port) { httpClient =>
66 | val clientSpan = Kamon.buildSpan("test-span-with-error").start()
67 | Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
68 | val httpGet = httpClient.get(s"http://localhost:$port/error")
69 | httpClient.execute(httpGet)
70 |
71 | eventually(timeout(2 seconds)) {
72 | val serverFinishedSpan = reporter.nextSpan().value
73 | val clientFinishedSpan = reporter.nextSpan().value
74 |
75 | serverFinishedSpan.operationName shouldBe "error.get"
76 | serverFinishedSpan.tags should contain allOf("span.kind" -> TagValue.String("server"), "error" -> TagValue.True)
77 |
78 | clientFinishedSpan.tags should contain ("span.kind" -> TagValue.String("client"))
79 | clientFinishedSpan.operationName shouldBe s"localhost:$port/error"
80 |
81 | serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
82 | clientFinishedSpan.context.parentID shouldBe clientSpan.context.spanID
83 |
84 | serverFinishedSpan.context.traceID shouldBe clientFinishedSpan.context.traceID
85 | serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
86 |
87 | reporter.nextSpan() shouldBe empty
88 | }
89 | }
90 | }
91 | }
92 | }
93 |
94 | "propagate the span from the client to the server with chunk-encoded request" in {
95 | withNioServer() { port =>
96 | withNioClient(port) { httpClient =>
97 | val clientSpan = Kamon.buildSpan("client-chunk-span").start()
98 | Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
99 | val (httpPost, chunks) = httpClient.postWithChunks(s"http://localhost:$port/fetch-in-chunks", "test 1", "test 2")
100 | httpClient.executeWithContent(httpPost, chunks)
101 |
102 | eventually(timeout(2 seconds)) {
103 | val serverFinishedSpan = reporter.nextSpan().value
104 | val clientFinishedSpan = reporter.nextSpan().value
105 |
106 | serverFinishedSpan.operationName shouldBe "fetch-in-chunks.post"
107 | serverFinishedSpan.tags should contain ("span.kind" -> TagValue.String("server"))
108 |
109 | clientFinishedSpan.operationName shouldBe s"localhost:$port/fetch-in-chunks"
110 | clientFinishedSpan.tags should contain ("span.kind" -> TagValue.String("client"))
111 |
112 | serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
113 | clientFinishedSpan.context.parentID shouldBe clientSpan.context.spanID
114 |
115 | reporter.nextSpan() shouldBe empty
116 | }
117 | }
118 | }
119 | }
120 | }
121 |
122 | "propagate the span from the client to the server with chunk-encoded response" in {
123 | withNioServer() { port =>
124 | withNioClient(port) { httpClient =>
125 | val clientSpan = Kamon.buildSpan("client-chunk-span").start()
126 | Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
127 | val (httpPost, chunks) = httpClient.postWithChunks(s"http://localhost:$port/fetch-in-chunks", "test 1", "test 2")
128 | httpClient.executeWithContent(httpPost, chunks)
129 |
130 | eventually(timeout(2 seconds)) {
131 | val serverFinishedSpan = reporter.nextSpan().value
132 | val clientFinishedSpan = reporter.nextSpan().value
133 |
134 | serverFinishedSpan.operationName shouldBe "fetch-in-chunks.post"
135 | serverFinishedSpan.tags should contain ("span.kind" -> TagValue.String("server"))
136 |
137 | clientFinishedSpan.operationName shouldBe s"localhost:$port/fetch-in-chunks"
138 | clientFinishedSpan.tags should contain ("span.kind" -> TagValue.String("client"))
139 |
140 | serverFinishedSpan.context.parentID shouldBe clientFinishedSpan.context.spanID
141 | clientFinishedSpan.context.parentID shouldBe clientSpan.context.spanID
142 |
143 | reporter.nextSpan() shouldBe empty
144 | }
145 | }
146 | }
147 | }
148 | }
149 |
150 | "create a new span when it's coming a request without one" in {
151 | withNioServer() { port =>
152 | withNioClient(port) { httpClient =>
153 | val httpGet = httpClient.get(s"http://localhost:$port/route?param=123")
154 | httpClient.execute(httpGet)
155 |
156 | eventually(timeout(2 seconds)) {
157 | val serverFinishedSpan = reporter.nextSpan().value
158 |
159 | serverFinishedSpan.operationName shouldBe "route.get"
160 | serverFinishedSpan.tags should contain ("span.kind" -> TagValue.String("server"))
161 |
162 | serverFinishedSpan.context.parentID.string shouldBe ""
163 |
164 | reporter.nextSpan() shouldBe empty
165 | }
166 | }
167 | }
168 | }
169 |
170 | "create a new span for each request" in {
171 | withNioServer() { port =>
172 | withNioClient(port) { httpClient =>
173 | val clientSpan = Kamon.buildSpan("test-span").start()
174 | Kamon.withContext(Context.create(Span.ContextKey, clientSpan)) {
175 | httpClient.execute(httpClient.get(s"http://localhost:$port/route?param=123"))
176 | httpClient.execute(httpClient.get(s"http://localhost:$port/route?param=123"))
177 |
178 | eventually(timeout(2 seconds)) {
179 | val serverFinishedSpan1 = reporter.nextSpan().value
180 | val clientFinishedSpan1 = reporter.nextSpan().value
181 | val serverFinishedSpan2 = reporter.nextSpan().value
182 | val clientFinishedSpan2 = reporter.nextSpan().value
183 |
184 | serverFinishedSpan1.operationName shouldBe "route.get"
185 | serverFinishedSpan1.tags should contain ("span.kind" -> TagValue.String("server"))
186 |
187 | clientFinishedSpan1.operationName shouldBe s"localhost:$port/route"
188 | clientFinishedSpan1.tags should contain ("span.kind" -> TagValue.String("client"))
189 |
190 | serverFinishedSpan1.context.traceID shouldBe clientFinishedSpan1.context.traceID
191 | serverFinishedSpan1.context.parentID shouldBe clientFinishedSpan1.context.spanID
192 |
193 | serverFinishedSpan2.operationName shouldBe "route.get"
194 | serverFinishedSpan2.tags should contain ("span.kind" -> TagValue.String("server"))
195 |
196 | clientFinishedSpan2.operationName shouldBe s"localhost:$port/route"
197 | clientFinishedSpan2.tags should contain ("span.kind" -> TagValue.String("client"))
198 |
199 | serverFinishedSpan2.context.traceID shouldBe clientFinishedSpan2.context.traceID
200 | serverFinishedSpan2.context.parentID shouldBe clientFinishedSpan2.context.spanID
201 |
202 | clientFinishedSpan1.context.parentID shouldBe clientFinishedSpan2.context.parentID
203 |
204 | clientFinishedSpan1.context.parentID shouldBe clientSpan.context.spanID
205 |
206 | reporter.nextSpan() shouldBe empty
207 | }
208 | }
209 | }
210 | }
211 | }
212 | }
213 |
214 | @volatile var registration: Registration = _
215 | val reporter = new TestSpanReporter()
216 |
217 | override protected def beforeAll(): Unit = {
218 | enableFastSpanFlushing()
219 | sampleAlways()
220 | registration = Kamon.addReporter(reporter)
221 | }
222 |
223 | override protected def afterAll(): Unit = {
224 | registration.cancel()
225 | }
226 | }
227 |
--------------------------------------------------------------------------------
/src/test/scala/kamon/netty/NettyMetricsSpec.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty
18 |
19 | import kamon.netty.Clients.withNioClient
20 | import kamon.netty.Metrics.{registeredChannelsMetric, taskProcessingTimeMetric, taskQueueSizeMetric, taskWaitingTimeMetric}
21 | import kamon.netty.Servers.{withEpollServer, withNioServer}
22 | import kamon.testkit.MetricInspection
23 | import org.scalatest.{Matchers, WordSpec}
24 |
25 | class NettyMetricsSpec extends WordSpec with Matchers with MetricInspection {
26 |
27 | "The NettyMetrics" should {
28 |
29 | "track the NioEventLoop in boss-group and worker-group" in {
30 | withNioServer() { port =>
31 | withNioClient(port) { httpClient =>
32 | val httpGet = httpClient.get(s"http://localhost:$port/route?param=123")
33 | httpClient.execute(httpGet)
34 |
35 | registeredChannelsMetric.valuesForTag("name") should contain atLeastOneOf("boss-group-nio-event-loop", "worker-group-nio-event-loop")
36 | taskProcessingTimeMetric.valuesForTag("name") should contain atLeastOneOf("boss-group-nio-event-loop", "worker-group-nio-event-loop")
37 | taskQueueSizeMetric.valuesForTag("name") should contain atLeastOneOf("boss-group-nio-event-loop", "worker-group-nio-event-loop")
38 | taskWaitingTimeMetric.valuesForTag("name") should contain atLeastOneOf("boss-group-nio-event-loop", "worker-group-nio-event-loop")
39 | }
40 | }
41 | }
42 |
43 | "track the EpollEventLoop in boss-group and worker-group" in {
44 | withEpollServer() { port =>
45 | withNioClient(port) { httpClient =>
46 | val httpGet = httpClient.get(s"http://localhost:$port/route?param=123")
47 | httpClient.execute(httpGet)
48 |
49 | registeredChannelsMetric.valuesForTag("name") should contain atLeastOneOf("boss-group-epoll-event-loop", "worker-group-epoll-event-loop")
50 | taskProcessingTimeMetric.valuesForTag("name") should contain atLeastOneOf("boss-group-epoll-event-loop", "worker-group-epoll-event-loop")
51 | taskQueueSizeMetric.valuesForTag("name") should contain atLeastOneOf("boss-group-epoll-event-loop", "worker-group-epoll-event-loop")
52 | taskWaitingTimeMetric.valuesForTag("name") should contain atLeastOneOf("boss-group-epoll-event-loop", "worker-group-epoll-event-loop")
53 | }
54 | }
55 | }
56 |
57 | "track the registered channels, task processing time and task queue size for NioEventLoop" in {
58 | withNioServer() { port =>
59 | withNioClient(port) { httpClient =>
60 | val httpGet = httpClient.get(s"http://localhost:$port/route?param=123")
61 | val response = httpClient.execute(httpGet)
62 | response.getStatus.code() should be(200)
63 |
64 | registeredChannelsMetric.valuesForTag("name") should contain("boss-group-nio-event-loop")
65 |
66 | val metrics = Metrics.forEventLoop("boss-group-nio-event-loop")
67 |
68 | metrics.registeredChannels.distribution().max should be > 0L
69 | metrics.taskProcessingTime.distribution().max should be > 0L
70 | metrics.taskQueueSize.distribution().max should be > 0L
71 | metrics.taskWaitingTime.distribution().max should be > 0L
72 | }
73 | }
74 | }
75 |
76 | "track the registered channels, task processing time and task queue size for EpollEventLoop" in {
77 | withEpollServer() { port =>
78 | withNioClient(port) { httpClient =>
79 | val httpGet = httpClient.get(s"http://localhost:$port/route?param=123")
80 | val response = httpClient.execute(httpGet)
81 | response.getStatus.code() should be(200)
82 |
83 | registeredChannelsMetric.valuesForTag("name") should contain("boss-group-epoll-event-loop")
84 |
85 | val metrics = Metrics.forEventLoop("boss-group-epoll-event-loop")
86 |
87 | metrics.registeredChannels.distribution().max should be >= 0L
88 | metrics.taskProcessingTime.distribution().max should be > 0L
89 | metrics.taskQueueSize.distribution().max should be >= 0L
90 | metrics.taskWaitingTime.distribution().max should be > 0L
91 | }
92 | }
93 | }
94 | }
95 | }
96 |
97 |
98 |
--------------------------------------------------------------------------------
/src/test/scala/kamon/netty/Servers.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.netty
18 |
19 | import io.netty.bootstrap.ServerBootstrap
20 | import io.netty.buffer.{ByteBuf, Unpooled}
21 | import io.netty.channel.epoll.{EpollEventLoopGroup, EpollServerSocketChannel}
22 | import io.netty.channel.nio.NioEventLoopGroup
23 | import io.netty.channel.socket.SocketChannel
24 | import io.netty.channel.socket.nio.NioServerSocketChannel
25 | import io.netty.channel.{ChannelFutureListener, _}
26 | import io.netty.handler.codec.http._
27 | import io.netty.handler.stream.ChunkedWriteHandler
28 | import io.netty.util.CharsetUtil
29 |
30 |
31 | class NioEventLoopBasedServer(port: Int) {
32 | val bossGroup = new NioEventLoopGroup(1)
33 | val workerGroup = new NioEventLoopGroup
34 | val b = new ServerBootstrap
35 |
36 | b.group(bossGroup, workerGroup)
37 | .channel(classOf[NioServerSocketChannel])
38 | .childHandler(new HttpServerInitializer)
39 |
40 | val channel: Channel = b.bind(port).sync.channel
41 |
42 | def close(): Unit = {
43 | channel.close
44 | bossGroup.shutdownGracefully()
45 | workerGroup.shutdownGracefully()
46 | }
47 | }
48 |
49 | class EpollEventLoopBasedServer(port: Int) {
50 | val bossGroup = new EpollEventLoopGroup(1)
51 | val workerGroup = new EpollEventLoopGroup
52 | val b = new ServerBootstrap
53 |
54 | b.group(bossGroup, workerGroup)
55 | .channel(classOf[EpollServerSocketChannel])
56 | .childHandler(new HttpServerInitializer)
57 |
58 | val channel: Channel = b.bind(port).sync.channel
59 |
60 | def close(): Unit = {
61 | channel.close
62 | bossGroup.shutdownGracefully()
63 | workerGroup.shutdownGracefully()
64 | }
65 | }
66 |
67 | object Servers {
68 | def withNioServer[A](port:Int = 9001)(thunk: Int => A): A = {
69 | val server = new NioEventLoopBasedServer(port)
70 | try thunk(port) finally server.close()
71 | }
72 |
73 | def withEpollServer[A](port:Int = 9001)(thunk: Int => A): A = {
74 | val server = new EpollEventLoopBasedServer(port)
75 | try thunk(port) finally server.close()
76 | }
77 | }
78 |
79 | private class HttpServerInitializer extends ChannelInitializer[SocketChannel] {
80 | override def initChannel(ch: SocketChannel): Unit = {
81 | val p = ch.pipeline
82 | p.addLast(new HttpRequestDecoder(4096, 8192, 8192))
83 | p.addLast(new HttpResponseEncoder())
84 | p.addLast(new ChunkedWriteHandler)
85 | p.addLast(new HttpServerHandler)
86 | }
87 | }
88 |
89 | private class HttpServerHandler extends ChannelInboundHandlerAdapter {
90 | private val ContentOk = Array[Byte]('H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd')
91 | private val ContentError = Array[Byte]('E', 'r', 'r', 'o', 'r')
92 |
93 | override def channelRead(ctx: ChannelHandlerContext, msg: scala.Any): Unit = {
94 | if (msg.isInstanceOf[HttpRequest]) {
95 | val request = msg.asInstanceOf[HttpRequest]
96 |
97 | val isKeepAlive = HttpHeaders.isKeepAlive(request)
98 |
99 | if (request.getUri.contains("/error")) {
100 | val response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.INTERNAL_SERVER_ERROR, Unpooled.wrappedBuffer(ContentError))
101 | response.headers.set("Content-Type", "text/plain")
102 | response.headers.set("Content-Length", response.content.readableBytes)
103 | val channelFuture = ctx.write(response)
104 | addCloseListener(isKeepAlive)(channelFuture)
105 | } else if (request.getUri.contains("/fetch-in-chunks")) {
106 | val response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK)
107 | HttpHeaders.setTransferEncodingChunked(response)
108 | response.headers.set("Content-Type", "text/plain")
109 |
110 | ctx.write(response)
111 | .addListener(new ChannelFutureListener {
112 | override def operationComplete(cf: ChannelFuture): Unit =
113 | writeChunk(cf.channel()).addListener(new ChannelFutureListener {
114 | override def operationComplete(cf: ChannelFuture): Unit =
115 | writeChunk(cf.channel()).addListener(new ChannelFutureListener {
116 | override def operationComplete(cf: ChannelFuture) =
117 | writeChunk(cf.channel()).addListener(new ChannelFutureListener {
118 | override def operationComplete(cf: ChannelFuture) =
119 | (writeLastContent _).andThen(addCloseListener(isKeepAlive))(cf.channel())})})})})
120 |
121 | } else {
122 | val response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, Unpooled.wrappedBuffer(ContentOk))
123 | response.headers.set("Content-Type", "text/plain")
124 | response.headers.set("Content-Length", response.content.readableBytes)
125 | val channelFuture = ctx.write(response)
126 | addCloseListener(isKeepAlive)(channelFuture)
127 | }
128 |
129 | }
130 |
131 | }
132 |
133 | override def channelReadComplete(ctx: ChannelHandlerContext): Unit =
134 | ctx.flush()
135 |
136 | override def exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable): Unit =
137 | ctx.close()
138 |
139 | private def writeChunk(channel: Channel, content: ByteBuf = Unpooled.wrappedBuffer(ContentOk)): ChannelFuture = {
140 | channel.writeAndFlush(new DefaultHttpContent(Unpooled.copiedBuffer("chunkkkkkkkkkkkkk", CharsetUtil.UTF_8)))
141 | }
142 |
143 | private def writeLastContent(channel: Channel): ChannelFuture = {
144 | channel.writeAndFlush(new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER))
145 | }
146 |
147 | private def addCloseListener(isKeepAlive: Boolean)(f: ChannelFuture): Unit = {
148 | if (!isKeepAlive) f.addListener(ChannelFutureListener.CLOSE)
149 | }
150 | }
151 |
--------------------------------------------------------------------------------
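Servers.scala and Clients.scala are designed to compose; here is a sketch of the round trip the specs above exercise, assuming the kamon-netty test classes are on the classpath (the object name is illustrative):

    import io.netty.util.CharsetUtil
    import kamon.netty.Clients.withNioClient
    import kamon.netty.Servers.withNioServer

    object RoundTripSketch extends App {
      // Boot the Nio test server, point the Nio test client at it, check the canned reply.
      withNioServer() { port =>
        withNioClient(port) { client =>
          val response = client.execute(client.get(s"http://localhost:$port/route?param=123"))
          assert(response.getStatus.code() == 200)
          assert(response.content().toString(CharsetUtil.UTF_8) == "Hello World")
        }
      }
    }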
/src/test/scala/kamon/testkit/Reconfigure.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.testkit
18 |
19 | import com.typesafe.config.ConfigFactory
20 | import kamon.Kamon
21 |
22 | trait Reconfigure {
23 |
24 | def enableFastSpanFlushing(): Unit = {
25 | applyConfig("kamon.trace.tick-interval = 1 millisecond")
26 | }
27 |
28 | def sampleAlways(): Unit = {
29 | applyConfig("kamon.trace.sampler = always")
30 | }
31 |
32 | def sampleNever(): Unit = {
33 | applyConfig("kamon.trace.sampler = never")
34 | }
35 |
36 | private def applyConfig(configString: String): Unit = {
37 | Kamon.reconfigure(ConfigFactory.parseString(configString).withFallback(Kamon.config()))
38 | }
39 | }
--------------------------------------------------------------------------------
/src/test/scala/kamon/testkit/TestSpanReporter.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * =========================================================================================
3 | * Copyright © 2013-2017 the kamon project
4 | *
5 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
6 | * except in compliance with the License. You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software distributed under the
11 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
12 | * either express or implied. See the License for the specific language governing permissions
13 | * and limitations under the License.
14 | * =========================================================================================
15 | */
16 |
17 | package kamon.testkit
18 |
19 | import java.util.concurrent.LinkedBlockingQueue
20 |
21 | import com.typesafe.config.Config
22 | import kamon.SpanReporter
23 | import kamon.trace.Span
24 | import kamon.trace.Span.FinishedSpan
25 |
26 | class TestSpanReporter extends SpanReporter {
27 | import scala.collection.JavaConverters._
28 | private val reportedSpans = new LinkedBlockingQueue[FinishedSpan]()
29 |
30 | override def reportSpans(spans: Seq[Span.FinishedSpan]): Unit =
31 | reportedSpans.addAll(spans.asJava)
32 |
33 | def nextSpan(): Option[FinishedSpan] =
34 | Option(reportedSpans.poll())
35 |
36 | override def start(): Unit = {}
37 | override def stop(): Unit = {}
38 | override def reconfigure(config: Config): Unit = {}
39 | }
--------------------------------------------------------------------------------
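A sketch of how Reconfigure and TestSpanReporter are wired together, mirroring the beforeAll/afterAll pattern in NettyHTTPTracingSpec above (the object and method names here are illustrative):

    import kamon.Kamon
    import kamon.testkit.{Reconfigure, TestSpanReporter}
    import kamon.util.Registration

    object ReporterWiringSketch extends Reconfigure {
      val reporter = new TestSpanReporter()

      def setUp(): Registration = {
        enableFastSpanFlushing()   // kamon.trace.tick-interval = 1 millisecond
        sampleAlways()             // kamon.trace.sampler = always
        Kamon.addReporter(reporter)
      }

      // Poll the most recently reported span, if any, and return its operation name.
      def latestOperationName(): Option[String] =
        reporter.nextSpan().map(_.operationName)

      def tearDown(registration: Registration): Unit =
        registration.cancel()
    }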
/version.sbt:
--------------------------------------------------------------------------------
1 | version in ThisBuild := "1.0.0-RC1-SNAPSHOT"
2 |
--------------------------------------------------------------------------------