├── .asf.yaml ├── README.md ├── aggregation.graphqls ├── alarm.graphqls ├── async-profiler.graphqls ├── browser-log.graphqls ├── common.graphqls ├── continuous-profiling.graphqls ├── ebpf-profiling.graphqls ├── event.graphqls ├── hierarchy.graphqls ├── log.graphqls ├── metadata-v2.graphqls ├── metadata.graphqls ├── metric.graphqls ├── metrics-v2.graphqls ├── metrics-v3.graphqls ├── ondemand-pod-log.graphqls ├── profile.graphqls ├── record.graphqls ├── top-n-records.graphqls ├── topology.graphqls ├── trace.graphqls └── ui-configuration.graphqls /.asf.yaml: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one or more 4 | # contributor license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright ownership. 6 | # The ASF licenses this file to You under the Apache License, Version 2.0 7 | # (the "License"); you may not use this file except in compliance with 8 | # the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | github: 20 | description: Query Protocol for Apache SkyWalking in GraphQL format 21 | homepage: https://skywalking.apache.org/docs/main/latest/en/api/query-protocol/ 22 | labels: 23 | - skywalking 24 | - observability 25 | - apm 26 | enabled_merge_buttons: 27 | squash: true 28 | merge: false 29 | rebase: false 30 | protected_branches: 31 | master: {} 32 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Apache SkyWalking Query Protocol 2 | Query Protocol defines the communication protocol used in the query stage. 3 | SkyWalking native UI and CLI use this protocol to fetch data from the backend consistently, without needing to worry about backend updates. 4 | 5 | Read the [Query Protocol API docs](https://skywalking.apache.org/docs/main/latest/en/api/query-protocol/) for more details. 6 | 7 | ## Release 8 | This repo is not released separately. All source code is included in the main repo release. The tags match the [main repo](https://github.com/apache/skywalking) tags. 9 | 10 | # License 11 | Apache 2.0 12 | -------------------------------------------------------------------------------- /aggregation.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License.
You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Legacy metrics query protocol 18 | # Replaced by the metrics-v2 in the future 19 | 20 | type TopNEntity { 21 | name: String! 22 | id: ID! 23 | value: Long! 24 | } 25 | 26 | # The aggregation query is different with the metric query. 27 | # All aggregation queries require backend or/and storage do aggregation in query time. 28 | extend type Query { 29 | # TopN is an aggregation query. 30 | getServiceTopN(name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]! 31 | getAllServiceInstanceTopN(name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]! 32 | getServiceInstanceTopN(serviceId: ID!, name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]! 33 | getAllEndpointTopN(name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]! 34 | getEndpointTopN(serviceId: ID!, name: String!, topN: Int!, duration: Duration!, order: Order!): [TopNEntity!]! 35 | } 36 | -------------------------------------------------------------------------------- /alarm.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | type MQEMetric { 18 | name: String! 19 | results: [MQEValues!]! 20 | } 21 | 22 | type AlarmSnapshot { 23 | # The MQE expression of the alarm rule. 24 | expression: String! 25 | # The metrics snapshot 26 | metrics: [MQEMetric!]! 27 | } 28 | 29 | type AlarmMessage { 30 | startTime: Long! 31 | scope: Scope 32 | id: ID! 33 | # The entity name of the alarm triggered. 34 | name: String! 35 | message: String! 36 | events: [Event!]! 37 | tags: [KeyValue!]! 38 | # The snapshot when the alarm triggered. 39 | snapshot: AlarmSnapshot! 40 | } 41 | 42 | type Alarms { 43 | msgs: [AlarmMessage!]! 44 | } 45 | 46 | input AlarmTag { 47 | key: String! 48 | value: String 49 | } 50 | 51 | extend type Query { 52 | getAlarm(duration: Duration!, scope: Scope, keyword: String, paging: Pagination!, tags: [AlarmTag]): Alarms 53 | # Read the list of searchable keys 54 | queryAlarmTagAutocompleteKeys(duration: Duration!):[String!] 55 | # Search the available value options of the given key. 56 | queryAlarmTagAutocompleteValues(tagKey: String! , duration: Duration!):[String!] 
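# Illustrative example (not part of the schema definition): a client could page through
# recent alarms roughly like this; the duration values and keyword are hypothetical placeholders.
#
# query {
#   getAlarm(duration: {start: "2024-01-01 10", end: "2024-01-01 12", step: HOUR},
#            keyword: "timeout",
#            paging: {pageNum: 1, pageSize: 20}) {
#     msgs { id name message startTime tags { key value } }
#   }
# }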
57 | } 58 | -------------------------------------------------------------------------------- /async-profiler.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Request to create a async-profiler task 18 | input AsyncProfilerTaskCreationRequest { 19 | # Define the service to execute the task 20 | serviceId: ID! 21 | # Define which instances need to execute tasks 22 | serviceInstanceIds: [String!]! 23 | # Define the duration of this task (second) 24 | duration: Int! 25 | # Define which event types this task needs to collect. 26 | events: [AsyncProfilerEventType!]! 27 | # other async-profiler execution options, e.g. alloc=2k,lock=2s 28 | execArgs: String 29 | } 30 | 31 | # AsyncProfilerTaskCreationResult is the result of the task creation request 32 | type AsyncProfilerTaskCreationResult { 33 | # Code defines the status of the response, i.e. success or failure. 34 | code: AsyncProfilerTaskCreationType! 35 | # ErrorReason gives detailed reason for the exception, if the code returned represents a kind of failure. 36 | errorReason: String 37 | # Task id, if code is SUCCESS. 38 | id: String 39 | } 40 | 41 | # AsyncProfiler task creation type 42 | enum AsyncProfilerTaskCreationType { 43 | # Task created successfully 44 | SUCCESS 45 | # Task creation failed due to argument errors 46 | ARGUMENT_ERROR 47 | # The current service already has a async-profiler task executing 48 | ALREADY_PROFILING_ERROR 49 | } 50 | 51 | # Request to query async-profiler task list 52 | input AsyncProfilerTaskListRequest { 53 | # ServiceId associated with the task 54 | serviceId: ID! 55 | # Time Range 56 | queryDuration: Duration 57 | # Limit defines the number of the tasks to be returned. 58 | limit: Int 59 | } 60 | 61 | # Request to query flame graph analyzation 62 | input AsyncProfilerAnalyzationRequest { 63 | # Define which task to analyze 64 | taskId: ID! 65 | # InstanceIds defines the instances to be included for analysis 66 | instanceIds: [String!]! 67 | # EventType is the specific JFR Event type to be selected for analysis even if multiple events are included in the JFR file. 68 | eventType: JFREventType! 69 | } 70 | 71 | # Define async-profiler task list result 72 | type AsyncProfilerTaskListResult { 73 | # If it is null or empty, it means the task is created successfully, otherwise it gets the creation error reason 74 | errorReason: String 75 | 76 | # Tasks is a list of async-profiler tasks belonging to the specific service 77 | tasks: [AsyncProfilerTask!] 78 | } 79 | 80 | # Define async-profiler task data 81 | # The fields definition is the same as AsyncProfilerTaskCreationRequest 82 | type AsyncProfilerTask { 83 | id: String! 
84 | serviceId: String! 85 | serviceInstanceIds: [String!]! 86 | createTime: Long! 87 | events: [AsyncProfilerEventType!]! 88 | duration: Int! 89 | execArgs: String 90 | } 91 | 92 | # Define the flame graph results produced by async-profiler 93 | type AsyncProfilerStackTree { 94 | type: JFREventType! 95 | elements: [AsyncProfilerStackElement!] 96 | } 97 | 98 | # Define the thread stack analyze tree element 99 | type AsyncProfilerStackElement { 100 | # Id is the identity of the stack element 101 | id: ID! 102 | # ParentId is the identity of the parent stack element. Stack elements are organized as a tree. 103 | parentId: ID! 104 | # Method signatures in tree nodes 105 | codeSignature: String! 106 | # The total number of samples of the current tree node, including child nodes 107 | total: Long! 108 | # The sampling number of the current tree node, excluding samples of the children 109 | self: Long! 110 | } 111 | 112 | # Define the analysis results of the task 113 | type AsyncProfilerAnalyzation { 114 | # Displaying the tree structure data required for the flame graph 115 | tree: AsyncProfilerStackTree 116 | } 117 | 118 | # Defines task progress, including task logs, success and failure instances 119 | type AsyncProfilerTaskProgress { 120 | # All task execution logs of the current task 121 | logs: [AsyncProfilerTaskLog!] 122 | # ErrorInstanceIds gives instances that failed to execute the task 123 | errorInstanceIds: [ID] 124 | # SuccessInstanceIds gives instances that have executed the task successfully 125 | successInstanceIds: [ID] 126 | } 127 | 128 | # Define the log of a task executed by an instance 129 | type AsyncProfilerTaskLog { 130 | # The task id 131 | id: String! 132 | # InstanceId is the id of the instance which reported this task log 133 | instanceId: ID! 134 | instanceName: String! 135 | 136 | operationType: AsyncProfilerTaskLogOperationType! 137 | operationTime: Long! 138 | } 139 | 140 | # Define the execution progress of the task 141 | enum AsyncProfilerTaskLogOperationType { 142 | # NOTIFIED means the task has been issued to the Java Agent 143 | NOTIFIED, 144 | # EXECUTION_FINISHED means the Java Agent has finished the execution 145 | EXECUTION_FINISHED 146 | # JFR_UPLOAD_FILE_TOO_LARGE_ERROR means the Java Agent has finished the task but the target file is too large to be received by the OAP server 147 | JFR_UPLOAD_FILE_TOO_LARGE_ERROR 148 | # EXECUTION_TASK_ERROR means potential execution error caused by the Java Agent 149 | EXECUTION_TASK_ERROR 150 | } 151 | 152 | # Defines which event types async-profiler needs to collect 153 | enum AsyncProfilerEventType { 154 | CPU 155 | WALL 156 | LOCK 157 | ALLOC 158 | CTIMER 159 | ITIMER 160 | } 161 | 162 | # JFR event type 163 | enum JFREventType { 164 | EXECUTION_SAMPLE 165 | # The LOCK event is a combination of JAVA_MONITOR_ENTER and THREAD_PARK events. 166 | LOCK 167 | OBJECT_ALLOCATION_IN_NEW_TLAB 168 | OBJECT_ALLOCATION_OUTSIDE_TLAB 169 | PROFILER_LIVE_OBJECT 170 | } 171 | 172 | extend type Mutation { 173 | # Create a new async-profiler task 174 | createAsyncProfilerTask(asyncProfilerTaskCreationRequest: AsyncProfilerTaskCreationRequest!): AsyncProfilerTaskCreationResult! 175 | } 176 | 177 | extend type Query { 178 | # Query all task lists and sort them in descending order by start time 179 | queryAsyncProfilerTaskList(request: AsyncProfilerTaskListRequest!): AsyncProfilerTaskListResult! 180 | # Query task progress, including task logs 181 | queryAsyncProfilerTaskProgress(taskId: String!): AsyncProfilerTaskProgress! 
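# Illustrative example (not part of the schema definition): after a task is created via
# createAsyncProfilerTask, its progress could be polled roughly like this; the task id
# "task-1" is a hypothetical placeholder.
#
# query {
#   queryAsyncProfilerTaskProgress(taskId: "task-1") {
#     logs { instanceName operationType operationTime }
#     successInstanceIds
#     errorInstanceIds
#   }
# }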
182 | # Query the flame graph produced by async-profiler 183 | queryAsyncProfilerAnalyze(request: AsyncProfilerAnalyzationRequest!): AsyncProfilerAnalyzation! 184 | } 185 | 186 | -------------------------------------------------------------------------------- /browser-log.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Represent the conditions used for query BrowserErrorLogBrief 18 | input BrowserErrorLogQueryCondition { 19 | # The value of 0 means all services. 20 | serviceId: ID 21 | serviceVersionId: ID 22 | pagePathId: ID 23 | category: ErrorCategory 24 | # The time range of browser perf data time 25 | queryDuration: Duration 26 | paging: Pagination! 27 | } 28 | 29 | # The list of browser error log 30 | type BrowserErrorLogs { 31 | logs: [BrowserErrorLog!]! 32 | } 33 | 34 | # Represent the browser error log 35 | type BrowserErrorLog { 36 | service: String! 37 | serviceVersion: String! 38 | time: Long! 39 | pagePath: String! 40 | category: ErrorCategory! 41 | grade: String 42 | message: String 43 | line: Int 44 | col: Int 45 | stack: String 46 | errorUrl: String 47 | firstReportedError: Boolean! 48 | } 49 | 50 | enum ErrorCategory { 51 | ALL, 52 | AJAX, 53 | RESOURCE, 54 | VUE, 55 | PROMISE, 56 | JS, 57 | UNKNOWN 58 | } 59 | 60 | extend type Query { 61 | queryBrowserErrorLogs(condition: BrowserErrorLogQueryCondition): BrowserErrorLogs 62 | } 63 | -------------------------------------------------------------------------------- /common.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | scalar Long 18 | 19 | schema { 20 | query: Query 21 | mutation: Mutation 22 | } 23 | 24 | #Root node 25 | type Query { 26 | version: String 27 | } 28 | 29 | type Mutation { 30 | version: String 31 | } 32 | 33 | # String key, String value pair. 
34 | type KeyValue { 35 | key: String! 36 | value: String 37 | } 38 | 39 | # String key, Long/Int value pair. 40 | type KeyNumericValue { 41 | key: String! 42 | value: Long! 43 | } 44 | 45 | # The Duration defines the start and end time for each query operation. 46 | # Fields: `start` and `end` 47 | # represents the time span. And each of them matches the step. 48 | # ref https://www.ietf.org/rfc/rfc3339.txt 49 | # The time formats are 50 | # `SECOND` step: yyyy-MM-dd HHmmss 51 | # `MINUTE` step: yyyy-MM-dd HHmm 52 | # `HOUR` step: yyyy-MM-dd HH 53 | # `DAY` step: yyyy-MM-dd 54 | # `MONTH` step: yyyy-MM 55 | # Field: `step` 56 | # represents the accurate time point. 57 | # e.g. 58 | # if step==HOUR , start=2017-11-08 09, end=2017-11-08 19 59 | # then 60 | # metrics from the following time points expected 61 | # 2017-11-08 9:00 -> 2017-11-08 19:00 62 | # there are 11 time points (hours) in the time span. 63 | input Duration { 64 | start: String! 65 | end: String! 66 | step: Step! 67 | # Only for BanyanDB, the flag to query from cold stage, default is false. 68 | coldStage: Boolean 69 | } 70 | 71 | enum Step { 72 | DAY 73 | HOUR 74 | MINUTE 75 | SECOND 76 | } 77 | 78 | enum Order { 79 | ASC 80 | DES 81 | } 82 | 83 | input Pagination { 84 | # pageNum starts in 1, the default is 1. 85 | pageNum: Int 86 | pageSize: Int! 87 | # total is not provided since 9.1.0 88 | # The client side determines the existing of the next page, which should be TRUE when `size of response list` == `pageSize' 89 | } 90 | 91 | enum Language { 92 | # For not language based agent, the language is impossible to tell. 93 | UNKNOWN 94 | JAVA 95 | DOTNET 96 | NODEJS 97 | PYTHON 98 | RUBY 99 | GO 100 | LUA 101 | PHP 102 | } 103 | 104 | enum Scope { 105 | All 106 | Service 107 | ServiceInstance 108 | Endpoint 109 | Process 110 | ServiceRelation 111 | ServiceInstanceRelation 112 | EndpointRelation 113 | ProcessRelation 114 | } 115 | 116 | enum DetectPoint { 117 | CLIENT 118 | SERVER 119 | PROXY 120 | } 121 | 122 | type HealthStatus { 123 | # 0 means healthy, more than 0 means unhealthy 124 | # and less than 0 means oap doesn't startup. 125 | score: Int! 126 | # some details about the score value. 127 | details: String 128 | } 129 | 130 | # The Span for OAP internal debugging. 131 | # All time is in nanoseconds. 132 | type DebuggingSpan { 133 | spanId: Int! 134 | parentSpanId: Int! 135 | operation: String! 136 | startTime: Long! 137 | endTime: Long! 138 | duration: Long! 139 | msg: String 140 | error: String 141 | } 142 | 143 | # The Trace for OAP internal debugging. 144 | type DebuggingTrace { 145 | traceId: String! 146 | condition: String! 147 | startTime: Long! 148 | endTime: Long! 149 | duration: Long! 150 | spans: [DebuggingSpan!]! 151 | } 152 | 153 | extend type Query { 154 | # Query Health Checker module for the status of OAP server 155 | checkHealth: HealthStatus! 156 | } 157 | -------------------------------------------------------------------------------- /continuous-profiling.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. 
You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Set policy configuration 18 | input ContinuousProfilingPolicyCreation { 19 | # service of the policy 20 | serviceId: ID! 21 | # target of the policy 22 | targets: [ContinuousProfilingPolicyTargetCreation!]! 23 | } 24 | 25 | input ContinuousProfilingPolicyTargetCreation { 26 | targetType: ContinuousProfilingTargetType! 27 | checkItems: [ContinuousProfilingPolicyItemCreation!]! 28 | } 29 | 30 | # Policy item of continuous profiling 31 | input ContinuousProfilingPolicyItemCreation { 32 | # define the monitor type to collect metrics 33 | type: ContinuousProfilingMonitorType! 34 | # threshold of policy, which decide by the monitor type 35 | threshold: String! 36 | # the length of time to evaluate the metrics 37 | period: Int! 38 | # how many times after the metrics match the threshold, will trigger profiling 39 | count: Int! 40 | # the URI path/regex filter when monitor the HTTP related types 41 | uriList: [String!] 42 | uriRegex: String 43 | } 44 | 45 | enum ContinuousProfilingTargetType { 46 | # eBPF On CPU Profiling 47 | ON_CPU, 48 | # eBPF Off CPU Profiling 49 | OFF_CPU, 50 | # eBPF Network Profiling 51 | NETWORK 52 | } 53 | 54 | type ContinuousProfilingSetResult { 55 | # TRUE if the policy is set successfully 56 | status: Boolean! 57 | # error reason when status == FALSE 58 | errorReason: String 59 | } 60 | 61 | type ContinuousProfilingPolicyTarget { 62 | type: ContinuousProfilingTargetType! 63 | checkItems: [ContinuousProfilingPolicyItem!]! 64 | triggeredCount: Int! 65 | lastTriggerTimestamp: Long 66 | } 67 | 68 | type ContinuousProfilingPolicyItem { 69 | type: ContinuousProfilingMonitorType! 70 | threshold: String! 71 | period: Int! 72 | count: Int! 73 | uriList: [String!] 74 | uriRegex: String 75 | } 76 | 77 | # The Continuous Profiling monitoring instance 78 | type ContinuousProfilingMonitoringInstance { 79 | id: ID! 80 | name: String! 81 | attributes: [Attribute!]! 82 | triggeredCount: Int! 83 | lastTriggerTimestamp: Long 84 | 85 | # monitoring process list under the monitoring instance 86 | processes: [ContinuousProfilingMonitoringProcess!]! 87 | } 88 | 89 | # The Continuous Profiling monitoring process 90 | type ContinuousProfilingMonitoringProcess { 91 | id: ID! 92 | name: String! 93 | detectType: String! 94 | labels: [String!]! 95 | triggeredCount: Int! 96 | lastTriggerTimestamp: Long 97 | } 98 | 99 | extend type Mutation { 100 | # set the continuous profiling policy 101 | setContinuousProfilingPolicy(request: ContinuousProfilingPolicyCreation!): ContinuousProfilingSetResult! 102 | } 103 | 104 | extend type Query { 105 | # query all continuous profiling task through service 106 | queryContinuousProfilingServiceTargets(serviceId: ID!): [ContinuousProfilingPolicyTarget!]! 107 | # query all continuous profiling monitoring instances through service and policy 108 | queryContinuousProfilingMonitoringInstances(serviceId: ID!, target: ContinuousProfilingTargetType!): [ContinuousProfilingMonitoringInstance!]! 
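# Illustrative example (not part of the schema definition): reading the configured policy
# targets of one service; the service id "c2VydmljZS1h.1" is a hypothetical placeholder.
#
# query {
#   queryContinuousProfilingServiceTargets(serviceId: "c2VydmljZS1h.1") {
#     type
#     triggeredCount
#     lastTriggerTimestamp
#     checkItems { type threshold period count }
#   }
# }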
109 | } -------------------------------------------------------------------------------- /ebpf-profiling.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # The creation request of eBPF profiling fixed time task 18 | input EBPFProfilingTaskFixedTimeCreationRequest { 19 | # define which processes under the service need to be profiling 20 | serviceId: String! 21 | # aggregate which processes need to be profiling from labels 22 | processLabels: [String!]! 23 | 24 | # the task start timestamp(ms), if less then or equal zero means the task starts ASAP 25 | startTime: Long! 26 | # the profiling duration(s) 27 | duration: Int! 28 | 29 | # the task profiling target type 30 | targetType: EBPFProfilingTargetType! 31 | } 32 | 33 | # Settings for the behaviours of sampling. 34 | # HTTP request and response could be large between the system. 35 | # These settings help the agent to collect necessary data and cost reasonable resources for the agent, transforming, and OAP storage. 36 | input EBPFNetworkDataCollectingSettings { 37 | # Require to collect the complete request 38 | requireCompleteRequest: Boolean! 39 | # The max size of request context. The unit is byte. 40 | # Collect the whole request header and body if this is not set. 41 | maxRequestSize: Int 42 | 43 | # Require to collect the complete response 44 | requireCompleteResponse: Boolean! 45 | # The max size of response context. The unit is byte. 46 | # Collect the whole response header and body if this is not set. 47 | maxResponseSize: Int 48 | } 49 | 50 | # EBPFNetworkSamplingRule defines the URI scopes for HTTP request and response collecting under given HTTP response code circumstances. 51 | input EBPFNetworkSamplingRule { 52 | # The match pattern for HTTP request. This is HTTP URI-oriented. 53 | # matches all requests if not set 54 | uriRegex: String 55 | 56 | # the minimal request duration to activate the network data(HTTP request/response raw data) sampling. 57 | # Collecting requests without minimal request duration 58 | minDuration: Int 59 | # Collecting requests when the response code is 400-499 60 | when4xx: Boolean! 61 | # Collecting requests when the response code is 500-599 62 | when5xx: Boolean! 63 | 64 | # define the sampled data size and scopes. 65 | settings: EBPFNetworkDataCollectingSettings! 66 | } 67 | 68 | # The request of eBPF network profiling task 69 | input EBPFProfilingNetworkTaskRequest { 70 | # define which processes under the service instance need to be profiling 71 | instanceId: String! 72 | 73 | # The rule list for network profiling about collecting HTTP request and response raw data. 
74 | # Set various rules for different HTTP URIs if necessary. 75 | samplings: [EBPFNetworkSamplingRule!] 76 | } 77 | 78 | # eBPF Profiling task creation result 79 | type EBPFProfilingTaskCreationResult { 80 | # TRUE if the task is created successfully 81 | status: Boolean! 82 | # error reason when status == FALSE 83 | errorReason: String 84 | 85 | # The task ID when status = TRUE 86 | id: String 87 | } 88 | 89 | # eBPF Profiling task keep alive result 90 | type EBPFNetworkKeepProfilingResult { 91 | # TRUE if the task is kept alive successfully 92 | # When one of the following conditions occurs, the response is in an error status: 93 | # 1. The profiling task does not exist. 94 | # 2. The target type of profiling task is not "NETWORK". 95 | # 3. The profiling task is already finished when the OAP receives the keep-alive request. 96 | status: Boolean! 97 | # error reason when status == FALSE 98 | errorReason: String 99 | } 100 | 101 | # eBPF profiling task data 102 | type EBPFProfilingTask { 103 | # profiling task ID 104 | taskId: ID! 105 | # service of profiling task 106 | serviceId: ID! 107 | serviceName: String! 108 | # service instance of profiling task 109 | serviceInstanceId: ID 110 | serviceInstanceName: String 111 | # process labels for filter 112 | processLabels: [String!]! 113 | # process of profiling task triggered by continuous profiling 114 | processId: ID 115 | processName: String 116 | 117 | # Start time of the task, type is timestamp. 118 | taskStartTime: Long! 119 | # profiling task trigger type 120 | triggerType: EBPFProfilingTriggerType! 121 | # task profiling duration 122 | fixedTriggerDuration: Long 123 | # "CONTINUOUS_PROFILING" type task causes 124 | continuousProfilingCauses: [ContinuousProfilingTriggeredCause!] 125 | 126 | # profiling task target type 127 | targetType: EBPFProfilingTargetType! 128 | 129 | # the timestamp of creating this task 130 | createTime: Long! 131 | } 132 | 133 | type ContinuousProfilingTriggeredCause { 134 | # which type reached the threshold 135 | # the threshold and current values of all causes have been multiplied by 100 to avoid float values 136 | type: ContinuousProfilingMonitorType! 137 | # single value based cause 138 | singleValue: ContinuousProfilingSingleValueCause 139 | # uri based cause 140 | uri: ContinuousProfilingURICause 141 | # summary message of cause, including type, value or uri 142 | message: String! 143 | } 144 | 145 | enum ContinuousProfilingMonitorType { 146 | # monitoring Process CPU percent, value in [0-100] 147 | PROCESS_CPU, 148 | # monitoring process thread count, value must be bigger than zero 149 | PROCESS_THREAD_COUNT, 150 | # monitoring current system load 151 | SYSTEM_LOAD, 152 | # monitoring the process HTTP response error(status>=500) percent, value in [0-100] 153 | HTTP_ERROR_RATE, 154 | # monitoring the process HTTP response duration(ms) 155 | HTTP_AVG_RESPONSE_TIME 156 | } 157 | 158 | type ContinuousProfilingSingleValueCause { 159 | # defined threshold 160 | threshold: Long! 161 | # current value of the process 162 | current: Long! 163 | } 164 | 165 | type ContinuousProfilingURICause { 166 | # which URI triggered the threshold (one of the following) 167 | uriRegex: String 168 | uriPath: String 169 | # defined threshold 170 | threshold: Long! 171 | # current value of the process URI 172 | current: Long! 173 | } 174 | 175 | type EBPFProfilingSchedule { 176 | # profiling task schedule ID 177 | scheduleId: ID! 178 | # profiling task ID 179 | taskId: ID! 180 | # process entity 181 | process: Process!
182 | # profiling schedule start timestamp(ms) 183 | startTime: Long! 184 | # profiling schedule finished timestamp(ms) 185 | endTime: Long! 186 | } 187 | 188 | input EBPFProfilingAnalyzeTimeRange { 189 | # start timestamp(ms) 190 | start: Long! 191 | # end timestamp(ms) 192 | end: Long! 193 | } 194 | 195 | type EBPFProfilingAnalyzation { 196 | # if not empty, the backend has information to give to the user 197 | tip: String 198 | # profiling analyzed trees 199 | trees: [EBPFProfilingTree!]! 200 | } 201 | 202 | type EBPFProfilingTree { 203 | # profiling stack elements 204 | elements: [EBPFProfilingStackElement!]! 205 | } 206 | 207 | type EBPFProfilingStackElement { 208 | # the element ID 209 | id: String! 210 | # the parent element ID 211 | parentId: String! 212 | # stack element symbol name 213 | symbol: String! 214 | # stack element type 215 | stackType: EBPFProfilingStackType! 216 | # current stack element total dump count 217 | dumpCount: Long! 218 | } 219 | 220 | # Data needed to prepare for creating an eBPF profiling task 221 | type EBPFProfilingTaskPrepare { 222 | # whether there are enough processes that could be profiled 223 | couldProfiling: Boolean! 224 | # all process unique labels for filter 225 | processLabels: [String!]! 226 | } 227 | 228 | enum EBPFProfilingStackType { 229 | KERNEL_SPACE, 230 | USER_SPACE 231 | } 232 | 233 | # Define when the profiling task would be executed 234 | enum EBPFProfilingTriggerType { 235 | # Appoint the total task execution duration 236 | FIXED_TIME, 237 | # Triggered by reaching the continuous profiling policy 238 | CONTINUOUS_PROFILING 239 | } 240 | 241 | # The way of profiling the process 242 | # related to the Linux function: https://man7.org/linux/man-pages/man2/perf_event_open.2.html 243 | enum EBPFProfilingTargetType { 244 | # Using "PERF_COUNT_SW_CPU_CLOCK" to profile the process with the CPU clock 245 | ON_CPU, 246 | # Using the "finish_task_switch" kprobe to profile the process 247 | # related blog: https://www.brendangregg.com/offcpuanalysis.html 248 | OFF_CPU, 249 | # Using many syscalls to complete network topology monitoring, such as sys_connect, sys_read, sys_write, etc. 250 | NETWORK 251 | } 252 | 253 | enum EBPFProfilingAnalyzeAggregateType { 254 | # Aggregate by the total duration of stack 255 | # For "OFF_CPU" target type of profiling: statistics of the total time spent in off cpu. 256 | DURATION, 257 | # Aggregate by the trigger count 258 | # For "ON_CPU" target type of profiling: statistics of the dump count. 259 | # For "OFF_CPU" target type of profiling: statistics of the number of times the process is switched to off cpu by the scheduler. 260 | COUNT 261 | } 262 | 263 | extend type Mutation { 264 | # create a new eBPF fixed time profiling task 265 | createEBPFProfilingFixedTimeTask(request: EBPFProfilingTaskFixedTimeCreationRequest!): EBPFProfilingTaskCreationResult! 266 | 267 | # create a new eBPF network profiling task 268 | createEBPFNetworkProfiling(request: EBPFProfilingNetworkTaskRequest!): EBPFProfilingTaskCreationResult! 269 | # keep alive the eBPF profiling task 270 | keepEBPFNetworkProfiling(taskId: ID!): EBPFNetworkKeepProfilingResult! 271 | } 272 | 273 | extend type Query { 274 | # query eBPF profiling data to prepare for creating a task 275 | queryPrepareCreateEBPFProfilingTaskData(serviceId: ID!): EBPFProfilingTaskPrepare!
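# Illustrative example (not part of the schema definition): checking whether a service has
# processes that could be profiled before creating a fixed-time task; the service id
# "c2VydmljZS1h.1" is a hypothetical placeholder.
#
# query {
#   queryPrepareCreateEBPFProfilingTaskData(serviceId: "c2VydmljZS1h.1") {
#     couldProfiling
#     processLabels
#   }
# }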
276 | # query eBPF profiling task list 277 | # query `triggerType == FIXED_TIME` when triggerType is absent 278 | queryEBPFProfilingTasks(serviceId: ID, serviceInstanceId: ID, targets: [EBPFProfilingTargetType!], triggerType: EBPFProfilingTriggerType, duration: Duration): [EBPFProfilingTask!]! 279 | # query schedules from profiling task 280 | queryEBPFProfilingSchedules(taskId: ID!): [EBPFProfilingSchedule!]! 281 | # analyze the profiling schedule 282 | # aggregateType is "EBPFProfilingAnalyzeAggregateType#COUNT" as default. 283 | analysisEBPFProfilingResult(scheduleIdList: [ID!]!, timeRanges: [EBPFProfilingAnalyzeTimeRange!]!, aggregateType: EBPFProfilingAnalyzeAggregateType): EBPFProfilingAnalyzation! 284 | } 285 | -------------------------------------------------------------------------------- /event.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | type Event { 18 | uuid: String! 19 | source: Source! 20 | name: String! 21 | type: EventType! 22 | message: String 23 | parameters: [KeyValue!] 24 | # The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC. 25 | # This field may be 0 if the user didn't report it or the `start` event is lost due to network issues. 26 | startTime: Long! 27 | # The end time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC. Empty or 0 if the event have not finished yet. 28 | # This field may be 0 if the user didn't report it or the `end` event is lost due to network issues. 29 | endTime: Long 30 | layer: String! 31 | } 32 | 33 | type Source { 34 | service: String 35 | serviceInstance: String 36 | endpoint: String 37 | } 38 | 39 | input SourceInput { 40 | service: String 41 | serviceInstance: String 42 | endpoint: String 43 | } 44 | 45 | enum EventType { 46 | Normal 47 | Error 48 | } 49 | 50 | type Events { 51 | events: [Event!]! 52 | } 53 | 54 | input EventQueryCondition { 55 | uuid: String 56 | source: SourceInput 57 | name: String 58 | type: EventType 59 | time: Duration 60 | order: Order 61 | layer: String 62 | paging: Pagination! 63 | } 64 | 65 | extend type Query { 66 | queryEvents(condition: EventQueryCondition): Events 67 | } 68 | -------------------------------------------------------------------------------- /hierarchy.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. 
The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | type HierarchyRelatedService { 18 | # The related service ID. 19 | id: ID! 20 | # The literal name of the #id. 21 | name: String! 22 | # The related service's Layer name. 23 | layer: String! 24 | normal: Boolean! 25 | } 26 | 27 | type HierarchyRelatedInstance { 28 | # The related instance ID. 29 | id: ID! 30 | # The literal name of the #id. Instance Name. 31 | name: String! 32 | # Service id 33 | serviceId: ID! 34 | # The literal name of the #serviceId. 35 | serviceName: String! 36 | # The service's Layer name. 37 | # Service could have multiple layers, this is the layer of the service that the instance belongs to. 38 | layer: String! 39 | normal: Boolean! 40 | } 41 | 42 | type HierarchyServiceRelation { 43 | upperService: HierarchyRelatedService! 44 | lowerService: HierarchyRelatedService! 45 | } 46 | 47 | type HierarchyInstanceRelation { 48 | upperInstance: HierarchyRelatedInstance! 49 | lowerInstance: HierarchyRelatedInstance! 50 | } 51 | 52 | type ServiceHierarchy { 53 | relations: [HierarchyServiceRelation!]! 54 | } 55 | 56 | type InstanceHierarchy { 57 | relations: [HierarchyInstanceRelation!]! 58 | } 59 | 60 | type LayerLevel { 61 | # The layer name. 62 | layer: String! 63 | # The layer level. 64 | # The level of the upper service should greater than the level of the lower service. 65 | level: Int! 66 | } 67 | 68 | extend type Query { 69 | # Query the service hierarchy, based on the given service. Will recursively return all related layers services in the hierarchy. 70 | getServiceHierarchy(serviceId: ID!, layer: String!): ServiceHierarchy! 71 | # Query the instance hierarchy, based on the given instance. Will return all direct related layers instances in the hierarchy, no recursive. 72 | getInstanceHierarchy(instanceId: ID!, layer: String!): InstanceHierarchy! 73 | # List layer hierarchy levels. The layer levels are defined in the `hierarchy-definition.yml`. 74 | listLayerLevels: [LayerLevel!]! 75 | } 76 | -------------------------------------------------------------------------------- /log.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # The list of logs 18 | type Logs { 19 | # When this field is not empty, frontend should display it in UI 20 | errorReason: String 21 | logs: [Log!]! 22 | #For OAP internal query debugging 23 | debuggingTrace: DebuggingTrace 24 | } 25 | 26 | # Log info 27 | type Log { 28 | serviceName: String 29 | serviceId: ID 30 | serviceInstanceName: String 31 | serviceInstanceId: ID 32 | endpointName: String 33 | endpointId: ID 34 | traceId: String 35 | timestamp: Long! 36 | contentType: ContentType! 37 | content: String 38 | tags: [KeyValue!] 39 | } 40 | 41 | # Represent the conditions used for query logs 42 | input LogQueryCondition { 43 | # The value of 0 means all services. 44 | serviceId: ID 45 | serviceInstanceId: ID 46 | endpointId: ID 47 | # Related trace condition. 48 | # When use related trace condition, duration is not required. 49 | relatedTrace: TraceScopeCondition 50 | # The time range of log happened 51 | # [Required] duration is required in most query, only exception is when use relatedTrace. 52 | queryDuration: Duration 53 | paging: Pagination! 54 | tags: [LogTag!] 55 | # Fuzzy query conditions for the log content. 56 | # Use these 2 keyword related condition, when supportQueryLogsByKeywords returns TRUE. 57 | keywordsOfContent: [String!] 58 | excludingKeywordsOfContent: [String!] 59 | # Order by timestamp, default desc 60 | queryOrder: Order 61 | } 62 | 63 | # Trace related condition 64 | input TraceScopeCondition { 65 | traceId: String! 66 | segmentId: String 67 | spanId: Int 68 | } 69 | 70 | input LogTag { 71 | key: String! 72 | value: String 73 | } 74 | 75 | enum ContentType { 76 | TEXT 77 | JSON 78 | YAML 79 | } 80 | 81 | input LogTestRequest { 82 | # The log data of protocol https://github.com/apache/skywalking-data-collect-protocol/blob/e626ee04850703c220f64b642d2893fa65572943/logging/Logging.proto#41 83 | # in JSON format 84 | log: String! 85 | dsl: String! 86 | } 87 | 88 | type LogTestResponse { 89 | # The final log if it will be persisted, this can be empty if the log is dropped. 90 | log: Log 91 | # The metrics generated during the LAL when testing a LogTestRequest 92 | metrics: [LogTestMetrics!] 93 | } 94 | 95 | # The metrics generated during the LAL when testing a LogTestRequest 96 | type LogTestMetrics { 97 | name: String! 98 | tags: [KeyValue!] 99 | value: Long! 100 | timestamp: Long! 101 | } 102 | 103 | extend type Query { 104 | # Return true if the current storage implementation supports fuzzy query for logs. 105 | supportQueryLogsByKeywords: Boolean! 106 | queryLogs(condition: LogQueryCondition, debug: Boolean): Logs 107 | 108 | # Test the logs and get the results of the LAL output. 109 | test(requests: LogTestRequest!): LogTestResponse! 110 | 111 | queryLogTagAutocompleteKeys(duration: Duration!):[String!] 112 | queryLogTagAutocompleteValues(tagKey: String! , duration: Duration!):[String!] 113 | } 114 | -------------------------------------------------------------------------------- /metadata-v2.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. 
The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Metadata query v2 protocol provides layer-based queries for the various services monitored by the SkyWalking ecosystem. 18 | # It is designed to adopt multi-layer, modern cloud native infrastructure. 19 | # In the v9 core, the v1 protocol is provided on top of the v2 implementation. 20 | # The v1 services, Databases, and Browsers are all services with layer=general, layer=database, and layer=browser, respectively. 21 | # Each service would have a native definition of its instances and endpoints. 22 | 23 | # Service is a logical concept, representing a collection of runnable contexts. 24 | type Service { 25 | # Service ID = BASE64(name) + '.1' which keeps the most compatibility with 8.x data formats. 26 | # All metrics of the service would refer to this ID. 27 | # The layer ID would not be included in the service ID, as a service could have multidimensional monitoring, such as ALS + DP for the same service 28 | # ----- Storage ----- 29 | # Row ID in service_traffic entity includes layer ID. 30 | # Service ID = BASE64(name) + '.' + Layer ID 31 | # ------------------- 32 | id: ID! 33 | # The globally unique name. 34 | # Typically, name could be formatted as `group::name` which would be recognized as a group and a short name. 35 | name: String! 36 | # The custom/logical group of the service 37 | group: String! 38 | # The unique name in the group. Mostly for visualization. 39 | shortName: String! 40 | # Layer represents an abstract framework in computer science, such as operating system (VM layer), Kubernetes (k8s layer), 41 | # Service Mesh(typical Istio+Envoy layer). 42 | # The name of a layer is a string, but we reserve the following for visualization (UI) 43 | # https://github.com/apache/skywalking/blob/master/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java 44 | # 45 | # The UI uses these literal layer names to provide various layouts for services and their metrics. 46 | # 47 | # The layer collection is from the instances of this service. So, one service could have multiple layers due to instance-level registration. 48 | layers: [String!]! 49 | # A normal service is a service with an installed agent or with metrics reported directly. 50 | # An unnormal service is a conjectural service, usually detected by the agent. 51 | normal: Boolean 52 | } 53 | 54 | # The minimal runnable unit in the service. It provides consistent and fundamental capabilities from a physical perspective. 55 | # A service, as a logical unit, has multiple instances at runtime. 56 | # For example, an OS-level process, a pod in k8s, or a running function in a FaaS engine. 57 | type ServiceInstance { 58 | id: ID! 59 | name: String! 60 | attributes: [Attribute!]! 61 | language: Language! 62 | instanceUUID: String! 63 | } 64 | 65 | type Attribute { 66 | name: String! 67 | value: String! 68 | } 69 | 70 | # The endpoint is the minimal functional unit. 71 | # Typically, it represents a URI or gRPC service name in the service.
72 | # Different from instance, this is a logical functional unit. 73 | type Endpoint { 74 | id: ID! 75 | name: String! 76 | } 77 | 78 | type EndpointInfo { 79 | id: ID! 80 | name: String! 81 | serviceId: ID! 82 | serviceName: String! 83 | } 84 | 85 | # The process is an operating system process under service instance. 86 | type Process { 87 | # Process entity 88 | id: ID! 89 | name: String! 90 | serviceId: ID! 91 | serviceName: String! 92 | instanceId: ID! 93 | instanceName: String! 94 | # Which agent report this process. 95 | agentId: String! 96 | # The process found type. 97 | detectType: String! 98 | # The process attributes, different process detect type have different corresponding attributes. 99 | attributes: [Attribute!]! 100 | # The process labels. 101 | labels: [String!]! 102 | } 103 | 104 | type TimeInfo { 105 | # server current timezone, format: +0800 106 | timezone: String 107 | # server current timestamp, format: 1569124528392 108 | currentTimestamp: Long 109 | } 110 | 111 | type RecordsTTL { 112 | # Cover hot and warm data for BanyanDB. 113 | normal: Int! 114 | trace: Int! 115 | zipkinTrace: Int! 116 | log: Int! 117 | browserErrorLog: Int! 118 | # Cold data, '-1' represents no cold stage data. 119 | coldNormal: Int! 120 | coldTrace: Int! 121 | coldZipkinTrace: Int! 122 | coldLog: Int! 123 | coldBrowserErrorLog: Int! 124 | } 125 | 126 | type MetricsTTL { 127 | # Cover hot and warm data for BanyanDB. 128 | minute: Int! 129 | hour: Int! 130 | day: Int! 131 | # Cold data, '-1' represents no cold stage data. 132 | coldMinute: Int! 133 | coldHour: Int! 134 | coldDay: Int! 135 | } 136 | 137 | extend type Query { 138 | # Read all available layers 139 | # UI could use this list to determine available dashboards/panels 140 | # The available layers would change with time in the runtime, because new service could be detected in any time. 141 | # This list should be loaded periodically. 142 | listLayers: [String!]! 143 | 144 | # Read the service list according to layer. 145 | listServices(layer: String): [Service!]! 146 | # Find service according to given ID. Return null if not existing. 147 | getService(serviceId: String!): Service 148 | # Search and find service according to given name. Return null if not existing. 149 | findService(serviceName: String!): Service 150 | 151 | # Read service instance list. 152 | listInstances(duration: Duration!, serviceId: ID!): [ServiceInstance!]! 153 | # Search and find service instance according to given ID. Return null if not existing. 154 | getInstance(instanceId: String!): ServiceInstance 155 | 156 | # Search and find matched endpoints according to given service and keyword(optional) 157 | # If no keyword, randomly choose endpoint based on `limit` value. 158 | # If duration is nil mean get all endpoints, otherwise, get the endpoint list in the given duration. 159 | findEndpoint(keyword: String, serviceId: ID!, limit: Int!, duration: Duration): [Endpoint!]! 160 | getEndpointInfo(endpointId: ID!): EndpointInfo 161 | 162 | # Read process list. 163 | listProcesses(duration: Duration!, instanceId: ID!): [Process!]! 164 | # Find process according to given ID. Return null if not existing. 165 | getProcess(processId: ID!): Process 166 | # Get the number of matched processes through serviceId, labels 167 | # Labels: the matched process should contain all labels 168 | # 169 | # The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time. 
170 | # The return number just gives an abstract of the scale of profiling that would be applied. 171 | estimateProcessScale(serviceId: ID!, labels: [String!]!): Long! 172 | 173 | getTimeInfo: TimeInfo 174 | # Get the TTL info of records 175 | getRecordsTTL: RecordsTTL 176 | # Get the TTL info of metrics 177 | getMetricsTTL: MetricsTTL 178 | } 179 | -------------------------------------------------------------------------------- /metadata.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Legacy metrics query protocol 18 | # Replaced by the metadata-v2 in the future 19 | 20 | type Database { 21 | id: ID! 22 | name: String! 23 | type: String! 24 | } 25 | 26 | extend type Query { 27 | # Normal service related meta info 28 | getAllServices(duration: Duration!, group: String): [Service!]! 29 | searchServices(duration: Duration!, keyword: String!): [Service!]! 30 | searchService(serviceCode: String!): Service 31 | 32 | # Fetch all services of Browser type 33 | getAllBrowserServices(duration: Duration!): [Service!]! 34 | searchBrowserServices(duration: Duration!, keyword: String!): [Service!]! 35 | searchBrowserService(serviceCode: String!): Service 36 | 37 | # Service instance query 38 | getServiceInstances(duration: Duration!, serviceId: ID!): [ServiceInstance!]! 39 | 40 | # Endpoint query 41 | # Consider there are huge numbers of endpoint, 42 | # must use endpoint owner's service id, keyword and limit filter to do query. 43 | searchEndpoint(keyword: String!, serviceId: ID!, limit: Int!): [Endpoint!]! 44 | 45 | # Database related meta info. 46 | getAllDatabases(duration: Duration!): [Database!]! 47 | } 48 | -------------------------------------------------------------------------------- /metric.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | # Legacy metrics query protocol 18 | # Replaced by the metrics-v2 in the future 19 | 20 | input MetricCondition { 21 | # Metric name, which should be defined in OAL script 22 | # Such as: 23 | # endpoint_resp_time = from(Endpoint.latency).avg() 24 | # Then, `endpoint_resp_time` 25 | name: String! 26 | # Id in this metric type. 27 | # In the above case, the id should be endpoint id. 28 | id: ID 29 | } 30 | 31 | input BatchMetricConditions { 32 | # Metric name, which should be defined in OAL script 33 | # Such as: 34 | # endpoint_resp_time = from(Endpoint.latency).avg() 35 | # Then, `endpoint_resp_time` 36 | name: String! 37 | # Id in this metric type. 38 | # In the above case, the id should be endpoint id. 39 | ids: [ID!]! 40 | } 41 | 42 | type IntValues { 43 | values: [KVInt!]! 44 | } 45 | 46 | type KVInt { 47 | id: ID! 48 | # This is the value, the caller must understand the Unit. 49 | # Such as: 50 | # 1. If ask for cpm metric, the unit and result should be count. 51 | # 2. If ask for response time (p99 or avg), the unit should be millisecond. 52 | value: Long! 53 | # Since 9.5.0, isEmptyValue indicates whether value == 0 represents actually zero(false, default) or no data(true). 54 | isEmptyValue: Boolean! 55 | } 56 | 57 | type Thermodynamic { 58 | # Each element in nodes represents a point in Thermodynamic Diagram 59 | # And the element includes three values: 60 | # 1) Time Bucket based on query duration 61 | # 2) Response time index. 62 | # Response time = [responseTimeStep * index, responseTimeStep * (index+1)) 63 | # The last element: [Response Time * index, MAX) 64 | # 3) The number of calls in this response time duration. 65 | # 66 | # Example: 67 | # [ [0, 0, 10], [0, 1, 43], ...] 68 | # These ^^^ two represent the left bottom element, and another element above it. 69 | nodes: [[Int]!]! 70 | axisYStep: Int! 71 | } 72 | 73 | 74 | extend type Query { 75 | getValues(metric: BatchMetricConditions!, duration: Duration!): IntValues 76 | getLinearIntValues(metric: MetricCondition!, duration: Duration!): IntValues 77 | # Query the type of metrics including multiple values, and format them as multiple linears. 78 | # The seq of these multiple lines base on the calculation func in OAL 79 | # Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL, 80 | # then five lines will be responded, p50 is the first element of return value. 81 | getMultipleLinearIntValues(metric: MetricCondition!, numOfLinear: Int!, duration: Duration!): [IntValues!]! 82 | getSubsetOfMultipleLinearIntValues(metric: MetricCondition!, linearIndex: [Int!]!, duration: Duration!): [IntValues!]! 83 | getThermodynamic(metric: MetricCondition!, duration: Duration!): Thermodynamic 84 | } 85 | -------------------------------------------------------------------------------- /metrics-v2.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. 
You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Legacy metrics query protocol deprecated since 9.5.0. Replaced by the metrics-v3. 18 | 19 | # Metrics v2 query protocol is an alternative metrics query(s) of original v1, 20 | # defined in the metric.graphql, top-n-records.graphqls, and aggregation.graphqls. 21 | # By leveraging the new ID rule(no register) in the v8, we could query metrics based on name(s) directly. 22 | 23 | # Metrics type is a new concept since v8. 24 | enum MetricsType { 25 | # Can't find the metrics type definition. 26 | UNKNOWN 27 | # Regular value type is suitable for readMetricsValue, readMetricsValues and sortMetrics 28 | REGULAR_VALUE 29 | # Metrics value includes multiple labels, is suitable for readLabeledMetricsValues 30 | # Label should be assigned before the query happens, such as at the setting stage 31 | LABELED_VALUE 32 | # Heatmap value suitable for readHeatMap 33 | HEATMAP 34 | # Top metrics is for readSampledRecords only. 35 | SAMPLED_RECORD 36 | } 37 | 38 | input MetricsCondition { 39 | # Metrics name, which should be defined in OAL script 40 | # Such as: 41 | # endpoint_resp_time = from(Endpoint.latency).avg() 42 | # Then, `endpoint_resp_time` 43 | name: String! 44 | # Follow entity definition description. 45 | entity: Entity! 46 | } 47 | 48 | input TopNCondition { 49 | # Metrics name 50 | name: String! 51 | # Could be null if query the global top N. 52 | parentService: String 53 | # Normal service is the service having installed agent or metrics reported directly. 54 | # Unnormal service is conjectural service, usually detected by the agent. 55 | normal: Boolean 56 | # Indicate the metrics entity scope. 57 | # This is required in sortMetrics query. 58 | # Only accept scope = Service/ServiceInstance/Endpoint, ignore others due to those are pointless. 59 | scope: Scope 60 | topN: Int! 61 | order: Order! 62 | } 63 | 64 | # Define the metrics provided in the OAP server. 65 | type MetricDefinition { 66 | name: String! 67 | type: MetricsType! 68 | # Catalog includes 69 | # SERVICE_CATALOG,SERVICE_INSTANCE_CATALOG,ENDPOINT_CATALOG, 70 | # SERVICE_RELATION_CATALOG,SERVICE_INSTANCE_RELATION_CATALOG_NAME,ENDPOINT_RELATION_CATALOG_NAME 71 | catalog: String 72 | } 73 | 74 | type MetricsValues { 75 | # Could be null if no label assigned in the query condition 76 | label: String 77 | # Values of this label value. 78 | values: IntValues 79 | } 80 | 81 | type HeatMap { 82 | # Each element of values matches the time point of the query duration. 83 | # The element in the IntValues represents the value of the same index bucket 84 | values: [HeatMapColumn!]! 85 | # Bucket describes the ranges of #values represent. 86 | buckets: [Bucket!]! 87 | } 88 | 89 | type HeatMapColumn { 90 | id: ID! 91 | values: [Long!]! 92 | } 93 | 94 | # Bucket represents the value range. 95 | type Bucket { 96 | # Usually the number represents the min value of this bucket, 97 | # could be `infinite-` string as unbounded value 98 | min: String! 99 | # Usually the number represents the max value of this bucket, 100 | # could be `infinite+` string as unbounded value 101 | max: String! 
102 | } 103 | 104 | type SelectedRecord { 105 | # Literal string name for visualization 106 | name: String! 107 | # ID represents the owner of this entity. 108 | id: ID! 109 | # Usually an integer value as this is metrics. 110 | value: String 111 | # Have value, Only if the record has related trace id. 112 | # UI should show this as an attached value. 113 | refId: ID 114 | } 115 | 116 | # Since 9.5.0, a value is Long type, and also nullable. 117 | type NullableValue { 118 | # This is the value, the caller must understand the Unit. 119 | # Such as: 120 | # 1. If ask for cpm metric, the unit and result should be count. 121 | # 2. If ask for response time (p99 or avg), the unit should be millisecond. 122 | value: Long! 123 | # isEmptyValue indicates whether value == 0 represents actually zero(false, default) or no data(true). 124 | isEmptyValue: Boolean! 125 | } 126 | 127 | extend type Query { 128 | # Since 9.5.0 `typeOfMetrics` and `listMetrics` are moved to metrics-v3. 129 | # Metrics definition metadata query. Response the metrics type which determines the suitable query methods. 130 | # typeOfMetrics(name: String!): MetricsType! 131 | # Get the list of all available metrics in the current OAP server. 132 | # Param, regex, could be used to filter the metrics by name. 133 | # listMetrics(regex: String): [MetricDefinition!]! 134 | 135 | # Read metrics single value in the duration of required metrics 136 | readMetricsValue(condition: MetricsCondition!, duration: Duration!): Long! 137 | # Read metrics single value in the duration of required metrics 138 | # NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero. 139 | readNullableMetricsValue(condition: MetricsCondition!, duration: Duration!): NullableValue! 140 | # Read time-series values in the duration of required metrics 141 | readMetricsValues(condition: MetricsCondition!, duration: Duration!): MetricsValues! 142 | # Read entity list of required metrics and parent entity type. 143 | sortMetrics(condition: TopNCondition!, duration: Duration!): [SelectedRecord!]! 144 | # Read value in the given time duration, usually as a linear. 145 | # labels: the labels you need to query. 146 | readLabeledMetricsValues(condition: MetricsCondition!, labels: [String!]!, duration: Duration!): [MetricsValues!]! 147 | # Heatmap is bucket based value statistic result. 148 | readHeatMap(condition: MetricsCondition!, duration: Duration!): HeatMap 149 | # Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls 150 | # Read the sampled records 151 | # TopNCondition#scope is not required. 152 | readSampledRecords(condition: TopNCondition!, duration: Duration!): [SelectedRecord!]! 153 | } 154 | -------------------------------------------------------------------------------- /metrics-v3.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. 
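For illustration, the metrics-v2 queries above could be called as in the following sketch. The Entity input is defined in metrics-v3.graphqls below; the metric names and service name are placeholders, and the Scope/Order enum values are assumptions based on common.graphqls.

# A hedged sketch; metric and service names are placeholders, enum values assumed from common.graphqls.
query MetricsV2Example($duration: Duration!) {
  readMetricsValues(
    condition: {
      name: "service_resp_time"
      entity: { serviceName: "example-service", normal: true }
    }
    duration: $duration
  ) {
    label
    values { values { id value isEmptyValue } }
  }
  sortMetrics(
    condition: { name: "service_cpm", scope: Service, topN: 10, order: DES }
    duration: $duration
  ) {
    name
    id
    value
  }
}
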
You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. 18 | # MQE allows users to do simple query-stage calculation like well known PromQL. 19 | 20 | input Entity { 21 | # Deprecated from 9.4.0 22 | # Scope could be sensed automatically through given metric name. 23 | scope: Scope 24 | # 1. metrics Scope=Service, ServiceInstance and Endpoint, set necessary serviceName/serviceInstanceName/endpointName 25 | # 2. metrics Scope=ServiceRelation, ServiceInstanceRelation, EndpointRelation and ProcessRelation 26 | # serviceName/serviceInstanceName/endpointName/processName is/are the source(s) 27 | # destServiceName/destServiceInstanceName/destEndpointName/destProcessName is/are destination(s) 28 | # set necessary names of sources and destinations. 29 | serviceName: String 30 | # Normal service is the service having installed agent or metrics reported directly. 31 | # Unnormal service is conjectural service, usually detected by the agent. 32 | normal: Boolean 33 | serviceInstanceName: String 34 | endpointName: String 35 | processName: String 36 | destServiceName: String 37 | # Normal service is the service having installed agent or metrics reported directly. 38 | # Unnormal service is conjectural service, usually detected by the agent. 39 | destNormal: Boolean 40 | destServiceInstanceName: String 41 | destEndpointName: String 42 | destProcessName: String 43 | } 44 | 45 | enum ExpressionResultType { 46 | # Can't resolve the type of the given expression. 47 | UNKNOWN 48 | # A single value 49 | SINGLE_VALUE 50 | # A collection of time-series values. 51 | # The value could have labels or not. 52 | TIME_SERIES_VALUES 53 | # A collection of aggregated values through metric sort function 54 | SORTED_LIST 55 | # A collection of sampled records. 56 | # When the original metric type is sampled records 57 | RECORD_LIST 58 | } 59 | 60 | type Owner { 61 | # Scope=Service, return serviceID, serviceName 62 | # Scope=ServiceInstance, return serviceID, serviceName, serviceInstanceID, serviceInstanceName 63 | # Scope=Endpoint, return serviceID, serviceName, endpointID, endpointName 64 | scope: Scope 65 | serviceID: ID 66 | serviceName: String 67 | # Normal service is the service having installed agent or metrics reported directly. 68 | # Unnormal service is conjectural service, usually detected by the agent. 69 | normal: Boolean 70 | serviceInstanceID: ID 71 | serviceInstanceName: String 72 | endpointID: ID 73 | endpointName: String 74 | } 75 | 76 | type MQEValue { 77 | # Timestamp or name of the entity or record. It could be NULL if it is the result of an aggregate calculation. 78 | id: ID 79 | # The owner of this metric value. 80 | # When make a topN query, the owner info will be returned. 81 | # Since 10.2 82 | owner: Owner 83 | # Value is formatted double/int or NULL if the value is absent. 84 | value: String 85 | # Sampled record could associate with a trace. 86 | # This would be a trace ID only. 87 | traceID: ID 88 | } 89 | 90 | type MQEValues { 91 | # The metadata description of this value series. 92 | metric: Metadata! 93 | # 1. 
When the type == SINGLE_VALUE, values only have one value. 94 | # 2. When the type == TIME_SERIES_VALUES, values would match the given elements in the duration range. 95 | # 3. When the type == SORTED_LIST, values could be results of `sort(metric)` 96 | # 4. When the type == RECORD_LIST, values could be sampled records 97 | values: [MQEValue!]! 98 | } 99 | 100 | type Metadata { 101 | # Key-value pairs to describe the metric 102 | labels: [KeyValue!]! 103 | } 104 | 105 | type ExpressionResult { 106 | type: ExpressionResultType! 107 | # When the type == TIME_SERIES_VALUES or SINGLE_VALUE, the results would be a collection of MQEValues according to the metric labels. 108 | # In other legal type cases, only one MQEValues is expected in the array. 109 | results: [MQEValues!]! 110 | # When type == ExpressionResultType.UNKNOWN, 111 | # the error message includes the expression resolving errors. 112 | error: String 113 | debuggingTrace: DebuggingTrace 114 | } 115 | 116 | extend type Query { 117 | # Metrics definition metadata query. Responds with the metrics type, which determines the suitable query methods. 118 | typeOfMetrics(name: String!): MetricsType! 119 | # Get the list of all available metrics in the current OAP server. 120 | # Param, regex, could be used to filter the metrics by name. 121 | listMetrics(regex: String): [MetricDefinition!]! 122 | # Param, if debug is true will enable the query tracing and return DebuggingTrace in the ExpressionResult. 123 | # Param, if dumpDBRsp is true the database response will dump into the DebuggingTrace span message. 124 | execExpression(expression: String!, entity: Entity!, duration: Duration!, debug: Boolean, dumpDBRsp: Boolean): ExpressionResult! 125 | } 126 | -------------------------------------------------------------------------------- /ondemand-pod-log.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # On-demand Pod log: fetch the Pod logs on users' demand. The logs 18 | # are fetched and displayed in real time, and are not persisted 19 | # in any way. This is helpful when users want to do some experiments 20 | # and monitor the logs to see what's happening inside the service. 21 | # Note: if you print secrets in the logs, they are also visible to 22 | # the UI, so this feature is disabled by default; please read the 23 | # configuration documentation to enable it manually. 24 | 25 | input OndemandLogQueryCondition { 26 | serviceInstanceId: ID 27 | container: String! 28 | duration: Duration 29 | keywordsOfContent: [String!] 30 | excludingKeywordsOfContent: [String!] 31 | }
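For illustration, the MQE execExpression query defined above in metrics-v3.graphqls could be called as in the following sketch. The expression and service name are placeholders, and avg() is assumed here to be an available MQE aggregation function.

# A hedged sketch; the MQE expression and entity names are placeholders.
query MQEExample($duration: Duration!) {
  execExpression(
    expression: "avg(service_resp_time)"
    entity: { serviceName: "example-service", normal: true }
    duration: $duration
    debug: false
    dumpDBRsp: false
  ) {
    type
    error
    results {
      metric { labels { key value } }
      values { id value traceID }
    }
  }
}
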
32 | 33 | input OndemandContainergQueryCondition { 34 | serviceInstanceId: ID 35 | } 36 | 37 | type PodContainers { 38 | # When this field is not empty, the frontend should display it in the UI 39 | errorReason: String 40 | 41 | containers: [String!]! 42 | } 43 | 44 | extend type Query { 45 | listContainers(condition: OndemandContainergQueryCondition): PodContainers 46 | ondemandPodLogs(condition: OndemandLogQueryCondition): Logs 47 | } 48 | -------------------------------------------------------------------------------- /profile.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Data needed to create a profile task 18 | input ProfileTaskCreationRequest { 19 | # id of the service to be monitored 20 | serviceId: ID! 21 | # endpoint name to be monitored under the given service. 22 | endpointName: String! 23 | # if null, the task starts ASAP; otherwise, the task begins after the startTime (based on agent-side time) 24 | startTime: Long 25 | # duration of this task (minutes) 26 | duration: Int! 27 | # when the segment starts to execute, how long it must run before the monitor can be enabled 28 | minDurationThreshold: Int! 29 | # once monitoring starts, the time interval between each stack dump 30 | dumpPeriod: Int! 31 | # max number of traces to be monitored on the sniffer 32 | maxSamplingCount: Int! 33 | } 34 | 35 | # Profile task creation result 36 | type ProfileTaskCreationResult { 37 | # null or empty means the task was created successfully; otherwise, this holds the creation error reason 38 | errorReason: String 39 | 40 | # the task id when creation succeeds 41 | id: String 42 | } 43 | 44 | # Profile task log operation type 45 | enum ProfileTaskLogOperationType { 46 | # when the sniffer has been notified 47 | NOTIFIED, 48 | # when the sniffer has finished execution and reported 49 | EXECUTION_FINISHED 50 | } 51 | 52 | # Profile task execution log 53 | type ProfileTaskLog { 54 | id: String! 55 | # the executing instance 56 | instanceId: ID! 57 | instanceName: String! 58 | # operation type 59 | operationType: ProfileTaskLogOperationType! 60 | # operation time 61 | operationTime: Long! 62 | } 63 | 64 | # Profile task 65 | type ProfileTask { 66 | id: String! 67 | # the monitored service 68 | serviceId: ID! 69 | serviceName: String! 70 | # endpoint name to be monitored under the given service. 71 | endpointName: String! 72 | # task start time (timestamp) 73 | startTime: Long! 74 | # duration of this task (minutes) 75 | duration: Int! 76 | # when the segment starts to execute, how long it must run before the monitor can be enabled 77 | minDurationThreshold: Int! 78 | # once monitoring starts, the time interval between each stack dump 79 | dumpPeriod: Int!
80 | # max number of traces to be monitored on the sniffer 81 | maxSamplingCount: Int! 82 | 83 | # instance operation logs 84 | logs: [ProfileTaskLog!]! 85 | } 86 | 87 | # Profile thread stack analyze tree element 88 | type ProfileStackElement { 89 | # used for tree building; an id may be matched by multiple parentId values 90 | id: ID! 91 | parentId: ID! 92 | # stack code signature 93 | codeSignature: String! 94 | # Includes the execution time of children (milliseconds) 95 | duration: Int! 96 | # Excludes the execution time of children (milliseconds) 97 | durationChildExcluded: Int! 98 | # continuous dump count 99 | count: Int! 100 | } 101 | 102 | # Profile thread stack analyze tree 103 | type ProfileStackTree { 104 | elements: [ProfileStackElement!]! 105 | } 106 | 107 | # Profile analyze result 108 | type ProfileAnalyzation { 109 | # if not empty, the backend has information to give to the user, 110 | # such as: a large number of snapshots, so only part of the data is analyzed 111 | tip: String 112 | # thread stack dump analyze trees 113 | trees: [ProfileStackTree!]! 114 | } 115 | 116 | type ProfiledSpan { 117 | spanId: Int! 118 | parentSpanId: Int! 119 | segmentId: ID! 120 | refs: [Ref!]! 121 | serviceCode: String! 122 | serviceInstanceName: ID! 123 | startTime: Long! 124 | endTime: Long! 125 | endpointName: String 126 | # There are three span types: Local, Entry and Exit 127 | type: String! 128 | # Peer network id, e.g. host+port, ip+port 129 | peer: String 130 | component: String 131 | isError: Boolean 132 | # There are 5 layers: Unknown, Database, RPCFramework, Http, MQ and Cache 133 | layer: String 134 | tags: [KeyValue!]! 135 | logs: [LogEntity!]! 136 | # Status represents profiling data that covers the duration of the span. 137 | profiled: Boolean! 138 | } 139 | 140 | type ProfiledSegment { 141 | spans: [ProfiledSpan!]! 142 | } 143 | 144 | input ProfileAnalyzeTimeRange { 145 | start: Long! 146 | end: Long! 147 | } 148 | 149 | type ProfiledTraceSegments { 150 | traceId: String! 151 | instanceId: ID! 152 | instanceName: String! 153 | endpointNames: [String!]! 154 | duration: Int! 155 | start: String! 156 | spans: [ProfiledSpan!]! 157 | } 158 | 159 | input SegmentProfileAnalyzeQuery { 160 | segmentId: String! 161 | timeRange: ProfileAnalyzeTimeRange! 162 | } 163 | 164 | extend type Mutation { 165 | # create a new profile task 166 | createProfileTask(creationRequest: ProfileTaskCreationRequest): ProfileTaskCreationResult! 167 | } 168 | 169 | extend type Query { 170 | # query the task list, ordered by ProfileTask#startTime descending 171 | getProfileTaskList(serviceId: ID, endpointName: String): [ProfileTask!]! 172 | # query all task logs 173 | getProfileTaskLogs(taskID: String): [ProfileTaskLog!]! 174 | # query all task profiled segment list 175 | getProfileTaskSegments(taskID: ID!): [ProfiledTraceSegments!]! 176 | # analyze multiple profiled segments; start and end time are timestamps (milliseconds) 177 | getSegmentsProfileAnalyze(queries: [SegmentProfileAnalyzeQuery!]!): ProfileAnalyzation! 178 | } 179 | -------------------------------------------------------------------------------- /record.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership.
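For illustration, the profiling mutation and queries defined above in profile.graphqls could be used as in the following sketch; the service ID, endpoint name, and numeric values are placeholders.

# A hedged sketch; IDs, endpoint names, and numbers are placeholders.
mutation CreateProfileTaskExample {
  createProfileTask(creationRequest: {
    serviceId: "example-service-id"
    endpointName: "/api/orders"
    duration: 5
    minDurationThreshold: 10
    dumpPeriod: 10
    maxSamplingCount: 5
  }) {
    id
    errorReason
  }
}

query ProfileTaskListExample {
  getProfileTaskList(serviceId: "example-service-id", endpointName: "/api/orders") {
    id
    serviceName
    endpointName
    startTime
    duration
    logs { instanceName operationType operationTime }
  }
}
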
The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Legacy record query protocol deprecated since 9.5.0. Replaced by the metrics-v3. 18 | # Since 9.3.0 19 | # Record is a general and abstract type for collected raw data. 20 | # In the observability, traces and logs have specific and well-defined meanings, and the general records represent other 21 | # collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body) 22 | extend type Query { 23 | # Query collected records with given metric name and parent entity conditions, and return in the requested order. 24 | readRecords(condition: RecordCondition!, duration: Duration!): [Record!]! 25 | } 26 | 27 | input RecordCondition { 28 | # Metrics name 29 | # The scope of this metric is required to match the scope of the parent entity. 30 | name: String! 31 | # Follow entity definition description. 32 | # The owner of the sampled records 33 | parentEntity: Entity! 34 | topN: Int! 35 | order: Order! 36 | } 37 | 38 | type Record { 39 | # Literal string name for visualization 40 | name: String! 41 | # ID of this record 42 | id: ID! 43 | # Usually an integer value as this is a metric to measure this entity ID. 44 | value: String 45 | # Have value, Only if the record has related trace id. 46 | # UI should show this as an attached value. 47 | refId: ID 48 | } 49 | -------------------------------------------------------------------------------- /top-n-records.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Top N record query is different with aggregation Query. 18 | # Both of query results are top N, but aggregation topN query needs to do aggregation at query stage, 19 | # the top N record query is just do order and get the list. 20 | 21 | # Legacy metrics query protocol 22 | # Replaced by the metrics-v2 in the future 23 | 24 | # Top N query is based on latency order by given service and metric name. 25 | input TopNRecordsCondition { 26 | serviceId: ID! 27 | metricName: String! 28 | topN: Int! 29 | # Top N order, ASC or DES. 30 | order: Order! 31 | duration: Duration! 
32 | } 33 | 34 | type TopNRecord { 35 | statement: String 36 | latency: Long! 37 | # Have value, Only if the record has the trace id. 38 | # Slow record 39 | traceId: String 40 | } 41 | 42 | extend type Query { 43 | getTopNRecords(condition: TopNRecordsCondition!): [TopNRecord!]! 44 | } 45 | -------------------------------------------------------------------------------- /topology.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # The overview topology of the whole application cluster or services, 18 | type Topology { 19 | nodes: [Node!]! 20 | calls: [Call!]! 21 | debuggingTrace: DebuggingTrace 22 | } 23 | 24 | # The instance topology based on the given serviceIds 25 | type ServiceInstanceTopology { 26 | nodes: [ServiceInstanceNode!]! 27 | calls: [Call!]! 28 | debuggingTrace: DebuggingTrace 29 | } 30 | 31 | # The endpoint topology 32 | type EndpointTopology { 33 | nodes: [EndpointNode!]! 34 | calls: [Call!]! 35 | debuggingTrace: DebuggingTrace 36 | } 37 | 38 | # The process topology 39 | type ProcessTopology { 40 | nodes: [ProcessNode!]! 41 | calls: [Call!]! 42 | debuggingTrace: DebuggingTrace 43 | } 44 | 45 | # Node in Topology 46 | type Node { 47 | # The service ID of the node. 48 | id: ID! 49 | # The literal name of the #id. 50 | name: String! 51 | # The type name may be 52 | # 1. The service provider/middleware tech, such as: Tomcat, SpringMVC 53 | # 2. Conjectural Service, e.g. MySQL, Redis, Kafka 54 | type: String 55 | # It is a conjecture node or real node, to represent a service or endpoint. 56 | isReal: Boolean! 57 | # The layers of the service. 58 | layers: [String!]! 59 | } 60 | 61 | # Node in ServiceInstanceTopology 62 | type ServiceInstanceNode { 63 | # The instance id of each node, 64 | id: ID! 65 | # The literal name of the #id. Instance Name. 66 | name: String! 67 | # Service id 68 | serviceId: ID! 69 | # The literal name of the #serviceId. 70 | serviceName: String! 71 | # [Deprecated] 72 | # No type for service instance topology. 73 | type: String 74 | # It is a conjecture node or real node, to represent an instance. 75 | isReal: Boolean! 76 | } 77 | 78 | # Node in EndpointTopology 79 | type EndpointNode { 80 | # The instance id of each node, 81 | id: ID! 82 | # The literal name of the #id. Endpoint Name 83 | name: String! 84 | # Service id 85 | serviceId: ID! 86 | # The literal name of the #serviceId. 87 | serviceName: String! 88 | # [Deprecated] 89 | # No type for service instance topology. 90 | type: String 91 | # It is a conjuecture node or real node, to represent an instance. 92 | isReal: Boolean! 
93 | } 94 | 95 | # Node in ProcessTopology 96 | type ProcessNode { 97 | # The process id of each node. 98 | id: ID! 99 | # Service id 100 | serviceId: ID! 101 | # The literal name of the #serviceId. 102 | serviceName: String! 103 | # Service Instance id 104 | serviceInstanceId: ID! 105 | # The literal name of the #serviceInstanceId. 106 | serviceInstanceName: String! 107 | # The name of the process 108 | name: String! 109 | # It's a virtual node or real node. 110 | # A unreal node means that the current node is not a process belonging to this instance, but interacting with the processes belonging to the instance. 111 | isReal: Boolean! 112 | } 113 | 114 | # The Call represents a directed distributed call, 115 | # from the `source` to the `target`. 116 | type Call { 117 | source: ID! 118 | # The protocol and tech stack used at source side in this distributed call 119 | # No value in instance topology and endpoint dependency. 120 | sourceComponents: [ID!]! 121 | target: ID! 122 | # The protocol and tech stack used at target side in this distributed call 123 | # No value in instance topology and endpoint dependency. 124 | targetComponents: [ID!]! 125 | id: ID! 126 | # The detect Points of this distributed call. 127 | detectPoints: [DetectPoint!]! 128 | } 129 | 130 | # Param, if debug is true will enable the query tracing and return DebuggingTrace in the result. 131 | extend type Query { 132 | # Query the global topology 133 | # When layer is specified, the topology of this layer would be queried 134 | getGlobalTopology(duration: Duration!, layer: String, debug: Boolean): Topology 135 | # Query the topology, based on the given service 136 | getServiceTopology(serviceId: ID!, duration: Duration!, debug: Boolean): Topology 137 | # Query the topology, based on the given services. 138 | # `#getServiceTopology` could be replaced by this. 139 | getServicesTopology(serviceIds: [ID!]!, duration: Duration!, debug: Boolean): Topology 140 | # Query the instance topology, based on the given clientServiceId and serverServiceId 141 | getServiceInstanceTopology(clientServiceId: ID!, serverServiceId: ID!, duration: Duration!, debug: Boolean): ServiceInstanceTopology 142 | # Query the topology, based on the given endpoint 143 | getEndpointTopology(endpointId: ID!, duration: Duration!): Topology 144 | # v2 of getEndpointTopology 145 | getEndpointDependencies(endpointId: ID!, duration: Duration!, debug: Boolean): EndpointTopology 146 | # Query the topology, based on the given instance 147 | getProcessTopology(serviceInstanceId: ID!, duration: Duration!, debug: Boolean): ProcessTopology 148 | } 149 | -------------------------------------------------------------------------------- /trace.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
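For illustration, the topology queries defined above in topology.graphqls could be called as in the following sketch; the layer name and service ID are placeholders, and the Duration input comes from common.graphqls.

# A hedged sketch; the layer name and service ID are placeholders.
query TopologyExample($duration: Duration!) {
  getGlobalTopology(duration: $duration, layer: "GENERAL") {
    nodes { id name type isReal layers }
    calls { id source target detectPoints }
  }
  getServicesTopology(serviceIds: ["example-service-id"], duration: $duration) {
    nodes { id name isReal }
    calls { id source target }
  }
}
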
14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # The list of traces 18 | type TraceBrief { 19 | traces: [BasicTrace!]! 20 | #For OAP internal query debugging 21 | debuggingTrace: DebuggingTrace 22 | } 23 | 24 | # Trace basic info 25 | type BasicTrace { 26 | segmentId: String! 27 | endpointNames: [String!]! 28 | duration: Int! 29 | start: String! 30 | isError: Boolean 31 | traceIds: [String!]! 32 | } 33 | 34 | # Represent the conditions used for query TraceBrief 35 | input TraceQueryCondition { 36 | # The value of 0 means all services. 37 | serviceId: ID 38 | serviceInstanceId: ID 39 | traceId: String 40 | endpointId: ID 41 | # The time range of traces started 42 | queryDuration: Duration 43 | # The min time of trace 44 | minTraceDuration: Int 45 | # The max time of trace 46 | maxTraceDuration: Int 47 | traceState: TraceState! 48 | queryOrder: QueryOrder! 49 | # Map to the tags included in the traces 50 | tags: [SpanTag!] 51 | paging: Pagination! 52 | } 53 | 54 | input SpanTag { 55 | key: String! 56 | value: String 57 | } 58 | 59 | enum TraceState { 60 | ALL 61 | SUCCESS 62 | ERROR 63 | } 64 | 65 | enum QueryOrder { 66 | BY_START_TIME 67 | BY_DURATION 68 | } 69 | 70 | # The trace represents a distributed trace, includes all segments and spans. 71 | type Trace { 72 | spans: [Span!]! 73 | #For OAP internal query debugging 74 | debuggingTrace: DebuggingTrace 75 | } 76 | 77 | type Span { 78 | traceId: ID! 79 | segmentId: ID! 80 | spanId: Int! 81 | parentSpanId: Int! 82 | refs: [Ref!]! 83 | serviceCode: String! 84 | serviceInstanceName: ID! 85 | # The start timestamp of the span in millisecond 86 | startTime: Long! 87 | # The end timestamp of the span in millisecond 88 | endTime: Long! 89 | endpointName: String 90 | # There are three span types: Local, Entry and Exit 91 | type: String! 92 | # Peer network id, e.g. host+port, ip+port 93 | peer: String 94 | # The name of the tech stack component used for the execution represented by the span. 95 | component: String 96 | # The error status is true when the execution returns error code or throws an exception(determined by the language). 97 | isError: Boolean 98 | # There are 5 layers: Unknown, Database, RPCFramework, Http, MQ and Cache 99 | layer: String 100 | # key-value(string) pairs to specify unique attributes of ths span 101 | tags: [KeyValue!]! 102 | # The events happen of the span, especially in-process. 103 | logs: [LogEntity!]! 104 | # The attached events happen in the span's context but out-of-process. 105 | # Check SpanAttachedEvent definition for more details. 106 | attachedEvents: [SpanAttachedEvent!]! 107 | } 108 | 109 | # Ref represents the link between the segment and its parents. 110 | # The parent(ref) may not exists, which means batch process. 111 | # The UI should display a list, representing the other trace IDs. 112 | type Ref { 113 | traceId: ID! 114 | parentSegmentId: ID! 115 | parentSpanId: Int! 116 | # Ref type represents why did the ref happen. 117 | # Include: 1) CrossProcess 2) CrossThread 118 | type: RefType! 119 | } 120 | 121 | enum RefType { 122 | CROSS_PROCESS, 123 | CROSS_THREAD 124 | } 125 | 126 | type LogEntity { 127 | # The timestamp of the log in millisecond 128 | time: Long! 129 | data: [KeyValue!] 130 | } 131 | 132 | # An instantaneous point on the time-line. 133 | # An instant represents a data point accurate to the nanosecond. 
134 | # It is constituted by a long representing epoch-seconds and an int representing nanosecond-of-second, 135 | # which will always be between 0 and 999,999,999 136 | type Instant { 137 | # The number of seconds from the epoch of 1970-01-01T00:00:00Z. 138 | seconds: Long! 139 | # The number of nanoseconds, later along the time-line, from the seconds field. 140 | # This is always positive, and never exceeds 999,999,999. 141 | nanos: Int! 142 | } 143 | 144 | # SpanAttachedEvent represents an attached event for a traced RPC. 145 | # When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. 146 | # And the rover would be aware of this RPC due to the existing tracing header. 147 | # Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. 148 | # 149 | # Notice, THIS IS ALSO AVAILABLE FOR ZIPKIN SPAN. 150 | # ----------------------------------------------- 151 | # In SkyWalking, ZipkinQueryHandler provides full support for all Zipkin span queries. 152 | # SpanAttachedEvent query is supported through the trace query URI: /api/v2/trace/{traceId} 153 | # A new `attachedEvents` field would be added in JSONArray format with SpanAttachedEvent in JSON as elements. 154 | type SpanAttachedEvent { 155 | # The nanosecond timestamp of the event's start time. 156 | # Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here. 157 | # Because the attached event happens in the OS syscall level, most of them are executed rapidly. 158 | startTime: Instant! 159 | # The official event name. 160 | # For example, the event name is a method signature from syscall stack. 161 | event: String! 162 | # [Optional] The nanosecond timestamp of the event's end time. 163 | endTime: Instant! 164 | # The tags for this event includes some extra OS level information, 165 | # such as 166 | # 1. net_device used for this exit span. 167 | # 2. network L7 protocol 168 | tags: [KeyValue]! 169 | # The summary of statistics during this event. 170 | # Each statistic provides a name(metric name) to represent the name, and an int64/long as the value. 171 | summary: [KeyNumericValue!]! 172 | } 173 | 174 | # Param, if debug is true will enable the query tracing and return DebuggingTrace in the result. 175 | extend type Query { 176 | # Search segment list with given conditions 177 | queryBasicTraces(condition: TraceQueryCondition, debug: Boolean): TraceBrief 178 | # Read the specific trace ID with given trace ID 179 | queryTrace(traceId: ID!, debug: Boolean): Trace 180 | # Only for BanyanDB, can be used to query the trace in the cold stage. 181 | queryTraceFromColdStage(traceId: ID!, duration: Duration!, debug: Boolean): Trace 182 | # Read the list of searchable keys 183 | queryTraceTagAutocompleteKeys(duration: Duration!):[String!] 184 | # Search the available value options of the given key. 185 | queryTraceTagAutocompleteValues(tagKey: String! , duration: Duration!):[String!] 186 | } 187 | -------------------------------------------------------------------------------- /ui-configuration.graphqls: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. 
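For illustration, the trace queries defined above in trace.graphqls could be called as in the following sketch; the service ID is a placeholder, and the Pagination fields are assumed from common.graphqls.

# A hedged sketch; the service ID and paging fields are assumptions.
query TraceExample($duration: Duration!) {
  queryBasicTraces(condition: {
    serviceId: "example-service-id"
    queryDuration: $duration
    traceState: ERROR
    queryOrder: BY_DURATION
    paging: { pageNum: 1, pageSize: 15 }
  }) {
    traces { segmentId endpointNames duration start isError traceIds }
  }
}
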
The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Dashboard Configuration provides the management capabilities for the SkyWalking native UI. 18 | 19 | type DashboardConfiguration { 20 | # ID is a generated UUID. 21 | id: String! 22 | # JSON-based configuration. The format of the text is the export result from the UI page. 23 | configuration: String! 24 | } 25 | 26 | type MenuItem { 27 | # Title name 28 | title: String! 29 | # Icon name to show 30 | icon: String 31 | # Linked layer name 32 | layer: String! 33 | # An activated item should be listed on the menu; 34 | # otherwise, it should stay in the marketplace. 35 | activate: Boolean! 36 | # Sub menu items 37 | subItems: [MenuItem!]! 38 | # Description of the item 39 | description: String 40 | # The document link for the latest version of this feature. 41 | documentLink: String 42 | # The i18n key for the title and description of this feature displayed in the UI. 43 | i18nKey: String 44 | } 45 | 46 | extend type Query { 47 | # Read an existing UI template according to the given id. 48 | getTemplate(id: String!): DashboardConfiguration 49 | # Read all configuration templates. 50 | getAllTemplates: [DashboardConfiguration!]! 51 | # Read all menu items 52 | getMenuItems: [MenuItem!]! 53 | } 54 | 55 | # Used for adding a new template 56 | input NewDashboardSetting { 57 | # JSON-based configuration. The format of the text is the export result from the UI page. 58 | configuration: String! 59 | } 60 | 61 | input DashboardSetting { 62 | id: String! 63 | # JSON-based configuration. The format of the text is the export result from the UI page. 64 | configuration: String! 65 | } 66 | 67 | type TemplateChangeStatus { 68 | id: String! 69 | # True means the change succeeded. 70 | status: Boolean! 71 | message: String 72 | } 73 | 74 | # The Template Management page provides creation, update and deletion for the different template types. 75 | extend type Mutation { 76 | addTemplate(setting: NewDashboardSetting!): TemplateChangeStatus! 77 | changeTemplate(setting: DashboardSetting!): TemplateChangeStatus! 78 | disableTemplate(id: String!): TemplateChangeStatus! 79 | } 80 | --------------------------------------------------------------------------------
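For illustration, the UI configuration query and mutation defined above in ui-configuration.graphqls could be used as in the following sketch; the dashboard configuration JSON content is a placeholder.

# A hedged sketch; the configuration JSON content is a placeholder.
query MenuExample {
  getMenuItems {
    title
    icon
    layer
    activate
    description
    subItems { title layer activate }
  }
}

mutation AddTemplateExample {
  addTemplate(setting: { configuration: "{\"name\":\"example-dashboard\"}" }) {
    id
    status
    message
  }
}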