├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── agent_client.go
├── agent_connection.go
├── api.go
├── bf_format
├── bf_writer.go
└── bf_writer_test.go
├── configuration.go
├── configuration_test.go
├── dashboard
├── .gitignore
├── .ncurc.json
├── .npmrc
├── config-overrides.js
├── package-lock.json
├── package.json
├── packaging
│ ├── packager.js
│ └── webpack.config-merge.js
├── public
│ └── index.html
└── src
│ ├── App.css
│ ├── App.js
│ ├── App.test.js
│ ├── Content
│ ├── Error.js
│ ├── ProfileList.js
│ ├── ProfilingStatus.js
│ └── index.js
│ ├── Icon
│ └── BlackfireLogo.js
│ ├── Timeago.js
│ ├── Timeagolib.js
│ ├── index.css
│ ├── index.js
│ ├── redux
│ ├── actions
│ │ └── DashboardActions.js
│ ├── constants
│ │ └── DashboardConstants.js
│ ├── fetcher.js
│ ├── reducers
│ │ ├── DashboardReducer.js
│ │ └── index.js
│ └── stores
│ │ ├── configureStore.dev.js
│ │ ├── configureStore.js
│ │ ├── configureStore.prod.js
│ │ └── importStore.js
│ ├── serviceWorker.js
│ └── setupTests.js
├── docker-compose.yml
├── docker
├── Dockerfile
└── entrypoint_dev.sh
├── fixtures
├── test2_blackfire.ini
└── test_blackfire.ini
├── go.mod
├── go.sum
├── http.go
├── log.go
├── pprof_reader
├── README.md
├── fixtures
│ ├── wt.bf
│ └── wt.pprof.gz
├── internal
│ └── profile
│ │ ├── LICENSE
│ │ ├── encode.go
│ │ ├── filter.go
│ │ ├── legacy_profile.go
│ │ ├── profile.go
│ │ ├── proto.go
│ │ └── prune.go
├── pprof_reader_test.go
├── profile.go
├── profile_test.go
└── test.bf
├── probe.go
├── profile.go
├── signal.go
└── statik
└── statik.go
/.gitignore:
--------------------------------------------------------------------------------
1 | go-probe.log
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2019-2023 Platform.sh
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | this software and associated documentation files (the "Software"), to deal in
5 | the Software without restriction, including without limitation the rights to
6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
7 | of the Software, and to permit persons to whom the Software is furnished to do
8 | so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 |
21 |
22 | Code in pprof_reader/internal/profile governed by the Go license, specified in pprof_reader/internal/profile/LICENSE.
23 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | project ?= go-blackfire
2 |
3 | # Project name must be compatible with docker-compose
4 | override project := $(shell echo $(project) | tr -d -c '[a-z0-9]' | cut -c 1-55)
5 |
6 | COMPOSE=docker-compose --project-directory . -f docker-compose.yml --project-name $(project)
7 | RUN_DASHBOARD=$(COMPOSE) run --rm --no-deps go_dashboard
8 | ifdef CI
9 | COMPOSE_BUILD_OPT = --progress=plain
10 | endif
11 |
12 | .DEFAULT_GOAL := help
13 |
14 | ##
15 | #### Dashboard
16 | ##
17 |
18 | dashboard-clean: ## Clean dirs
19 | @$(RUN_DASHBOARD) rm -Rf "build" "dist"
20 | .PHONY: dashboard-clean
21 |
22 | dashboard-build: dashboard/node_modules dashboard-clean ## Build the app
23 | @$(RUN_DASHBOARD) npm run build
24 | @$(RUN_DASHBOARD) npm run merge
25 | @$(RUN_DASHBOARD) node packaging/packager.js
26 | @$(RUN_DASHBOARD) rm -Rf build
27 |
28 | @echo "\n\n\n\nApp has been built in \033[32mdashboard/dist/index.html\033[0m, run \033[32mnpm run serve\033[0m to use it\n\n"
29 | .PHONY: dashboard-build
30 |
31 | dashboard-update-statik: dashboard-build
32 | @go get -u github.com/rakyll/statik
33 | @statik -src=dashboard/dist/
34 |
35 | dashboard-serve-dev: dashboard/node_modules ## Serve the app for development purpose (live reload)
36 | @npm run --prefix=dashboard start
37 | .PHONY: dashboard-serve-dev
38 |
39 | dashboard-serve: dashboard-build ## Build then serve the app
40 | @npm run --prefix=dashboard serve
41 | .PHONY: dashboard-serve
42 |
43 | dashboard/node_modules: dashboard/package-lock.json
44 | @$(RUN_DASHBOARD) npm install
45 |
46 | dashboard-test: dashboard/node_modules
47 | @$(RUN_DASHBOARD) npm run test
48 |
49 | dashboard-eslint: dashboard/node_modules
50 | @$(RUN_DASHBOARD) npm run eslint
51 |
52 | dashboard-build-docker:
53 | @$(COMPOSE) pull --parallel
54 | @$(COMPOSE) build --pull --parallel $(COMPOSE_BUILD_OPT)
55 |
56 | down: ## Stop and remove containers, networks, images, and volumes
57 | @$(COMPOSE) down --remove-orphans
58 | .PHONY: down
59 |
60 | help:
61 | @grep -hE '(^[a-zA-Z_-]+:.*?##.*$$)|(^###)' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[32m%-30s\033[0m %s\n", $$1, $$2}' | sed -e 's/\[32m##/[33m\n/'
62 | .PHONY: help
63 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Blackfire Profiler for Go
2 | =========================
3 |
4 | Welcome to the [Blackfire](https://blackfire.io) profiler SDK for Go!
5 |
6 | To integrate Blackfire into your Go applications, [read the
7 | documentation](https://docs.blackfire.io/go/integrations/sdk).
8 |
--------------------------------------------------------------------------------
/agent_client.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | // TODO: AgentTimeout
4 |
5 | import (
6 | "bytes"
7 | "encoding/base64"
8 | "encoding/json"
9 | "fmt"
10 | "io/ioutil"
11 | "net/http"
12 | "net/url"
13 | "os"
14 | "path"
15 | "regexp"
16 | "runtime"
17 | "strconv"
18 | "strings"
19 |
20 | "github.com/blackfireio/go-blackfire/bf_format"
21 | "github.com/blackfireio/go-blackfire/pprof_reader"
22 | "github.com/blackfireio/osinfo"
23 | "github.com/rs/zerolog"
24 | )
25 |
26 | type agentClient struct {
27 | agentNetwork string
28 | agentAddress string
29 | signingEndpoint *url.URL
30 | signingAuth string
31 | serverID string
32 | serverToken string
33 | links []*linksMap
34 | profiles []*Profile
35 | logger *zerolog.Logger
36 | signingResponse *signingResponseData
37 | signingResponseIsConsumed bool
38 | }
39 |
40 | type linksMap map[string]map[string]string
41 |
42 | func NewAgentClient(configuration *Configuration) (*agentClient, error) {
43 | agentNetwork, agentAddress, err := parseNetworkAddressString(configuration.AgentSocket)
44 | if err != nil {
45 | return nil, err
46 | }
47 |
48 | signingEndpoint := configuration.HTTPEndpoint
49 | signingEndpoint.Path = path.Join(signingEndpoint.Path, "/api/v1/signing")
50 |
51 | signingResponse, err := signingResponseFromBFQuery(configuration.BlackfireQuery)
52 | if err != nil {
53 | return nil, err
54 | }
55 |
56 | a := &agentClient{
57 | agentNetwork: agentNetwork,
58 | agentAddress: agentAddress,
59 | signingEndpoint: signingEndpoint,
60 | signingAuth: fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(configuration.ClientID+":"+configuration.ClientToken))),
61 | links: make([]*linksMap, 10),
62 | profiles: make([]*Profile, 10),
63 | logger: configuration.Logger,
64 | serverID: configuration.ServerID,
65 | serverToken: configuration.ServerToken,
66 | signingResponse: signingResponse,
67 | signingResponseIsConsumed: signingResponse == nil,
68 | }
69 | return a, nil
70 | }
71 |
72 | func (c *agentClient) CurrentBlackfireQuery() (string, error) {
73 | if err := c.updateSigningRequest(); err != nil {
74 | return "", err
75 | }
76 | return c.signingResponse.QueryString, nil
77 | }
78 |
79 | func (c *agentClient) LastProfiles() []*Profile {
80 | profiles := []*Profile{}
81 | for _, profile := range c.profiles {
82 | if profile == nil {
83 | continue
84 | }
85 | c.logger.Debug().Msgf("Blackfire: Get profile data for %s", profile.UUID)
86 | if err := profile.load(c.signingAuth); err != nil {
87 | c.logger.Debug().Msgf("Blackfire: Unable to get profile data for %s: %s", profile.UUID, err)
88 | continue
89 | }
90 | profiles = append(profiles, profile)
91 | }
92 | return profiles
93 | }
94 |
95 | func (c *agentClient) ProbeOptions() bf_format.ProbeOptions {
96 | return c.signingResponse.Options
97 | }
98 |
99 | func (c *agentClient) getGoVersion() string {
100 | return fmt.Sprintf("go-%s", runtime.Version()[2:])
101 | }
102 |
103 | func (c *agentClient) getBlackfireProbeHeader(hasBlackfireYaml bool) string {
104 | builder := strings.Builder{}
105 | builder.WriteString(c.getGoVersion())
106 | if hasBlackfireYaml {
107 | builder.WriteString(", blackfire_yml")
108 | }
109 | if c.signingResponse.Options.IsTimespanFlagSet() {
110 | builder.WriteString(", timespan")
111 | }
112 | return builder.String()
113 | }
114 |
115 | func (c *agentClient) loadBlackfireYaml() (data []byte, err error) {
116 | filenames := []string{".blackfire.yml", ".blackfire.yaml"}
117 |
118 | var filename string
119 | for _, filename = range filenames {
120 | if data, err = ioutil.ReadFile(filename); err == nil {
121 | c.logger.Debug().Msgf("Loaded %s", filename)
122 | break
123 | } else if os.IsNotExist(err) {
124 | c.logger.Debug().Msgf("%s does not exist", filename)
125 | } else {
126 | return nil, err
127 | }
128 | }
129 | if os.IsNotExist(err) {
130 | err = nil
131 | }
132 | return
133 | }
134 |
135 | func (c *agentClient) sendBlackfireYaml(conn *agentConnection, contents []byte) (err error) {
136 | if err = conn.WriteStringHeader("Blackfire-Yaml-Size", strconv.Itoa(len(contents))); err != nil {
137 | return
138 | }
139 |
140 | c.logger.Debug().Str("blackfire.yml", string(contents)).Msgf("Send blackfire.yml, size %d", len(contents))
141 | err = conn.WriteRawData(contents)
142 | return
143 | }
144 |
145 | func (c *agentClient) sendProfilePrologue(conn *agentConnection) (err error) {
146 | // https://private.blackfire.io/knowledge-base/protocol/profiler/04-sending.html
147 | bfQuery, err := c.CurrentBlackfireQuery()
148 | if err != nil {
149 | return
150 | }
151 |
152 | var osVersion url.Values
153 | if osVersion, err = getProfileOSHeaderValue(); err != nil {
154 | return
155 | }
156 |
157 | var blackfireYaml []byte
158 | if blackfireYaml, err = c.loadBlackfireYaml(); err != nil {
159 | return
160 | }
161 | hasBlackfireYaml := blackfireYaml != nil
162 |
163 | // These must be done separately from the rest of the headers because they
164 | // either must be sent in a specific order, or use nonstandard encoding.
165 | var orderedHeaders []string
166 | if c.serverID != "" && c.serverToken != "" {
167 | orderedHeaders = append(orderedHeaders, fmt.Sprintf("Blackfire-Auth: %v:%v", c.serverID, c.serverToken))
168 | }
169 | orderedHeaders = append(orderedHeaders, fmt.Sprintf("Blackfire-Query: %s", bfQuery))
170 | orderedHeaders = append(orderedHeaders, fmt.Sprintf("Blackfire-Probe: %s", c.getBlackfireProbeHeader(hasBlackfireYaml)))
171 |
172 | unorderedHeaders := make(map[string]interface{})
173 | unorderedHeaders["os-version"] = osVersion
174 |
175 | // We've now consumed the current Blackfire query, and must fetch a new one next time.
176 | c.signingResponseIsConsumed = true
177 |
178 | // Send the ordered headers first, then wait for the Blackfire-Response,
179 | // then send the unordered headers.
180 | if err = conn.WriteOrderedHeaders(orderedHeaders); err != nil {
181 | return
182 | }
183 |
184 | if hasBlackfireYaml {
185 | if err = conn.WriteEndOfHeaders(); err != nil {
186 | return
187 | }
188 |
189 | var responseName string
190 | var responseValue string
191 | if responseName, responseValue, err = conn.ReadEncodedHeader(); err != nil {
192 | return
193 | }
194 | switch responseName {
195 | case "Blackfire-Response":
196 | var values url.Values
197 | if values, err = url.ParseQuery(responseValue); err != nil {
198 | return
199 | }
200 | if result := values.Get("blackfire_yml"); result == "true" {
201 | if err = c.sendBlackfireYaml(conn, blackfireYaml); err != nil {
202 | return
203 | }
204 | }
205 | case "Blackfire-Error":
206 | return fmt.Errorf(strings.TrimSpace(responseValue))
207 | default:
208 | return fmt.Errorf("Unexpected agent response: %s", responseValue)
209 | }
210 | }
211 |
212 | if err = conn.WriteHeaders(unorderedHeaders); err != nil {
213 | return
214 | }
215 | err = conn.WriteEndOfHeaders()
216 | return
217 | }
218 |
219 | func (c *agentClient) SendProfile(profile *pprof_reader.Profile, title string) (err error) {
220 | var conn *agentConnection
221 | if conn, err = newAgentConnection(c.agentNetwork, c.agentAddress, c.logger); err != nil {
222 | return
223 | }
224 | defer func() {
225 | if err == nil {
226 | c.logger.Debug().Msgf("Profile sent")
227 | err = conn.Close()
228 | } else {
229 | // We want the error that occurred earlier, not an error from close.
230 | conn.Close()
231 | }
232 | }()
233 |
234 | if err = c.sendProfilePrologue(conn); err != nil {
235 | return
236 | }
237 |
238 | var response http.Header
239 | if response, err = conn.ReadResponse(); err != nil {
240 | return err
241 | }
242 | if response.Get("Blackfire-Error") != "" {
243 | return fmt.Errorf("Blackfire-Error: %s", response.Get("Blackfire-Error"))
244 | }
245 |
246 | profileBuffer := new(bytes.Buffer)
247 | if err := bf_format.WriteBFFormat(profile, profileBuffer, c.ProbeOptions(), title); err != nil {
248 | return err
249 | }
250 | encodedProfile := profileBuffer.Bytes()
251 |
252 | c.logger.Debug().Str("contents", string(encodedProfile)).Msg("Blackfire: Send profile")
253 | if err = conn.WriteRawData(encodedProfile); err != nil {
254 | return
255 | }
256 |
257 | return
258 | }
259 |
260 | func (c *agentClient) updateSigningRequest() (err error) {
261 | if !c.signingResponseIsConsumed {
262 | return
263 | }
264 |
265 | var response *http.Response
266 | c.logger.Debug().Msgf("Blackfire: Get authorization from %s", c.signingEndpoint)
267 | request, err := http.NewRequest("POST", c.signingEndpoint.String(), nil)
268 | if err != nil {
269 | return
270 | }
271 | request.Header.Add("Authorization", c.signingAuth)
272 | c.logger.Debug().Msg("Blackfire: Send signing request")
273 | client := http.DefaultClient
274 | response, err = client.Do(request)
275 | if err != nil {
276 | return
277 | }
278 | if response.StatusCode != 201 {
279 | return fmt.Errorf("Signing request to %s failed: %s", c.signingEndpoint, response.Status)
280 | }
281 | var responseData []byte
282 | responseData, err = ioutil.ReadAll(response.Body)
283 | if err != nil {
284 | return
285 | }
286 | c.logger.Debug().Interface("response", string(responseData)).Msg("Blackfire: Receive signing response")
287 | err = json.Unmarshal(responseData, &c.signingResponse)
288 | if err != nil {
289 | return fmt.Errorf("JSON error: %v", err)
290 | }
291 | if c.signingResponse.QueryString == "" {
292 | return fmt.Errorf("Signing response blackfire query was empty")
293 | }
294 | profileURL, ok := c.signingResponse.Links["profile"]
295 | if !ok {
296 | return fmt.Errorf("Signing response blackfire profile URL was empty")
297 | }
298 | c.links = append([]*linksMap{&c.signingResponse.Links}, c.links[:9]...)
299 | c.profiles = append([]*Profile{{
300 | UUID: c.signingResponse.UUID,
301 | URL: c.signingResponse.Links["graph_url"]["href"],
302 | APIURL: profileURL["href"],
303 | }}, c.profiles[:9]...)
304 |
305 | c.signingResponseIsConsumed = false
306 |
307 | return
308 | }
309 |
310 | var nonOptionQueryFields = map[string]bool{
311 | "expires": true,
312 | "userId": true,
313 | "agentIds": true,
314 | "collabToken": true,
315 | "signature": true,
316 | }
317 |
318 | func signingResponseFromBFQuery(query string) (response *signingResponseData, err error) {
319 | if query == "" {
320 | return
321 | }
322 | values, err := url.ParseQuery(query)
323 | if err != nil {
324 | return
325 | }
326 |
327 | firstValue := func(values url.Values, key string) string {
328 | if vArr := values[key]; vArr != nil {
329 | if len(vArr) > 0 {
330 | return vArr[0]
331 | }
332 | }
333 | return ""
334 | }
335 |
336 | expires, err := strconv.ParseUint(firstValue(values, "expires"), 10, 64)
337 | if err != nil {
338 | return
339 | }
340 |
341 | response = newSigningResponseData()
342 | response.Agents = values["agentIds"]
343 | response.CollabToken = firstValue(values, "collabToken")
344 | response.Expires = expires
345 | response.QueryString = query
346 | response.Signature = firstValue(values, "signature")
347 | response.UserID = firstValue(values, "userId")
348 |
349 | for key, arrValues := range values {
350 | if nonOptionQueryFields[key] {
351 | continue
352 | }
353 | if len(arrValues) < 1 {
354 | continue
355 | }
356 | response.Options[key] = arrValues[0]
357 | }
358 |
359 | return
360 | }
361 |
// parseNetworkAddressString splits an agent socket spec of the form
// "network://address" (e.g. "tcp://127.0.0.1:8307" or
// "unix:///var/run/blackfire/agent.sock") into its network and address parts.
func parseNetworkAddressString(agentSocket string) (network string, address string, err error) {
	match := regexp.MustCompile(`^([^:]+)://(.*)`).FindStringSubmatch(agentSocket)
	if match == nil {
		err = fmt.Errorf("Could not parse agent socket value: [%v]", agentSocket)
		return
	}
	return match[1], match[2], nil
}
373 |
374 | func getProfileOSHeaderValue() (values url.Values, err error) {
375 | var info *osinfo.OSInfo
376 | info, err = osinfo.GetOSInfo()
377 | if err != nil {
378 | return
379 | }
380 |
381 | values = make(url.Values)
382 | values["family"] = []string{info.Family}
383 | values["arch"] = []string{info.Architecture}
384 | values["id"] = []string{info.ID}
385 | values["version"] = []string{info.Version}
386 | if len(info.Codename) > 0 {
387 | values["codename"] = []string{info.Codename}
388 | }
389 | if len(info.Build) > 0 {
390 | values["build"] = []string{info.Build}
391 | }
392 |
393 | return values, nil
394 | }
395 |
396 | type signingResponseData struct {
397 | UserID string `json:"userId"`
398 | ProfileSlot string `json:"profileSlot"`
399 | CollabToken string `json:"collabToken"`
400 | Agents []string `json:"agents"`
401 | Expires uint64 `json:"expires,string"`
402 | Signature string `json:"signature"`
403 | Options bf_format.ProbeOptions `json:"options"`
404 | Links linksMap `json:"_links"`
405 | UUID string `json:"uuid"`
406 | QueryString string `json:"query_string"`
407 | }
408 |
409 | func newSigningResponseData() *signingResponseData {
410 | s := new(signingResponseData)
411 | s.Options = make(bf_format.ProbeOptions)
412 | return s
413 | }
414 |
--------------------------------------------------------------------------------
/agent_connection.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "io"
7 | "net"
8 | "net/http"
9 | "net/textproto"
10 | "net/url"
11 | "regexp"
12 |
13 | "github.com/rs/zerolog"
14 | )
15 |
16 | var headerRegex *regexp.Regexp = regexp.MustCompile(`^([^:]+):(.*)`)
17 |
18 | type agentConnection struct {
19 | conn net.Conn
20 | reader *bufio.Reader
21 | writer *bufio.Writer
22 | logger *zerolog.Logger
23 | }
24 |
25 | func newAgentConnection(network, address string, logger *zerolog.Logger) (*agentConnection, error) {
26 | c := &agentConnection{
27 | logger: logger,
28 | }
29 | err := c.Init(network, address)
30 | return c, err
31 | }
32 |
33 | func (c *agentConnection) Init(network, address string) (err error) {
34 | if c.conn, err = net.Dial(network, address); err != nil {
35 | return
36 | }
37 |
38 | c.reader = bufio.NewReader(c.conn)
39 | c.writer = bufio.NewWriter(c.conn)
40 | return
41 | }
42 |
43 | func (c *agentConnection) ReadEncodedHeader() (name string, urlEncodedValue string, err error) {
44 | line, err := c.reader.ReadString('\n')
45 | if err != nil {
46 | return
47 | }
48 | if line == "\n" {
49 | return
50 | }
51 | c.logger.Debug().Str("read header", line).Msgf("Recv header")
52 | matches := headerRegex.FindAllStringSubmatch(line, -1)
53 | if matches == nil {
54 | err = fmt.Errorf("Could not parse header: [%s]", line)
55 | return
56 | }
57 | name = matches[0][1]
58 | urlEncodedValue = matches[0][2]
59 | return
60 | }
61 |
62 | func (c *agentConnection) ReadResponse() (http.Header, error) {
63 | tp := textproto.NewReader(c.reader)
64 | mimeHeader, err := tp.ReadMIMEHeader()
65 | if err != nil {
66 | if err == io.EOF {
67 | err = io.ErrUnexpectedEOF
68 | }
69 | return nil, err
70 | }
71 | return http.Header(mimeHeader), nil
72 | }
73 |
74 | func (c *agentConnection) WriteEncodedHeader(name string, urlEncodedValue string) error {
75 | line := fmt.Sprintf("%s: %s\n", name, urlEncodedValue)
76 | c.logger.Debug().Str("write header", line).Msgf("Send header")
77 | _, err := c.writer.WriteString(line)
78 | return err
79 | }
80 |
81 | func (c *agentConnection) WriteStringHeader(name string, value string) error {
82 | return c.WriteEncodedHeader(name, url.QueryEscape(value))
83 | }
84 |
85 | func (c *agentConnection) WriteMapHeader(name string, values url.Values) error {
86 | return c.WriteEncodedHeader(name, values.Encode())
87 | }
88 |
89 | // Write headers in a specific order.
90 | // The headers are assumed to be formatted and URL encoded properly.
91 | func (c *agentConnection) WriteOrderedHeaders(encodedHeaders []string) error {
92 | for _, header := range encodedHeaders {
93 | c.logger.Debug().Str("write header", header).Msgf("Send ordered header")
94 | if _, err := c.writer.WriteString(header); err != nil {
95 | return err
96 | }
97 | if _, err := c.writer.WriteString("\n"); err != nil {
98 | return err
99 | }
100 | }
101 | return nil
102 | }
103 |
104 | func (c *agentConnection) WriteHeaders(nonEncodedHeaders map[string]interface{}) error {
105 | for k, v := range nonEncodedHeaders {
106 | if asString, ok := v.(string); ok {
107 | if err := c.WriteStringHeader(k, asString); err != nil {
108 | return err
109 | }
110 | } else {
111 | if err := c.WriteMapHeader(k, v.(url.Values)); err != nil {
112 | return err
113 | }
114 | }
115 | }
116 | return nil
117 | }
118 |
119 | func (c *agentConnection) WriteEndOfHeaders() (err error) {
120 | c.logger.Debug().Msgf("Send end-of-headers")
121 | if _, err = c.writer.WriteString("\n"); err != nil {
122 | return
123 | }
124 | return c.Flush()
125 | }
126 |
127 | func (c *agentConnection) WriteRawData(data []byte) error {
128 | _, err := c.writer.Write(data)
129 | return err
130 | }
131 |
132 | func (c *agentConnection) Flush() error {
133 | return c.writer.Flush()
134 | }
135 |
136 | func (c *agentConnection) Close() error {
137 | c.Flush()
138 | return c.conn.Close()
139 | }
140 |
--------------------------------------------------------------------------------
/api.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "errors"
5 | "time"
6 | )
7 |
8 | var ProfilerErrorAlreadyProfiling = errors.New("A Blackfire profile is currently in progress. Please wait for it to finish.")
9 |
10 | // Configure explicitly configures the probe. This should be done before any other API calls.
11 | //
12 | // Configuration is initialized in a set order, with later steps overriding
13 | // earlier steps:
14 | //
15 | // * Defaults
16 | // * INI file
17 | // * Explicit configuration in Go code
18 | // * Environment variables
19 | //
20 | // config will be ignored if nil.
21 | func Configure(config *Configuration) {
22 | globalProbe.Configure(config)
23 | }
24 |
25 | // IsProfiling checks if the profiler is running. Only one profiler may run at a time.
26 | func IsProfiling() bool {
27 | return globalProbe.IsProfiling()
28 | }
29 |
30 | // EnableNowFor profiles the current process for the specified duration, then
31 | // connects to the agent and uploads the generated profile.
32 | func EnableNowFor(duration time.Duration) Ender {
33 | globalProbe.EnableNowFor(duration)
34 | return globalProbe.ender
35 | }
36 |
37 | // EnableNow starts profiling. Profiling will continue until you call StopProfiling().
38 | // If you forget to stop profiling, it will automatically stop after the maximum
39 | // allowed duration (DefaultMaxProfileDuration or whatever you set via SetMaxProfileDuration()).
40 | func EnableNow() Ender {
41 | globalProbe.EnableNow()
42 | return globalProbe.ender
43 | }
44 |
45 | // Enable() only profiles when triggered from an external event (like using blackfire run).
46 | func Enable() Ender {
47 | globalProbe.Enable()
48 | return globalProbe.ender
49 | }
50 |
51 | // Disable stops profiling.
52 | func Disable() {
53 | globalProbe.Disable()
54 | }
55 |
56 | // End ends the current profile, then blocks until the result is uploaded
57 | // to the agent.
58 | func End() {
59 | globalProbe.End()
60 | }
61 |
62 | // EndNoWait stops profiling, then uploads the result to the agent in a separate
63 | // goroutine. You must ensure that the program does not exit before uploading
64 | // is complete. If you can't make such a guarantee, use End() instead.
65 | func EndNoWait() {
66 | globalProbe.EndNoWait()
67 | }
68 |
69 | // GenerateSubProfileQuery generates a Blackfire query
70 | // to attach a subprofile with the current one as a parent
71 | func GenerateSubProfileQuery() (string, error) {
72 | return globalProbe.GenerateSubProfileQuery()
73 | }
74 |
75 | // SetCurrentTitle Sets the title to use for following profiles
76 | func SetCurrentTitle(title string) {
77 | globalProbe.SetCurrentTitle(title)
78 | }
79 |
80 | // globalProbe is the access point for all probe functionality. The API, signal,
81 | // and HTTP interfaces perform all operations by proxying to globalProbe. This
82 | // ensures that mutexes and other guards are respected, and no interface can
83 | // trigger functionality that others can't, or in a way that others can't.
84 | var globalProbe = newProbe()
85 |
--------------------------------------------------------------------------------
/bf_format/bf_writer.go:
--------------------------------------------------------------------------------
1 | package bf_format
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "io"
7 | "net/url"
8 | "os"
9 | "runtime"
10 | "strconv"
11 | "strings"
12 |
13 | "github.com/blackfireio/go-blackfire/pprof_reader"
14 | "github.com/blackfireio/osinfo"
15 | )
16 |
17 | // Write a parsed profile out as a Blackfire profile.
18 | func WriteBFFormat(profile *pprof_reader.Profile, w io.Writer, options ProbeOptions, title string) (err error) {
19 | const headerCostDimensions = "cpu pmu"
20 | const headerProfiledLanguage = "go"
21 | const headerProfilerType = "statistical"
22 |
23 | osInfo, err := osinfo.GetOSInfo()
24 | if err != nil {
25 | return
26 | }
27 |
28 | headers := make(map[string]string)
29 | headers["Cost-Dimensions"] = headerCostDimensions
30 | headers["graph-root-id"] = "go"
31 | headers["probed-os"] = osInfo.Name
32 | headers["profiler-type"] = headerProfilerType
33 | headers["probed-language"] = headerProfiledLanguage
34 | headers["probed-runtime"] = runtime.Version()
35 | headers["probed-cpu-sample-rate"] = strconv.Itoa(profile.CpuSampleRateHz)
36 | headers["probed-features"] = generateProbedFeaturesHeader(options)
37 | headers["Context"] = generateContextHeader()
38 |
39 | if title != "" {
40 | headers["Profile-Title"] = fmt.Sprintf(`{"blackfire-metadata":{"title":"%s"}}`, title)
41 | }
42 |
43 | bufW := bufio.NewWriter(w)
44 | defer func() {
45 | bufErr := bufW.Flush()
46 | if err != nil {
47 | err = bufErr
48 | }
49 | }()
50 |
51 | if _, err = bufW.WriteString("file-format: BlackfireProbe\n"); err != nil {
52 | return
53 | }
54 |
55 | // Begin headers
56 | for k, v := range headers {
57 | if _, err = bufW.WriteString(fmt.Sprintf("%s: %s\n", k, v)); err != nil {
58 | return
59 | }
60 | }
61 |
62 | if options.IsTimespanFlagSet() {
63 | if err = writeTimelineData(profile, bufW); err != nil {
64 | return
65 | }
66 | }
67 |
68 | // End of headers
69 | if _, err = bufW.WriteString("\n"); err != nil {
70 | return
71 | }
72 |
73 | // Profile data
74 | err = writeSamples(profile, bufW)
75 |
76 | return
77 | }
78 |
// generateContextHeaderFromArgs builds the "Context" header value from a
// command line: "script=<argv0>&argv[0]=<argv0>&argv[1]=…", URL-encoded.
// Note that argv[0] intentionally repeats the script value.
func generateContextHeaderFromArgs(args []string) string {
	var b strings.Builder
	b.WriteString("script=")
	b.WriteString(url.QueryEscape(args[0]))
	for i, arg := range args {
		b.WriteByte('&')
		b.WriteString(url.QueryEscape(fmt.Sprintf("argv[%d]", i)))
		b.WriteByte('=')
		b.WriteString(url.QueryEscape(arg))
	}
	return b.String()
}
91 |
92 | func generateContextHeader() string {
93 | return generateContextHeaderFromArgs(os.Args)
94 | }
95 |
96 | func writeSamples(profile *pprof_reader.Profile, bufW *bufio.Writer) (err error) {
97 | totalCPUTime := uint64(0)
98 | totalMemUsage := uint64(0)
99 |
100 | for _, sample := range profile.Samples {
101 | totalCPUTime += sample.CPUTime
102 |
103 | if len(sample.Stack) == 0 {
104 | continue
105 | }
106 |
107 | // Fake "go" top-of-stack
108 | if _, err = bufW.WriteString(fmt.Sprintf("go==>%s//%d %d %d\n",
109 | sample.Stack[0].Name,
110 | sample.Count, sample.CPUTime, sample.MemUsage)); err != nil {
111 | return
112 | }
113 |
114 | stackMemUsage := uint64(0)
115 | // Skip index 0 because every edge needs a begin and end node
116 | for iStack := len(sample.Stack) - 1; iStack > 0; iStack-- {
117 | f := sample.Stack[iStack]
118 | edgeMemCost := f.DistributedMemoryCost * uint64(sample.Count)
119 | totalMemUsage += edgeMemCost
120 | stackMemUsage += edgeMemCost
121 |
122 | fPrev := sample.Stack[iStack-1]
123 | if _, err = bufW.WriteString(fmt.Sprintf("%s==>%s//%d %d %d\n",
124 | fPrev.Name, f.Name,
125 | sample.Count, sample.CPUTime, stackMemUsage)); err != nil {
126 | return
127 | }
128 | }
129 | }
130 |
131 | if _, err = bufW.WriteString(fmt.Sprintf("==>go//%d %d %d\n", 1, totalCPUTime, totalMemUsage)); err != nil {
132 | return
133 | }
134 |
135 | return
136 | }
137 |
138 | type timelineEntry struct {
139 | Parent *pprof_reader.Function
140 | Function *pprof_reader.Function
141 | CPUStart uint64
142 | CPUEnd uint64
143 | MemStart uint64
144 | MemEnd uint64
145 | }
146 |
147 | func (t *timelineEntry) String() string {
148 | return fmt.Sprintf("%v==>%v", t.Parent, t.Function)
149 | }
150 |
151 | func writeTimelineData(profile *pprof_reader.Profile, bufW *bufio.Writer) (err error) {
152 | tlEntriesByEndTime := make([]*timelineEntry, 0, 10)
153 |
154 | // Insert 2-level fake root so that the timeline visualizer has "go" as the
155 | // top of the stack.
156 | fakeStackTop := []*pprof_reader.Function{
157 | &pprof_reader.Function{
158 | Name: "golang",
159 | ReferenceCount: 1,
160 | },
161 | &pprof_reader.Function{
162 | Name: "go",
163 | ReferenceCount: 1,
164 | },
165 | }
166 |
167 | var alteredSamples []*pprof_reader.Sample
168 | for _, sample := range profile.Samples {
169 | newStack := make([]*pprof_reader.Function, 0, len(sample.Stack)+len(fakeStackTop))
170 | newStack = append(newStack, fakeStackTop...)
171 | newStack = append(newStack, sample.Stack...)
172 | alteredSamples = append(alteredSamples, sample.CloneWithStack(newStack))
173 | }
174 | profile = profile.CloneWithSamples(alteredSamples)
175 |
176 | // Keeps track of the currently "active" functions as we move from stack to stack.
177 | activeTLEntries := make(map[string]*timelineEntry)
178 | // Since these are fake, we need to manually add them to the active list.
179 | for _, f := range fakeStackTop {
180 | activeTLEntries[f.Name] = &timelineEntry{}
181 | }
182 |
183 | prevSample := &pprof_reader.Sample{}
184 | currentCPUTime := uint64(0)
185 | lastMatchStackIndex := 0
186 | for _, nowSample := range profile.Samples {
187 | prevStackEnd := len(prevSample.Stack) - 1
188 | nowStackEnd := len(nowSample.Stack) - 1
189 | shortestStackEnd := prevStackEnd
190 | if nowStackEnd < shortestStackEnd {
191 | shortestStackEnd = nowStackEnd
192 | }
193 |
194 | // Find the last index where the previous and current stack are in the same function.
195 | lastMatchStackIndex = 0
196 | for i := 0; i <= shortestStackEnd; i++ {
197 | if nowSample.Stack[i].Name != prevSample.Stack[i].Name {
198 | break
199 | }
200 | tlEntry := activeTLEntries[nowSample.Stack[i].Name]
201 | tlEntry.CPUEnd += nowSample.CPUTime
202 | lastMatchStackIndex = i
203 | }
204 |
205 | // If the previous stack has entries that the current does not, those
206 | // functions have now ended. Mark them ended in leaf-to-root order.
207 | if lastMatchStackIndex < prevStackEnd {
208 | for i := prevStackEnd; i > lastMatchStackIndex; i-- {
209 | functionName := prevSample.Stack[i].Name
210 | tlEntry := activeTLEntries[functionName]
211 | activeTLEntries[functionName] = nil
212 | tlEntriesByEndTime = append(tlEntriesByEndTime, tlEntry)
213 | }
214 | }
215 |
216 | // If the current stack has entries that the previous does not, they
217 | // are newly invoked functions, so mark them started.
218 | if lastMatchStackIndex < nowStackEnd {
219 | for i := lastMatchStackIndex + 1; i <= nowStackEnd; i++ {
220 | tlEntry := &timelineEntry{
221 | Parent: nowSample.Stack[i-1],
222 | Function: nowSample.Stack[i],
223 | MemStart: nowSample.MemUsage,
224 | MemEnd: nowSample.MemUsage,
225 | CPUStart: currentCPUTime,
226 | CPUEnd: currentCPUTime + nowSample.CPUTime,
227 | }
228 | activeTLEntries[tlEntry.Function.Name] = tlEntry
229 | }
230 | }
231 |
232 | currentCPUTime += nowSample.CPUTime
233 | prevSample = nowSample
234 | }
235 |
236 | // Artificially end all still-active functions because the profile is ended.
237 | // Like before, this must be done in leaf-to-root order.
238 | for i := lastMatchStackIndex; i >= 1; i-- {
239 | tlEntry := activeTLEntries[prevSample.Stack[i].Name]
240 | tlEntriesByEndTime = append(tlEntriesByEndTime, tlEntry)
241 | }
242 |
243 | for i, entry := range tlEntriesByEndTime {
244 | name := entry.Function.Name
245 |
246 | if entry.Parent != nil {
247 | pName := entry.Parent.Name
248 |
249 | if _, err = bufW.WriteString(fmt.Sprintf("Threshold-%d-start: %s==>%s//%d %d\n", i, pName, name, entry.CPUStart, entry.MemStart)); err != nil {
250 | return
251 | }
252 | if _, err = bufW.WriteString(fmt.Sprintf("Threshold-%d-end: %s==>%s//%d %d\n", i, pName, name, entry.CPUEnd, entry.MemEnd)); err != nil {
253 | return
254 | }
255 | } else {
256 | if _, err = bufW.WriteString(fmt.Sprintf("Threshold-%d-start: %s//%d %d\n", i, name, entry.CPUStart, entry.MemStart)); err != nil {
257 | return
258 | }
259 | if _, err = bufW.WriteString(fmt.Sprintf("Threshold-%d-end: %s//%d %d\n", i, name, entry.CPUEnd, entry.MemEnd)); err != nil {
260 | return
261 | }
262 | }
263 | }
264 |
265 | return
266 | }
267 |
// allowedProbedFeatures is the allow-list of probe option names that may be
// echoed back to the agent in the probed-features header; any option not
// listed here is dropped by generateProbedFeaturesHeader.
var allowedProbedFeatures = map[string]bool{
	"signature":               true,
	"expires":                 true,
	"agentIds":                true,
	"auto_enable":             true,
	"aggreg_samples":          true,
	"flag_cpu":                true,
	"flag_memory":             true,
	"flag_no_builtins":        true,
	"flag_nw":                 true,
	"flag_fn_args":            true,
	"flag_timespan":           true,
	"flag_pdo":                true,
	"flag_sessions":           true,
	"flag_yml":                true,
	"flag_composer":           true,
	"config_yml":              true,
	"profile_title":           true,
	"sub_profile":             true,
	"timespan_threshold":      true,
	"no_pruning":              true,
	"no_signature_forwarding": true,
	"no_anon":                 true,
}
292 |
293 | func isAllowedProbedFeature(name string) bool {
294 | _, ok := allowedProbedFeatures[name]
295 | return ok
296 | }
297 |
298 | func generateProbedFeaturesHeader(options ProbeOptions) string {
299 | var builder strings.Builder
300 | firstItem := true
301 | for k, v := range options {
302 | if !isAllowedProbedFeature(k) {
303 | continue
304 | }
305 | if !firstItem {
306 | builder.WriteString("&")
307 | }
308 | builder.WriteString(fmt.Sprintf("%v=%v", k, v))
309 | firstItem = false
310 | }
311 | return builder.String()
312 | }
313 |
// ProbeOptions holds the decoded probe options; values keep whatever dynamic
// type the JSON decoder happened to produce.
type ProbeOptions map[string]interface{}

// getOption returns the raw value stored under name, or nil when absent.
func (p ProbeOptions) getOption(name string) interface{} {
	value, found := p[name]
	if !found {
		return nil
	}
	return value
}

// IsTimespanFlagSet reports whether the flag_timespan option equals 1.
// Super ugly, but the actual type can be anything the json decoder chooses,
// so we must go by its string representation.
func (p ProbeOptions) IsTimespanFlagSet() bool {
	return fmt.Sprintf("%v", p.getOption("flag_timespan")) == "1"
}
328 |
--------------------------------------------------------------------------------
/bf_format/bf_writer_test.go:
--------------------------------------------------------------------------------
1 | package bf_format
2 |
3 | import (
4 | "bytes"
5 | "runtime"
6 | "strconv"
7 | "strings"
8 | "testing"
9 |
10 | "github.com/blackfireio/go-blackfire/pprof_reader"
11 | "github.com/blackfireio/osinfo"
12 | "github.com/stretchr/testify/assert"
13 | )
14 |
15 | type Headers map[string]interface{}
16 |
17 | func TestGenerateContextStringFromSlice(t *testing.T) {
18 | args := []string{"./test", "--bar"}
19 | expected := "script=.%2Ftest&argv%5B0%5D=.%2Ftest&argv%5B1%5D=--bar"
20 | got := generateContextHeaderFromArgs(args)
21 | if expected != got {
22 | t.Errorf("generateContextStringFromSlice: Expected %v. Got %v", expected, got)
23 | }
24 | }
25 |
26 | func TestProbeOptionsAccessors(t *testing.T) {
27 | assert := assert.New(t)
28 | options := make(ProbeOptions)
29 |
30 | assert.Nil(options.getOption("unknown"))
31 |
32 | expectedValue := "This is a string"
33 | options["my-key"] = expectedValue
34 | assert.Equal(expectedValue, options.getOption("my-key"))
35 |
36 | assert.False(options.IsTimespanFlagSet())
37 |
38 | options["flag_timespan"] = 0
39 | assert.False(options.IsTimespanFlagSet())
40 |
41 | options["flag_timespan"] = 1
42 | assert.True(options.IsTimespanFlagSet())
43 | }
44 |
// TestWriteBFFormat runs WriteBFFormat over a table of profile/options/title
// combinations and checks both the emitted headers and the emitted body.
func TestWriteBFFormat(t *testing.T) {
	// Minimal non-empty profile: a single sample costing 100 CPU units.
	validProfile := pprof_reader.NewProfile()
	validProfile.CpuSampleRateHz = 42
	validProfile.Samples = append(validProfile.Samples, &pprof_reader.Sample{
		Count:   1,
		CPUTime: 100,
	})

	cases := []struct {
		name            string
		profile         *pprof_reader.Profile
		options         ProbeOptions
		title           string
		expectedHeaders Headers // merged over defaultHeaders() before asserting
		expectedBody    string
	}{
		{
			"Empty case",
			pprof_reader.NewProfile(),
			make(ProbeOptions),
			"",
			Headers{},
			"==>go//1 0 0\n",
		},
		{
			// A title must be forwarded as JSON metadata in Profile-Title.
			"With Title",
			pprof_reader.NewProfile(),
			make(ProbeOptions),
			"This is my Title",
			Headers{
				"Profile-Title": `{"blackfire-metadata":{"title":"This is my Title"}}`,
			},
			"==>go//1 0 0\n",
		},
		{
			// Allow-listed options pass through to probed-features.
			"With Features",
			pprof_reader.NewProfile(),
			ProbeOptions{
				"signature":   "abcd",
				"auto_enable": "true",
				"no_pruning":  "false",
			},
			"",
			Headers{},
			"==>go//1 0 0\n",
		},
		{
			// Options outside the allow-list must not be forwarded.
			"With invalid features",
			pprof_reader.NewProfile(),
			ProbeOptions{
				"unknown": "true",
				"ignored": "true",
			},
			"",
			Headers{"probed-features": ProbeOptions{}},
			"==>go//1 0 0\n",
		},
		{
			"With valid profile",
			validProfile,
			ProbeOptions{},
			"",
			Headers{},
			"==>go//1 100 0\n",
		},
		{
			// Allowed features kept, unknown ones dropped, title forwarded.
			"All mixed",
			validProfile,
			ProbeOptions{
				"signature":  "abcd",
				"unknown":    "true",
				"no_pruning": "false",
				"ignored":    "true",
			},
			"My-title",
			Headers{
				"probed-features": ProbeOptions{
					"signature":  "abcd",
					"no_pruning": "false",
				},
				"Profile-Title": `{"blackfire-metadata":{"title":"My-title"}}`,
			},
			"==>go//1 100 0\n",
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// Expected headers are the computed defaults overridden per-case.
			fullHeaders := defaultHeaders(c.profile, c.options, c.expectedHeaders)
			_TestWriteBFFormat(t, c.profile, c.options, c.title, fullHeaders, c.expectedBody)
		})
	}
}
138 |
// _TestWriteBFFormat serializes profile with WriteBFFormat and asserts on the
// result: the file-format header comes first, headers and body are separated
// by exactly one blank line, and both halves match the expectations.
func _TestWriteBFFormat(t *testing.T, profile *pprof_reader.Profile, options ProbeOptions, title string, expectedHeaders Headers, expectedBody string) {
	assert := assert.New(t)
	var buffer bytes.Buffer

	assert.Nil(WriteBFFormat(profile, &buffer, options, title))
	// file-format must always be first
	assert.Equal("file-format: BlackfireProbe\n", buffer.String()[:28])

	// Exactly one blank line splits the header block from the body.
	parts := strings.Split(buffer.String(), "\n\n")
	assert.Equal(2, len(parts))

	// Header order is nondeterministic, so compare as a map.
	assert.Equal(expectedHeaders, headersToMap(parts[0]))
	assert.Equal(expectedBody, parts[1])
}
153 |
154 | // headersToMap Order of headers in string is not predictable.
155 | // Then we convert them back to a map since assert library can
156 | // handle their comparison.
157 | func headersToMap(headers string) (m Headers) {
158 | m = Headers{}
159 | for _, line := range strings.Split(headers, "\n") {
160 | parts := strings.Split(line, ": ")
161 | m[parts[0]] = parts[1]
162 | }
163 | // probed-features are also built upon a map
164 | if features, found := m["probed-features"]; found {
165 | options := ProbeOptions{}
166 | if len(features.(string)) > 0 {
167 | for _, feature := range strings.Split(features.(string), "&") {
168 | parts := strings.Split(feature, "=")
169 | options[parts[0]] = parts[1]
170 | }
171 | }
172 | m["probed-features"] = options
173 | }
174 | return
175 | }
176 |
// defaultHeaders returns the full header set WriteBFFormat is expected to
// emit for profile/options, with any override entries replacing the computed
// defaults. Panics if the host OS cannot be identified (test helper).
func defaultHeaders(profile *pprof_reader.Profile, options ProbeOptions, override Headers) (headers Headers) {
	osInfo, err := osinfo.GetOSInfo()
	if err != nil {
		panic("Cannot retrieve osInfo")
	}

	headers = Headers{
		"file-format":            "BlackfireProbe",
		"Cost-Dimensions":        "cpu pmu",
		"graph-root-id":          "go",
		"probed-os":              osInfo.Name,
		"profiler-type":          "statistical",
		"probed-language":        "go",
		"probed-runtime":         runtime.Version(),
		"probed-cpu-sample-rate": strconv.Itoa(profile.CpuSampleRateHz),
		"probed-features":        options,
		"Context":                generateContextHeader(),
	}
	// Per-case expectations win over the defaults above.
	for k, v := range override {
		headers[k] = v
	}
	return
}
200 |
--------------------------------------------------------------------------------
/configuration.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "io/ioutil"
7 | "net/url"
8 | "os"
9 | "path"
10 | "path/filepath"
11 | "regexp"
12 | "runtime"
13 | "strconv"
14 | "sync"
15 | "time"
16 |
17 | "github.com/go-ini/ini"
18 | "github.com/rs/zerolog"
19 | )
20 |
// golangDefaultCPUSampleRate is the CPU sampling frequency (Hz) assumed when
// no explicit rate is configured.
// This must match the value of `hz` in StartCPUProfile in runtime/pprof/pprof.go
// It's always been 100hz since the beginning, so it should be safe.
const golangDefaultCPUSampleRate = 100
24 |
// Configuration holds every tunable of the Blackfire probe. Fields left at
// their zero value are filled in by load() from the environment, the ini
// file, and built-in defaults — in that precedence order.
type Configuration struct {
	// The configuration path to the Blackfire CLI ini file
	// Defaults to ~/.blackfire.ini
	ConfigFile string

	// Time before dropping an unresponsive agent connection (default 250ms)
	AgentTimeout time.Duration

	// The socket to use when connecting to the Blackfire agent (default depends on OS)
	AgentSocket string

	// The Blackfire query string to be sent with any profiles. This is either
	// provided by the `blackfire run` command in an ENV variable, or acquired
	// via a signing request to Blackfire. You won't need to set this manually.
	BlackfireQuery string

	// Client ID to authenticate with the Blackfire API
	ClientID string

	// Client token to authenticate with the Blackfire API
	ClientToken string

	// Server ID for Blackfire-Auth header
	ServerID string

	// Server token for Blackfire-Auth header
	ServerToken string

	// The Blackfire API endpoint the profile data will be sent to (default https://blackfire.io)
	HTTPEndpoint *url.URL

	// A zerolog Logger (default stderr)
	Logger *zerolog.Logger

	// The maximum duration of a profile. A profile operation can never exceed
	// this duration (default 10 minutes).
	// This guards against runaway profile operations.
	MaxProfileDuration time.Duration

	// Default rate at which the CPU samples are taken. Values > 500 will likely
	// exceed the abilities of most environments.
	// See https://golang.org/src/runtime/pprof/pprof.go#L727
	DefaultCPUSampleRateHz int

	// If not empty, dump the original pprof profiles to this directory whenever
	// a profile ends.
	PProfDumpDir string

	// Disables the profiler unless the BLACKFIRE_QUERY env variable is set.
	// When the profiler is disabled, all API calls become no-ops.
	onDemandOnly bool

	// loader guards the one-time configuration resolution performed by load().
	loader sync.Once
	// err caches the validate() result from load(); returned on every call.
	err error
}
80 |
81 | func (c *Configuration) canProfile() bool {
82 | if c.BlackfireQuery == "" && c.onDemandOnly {
83 | return false
84 | }
85 | return true
86 | }
87 |
88 | func (c *Configuration) setEndpoint(endpoint string) error {
89 | u, err := url.Parse(endpoint)
90 | if err != nil {
91 | return err
92 | }
93 | c.HTTPEndpoint = u
94 | return nil
95 | }
96 |
97 | func (c *Configuration) getDefaultIniPath() string {
98 | getIniPath := func(dir string) string {
99 | if dir == "" {
100 | return ""
101 | }
102 | fileName := ".blackfire.ini"
103 | filePath := path.Join(path.Clean(dir), fileName)
104 | _, err := os.Stat(filePath)
105 | c.Logger.Debug().Msgf("Blackfire: Does configuration file exist at %s: %t", filePath, err == nil)
106 | if err != nil {
107 | return ""
108 | }
109 | return filePath
110 | }
111 |
112 | if iniPath := getIniPath(c.readEnvVar("BLACKFIRE_HOME")); iniPath != "" {
113 | return iniPath
114 | }
115 |
116 | if runtime.GOOS == "linux" {
117 | if iniPath := getIniPath(os.Getenv("XDG_CONFIG_HOME")); iniPath != "" {
118 | return iniPath
119 | }
120 | }
121 |
122 | if iniPath := getIniPath(os.Getenv("HOME")); iniPath != "" {
123 | return iniPath
124 | }
125 |
126 | if runtime.GOOS == "windows" {
127 | homedrive := os.Getenv("HOMEDRIVE")
128 | homepath := os.Getenv("HOMEPATH")
129 | if homedrive != "" && homepath != "" {
130 | dir := path.Join(path.Dir(homedrive), homepath)
131 | if iniPath := getIniPath(dir); iniPath != "" {
132 | return iniPath
133 | }
134 | }
135 | }
136 |
137 | return ""
138 | }
139 |
140 | func (c *Configuration) configureFromDefaults() {
141 | if c.AgentSocket == "" {
142 | switch runtime.GOOS {
143 | case "windows":
144 | c.AgentSocket = "tcp://127.0.0.1:8307"
145 | case "darwin":
146 | if runtime.GOARCH == "arm64" {
147 | c.AgentSocket = "unix:///opt/homebrew/var/run/blackfire-agent.sock"
148 | } else {
149 | c.AgentSocket = "unix:///usr/local/var/run/blackfire-agent.sock"
150 | }
151 | default:
152 | c.AgentSocket = "unix:///var/run/blackfire/agent.sock"
153 | }
154 | }
155 |
156 | if c.HTTPEndpoint == nil {
157 | c.setEndpoint("https://blackfire.io")
158 | }
159 | if c.AgentTimeout < 1 {
160 | c.AgentTimeout = time.Millisecond * 250
161 | }
162 | if c.MaxProfileDuration < 1 {
163 | c.MaxProfileDuration = time.Minute * 10
164 | }
165 | if c.DefaultCPUSampleRateHz == 0 {
166 | c.DefaultCPUSampleRateHz = golangDefaultCPUSampleRate
167 | }
168 | }
169 |
// configureFromIniFile reads settings from the configured ini file (or the
// default location when ConfigFile is empty). Each value is applied only when
// the corresponding field is still unset, so environment variables and
// manually supplied values keep precedence. Load errors are logged, not fatal.
func (c *Configuration) configureFromIniFile() {
	path := c.ConfigFile
	if path == "" {
		// No explicit file: probe the conventional locations; bail out silently
		// when no ini file exists at all.
		if path = c.getDefaultIniPath(); path == "" {
			return
		}
	}

	iniConfig, err := ini.Load(path)
	if err != nil {
		c.Logger.Error().Msgf("Blackfire: Could not load Blackfire config file %s: %v", path, err)
		return
	}

	section := iniConfig.Section("blackfire")
	if section.HasKey("client-id") && c.ClientID == "" {
		c.ClientID = c.getStringFromIniSection(section, "client-id")
	}

	if section.HasKey("client-token") && c.ClientToken == "" {
		c.ClientToken = c.getStringFromIniSection(section, "client-token")
	}

	if section.HasKey("endpoint") && c.HTTPEndpoint == nil {
		endpoint := c.getStringFromIniSection(section, "endpoint")
		if err := c.setEndpoint(endpoint); err != nil {
			c.Logger.Error().Msgf("Blackfire: Unable to set from ini file %s, endpoint %s: %v", path, endpoint, err)
		}
	}

	if section.HasKey("timeout") && c.AgentTimeout == 0 {
		// Ini timeouts are written as seconds (possibly with a unit suffix).
		timeout := c.getStringFromIniSection(section, "timeout")
		var err error
		if c.AgentTimeout, err = parseSeconds(timeout); err != nil {
			c.Logger.Error().Msgf("Blackfire: Unable to set from ini file %s, timeout %s: %v", path, timeout, err)
		}
	}
}
208 |
209 | func (c *Configuration) configureFromEnv() {
210 | if v := c.readEnvVar("BLACKFIRE_AGENT_SOCKET"); v != "" {
211 | c.AgentSocket = v
212 | }
213 |
214 | if v := c.readEnvVar("BLACKFIRE_QUERY"); v != "" {
215 | c.BlackfireQuery = v
216 | os.Unsetenv("BLACKFIRE_QUERY")
217 | }
218 |
219 | if v := c.readEnvVar("BLACKFIRE_CLIENT_ID"); v != "" {
220 | c.ClientID = v
221 | }
222 |
223 | if v := c.readEnvVar("BLACKFIRE_CLIENT_TOKEN"); v != "" {
224 | c.ClientToken = v
225 | }
226 |
227 | if v := c.readEnvVar("BLACKFIRE_SERVER_ID"); v != "" {
228 | c.ServerID = v
229 | }
230 |
231 | if v := c.readEnvVar("BLACKFIRE_SERVER_TOKEN"); v != "" {
232 | c.ServerToken = v
233 | }
234 |
235 | if v := c.readEnvVar("BLACKFIRE_ENDPOINT"); v != "" {
236 | if err := c.setEndpoint(v); err != nil {
237 | c.Logger.Error().Msgf("Blackfire: Unable to set from env var BLACKFIRE_ENDPOINT %s: %v", v, err)
238 | }
239 | }
240 |
241 | if v := c.readEnvVar("BLACKFIRE_PPROF_DUMP_DIR"); v != "" {
242 | absPath, err := filepath.Abs(v)
243 | if err != nil {
244 | c.Logger.Error().Msgf("Blackfire: Unable to set pprof dump dir to %v: %v", v, err)
245 | } else {
246 | c.PProfDumpDir = absPath
247 | }
248 | }
249 | }
250 |
251 | func (c *Configuration) load() error {
252 | c.loader.Do(func() {
253 | if c.Logger == nil {
254 | logger := NewLoggerFromEnvVars()
255 | c.Logger = &logger
256 | }
257 | c.configureFromEnv()
258 | // Used for test purposes
259 | if "1" != os.Getenv("BLACKFIRE_INTERNAL_IGNORE_INI") {
260 | c.configureFromIniFile()
261 | }
262 | c.configureFromDefaults()
263 | if c.err = c.validate(); c.err != nil {
264 | c.Logger.Warn().Err(c.err).Msg("Blackfire: Bad configuration")
265 | }
266 | })
267 | return c.err
268 | }
269 |
270 | func (c *Configuration) validate() error {
271 | if c.BlackfireQuery == "" {
272 | if c.ClientID == "" || c.ClientToken == "" {
273 | return errors.New("either BLACKFIRE_QUERY must be set, or client ID and client token must be set")
274 | }
275 | }
276 |
277 | if c.PProfDumpDir != "" {
278 | info, err := os.Stat(c.PProfDumpDir)
279 | if err != nil {
280 | return fmt.Errorf("Cannot dump pprof files to %v: %v", c.PProfDumpDir, err)
281 | }
282 | if !info.IsDir() {
283 | return fmt.Errorf("Cannot dump pprof files to %v: not a directory", c.PProfDumpDir)
284 | }
285 |
286 | // There's no 100% portable way to check for writability, so we just create
287 | // a temp zero-byte file and see if it succeeds.
288 | exePath, err := os.Executable()
289 | if err != nil {
290 | exePath = "go-unknown"
291 | } else {
292 | exePath = path.Base(exePath)
293 | }
294 | testPath := path.Join(c.PProfDumpDir, exePath+"-writability-test")
295 | // Delete it before starting, and make sure it gets deleted after
296 | os.Remove(testPath)
297 | defer os.Remove(testPath)
298 | if err = ioutil.WriteFile(testPath, []byte{}, 0644); err != nil {
299 | return fmt.Errorf("Cannot dump pprof files to %v: directory does not seem writable: %v", c.PProfDumpDir, err)
300 | }
301 | }
302 | return nil
303 | }
304 |
305 | func (c *Configuration) readEnvVar(name string) string {
306 | if v := os.Getenv(name); v != "" {
307 | c.Logger.Debug().Msgf("Blackfire: Read ENV var %s: %s", name, v)
308 | return v
309 | }
310 | return ""
311 | }
312 |
313 | func (c *Configuration) getStringFromIniSection(section *ini.Section, key string) string {
314 | if v := section.Key(key).String(); v != "" {
315 | c.Logger.Debug().Msgf("Blackfire: Read INI key %s: %s", key, v)
316 | return v
317 | }
318 | return ""
319 | }
320 |
321 | func parseSeconds(value string) (time.Duration, error) {
322 | re := regexp.MustCompile(`([0-9.]+)`)
323 | found := re.FindStringSubmatch(value)
324 |
325 | if len(found) == 0 {
326 | return 0, fmt.Errorf("%s: No seconds value found", value)
327 | }
328 |
329 | seconds, err := strconv.ParseFloat(found[1], 64)
330 | if err != nil {
331 | return 0, err
332 | }
333 | return time.Duration(float64(time.Second) * seconds), nil
334 | }
335 |
--------------------------------------------------------------------------------
/configuration_test.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "net/url"
5 | "os"
6 | "path/filepath"
7 | "testing"
8 | "time"
9 |
10 | "github.com/rs/zerolog"
11 | . "gopkg.in/check.v1"
12 | )
13 |
14 | func Test(t *testing.T) { TestingT(t) }
15 |
// BlackfireSuite groups the configuration tests for gocheck.
type BlackfireSuite struct{}

// Register the suite so gocheck discovers its Test* methods.
var _ = Suite(&BlackfireSuite{})
19 |
// URL parses contents into a *url.URL, panicking on invalid input.
// Test helper for building fixture configurations inline.
func URL(contents string) *url.URL {
	parsed, err := url.Parse(contents)
	if err != nil {
		panic(err)
	}
	return parsed
}
27 |
// newConfig builds a fully "manually configured" Configuration whose values
// are all distinguishable (suffixed "_manual") so tests can tell which source
// each final setting came from. The logger writes to a temp file at level 3.
func newConfig() *Configuration {
	logger := NewLogger(filepath.Join(os.TempDir(), "blackfire-manual.log"), 3)
	return &Configuration{
		AgentSocket:    "tcp://127.0.0.1:3333",
		AgentTimeout:   time.Second * 3,
		BlackfireQuery: "blackfire_query_manual",
		ClientID:       "client_id_manual",
		ClientToken:    "client_token_manual",
		HTTPEndpoint:   URL("https://blackfire.io/manual"),
		Logger:         &logger,
	}
}
40 |
41 | func newMixedConfig() *Configuration {
42 | config := newConfig()
43 |
44 | // Use default
45 | os.Unsetenv("BLACKFIRE_ENDPOINT")
46 | config.HTTPEndpoint = nil
47 |
48 | // Use env
49 | config.Logger = nil
50 |
51 | // Use ini
52 | config.AgentTimeout = 0
53 |
54 | return config
55 | }
56 |
57 | func setupEnv() {
58 | os.Setenv("BLACKFIRE_AGENT_SOCKET", "tcp://127.0.0.1:2222")
59 | os.Setenv("BLACKFIRE_QUERY", "blackfire_query_env")
60 | os.Setenv("BLACKFIRE_CLIENT_ID", "client_id_env")
61 | os.Setenv("BLACKFIRE_CLIENT_TOKEN", "client_token_env")
62 | os.Setenv("BLACKFIRE_ENDPOINT", "https://blackfire.io/env")
63 | os.Setenv("BLACKFIRE_LOG_FILE", "stderr")
64 | os.Setenv("BLACKFIRE_LOG_LEVEL", "2")
65 | }
66 |
67 | func unsetEnv() {
68 | os.Unsetenv("BLACKFIRE_AGENT_SOCKET")
69 | os.Unsetenv("BLACKFIRE_QUERY")
70 | os.Unsetenv("BLACKFIRE_CLIENT_ID")
71 | os.Unsetenv("BLACKFIRE_CLIENT_TOKEN")
72 | os.Unsetenv("BLACKFIRE_ENDPOINT")
73 | os.Unsetenv("BLACKFIRE_LOG_FILE")
74 | os.Unsetenv("BLACKFIRE_LOG_LEVEL")
75 | }
76 |
// newConfiguration loads config (or a fresh zero-value Configuration when
// nil) and returns it. The load() error is deliberately ignored here because
// the tests assert on the resulting fields instead.
func newConfiguration(config *Configuration) *Configuration {
	if config == nil {
		config = &Configuration{}
	}
	config.load()
	return config
}
84 |
// setIgnoreIni makes Configuration.load skip the ini-file step (internal test hook).
func setIgnoreIni() {
	os.Setenv("BLACKFIRE_INTERNAL_IGNORE_INI", "1")
}

// unsetIgnoreIni restores normal ini-file loading.
func unsetIgnoreIni() {
	os.Unsetenv("BLACKFIRE_INTERNAL_IGNORE_INI")
}
92 |
// TestConfigurationPrecedence checks that an environment variable beats a
// manually supplied struct value for the same setting.
func (s *BlackfireSuite) TestConfigurationPrecedence(c *C) {
	setIgnoreIni()
	defer unsetIgnoreIni()
	defer unsetEnv()

	os.Setenv("BLACKFIRE_AGENT_SOCKET", "tcp://127.0.0.1:2222")

	config := newConfiguration(&Configuration{AgentSocket: "tcp://127.0.0.1:2424"})

	// The env value (:2222) must win over the manual value (:2424).
	c.Assert("tcp://127.0.0.1:2222", Equals, config.AgentSocket)
}
104 |
// TestConfigurationDefaults checks the built-in defaults used when nothing is
// configured: endpoint, log level, and agent timeout.
func (s *BlackfireSuite) TestConfigurationDefaults(c *C) {
	setIgnoreIni()
	defer unsetIgnoreIni()
	config := newConfiguration(nil)
	c.Assert("https://blackfire.io", Equals, config.HTTPEndpoint.String())
	c.Assert(zerolog.ErrorLevel, Equals, config.Logger.GetLevel())
	c.Assert(time.Millisecond*250, Equals, config.AgentTimeout)
}
113 |
// TestConfigurationIniFile checks that settings are read from an explicitly
// provided ini file (values match fixtures/test_blackfire.ini).
func (s *BlackfireSuite) TestConfigurationIniFile(c *C) {
	config := newConfiguration(&Configuration{ConfigFile: "fixtures/test_blackfire.ini"})
	c.Assert("https://blackfire.io/ini", Equals, config.HTTPEndpoint.String())
	c.Assert("ab6f24b1-3103-4503-9f68-93d4b3f10c7c", Equals, config.ClientID)
	c.Assert("ec4f5fb9f43ec7004b44fc2f217c944c324c6225efcf144c2cee65eb5c45754c", Equals, config.ClientToken)
	c.Assert(time.Second*1, Equals, config.AgentTimeout)
}
121 |
// TestConfigurationEnv checks environment-variable configuration, first with
// the ini file suppressed, then with an ini file in play: env values must win
// everywhere except settings the env does not define (agent timeout).
func (s *BlackfireSuite) TestConfigurationEnv(c *C) {
	setupEnv()
	setIgnoreIni()
	defer unsetEnv()

	config := newConfiguration(nil)
	c.Assert("tcp://127.0.0.1:2222", Equals, config.AgentSocket)
	c.Assert("blackfire_query_env", Equals, config.BlackfireQuery)
	c.Assert("client_id_env", Equals, config.ClientID)
	c.Assert("client_token_env", Equals, config.ClientToken)
	c.Assert("https://blackfire.io/env", Equals, config.HTTPEndpoint.String())
	c.Assert(zerolog.WarnLevel, Equals, config.Logger.GetLevel())
	c.Assert(time.Millisecond*250, Equals, config.AgentTimeout)

	// Second pass: same env, but with the ini file enabled. Env still wins for
	// everything it defines; the timeout comes from the ini file.
	// (setupEnv is called again because load() consumed BLACKFIRE_QUERY.)
	setupEnv()
	unsetIgnoreIni()
	config = newConfiguration(&Configuration{ConfigFile: "fixtures/test_blackfire.ini"})
	c.Assert("tcp://127.0.0.1:2222", Equals, config.AgentSocket)
	c.Assert("blackfire_query_env", Equals, config.BlackfireQuery)
	c.Assert("client_id_env", Equals, config.ClientID)
	c.Assert("client_token_env", Equals, config.ClientToken)
	c.Assert("https://blackfire.io/env", Equals, config.HTTPEndpoint.String())
	c.Assert(zerolog.WarnLevel, Equals, config.Logger.GetLevel())
	c.Assert(time.Second*1, Equals, config.AgentTimeout)
}
147 |
// TestConfigurationManual checks that manually supplied struct values survive
// load(), both with the ini file suppressed and with one present (manual
// values must win over ini values).
func (s *BlackfireSuite) TestConfigurationManual(c *C) {
	config := newConfig()
	setIgnoreIni()
	config.load()
	c.Assert("tcp://127.0.0.1:3333", Equals, config.AgentSocket)
	c.Assert("blackfire_query_manual", Equals, config.BlackfireQuery)
	c.Assert("client_id_manual", Equals, config.ClientID)
	c.Assert("client_token_manual", Equals, config.ClientToken)
	c.Assert("https://blackfire.io/manual", Equals, config.HTTPEndpoint.String())
	c.Assert(zerolog.InfoLevel, Equals, config.Logger.GetLevel())
	c.Assert(time.Second*3, Equals, config.AgentTimeout)

	// Second pass: ini file enabled; every manual value must still win.
	unsetIgnoreIni()
	config = newConfig()
	config.ConfigFile = "fixtures/test_blackfire.ini"
	config.load()
	c.Assert("tcp://127.0.0.1:3333", Equals, config.AgentSocket)
	c.Assert("blackfire_query_manual", Equals, config.BlackfireQuery)
	c.Assert("client_id_manual", Equals, config.ClientID)
	c.Assert("client_token_manual", Equals, config.ClientToken)
	c.Assert("https://blackfire.io/manual", Equals, config.HTTPEndpoint.String())
	c.Assert(zerolog.InfoLevel, Equals, config.Logger.GetLevel())
	c.Assert(time.Second*3, Equals, config.AgentTimeout)
}
172 |
// TestConfigurationMixed checks a configuration whose settings come from three
// different sources at once (defaults, environment, ini file) — see
// newMixedConfig for which setting is expected from which source.
func (s *BlackfireSuite) TestConfigurationMixed(c *C) {
	setIgnoreIni()
	setupEnv()
	defer unsetEnv()

	config := newMixedConfig()
	config.load()
	c.Assert("tcp://127.0.0.1:2222", Equals, config.AgentSocket)
	c.Assert("blackfire_query_env", Equals, config.BlackfireQuery)
	c.Assert("client_id_env", Equals, config.ClientID)
	c.Assert("client_token_env", Equals, config.ClientToken)
	c.Assert("https://blackfire.io", Equals, config.HTTPEndpoint.String())
	c.Assert(zerolog.WarnLevel, Equals, config.Logger.GetLevel())
	c.Assert(time.Millisecond*250, Equals, config.AgentTimeout)

	// Second pass: ini file enabled; the agent timeout now comes from it.
	// (setupEnv is called again because load() consumed BLACKFIRE_QUERY.)
	unsetIgnoreIni()
	setupEnv()
	config = newMixedConfig()
	config.ConfigFile = "fixtures/test2_blackfire.ini"
	config.load()
	c.Assert("tcp://127.0.0.1:2222", Equals, config.AgentSocket)
	c.Assert("blackfire_query_env", Equals, config.BlackfireQuery)
	c.Assert("client_id_env", Equals, config.ClientID)
	c.Assert("client_token_env", Equals, config.ClientToken)
	c.Assert("https://blackfire.io", Equals, config.HTTPEndpoint.String())
	c.Assert(zerolog.WarnLevel, Equals, config.Logger.GetLevel())
	c.Assert(time.Second*1, Equals, config.AgentTimeout)
}
201 |
--------------------------------------------------------------------------------
/dashboard/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 |
6 | # testing
7 | /coverage
8 |
9 | # production
10 | /build
11 | /dist
12 |
13 | npm-debug.log*
14 | yarn-debug.log*
15 | yarn-error.log*
16 |
--------------------------------------------------------------------------------
/dashboard/.ncurc.json:
--------------------------------------------------------------------------------
1 | {
2 | "upgrade": true,
3 | "format": "group",
4 | "interactive": true,
5 | "peer": true
6 | }
7 |
--------------------------------------------------------------------------------
/dashboard/.npmrc:
--------------------------------------------------------------------------------
1 | legacy-peer-deps=true
2 |
--------------------------------------------------------------------------------
/dashboard/config-overrides.js:
--------------------------------------------------------------------------------
1 | /*eslint-disable no-unused-vars*/
2 |
3 | const MiniCssExtractPlugin = require('mini-css-extract-plugin');
4 |
5 | // this tweaks the webpack configuration, thanks to react-app-rewired
6 | module.exports = function override(config, env) {
7 | config.output.filename = 'static/js/[name].js';
8 | config.output.chunkFilename = 'static/js/[name].js';
9 | config.optimization.splitChunks = false;
10 | config.optimization.runtimeChunk = false;
11 |
12 | config.plugins.forEach((plugin) => {
13 | if (plugin instanceof MiniCssExtractPlugin) {
14 | plugin.options.filename = 'static/css/[name].css';
15 | }
16 | });
17 |
18 | return config;
19 | };
20 |
--------------------------------------------------------------------------------
/dashboard/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "go-dashboard",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@testing-library/jest-dom": "^6.1.5",
7 | "@testing-library/react": "^14.1.2",
8 | "@testing-library/user-event": "^14.5.1",
9 | "es6-promise": "^4.2.8",
10 | "immutable": "^4.3.4",
11 | "keymirror": "^0.1.1",
12 | "moment": "^2.29.4",
13 | "react": "^18.2.0",
14 | "react-dom": "^18.2.0",
15 | "react-redux": "^9.0.4",
16 | "react-scripts": "^5.0.1",
17 | "redux": "^5.0.1",
18 | "redux-thunk": "^3.1.0",
19 | "whatwg-fetch": "^3.6.20"
20 | },
21 | "scripts": {
22 | "start": "react-app-rewired start --scripts-version react-scripts",
23 | "build": "react-app-rewired build --scripts-version react-scripts",
24 | "merge": "webpack --config packaging/webpack.config-merge.js",
25 | "test": "CI=true react-app-rewired test --scripts-version react-scripts",
26 | "eject": "react-scripts eject",
27 | "serve": "serve -s dist",
28 | "eslint": "eslint src packaging *.js",
29 | "eslint-fix": "eslint --fix src packaging *.js"
30 | },
31 | "eslintConfig": {
32 | "extends": "react-app"
33 | },
34 | "browserslist": {
35 | "production": [
36 | ">0.2%",
37 | "not dead",
38 | "not op_mini all"
39 | ],
40 | "development": [
41 | "last 1 chrome version",
42 | "last 1 firefox version",
43 | "last 1 safari version"
44 | ]
45 | },
46 | "devDependencies": {
47 | "eslint": "^8.56.0",
48 | "eslint-config-airbnb": "^19.0.4",
49 | "react-app-rewired": "^2.2.1",
50 | "serve": "^14.2.1",
51 | "uglifyjs-webpack-plugin": "^2.2.0",
52 | "webpack": "^5.89.0",
53 | "webpack-cli": "^5.1.4"
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/dashboard/packaging/packager.js:
--------------------------------------------------------------------------------
#!/usr/bin/env node

// Post-build packager: copies the CRA build's index.html into dist/,
// rewriting it via the chained .replace() calls below.
// NOTE(review): the replace() arguments appear empty in this capture — the
// original search/replacement markup seems to have been stripped; confirm
// against version control before relying on this output.

const fs = require('fs');

const dir = `${__dirname}/..`;

// Ensure the output directory exists (no-op when already present).
fs.mkdirSync(`${dir}/dist`, { recursive: true });

fs.writeFileSync(`${dir}/dist/index.html`, fs.readFileSync(`${dir}/build/index.html`, { encoding: 'utf8' }).replace('', '').replace('', () => ``));
--------------------------------------------------------------------------------
/dashboard/packaging/webpack.config-merge.js:
--------------------------------------------------------------------------------
const UglifyJsPlugin = require('uglifyjs-webpack-plugin');
const path = require('path');
const glob = require('glob');

// Webpack config that merges the CRA build output (all emitted JS and CSS
// chunks) into a single self-contained bundle.js for embedding.
module.exports = {
  mode: 'production',
  entry: {
    // Collect every file under build/static/js and build/static/css.
    // NOTE(review): the extglob `?(js|css)` matches *zero or one* occurrence;
    // `@(js|css)` (exactly one) is presumably what was intended — confirm.
    'bundle.js': glob.sync(`${__dirname}/../build/static/?(js|css)/*.?(js|css)`).map((f) => path.resolve(__dirname, f)),
  },
  output: {
    path: `${__dirname}/../build/merged`,
    filename: 'bundle.js',
  },
  module: {
    rules: [
      {
        // Inline CSS into the JS bundle so a single file can be served.
        test: /\.css$/,
        use: ['style-loader', 'css-loader'],
      },
    ],
  },
  optimization: {
    minimizer: [new UglifyJsPlugin()],
  },
};
--------------------------------------------------------------------------------
/dashboard/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
12 | Blackfire Go Dashboard
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/dashboard/src/App.css:
--------------------------------------------------------------------------------
1 | body {
2 | font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
3 | font-size: 14px;
4 | background-color: #DDDDDD;
5 | }
6 |
7 | #root {
8 | margin: 0 auto;
9 | width: 800px;
10 | display: flex;
11 | flex-direction: column;
12 | background-color: #e5e5e5;
13 | min-height: 100vh;
14 | }
15 |
16 | a {
17 | color: #2da1bf;
18 | text-decoration: none;
19 | }
20 |
21 | .App-header {
22 | background-color: #323232;
23 | display: flex;
24 | flex-direction: column;
25 | align-items: center;
26 | justify-content: center;
27 | font-size: calc(10px + 2vmin);
28 | color: white;
29 | }
30 |
31 | .wrapper {
32 | width: 500px;
33 | margin: 0 auto;
34 | position: relative;
35 | padding: 20px 0;
36 | line-height: 35px;
37 | flex: 1;
38 | }
39 |
40 | .App-footer {
41 | bottom: 0;
42 | width: 100%;
43 | background-color: #F5F5F5;
44 | }
45 |
46 | .App-footer a {
47 | color: #e03c31;
48 | }
49 |
50 | .error {
51 | border: 1px solid #e03c31;
52 | border-radius: 5px;
53 | margin: 15px 0;
54 | padding: 5px 10px;
55 | }
56 |
57 | .App-link {
58 | color: #61dafb;
59 | }
60 |
61 | pre {
62 | line-height: 1em;
63 | min-height: 100px;
64 | max-height: 500px;
65 | overflow: scroll;
66 | }
67 |
--------------------------------------------------------------------------------
/dashboard/src/App.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import './App.css';
3 | import { Provider } from 'react-redux';
4 | import store from './redux/stores/configureStore';
5 | import BlackfireLogo from './Icon/BlackfireLogo';
6 | import Content from './Content';
7 |
8 | function App() {
9 | return (
10 |
11 |
16 |
17 |
25 |
26 | );
27 | }
28 |
29 | export default App;
30 |
--------------------------------------------------------------------------------
/dashboard/src/App.test.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { render } from '@testing-library/react';
3 | import App from './App';
4 |
5 | /*eslint-disable no-undef*/
6 |
7 | test('renders troubleshooting link', () => {
8 | const { getByText } = render();
9 | const linkElement = getByText(/Troubleshooting/i);
10 | expect(linkElement).toBeInTheDocument();
11 | });
12 |
--------------------------------------------------------------------------------
/dashboard/src/Content/Error.js:
--------------------------------------------------------------------------------
import React, { Component } from 'react';
import PropTypes from 'prop-types';
import { connect } from 'react-redux';
import { bindActionCreators } from 'redux';
import * as DashboardActions from '../redux/actions/DashboardActions';

// Displays the current API error and auto-dismisses it after 10 seconds.
class Error extends Component {
  // Pending auto-dismiss timer id (null when none is armed).
  timeout;

  componentDidMount() {
    // Arm the auto-dismiss timer if an error is already present on mount.
    if (this.props.error) {
      this._setTimeout();
    }
  }

  componentDidUpdate(prevProps) {
    // Re-arm the timer whenever the displayed error changes.
    if (prevProps.error !== this.props.error) {
      this._clearTimeout();
      if (this.props.error) {
        this._setTimeout();
      }
    }
  }

  componentWillUnmount() {
    this._clearTimeout();
  }

  // Dismiss the error via the CLEAR_ERROR action after 10 seconds.
  _setTimeout() {
    this.timeout = setTimeout(() => {
      this.props.actions.clearError();
      this.timeout = null;
    }, 10000);
  }

  _clearTimeout() {
    if (this.timeout) {
      clearTimeout(this.timeout);
    }
    this.timeout = null;
  }

  render() {
    const { error } = this.props;

    if (!error) {
      return null;
    }

    // NOTE(review): the JSX wrapper markup was stripped from this capture;
    // restore the original tags from version control.
    return (

{`${error.title} (${ error.detail})`}

    );
  }
}
57 |
58 | Error.defaultProps = {
59 | error: null,
60 | };
61 |
62 | Error.propTypes = {
63 | actions: PropTypes.shape({
64 | clearError: PropTypes.func.isRequired,
65 | }).isRequired,
66 | error: PropTypes.shape({
67 | status: PropTypes.number.isRequired,
68 | title: PropTypes.string.isRequired,
69 | detail: PropTypes.string.isRequired,
70 | }),
71 | };
72 |
// Bind every dashboard action creator to dispatch for the connected component.
const mapDispatchToProps = (dispatch) => ({
  actions: bindActionCreators(DashboardActions, dispatch),
});
78 |
79 | export default connect((state) => ({
80 | error: state.DashboardReducer.get('error'),
81 | }), mapDispatchToProps)(Error);
82 |
--------------------------------------------------------------------------------
/dashboard/src/Content/ProfileList.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import { connect } from 'react-redux';
4 | import Timeago from '../Timeago';
5 |
// Return `word` with its first character upper-cased.
// Falsy values (null, undefined, '') are returned unchanged. The original
// guard `!word.length || word.length < 1` was redundant (both test the same
// thing, and `!word` already rejects empty strings).
function upperCaseFirst(word) {
  if (!word) {
    return word;
  }

  // slice() replaces the deprecated String.prototype.substr().
  return `${word[0].toUpperCase()}${word.slice(1)}`;
}
13 |
// Renders the list of collected profiles (a link per profile with a
// relative timestamp), or a "No profiles yet" placeholder.
// NOTE(review): the JSX markup was stripped from this capture; restore the
// original tags from version control.
function ProfileList({ profiles }) {
  return (

{'Profiles:'}
      {profiles.map((profile) => (

      ))}
      {profiles.length === 0 ?
{'No profiles yet'} : null}

  );
}
31 |
32 | ProfileList.propTypes = {
33 | profiles: PropTypes.arrayOf(PropTypes.shape({
34 | name: PropTypes.string,
35 | })).isRequired,
36 | };
37 |
38 | export default connect((state) => ({
39 | profiles: state.DashboardReducer.get('profiles'),
40 | }))(ProfileList);
41 |
--------------------------------------------------------------------------------
/dashboard/src/Content/ProfilingStatus.js:
--------------------------------------------------------------------------------
1 | import React, { Component } from 'react';
2 | import PropTypes from 'prop-types';
3 | import { connect } from 'react-redux';
4 | import { bindActionCreators } from 'redux';
5 | import * as DashboardActions from '../redux/actions/DashboardActions';
6 | import Error from "./Error";
7 |
// Profiling configuration panel: shows current profiler state and exposes
// enable / disable / end controls.
class ProfilingStatus extends Component {
  constructor(props) {
    super(props);
    // Title attached to the next profile; updated on each keystroke and
    // deliberately kept off React state (it never triggers a re-render).
    this.profiling_title = '';
  }

  handleEnableProfiler = () => {
    this.props.actions.enableProfiler(this.profiling_title);
  }

  handleDisableProfiler = () => {
    this.props.actions.disableProfiler();
  }

  handleEndProfiler = () => {
    this.props.actions.endProfiler();
  }

  handleTitleChange = (event) => {
    this.profiling_title = event.target.value
  }

  render() {
    const { profiling_enabled, profiling_sample_rate, action_pending } = this.props;

    // NOTE(review): the JSX markup below was stripped from this capture;
    // restore the original tags from version control.
    return (

{'Configuration:'}

{`Profiling is ${profiling_enabled ? 'enabled' : 'disabled'}`}
        {profiling_enabled &&
}

        {`Sample rate: ${profiling_sample_rate} Hz`}

{'Control:'}

        {action_pending &&
}

    );
  }
}
61 |
62 | ProfilingStatus.propTypes = {
63 | action_pending: PropTypes.bool.isRequired,
64 | profiling_enabled: PropTypes.bool.isRequired,
65 | profiling_sample_rate: PropTypes.number.isRequired,
66 | actions: PropTypes.shape({
67 | enableProfiler: PropTypes.func.isRequired,
68 | disableProfiler: PropTypes.func.isRequired,
69 | endProfiler: PropTypes.func.isRequired,
70 | }).isRequired,
71 | };
72 |
// Bind every dashboard action creator to dispatch for the connected component.
const mapDispatchToProps = (dispatch) => ({
  actions: bindActionCreators(DashboardActions, dispatch),
});
78 |
79 | export default connect((state) => ({
80 | action_pending: state.DashboardReducer.get('profiler_enabling') || state.DashboardReducer.get('profiler_disabling') || state.DashboardReducer.get('profiler_ending'),
81 | profiling_enabled: state.DashboardReducer.get('profiling_enabled'),
82 | profiling_sample_rate: state.DashboardReducer.get('profiling_sample_rate'),
83 | }), mapDispatchToProps)(ProfilingStatus);
84 |
--------------------------------------------------------------------------------
/dashboard/src/Content/index.js:
--------------------------------------------------------------------------------
1 | import { bindActionCreators } from 'redux';
2 | import { connect } from 'react-redux';
3 | import PropTypes from 'prop-types';
4 | import React, { Component } from 'react';
5 | import ProfilingStatus from './ProfilingStatus';
6 | import ProfileList from './ProfileList';
7 | import * as DashboardActions from '../redux/actions/DashboardActions';
8 |
// Top-level dashboard content: loads data on mount and refreshes it every
// second via background polling.
class Content extends Component {
  constructor(props) {
    super(props);
    // Interval id for the 1s polling loop (null when not polling).
    this.polling = null;
  }

  componentDidMount() {
    this._doLoad();
    this.poll();
  }

  componentWillUnmount() {
    this.clearInterval();
  }

  // Fetch dashboard data; noLoading=true skips the LOADING action so the
  // background poll does not flash a spinner.
  _doLoad(noLoading = false) {
    this.props.actions.loadDashboard(noLoading);
  }

  clearInterval() {
    if (this.polling !== null) {
      clearInterval(this.polling);
    }
    this.polling = null;
  }

  // Start the 1-second background refresh loop.
  poll() {
    this.polling = setInterval(() => {
      this.periodicalPoll();
    }, 1000);
  }

  periodicalPoll() {
    this._doLoad(true);
  }

  render() {
    // NOTE(review): JSX markup stripped from this capture; restore the
    // original tags from version control.
    return (

    );
  }
}
54 |
55 | Content.propTypes = {
56 | actions: PropTypes.shape({
57 | loadDashboard: PropTypes.func.isRequired,
58 | }).isRequired,
59 | };
60 |
// Bind every dashboard action creator to dispatch for the connected component.
const mapDispatchToProps = (dispatch) => ({
  actions: bindActionCreators(DashboardActions, dispatch),
});
66 |
67 | export default connect(
68 | undefined,
69 | mapDispatchToProps,
70 | )(Content);
71 |
--------------------------------------------------------------------------------
/dashboard/src/Icon/BlackfireLogo.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 |
4 | export default function BlackfireLogo(props) {
5 | return (
6 |
68 | );
69 | }
70 |
71 | BlackfireLogo.defaultProps = {
72 | className: '',
73 | style: {},
74 | };
75 |
76 | BlackfireLogo.propTypes = {
77 | className: PropTypes.string,
78 | style: PropTypes.objectOf(PropTypes.oneOfType([PropTypes.string.isRequired, PropTypes.number.isRequired]).isRequired),
79 | };
80 |
--------------------------------------------------------------------------------
/dashboard/src/Timeago.js:
--------------------------------------------------------------------------------
1 | import React, { PureComponent } from 'react';
2 | import PropTypes from 'prop-types';
3 | import moment from 'moment';
4 | import TimeagoLib from './Timeagolib';
5 |
6 | const monthNames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
7 | const dayNames = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
8 |
// Left-pad a one-digit value with '0'. Values already two characters long
// are returned unchanged; anything else (including longer values) gets a
// leading '0', matching the original behavior.
function twoDigits(number) {
  const text = `${number}`;

  return text.length === 2 ? text : `0${text}`;
}
14 |
// Renders a relative timestamp ("3 minutes ago") with an absolute,
// timezone-annotated variant for the same date.
export default class Timeago extends PureComponent {
  render() {
    const momentDate = moment(this.props.date);

    // An unparsable or absent date renders nothing.
    if (!momentDate.isValid()) {
      return null;
    }

    const normalizedDate = momentDate.toDate();
    // Local UTC offset in hours, sign-flipped (getTimezoneOffset() returns
    // minutes *behind* UTC, so -60 becomes +1).
    const offset = normalizedDate.getTimezoneOffset() / -60;
    const gmt = offset === 0 ? 'UTC' : (`GMT${(offset > 0 ? '+' : '')}${offset}`);

    // NOTE(review): the JSX markup was stripped from this capture —
    // presumably `gmt`, `monthNames`, `dayNames` and `twoDigits` feed an
    // absolute-date attribute here; restore the tags from version control.
    return (

      {TimeagoLib.timeAgoInWords(this.props.date)}

    );
  }
}
36 |
37 | Timeago.defaultProps = {
38 | date: null,
39 | };
40 |
41 | Timeago.propTypes = {
42 | date: PropTypes.string,
43 | };
44 |
--------------------------------------------------------------------------------
/dashboard/src/Timeagolib.js:
--------------------------------------------------------------------------------
1 | // Inspired from
2 | // Copyright 2012, Terry Tai, Pragmatic.ly
3 | // https://pragmatic.ly/
4 | // Licensed under the MIT license.
5 | // https://github.com/pragmaticly/smart-time-ago/blob/master/LICENSE
6 |
7 | import moment from 'moment';
8 |
// Absolute distance, in whole (rounded) minutes, between now and the given Date.
function getTimeDistanceInMinutes(absolutTime) {
  const deltaMs = Math.abs(Date.now() - absolutTime.getTime());

  return Math.round(deltaMs / 1000 / 60);
}
14 |
// Relative-time formatter ("5 minutes", "about 2 hours", ...) adapted from
// smart-time-ago (Terry Tai, Pragmatic.ly, MIT).
class Timeago {
  constructor() {
    this.options = {
      selector: 'time.timeago',
      attr: 'datetime',
      dir: 'up',
      lang: {
        units: {
          second: 'second',
          seconds: 'seconds',
          minute: 'minute',
          minutes: 'minutes',
          hour: 'hour',
          hours: 'hours',
          day: 'day',
          days: 'days',
          month: 'month',
          months: 'months',
          year: 'year',
          years: 'years',
        },
        prefixes: {
          lt: 'less than a',
          about: '', // 'about',
          over: 'over',
          almost: 'almost',
          ago: '',
        },
        suffix: ' ago',
      },
    };
  }

  // Human-readable relative time for `timeString`; falls back to the raw
  // input when it cannot be parsed as a date. The " ago" suffix is only
  // appended for dates in the past.
  timeAgoInWords(timeString) {
    const parsed = moment(timeString);

    if (!parsed.isValid()) {
      return timeString;
    }

    const when = parsed.toDate();
    const isPast = new Date().getTime() - when.getTime() > 0;
    const { prefixes, suffix } = this.options.lang;

    return `${prefixes.ago}${this.distanceOfTimeInWords(when)}${isPast ? suffix : ''}`;
  }

  // Bucket the minute distance into the phrase ranges used by smart-time-ago.
  // Earlier guards exhaust the lower bounds, so only upper bounds are tested.
  distanceOfTimeInWords(absolutTime) {
    const dim = getTimeDistanceInMinutes(absolutTime);
    const { units, prefixes } = this.options.lang;

    if (dim === 0) {
      return `${prefixes.lt} ${units.minute}`;
    }
    if (dim === 1) {
      return `1 ${units.minute}`;
    }
    if (dim <= 44) {
      return `${dim} ${units.minutes}`;
    }
    if (dim <= 89) {
      return `${prefixes.about} 1 ${units.hour}`;
    }
    if (dim <= 1439) {
      return `${prefixes.about} ${Math.round(dim / 60)} ${units.hours}`;
    }
    if (dim <= 2519) {
      return `1 ${units.day}`;
    }
    if (dim <= 43199) {
      return `${Math.round(dim / 1440)} ${units.days}`;
    }
    if (dim <= 86399) {
      return `${prefixes.about} 1 ${units.month}`;
    }
    if (dim <= 525599) {
      return `${Math.round(dim / 43200)} ${units.months}`;
    }
    if (dim <= 655199) {
      return `${prefixes.about} 1 ${units.year}`;
    }
    if (dim <= 914399) {
      return `${prefixes.over} 1 ${units.year}`;
    }
    if (dim <= 1051199) {
      return `${prefixes.almost} 2 ${units.years}`;
    }

    return `${prefixes.about} ${Math.round(dim / 525600)} ${units.years}`;
  }
}
93 |
94 | export default new Timeago();
95 |
--------------------------------------------------------------------------------
/dashboard/src/index.css:
--------------------------------------------------------------------------------
1 | body {
2 | margin: 0;
3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
5 | sans-serif;
6 | -webkit-font-smoothing: antialiased;
7 | -moz-osx-font-smoothing: grayscale;
8 | }
9 |
10 | code {
11 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
12 | monospace;
13 | }
14 |
--------------------------------------------------------------------------------
/dashboard/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 | import './index.css';
4 | import App from './App';
5 | import * as serviceWorker from './serviceWorker';
6 |
7 | ReactDOM.render(, document.getElementById('root'));
8 |
9 | // If you want your app to work offline and load faster, you can change
10 | // unregister() to register() below. Note this comes with some pitfalls.
11 | // Learn more about service workers: https://bit.ly/CRA-PWA
12 | serviceWorker.unregister();
13 |
--------------------------------------------------------------------------------
/dashboard/src/redux/actions/DashboardActions.js:
--------------------------------------------------------------------------------
1 | import DashboardConstants from '../constants/DashboardConstants';
2 | import doFetch from '../fetcher';
3 |
4 | /*eslint-disable import/prefer-default-export*/
5 |
// Action creator: dismiss the currently displayed error banner.
export function clearError() {
  return { type: DashboardConstants.CLEAR_ERROR };
}
11 |
// Internal action creator: wrap a successful dashboard API payload.
function dashboardLoaded(response) {
  const { data } = response;

  return { type: DashboardConstants.DASHBOARD_LOADED, data };
}
18 |
// Thunk: fetch the dashboard state from the agent.
// When `noLoading` is true (the periodic background poll) the LOADING action
// is skipped so the UI does not flash a spinner every second.
export function loadDashboard(noLoading = false) {
  return (dispatch) => {
    if (!noLoading) {
      dispatch({
        type: DashboardConstants.DASHBOARD_LOADING,
      });
    }

    // Two-argument then(): the rejection handler only covers the fetch
    // itself; the trailing catch() also covers errors thrown while
    // dispatching dashboardLoaded().
    doFetch('./dashboard_api')
      .then((response) => {
        dispatch(dashboardLoaded(response));
      }, (error) => {
        /*eslint-disable no-console*/
        console.log('Error while fetching API', error);
        /*eslint-enable no-console*/
      })
      .catch((error) => {
        /*eslint-disable no-console*/
        console.log('Error while loading dashboard', error);
        /*eslint-enable no-console*/
      });
  };
}
42 |
// Normalize a fetch failure into an RFC 7807-style error object.
// A missing response or a plain-string payload becomes a synthetic problem
// object; structured payloads are passed through untouched.
function craftErrorIfEmpty(response) {
  if (!response) {
    return {
      status: -1,
      title: 'Unable to join server',
      detail: 'No response data',
    };
  }

  const { data } = response;

  if (typeof data === 'string') {
    return {
      status: -1,
      title: 'Incorrect response',
      detail: 'Server returned a non JSON string.',
    };
  }

  return data;
}
64 |
// Thunk: turn profiling on, tagging future profiles with `title`.
// On failure the PROFILER_ENABLED action still fires, carrying a normalized
// error payload so the reducer can surface it.
export function enableProfiler(title) {
  return (dispatch) => {
    dispatch({ type: DashboardConstants.PROFILER_ENABLING });

    const url = `./enable?title=${encodeURIComponent(title.trim())}`;

    const onSuccess = (response) => {
      dispatch({ type: DashboardConstants.PROFILER_ENABLED, data: response.data });
    };
    const onFailure = (error) => {
      dispatch({ type: DashboardConstants.PROFILER_ENABLED, data: craftErrorIfEmpty(error.response) });
      /*eslint-disable no-console*/
      console.log('Error while enabling profiler', error);
      /*eslint-enable no-console*/
    };

    doFetch(url, 'POST')
      .then(onSuccess, onFailure)
      .catch((error) => {
        /*eslint-disable no-console*/
        console.log('Error after enabling profiler', error);
        /*eslint-enable no-console*/
      });
  };
}
95 |
// Thunk: turn profiling off. Failures are surfaced through the same
// PROFILER_DISABLED action, carrying a normalized error payload.
export function disableProfiler() {
  return (dispatch) => {
    dispatch({ type: DashboardConstants.PROFILER_DISABLING });

    const onSuccess = (response) => {
      dispatch({ type: DashboardConstants.PROFILER_DISABLED, data: response.data });
    };
    const onFailure = (error) => {
      dispatch({ type: DashboardConstants.PROFILER_DISABLED, data: craftErrorIfEmpty(error.response) });
      /*eslint-disable no-console*/
      console.log('Error while disabling profiler', error);
      /*eslint-enable no-console*/
    };

    doFetch('./disable', 'POST')
      .then(onSuccess, onFailure)
      .catch((error) => {
        /*eslint-disable no-console*/
        console.log('Error after disabling profiler', error);
        /*eslint-enable no-console*/
      });
  };
}
124 |
// Thunk: end the current profiling session. Failures are surfaced through
// the same PROFILER_ENDED action, carrying a normalized error payload.
export function endProfiler() {
  return (dispatch) => {
    dispatch({ type: DashboardConstants.PROFILER_ENDING });

    const onSuccess = (response) => {
      dispatch({ type: DashboardConstants.PROFILER_ENDED, data: response.data });
    };
    const onFailure = (error) => {
      dispatch({ type: DashboardConstants.PROFILER_ENDED, data: craftErrorIfEmpty(error.response) });
      /*eslint-disable no-console*/
      console.log('Error while ending profiler', error);
      /*eslint-enable no-console*/
    };

    doFetch('./end', 'POST')
      .then(onSuccess, onFailure)
      .catch((error) => {
        /*eslint-disable no-console*/
        console.log('Error after ending profiler', error);
        /*eslint-enable no-console*/
      });
  };
}
153 |
--------------------------------------------------------------------------------
/dashboard/src/redux/constants/DashboardConstants.js:
--------------------------------------------------------------------------------
1 | import keyMirror from 'keymirror';
2 |
// keyMirror turns each key into a string constant equal to its own name,
// e.g. constants.CLEAR_ERROR === 'CLEAR_ERROR'. These are the redux action
// types dispatched by DashboardActions and handled by DashboardReducer.
const constants = keyMirror({
  CLEAR_ERROR: null,
  DASHBOARD_LOADING: null,
  DASHBOARD_LOADED: null,
  PROFILER_ENABLING: null,
  PROFILER_ENABLED: null,
  PROFILER_DISABLING: null,
  PROFILER_DISABLED: null,
  PROFILER_ENDING: null,
  PROFILER_ENDED: null,
});

export default constants;
16 |
--------------------------------------------------------------------------------
/dashboard/src/redux/fetcher.js:
--------------------------------------------------------------------------------
1 | import 'whatwg-fetch';
2 |
3 | require('es6-promise').polyfill();
4 |
5 | const headers = {
6 | 'X-Requested-With': 'XMLHttpRequest',
7 | Accept: 'application/json',
8 | };
9 | const cache = {};
10 | const jsonHeaders = ['application/json', 'application/problem+json'];
11 |
// True when the response declares a JSON (or problem+json) body.
// Matching is prefix-based so media types carrying parameters, e.g.
// "application/json; charset=utf-8", are recognized too — the original
// exact indexOf() match missed them. A missing header counts as non-JSON.
function isJson(response) {
  const contentType = response.headers.get('content-type') || '';

  return jsonHeaders.some((type) => contentType.startsWith(type));
}
15 |
// Convert a fetch Response into {status, statusText, data, response}.
// JSON bodies are parsed (falling back to the raw text on parse failure);
// 429 responses get a synthetic rate-limit payload. Non-2xx statuses
// reject with an Error carrying the normalized payload on `.response`.
function checkStatus(response) {
  let retval;

  if (isJson(response) && response.status !== 204 && response.status !== 429) {
    retval = response.text().then((text) => {
      let data;

      try {
        data = JSON.parse(text);
      } catch (e) {
        // Body claimed to be JSON but is not parseable: keep the raw text.
        data = text;
      }

      return {
        status: response.status,
        statusText: response.statusText,
        data,
        response,
      };
    });
  } else {
    retval = response.text().then((text) => ({
      status: response.status,
      statusText: response.statusText,
      data: text,
      response,
    }));
  }

  if (response.status === 429) {
    // Chain (not fork) the promise. The original dropped the then() result
    // and relied on handler-registration order mutating the shared object;
    // reassigning retval makes the ordering explicit and robust.
    retval = retval.then((retvalData) => {
      retvalData.data = {
        code: 429,
        message: 'Rate limit exceeded',
        errors: null,
      };

      return retvalData;
    });
  }

  if (response.status >= 200 && response.status < 300) {
    return retval;
  }

  return retval.then((retvalData) => {
    const error = new Error(retvalData.statusText);

    error.response = retvalData;

    throw error;
  });
}
69 |
70 | export default function doFetch(url, method = 'GET', auth = {}, data = null, customHeaders = {}, progressCallback = null) {
71 | const body = {};
72 | const bodyHeaders = {};
73 |
74 | if (data !== null) {
75 | if (typeof data === 'object') {
76 | body.body = JSON.stringify(data);
77 | bodyHeaders['content-type'] = 'application/json';
78 | } else if (data) {
79 | body.body = data;
80 | }
81 | }
82 |
83 | function result() {
84 | const options = {
85 | method,
86 | headers: {
87 | ...headers, ...bodyHeaders, ...auth, ...customHeaders,
88 | },
89 | ...body,
90 | };
91 | const key = `${JSON.stringify(options)}@@@${url}`;
92 |
93 | // this is not an actual cache but an anti concurrent mitigation system
94 | if (cache[key] !== undefined) {
95 | return cache[key];
96 | }
97 |
98 | async function fetchResult() {
99 | const response = await fetch(url, options);
100 | const finalResponse = response.clone();
101 |
102 | if (!response.body || !response.body.getReader) {
103 | return finalResponse;
104 | }
105 |
106 | const reader = response.body.getReader();
107 | const contentLength = +response.headers.get('X-Blackfire-Content-Length');
108 |
109 | if (!progressCallback || contentLength === 0) {
110 | return finalResponse;
111 | }
112 |
113 | let receivedLength = 0;
114 | /*eslint-disable no-constant-condition*/
115 | while (true) {
116 | /*eslint-enable no-constant-condition*/
117 | /*eslint-disable no-await-in-loop*/
118 | const { done, value } = await reader.read();
119 | /*eslint-enable no-await-in-loop*/
120 |
121 | if (done) {
122 | break;
123 | }
124 |
125 | receivedLength += value.length;
126 | progressCallback(receivedLength, contentLength);
127 | }
128 |
129 | return finalResponse;
130 | }
131 |
132 | const ret = fetchResult()
133 | .then(checkStatus)
134 | .then((response) => {
135 | delete cache[key];
136 |
137 | return response;
138 | }, (error) => {
139 | delete cache[key];
140 |
141 | throw error;
142 | });
143 |
144 | cache[key] = ret;
145 |
146 | return ret;
147 | }
148 |
149 | return result();
150 | }
151 |
--------------------------------------------------------------------------------
/dashboard/src/redux/reducers/DashboardReducer.js:
--------------------------------------------------------------------------------
1 | import Immutable from 'immutable';
2 | import DashboardConstants from '../constants/DashboardConstants';
3 |
// Initial dashboard state. `profiling_sample_rate` of -1 marks "not loaded
// yet"; the profiler_*ing flags track in-flight control requests.
const _state = Immutable.Map({
  loading: false,
  profiling_enabled: false,
  profiling_sample_rate: -1,
  profiles: [],
  profiler_enabling: false,
  profiler_disabling: false,
  profiler_ending: false,
  error: null,
});
14 |
// Mark the dashboard as loading (initial, non-polling fetch only).
function dashboardLoading(state) {
  return state.set('loading', true);
}
18 |
// A payload is treated as an error when it carries a `status` field; the
// field's value itself is returned, so callers use it as a truthy flag.
function isError(data) {
  const { status } = data;

  return status;
}
22 |
// Clear the in-flight flag `prop` and record `data` as the error when it
// looks like one; on success, refresh the profiling state from the payload.
function disablePropAndSetError(state, prop, data) {
  const is_error = isError(data);

  return state.withMutations((ctx) => {
    ctx
      .set(prop, false)
      .set('error', is_error ? data : null);

    // NOTE(review): assumes every non-error payload carries a `profiling`
    // object; a success response without one would throw here — confirm
    // against the agent's API responses.
    if (!is_error && data.profiling.enabled !== undefined) {
      ctx
        .set('profiling_enabled', data.profiling.enabled)
        .set('profiling_sample_rate', data.profiling.sample_rate)
        .set('profiles', data.profiles._embedded);
    }
  });
}
39 |
// Raise the in-flight flag `prop` and dismiss any previously shown error.
function enablePropAndClearError(state, prop) {
  return state
    .set(prop, true)
    .set('error', null);
}
47 |
// Apply a freshly fetched dashboard payload to the store.
function dashboardLoaded(state, data) {
  return state
    .set('loading', false)
    .set('profiling_enabled', data.profiling.enabled)
    .set('profiling_sample_rate', data.profiling.sample_rate)
    .set('profiles', data.profiles._embedded);
}
57 |
// One thin wrapper per profiler action phase, so the reducer switch below
// reads uniformly.

function profilerDisabling(state) {
  return enablePropAndClearError(state, 'profiler_disabling');
}

function profilerDisabled(state, data) {
  return disablePropAndSetError(state, 'profiler_disabling', data);
}

function profilerEnabling(state) {
  return enablePropAndClearError(state, 'profiler_enabling');
}

function profilerEnabled(state, data) {
  return disablePropAndSetError(state, 'profiler_enabling', data);
}

function profilerEnding(state) {
  return enablePropAndClearError(state, 'profiler_ending');
}

function profilerEnded(state, data) {
  return disablePropAndSetError(state, 'profiler_ending', data);
}

// Dismiss the currently displayed error.
function clearError(state) {
  return state.set('error', null);
}
85 |
86 | export default function DashboardReducer(state = _state, action) {
87 | switch (action.type) {
88 | case DashboardConstants.DASHBOARD_LOADING:
89 | return dashboardLoading(state);
90 | case DashboardConstants.DASHBOARD_LOADED:
91 | return dashboardLoaded(state, action.data);
92 | case DashboardConstants.PROFILER_ENABLING:
93 | return profilerEnabling(state);
94 | case DashboardConstants.PROFILER_ENABLED:
95 | return profilerEnabled(state, action.data);
96 | case DashboardConstants.PROFILER_DISABLING:
97 | return profilerDisabling(state);
98 | case DashboardConstants.PROFILER_DISABLED:
99 | return profilerDisabled(state, action.data);
100 | case DashboardConstants.PROFILER_ENDING:
101 | return profilerEnding(state);
102 | case DashboardConstants.PROFILER_ENDED:
103 | return profilerEnded(state, action.data);
104 | case DashboardConstants.CLEAR_ERROR:
105 | return clearError(state);
106 | default:
107 | return state;
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/dashboard/src/redux/reducers/index.js:
--------------------------------------------------------------------------------
1 | import { combineReducers } from 'redux';
2 | import DashboardReducer from './DashboardReducer';
3 |
// Build the application's root reducer; exported as a factory so the store
// setup modules can invoke it at creation time.
export default function createRootReducer() {
  return combineReducers({
    DashboardReducer,
  });
}
7 |
--------------------------------------------------------------------------------
/dashboard/src/redux/stores/configureStore.dev.js:
--------------------------------------------------------------------------------
1 | /*eslint-disable global-require*/
2 |
3 | import { createStore, applyMiddleware, compose } from 'redux';
4 | import { thunk } from 'redux-thunk';
5 | import createRootReducer from '../reducers';
6 |
// Development store factory: thunk middleware only. Dev-only middleware or
// enhancers would be added to the compose() call below.
export default function configureStore(initialState) {
  return createStore(
    createRootReducer(), // root reducer with router state
    initialState,
    compose(applyMiddleware(thunk)),
  );
}
20 |
--------------------------------------------------------------------------------
/dashboard/src/redux/stores/configureStore.js:
--------------------------------------------------------------------------------
1 | import configureStore from './importStore';
2 |
3 | export default configureStore({});
4 |
--------------------------------------------------------------------------------
/dashboard/src/redux/stores/configureStore.prod.js:
--------------------------------------------------------------------------------
1 | import { createStore, applyMiddleware, compose } from 'redux';
2 | import { thunk } from 'redux-thunk';
3 | import createRootReducer from '../reducers';
4 |
5 | // Middleware you want to use in production:
6 |
// Production store factory: thunk middleware only.
export default function configureStore(initialState) {
  // Note: only Redux >= 3.1.0 supports passing enhancer as third argument.
  // See https://github.com/rackt/redux/releases/tag/v3.1.0
  const enhancer = compose(applyMiddleware(thunk));

  return createStore(createRootReducer(), initialState, enhancer);
}
20 |
--------------------------------------------------------------------------------
/dashboard/src/redux/stores/importStore.js:
--------------------------------------------------------------------------------
/*eslint-disable global-require*/

// Select the store factory matching the current build; the bundler's constant
// folding of process.env.NODE_ENV keeps the unused branch out of the bundle.
const factory =
  process.env.NODE_ENV === 'production'
    ? require('./configureStore.prod')
    : require('./configureStore.dev');

module.exports = factory.default;
8 |
--------------------------------------------------------------------------------
/dashboard/src/serviceWorker.js:
--------------------------------------------------------------------------------
1 | // This optional code is used to register a service worker.
2 | // register() is not called by default.
3 |
4 | // This lets the app load faster on subsequent visits in production, and gives
5 | // it offline capabilities. However, it also means that developers (and users)
6 | // will only see deployed updates on subsequent visits to a page, after all the
7 | // existing tabs open on the page have been closed, since previously cached
8 | // resources are updated in the background.
9 |
10 | // To learn more about the benefits of this model and instructions on how to
11 | // opt-in, read https://bit.ly/CRA-PWA
12 |
13 | /*eslint-disable no-console*/
14 | /*eslint-disable no-eq-null*/
15 |
// Loopback detection: plain "localhost", the IPv6 loopback "[::1]", or any
// IPv4 address in 127.0.0.0/8.
const isLocalhost =
  window.location.hostname === 'localhost' ||
  window.location.hostname === '[::1]' ||
  /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/.test(
    window.location.hostname,
  );
25 |
// Registers the service worker at swUrl and reports lifecycle transitions:
// config.onUpdate fires when new content is waiting behind open tabs,
// config.onSuccess fires when content has been precached for the first time.
function registerValidSW(swUrl, config) {
  navigator.serviceWorker
    .register(swUrl)
    .then((registration) => {
      registration.onupdatefound = () => {
        const installingWorker = registration.installing;
        if (installingWorker == null) {
          return;
        }
        installingWorker.onstatechange = () => {
          if (installingWorker.state !== 'installed') {
            return;
          }
          if (navigator.serviceWorker.controller) {
            // An older worker still controls the page; the fresh content is
            // precached but will only be served once every tab is closed.
            console.log(
              'New content is available and will be used when all ' +
                'tabs for this page are closed. See https://bit.ly/CRA-PWA.',
            );
            if (config && config.onUpdate) {
              config.onUpdate(registration);
            }
          } else {
            // First install: everything is precached and ready for offline use.
            console.log('Content is cached for offline use.');
            if (config && config.onSuccess) {
              config.onSuccess(registration);
            }
          }
        };
      };
    })
    .catch((error) => {
      console.error('Error during service worker registration:', error);
    });
}
69 |
// Probes swUrl before registering. If the URL 404s or does not serve
// JavaScript (likely a different app deployed at this origin), the stale
// worker is unregistered and the page reloaded; otherwise registration
// proceeds normally.
function checkValidServiceWorker(swUrl, config) {
  fetch(swUrl, { headers: { 'Service-Worker': 'script' } })
    .then((response) => {
      const contentType = response.headers.get('content-type');
      const workerMissing =
        response.status === 404 ||
        (contentType != null && contentType.indexOf('javascript') === -1);
      if (!workerMissing) {
        registerValidSW(swUrl, config);
        return;
      }
      navigator.serviceWorker.ready.then((registration) =>
        registration.unregister().then(() => {
          window.location.reload();
        }),
      );
    })
    .catch(() => {
      console.log(
        'No internet connection found. App is running in offline mode.',
      );
    });
}
99 |
// Registers the app's service worker in production builds only. On localhost
// the worker is validated first (so a stale worker from another app is
// cleaned up); elsewhere it is registered directly.
export function register(config) {
  if (process.env.NODE_ENV !== 'production' || !('serviceWorker' in navigator)) {
    return;
  }
  // The URL constructor is available in all browsers that support SW.
  const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href);
  if (publicUrl.origin !== window.location.origin) {
    // A worker served from a different origin (e.g. CDN-hosted assets) cannot
    // control this page; see
    // https://github.com/facebook/create-react-app/issues/2374
    return;
  }

  window.addEventListener('load', () => {
    const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`;

    if (!isLocalhost) {
      // Not localhost: register without the extra validation round-trip.
      registerValidSW(swUrl, config);
      return;
    }

    checkValidServiceWorker(swUrl, config);

    // Extra logging on localhost to point developers at the SW/PWA docs.
    navigator.serviceWorker.ready.then(() => {
      console.log(
        'This web app is being served cache-first by a service ' +
          'worker. To learn more, visit https://bit.ly/CRA-PWA',
      );
    });
  });
}
133 |
// Unregisters any service worker controlling the page; errors are logged only.
export function unregister() {
  if (!('serviceWorker' in navigator)) {
    return;
  }
  navigator.serviceWorker.ready
    .then((registration) => registration.unregister())
    .catch((error) => console.error(error.message));
}
145 |
--------------------------------------------------------------------------------
/dashboard/src/setupTests.js:
--------------------------------------------------------------------------------
1 | import '@testing-library/jest-dom';
2 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | go_dashboard:
5 | build:
6 | context: docker/
7 | dockerfile: Dockerfile
8 | volumes:
9 | - ./:/app
10 | - ~/.ssh:/home/node/.ssh:ro
11 | environment:
12 | - "NODE_OPTIONS=--openssl-legacy-provider"
13 | working_dir: /app/dashboard
14 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG NODE_VERSION=21 # https://hub.docker.com/_/node/tags?page=1&name=21
2 | ARG GOSU_VERSION=1.17 # https://github.com/tianon/gosu/releases
3 |
4 | FROM alpine/curl as build_gosu
5 | ARG GOSU_VERSION
6 |
7 | RUN curl -fsLo /usr/local/bin/gosu https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-$([ "$(uname -m)" = "aarch64" ] && echo "arm64" || echo "amd64")
8 | RUN chmod +x /usr/local/bin/gosu
9 |
10 | FROM node:${NODE_VERSION}
11 |
12 | WORKDIR /app/dashboard
13 |
14 | COPY entrypoint_dev.sh /usr/local/bin/entrypoint_dev.sh
15 | COPY --from=build_gosu /usr/local/bin/gosu /usr/local/bin/gosu
16 |
17 | ENTRYPOINT ["/usr/local/bin/entrypoint_dev.sh"]
18 | CMD true
19 |
--------------------------------------------------------------------------------
/docker/entrypoint_dev.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -e

# Align the "node" user/group ids with the owner of the bind-mounted /app so
# files created in the container keep the host user's ownership. The command
# substitutions are quoted to avoid word splitting; "|| true" keeps startup
# going when the ids already match (usermod/groupmod fail on a no-op).
usermod -u "$(stat -c %u /app)" node || true
groupmod -g "$(stat -c %g /app)" node || true

# Replace the shell with the requested command running as "node" so that
# signals sent by Docker reach the process directly.
exec gosu node "$@"
10 |
--------------------------------------------------------------------------------
/fixtures/test2_blackfire.ini:
--------------------------------------------------------------------------------
1 | [blackfire]
2 |
3 | client-id=ab6f24b1-3103-4503-9f68-93d4b3f10c7c
4 | client-token=ec4f5fb9f43ec7004b44fc2f217c944c324c6225efcf144c2cee65eb5c45754c
5 | timeout=1s
6 |
--------------------------------------------------------------------------------
/fixtures/test_blackfire.ini:
--------------------------------------------------------------------------------
1 | [blackfire]
2 |
3 | client-id=ab6f24b1-3103-4503-9f68-93d4b3f10c7c
4 | client-token=ec4f5fb9f43ec7004b44fc2f217c944c324c6225efcf144c2cee65eb5c45754c
5 | endpoint=https://blackfire.io/ini
6 | timeout=1s
7 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/blackfireio/go-blackfire
2 |
3 | go 1.11
4 |
5 | require (
6 | github.com/blackfireio/osinfo v1.0.2
7 | github.com/go-ini/ini v1.51.1
8 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
9 | github.com/pkg/errors v0.8.1
10 | github.com/rakyll/statik v0.1.7
11 | github.com/rs/zerolog v1.17.2
12 | github.com/smartystreets/goconvey v1.6.4 // indirect
13 | github.com/stretchr/testify v1.7.0
14 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
15 | gopkg.in/ini.v1 v1.62.0 // indirect
16 | )
17 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/blackfireio/osinfo v1.0.2 h1:u3ds4GS9l+WGEnNP0R7ED3JxhTXNd0Upg/Lg1rJZRCw=
2 | github.com/blackfireio/osinfo v1.0.2/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA=
3 | github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
4 | github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
6 | github.com/go-ini/ini v1.51.1 h1:/QG3cj23k5V8mOl4JnNzUNhc1kr/jzMiNsNuWKcx8gM=
7 | github.com/go-ini/ini v1.51.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
8 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
9 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
10 | github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
11 | github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
12 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
13 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
14 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
15 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
16 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
17 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
18 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
19 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
20 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
21 | github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
22 | github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
23 | github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
24 | github.com/rs/zerolog v1.17.2 h1:RMRHFw2+wF7LO0QqtELQwo8hqSmqISyCJeFeAAuWcRo=
25 | github.com/rs/zerolog v1.17.2/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I=
26 | github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
27 | github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
28 | github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
29 | github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
30 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
31 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
32 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
33 | github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
34 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
35 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
36 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
37 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
38 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
39 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
40 | golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
41 | golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
42 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
43 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
44 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
45 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
46 | gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
47 | gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
48 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
49 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
50 |
--------------------------------------------------------------------------------
/http.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "io/ioutil"
7 | "net/http"
8 | "strconv"
9 | "strings"
10 | "time"
11 |
12 | _ "github.com/blackfireio/go-blackfire/statik"
13 | "github.com/rakyll/statik/fs"
14 | )
15 |
16 | type problem struct {
17 | Title string `json:"title"`
18 | Status int `json:"status"`
19 | Detail string `json:"detail"`
20 | }
21 |
22 | // NewServeMux returns an http.ServerMux that allows to manage profiling from HTTP
23 | func NewServeMux(prefix string) (mux *http.ServeMux, err error) {
24 | if err = globalProbe.configuration.load(); err != nil {
25 | return
26 | }
27 | prefix = strings.Trim(prefix, "/")
28 | mux = http.NewServeMux()
29 | mux.HandleFunc("/"+prefix+"/dashboard", DashboardHandler)
30 | mux.HandleFunc("/"+prefix+"/dashboard_api", DashboardApiHandler)
31 | mux.HandleFunc("/"+prefix+"/enable", EnableHandler)
32 | mux.HandleFunc("/"+prefix+"/disable", DisableHandler)
33 | mux.HandleFunc("/"+prefix+"/end", EndHandler)
34 |
35 | return
36 | }
37 |
38 | // DashboardHandler displays the current status of the profiler
39 | func DashboardHandler(w http.ResponseWriter, r *http.Request) {
40 | logger := globalProbe.configuration.Logger
41 | statikFS, err := fs.New()
42 | if err != nil {
43 | logger.Error().Msgf("Blackfire (HTTP): %s", err)
44 | w.WriteHeader(500)
45 | w.Write([]byte(err.Error()))
46 | return
47 | }
48 | f, err := statikFS.Open("/index.html")
49 | if err != nil {
50 | logger.Error().Msgf("Blackfire (HTTP): %s", err)
51 | w.WriteHeader(500)
52 | w.Write([]byte(err.Error()))
53 | return
54 | }
55 | defer f.Close()
56 | contents, err := ioutil.ReadAll(f)
57 | if err != nil {
58 | logger.Error().Msgf("Blackfire (HTTP): %s", err)
59 | w.WriteHeader(500)
60 | w.Write([]byte(err.Error()))
61 | return
62 | }
63 | w.Write(contents)
64 | }
65 |
66 | func DashboardApiHandler(w http.ResponseWriter, r *http.Request) {
67 | writeJsonStatus(w)
68 | }
69 |
70 | // EnableHandler starts profiling via HTTP
71 | func EnableHandler(w http.ResponseWriter, r *http.Request) {
72 | logger := globalProbe.configuration.Logger
73 | if title, found := parseString(r, "title"); found {
74 | globalProbe.SetCurrentTitle(title)
75 | }
76 | durationInSeconds, err := parseFloat(r, "duration")
77 | if err != nil {
78 | writeJsonError(w, &problem{Status: 400, Title: "Wrong duration", Detail: err.Error()})
79 | return
80 | }
81 |
82 | duration := time.Duration(durationInSeconds * float64(time.Second))
83 | if durationInSeconds > 0 {
84 | logger.Info().Msgf("Blackfire (HTTP): Profiling for %f seconds", float64(duration)/1000000000)
85 | } else {
86 | logger.Info().Msgf("Blackfire (HTTP): Enable profiling")
87 | }
88 | err = globalProbe.EnableNowFor(duration)
89 | if err != nil {
90 | writeJsonError(w, &problem{Status: 500, Title: "Enable error", Detail: err.Error()})
91 | } else {
92 | writeJsonStatus(w)
93 | }
94 | }
95 |
96 | // DisableHandler stops profiling via HTTP
97 | func DisableHandler(w http.ResponseWriter, r *http.Request) {
98 | logger := globalProbe.configuration.Logger
99 | logger.Info().Msgf("Blackfire (HTTP): Disable profiling")
100 | if err := globalProbe.Disable(); err != nil {
101 | writeJsonError(w, &problem{Status: 500, Title: "Disable error", Detail: err.Error()})
102 | } else {
103 | writeJsonStatus(w)
104 | }
105 | }
106 |
107 | // EndHandler stops profiling via HTTP and send the profile to the agent
108 | func EndHandler(w http.ResponseWriter, r *http.Request) {
109 | logger := globalProbe.configuration.Logger
110 | logger.Info().Msgf("Blackfire (HTTP): End profiling")
111 | if err := globalProbe.End(); err != nil {
112 | writeJsonError(w, &problem{Status: 500, Title: "End error", Detail: err.Error()})
113 | } else {
114 | writeJsonStatus(w)
115 | }
116 | }
117 |
118 | func parseFloat(r *http.Request, paramName string) (value float64, err error) {
119 | value = 0
120 | if values, ok := r.URL.Query()[paramName]; ok {
121 | if len(values) > 0 {
122 | value, err = strconv.ParseFloat(values[0], 64)
123 | }
124 | }
125 | return
126 | }
127 |
128 | func parseString(r *http.Request, paramName string) (value string, found bool) {
129 | value = ""
130 | if values, ok := r.URL.Query()[paramName]; ok {
131 | if len(values) > 0 {
132 | found = true
133 | value = values[0]
134 | }
135 | }
136 | return
137 | }
138 |
139 | func writeJsonError(w http.ResponseWriter, problem *problem) {
140 | logger := globalProbe.configuration.Logger
141 | logger.Error().Msgf("Blackfire (HTTP): %s: %s", problem.Title, problem.Detail)
142 | w.Header().Set("Content-Type", "application/problem+json")
143 | w.WriteHeader(problem.Status)
144 | data, _ := json.Marshal(problem)
145 | w.Write(data)
146 | }
147 |
// writeJsonStatus writes the profiler's current state and the list of
// recently recorded profiles as JSON for the dashboard. The payload is built
// with Sprintf templates rather than encoding/json, so boolean and list
// rendering is done by hand.
// NOTE(review): leading whitespace inside the raw-string templates was lost
// in this dump; the indentation below is cosmetic JSON formatting only —
// confirm against the original file.
func writeJsonStatus(w http.ResponseWriter) {
	// Render the enabled flag as a bare JSON boolean.
	profiling := "false"
	if globalProbe.currentState == profilerStateEnabled {
		profiling = "true"
	}
	profiles := []string{}
	// agentClient is nil until the probe has connected at least once.
	if globalProbe.agentClient != nil {
		for _, profile := range globalProbe.agentClient.LastProfiles() {
			profiles = append(profiles, fmt.Sprintf(`{
"UUID": "%s",
"url": "%s",
"name": "%s",
"status": "%s",
"created_at": "%s"
}`, profile.UUID, profile.URL, profile.Title, profile.Status.Name, profile.CreatedAt.Format(time.RFC3339)))
		}
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(fmt.Sprintf(`{
"profiling": {
"enabled": %s,
"sample_rate": %d
},
"profiles": {
"_embedded": [
%s
]
}
}`, profiling, globalProbe.configuration.DefaultCPUSampleRateHz, strings.Join(profiles, ","))))
}
178 |
--------------------------------------------------------------------------------
/log.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "io"
5 | "log"
6 | "os"
7 | "strconv"
8 |
9 | "github.com/rs/zerolog"
10 | )
11 |
12 | func NewLogger(path string, level int) zerolog.Logger {
13 | return zerolog.New(logWriter(path)).Level(logLevel(level)).With().Timestamp().Logger()
14 | }
15 |
16 | func NewLoggerFromEnvVars() zerolog.Logger {
17 | level := 1
18 | if v := os.Getenv("BLACKFIRE_LOG_LEVEL"); v != "" {
19 | level, _ = strconv.Atoi(v)
20 | }
21 | path := ""
22 | if v := os.Getenv("BLACKFIRE_LOG_FILE"); v != "" {
23 | path = v
24 | }
25 | return zerolog.New(logWriter(path)).Level(logLevel(level)).With().Timestamp().Logger()
26 | }
27 |
28 | func logLevel(level int) zerolog.Level {
29 | if level < 1 {
30 | level = 1
31 | }
32 | if level > 4 {
33 | level = 4
34 | }
35 | var levels = map[int]zerolog.Level{
36 | 1: zerolog.ErrorLevel,
37 | 2: zerolog.WarnLevel,
38 | 3: zerolog.InfoLevel,
39 | 4: zerolog.DebugLevel,
40 | }
41 | return levels[level]
42 | }
43 |
44 | func logWriter(path string) io.Writer {
45 | if path == "" || path == "stderr" {
46 | return os.Stderr
47 | }
48 | if path == "stdout" {
49 | return os.Stdout
50 | }
51 | writer, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0664)
52 | if err != nil {
53 | log.Fatalf("could not open log file at %s: %v", path, err)
54 | }
55 | return writer
56 | }
57 |
--------------------------------------------------------------------------------
/pprof_reader/README.md:
--------------------------------------------------------------------------------
1 | PProf Reader
2 | ============
3 |
4 | Library to read profiles written in Go's pprof format.
5 |
6 | PProf is sampled data encoded into protobufs, which is then gzipped.
7 | `internal/profile` is copied directly from golang's
8 | `src/runtime/pprof/internal/profile` directory.
9 |
10 | This library reads a pprof profile and converts it to an edge based graph
11 | similar to Blackfire.
12 |
13 | Usage:
14 |
15 | ```golang
16 | fr, err := os.Open(filename)
17 | if err != nil {
18 | return nil, err
19 | }
20 | defer fr.Close()
21 |
22 | profile, err := pprof_reader.ReadFromPProf(fr)
23 | if err != nil {
24 | return nil, err
25 | }
26 |
27 | err = pprof_reader.WriteBFFormat(profile, os.Stdout)
28 | ...
29 | ```
30 |
--------------------------------------------------------------------------------
/pprof_reader/fixtures/wt.bf:
--------------------------------------------------------------------------------
1 | file-format: BlackfireProbe
2 | Cost-Dimensions: wt cpu mu pmu
3 | graph-root-id: net/http.(*conn).serve
4 |
5 | net/http.checkConnErrorWriter.Write==>net.(*conn).Write//1 70000 0 0 0
6 | database/sql.(*DB).Query==>database/sql.(*DB).QueryContext//1 240000 0 0 0
7 | github.com/lib/pq.dial==>github.com/lib/pq.defaultDialer.DialContext//1 150000 0 0 0
8 | database/sql.(*DB).conn==>database/sql.(*driverConn).Close//1 110000 0 0 0
9 | syscall.Close==>syscall.Syscall//6 60000 0 0 0
10 | github.com/lib/pq.(*conn).auth==>github.com/lib/pq.(*conn).send//1 10000 0 0 0
11 | github.com/lib/pq.(*conn).auth==>github.com/lib/pq.(*conn).recv//1 10000 0 0 0
12 | database/sql.dsnConnector.Connect==>github.com/lib/pq.(*Driver).Open//1 270000 0 0 0
13 | net.socket==>net.(*netFD).dial//1 90000 0 0 0
14 | runtime.makeslice==>runtime.mallocgc//1 10000 0 0 0
15 | syscall.Syscall==>runtime.entersyscall//1 40000 0 0 0
16 | runtime.newstack==>runtime.copystack//1 20000 0 0 0
17 | database/sql.(*DB).removeDepLocked==>runtime.mapdelete//1 10000 0 0 0
18 | runtime.assertI2I2==>runtime.getitab//1 10000 0 0 0
19 | net/http.(*Request).wantsClose==>net/http.hasToken//1 10000 0 0 0
20 | net.(*sysDialer).doDialTCP==>net.internetSocket//1 130000 0 0 0
21 | main.(*Store).StoreWidget==>database/sql.(*DB).Exec//1 230000 0 0 0
22 | database/sql.(*driverConn).finalClose==>database/sql.withLock//1 100000 0 0 0
23 | database/sql.(*DB).queryDC==>database/sql.withLock//1 40000 0 0 0
24 | runtime.gentraceback==>runtime.funcspdelta//1 10000 0 0 0
25 | syscall.Getpeername==>syscall.getpeername//1 10000 0 0 0
26 | net/http.(*conn).close==>net.(*conn).Close//1 30000 0 0 0
27 | runtime.systemstack==>runtime.entersyscall_sysmon//1 40000 0 0 0
28 | runtime.newproc==>runtime.systemstack//1 10000 0 0 0
29 | github.com/lib/pq.(*conn).startup==>github.com/lib/pq.(*conn).processParameterStatus//1 10000 0 0 0
30 | github.com/gorilla/mux.(*Router).Match==>github.com/gorilla/mux.(*Route).Match//1 10000 0 0 0
31 | net.(*conn).Write==>net.(*netFD).Write//1 180000 0 0 0
32 | syscall.Socket==>syscall.socket//1 40000 0 0 0
33 | syscall.Connect==>syscall.connect//1 60000 0 0 0
34 | github.com/lib/pq.parseEnviron==>strings.SplitN//1 20000 0 0 0
35 | runtime.newproc1==>runtime.gfget//1 10000 0 0 0
36 | database/sql.(*DB).Exec==>database/sql.(*DB).ExecContext//1 230000 0 0 0
37 | database/sql.(*DB).execDC.func2==>database/sql.ctxDriverExec//1 30000 0 0 0
38 | database/sql.ctxDriverExec==>github.com/lib/pq.(*conn).ExecContext//1 30000 0 0 0
39 | github.com/lib/pq.(*conn).recv1Buf==>github.com/lib/pq.(*conn).recvMessage//1 20000 0 0 0
40 | github.com/lib/pq.DialOpen==>github.com/lib/pq.NewConnector//1 30000 0 0 0
41 | strings.SplitN==>strings.genSplit//1 20000 0 0 0
42 | github.com/lib/pq.(*conn).processParameterStatus==>time.LoadLocation//1 10000 0 0 0
43 | main.(*HttpServer).httpHandlerGetWidgets==>encoding/json.Marshal//1 10000 0 0 0
44 | bufio.(*Writer).Flush==>net/http.checkConnErrorWriter.Write//1 70000 0 0 0
45 | database/sql.(*DB).exec==>database/sql.(*DB).execDC//1 50000 0 0 0
46 | github.com/lib/pq.(*conn).ExecContext==>github.com/lib/pq.(*conn).Exec//1 30000 0 0 0
47 | runtime.entersyscall==>runtime.reentersyscall//1 40000 0 0 0
48 | github.com/lib/pq.(*conn).sendSimpleMessage==>net.(*conn).Write//1 60000 0 0 0
49 | net/http.(*connReader).abortPendingRead==>sync.(*Cond).Wait//1 10000 0 0 0
50 | github.com/lib/pq.(*conn).auth==>github.com/lib/pq.md5s//1 10000 0 0 0
51 | net.setKeepAlivePeriod==>internal/poll.(*FD).SetsockoptInt//1 20000 0 0 0
52 | database/sql.(*driverConn).Close==>database/sql.(*DB).removeDepLocked//1 10000 0 0 0
53 | encoding/json.sliceEncoder.encode==>encoding/json.arrayEncoder.encode//1 10000 0 0 0
54 | net.sockaddrToTCP==>runtime.newobject//1 10000 0 0 0
55 | time.readFile==>runtime.newstack//1 10000 0 0 0
56 | sync.(*Cond).Wait==>sync.runtime_notifyListWait//1 10000 0 0 0
57 | net.(*sysDialer).dialSerial==>net.(*sysDialer).dialSingle//1 130000 0 0 0
58 | net.socket==>net.sysSocket//1 40000 0 0 0
59 | github.com/lib/pq.(*conn).Close==>github.com/lib/pq.(*conn).Close.func1//1 40000 0 0 0
60 | github.com/lib/pq.(*stmt).exec==>github.com/lib/pq.(*conn).readBindResponse//1 20000 0 0 0
61 | bufio.(*Reader).ReadLine==>bufio.(*Reader).ReadSlice//1 20000 0 0 0
62 | sync.runtime_notifyListWait==>runtime.acquireSudog//1 10000 0 0 0
63 | internal/poll.runtime_pollOpen==>runtime.netpollopen//1 10000 0 0 0
64 | net/http.readRequest==>net/textproto.(*Reader).ReadMIMEHeader//1 20000 0 0 0
65 | syscall.setsockopt==>syscall.Syscall6//2 20000 0 0 0
66 | github.com/lib/pq.(*conn).simpleQuery==>github.com/lib/pq.(*conn).recv1//1 10000 0 0 0
67 | runtime.growslice==>runtime.mallocgc//1 10000 0 0 0
68 | internal/poll.(*FD).Write==>syscall.Write//1 170000 0 0 0
69 | net/http.(*conn).serve==>net/http.(*conn).serve.func1//1 30000 0 0 0
70 | internal/poll.(*FD).Read==>syscall.Read//1 80000 0 0 0
71 | net.(*netFD).dial==>net.sockaddrToTCP//1 10000 0 0 0
72 | github.com/lib/pq.(*conn).sendStartupPacket==>net.(*conn).Write//1 10000 0 0 0
73 | database/sql.(*DB).execDC==>runtime.assertI2I2//1 10000 0 0 0
74 | runtime.tracebackdefers==>runtime.adjustframe//1 10000 0 0 0
75 | net.(*conn).Close==>net.(*netFD).Close//1 70000 0 0 0
76 | net/textproto.(*Reader).ReadMIMEHeader==>runtime.makeslice//1 10000 0 0 0
77 | database/sql.(*DB).execDC==>database/sql.withLock//1 40000 0 0 0
78 | strings.genSplit==>strings.Index//1 20000 0 0 0
79 | runtime.mapaccess1_faststr==>runtime.add//1 10000 0 0 0
80 | internal/poll.(*FD).Init==>internal/poll.(*pollDesc).init//1 10000 0 0 0
81 | database/sql.withLock==>database/sql.(*DB).execDC.func2//1 30000 0 0 0
82 | runtime.futexwakeup==>runtime.futex//4 40000 0 0 0
83 | strings.IndexByte==>indexbytebody//1 10000 0 0 0
84 | time.loadTzinfoFromDirOrZip==>time.readFile//1 10000 0 0 0
85 | runtime.newobject==>runtime.mallocgc//1 30000 0 0 0
86 | main.(*Store).GetAllWidgets==>runtime.growslice//1 10000 0 0 0
87 | github.com/lib/pq.(*conn).startup==>github.com/lib/pq.(*conn).sendStartupPacket//1 10000 0 0 0
88 | net.internetSocket==>net.socket//1 130000 0 0 0
89 | database/sql.(*driverConn).finalClose.func2==>github.com/lib/pq.(*conn).Close//1 100000 0 0 0
90 | bufio.(*Reader).Read==>net.(*conn).Read//1 60000 0 0 0
91 | net.(*netFD).connect==>syscall.Connect//1 60000 0 0 0
92 | time.LoadLocation==>time.loadLocation//1 10000 0 0 0
93 | context.WithValue==>runtime.newobject//1 10000 0 0 0
94 | strings.IndexByte==>internal/bytealg.IndexByteString//1 10000 0 0 0
95 | github.com/lib/pq.parseEnviron==>runtime.makemap_small//1 10000 0 0 0
96 | net/http.(*conn).readRequest==>net/http.(*Request).wantsClose//1 10000 0 0 0
97 | net/http.(*conn).serve//1 680000 0 0 0
98 | database/sql.(*DB).queryDC.func1==>database/sql.ctxDriverQuery//1 40000 0 0 0
99 | strings.Index==>strings.IndexByte//1 20000 0 0 0
100 | net/http.(*conn).serve==>net/http.(*connReader).startBackgroundRead//1 10000 0 0 0
101 | github.com/gorilla/mux.routeRegexpGroup.setMatch==>regexp.(*Regexp).FindStringSubmatchIndex//1 10000 0 0 0
102 | internal/poll.(*FD).Write==>runtime.newstack//1 10000 0 0 0
103 | net/http.(*conn).serve==>net/http.(*response).finishRequest//1 80000 0 0 0
104 | net/http.serverHandler.ServeHTTP==>github.com/gorilla/mux.(*Router).ServeHTTP//1 500000 0 0 0
105 | database/sql.(*DB).conn==>database/sql.dsnConnector.Connect//1 270000 0 0 0
106 | github.com/lib/pq.(*conn).readBindResponse==>github.com/lib/pq.(*conn).recv1//1 10000 0 0 0
107 | github.com/lib/pq.(*conn).prepareTo==>github.com/lib/pq.(*conn).send//1 10000 0 0 0
108 | main.(*Store).GetAllWidgets==>database/sql.(*DB).Query//1 240000 0 0 0
109 | net/http.HandlerFunc.ServeHTTP==>main.(*HttpServer).httpHandlerCreateWidget//1 230000 0 0 0
110 | encoding/json.arrayEncoder.encode==>encoding/json.ptrEncoder.encode//1 10000 0 0 0
111 | net.(*sysDialer).dialTCP==>net.(*sysDialer).doDialTCP//1 130000 0 0 0
112 | github.com/lib/pq.(*Connector).open==>github.com/lib/pq.(*conn).startup//1 90000 0 0 0
113 | syscall.getpeername==>syscall.RawSyscall//1 10000 0 0 0
114 | time.loadTzinfo==>time.loadTzinfoFromDirOrZip//1 10000 0 0 0
115 | github.com/lib/pq.(*conn).startup==>github.com/lib/pq.(*conn).auth//1 30000 0 0 0
116 | database/sql.withLock==>database/sql.(*driverConn).finalClose.func2//1 100000 0 0 0
117 | net.(*netFD).Close==>internal/poll.(*FD).Close//1 70000 0 0 0
118 | github.com/lib/pq.(*conn).simpleQuery==>github.com/lib/pq.(*conn).send//1 30000 0 0 0
119 | bufio.(*Reader).ReadSlice==>bufio.(*Reader).fill//1 20000 0 0 0
120 | database/sql.(*DB).exec==>database/sql.(*DB).conn//1 180000 0 0 0
121 | database/sql.ctxDriverQuery==>github.com/lib/pq.(*conn).QueryContext//1 40000 0 0 0
122 | internal/poll.(*FD).SetsockoptInt==>syscall.SetsockoptInt//1 20000 0 0 0
123 | runtime.systemstack==>runtime.newproc.func1//1 10000 0 0 0
124 | net/http.(*connReader).startBackgroundRead==>runtime.newproc//1 10000 0 0 0
125 | time.loadLocation==>time.loadTzinfo//1 10000 0 0 0
126 | runtime.adjustframe==>runtime.getStackMap//1 10000 0 0 0
127 | github.com/lib/pq.(*conn).Close.func1==>net.(*conn).Close//1 40000 0 0 0
128 | runtime.mallocgc==>runtime.heapBitsSetType//4 40000 0 0 0
129 | database/sql.(*DB).query==>database/sql.(*DB).queryDC//1 40000 0 0 0
130 | github.com/lib/pq.(*conn).query==>github.com/lib/pq.(*conn).simpleQuery//1 40000 0 0 0
131 | io.ReadAtLeast==>bufio.(*Reader).Read//1 60000 0 0 0
132 | syscall.Write==>syscall.write//1 170000 0 0 0
133 | net.(*netFD).Read==>internal/poll.(*FD).Read//1 80000 0 0 0
134 | net/textproto.canonicalMIMEHeaderKey==>runtime.mapaccess1_faststr//1 10000 0 0 0
135 | syscall.read==>syscall.Syscall//4 80000 0 0 0
136 | syscall.SetsockoptInt==>syscall.setsockopt//1 20000 0 0 0
137 | regexp.(*Regexp).FindStringSubmatchIndex==>regexp.(*Regexp).pad//1 10000 0 0 0
138 | runtime.adjustdefers==>runtime.tracebackdefers//1 10000 0 0 0
139 | net/http.(*response).finishRequest==>net/http.(*connReader).abortPendingRead//1 10000 0 0 0
140 | runtime.notewakeup==>runtime.futexwakeup//1 40000 0 0 0
141 | net/textproto.(*Reader).ReadLine==>net/textproto.(*Reader).readLineSlice//1 20000 0 0 0
142 | net/textproto.(*Reader).readLineSlice==>bufio.(*Reader).ReadLine//1 20000 0 0 0
143 | net/http.HandlerFunc.ServeHTTP==>main.(*HttpServer).httpHandlerGetWidgets//1 260000 0 0 0
144 | github.com/lib/pq.DialOpen==>github.com/lib/pq.(*Connector).open//1 240000 0 0 0
145 | github.com/lib/pq.defaultDialer.DialContext==>net.(*Dialer).DialContext//1 150000 0 0 0
146 | runtime.netpollopen==>runtime.epollctl//1 10000 0 0 0
147 | io.ReadFull==>io.ReadAtLeast//1 60000 0 0 0
148 | net/http.(*connReader).Read==>net.(*conn).Read//1 20000 0 0 0
149 | github.com/lib/pq.(*conn).recv1==>github.com/lib/pq.(*conn).recv1Buf//1 20000 0 0 0
150 | github.com/gorilla/mux.(*Router).ServeHTTP==>github.com/gorilla/mux.(*Router).Match//1 10000 0 0 0
151 | runtime.makemap_small==>runtime.newobject//1 10000 0 0 0
152 | net.(*netFD).Write==>internal/poll.(*FD).Write//1 180000 0 0 0
153 | database/sql.(*driverConn).Close==>database/sql.(*driverConn).finalClose//1 100000 0 0 0
154 | net.(*netFD).dial==>net.(*netFD).connect//1 80000 0 0 0
155 | github.com/lib/pq.(*conn).send==>net.(*conn).Write//1 50000 0 0 0
156 | github.com/lib/pq.(*conn).Exec==>github.com/lib/pq.(*stmt).Exec//1 20000 0 0 0
157 | github.com/lib/pq.(*conn).Close==>github.com/lib/pq.(*conn).sendSimpleMessage//1 60000 0 0 0
158 | syscall.connect==>syscall.Syscall//6 60000 0 0 0
159 | github.com/lib/pq.(*conn).recv==>github.com/lib/pq.(*conn).recvMessage//1 50000 0 0 0
160 | syscall.write==>syscall.Syscall//17 170000 0 0 0
161 | net/http.(*conn).serve==>net/http.serverHandler.ServeHTTP//1 500000 0 0 0
162 | github.com/lib/pq.md5s==>runtime.convTslice//1 10000 0 0 0
163 | net/http.(*conn).readRequest==>net/http.readRequest//1 40000 0 0 0
164 | database/sql.withLock==>database/sql.(*DB).queryDC.func1//1 40000 0 0 0
165 | runtime.pcvalue==>runtime.step//1 10000 0 0 0
166 | net/textproto.(*Reader).ReadMIMEHeader==>net/textproto.canonicalMIMEHeaderKey//1 10000 0 0 0
167 | database/sql.(*DB).query==>database/sql.(*DB).conn//1 200000 0 0 0
168 | github.com/lib/pq.Open==>github.com/lib/pq.DialOpen//1 270000 0 0 0
169 | net.sysSocket==>syscall.Socket//1 40000 0 0 0
170 | internal/poll.(*FD).destroy==>syscall.Close//1 60000 0 0 0
171 | net/http.(*conn).serve.func1==>net/http.(*conn).close//1 30000 0 0 0
172 | github.com/lib/pq.(*conn).startup==>github.com/lib/pq.(*conn).recv//1 40000 0 0 0
173 | database/sql.(*DB).ExecContext==>database/sql.(*DB).exec//1 230000 0 0 0
174 | net.(*netFD).connect==>internal/poll.(*FD).Init//1 10000 0 0 0
175 | runtime.entersyscall_sysmon==>runtime.notewakeup//1 40000 0 0 0
176 | net/http.readRequest==>net/textproto.(*Reader).ReadLine//1 20000 0 0 0
177 | github.com/lib/pq.NewConnector==>github.com/lib/pq.parseEnviron//1 30000 0 0 0
178 | net.(*conn).Read==>net.(*netFD).Read//1 80000 0 0 0
179 | net.(*Dialer).DialContext==>net.setKeepAlivePeriod//1 20000 0 0 0
180 | encoding/json.ptrEncoder.encode==>encoding/json.structEncoder.encode//1 10000 0 0 0
181 | github.com/lib/pq.(*Driver).Open==>github.com/lib/pq.Open//1 270000 0 0 0
182 | github.com/lib/pq.(*Connector).open==>github.com/lib/pq.dial//1 150000 0 0 0
183 | internal/poll.(*FD).Close==>internal/poll.(*FD).decref//1 70000 0 0 0
184 | net/http.(*conn).serve==>net/http.(*conn).readRequest//1 50000 0 0 0
185 | github.com/lib/pq.(*stmt).Exec==>github.com/lib/pq.(*stmt).exec//1 20000 0 0 0
186 | net.(*netFD).connect==>syscall.Getpeername//1 10000 0 0 0
187 | net/http.(*conn).serve==>context.WithValue//1 10000 0 0 0
188 | runtime.reentersyscall==>runtime.systemstack//1 40000 0 0 0
189 | runtime.newproc.func1==>runtime.newproc1//1 10000 0 0 0
190 | encoding/json.Marshal==>encoding/json.(*encodeState).marshal//1 10000 0 0 0
191 | github.com/gorilla/mux.(*Router).ServeHTTP==>net/http.HandlerFunc.ServeHTTP//1 490000 0 0 0
192 | database/sql.(*DB).QueryContext==>database/sql.(*DB).query//1 240000 0 0 0
193 | net.(*sysDialer).dialSingle==>net.(*sysDialer).dialTCP//1 130000 0 0 0
194 | internal/poll.(*pollDesc).init==>internal/poll.runtime_pollOpen//1 10000 0 0 0
195 | syscall.Read==>syscall.read//1 80000 0 0 0
196 | encoding/json.(*encodeState).marshal==>encoding/json.(*encodeState).reflectValue//1 10000 0 0 0
197 | internal/poll.(*FD).decref==>internal/poll.(*FD).destroy//1 70000 0 0 0
198 | github.com/lib/pq.(*conn).QueryContext==>github.com/lib/pq.(*conn).query//1 40000 0 0 0
199 | github.com/lib/pq.(*conn).recvMessage==>io.ReadFull//1 60000 0 0 0
200 | net/http.(*response).finishRequest==>bufio.(*Writer).Flush//1 70000 0 0 0
201 | main.(*HttpServer).httpHandlerGetWidgets==>main.(*Store).GetAllWidgets//1 250000 0 0 0
202 | net.(*Dialer).DialContext==>net.(*sysDialer).dialSerial//1 130000 0 0 0
203 | syscall.socket==>syscall.RawSyscall//4 40000 0 0 0
204 | main.(*HttpServer).httpHandlerCreateWidget==>main.(*Store).StoreWidget//1 230000 0 0 0
205 | bufio.(*Reader).fill==>net/http.(*connReader).Read//1 20000 0 0 0
206 | encoding/json.(*encodeState).reflectValue==>encoding/json.sliceEncoder.encode//1 10000 0 0 0
207 | runtime.copystack==>runtime.adjustdefers//1 10000 0 0 0
208 | github.com/lib/pq.(*conn).Exec==>github.com/lib/pq.(*conn).prepareTo//1 10000 0 0 0
209 | runtime.copystack==>runtime.gentraceback//1 10000 0 0 0
210 | runtime.funcspdelta==>runtime.pcvalue//1 10000 0 0 0
211 | github.com/gorilla/mux.(*Route).Match==>github.com/gorilla/mux.routeRegexpGroup.setMatch//1 10000 0 0 0
212 |
--------------------------------------------------------------------------------
/pprof_reader/fixtures/wt.pprof.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blackfireio/go-blackfire/4a623d3140f4106dd77fadcb8cc14850426b2662/pprof_reader/fixtures/wt.pprof.gz
--------------------------------------------------------------------------------
/pprof_reader/internal/profile/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2009 The Go Authors. All rights reserved.
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are
5 | met:
6 |
7 | * Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 | * Redistributions in binary form must reproduce the above
10 | copyright notice, this list of conditions and the following disclaimer
11 | in the documentation and/or other materials provided with the
12 | distribution.
13 | * Neither the name of Google Inc. nor the names of its
14 | contributors may be used to endorse or promote products derived from
15 | this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/pprof_reader/internal/profile/encode.go:
--------------------------------------------------------------------------------
1 | // Copyright 2014 The Go Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | package profile
6 |
7 | import (
8 | "errors"
9 | "fmt"
10 | "sort"
11 | )
12 |
// decoder returns the per-field decoder table consulted by
// decodeMessage when unmarshaling a Profile protobuf message.
func (p *Profile) decoder() []decoder {
	return profileDecoder
}
16 |
// preEncode populates the unexported fields to be used by encode
// (with suffix X) from the corresponding exported fields. The
// exported fields are cleared up to facilitate testing.
func (p *Profile) preEncode() {
	// Build the string table. Index 0 is always the empty string,
	// as the pprof wire format requires.
	strings := make(map[string]int)
	addString(strings, "")

	for _, st := range p.SampleType {
		st.typeX = addString(strings, st.Type)
		st.unitX = addString(strings, st.Unit)
	}

	for _, s := range p.Sample {
		s.labelX = nil
		// String-valued labels first, in sorted key order so the
		// encoded output is deterministic (map iteration is not).
		var keys []string
		for k := range s.Label {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			vs := s.Label[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					Label{
						keyX: addString(strings, k),
						strX: addString(strings, v),
					},
				)
			}
		}
		// Numeric labels follow, also in sorted key order.
		var numKeys []string
		for k := range s.NumLabel {
			numKeys = append(numKeys, k)
		}
		sort.Strings(numKeys)
		for _, k := range numKeys {
			vs := s.NumLabel[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					Label{
						keyX: addString(strings, k),
						numX: v,
					},
				)
			}
		}
		// Only location IDs are encoded; the pointers stay in the
		// exported field.
		s.locationIDX = nil
		for _, l := range s.Location {
			s.locationIDX = append(s.locationIDX, l.ID)
		}
	}

	for _, m := range p.Mapping {
		m.fileX = addString(strings, m.File)
		m.buildIDX = addString(strings, m.BuildID)
	}

	for _, l := range p.Location {
		for i, ln := range l.Line {
			if ln.Function != nil {
				l.Line[i].functionIDX = ln.Function.ID
			} else {
				// No function attached: 0 marks "absent" on the wire.
				l.Line[i].functionIDX = 0
			}
		}
		if l.Mapping != nil {
			l.mappingIDX = l.Mapping.ID
		} else {
			l.mappingIDX = 0
		}
	}
	for _, f := range p.Function {
		f.nameX = addString(strings, f.Name)
		f.systemNameX = addString(strings, f.SystemName)
		f.filenameX = addString(strings, f.Filename)
	}

	p.dropFramesX = addString(strings, p.DropFrames)
	p.keepFramesX = addString(strings, p.KeepFrames)

	if pt := p.PeriodType; pt != nil {
		pt.typeX = addString(strings, pt.Type)
		pt.unitX = addString(strings, pt.Unit)
	}

	// Flatten the interning map into the positional string table.
	p.stringTable = make([]string, len(strings))
	for s, i := range strings {
		p.stringTable[i] = s
	}
}
107 |
108 | func (p *Profile) encode(b *buffer) {
109 | for _, x := range p.SampleType {
110 | encodeMessage(b, 1, x)
111 | }
112 | for _, x := range p.Sample {
113 | encodeMessage(b, 2, x)
114 | }
115 | for _, x := range p.Mapping {
116 | encodeMessage(b, 3, x)
117 | }
118 | for _, x := range p.Location {
119 | encodeMessage(b, 4, x)
120 | }
121 | for _, x := range p.Function {
122 | encodeMessage(b, 5, x)
123 | }
124 | encodeStrings(b, 6, p.stringTable)
125 | encodeInt64Opt(b, 7, p.dropFramesX)
126 | encodeInt64Opt(b, 8, p.keepFramesX)
127 | encodeInt64Opt(b, 9, p.TimeNanos)
128 | encodeInt64Opt(b, 10, p.DurationNanos)
129 | if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
130 | encodeMessage(b, 11, p.PeriodType)
131 | }
132 | encodeInt64Opt(b, 12, p.Period)
133 | }
134 |
135 | var profileDecoder = []decoder{
136 | nil, // 0
137 | // repeated ValueType sample_type = 1
138 | func(b *buffer, m message) error {
139 | x := new(ValueType)
140 | pp := m.(*Profile)
141 | pp.SampleType = append(pp.SampleType, x)
142 | return decodeMessage(b, x)
143 | },
144 | // repeated Sample sample = 2
145 | func(b *buffer, m message) error {
146 | x := new(Sample)
147 | pp := m.(*Profile)
148 | pp.Sample = append(pp.Sample, x)
149 | return decodeMessage(b, x)
150 | },
151 | // repeated Mapping mapping = 3
152 | func(b *buffer, m message) error {
153 | x := new(Mapping)
154 | pp := m.(*Profile)
155 | pp.Mapping = append(pp.Mapping, x)
156 | return decodeMessage(b, x)
157 | },
158 | // repeated Location location = 4
159 | func(b *buffer, m message) error {
160 | x := new(Location)
161 | pp := m.(*Profile)
162 | pp.Location = append(pp.Location, x)
163 | return decodeMessage(b, x)
164 | },
165 | // repeated Function function = 5
166 | func(b *buffer, m message) error {
167 | x := new(Function)
168 | pp := m.(*Profile)
169 | pp.Function = append(pp.Function, x)
170 | return decodeMessage(b, x)
171 | },
172 | // repeated string string_table = 6
173 | func(b *buffer, m message) error {
174 | err := decodeStrings(b, &m.(*Profile).stringTable)
175 | if err != nil {
176 | return err
177 | }
178 | if *&m.(*Profile).stringTable[0] != "" {
179 | return errors.New("string_table[0] must be ''")
180 | }
181 | return nil
182 | },
183 | // repeated int64 drop_frames = 7
184 | func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
185 | // repeated int64 keep_frames = 8
186 | func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
187 | // repeated int64 time_nanos = 9
188 | func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).TimeNanos) },
189 | // repeated int64 duration_nanos = 10
190 | func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
191 | // optional string period_type = 11
192 | func(b *buffer, m message) error {
193 | x := new(ValueType)
194 | pp := m.(*Profile)
195 | pp.PeriodType = x
196 | return decodeMessage(b, x)
197 | },
198 | // repeated int64 period = 12
199 | func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
200 | // repeated int64 comment = 13
201 | func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
202 | // int64 defaultSampleType = 14
203 | func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
204 | }
205 |
206 | // postDecode takes the unexported fields populated by decode (with
207 | // suffix X) and populates the corresponding exported fields.
208 | // The unexported fields are cleared up to facilitate testing.
209 | func (p *Profile) postDecode() error {
210 | var err error
211 |
212 | mappings := make(map[uint64]*Mapping)
213 | for _, m := range p.Mapping {
214 | m.File, err = getString(p.stringTable, &m.fileX, err)
215 | m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
216 | mappings[m.ID] = m
217 | }
218 |
219 | functions := make(map[uint64]*Function)
220 | for _, f := range p.Function {
221 | f.Name, err = getString(p.stringTable, &f.nameX, err)
222 | f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
223 | f.Filename, err = getString(p.stringTable, &f.filenameX, err)
224 | functions[f.ID] = f
225 | }
226 |
227 | locations := make(map[uint64]*Location)
228 | for _, l := range p.Location {
229 | l.Mapping = mappings[l.mappingIDX]
230 | l.mappingIDX = 0
231 | for i, ln := range l.Line {
232 | if id := ln.functionIDX; id != 0 {
233 | l.Line[i].Function = functions[id]
234 | if l.Line[i].Function == nil {
235 | return fmt.Errorf("Function ID %d not found", id)
236 | }
237 | l.Line[i].functionIDX = 0
238 | }
239 | }
240 | locations[l.ID] = l
241 | }
242 |
243 | for _, st := range p.SampleType {
244 | st.Type, err = getString(p.stringTable, &st.typeX, err)
245 | st.Unit, err = getString(p.stringTable, &st.unitX, err)
246 | }
247 |
248 | for _, s := range p.Sample {
249 | labels := make(map[string][]string)
250 | numLabels := make(map[string][]int64)
251 | for _, l := range s.labelX {
252 | var key, value string
253 | key, err = getString(p.stringTable, &l.keyX, err)
254 | if l.strX != 0 {
255 | value, err = getString(p.stringTable, &l.strX, err)
256 | labels[key] = append(labels[key], value)
257 | } else {
258 | numLabels[key] = append(numLabels[key], l.numX)
259 | }
260 | }
261 | if len(labels) > 0 {
262 | s.Label = labels
263 | }
264 | if len(numLabels) > 0 {
265 | s.NumLabel = numLabels
266 | }
267 | s.Location = nil
268 | for _, lid := range s.locationIDX {
269 | s.Location = append(s.Location, locations[lid])
270 | }
271 | s.locationIDX = nil
272 | }
273 |
274 | p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
275 | p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
276 |
277 | if pt := p.PeriodType; pt == nil {
278 | p.PeriodType = &ValueType{}
279 | }
280 |
281 | if pt := p.PeriodType; pt != nil {
282 | pt.Type, err = getString(p.stringTable, &pt.typeX, err)
283 | pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
284 | }
285 | for _, i := range p.commentX {
286 | var c string
287 | c, err = getString(p.stringTable, &i, err)
288 | p.Comments = append(p.Comments, c)
289 | }
290 |
291 | p.commentX = nil
292 | p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
293 | p.stringTable = nil
294 | return nil
295 | }
296 |
// decoder returns the field decoder table for ValueType messages.
func (p *ValueType) decoder() []decoder {
	return valueTypeDecoder
}

// encode writes p's string-table indices (type and unit) to b;
// zero indices are omitted, matching proto3 default-value elision.
func (p *ValueType) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.typeX)
	encodeInt64Opt(b, 2, p.unitX)
}

// valueTypeDecoder maps ValueType protobuf field numbers to decode
// functions; index 0 is unused.
var valueTypeDecoder = []decoder{
	nil, // 0
	// optional int64 type = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
	// optional int64 unit = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
}
313 |
// decoder returns the field decoder table for Sample messages.
func (p *Sample) decoder() []decoder {
	return sampleDecoder
}

// encode writes p's location IDs, values and labels to b.
func (p *Sample) encode(b *buffer) {
	encodeUint64s(b, 1, p.locationIDX)
	for _, x := range p.Value {
		encodeInt64(b, 2, x)
	}
	for _, x := range p.labelX {
		encodeMessage(b, 3, x)
	}
}

// sampleDecoder maps Sample protobuf field numbers to decode
// functions; index 0 is unused.
var sampleDecoder = []decoder{
	nil, // 0
	// repeated uint64 location = 1
	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
	// repeated int64 value = 2
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
	// repeated Label label = 3
	func(b *buffer, m message) error {
		s := m.(*Sample)
		n := len(s.labelX)
		// Append a zero Label first, then decode into it in place.
		s.labelX = append(s.labelX, Label{})
		return decodeMessage(b, &s.labelX[n])
	},
}
342 |
// decoder returns the field decoder table for Label messages.
func (p Label) decoder() []decoder {
	return labelDecoder
}

// encode writes p's key index and its string or numeric value to b;
// zero fields are omitted.
func (p Label) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.keyX)
	encodeInt64Opt(b, 2, p.strX)
	encodeInt64Opt(b, 3, p.numX)
}

// labelDecoder maps Label protobuf field numbers to decode functions;
// index 0 is unused.
var labelDecoder = []decoder{
	nil, // 0
	// optional int64 key = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).keyX) },
	// optional int64 str = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).strX) },
	// optional int64 num = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).numX) },
}
362 |
// decoder returns the field decoder table for Mapping messages.
func (p *Mapping) decoder() []decoder {
	return mappingDecoder
}

// encode writes p's fields to b in tag order; zero/false values are
// omitted.
func (p *Mapping) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.Start)
	encodeUint64Opt(b, 3, p.Limit)
	encodeUint64Opt(b, 4, p.Offset)
	encodeInt64Opt(b, 5, p.fileX)
	encodeInt64Opt(b, 6, p.buildIDX)
	encodeBoolOpt(b, 7, p.HasFunctions)
	encodeBoolOpt(b, 8, p.HasFilenames)
	encodeBoolOpt(b, 9, p.HasLineNumbers)
	encodeBoolOpt(b, 10, p.HasInlineFrames)
}

// mappingDecoder maps Mapping protobuf field numbers to decode
// functions; index 0 is unused.
var mappingDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },           // optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },        // optional uint64 memory_start = 2
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },        // optional uint64 memory_limit = 3
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },       // optional uint64 file_offset = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },         // optional int64 filename = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },      // optional int64 build_id = 6
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },   // optional bool has_functions = 7
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },   // optional bool has_filenames = 8
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
}
393 |
// decoder returns the field decoder table for Location messages.
func (p *Location) decoder() []decoder {
	return locationDecoder
}

// encode writes p's ID, mapping reference, address and lines to b.
func (p *Location) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.mappingIDX)
	encodeUint64Opt(b, 3, p.Address)
	// Index to take the address of the slice element rather than a
	// range-variable copy.
	for i := range p.Line {
		encodeMessage(b, 4, &p.Line[i])
	}
}

// locationDecoder maps Location protobuf field numbers to decode
// functions; index 0 is unused.
var locationDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
	func(b *buffer, m message) error { // repeated Line line = 4
		pp := m.(*Location)
		n := len(pp.Line)
		// Append a zero Line, then decode into it in place.
		pp.Line = append(pp.Line, Line{})
		return decodeMessage(b, &pp.Line[n])
	},
}
419 |
// decoder returns the field decoder table for Line messages.
func (p *Line) decoder() []decoder {
	return lineDecoder
}

// encode writes p's function reference and line number to b;
// zero values are omitted.
func (p *Line) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.functionIDX)
	encodeInt64Opt(b, 2, p.Line)
}

// lineDecoder maps Line protobuf field numbers to decode functions;
// index 0 is unused.
var lineDecoder = []decoder{
	nil, // 0
	// optional uint64 function_id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
	// optional int64 line = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
}
436 |
// decoder returns the field decoder table for Function messages.
func (p *Function) decoder() []decoder {
	return functionDecoder
}

// encode writes p's ID, name/filename string-table indices and start
// line to b; zero values are omitted.
func (p *Function) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeInt64Opt(b, 2, p.nameX)
	encodeInt64Opt(b, 3, p.systemNameX)
	encodeInt64Opt(b, 4, p.filenameX)
	encodeInt64Opt(b, 5, p.StartLine)
}

// functionDecoder maps Function protobuf field numbers to decode
// functions; index 0 is unused.
var functionDecoder = []decoder{
	nil, // 0
	// optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
	// optional int64 function_name = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
	// optional int64 function_system_name = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
	// optional int64 filename = 4 (scalar, decoded with decodeInt64)
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
	// optional int64 start_line = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
}
462 |
// addString interns s into the string table map, assigning it the
// next free index on first sight, and returns its index.
func addString(strings map[string]int, s string) int64 {
	if idx, ok := strings[s]; ok {
		return int64(idx)
	}
	idx := len(strings)
	strings[s] = idx
	return int64(idx)
}
471 |
472 | func getString(strings []string, strng *int64, err error) (string, error) {
473 | if err != nil {
474 | return "", err
475 | }
476 | s := int(*strng)
477 | if s < 0 || s >= len(strings) {
478 | return "", errMalformed
479 | }
480 | *strng = 0
481 | return strings[s], nil
482 | }
483 |
--------------------------------------------------------------------------------
/pprof_reader/internal/profile/filter.go:
--------------------------------------------------------------------------------
1 | // Copyright 2014 The Go Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | // Implements methods to filter samples from profiles.
6 |
7 | package profile
8 |
9 | import "regexp"
10 |
// FilterSamplesByName filters the samples in a profile and only keeps
// samples where at least one frame matches focus but none match ignore.
// Returns true if the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide *regexp.Regexp) (fm, im, hm bool) {
	// Per-location verdict: true = focused, false = ignored;
	// locations matching neither regexp are absent from the map.
	focusOrIgnore := make(map[uint64]bool)
	hidden := make(map[uint64]bool)
	for _, l := range p.Location {
		if ignore != nil && l.matchesName(ignore) {
			im = true
			focusOrIgnore[l.ID] = false
		} else if focus == nil || l.matchesName(focus) {
			fm = true
			focusOrIgnore[l.ID] = true
		}
		if hide != nil && l.matchesName(hide) {
			hm = true
			// Drop matching lines in place; if none remain, hide the
			// whole location from every sample below.
			l.Line = l.unmatchedLines(hide)
			if len(l.Line) == 0 {
				hidden[l.ID] = true
			}
		}
	}

	s := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
			if len(hidden) > 0 {
				var locs []*Location
				for _, loc := range sample.Location {
					if !hidden[loc.ID] {
						locs = append(locs, loc)
					}
				}
				if len(locs) == 0 {
					// Remove sample with no locations (by not adding it to s).
					continue
				}
				sample.Location = locs
			}
			s = append(s, sample)
		}
	}
	p.Sample = s

	return
}
57 |
58 | // matchesName reports whether the function name or file in the
59 | // location matches the regular expression.
60 | func (loc *Location) matchesName(re *regexp.Regexp) bool {
61 | for _, ln := range loc.Line {
62 | if fn := ln.Function; fn != nil {
63 | if re.MatchString(fn.Name) {
64 | return true
65 | }
66 | if re.MatchString(fn.Filename) {
67 | return true
68 | }
69 | }
70 | }
71 | return false
72 | }
73 |
74 | // unmatchedLines returns the lines in the location that do not match
75 | // the regular expression.
76 | func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
77 | var lines []Line
78 | for _, ln := range loc.Line {
79 | if fn := ln.Function; fn != nil {
80 | if re.MatchString(fn.Name) {
81 | continue
82 | }
83 | if re.MatchString(fn.Filename) {
84 | continue
85 | }
86 | }
87 | lines = append(lines, ln)
88 | }
89 | return lines
90 | }
91 |
92 | // focusedAndNotIgnored looks up a slice of ids against a map of
93 | // focused/ignored locations. The map only contains locations that are
94 | // explicitly focused or ignored. Returns whether there is at least
95 | // one focused location but no ignored locations.
96 | func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
97 | var f bool
98 | for _, loc := range locs {
99 | if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
100 | if focus {
101 | // Found focused location. Must keep searching in case there
102 | // is an ignored one as well.
103 | f = true
104 | } else {
105 | // Found ignored location. Can return false right away.
106 | return false
107 | }
108 | }
109 | }
110 | return f
111 | }
112 |
// TagMatch is a predicate over one sample label: key paired with
// either a string value (val) or a numeric value (nval). It is used
// by FilterSamplesByTag to select samples.
type TagMatch func(key, val string, nval int64) bool
115 |
116 | // FilterSamplesByTag removes all samples from the profile, except
117 | // those that match focus and do not match the ignore regular
118 | // expression.
119 | func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
120 | samples := make([]*Sample, 0, len(p.Sample))
121 | for _, s := range p.Sample {
122 | focused, ignored := focusedSample(s, focus, ignore)
123 | fm = fm || focused
124 | im = im || ignored
125 | if focused && !ignored {
126 | samples = append(samples, s)
127 | }
128 | }
129 | p.Sample = samples
130 | return
131 | }
132 |
// focusedSample checks a sample against focus and ignore matchers.
// Returns whether the focus/ignore matchers match any of the sample's
// string or numeric labels. A nil focus matches every sample.
// (The comment previously named this function "focusedTag".)
func focusedSample(s *Sample, focus, ignore TagMatch) (fm, im bool) {
	fm = focus == nil
	for key, vals := range s.Label {
		for _, val := range vals {
			if ignore != nil && ignore(key, val, 0) {
				im = true
			}
			if !fm && focus(key, val, 0) {
				fm = true
			}
		}
	}
	for key, vals := range s.NumLabel {
		for _, val := range vals {
			if ignore != nil && ignore(key, "", val) {
				im = true
			}
			if !fm && focus(key, "", val) {
				fm = true
			}
		}
	}
	return fm, im
}
159 |
--------------------------------------------------------------------------------
/pprof_reader/internal/profile/proto.go:
--------------------------------------------------------------------------------
1 | // Copyright 2014 The Go Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | // This file is a simple protocol buffer encoder and decoder.
6 | //
7 | // A protocol message must implement the message interface:
8 | // decoder() []decoder
9 | // encode(*buffer)
10 | //
11 | // The decode method returns a slice indexed by field number that gives the
12 | // function to decode that field.
13 | // The encode method encodes its receiver into the given buffer.
14 | //
15 | // The two methods are simple enough to be implemented by hand rather than
16 | // by using a protocol compiler.
17 | //
18 | // See profile.go for examples of messages implementing this interface.
19 | //
20 | // There is no support for groups, message sets, or "has" bits.
21 |
22 | package profile
23 |
24 | import "errors"
25 |
// buffer holds the state shared by this package's hand-rolled
// protobuf encoder and decoder.
type buffer struct {
	field int    // field number of the record currently being decoded
	typ   int    // protobuf wire type of that record (2 = length-delimited)
	u64   uint64 // scratch for the most recently decoded integer
	data  []byte // encoded bytes: output while encoding, remaining input while decoding
	tmp   [16]byte // scratch used to rotate length headers in front of payloads
}

// decoder decodes one field of a message from b into m.
type decoder func(*buffer, message) error

// message is implemented by every protobuf message in this package:
// decoder returns the per-field decode table, and encode appends the
// message's wire representation to the buffer.
type message interface {
	decoder() []decoder
	encode(*buffer)
}
40 |
// marshal encodes m into a fresh buffer and returns the raw bytes.
func marshal(m message) []byte {
	var b buffer
	m.encode(&b)
	return b.data
}
46 |
47 | func encodeVarint(b *buffer, x uint64) {
48 | for x >= 128 {
49 | b.data = append(b.data, byte(x)|0x80)
50 | x >>= 7
51 | }
52 | b.data = append(b.data, byte(x))
53 | }
54 |
// encodeLength writes a length-delimited field header: the key
// (tag<<3 | wire type 2) followed by the payload length.
func encodeLength(b *buffer, tag int, len int) {
	encodeVarint(b, uint64(tag)<<3|2)
	encodeVarint(b, uint64(len))
}

// encodeUint64 writes a varint field: key (wire type 0) then value.
func encodeUint64(b *buffer, tag int, x uint64) {
	// append varint to b.data
	encodeVarint(b, uint64(tag)<<3|0)
	encodeVarint(b, x)
}

// encodeUint64s writes a repeated uint64 field. With more than two
// elements it uses packed encoding: the values are appended first,
// the header is appended after them, and the header bytes are then
// rotated in front of the values via the tmp scratch buffer.
func encodeUint64s(b *buffer, tag int, x []uint64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, u)
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		// Header lives at [n2:n3]; rotate it before the values at [n1:n2].
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeUint64(b, tag, u)
	}
}
85 |
// encodeUint64Opt writes the field only when x is nonzero, matching
// proto3 elision of default values.
func encodeUint64Opt(b *buffer, tag int, x uint64) {
	if x == 0 {
		return
	}
	encodeUint64(b, tag, x)
}

// encodeInt64 writes x as a varint field; the value is reinterpreted
// as uint64 (no zigzag encoding).
func encodeInt64(b *buffer, tag int, x int64) {
	u := uint64(x)
	encodeUint64(b, tag, u)
}

// encodeInt64Opt writes the field only when x is nonzero.
func encodeInt64Opt(b *buffer, tag int, x int64) {
	if x == 0 {
		return
	}
	encodeInt64(b, tag, x)
}
104 |
105 | func encodeInt64s(b *buffer, tag int, x []int64) {
106 | if len(x) > 2 {
107 | // Use packed encoding
108 | n1 := len(b.data)
109 | for _, u := range x {
110 | encodeVarint(b, uint64(u))
111 | }
112 | n2 := len(b.data)
113 | encodeLength(b, tag, n2-n1)
114 | n3 := len(b.data)
115 | copy(b.tmp[:], b.data[n2:n3])
116 | copy(b.data[n1+(n3-n2):], b.data[n1:n2])
117 | copy(b.data[n1:], b.tmp[:n3-n2])
118 | return
119 | }
120 | for _, u := range x {
121 | encodeInt64(b, tag, u)
122 | }
123 | }
124 |
125 | func encodeString(b *buffer, tag int, x string) {
126 | encodeLength(b, tag, len(x))
127 | b.data = append(b.data, x...)
128 | }
129 |
130 | func encodeStrings(b *buffer, tag int, x []string) {
131 | for _, s := range x {
132 | encodeString(b, tag, s)
133 | }
134 | }
135 |
136 | func encodeStringOpt(b *buffer, tag int, x string) {
137 | if x == "" {
138 | return
139 | }
140 | encodeString(b, tag, x)
141 | }
142 |
143 | func encodeBool(b *buffer, tag int, x bool) {
144 | if x {
145 | encodeUint64(b, tag, 1)
146 | } else {
147 | encodeUint64(b, tag, 0)
148 | }
149 | }
150 |
151 | func encodeBoolOpt(b *buffer, tag int, x bool) {
152 | if x == false {
153 | return
154 | }
155 | encodeBool(b, tag, x)
156 | }
157 |
158 | func encodeMessage(b *buffer, tag int, m message) {
159 | n1 := len(b.data)
160 | m.encode(b)
161 | n2 := len(b.data)
162 | encodeLength(b, tag, n2-n1)
163 | n3 := len(b.data)
164 | copy(b.tmp[:], b.data[n2:n3])
165 | copy(b.data[n1+(n3-n2):], b.data[n1:n2])
166 | copy(b.data[n1:], b.tmp[:n3-n2])
167 | }
168 |
169 | func unmarshal(data []byte, m message) (err error) {
170 | b := buffer{data: data, typ: 2}
171 | return decodeMessage(&b, m)
172 | }
173 |
174 | func le64(p []byte) uint64 {
175 | return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
176 | }
177 |
178 | func le32(p []byte) uint32 {
179 | return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
180 | }
181 |
182 | func decodeVarint(data []byte) (uint64, []byte, error) {
183 | var i int
184 | var u uint64
185 | for i = 0; ; i++ {
186 | if i >= 10 || i >= len(data) {
187 | return 0, nil, errors.New("bad varint")
188 | }
189 | u |= uint64(data[i]&0x7F) << uint(7*i)
190 | if data[i]&0x80 == 0 {
191 | return u, data[i+1:], nil
192 | }
193 | }
194 | }
195 |
196 | func decodeField(b *buffer, data []byte) ([]byte, error) {
197 | x, data, err := decodeVarint(data)
198 | if err != nil {
199 | return nil, err
200 | }
201 | b.field = int(x >> 3)
202 | b.typ = int(x & 7)
203 | b.data = nil
204 | b.u64 = 0
205 | switch b.typ {
206 | case 0:
207 | b.u64, data, err = decodeVarint(data)
208 | if err != nil {
209 | return nil, err
210 | }
211 | case 1:
212 | if len(data) < 8 {
213 | return nil, errors.New("not enough data")
214 | }
215 | b.u64 = le64(data[:8])
216 | data = data[8:]
217 | case 2:
218 | var n uint64
219 | n, data, err = decodeVarint(data)
220 | if err != nil {
221 | return nil, err
222 | }
223 | if n > uint64(len(data)) {
224 | return nil, errors.New("too much data")
225 | }
226 | b.data = data[:n]
227 | data = data[n:]
228 | case 5:
229 | if len(data) < 4 {
230 | return nil, errors.New("not enough data")
231 | }
232 | b.u64 = uint64(le32(data[:4]))
233 | data = data[4:]
234 | default:
235 | return nil, errors.New("unknown type: " + string(b.typ))
236 | }
237 |
238 | return data, nil
239 | }
240 |
241 | func checkType(b *buffer, typ int) error {
242 | if b.typ != typ {
243 | return errors.New("type mismatch")
244 | }
245 | return nil
246 | }
247 |
248 | func decodeMessage(b *buffer, m message) error {
249 | if err := checkType(b, 2); err != nil {
250 | return err
251 | }
252 | dec := m.decoder()
253 | data := b.data
254 | for len(data) > 0 {
255 | // pull varint field# + type
256 | var err error
257 | data, err = decodeField(b, data)
258 | if err != nil {
259 | return err
260 | }
261 | if b.field >= len(dec) || dec[b.field] == nil {
262 | continue
263 | }
264 | if err := dec[b.field](b, m); err != nil {
265 | return err
266 | }
267 | }
268 | return nil
269 | }
270 |
271 | func decodeInt64(b *buffer, x *int64) error {
272 | if err := checkType(b, 0); err != nil {
273 | return err
274 | }
275 | *x = int64(b.u64)
276 | return nil
277 | }
278 |
279 | func decodeInt64s(b *buffer, x *[]int64) error {
280 | if b.typ == 2 {
281 | // Packed encoding
282 | data := b.data
283 | for len(data) > 0 {
284 | var u uint64
285 | var err error
286 |
287 | if u, data, err = decodeVarint(data); err != nil {
288 | return err
289 | }
290 | *x = append(*x, int64(u))
291 | }
292 | return nil
293 | }
294 | var i int64
295 | if err := decodeInt64(b, &i); err != nil {
296 | return err
297 | }
298 | *x = append(*x, i)
299 | return nil
300 | }
301 |
302 | func decodeUint64(b *buffer, x *uint64) error {
303 | if err := checkType(b, 0); err != nil {
304 | return err
305 | }
306 | *x = b.u64
307 | return nil
308 | }
309 |
310 | func decodeUint64s(b *buffer, x *[]uint64) error {
311 | if b.typ == 2 {
312 | data := b.data
313 | // Packed encoding
314 | for len(data) > 0 {
315 | var u uint64
316 | var err error
317 |
318 | if u, data, err = decodeVarint(data); err != nil {
319 | return err
320 | }
321 | *x = append(*x, u)
322 | }
323 | return nil
324 | }
325 | var u uint64
326 | if err := decodeUint64(b, &u); err != nil {
327 | return err
328 | }
329 | *x = append(*x, u)
330 | return nil
331 | }
332 |
333 | func decodeString(b *buffer, x *string) error {
334 | if err := checkType(b, 2); err != nil {
335 | return err
336 | }
337 | *x = string(b.data)
338 | return nil
339 | }
340 |
341 | func decodeStrings(b *buffer, x *[]string) error {
342 | var s string
343 | if err := decodeString(b, &s); err != nil {
344 | return err
345 | }
346 | *x = append(*x, s)
347 | return nil
348 | }
349 |
350 | func decodeBool(b *buffer, x *bool) error {
351 | if err := checkType(b, 0); err != nil {
352 | return err
353 | }
354 | if int64(b.u64) == 0 {
355 | *x = false
356 | } else {
357 | *x = true
358 | }
359 | return nil
360 | }
361 |
--------------------------------------------------------------------------------
/pprof_reader/internal/profile/prune.go:
--------------------------------------------------------------------------------
1 | // Copyright 2014 The Go Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | // Implements methods to remove frames from profiles.
6 |
7 | package profile
8 |
9 | import (
10 | "fmt"
11 | "regexp"
12 | )
13 |
14 | // Prune removes all nodes beneath a node matching dropRx, and not
15 | // matching keepRx. If the root node of a Sample matches, the sample
16 | // will have an empty stack.
17 | func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
18 | prune := make(map[uint64]bool)
19 | pruneBeneath := make(map[uint64]bool)
20 |
21 | for _, loc := range p.Location {
22 | var i int
23 | for i = len(loc.Line) - 1; i >= 0; i-- {
24 | if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
25 | funcName := fn.Name
26 | // Account for leading '.' on the PPC ELF v1 ABI.
27 | if funcName[0] == '.' {
28 | funcName = funcName[1:]
29 | }
30 | if dropRx.MatchString(funcName) {
31 | if keepRx == nil || !keepRx.MatchString(funcName) {
32 | break
33 | }
34 | }
35 | }
36 | }
37 |
38 | if i >= 0 {
39 | // Found matching entry to prune.
40 | pruneBeneath[loc.ID] = true
41 |
42 | // Remove the matching location.
43 | if i == len(loc.Line)-1 {
44 | // Matched the top entry: prune the whole location.
45 | prune[loc.ID] = true
46 | } else {
47 | loc.Line = loc.Line[i+1:]
48 | }
49 | }
50 | }
51 |
52 | // Prune locs from each Sample
53 | for _, sample := range p.Sample {
54 | // Scan from the root to the leaves to find the prune location.
55 | // Do not prune frames before the first user frame, to avoid
56 | // pruning everything.
57 | foundUser := false
58 | for i := len(sample.Location) - 1; i >= 0; i-- {
59 | id := sample.Location[i].ID
60 | if !prune[id] && !pruneBeneath[id] {
61 | foundUser = true
62 | continue
63 | }
64 | if !foundUser {
65 | continue
66 | }
67 | if prune[id] {
68 | sample.Location = sample.Location[i+1:]
69 | break
70 | }
71 | if pruneBeneath[id] {
72 | sample.Location = sample.Location[i:]
73 | break
74 | }
75 | }
76 | }
77 | }
78 |
79 | // RemoveUninteresting prunes and elides profiles using built-in
80 | // tables of uninteresting function names.
81 | func (p *Profile) RemoveUninteresting() error {
82 | var keep, drop *regexp.Regexp
83 | var err error
84 |
85 | if p.DropFrames != "" {
86 | if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
87 | return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
88 | }
89 | if p.KeepFrames != "" {
90 | if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
91 | return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
92 | }
93 | }
94 | p.Prune(drop, keep)
95 | }
96 | return nil
97 | }
98 |
--------------------------------------------------------------------------------
/pprof_reader/pprof_reader_test.go:
--------------------------------------------------------------------------------
1 | package pprof_reader
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 |
7 | // "io/ioutil"
8 | // "os"
9 | "testing"
10 | )
11 |
12 | func TestBaseName(t *testing.T) {
13 | path := "test.exe"
14 | expected := "test"
15 | actual := getBasename(path)
16 | if actual != expected {
17 | t.Errorf("Expected [%v] but got [%v]", expected, actual)
18 | }
19 |
20 | path = "test."
21 | expected = "test"
22 | actual = getBasename(path)
23 | if actual != expected {
24 | t.Errorf("Expected [%v] but got [%v]", expected, actual)
25 | }
26 |
27 | path = ".exe"
28 | expected = ""
29 | actual = getBasename(path)
30 | if actual != expected {
31 | t.Errorf("Expected [%v] but got [%v]", expected, actual)
32 | }
33 |
34 | path = "test"
35 | expected = "test"
36 | actual = getBasename(path)
37 | if actual != expected {
38 | t.Errorf("Expected [%v] but got [%v]", expected, actual)
39 | }
40 |
41 | path = "test.test.exe"
42 | expected = "test.test"
43 | actual = getBasename(path)
44 | if actual != expected {
45 | t.Errorf("Expected [%v] but got [%v]", expected, actual)
46 | }
47 | }
48 |
49 | func toLineSet(data []byte) map[string]bool {
50 | result := make(map[string]bool)
51 | r := bytes.NewReader(data)
52 | s := bufio.NewScanner(r)
53 |
54 | // Skip past headers, which are OS and go version dependent.
55 | for s.Scan() {
56 | if s.Text() == "" {
57 | break
58 | }
59 | }
60 |
61 | // We compare only the payload.
62 | for s.Scan() {
63 | result[s.Text()] = true
64 | }
65 |
66 | return result
67 | }
68 |
69 | // TODO: Disabled until the format settles down more.
70 | // This will change again with RAM usage.
71 | // func disableTestConversion(t *testing.T) {
72 | // filename := "fixtures/wt.pprof.gz"
73 | // fr, err := os.Open(filename)
74 | // if err != nil {
75 | // t.Error(err)
76 | // return
77 | // }
78 | // defer fr.Close()
79 |
80 | // // TODO: This will eventually load cpu and memory profile
81 | // profile, err := ReadFromPProf(fr, fr)
82 | // if err != nil {
83 | // t.Error(err)
84 | // return
85 | // }
86 |
87 | // expectedEntryPointCount := 6
88 | // if len(profile.EntryPoints) != expectedEntryPointCount {
89 | // t.Errorf("Expected %v entry points but got %v", expectedEntryPointCount, len(profile.EntryPoints))
90 | // }
91 |
92 | // expectedBytes, err := ioutil.ReadFile("fixtures/wt.bf")
93 | // if err != nil {
94 | // t.Error(err)
95 | // return
96 | // }
97 | // expected := toLineSet(expectedBytes)
98 |
99 | // var b bytes.Buffer
100 | // writer := bufio.NewWriter(&b)
101 | // err = WriteBFFormat(profile, writer)
102 | // if err != nil {
103 | // t.Error(err)
104 | // return
105 | // }
106 |
107 | // actual := toLineSet(b.Bytes())
108 |
109 | // if len(actual) != len(expected) {
110 | // t.Errorf("Expected lines (%v) != actual lines (%v)", len(expected), len(actual))
111 | // }
112 |
113 | // for k, _ := range expected {
114 | // _, ok := actual[k]
115 | // if !ok {
116 | // t.Errorf("Expected line [%v] not found in actual output", k)
117 | // }
118 | // }
119 |
120 | // for k, _ := range actual {
121 | // _, ok := expected[k]
122 | // if !ok {
123 | // t.Errorf("Unexpected line [%v] found in actual output", k)
124 | // }
125 | // }
126 | // }
127 |
--------------------------------------------------------------------------------
/pprof_reader/profile.go:
--------------------------------------------------------------------------------
1 | package pprof_reader
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "io/ioutil"
7 | "os"
8 | "path"
9 |
10 | pprof "github.com/blackfireio/go-blackfire/pprof_reader/internal/profile"
11 | )
12 |
13 | type Function struct {
14 | Name string
15 |
16 | // Memory usage is aggregated into one overall cost per function (stored as
17 | // MemoryCost here), so we must keep track of the number of times a function
18 | // is referenced in a profile, and then "distribute" the cost based on the
19 | // number of times it is referenced across the sample call stacks of a
20 | // profile. This value is calculated and cached in DistributedMemoryCost
21 | MemoryCost uint64
22 | DistributedMemoryCost uint64
23 | ReferenceCount int
24 | }
25 |
26 | func (f *Function) AddReferences(count int) {
27 | f.ReferenceCount += count
28 | f.DistributedMemoryCost = f.MemoryCost / uint64(f.ReferenceCount)
29 | }
30 |
31 | func (f *Function) String() string {
32 | return f.Name
33 | }
34 |
35 | type Sample struct {
36 | Count int
37 | CPUTime uint64
38 | MemUsage uint64
39 | Stack []*Function
40 | }
41 |
42 | func newSample(count int, cpuTime uint64, stack []*Function) *Sample {
43 | return &Sample{
44 | Count: count,
45 | CPUTime: cpuTime,
46 | Stack: stack,
47 | }
48 | }
49 |
50 | func (s *Sample) CloneWithStack(stack []*Function) *Sample {
51 | return &Sample{
52 | Count: s.Count,
53 | CPUTime: s.CPUTime,
54 | MemUsage: s.MemUsage,
55 | Stack: stack,
56 | }
57 | }
58 |
59 | // Profle contains a set of entry points, which collectively contain all sampled data
60 | type Profile struct {
61 | CpuSampleRateHz int
62 | USecPerSample uint64
63 | Samples []*Sample
64 | // Note: Matching by ID didn't work since there seems to be some duplication
65 | // in the pprof data. We match by name instead since it's guaranteed unique.
66 | Functions map[string]*Function
67 | }
68 |
69 | func NewProfile() *Profile {
70 | return &Profile{
71 | Functions: make(map[string]*Function),
72 | }
73 | }
74 |
75 | func (p *Profile) CloneWithSamples(samples []*Sample) *Profile {
76 | return &Profile{
77 | CpuSampleRateHz: p.CpuSampleRateHz,
78 | USecPerSample: p.USecPerSample,
79 | Samples: samples,
80 | Functions: p.Functions,
81 | }
82 | }
83 |
84 | func (p *Profile) getMatchingFunction(pf *pprof.Function) *Function {
85 | f, ok := p.Functions[pf.Name]
86 | if !ok {
87 | f = &Function{
88 | Name: pf.Name,
89 | }
90 | p.Functions[pf.Name] = f
91 | }
92 |
93 | return f
94 | }
95 |
96 | func (p *Profile) setCPUSampleRate(hz int) {
97 | p.CpuSampleRateHz = hz
98 | p.USecPerSample = uint64(1000000 / float64(p.CpuSampleRateHz))
99 | }
100 |
101 | func (p *Profile) HasData() bool {
102 | return len(p.Samples) > 0
103 | }
104 |
105 | // Read a pprof format profile and convert to our internal format.
106 | func ReadFromPProf(cpuBuffers, memBuffers []*bytes.Buffer) (*Profile, error) {
107 | profile := NewProfile()
108 |
109 | for _, buffer := range memBuffers {
110 | if p, err := pprof.Parse(buffer); err != nil {
111 | return nil, err
112 | } else {
113 | profile.addMemorySamples(p)
114 | }
115 | }
116 |
117 | for _, buffer := range cpuBuffers {
118 | if p, err := pprof.Parse(buffer); err != nil {
119 | return nil, err
120 | } else {
121 | profile.USecPerSample = uint64(p.Period) / 1000
122 | profile.CpuSampleRateHz = int(1000000 / profile.USecPerSample)
123 | profile.addCPUSamples(p)
124 | }
125 | }
126 |
127 | profile.postProcessSamples()
128 | return profile, nil
129 | }
130 |
131 | func (p *Profile) addMemorySamples(pp *pprof.Profile) {
132 | const valueIndex = 3
133 | for _, sample := range pp.Sample {
134 | memUsage := sample.Value[valueIndex]
135 | if memUsage > 0 {
136 | loc := sample.Location[0]
137 | line := loc.Line[0]
138 | f := p.getMatchingFunction(line.Function)
139 | f.MemoryCost += uint64(memUsage)
140 | }
141 | }
142 | }
143 |
144 | func (p *Profile) addCPUSamples(pp *pprof.Profile) {
145 | // All pprof profiles have count in index 0, and whatever value in index 1.
146 | // I haven't encountered a profile with sample value index > 1, and in fact
147 | // it cannot happen the way runtime.pprof does profiling atm.
148 | const countIndex = 0
149 | const valueIndex = 1
150 |
151 | for _, sample := range pp.Sample {
152 | callCount := sample.Value[countIndex]
153 | if callCount < 1 {
154 | callCount = 1
155 | }
156 | cpuTime := uint64(sample.Value[valueIndex]) / 1000 // Convert ns to us
157 |
158 | // A sample contains a stack trace, which is made of locations.
159 | // A location has one or more lines (>1 if functions are inlined).
160 | // Each line points to a function.
161 | stack := make([]*Function, 0, 10)
162 |
163 | // PProf stack data is stored leaf-first. We need it to be root-first.
164 | for i := len(sample.Location) - 1; i >= 0; i-- {
165 | location := sample.Location[i]
166 | for j := len(location.Line) - 1; j >= 0; j-- {
167 | line := location.Line[j]
168 | f := p.getMatchingFunction(line.Function)
169 | f.AddReferences(int(callCount))
170 | stack = append(stack, f)
171 | }
172 | }
173 |
174 | p.Samples = append(p.Samples, newSample(int(callCount), cpuTime, stack))
175 | }
176 | }
177 |
178 | func (p *Profile) postProcessSamples() {
179 | for _, sample := range p.Samples {
180 | decycleStack(sample.Stack)
181 | memUsage := uint64(0)
182 | for _, f := range sample.Stack {
183 | memUsage += f.DistributedMemoryCost
184 | }
185 | sample.MemUsage = memUsage
186 | }
187 | }
188 |
189 | // Decycle a sample's call stack.
190 | // If the same function is encountered multiple times in a goroutine stack,
191 | // create duplicates with @1, @2, etc appended to the name so that they show
192 | // up as different names in the BF visualizer.
193 | func decycleStack(stack []*Function) {
194 | seen := make(map[string]int)
195 | for i, f := range stack {
196 | if dupCount, ok := seen[f.Name]; ok {
197 | stack[i] = &Function{
198 | Name: fmt.Sprintf("%s@%d", f.Name, dupCount),
199 | MemoryCost: f.MemoryCost,
200 | DistributedMemoryCost: f.DistributedMemoryCost,
201 | ReferenceCount: f.ReferenceCount,
202 | }
203 | seen[f.Name] = dupCount + 1
204 | } else {
205 | seen[f.Name] = 1
206 | }
207 | }
208 | }
209 |
210 | func getBasename(path string) string {
211 | for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- {
212 | if path[i] == '.' {
213 | return path[:i]
214 | }
215 | }
216 | return path
217 | }
218 |
219 | func getExeName() string {
220 | name, err := os.Executable()
221 | if err != nil {
222 | return "go-unknown"
223 | }
224 | return getBasename(path.Base(name))
225 | }
226 |
227 | func fileExists(path string) bool {
228 | _, err := os.Stat(path)
229 | return err == nil
230 | }
231 |
232 | func getCpuProfileDumpPath(pathPrefix string, index int) string {
233 | return fmt.Sprintf("%v-cpu-%v.pprof", pathPrefix, index)
234 | }
235 |
236 | func getMemProfileDumpPath(pathPrefix string, index int) string {
237 | return fmt.Sprintf("%v-mem-%v.pprof", pathPrefix, index)
238 | }
239 |
240 | func getDumpStartIndex(pathPrefix string) int {
241 | index := 1
242 | for {
243 | if !fileExists(getCpuProfileDumpPath(pathPrefix, index)) &&
244 | !fileExists(getMemProfileDumpPath(pathPrefix, index)) {
245 | return index
246 | }
247 | index++
248 | }
249 | }
250 |
251 | // DumpProfiles dumps the raw golang pprof files to the specified directory.
252 | // It uses the naming scheme exename-type-index.pprof, starting at the next
253 | // index after the last one found in the specified directory.
254 | func DumpProfiles(cpuBuffers, memBuffers []*bytes.Buffer, dstDir string) (err error) {
255 | pathPrefix := path.Join(dstDir, getExeName())
256 | startIndex := getDumpStartIndex(pathPrefix)
257 |
258 | for i, buff := range cpuBuffers {
259 | filename := getCpuProfileDumpPath(pathPrefix, startIndex+i)
260 | if err = ioutil.WriteFile(filename, buff.Bytes(), 0644); err != nil {
261 | return
262 | }
263 | }
264 | for i, buff := range memBuffers {
265 | filename := getMemProfileDumpPath(pathPrefix, startIndex+i)
266 | if err = ioutil.WriteFile(filename, buff.Bytes(), 0644); err != nil {
267 | return
268 | }
269 | }
270 | return
271 | }
272 |
--------------------------------------------------------------------------------
/pprof_reader/profile_test.go:
--------------------------------------------------------------------------------
1 | package pprof_reader
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 | )
7 |
8 | func newTestStack(entries ...string) (stack []*Function) {
9 | for _, e := range entries {
10 | stack = append(stack, &Function{
11 | Name: e,
12 | })
13 | }
14 | return
15 | }
16 |
17 | func TestDecycleStack(t *testing.T) {
18 | expected := newTestStack("a", "b", "c", "b@1", "c@1", "d")
19 | actual := newTestStack("a", "b", "c", "b", "c", "d")
20 | decycleStack(actual)
21 | if !reflect.DeepEqual(expected, actual) {
22 | t.Errorf("Expected %v but got %v", expected, actual)
23 | }
24 | }
25 |
26 | func TestDecycleStackComplex(t *testing.T) {
27 | expected := newTestStack("a", "b", "c", "b@1", "c@1", "d", "a@1", "b@2", "c@2", "f")
28 | actual := newTestStack("a", "b", "c", "b", "c", "d", "a", "b", "c", "f")
29 | decycleStack(actual)
30 | if !reflect.DeepEqual(expected, actual) {
31 | t.Errorf("Expected %v but got %v", expected, actual)
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/pprof_reader/test.bf:
--------------------------------------------------------------------------------
1 | file-format: BlackfireProbe
2 | Cost-Dimensions: wt cpu mu pmu
3 | graph-root-id: net/http.(*conn).serve
4 |
5 | net/http.checkConnErrorWriter.Write==>net.(*conn).Write//1 70000 0 0 0
6 | database/sql.(*DB).Query==>database/sql.(*DB).QueryContext//1 240000 0 0 0
7 | github.com/lib/pq.dial==>github.com/lib/pq.defaultDialer.DialContext//1 150000 0 0 0
8 | database/sql.(*DB).conn==>database/sql.(*driverConn).Close//1 110000 0 0 0
9 | syscall.Close==>syscall.Syscall//6 60000 0 0 0
10 | github.com/lib/pq.(*conn).auth==>github.com/lib/pq.(*conn).send//1 10000 0 0 0
11 | github.com/lib/pq.(*conn).auth==>github.com/lib/pq.(*conn).recv//1 10000 0 0 0
12 | database/sql.dsnConnector.Connect==>github.com/lib/pq.(*Driver).Open//1 270000 0 0 0
13 | net.socket==>net.(*netFD).dial//1 90000 0 0 0
14 | runtime.makeslice==>runtime.mallocgc//1 10000 0 0 0
15 | syscall.Syscall==>runtime.entersyscall//1 40000 0 0 0
16 | runtime.newstack==>runtime.copystack//1 20000 0 0 0
17 | database/sql.(*DB).removeDepLocked==>runtime.mapdelete//1 10000 0 0 0
18 | runtime.assertI2I2==>runtime.getitab//1 10000 0 0 0
19 | net/http.(*Request).wantsClose==>net/http.hasToken//1 10000 0 0 0
20 | net.(*sysDialer).doDialTCP==>net.internetSocket//1 130000 0 0 0
21 | main.(*Store).StoreWidget==>database/sql.(*DB).Exec//1 230000 0 0 0
22 | database/sql.(*driverConn).finalClose==>database/sql.withLock//1 100000 0 0 0
23 | database/sql.(*DB).queryDC==>database/sql.withLock//1 40000 0 0 0
24 | runtime.gentraceback==>runtime.funcspdelta//1 10000 0 0 0
25 | syscall.Getpeername==>syscall.getpeername//1 10000 0 0 0
26 | net/http.(*conn).close==>net.(*conn).Close//1 30000 0 0 0
27 | runtime.systemstack==>runtime.entersyscall_sysmon//1 40000 0 0 0
28 | runtime.newproc==>runtime.systemstack//1 10000 0 0 0
29 | github.com/lib/pq.(*conn).startup==>github.com/lib/pq.(*conn).processParameterStatus//1 10000 0 0 0
30 | github.com/gorilla/mux.(*Router).Match==>github.com/gorilla/mux.(*Route).Match//1 10000 0 0 0
31 | net.(*conn).Write==>net.(*netFD).Write//1 180000 0 0 0
32 | syscall.Socket==>syscall.socket//1 40000 0 0 0
33 | syscall.Connect==>syscall.connect//1 60000 0 0 0
34 | github.com/lib/pq.parseEnviron==>strings.SplitN//1 20000 0 0 0
35 | runtime.newproc1==>runtime.gfget//1 10000 0 0 0
36 | database/sql.(*DB).Exec==>database/sql.(*DB).ExecContext//1 230000 0 0 0
37 | database/sql.(*DB).execDC.func2==>database/sql.ctxDriverExec//1 30000 0 0 0
38 | database/sql.ctxDriverExec==>github.com/lib/pq.(*conn).ExecContext//1 30000 0 0 0
39 | github.com/lib/pq.(*conn).recv1Buf==>github.com/lib/pq.(*conn).recvMessage//1 20000 0 0 0
40 | github.com/lib/pq.DialOpen==>github.com/lib/pq.NewConnector//1 30000 0 0 0
41 | strings.SplitN==>strings.genSplit//1 20000 0 0 0
42 | github.com/lib/pq.(*conn).processParameterStatus==>time.LoadLocation//1 10000 0 0 0
43 | main.(*HttpServer).httpHandlerGetWidgets==>encoding/json.Marshal//1 10000 0 0 0
44 | bufio.(*Writer).Flush==>net/http.checkConnErrorWriter.Write//1 70000 0 0 0
45 | database/sql.(*DB).exec==>database/sql.(*DB).execDC//1 50000 0 0 0
46 | github.com/lib/pq.(*conn).ExecContext==>github.com/lib/pq.(*conn).Exec//1 30000 0 0 0
47 | runtime.entersyscall==>runtime.reentersyscall//1 40000 0 0 0
48 | github.com/lib/pq.(*conn).sendSimpleMessage==>net.(*conn).Write//1 60000 0 0 0
49 | net/http.(*connReader).abortPendingRead==>sync.(*Cond).Wait//1 10000 0 0 0
50 | github.com/lib/pq.(*conn).auth==>github.com/lib/pq.md5s//1 10000 0 0 0
51 | net.setKeepAlivePeriod==>internal/poll.(*FD).SetsockoptInt//1 20000 0 0 0
52 | database/sql.(*driverConn).Close==>database/sql.(*DB).removeDepLocked//1 10000 0 0 0
53 | encoding/json.sliceEncoder.encode==>encoding/json.arrayEncoder.encode//1 10000 0 0 0
54 | net.sockaddrToTCP==>runtime.newobject//1 10000 0 0 0
55 | time.readFile==>runtime.newstack//1 10000 0 0 0
56 | sync.(*Cond).Wait==>sync.runtime_notifyListWait//1 10000 0 0 0
57 | net.(*sysDialer).dialSerial==>net.(*sysDialer).dialSingle//1 130000 0 0 0
58 | net.socket==>net.sysSocket//1 40000 0 0 0
59 | github.com/lib/pq.(*conn).Close==>github.com/lib/pq.(*conn).Close.func1//1 40000 0 0 0
60 | github.com/lib/pq.(*stmt).exec==>github.com/lib/pq.(*conn).readBindResponse//1 20000 0 0 0
61 | bufio.(*Reader).ReadLine==>bufio.(*Reader).ReadSlice//1 20000 0 0 0
62 | sync.runtime_notifyListWait==>runtime.acquireSudog//1 10000 0 0 0
63 | internal/poll.runtime_pollOpen==>runtime.netpollopen//1 10000 0 0 0
64 | net/http.readRequest==>net/textproto.(*Reader).ReadMIMEHeader//1 20000 0 0 0
65 | syscall.setsockopt==>syscall.Syscall6//2 20000 0 0 0
66 | github.com/lib/pq.(*conn).simpleQuery==>github.com/lib/pq.(*conn).recv1//1 10000 0 0 0
67 | runtime.growslice==>runtime.mallocgc//1 10000 0 0 0
68 | internal/poll.(*FD).Write==>syscall.Write//1 170000 0 0 0
69 | net/http.(*conn).serve==>net/http.(*conn).serve.func1//1 30000 0 0 0
70 | internal/poll.(*FD).Read==>syscall.Read//1 80000 0 0 0
71 | net.(*netFD).dial==>net.sockaddrToTCP//1 10000 0 0 0
72 | github.com/lib/pq.(*conn).sendStartupPacket==>net.(*conn).Write//1 10000 0 0 0
73 | database/sql.(*DB).execDC==>runtime.assertI2I2//1 10000 0 0 0
74 | runtime.tracebackdefers==>runtime.adjustframe//1 10000 0 0 0
75 | net.(*conn).Close==>net.(*netFD).Close//1 70000 0 0 0
76 | net/textproto.(*Reader).ReadMIMEHeader==>runtime.makeslice//1 10000 0 0 0
77 | database/sql.(*DB).execDC==>database/sql.withLock//1 40000 0 0 0
78 | strings.genSplit==>strings.Index//1 20000 0 0 0
79 | runtime.mapaccess1_faststr==>runtime.add//1 10000 0 0 0
80 | internal/poll.(*FD).Init==>internal/poll.(*pollDesc).init//1 10000 0 0 0
81 | database/sql.withLock==>database/sql.(*DB).execDC.func2//1 30000 0 0 0
82 | runtime.futexwakeup==>runtime.futex//4 40000 0 0 0
83 | strings.IndexByte==>indexbytebody//1 10000 0 0 0
84 | time.loadTzinfoFromDirOrZip==>time.readFile//1 10000 0 0 0
85 | runtime.newobject==>runtime.mallocgc//1 30000 0 0 0
86 | main.(*Store).GetAllWidgets==>runtime.growslice//1 10000 0 0 0
87 | github.com/lib/pq.(*conn).startup==>github.com/lib/pq.(*conn).sendStartupPacket//1 10000 0 0 0
88 | net.internetSocket==>net.socket//1 130000 0 0 0
89 | database/sql.(*driverConn).finalClose.func2==>github.com/lib/pq.(*conn).Close//1 100000 0 0 0
90 | bufio.(*Reader).Read==>net.(*conn).Read//1 60000 0 0 0
91 | net.(*netFD).connect==>syscall.Connect//1 60000 0 0 0
92 | time.LoadLocation==>time.loadLocation//1 10000 0 0 0
93 | context.WithValue==>runtime.newobject//1 10000 0 0 0
94 | strings.IndexByte==>internal/bytealg.IndexByteString//1 10000 0 0 0
95 | github.com/lib/pq.parseEnviron==>runtime.makemap_small//1 10000 0 0 0
96 | net/http.(*conn).readRequest==>net/http.(*Request).wantsClose//1 10000 0 0 0
97 | net/http.(*conn).serve//1 680000 0 0 0
98 | database/sql.(*DB).queryDC.func1==>database/sql.ctxDriverQuery//1 40000 0 0 0
99 | strings.Index==>strings.IndexByte//1 20000 0 0 0
100 | net/http.(*conn).serve==>net/http.(*connReader).startBackgroundRead//1 10000 0 0 0
101 | github.com/gorilla/mux.routeRegexpGroup.setMatch==>regexp.(*Regexp).FindStringSubmatchIndex//1 10000 0 0 0
102 | internal/poll.(*FD).Write==>runtime.newstack//1 10000 0 0 0
103 | net/http.(*conn).serve==>net/http.(*response).finishRequest//1 80000 0 0 0
104 | net/http.serverHandler.ServeHTTP==>github.com/gorilla/mux.(*Router).ServeHTTP//1 500000 0 0 0
105 | database/sql.(*DB).conn==>database/sql.dsnConnector.Connect//1 270000 0 0 0
106 | github.com/lib/pq.(*conn).readBindResponse==>github.com/lib/pq.(*conn).recv1//1 10000 0 0 0
107 | github.com/lib/pq.(*conn).prepareTo==>github.com/lib/pq.(*conn).send//1 10000 0 0 0
108 | main.(*Store).GetAllWidgets==>database/sql.(*DB).Query//1 240000 0 0 0
109 | net/http.HandlerFunc.ServeHTTP==>main.(*HttpServer).httpHandlerCreateWidget//1 230000 0 0 0
110 | encoding/json.arrayEncoder.encode==>encoding/json.ptrEncoder.encode//1 10000 0 0 0
111 | net.(*sysDialer).dialTCP==>net.(*sysDialer).doDialTCP//1 130000 0 0 0
112 | github.com/lib/pq.(*Connector).open==>github.com/lib/pq.(*conn).startup//1 90000 0 0 0
113 | syscall.getpeername==>syscall.RawSyscall//1 10000 0 0 0
114 | time.loadTzinfo==>time.loadTzinfoFromDirOrZip//1 10000 0 0 0
115 | github.com/lib/pq.(*conn).startup==>github.com/lib/pq.(*conn).auth//1 30000 0 0 0
116 | database/sql.withLock==>database/sql.(*driverConn).finalClose.func2//1 100000 0 0 0
117 | net.(*netFD).Close==>internal/poll.(*FD).Close//1 70000 0 0 0
118 | github.com/lib/pq.(*conn).simpleQuery==>github.com/lib/pq.(*conn).send//1 30000 0 0 0
119 | bufio.(*Reader).ReadSlice==>bufio.(*Reader).fill//1 20000 0 0 0
120 | database/sql.(*DB).exec==>database/sql.(*DB).conn//1 180000 0 0 0
121 | database/sql.ctxDriverQuery==>github.com/lib/pq.(*conn).QueryContext//1 40000 0 0 0
122 | internal/poll.(*FD).SetsockoptInt==>syscall.SetsockoptInt//1 20000 0 0 0
123 | runtime.systemstack==>runtime.newproc.func1//1 10000 0 0 0
124 | net/http.(*connReader).startBackgroundRead==>runtime.newproc//1 10000 0 0 0
125 | time.loadLocation==>time.loadTzinfo//1 10000 0 0 0
126 | runtime.adjustframe==>runtime.getStackMap//1 10000 0 0 0
127 | github.com/lib/pq.(*conn).Close.func1==>net.(*conn).Close//1 40000 0 0 0
128 | runtime.mallocgc==>runtime.heapBitsSetType//4 40000 0 0 0
129 | database/sql.(*DB).query==>database/sql.(*DB).queryDC//1 40000 0 0 0
130 | github.com/lib/pq.(*conn).query==>github.com/lib/pq.(*conn).simpleQuery//1 40000 0 0 0
131 | io.ReadAtLeast==>bufio.(*Reader).Read//1 60000 0 0 0
132 | syscall.Write==>syscall.write//1 170000 0 0 0
133 | net.(*netFD).Read==>internal/poll.(*FD).Read//1 80000 0 0 0
134 | net/textproto.canonicalMIMEHeaderKey==>runtime.mapaccess1_faststr//1 10000 0 0 0
135 | syscall.read==>syscall.Syscall//4 80000 0 0 0
136 | syscall.SetsockoptInt==>syscall.setsockopt//1 20000 0 0 0
137 | regexp.(*Regexp).FindStringSubmatchIndex==>regexp.(*Regexp).pad//1 10000 0 0 0
138 | runtime.adjustdefers==>runtime.tracebackdefers//1 10000 0 0 0
139 | net/http.(*response).finishRequest==>net/http.(*connReader).abortPendingRead//1 10000 0 0 0
140 | runtime.notewakeup==>runtime.futexwakeup//1 40000 0 0 0
141 | net/textproto.(*Reader).ReadLine==>net/textproto.(*Reader).readLineSlice//1 20000 0 0 0
142 | net/textproto.(*Reader).readLineSlice==>bufio.(*Reader).ReadLine//1 20000 0 0 0
143 | net/http.HandlerFunc.ServeHTTP==>main.(*HttpServer).httpHandlerGetWidgets//1 260000 0 0 0
144 | github.com/lib/pq.DialOpen==>github.com/lib/pq.(*Connector).open//1 240000 0 0 0
145 | github.com/lib/pq.defaultDialer.DialContext==>net.(*Dialer).DialContext//1 150000 0 0 0
146 | runtime.netpollopen==>runtime.epollctl//1 10000 0 0 0
147 | io.ReadFull==>io.ReadAtLeast//1 60000 0 0 0
148 | net/http.(*connReader).Read==>net.(*conn).Read//1 20000 0 0 0
149 | github.com/lib/pq.(*conn).recv1==>github.com/lib/pq.(*conn).recv1Buf//1 20000 0 0 0
150 | github.com/gorilla/mux.(*Router).ServeHTTP==>github.com/gorilla/mux.(*Router).Match//1 10000 0 0 0
151 | runtime.makemap_small==>runtime.newobject//1 10000 0 0 0
152 | net.(*netFD).Write==>internal/poll.(*FD).Write//1 180000 0 0 0
153 | database/sql.(*driverConn).Close==>database/sql.(*driverConn).finalClose//1 100000 0 0 0
154 | net.(*netFD).dial==>net.(*netFD).connect//1 80000 0 0 0
155 | github.com/lib/pq.(*conn).send==>net.(*conn).Write//1 50000 0 0 0
156 | github.com/lib/pq.(*conn).Exec==>github.com/lib/pq.(*stmt).Exec//1 20000 0 0 0
157 | github.com/lib/pq.(*conn).Close==>github.com/lib/pq.(*conn).sendSimpleMessage//1 60000 0 0 0
158 | syscall.connect==>syscall.Syscall//6 60000 0 0 0
159 | github.com/lib/pq.(*conn).recv==>github.com/lib/pq.(*conn).recvMessage//1 50000 0 0 0
160 | syscall.write==>syscall.Syscall//17 170000 0 0 0
161 | net/http.(*conn).serve==>net/http.serverHandler.ServeHTTP//1 500000 0 0 0
162 | github.com/lib/pq.md5s==>runtime.convTslice//1 10000 0 0 0
163 | net/http.(*conn).readRequest==>net/http.readRequest//1 40000 0 0 0
164 | database/sql.withLock==>database/sql.(*DB).queryDC.func1//1 40000 0 0 0
165 | runtime.pcvalue==>runtime.step//1 10000 0 0 0
166 | net/textproto.(*Reader).ReadMIMEHeader==>net/textproto.canonicalMIMEHeaderKey//1 10000 0 0 0
167 | database/sql.(*DB).query==>database/sql.(*DB).conn//1 200000 0 0 0
168 | github.com/lib/pq.Open==>github.com/lib/pq.DialOpen//1 270000 0 0 0
169 | net.sysSocket==>syscall.Socket//1 40000 0 0 0
170 | internal/poll.(*FD).destroy==>syscall.Close//1 60000 0 0 0
171 | net/http.(*conn).serve.func1==>net/http.(*conn).close//1 30000 0 0 0
172 | github.com/lib/pq.(*conn).startup==>github.com/lib/pq.(*conn).recv//1 40000 0 0 0
173 | database/sql.(*DB).ExecContext==>database/sql.(*DB).exec//1 230000 0 0 0
174 | net.(*netFD).connect==>internal/poll.(*FD).Init//1 10000 0 0 0
175 | runtime.entersyscall_sysmon==>runtime.notewakeup//1 40000 0 0 0
176 | net/http.readRequest==>net/textproto.(*Reader).ReadLine//1 20000 0 0 0
177 | github.com/lib/pq.NewConnector==>github.com/lib/pq.parseEnviron//1 30000 0 0 0
178 | net.(*conn).Read==>net.(*netFD).Read//1 80000 0 0 0
179 | net.(*Dialer).DialContext==>net.setKeepAlivePeriod//1 20000 0 0 0
180 | encoding/json.ptrEncoder.encode==>encoding/json.structEncoder.encode//1 10000 0 0 0
181 | github.com/lib/pq.(*Driver).Open==>github.com/lib/pq.Open//1 270000 0 0 0
182 | github.com/lib/pq.(*Connector).open==>github.com/lib/pq.dial//1 150000 0 0 0
183 | internal/poll.(*FD).Close==>internal/poll.(*FD).decref//1 70000 0 0 0
184 | net/http.(*conn).serve==>net/http.(*conn).readRequest//1 50000 0 0 0
185 | github.com/lib/pq.(*stmt).Exec==>github.com/lib/pq.(*stmt).exec//1 20000 0 0 0
186 | net.(*netFD).connect==>syscall.Getpeername//1 10000 0 0 0
187 | net/http.(*conn).serve==>context.WithValue//1 10000 0 0 0
188 | runtime.reentersyscall==>runtime.systemstack//1 40000 0 0 0
189 | runtime.newproc.func1==>runtime.newproc1//1 10000 0 0 0
190 | encoding/json.Marshal==>encoding/json.(*encodeState).marshal//1 10000 0 0 0
191 | github.com/gorilla/mux.(*Router).ServeHTTP==>net/http.HandlerFunc.ServeHTTP//1 490000 0 0 0
192 | database/sql.(*DB).QueryContext==>database/sql.(*DB).query//1 240000 0 0 0
193 | net.(*sysDialer).dialSingle==>net.(*sysDialer).dialTCP//1 130000 0 0 0
194 | internal/poll.(*pollDesc).init==>internal/poll.runtime_pollOpen//1 10000 0 0 0
195 | syscall.Read==>syscall.read//1 80000 0 0 0
196 | encoding/json.(*encodeState).marshal==>encoding/json.(*encodeState).reflectValue//1 10000 0 0 0
197 | internal/poll.(*FD).decref==>internal/poll.(*FD).destroy//1 70000 0 0 0
198 | github.com/lib/pq.(*conn).QueryContext==>github.com/lib/pq.(*conn).query//1 40000 0 0 0
199 | github.com/lib/pq.(*conn).recvMessage==>io.ReadFull//1 60000 0 0 0
200 | net/http.(*response).finishRequest==>bufio.(*Writer).Flush//1 70000 0 0 0
201 | main.(*HttpServer).httpHandlerGetWidgets==>main.(*Store).GetAllWidgets//1 250000 0 0 0
202 | net.(*Dialer).DialContext==>net.(*sysDialer).dialSerial//1 130000 0 0 0
203 | syscall.socket==>syscall.RawSyscall//4 40000 0 0 0
204 | main.(*HttpServer).httpHandlerCreateWidget==>main.(*Store).StoreWidget//1 230000 0 0 0
205 | bufio.(*Reader).fill==>net/http.(*connReader).Read//1 20000 0 0 0
206 | encoding/json.(*encodeState).reflectValue==>encoding/json.sliceEncoder.encode//1 10000 0 0 0
207 | runtime.copystack==>runtime.adjustdefers//1 10000 0 0 0
208 | github.com/lib/pq.(*conn).Exec==>github.com/lib/pq.(*conn).prepareTo//1 10000 0 0 0
209 | runtime.copystack==>runtime.gentraceback//1 10000 0 0 0
210 | runtime.funcspdelta==>runtime.pcvalue//1 10000 0 0 0
211 | github.com/gorilla/mux.(*Route).Match==>github.com/gorilla/mux.routeRegexpGroup.setMatch//1 10000 0 0 0
212 |
--------------------------------------------------------------------------------
/probe.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "encoding/base64"
7 | "fmt"
8 | "math/rand"
9 | "net/url"
10 | "runtime"
11 | "runtime/debug"
12 | "runtime/pprof"
13 | "strings"
14 | "sync"
15 | "time"
16 |
17 | "github.com/blackfireio/go-blackfire/pprof_reader"
18 | "github.com/pkg/errors"
19 | )
20 |
// profilerState tracks where the probe is in a profiling session's lifecycle.
type profilerState int

const (
	profilerStateOff      profilerState = iota // no session in progress
	profilerStateEnabled                       // CPU profiling is recording (see enableProfiling)
	profilerStateDisabled                      // recording stopped, data not yet uploaded (see disableProfiling)
	profilerStateSending                       // profile is being converted/uploaded (see endProfile)
)
29 |
// probe drives the whole profiling lifecycle: configuration, pprof capture
// into in-memory buffers, and upload of the converted profile to the agent.
type probe struct {
	configuration *Configuration
	agentClient   *agentClient
	// mutex guards state transitions in EnableNowFor/Disable/End*.
	mutex sync.Mutex
	// profileDisableTrigger is rebuilt by startTriggerRearmLoop after each
	// trigger; sending false stops recording, true ends and uploads.
	profileDisableTrigger chan bool
	currentTitle          string
	currentState          profilerState
	cpuProfileBuffers     []*bytes.Buffer // one CPU buffer per enable/disable cycle
	memProfileBuffers     []*bytes.Buffer // heap profile paired with each CPU buffer
	profileEndCallback    func()
	cpuSampleRate         int // Hz; 0 means use the configured default
	ender                 Ender
	disabledFromPanic     bool // set by handlePanic; blocks all public entry points
}
44 |
// errDisabledFromPanic is returned by every public probe entry point once a
// recovered panic has permanently disabled the probe (see handlePanic).
// errors.New is the right constructor here: the message is a constant, so
// Errorf's formatting machinery is unnecessary.
var errDisabledFromPanic = errors.New("Probe has been disabled due to a previous panic. Please check the logs for details.")
46 |
// Ender ends the current profile, either blocking until the upload
// completes (End) or returning immediately (EndNoWait).
type Ender interface {
	End()
	EndNoWait()
}
51 |
// ender adapts a *probe to the Ender interface, discarding the errors
// returned by the underlying probe methods.
type ender struct {
	probe *probe
}
55 |
// End ends the current profile and blocks until it is uploaded; any error
// from the probe is intentionally discarded (Ender has no error returns).
func (e *ender) End() {
	e.probe.End()
}
59 |
// EndNoWait ends the current profile without waiting for the upload; any
// error from the probe is intentionally discarded.
func (e *ender) EndNoWait() {
	e.probe.EndNoWait()
}
63 |
64 | func newProbe() *probe {
65 | p := &probe{
66 | configuration: &Configuration{},
67 | }
68 | p.ender = &ender{
69 | probe: p,
70 | }
71 | p.currentTitle = "un-named profile"
72 | p.startTriggerRearmLoop()
73 | return p
74 | }
75 |
76 | func (p *probe) Configure(config *Configuration) {
77 | p.mutex.Lock()
78 | defer p.mutex.Unlock()
79 | p.configuration = config
80 | return
81 | }
82 |
// IsProfiling reports whether a profile is currently being recorded or
// uploaded. It returns false when the configuration cannot be loaded or
// profiling is not allowed.
// NOTE(review): p.currentState is read without holding p.mutex, so the
// answer may be stale — confirm callers tolerate that.
func (p *probe) IsProfiling() bool {
	if err := p.configuration.load(); err != nil {
		return false
	}
	if !p.configuration.canProfile() {
		return false
	}
	return p.currentState == profilerStateEnabled || p.currentState == profilerStateSending
}
92 |
// EnableNowFor starts profiling immediately and schedules it to stop after
// the given duration. A duration of 0, or one exceeding
// MaxProfileDuration, is clamped to MaxProfileDuration. Returns an error
// when the probe is in a state that cannot start a profile.
func (p *probe) EnableNowFor(duration time.Duration) (err error) {
	if p.disabledFromPanic {
		return errDisabledFromPanic
	}
	// A panic anywhere below disables the probe for good instead of
	// crashing the host application.
	defer func() {
		if r := recover(); r != nil {
			err = p.handlePanic(r)
		}
	}()

	if err = p.configuration.load(); err != nil {
		return
	}
	if !p.configuration.canProfile() {
		return
	}
	logger := p.configuration.Logger

	// Note: We do this once on each side of the mutex to be 100% sure that it's
	// impossible for deferred/idempotent calls to deadlock, here and forever.
	if !p.canEnableProfiling() {
		err = errors.Errorf("unable to enable profiling as state is %v", p.currentState)
		logger.Error().Err(err).Msgf("Blackfire: wrong profiler state")
		return
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Re-check under the mutex: the state may have changed since the
	// unguarded check above.
	if !p.canEnableProfiling() {
		err = errors.Errorf("unable to enable profiling as state is %v", p.currentState)
		logger.Error().Err(err).Msgf("Blackfire: wrong profiler state")
		return
	}

	if duration == 0 || duration > p.configuration.MaxProfileDuration {
		duration = p.configuration.MaxProfileDuration
	}

	if err = p.enableProfiling(); err != nil {
		return
	}

	// Capture the current trigger channel: startTriggerRearmLoop replaces
	// p.profileDisableTrigger after each trigger, and the timer below must
	// signal the channel that was live when this profile started.
	channel := p.profileDisableTrigger
	shouldEndProfile := false

	go func() {
		<-time.After(duration)
		channel <- shouldEndProfile
	}()

	return
}
146 |
// EnableNow starts profiling immediately for the configured maximum
// duration.
// NOTE(review): MaxProfileDuration is read before EnableNowFor loads the
// configuration; a zero value here is harmless since EnableNowFor clamps
// 0 to the loaded maximum.
func (p *probe) EnableNow() (err error) {
	return p.EnableNowFor(p.configuration.MaxProfileDuration)
}
150 |
// Enable sets the configuration to on-demand-only mode and then enables
// profiling for the configured maximum duration.
// NOTE(review): onDemandOnly presumably defers actual profiling to agent
// demand — confirm against configuration.go.
func (p *probe) Enable() (err error) {
	p.configuration.onDemandOnly = true
	return p.EnableNowFor(p.configuration.MaxProfileDuration)
}
155 |
// Disable stops recording the current profile without uploading it. The
// actual stop is performed asynchronously via the trigger channel. Returns
// an error when no profile is currently recording.
func (p *probe) Disable() (err error) {
	if p.disabledFromPanic {
		return errDisabledFromPanic
	}
	// A panic anywhere below disables the probe for good instead of
	// crashing the host application.
	defer func() {
		if r := recover(); r != nil {
			err = p.handlePanic(r)
		}
	}()

	if err = p.configuration.load(); err != nil {
		return
	}
	if !p.configuration.canProfile() {
		return
	}
	logger := p.configuration.Logger

	// Note: We do this once on each side of the mutex to be 100% sure that it's
	// impossible for deferred/idempotent calls to deadlock, here and forever.
	if !p.canDisableProfiling() {
		err = errors.Errorf("unable to disable profiling as state is %v", p.currentState)
		logger.Error().Err(err).Msgf("Blackfire: wrong profiler state")
		return
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Re-check under the mutex: the state may have changed since the
	// unguarded check above.
	if !p.canDisableProfiling() {
		err = errors.Errorf("unable to disable profiling as state is %v", p.currentState)
		logger.Error().Err(err).Msgf("Blackfire: wrong profiler state")
		return
	}

	// false = stop recording only; do not end/upload the profile.
	p.triggerStopProfiler(false)
	return
}
194 |
// EndNoWait ends the current profile and triggers its upload without
// blocking: the actual end/upload happens asynchronously via the trigger
// channel. Returns an error when there is no profile to end.
func (p *probe) EndNoWait() (err error) {
	if p.disabledFromPanic {
		return errDisabledFromPanic
	}
	// A panic anywhere below disables the probe for good instead of
	// crashing the host application.
	defer func() {
		if r := recover(); r != nil {
			err = p.handlePanic(r)
		}
	}()

	if err = p.configuration.load(); err != nil {
		return
	}
	if !p.configuration.canProfile() {
		return
	}
	logger := p.configuration.Logger

	// Note: We do this once on each side of the mutex to be 100% sure that it's
	// impossible for deferred/idempotent calls to deadlock, here and forever.
	if !p.canEndProfiling() {
		err = errors.Errorf("unable to end profiling as state is %v", p.currentState)
		logger.Error().Err(err).Msgf("Blackfire: wrong profiler state")
		return
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Re-check under the mutex: the state may have changed since the
	// unguarded check above.
	if !p.canEndProfiling() {
		err = errors.Errorf("unable to end profiling as state is %v", p.currentState)
		logger.Error().Err(err).Msgf("Blackfire: wrong profiler state")
		return
	}

	// true = end the profile and upload it.
	p.triggerStopProfiler(true)
	return
}
233 |
// End ends the current profile and blocks until it has been uploaded to
// the agent (unlike EndNoWait, which triggers the same work
// asynchronously). Returns an error when there is no profile to end.
func (p *probe) End() (err error) {
	if p.disabledFromPanic {
		return errDisabledFromPanic
	}
	// A panic anywhere below disables the probe for good instead of
	// crashing the host application.
	defer func() {
		if r := recover(); r != nil {
			err = p.handlePanic(r)
		}
	}()

	if err = p.configuration.load(); err != nil {
		return
	}
	if !p.configuration.canProfile() {
		return
	}
	logger := p.configuration.Logger

	// Note: We do this once on each side of the mutex to be 100% sure that it's
	// impossible for deferred/idempotent calls to deadlock, here and forever.
	if !p.canEndProfiling() {
		err = errors.Errorf("unable to end profiling and wait as state is %v", p.currentState)
		logger.Error().Err(err).Msgf("Blackfire: wrong profiler state")
		return
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Re-check under the mutex: the state may have changed since the
	// unguarded check above.
	if !p.canEndProfiling() {
		err = errors.Errorf("unable to end profiling and wait as state is %v", p.currentState)
		logger.Error().Err(err).Msg("Blackfire: wrong profiler state")
		return
	}

	logger.Debug().Msg("Blackfire: Ending the current profile and blocking until it's uploaded")
	if err = p.endProfile(); err != nil {
		logger.Error().Msgf("Blackfire (end profile): %v", err)
		return
	}
	logger.Debug().Msg("Blackfire: Profile uploaded. Unblocking.")
	return
}
277 |
// GenerateSubProfileQuery derives a new query string for a sub-profile
// from the agent's current Blackfire query: it keeps the challenge and
// signature parts, drops sample aggregation, and sets a fresh random
// sub_profile id chained to the current one ("parent:child").
func (p *probe) GenerateSubProfileQuery() (s string, err error) {
	if p.disabledFromPanic {
		err = errDisabledFromPanic
		return
	}
	defer func() {
		if r := recover(); r != nil {
			err = p.handlePanic(r)
		}
	}()

	if err := p.prepareAgentClient(); err != nil {
		return "", err
	}
	currentQuery, err := p.agentClient.CurrentBlackfireQuery()
	if err != nil {
		return "", err
	}
	// Expected shape: "<challenge>&signature=<signature>&<extra args>".
	parts := strings.Split(currentQuery, "signature=")
	if len(parts) < 2 {
		return "", errors.New("Blackfire: Unable to generate a sub-profile query")
	}
	challenge := strings.TrimRight(parts[0], "&")
	parts = strings.Split(parts[1], "&")
	signature := parts[0]
	args := make(url.Values)
	if len(parts) > 1 {
		args, err = url.ParseQuery(parts[1])
		if err != nil {
			return "", errors.Wrapf(err, "Blackfire: Unable to generate a sub-profile query")
		}
	}
	args.Del("aggreg_samples")

	// If a "sub_profile=parent:child" is already present, the old child
	// becomes the new parent, chaining sub-profiles together.
	parent := ""
	parts = strings.Split(args.Get("sub_profile"), ":")
	if len(parts) > 1 {
		parent = parts[1]
	}
	// 7 random bytes -> base64, padding stripped, '+'/'/' replaced so the
	// id is URL-safe, then truncated to 9 characters.
	token := make([]byte, 7)
	rand.Read(token)
	id := base64.StdEncoding.EncodeToString(token)
	id = strings.TrimRight(id, "=")
	id = strings.ReplaceAll(id, "+", "A")
	id = strings.ReplaceAll(id, "/", "B")
	args.Set("sub_profile", parent+":"+id[0:9])
	return challenge + "&signature=" + signature + "&" + args.Encode(), nil
}
326 |
// SetCurrentTitle sets the title attached to the next uploaded profile.
// NOTE(review): written without holding p.mutex — confirm callers cannot
// race with an in-flight SendProfile.
func (p *probe) SetCurrentTitle(title string) {
	p.currentTitle = title
}
330 |
331 | func (p *probe) startTriggerRearmLoop() {
332 | go func() {
333 | for {
334 | // Use a large queue for the rare edge case where many goroutines
335 | // try to trigger the same channel before it gets rebuilt.
336 | p.profileDisableTrigger = make(chan bool, 100)
337 | shouldEndProfile := <-p.profileDisableTrigger
338 | p.onProfileDisableTriggered(shouldEndProfile, p.profileEndCallback)
339 |
340 | }
341 | }()
342 | }
343 |
344 | func (p *probe) addNewProfileBufferSet() {
345 | p.cpuProfileBuffers = append(p.cpuProfileBuffers, &bytes.Buffer{})
346 | p.memProfileBuffers = append(p.memProfileBuffers, &bytes.Buffer{})
347 | }
348 |
// resetProfileBufferSet truncates both buffer lists (retaining slice
// capacity) so the next profiling session starts with an empty set.
func (p *probe) resetProfileBufferSet() {
	p.cpuProfileBuffers = p.cpuProfileBuffers[:0]
	p.memProfileBuffers = p.memProfileBuffers[:0]
}
353 |
// currentCPUBuffer returns the most recently added CPU profile buffer.
// Panics if addNewProfileBufferSet has never been called.
func (p *probe) currentCPUBuffer() *bytes.Buffer {
	return p.cpuProfileBuffers[len(p.cpuProfileBuffers)-1]
}
357 |
// currentMemBuffer returns the most recently added heap profile buffer.
// Panics if addNewProfileBufferSet has never been called.
func (p *probe) currentMemBuffer() *bytes.Buffer {
	return p.memProfileBuffers[len(p.memProfileBuffers)-1]
}
361 |
362 | func (p *probe) prepareAgentClient() (err error) {
363 | if p.agentClient != nil {
364 | return nil
365 | }
366 | p.agentClient, err = NewAgentClient(p.configuration)
367 | return err
368 | }
369 |
370 | func (p *probe) canEnableProfiling() bool {
371 | switch p.currentState {
372 | case profilerStateOff, profilerStateDisabled:
373 | return true
374 | case profilerStateEnabled, profilerStateSending:
375 | return false
376 | default:
377 | panic(fmt.Errorf("Blackfire: Unhandled state: %v", p.currentState))
378 | }
379 | }
380 |
381 | func (p *probe) canDisableProfiling() bool {
382 | switch p.currentState {
383 | case profilerStateEnabled:
384 | return true
385 | case profilerStateOff, profilerStateDisabled, profilerStateSending:
386 | return false
387 | default:
388 | panic(fmt.Errorf("Blackfire: Unhandled state: %v", p.currentState))
389 | }
390 | }
391 |
392 | func (p *probe) canEndProfiling() bool {
393 | switch p.currentState {
394 | case profilerStateEnabled, profilerStateDisabled:
395 | return true
396 | case profilerStateOff, profilerStateSending:
397 | return false
398 | default:
399 | panic(fmt.Errorf("Blackfire: Unhandled state: %v", p.currentState))
400 | }
401 | }
402 |
// enableProfiling starts CPU profiling into a fresh buffer pair at
// p.cpuSampleRate (falling back to the configured default) and moves the
// state to profilerStateEnabled. Callers in this file invoke it while
// holding p.mutex.
func (p *probe) enableProfiling() error {
	logger := p.configuration.Logger
	logger.Debug().Msgf("Blackfire: Start profiling")

	p.addNewProfileBufferSet()

	if p.cpuSampleRate == 0 {
		p.cpuSampleRate = p.configuration.DefaultCPUSampleRateHz
	}

	// We call SetCPUProfileRate before StartCPUProfile in order to lock in our
	// desired sample rate. When SetCPUProfileRate is called with a non-zero
	// value, profiling is considered "ON". Any attempt to change the sample
	// rate without first setting it back to 0 will fail. However, since
	// SetCPUProfileRate has no return value, there's no way to check for this
	// failure (Note: it will print "runtime: cannot set cpu profile rate until
	// previous profile has finished" to stderr). Since StartCPUProfile can't
	// know if its call to SetCPUProfileRate failed, it will just carry on with
	// the profiling (at our selected rate).
	runtime.SetCPUProfileRate(0)
	if p.cpuSampleRate != golangDefaultCPUSampleRate {
		// Only pre-set if it's different from what StartCPUProfile would set.
		// This avoids the unsightly error message whenever possible.
		runtime.SetCPUProfileRate(p.cpuSampleRate)
	}
	if err := pprof.StartCPUProfile(p.currentCPUBuffer()); err != nil {
		return err
	}

	p.currentState = profilerStateEnabled
	return nil
}
435 |
// disableProfiling stops the CPU profiler and captures a heap profile into
// the current memory buffer. The state always becomes
// profilerStateDisabled on exit (deferred), even if writing the heap
// profile fails. A no-op when the current state does not allow disabling.
func (p *probe) disableProfiling() error {
	logger := p.configuration.Logger
	logger.Debug().Msgf("Blackfire: Stop profiling")
	if !p.canDisableProfiling() {
		return nil
	}

	// Transition to Disabled regardless of whether the heap snapshot below
	// succeeds.
	defer func() {
		p.currentState = profilerStateDisabled
	}()

	pprof.StopCPUProfile()

	memWriter := bufio.NewWriter(p.currentMemBuffer())
	if err := pprof.WriteHeapProfile(memWriter); err != nil {
		return err
	}
	if err := memWriter.Flush(); err != nil {
		return err
	}

	return nil
}
459 |
460 | func (p *probe) endProfile() error {
461 | logger := p.configuration.Logger
462 | logger.Debug().Msgf("Blackfire: End profile")
463 | if !p.canEndProfiling() {
464 | return nil
465 | }
466 |
467 | if err := p.disableProfiling(); err != nil {
468 | return err
469 | }
470 |
471 | if err := p.prepareAgentClient(); err != nil {
472 | return err
473 | }
474 |
475 | p.currentState = profilerStateSending
476 | defer func() {
477 | p.currentState = profilerStateOff
478 | }()
479 |
480 | if p.configuration.PProfDumpDir != "" {
481 | logger.Debug().Msgf("Dumping pprof profiles to %v", p.configuration.PProfDumpDir)
482 | pprof_reader.DumpProfiles(p.cpuProfileBuffers, p.memProfileBuffers, p.configuration.PProfDumpDir)
483 | }
484 |
485 | profile, err := pprof_reader.ReadFromPProf(p.cpuProfileBuffers, p.memProfileBuffers)
486 | if err != nil {
487 | return err
488 | }
489 | p.resetProfileBufferSet()
490 |
491 | if profile == nil {
492 | return fmt.Errorf("Profile was not created")
493 | }
494 |
495 | if !profile.HasData() {
496 | logger.Debug().Msgf("Blackfire: No samples recorded")
497 | return nil
498 | }
499 |
500 | if err := p.agentClient.SendProfile(profile, p.currentTitle); err != nil {
501 | return err
502 | }
503 |
504 | return err
505 | }
506 |
// triggerStopProfiler signals the rearm loop: true ends and uploads the
// profile, false only stops recording. Blocks only if the trigger
// channel's buffer (100, see startTriggerRearmLoop) is full.
func (p *probe) triggerStopProfiler(shouldEndProfile bool) {
	p.profileDisableTrigger <- shouldEndProfile
}
510 |
// onProfileDisableTriggered services a single trigger from the rearm loop:
// under the mutex it either ends the profile (including upload) or just
// stops recording, then runs the optional callback on its own goroutine.
func (p *probe) onProfileDisableTriggered(shouldEndProfile bool, callback func()) {
	logger := p.configuration.Logger
	logger.Debug().Msgf("Blackfire: Received profile disable trigger. shouldEndProfile = %t, callback = %p", shouldEndProfile, callback)
	p.mutex.Lock()
	defer p.mutex.Unlock()

	if shouldEndProfile {
		if err := p.endProfile(); err != nil {
			logger.Error().Msgf("Blackfire (end profile): %v", err)
		}
	} else {
		if err := p.disableProfiling(); err != nil {
			logger.Error().Msgf("Blackfire (stop profiling): %v", err)
		}
	}

	if callback != nil {
		go callback()
	}
}
531 |
532 | func (p *probe) handlePanic(r interface{}) error {
533 | p.disabledFromPanic = true
534 | p.configuration.Logger.Error().Msgf("Unexpected panic %v. Probe has been disabled.", r)
535 | p.configuration.Logger.Error().Msg(string(debug.Stack()))
536 | return fmt.Errorf("Unexpected panic %v. Probe has been disabled.", r)
537 | }
538 |
--------------------------------------------------------------------------------
/profile.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "io/ioutil"
7 | "net/http"
8 | "time"
9 | )
10 |
// Profile represents a profile resource as returned by the Blackfire API.
type Profile struct {
	UUID      string
	URL       string
	APIURL    string
	Title     string   `json:"label"`
	CreatedAt BFTime   `json:"created_at"`
	Status    Status   `json:"status"`
	Envelope  Envelope `json:"envelope"`
	Links     linksMap `json:"_links"`

	retries int  // load attempts so far; load gives up after 60
	loaded  bool // true once load has obtained a final status
}
24 |
// Envelope carries the profile's aggregate cost dimensions as reported by
// the API. Presumably ct = call count, cpu = CPU time, mu = memory usage,
// pmu = peak memory usage — TODO confirm against the Blackfire API docs.
type Envelope struct {
	Ct  int `json:"ct"`
	CPU int `json:"cpu"`
	MU  int `json:"mu"`
	PMU int `json:"pmu"`
}
31 |
// Status describes the processing state of a profile as returned by the
// API. A Code greater than zero is treated as final by Profile.load.
type Status struct {
	Name          string `json:"name"`
	Code          int    `json:"code"`
	FailureReason string `json:"failure_reason"`
}
37 |
// BFTime wraps time.Time to parse the timestamp formats returned by the
// Blackfire API, which are not always strictly RFC 3339.
type BFTime struct {
	time.Time
}

// UnmarshalJSON parses a JSON string into m.Time. It tries RFC 3339 with
// nanoseconds first, then falls back to the same layout with a
// colon-less zone offset. A JSON null leaves m unchanged; any token that
// is not a quoted string yields an error (the previous version panicked
// on tokens shorter than two bytes).
func (m *BFTime) UnmarshalJSON(b []byte) (err error) {
	s := string(b)
	if s == "null" {
		return nil
	}
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return fmt.Errorf("BFTime: invalid JSON time value: %s", s)
	}
	// Get rid of the quotes "" around the value.
	s = s[1 : len(s)-1]
	t, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		t, err = time.Parse("2006-01-02T15:04:05.999999999Z0700", s)
	}
	m.Time = t
	return
}
53 |
54 | func (p *Profile) load(auth string) error {
55 | if p.loaded {
56 | return nil
57 | }
58 | p.retries += 1
59 | if p.retries > 60 {
60 | p.Status = Status{Name: "errored"}
61 | p.loaded = true
62 | return nil
63 | }
64 | request, err := http.NewRequest("GET", p.APIURL, nil)
65 | if err != nil {
66 | return err
67 | }
68 | request.Header.Add("Authorization", auth)
69 | client := http.DefaultClient
70 | response, err := client.Do(request)
71 | if err != nil {
72 | return err
73 | }
74 | if response.StatusCode == 404 {
75 | p.Status = Status{Name: "queued"}
76 | return nil
77 | }
78 | if response.StatusCode >= 400 {
79 | p.Status = Status{Name: "errored"}
80 | p.loaded = true
81 | return nil
82 | }
83 | responseData, err := ioutil.ReadAll(response.Body)
84 | if err != nil {
85 | return err
86 | }
87 | if err := json.Unmarshal(responseData, &p); err != nil {
88 | return fmt.Errorf("JSON error: %v", err)
89 | }
90 |
91 | if p.Status.Code > 0 {
92 | p.loaded = true
93 | }
94 | return nil
95 | }
96 |
--------------------------------------------------------------------------------
/signal.go:
--------------------------------------------------------------------------------
1 | package blackfire
2 |
3 | import (
4 | "os"
5 | "os/signal"
6 | "time"
7 | )
8 |
9 | // EnableOnSignal sets up a trigger to enable profiling when the specified signal is received.
10 | // The profiler will profile for the specified duration.
11 | func EnableOnSignal(sig os.Signal, duration time.Duration) (err error) {
12 | if err = globalProbe.configuration.load(); err != nil {
13 | return
14 | }
15 | if !globalProbe.configuration.canProfile() {
16 | return
17 | }
18 |
19 | logger := globalProbe.configuration.Logger
20 | logger.Info().Msgf("Blackfire (signal): Signal [%s] triggers profiling for %.0f seconds", sig, float64(duration)/1000000000)
21 |
22 | callFuncOnSignal(sig, func() {
23 | logger.Info().Msgf("Blackfire (%s): Profiling for %.0f seconds", sig, float64(duration)/1000000000)
24 | if err := globalProbe.EnableNowFor(duration); err != nil {
25 | logger.Error().Msgf("Blackfire (EnableOnSignal): %v", err)
26 | }
27 | })
28 |
29 | return
30 | }
31 |
32 | // DisableOnSignal sets up a trigger to disable profiling when the specified signal is received.
33 | func DisableOnSignal(sig os.Signal) (err error) {
34 | if err = globalProbe.configuration.load(); err != nil {
35 | return
36 | }
37 | if !globalProbe.configuration.canProfile() {
38 | return
39 | }
40 |
41 | logger := globalProbe.configuration.Logger
42 | logger.Info().Msgf("Blackfire (signal): Signal [%s] stops profiling", sig)
43 |
44 | callFuncOnSignal(sig, func() {
45 | logger.Info().Msgf("Blackfire (%s): Disable profiling", sig)
46 | if err := globalProbe.Disable(); err != nil {
47 | logger.Error().Msgf("Blackfire (DisableOnSignal): %v", err)
48 | }
49 | })
50 | return
51 | }
52 |
53 | // EndOnSignal sets up a trigger to end the current profile and upload to Blackfire when the
54 | // specified signal is received.
55 | func EndOnSignal(sig os.Signal) (err error) {
56 | if err = globalProbe.configuration.load(); err != nil {
57 | return
58 | }
59 | if !globalProbe.configuration.canProfile() {
60 | return
61 | }
62 |
63 | logger := globalProbe.configuration.Logger
64 | logger.Info().Msgf("Blackfire (signal): Signal [%s] ends the current profile", sig)
65 |
66 | callFuncOnSignal(sig, func() {
67 | logger.Info().Msgf("Blackfire (%s): End profile", sig)
68 | if err := globalProbe.EndNoWait(); err != nil {
69 | logger.Error().Msgf("Blackfire (EndOnSignal): %v", err)
70 | }
71 | })
72 | return
73 | }
74 |
// callFuncOnSignal registers for the given signal and runs function on a
// background goroutine every time the signal arrives. The goroutine runs
// for the lifetime of the process (the channel is never closed).
func callFuncOnSignal(sig os.Signal, function func()) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, sig)
	go func() {
		for range ch {
			function()
		}
	}()
}
85 |
--------------------------------------------------------------------------------