├── .gitignore ├── LICENSE ├── README.md ├── config └── config.exs ├── lib ├── recon.ex ├── recon_alloc.ex ├── recon_lib.ex └── recon_trace.ex ├── mix.exs ├── mix.lock └── test ├── recon_trace_test.exs └── test_helper.exs /.gitignore: -------------------------------------------------------------------------------- 1 | /_build 2 | /cover 3 | /deps 4 | /doc 5 | erl_crash.dump 6 | *.ez 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 - 2016, Tatsuya Kawano 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of recon_ex nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ReconEx 2 | 3 | ReconEx is an Elixir wrapper for [Recon](https://ferd.github.io/recon/). 4 | It is a library to be dropped into any other Elixir project, to be 5 | used to assist DevOps people diagnose problems from `iex` shell in 6 | production Erlang VMs. 7 | 8 | Included modules are: 9 | 10 | - **Recon** 11 | * gathers information about processes and the general state of 12 | the VM, ports, and OTP behaviours running in the node. 13 | 14 | - **ReconAlloc** 15 | * provides functions to deal with Erlang's memory allocators. 16 | 17 | - **ReconLib** 18 | * provides useful functionality used by Recon when dealing 19 | with data from the node. 20 | 21 | - **ReconTrace** 22 | * production-safe tracing facilities. 23 | 24 | Documentation for the library can be obtained at 25 | https://hex.pm/packages/recon_ex (**TODO**) 26 | 27 | It is recommended that you use tags (**TODO**: create tags) if you do 28 | not want bleeding edge and development content for this library. 
29 | 30 | 31 | ## Current Status 32 | 33 | Versions supported: 34 | 35 | - Elixir 1.1 or newer 36 | - Recon 2.5.0 or newer 37 | 38 | 39 | ## Try Them Out 40 | 41 | To build the library: 42 | 43 | ```shell-session 44 | mix deps.get 45 | mix compile 46 | iex -S mix 47 | ``` 48 | 49 | **TODO**: Some examples. 50 | 51 | 52 | ## Install As Dependency 53 | 54 | **TODO** 55 | 56 | 57 | ## Change Log 58 | 59 | **TODO** 60 | 61 | 62 | ## Special Thanks 63 | 64 | - Special thanks to Fred Hebert, the author of [Recon](https://ferd.github.io/recon/), 65 | and the all contributors to it. 66 | 67 | 68 | ## License 69 | 70 | This code, as the original Recon, is published under the BSD 3-clause 71 | License. See LICENSE file for more information. 72 | -------------------------------------------------------------------------------- /config/config.exs: -------------------------------------------------------------------------------- 1 | # This file is responsible for configuring your application 2 | # and its dependencies with the aid of the Mix.Config module. 3 | use Mix.Config 4 | 5 | # This configuration is loaded before any dependency and is restricted 6 | # to this project. If another project depends on this project, this 7 | # file won't be loaded nor affect the parent project. For this reason, 8 | # if you want to provide default values for your application for 9 | # 3rd-party users, it should be done in your "mix.exs" file. 10 | 11 | # You can configure for your application as: 12 | # 13 | # config :recon_ex, key: :value 14 | # 15 | # And access this configuration in your application as: 16 | # 17 | # Application.get_env(:recon_ex, :key) 18 | # 19 | # Or configure a 3rd-party app: 20 | # 21 | # config :logger, level: :info 22 | # 23 | 24 | # It is also possible to import configuration files, relative to this 25 | # directory. For example, you can emulate configuration per environment 26 | # by uncommenting the line below and defining dev.exs, test.exs and such. 27 | # Configuration from the imported file will override the ones defined 28 | # here (which is why it is important to import them last). 29 | # 30 | # import_config "#{Mix.env}.exs" 31 | -------------------------------------------------------------------------------- /lib/recon.ex: -------------------------------------------------------------------------------- 1 | defmodule Recon do 2 | require :recon 3 | 4 | @moduledoc """ 5 | `Recon`, as a module, provides access to the high-level 6 | functionality contained in the ReconEx application. 7 | 8 | It has functions in five main categories: 9 | 10 | 1. **State information** 11 | * Process information is everything that has to do with the 12 | general state of the node. Functions such as `info/1` and 13 | `info/3` are wrappers to provide more details than 14 | `:erlang.process_info/1`, while providing it in a 15 | production-safe manner. They have equivalents to 16 | `:erlang.process_info/2` in the functions `info/2` and `info/4`, 17 | respectively. 18 | * `proc_count/2` and `proc_window/3` are to be used when you 19 | require information about processes in a larger sense: 20 | biggest consumers of given process information (say memory or 21 | reductions), either absolutely or over a sliding time window, 22 | respectively. 23 | * `bin_leak/1` is a function that can be used to try and see if 24 | your Erlang node is leaking refc binaries. See the function 25 | itself for more details. 
26 | * Functions to access node statistics, in a manner somewhat 27 | similar to what [vmstats](https://github.com/ferd/vmstats) 28 | provides as a library. There are 3 of them: 29 | `node_stats_print/2`, which displays them, `node_stats_list/2`, 30 | which returns them in a list, and `node_stats/4`, which 31 | provides a fold-like interface for stats gathering. For CPU 32 | usage specifically, see `scheduler_usage/1`. 33 | 2. **OTP tools** 34 | * This category provides tools to interact with pieces of OTP 35 | more easily. At this point, the only functions included are 36 | `get_state/1` and `get_state/2`, which work as wrappers around 37 | `:sys.get_state/1` in OTP 38 | R16B01 and later, and provide the required functionality for older 39 | versions of Erlang. 40 | 3. **Code Handling** 41 | * Specific functions are in `Recon` for the sole purpose of 42 | interacting with source and compiled code. `remote_load/1` and 43 | `remote_load/2` will allow you to take a local module, and load it 44 | remotely (in a diskless manner) on another Erlang node you're 45 | connected to. 46 | * `source/1` allows you to print the source of a loaded module, in 47 | case it's not available in the currently running node. 48 | 4. **Ports and Sockets** 49 | * To make it simpler to debug some network-related issues, recon 50 | contains functions to deal with Erlang ports (raw, file 51 | handles, or inet). Functions `tcp/0`, `udp/0`, `sctp/0`, 52 | `files/0`, and `port_types/0` will list all the Erlang ports of 53 | a given type. The latter function prints counts of all 54 | individual types. 55 | * Port state information can be useful to figure out why certain 56 | parts of the system misbehave. Functions such as `port_info/1` 57 | and `port_info/2` are wrappers that provide similar or more 58 | details than `:erlang.port_info/1` and `:erlang.port_info/2`, 59 | and, for inet ports, statistics and options for each socket. 60 | * Finally, the functions `inet_count/2` and `inet_window/3` 61 | provide the absolute or sliding window functionality of 62 | `proc_count/2` and `proc_window/3` to inet ports and connections 63 | currently on the node. 64 | 5. **RPC** 65 | * These are wrappers to make RPC work simpler with clusters of 66 | Erlang nodes. Default RPC mechanisms (from the `:rpc` module) 67 | make it somewhat painful to call shell-defined funs over node 68 | boundaries. The functions `rpc/1`, `rpc/2`, and `rpc/3` will do 69 | it with a simpler interface. 70 | * Additionally, when you're running diagnostic code on remote 71 | nodes and want to know which node evaluated what result, using 72 | `named_rpc/1`, `named_rpc/2`, and `named_rpc/3` will wrap the 73 | results in a tuple that tells you which node it's coming from, 74 | making it easier to identify bad nodes. 75 | """ 76 | 77 | ############# 78 | ### TYPES ### 79 | ############# 80 | 81 | @type proc_attrs :: 82 | {pid, attr :: term, 83 | [ 84 | name :: 85 | atom 86 | | {:current_function, mfa} 87 | | {:initial_call, mfa}, 88 | ...
89 | ]} 90 | 91 | @type inet_attr_name :: :recv_cnt | :recv_oct | :send_cnt | :send_oct | :cnt | :oct 92 | 93 | @type inet_attrs :: {port, attr :: term, [{atom, term}]} 94 | 95 | @type pid_term :: 96 | pid 97 | | atom 98 | | charlist 99 | | {:global, term} 100 | | {:via, module, term} 101 | | {non_neg_integer, non_neg_integer, non_neg_integer} 102 | 103 | @type info_type :: :meta | :signals | :location | :memory_used | :work 104 | 105 | @type info_meta_key :: :registered_name | :dictionary | :group_leader | :status 106 | @type info_signals_key :: :links | :monitors | :monitored_by | :trap_exit 107 | @type info_location_key :: :initial_call | :current_stacktrace 108 | @type info_memory_key :: 109 | :memory | :message_queue_len | :heap_size | :total_heap_size | :garbage_collection 110 | @type info_work_key :: :reductions 111 | 112 | @type info_key :: 113 | info_meta_key | info_signals_key | info_location_key | info_memory_key | info_work_key 114 | 115 | @type stats :: {[absolutes :: {atom, term}], [increments :: {atom, term}]} 116 | 117 | @type interval_ms :: pos_integer 118 | @type time_ms :: pos_integer 119 | @type timeout_ms :: non_neg_integer | :infinity 120 | 121 | @type port_term :: port | charlist | atom | pos_integer 122 | 123 | @type port_info_type :: :meta | :signals | :io | :memory_used | :specific 124 | 125 | @type port_info_meta_key :: :registered_name | :id | :name | :os_pid 126 | @type port_info_signals_key :: :connected | :links | :monitors 127 | @type port_info_io_key :: :input | :output 128 | @type port_info_memory_key :: :memory | :queue_size 129 | @type port_info_specific_key :: atom 130 | 131 | @type port_info_key :: 132 | port_info_meta_key 133 | | port_info_signals_key 134 | | port_info_io_key 135 | | port_info_memory_key 136 | | port_info_specific_key 137 | 138 | @type nodes :: node | [node, ...] 139 | @type rpc_result :: {[success :: term], [fail :: term]} 140 | 141 | ################## 142 | ### PUBLIC API ### 143 | ################## 144 | 145 | ### Process Info ### 146 | 147 | @doc """ 148 | Equivalent to `info({a, b, c})` where `a`, `b`, and `c` are the 149 | integer parts of a pid. 150 | """ 151 | @spec info(non_neg_integer, non_neg_integer, non_neg_integer) :: 152 | [{info_type, [{info_key, term}]}, ...] 153 | def info(a, b, c), do: :recon.info(a, b, c) 154 | 155 | @doc """ 156 | Equivalent to `info({a, b, c}, key)` where `a`, `b`, and `c` are the 157 | integer parts of a pid. 158 | """ 159 | @spec info(non_neg_integer, non_neg_integer, non_neg_integer, key :: info_type | [atom] | atom) :: 160 | term 161 | def info(a, b, c, key), do: :recon.info(a, b, c, key) 162 | 163 | @doc """ 164 | Similar to `:erlang.process_info/1`, but excludes 165 | fields such as the mailbox, which have a tendency to grow and be 166 | unsafe when called in production systems. Also includes a few more 167 | fields than what is usually given (`monitors`, `monitored_by`, 168 | etc.), and separates the fields in a more readable format based on 169 | the type of information contained. 170 | 171 | Moreover, it will fetch and read information on local processes that 172 | were registered locally (an atom), globally (`{:global, name}`), or 173 | through another registry supported in the `{:via, module, name}` 174 | syntax (must have a `module.whereis_name/1` function). Pids can also 175 | be passed in as a string (`"#PID<0.39.0>"`, `"<0.39.0>"`) or a 176 | triple (`{0, 39, 0}`) and will be converted to be used. 177 | """ 178 | @spec info(pid_term) :: [{info_type, [{info_key, value :: term}]}, ...]
179 | def info(pid_term) do 180 | ReconLib.term_to_pid(pid_term) |> :recon.info() 181 | end 182 | 183 | @doc """ 184 | Similar to `:erlang.process_info/2`, but allows you to select 185 | fields by safe categories and pre-selections, avoiding items such as 186 | the mailbox, which may have a tendency to grow and be unsafe when 187 | called in production systems. 188 | 189 | Moreover, it will fetch and read information on local processes that 190 | were registered locally (an atom), globally (`{:global, name}`), or 191 | through another registry supported in the `{:via, module, name}` 192 | syntax (must have a `module.whereis_name/1` function). Pids can also 193 | be passed in as a string (`"#PID<0.39.0>"`, `"<0.39.0>"`) or a 194 | triple (`{0, 39, 0}`) and will be converted to be used. 195 | 196 | Although the type signature doesn't show it in generated 197 | documentation, a list of arguments or individual arguments accepted 198 | by `:erlang.process_info/2` can be passed in, and results are returned as that function would return them. 199 | 200 | A fake attribute `:binary_memory` is also available to return the 201 | amount of memory used by refc binaries for a process. 202 | """ 203 | @spec info(pid_term, info_type) :: {info_type, [{info_key, term}]} 204 | @spec info(pid_term, [atom]) :: [{atom, term}] 205 | @spec info(pid_term, atom) :: {atom, term} 206 | def info(pid_term, info_type_or_keys) do 207 | ReconLib.term_to_pid(pid_term) |> :recon.info(info_type_or_keys) 208 | end 209 | 210 | @doc """ 211 | Fetches a given attribute from all processes (except the caller) and 212 | returns the biggest `num` consumers. 213 | """ 214 | # @todo (Erlang Recon) Implement this function so it only stores 215 | # `num` entries in memory at any given time, instead of as many as 216 | # there are processes. 217 | @spec proc_count(attribute_name :: atom, non_neg_integer) :: [proc_attrs] 218 | def proc_count(attr_name, num) do 219 | :recon.proc_count(attr_name, num) 220 | end 221 | 222 | @doc """ 223 | Fetches a given attribute from all processes (except the caller) and 224 | returns the biggest entries, over a sliding time window. 225 | 226 | This function is particularly useful when processes on the node are 227 | mostly short-lived, usually too short to inspect through other 228 | tools, in order to figure out what kind of processes are eating 229 | through a lot of resources on a given node. 230 | 231 | It is important to see this function as a snapshot over a sliding 232 | window. A program's timeline during sampling might look like this: 233 | 234 | ` --w---- [Sample1] ---x-------------y----- [Sample2] ---z--->` 235 | 236 | Some processes will live between `w` and die at `x`, some between 237 | `y` and `z`, and some between `x` and `y`. These samples will not be 238 | too significant as they're incomplete. If the majority of your 239 | processes run between a time interval `x`...`y` (in absolute terms), 240 | you should make sure that your sampling time is smaller than this so 241 | that for many processes, their lifetime spans the equivalent of `w` 242 | and `z`. Not doing this can skew the results: long-lived processes 243 | that have 10 times as long to accumulate data (say reductions) will 244 | look like bottlenecks when they're not. 245 | 246 | **Warning:** this function depends on data gathered at two 247 | snapshots, and then building a dictionary with entries to 248 | differentiate them. This can take a heavy toll on memory when you 249 | have many dozens of thousands of processes.
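
For example, to find the three processes that accumulated the most
reductions over a one-second window (a sketch; each entry in the
result is a `proc_attrs()` triple of pid, reduction delta, and
process details, and the actual entries depend entirely on the node):

    iex> Recon.proc_window(:reductions, 3, 1000)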
250 | """ 251 | @spec proc_window( 252 | attribute_name :: atom, 253 | non_neg_integer, 254 | milliseconds :: pos_integer 255 | ) :: [proc_attrs] 256 | def proc_window(attr_name, num, time) do 257 | :recon.proc_window(attr_name, num, time) 258 | end 259 | 260 | @doc """ 261 | Refc binaries can be leaking when barely-busy processes route them 262 | around and do little else, or when extremely busy processes reach a 263 | stable amount of memory allocated and do the vast majority of their 264 | work with refc binaries. When this happens, it may take a very long 265 | while before references get deallocated and refc binaries get to be 266 | garbage collected, leading to out of memory crashes. This function 267 | fetches the number of refc binary references in each process of the 268 | node, garbage collects them, and compares the resulting number of 269 | references in each of them. The function then returns the `n` 270 | processes that freed the biggest amount of binaries, potentially 271 | highlighting leaks. 272 | 273 | See [the Erlang/OTP Efficiency Guide](http://www.erlang.org/doc/efficiency_guide/binaryhandling.html#id65722) 274 | for more details on refc binaries. 275 | """ 276 | @spec bin_leak(pos_integer) :: [proc_attrs] 277 | def bin_leak(n), do: :recon.bin_leak(n) 278 | 279 | @doc """ 280 | Shorthand for `node_stats(n, interval, fn(x, _) -> IO.inspect(x, pretty: true) end, :ok)` 281 | """ 282 | @spec node_stats_print(repeat :: non_neg_integer, interval_ms) :: term 283 | def node_stats_print(n, interval) do 284 | fold_fun = fn x, _ -> 285 | IO.inspect(x, pretty: true) 286 | :ok 287 | end 288 | 289 | node_stats(n, interval, fold_fun, :ok) 290 | end 291 | 292 | @doc """ 293 | Because Erlang CPU usage as reported from `top` isn't the most 294 | reliable value (due to schedulers doing idle spinning to avoid going 295 | to sleep and impacting latency), a metric exists that is based on 296 | scheduler wall time. 297 | 298 | For any time interval, Scheduler wall time can be used as a measure 299 | of how **busy** a scheduler is. A scheduler is busy when: 300 | 301 | - executing process code 302 | - executing driver code 303 | - executing NIF code 304 | - executing BIFs 305 | - garbage collecting 306 | - doing memory management 307 | 308 | A scheduler isn't busy when doing anything else. 309 | """ 310 | @spec scheduler_usage(interval_ms) :: 311 | [{scheduler_id :: pos_integer, usage :: number()}] 312 | def scheduler_usage(interval) when is_integer(interval) do 313 | :recon.scheduler_usage(interval) 314 | end 315 | 316 | @doc """ 317 | Shorthand for `node_stats(n, interval, fn(x, acc) -> [x | acc] end, [])` 318 | with the results reversed to be in the right temporal order. 319 | """ 320 | 321 | @spec node_stats_list(repeat :: non_neg_integer, interval_ms) :: [stats] 322 | def node_stats_list(n, interval), do: :recon.node_stats_list(n, interval) 323 | 324 | @doc """ 325 | Gathers statistics `n` time, waiting `interval` milliseconds between 326 | each run, and accumulates results using a folding function `fold_fun`. 327 | The function will gather statistics in two forms: Absolutes and 328 | Increments. 329 | 330 | Absolutes are values that keep changing with time, and are useful to 331 | know about as a datapoint: process count, size of the run queue, 332 | error_logger queue length, and the memory of the node (total, 333 | processes, atoms, binaries, and ets tables). 
334 | 335 | Increments are values that are mostly useful when compared to a 336 | previous one to have an idea what they're doing, because otherwise 337 | they'd never stop increasing: bytes in and out of the node, number 338 | of garbage collector runs, words of memory that were garbage 339 | collected, and the global reductions count for the node. 340 | """ 341 | @spec node_stats( 342 | non_neg_integer, 343 | interval_ms, 344 | fold_fun :: (stats, acc :: term -> term), 345 | acc0 :: term 346 | ) :: 347 | acc1 :: term 348 | def node_stats(n, interval, fold_fun, init) do 349 | :recon.node_stats(n, interval, fold_fun, init) 350 | end 351 | 352 | ### OTP & Manipulations ### 353 | 354 | @doc """ 355 | Shorthand call to `get_state(pid_term, 5000)` 356 | """ 357 | @spec get_state(pid_term) :: term 358 | def get_state(pid_term), do: :recon.get_state(pid_term) 359 | 360 | @doc """ 361 | Fetch the internal state of an OTP process. Calls `:sys.get_state/2` 362 | directly in OTP R16B01+, and fetches it dynamically on older 363 | versions of OTP. 364 | """ 365 | @spec get_state(pid_term, timeout_ms) :: term 366 | def get_state(pid_term, timeout), do: :recon.get_state(pid_term, timeout) 367 | 368 | ### Code & Stuff ### 369 | 370 | @doc """ 371 | Equivalent `remote_load(nodes(), mod)`. 372 | """ 373 | @spec remote_load(module) :: term 374 | def remote_load(mod), do: :recon.remote_load(mod) 375 | 376 | @doc """ 377 | Loads one or more modules remotely, in a diskless manner. Allows to 378 | share code loaded locally with a remote node that doesn't have it. 379 | """ 380 | @spec remote_load(nodes, module) :: term 381 | def remote_load(nodes, mod), do: :recon.remote_load(nodes, mod) 382 | 383 | @doc """ 384 | Obtain the source code of a module compiled with `debug_info`. The 385 | returned list sadly does not allow to format the types and typed 386 | records the way they look in the original module, but instead goes 387 | to an intermediary form used in the AST. They will still be placed 388 | in the right module attributes, however. 389 | """ 390 | # @todo (Erlang Recon) Figure out a way to pretty-print typespecs 391 | # and records. 392 | @spec source(module) :: iolist 393 | def source(module), do: :recon.source(module) 394 | 395 | # Ports Info # 396 | 397 | @doc """ 398 | Returns a list of all TCP ports (the data type) open on the node. 399 | """ 400 | @spec tcp :: [port] 401 | def tcp(), do: :recon.tcp() 402 | 403 | @doc """ 404 | Returns a list of all UDP ports (the data type) open on the node. 405 | """ 406 | @spec udp :: [port] 407 | def udp(), do: :recon.udp() 408 | 409 | @doc """ 410 | Returns a list of all SCTP ports (the data type) open on the node. 411 | """ 412 | @spec sctp :: [port] 413 | def sctp(), do: :recon.sctp() 414 | 415 | @doc """ 416 | Returns a list of all file handles open on the node. 417 | """ 418 | @spec files :: [port] 419 | def files(), do: :recon.files() 420 | 421 | @doc """ 422 | Shows a list of all different ports on the node with their 423 | respective types. 424 | """ 425 | @spec port_types :: [{type :: charlist, count :: pos_integer}] 426 | def port_types(), do: :recon.port_types() 427 | 428 | @doc """ 429 | Fetches a given attribute from all inet ports (TCP, UDP, SCTP) and 430 | returns the biggest `num` consumers. 
431 | 432 | The values to be used can be the number of octets (bytes) sent, 433 | received, or both (`:send_oct`, `:recv_oct`, `:oct`, respectively), 434 | or the number of packets sent, received, or both (`:send_cnt`, 435 | `:recv_cnt`, `:cnt`, respectively). Individual absolute values for 436 | each metric will be returned in the 3rd position of the resulting 437 | tuple. 438 | """ 439 | # @todo Implement this function so it only stores `num` entries in 440 | # memory at any given time, instead of as many as there are 441 | # processes. 442 | @spec inet_count(inet_attr_name, non_neg_integer) :: [inet_attrs] 443 | def inet_count(attr, num), do: :recon.inet_count(attr, num) 444 | 445 | @doc """ 446 | Fetches a given attribute from all inet ports (TCP, UDP, SCTP) and 447 | returns the biggest entries, over a sliding time window. 448 | 449 | **Warning:** this function depends on data gathered at two 450 | snapshots, and then building a dictionary with entries to 451 | differentiate them. This can take a heavy toll on memory when you 452 | have many dozens of thousands of ports open. 453 | 454 | The values to be used can be the number of octets (bytes) sent, 455 | received, or both (`:send_oct`, `:recv_oct`, `:oct`, respectively), 456 | or the number of packets sent, received, or both (`:send_cnt`, 457 | `:recv_cnt`, `:cnt`, respectively). Individual absolute values for 458 | each metric will be returned in the 3rd position of the resulting 459 | tuple. 460 | """ 461 | @spec inet_window(inet_attr_name, non_neg_integer, time_ms) :: [inet_attrs] 462 | def inet_window(attr, num, time) when is_atom(attr) do 463 | :recon.inet_window(attr, num, time) 464 | end 465 | 466 | @doc """ 467 | Similar to `:erlang.port_info/1`, but allows more 468 | flexible port usage: usual ports, ports that were registered locally 469 | (an atom), ports represented as strings (`"#Port<0.2013>"`), or 470 | through an index lookup (`2013`, for the same result as 471 | `"#Port<0.2013>"`). 472 | 473 | 474 | Moreover, the function will try to fetch implementation-specific 475 | details based on the port type (only inet ports have this feature so 476 | far). For example, TCP ports will include information about the 477 | remote peer, transfer statistics, and socket options being used. 478 | 479 | The implementation-specific and the basic port info are sorted and 480 | categorized in broader categories (`port_info_type()`). 481 | """ 482 | @spec port_info(port_term) :: 483 | [{port_info_type, [{port_info_key, term}]}, ...] 484 | def port_info(port_term) do 485 | ReconLib.term_to_port(port_term) |> :recon.port_info() 486 | end 487 | 488 | @doc """ 489 | Similar to `:erlang.port_info/2`, but allows more 490 | flexible port usage: usual ports, ports that were registered locally 491 | (an atom), ports represented as strings (`"#Port<0.2013>"`), 492 | or through an index lookup (`2013`, for the same result as 493 | `"#Port<0.2013>"`). 494 | 495 | Moreover, the function allows fetching information by category as 496 | defined in `port_info_type()`, and although the type signature 497 | doesn't show it in the generated documentation, individual items 498 | accepted by `:erlang.port_info/2` are accepted, and lists of them 499 | too.
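
For instance, fetching only the I/O counters of a given port (a
sketch; the port name and the values shown are illustrative, not
real output):

    iex> Recon.port_info("#Port<0.2013>", :io)
    {:io, [input: 12938, output: 102]}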
500 | """ 501 | @spec port_info(port_term, port_info_type) :: 502 | {port_info_type, [{port_info_key, term}]} 503 | @spec port_info(port_term, [atom]) :: [{atom, term}] 504 | @spec port_info(port_term, atom) :: {atom, term} 505 | def port_info(port_term, type_or_keys) when is_binary(port_term) do 506 | to_char_list(port_term) |> :recon.port_info(type_or_keys) 507 | end 508 | 509 | def port_info(port_term, type_or_keys) do 510 | :recon.port_info(port_term, type_or_keys) 511 | end 512 | 513 | ### RPC Utils ### 514 | 515 | @doc """ 516 | Shorthand for `rpc([node()|nodes()], fun)` 517 | """ 518 | @spec rpc((() -> term)) :: rpc_result 519 | def rpc(fun), do: :recon.rpc(fun) 520 | 521 | @doc """ 522 | Shorthand for `rpc(nodes, fun, :infinity)` 523 | """ 524 | @spec rpc(nodes, (() -> term)) :: rpc_result 525 | def rpc(nodes, fun), do: :recon.rpc(nodes, fun) 526 | 527 | @doc """ 528 | Runs an arbitrary fn (of arity 0) over one or more nodes. 529 | """ 530 | @spec rpc(nodes, (() -> term), timeout_ms) :: rpc_result 531 | def rpc(nodes, fun, timeout), do: :recon.rpc(nodes, fun, timeout) 532 | 533 | @doc """ 534 | Shorthand for `named_rpc([node()|nodes()], fun)` 535 | """ 536 | @spec named_rpc((() -> term)) :: rpc_result 537 | def named_rpc(fun), do: :recon.named_rpc(fun) 538 | 539 | @doc """ 540 | Shorthand for `named_rpc(nodes, fun, :infinity)` 541 | """ 542 | @spec named_rpc(nodes, (() -> term)) :: rpc_result 543 | def named_rpc(nodes, fun), do: :recon.named_rpc(nodes, fun) 544 | 545 | @doc """ 546 | Runs an arbitrary fun (of arity 0) over one or more nodes, and 547 | returns the name of the node that computed a given result along with 548 | it, in a tuple. 549 | """ 550 | @spec named_rpc(nodes, (() -> term), timeout_ms) :: rpc_result 551 | def named_rpc(nodes, fun, timeout), do: :recon.named_rpc(nodes, fun, timeout) 552 | end 553 | -------------------------------------------------------------------------------- /lib/recon_alloc.ex: -------------------------------------------------------------------------------- 1 | defmodule ReconAlloc do 2 | require :recon_alloc 3 | 4 | @moduledoc """ 5 | Functions to deal with 6 | [Erlang VM's memory allocators](http://www.erlang.org/doc/man/erts_alloc.html), 7 | or particularly, to try to present the allocator data in a way that 8 | makes it simpler to discover possible problems. 9 | 10 | Tweaking Erlang VM memory allocators and their behaviour is a very 11 | tricky ordeal whenever you have to give up the default settings. 12 | This module (and its documentation) will try and provide helpful 13 | pointers to help in this task. 14 | 15 | This module should mostly be helpful to figure out **if** there is a 16 | problem, but will offer little help to figure out **what** is wrong. 17 | 18 | To figure this out, you need to dig deeper into the allocator data 19 | (obtainable with `allocators/0`), and/or have some precise knowledge 20 | about the type of load and work done by the VM to be able to assess 21 | what each reaction to individual tweak should be. 22 | 23 | A lot of trial and error might be required to figure out if tweaks 24 | have helped or not, ultimately. 25 | 26 | In order to help do offline debugging of memory allocator problems 27 | ReconAlloc also has a few functions that store snapshots of the 28 | memory statistics. 
29 | 30 | These snapshots can be used to freeze the current allocation values 31 | so that they do not change during analysis while using the regular 32 | functionality of this module, so that the allocator values can be 33 | saved, or that they can be shared, dumped, and reloaded for further 34 | analysis using files. See `snapshot_load/1` for a simple use-case. 35 | 36 | **Glossary:** 37 | 38 | - **sys_alloc** : System allocator, usually just malloc 39 | - **mseg_alloc** : Used by other allocators, can do mmap. Caches 40 | allocations 41 | - **temp_alloc** : Used for temporary allocations 42 | - **eheap_alloc** : Heap data (i.e. process heaps) allocator 43 | - **binary_alloc** : Global binary heap allocator 44 | - **ets_alloc** : ETS data allocator 45 | - **driver_alloc** : Driver data allocator 46 | - **sl_alloc** : Short-lived memory blocks allocator 47 | - **ll_alloc** : Long-lived data (i.e. Erlang code itself) 48 | allocator 49 | - **fix_alloc** : Frequently used fixed-size data allocator 50 | - **std_alloc** : Allocator for other memory blocks 51 | - **carrier** : 52 | When a given area of memory is allocated by the OS to the VM 53 | (through sys_alloc or mseg_alloc), it is put into a **carrier**. 54 | There are two kinds of carriers: multiblock and single block. The 55 | default carriers data is sent to are multiblock carriers, owned by 56 | a specific allocator (ets_alloc, binary_alloc, etc.). The specific 57 | allocator can thus do allocation for specific Erlang requirements 58 | within bits of memory that has been preallocated before. This 59 | allows more reuse, and we can even measure the cache hit rates 60 | `cache_hit_rates/0`. 61 | 62 | There is however a threshold above which an item in memory won't 63 | fit a multiblock carrier. When that happens, the specific 64 | allocator does a special allocation to a single block carrier. 65 | This is done by the allocator basically asking for space directly 66 | from sys_alloc or mseg_alloc rather than a previously multiblock 67 | area already obtained before. 68 | 69 | This leads to various allocation strategies where you decide to 70 | choose: 71 | 72 | * which multiblock carrier you're going to (if at all) 73 | * which block in that carrier you're going to 74 | 75 | See [the official documentation on erts_alloc](http://www.erlang.org/doc/man/erts_alloc.html) 76 | for more details. 77 | - **mbcs** : Multiblock carriers. 78 | - **sbcs** : Single block carriers. 79 | - **lmbcs** : Largest multiblock carrier size 80 | - **smbcs** : Smallest multiblock carrier size 81 | - **sbct** : Single block carrier threshold 82 | 83 | By default all sizes returned by this module are in bytes. You can 84 | change this by calling `set_unit/1`. 85 | """ 86 | 87 | ############# 88 | ### TYPES ### 89 | ############# 90 | 91 | @type allocator :: 92 | :temp_alloc 93 | | :eheap_alloc 94 | | :binary_alloc 95 | | :ets_alloc 96 | | :driver_alloc 97 | | :sl_alloc 98 | | :ll_alloc 99 | | :fix_alloc 100 | | :std_alloc 101 | @type instance :: non_neg_integer 102 | @type allocdata(t) :: {{allocator, instance}, t} 103 | 104 | # Snapshot handling 105 | @type memory :: [{atom, atom}] 106 | @type snapshot :: {memory, [allocdata(term)]} 107 | 108 | ############## 109 | ### Public ### 110 | ############## 111 | 112 | @doc """ 113 | Equivalent to `memory(key, :current)`. 
114 | """ 115 | @spec memory(:used | :allocated | :unused) :: pos_integer 116 | @spec memory(:usage) :: number 117 | @spec memory(:allocated_types | :allocated_instances) :: 118 | [{allocator, pos_integer}] 119 | def memory(key), do: :recon_alloc.memory(key) 120 | 121 | @doc """ 122 | Reports one of multiple possible memory values for the entire 123 | node depending on what is to be reported: 124 | 125 | - `:used` reports the memory that is actively used for allocated 126 | Elixir/Erlang data. 127 | - `:allocated` reports the memory that is reserved by the VM. It 128 | includes the memory used, but also the memory yet-to-be-used but 129 | still given by the OS. This is the amount you want if you're 130 | dealing with ulimit and OS-reported values. 131 | - `:allocated_types` reports the memory that is reserved by the 132 | VM grouped into the different util allocators. 133 | - `:allocated_instances` reports the memory that is reserved by the VM 134 | grouped into the different schedulers. Note that instance id 0 is 135 | the global allocator used to allocate data from non-managed 136 | threads, i.e. async and driver threads. 137 | - `:unused` reports the amount of memory reserved by the VM that is 138 | not being allocated. Equivalent to `:allocated - :used`. 139 | - `:usage` returns a percentage (0.0 .. 1.0) of `used/allocated` 140 | memory ratios. 141 | 142 | The memory reported by `:allocated` should roughly match what the OS 143 | reports. If this amount is different by a large margin, it may be 144 | the sign that someone is allocating memory in C directly, outside of 145 | Erlang VM's own allocator -- a big warning sign. There are currently 146 | three sources of memory allocation that are not counted towards this 147 | value: the cached segments in the mseg allocator, any memory 148 | allocated as a super carrier, and small pieces of memory allocated 149 | during start-up before the memory allocators are initialized. 150 | 151 | Also note that low memory usages can be the sign of fragmentation in 152 | memory, in which case exploring which specific allocator is at fault 153 | is recommended (see `fragmentation/1`). 154 | """ 155 | @spec memory(:used | :allocated | :unused, :current | :max) :: pos_integer 156 | @spec memory(:usage, :current | :max) :: number 157 | @spec memory(:allocated_types | :allocated_instances, :current | :max) :: 158 | [{allocator, pos_integer}] 159 | def memory(type, keyword), do: :recon_alloc.memory(type, keyword) 160 | 161 | @doc """ 162 | Compares the block sizes to the carrier sizes, both for single block 163 | (`sbcs`) and multiblock (`mbcs`) carriers. 164 | 165 | The returned results are sorted by a weight system that is somewhat 166 | likely to return the most fragmented allocators first, based on 167 | their percentage of use and the total size of the carriers, for both 168 | `sbcs` and `mbcs`. 169 | 170 | The values can both be returned for `:current` allocator values, and 171 | for `:max` allocator values. The current values hold the present 172 | allocation numbers, and max values, the values at the peak. 173 | Comparing both together can give an idea of whether the node is 174 | currently at its memory peak when possibly leaky, or if it 175 | isn't. This information can in turn influence the tuning of 176 | allocators to better fit sizes of blocks and/or carriers.
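
For example (a sketch; which allocators come first depends entirely
on the node), current and peak values can be compared side by side:

```
iex> ReconAlloc.fragmentation(:current)
iex> ReconAlloc.fragmentation(:max)
```

Entries whose usage ratios are low while their carrier sizes are
large are the most likely candidates for fragmentation problems.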
177 | """ 178 | @spec fragmentation(:current | :max) :: [allocdata([{atom, term}])] 179 | def fragmentation(keyword), do: :recon_alloc.fragmentation(keyword) 180 | 181 | @doc """ 182 | Looks at the `mseg_alloc` allocator (allocator used by all the 183 | allocators in `allocator/1`) and returns information relative to the 184 | cache hit rates. Unless memory has expected spiky behaviour, it 185 | should usually be above 0.80 (80%). 186 | 187 | Cache can be tweaked using three VM flags: `+MMmcs`, `+MMrmcbf`, and 188 | `+MMamcbf`. 189 | 190 | `+MMmcs` stands for the maximum amount of cached memory segments. 191 | Its default value is `10` and can be anything from 0 to 30. 192 | Increasing it first and verifying if cache hits get better should be 193 | the first step taken. 194 | 195 | The two other options specify what are the maximal values of a 196 | segment to cache, in relative (in percent) and absolute terms (in 197 | kilobytes), respectively. Increasing these may allow more segments 198 | to be cached, but should also add overheads to memory allocation. An 199 | Erlang node that has limited memory and increases these values may 200 | make things worse on that point. 201 | 202 | The values returned by this function are sorted by a weight 203 | combining the lower cache hit joined to the largest memory values 204 | allocated. 205 | """ 206 | @spec cache_hit_rates() :: [{{:instance, instance}, [{:hit_rate | :hits | :calls, term}]}] 207 | def cache_hit_rates(), do: :recon_alloc.cache_hit_rates() 208 | 209 | @doc """ 210 | Checks all allocators in `allocator/0` and returns the average block 211 | sizes being used for `mbcs` and `sbcs`. This value is interesting to 212 | use because it will tell us how large most blocks are. This can be 213 | related to the VM's largest multiblock carrier size (`lmbcs`) and 214 | smallest multiblock carrier size (`smbcs`) to specify allocation 215 | strategies regarding the carrier sizes to be used. 216 | 217 | This function isn't exceptionally useful unless you know you have 218 | some specific problem, say with sbcs/mbcs ratios (see 219 | `sbcs_to_mbcs/0`) or fragmentation for a specific allocator, and 220 | want to figure out what values to pick to increase or decrease sizes 221 | compared to the currently configured value. 222 | 223 | Do note that values for `lmbcs` and `smbcs` are going to be rounded 224 | up to the next power of two when configuring them. 225 | """ 226 | @spec average_block_sizes(:current | :max) :: 227 | [{allocator, [{:mbcs, :sbcs, number}]}] 228 | def average_block_sizes(keyword) do 229 | :recon_alloc.average_block_sizes(keyword) 230 | end 231 | 232 | @doc """ 233 | Compares the amount of single block carriers (`sbcs') vs the number 234 | of multiblock carriers (`mbcs') for each individual allocator in 235 | `allocator/0`. 236 | 237 | When a specific piece of data is allocated, it is compared to a 238 | threshold, called the **single block carrier threshold** 239 | (`sbct`). When the data is larger than the `sbct`, it gets sent to a 240 | single block carrier. When the data is smaller than the `sbct`, it 241 | gets placed into a multiblock carrier. 242 | 243 | mbcs are to be prefered to sbcs because they basically represent 244 | pre-allocated memory, whereas sbcs will map to one call to sys_alloc 245 | or mseg_alloc, which is more expensive than redistributing data that 246 | was obtained for multiblock carriers. 
Moreover, the VM is able to do 247 | specific work with mbcs that should help reduce fragmentation in 248 | ways sys_alloc or mmap usually won't. 249 | 250 | Ideally, most of the data should fit inside multiblock carriers. If 251 | most of the data ends up in `sbcs`, you may need to adjust the 252 | multiblock carrier sizes, specifically the maximal value (`lmbcs`) 253 | and the threshold (`sbct`). On 32 bit VMs, `sbct` is limited to 254 | 8MBs, but 64 bit VMs can go to pretty much any practical size. 255 | 256 | Given the value returned is a ratio of sbcs/mbcs, the higher the 257 | value, the worse the condition. The list is sorted accordingly. 258 | """ 259 | @spec sbcs_to_mbcs(:max | :current) :: [allocdata(term)] 260 | def sbcs_to_mbcs(keyword), do: :recon_alloc.sbcs_to_mbcs(keyword) 261 | 262 | @doc """ 263 | Returns a dump of all allocator settings and values. 264 | """ 265 | @spec allocators() :: [allocdata(term)] 266 | def allocators(), do: :recon_alloc.allocators() 267 | 268 | ######################### 269 | ### Snapshot handling ### 270 | ######################### 271 | 272 | @doc """ 273 | Take a new snapshot of the current memory allocator statistics. 274 | The snapshot is stored in the process dictionary of the calling 275 | process, with all the limitations that it implies (i.e. no 276 | garbage-collection). To unset the snapshot, see 277 | `snapshot_clear/0`. 278 | """ 279 | @spec snapshot() :: snapshot | :undefined 280 | def snapshot(), do: :recon_alloc.snapshot() 281 | 282 | @doc """ 283 | Clear the current snapshot in the process dictionary, if present, 284 | and return the value it had before being unset. 285 | """ 286 | @spec snapshot_clear() :: snapshot | :undefined 287 | def snapshot_clear(), do: :recon_alloc.snapshot_clear() 288 | 289 | @doc """ 290 | Prints a dump of the current snapshot stored by `snapshot/0`. Prints 291 | `:undefined` if no snapshot has been taken. 292 | """ 293 | @spec snapshot_print() :: :ok 294 | def snapshot_print() do 295 | IO.inspect(:recon_alloc.snapshot_get(), pretty: true) 296 | end 297 | 298 | @doc """ 299 | Returns the current snapshot stored by `snapshot/0`. Returns 300 | `:undefined` if no snapshot has been taken. 301 | """ 302 | @spec snapshot_get() :: snapshot | :undefined 303 | def snapshot_get(), do: :recon_alloc.snapshot_get() 304 | 305 | @doc """ 306 | Save the current snapshot taken by `snapshot/0` to a file. If there 307 | is no current snapshot, a snapshot of the current allocator 308 | statistics will be written to the file. 309 | """ 310 | @spec snapshot_save(:file.name()) :: :ok 311 | def snapshot_save(filename), do: :recon_alloc.snapshot_save(filename) 312 | 313 | @doc """ 314 | Loads a snapshot from a given file. The format of the data in the 315 | file can be either the same as output by `snapshot_save/1`, or the 316 | output obtained by evaluating the following Elixir expression (or 317 | its Erlang equivalent) and storing the result in a file in Erlang's 318 | term format. 319 | 320 | ``` 321 | {:erlang.memory(), 322 | for alloc <- :erlang.system_info(:alloc_util_allocators) ++ [:sys_alloc, :mseg_alloc] do 323 | {alloc, :erlang.system_info({:allocator, alloc})} 324 | end} 325 | ``` 326 | 327 | If the latter option is taken, please remember to add a full stop at 328 | the end of the resulting Erlang term, as this function uses 329 | Erlang's `:file.consult/1` to load the file.
330 | 331 | **Example usage:** 332 | 333 | On target machine: 334 | 335 | ``` 336 | iex> ReconAlloc.snapshot 337 | :undefined 338 | iex> ReconAlloc.memory(:used) 339 | 18411064 340 | iex> ReconAlloc.snapshot_save("recon_snapshot.terms") 341 | :ok 342 | ``` 343 | 344 | On other machine: 345 | 346 | ``` 347 | iex> ReconAlloc.snapshot_load("recon_snapshot.terms") 348 | :undefined 349 | iex> ReconAlloc.memory(:used) 350 | 18411064 351 | ``` 352 | """ 353 | @spec snapshot_load(:file.name()) :: snapshot | :undefined 354 | def snapshot_load(filename), do: :recon_alloc.snapshot_load(filename) 355 | 356 | ######################### 357 | ### Handling of units ### 358 | ######################### 359 | 360 | @doc """ 361 | Sets the current unit to be used by recon_alloc. This affects all 362 | functions that return bytes. 363 | 364 | E.g. 365 | 366 | ``` 367 | iex> ReconAlloc.memory(:used, :current) 368 | 17548752 369 | iex> ReconAlloc.set_unit(:kilobyte) 370 | :ok 371 | iex> ReconAlloc.memory(:used, :current) 372 | 17576.90625 373 | ``` 374 | """ 375 | @spec set_unit(:byte | :kilobyte | :megabyte | :gigabyte) :: :ok 376 | def set_unit(unit), do: :recon_alloc.set_unit(unit) 377 | end 378 | -------------------------------------------------------------------------------- /lib/recon_lib.ex: -------------------------------------------------------------------------------- 1 | defmodule ReconLib do 2 | require :recon_lib 3 | 4 | @moduledoc """ 5 | Regroups useful functionality used by recon when dealing with data 6 | from the node. The functions in this module allow quick runtime 7 | access to fancier behaviour than what would be done using the Recon 8 | module itself. 9 | """ 10 | 11 | @type diff :: [Recon.proc_attrs() | Recon.inet_attrs()] 12 | @type milliseconds :: non_neg_integer 13 | @type interval_ms :: non_neg_integer 14 | 15 | @type scheduler_id :: pos_integer 16 | @type sched_time :: 17 | {scheduler_id, active_time :: non_neg_integer, total_time :: non_neg_integer} 18 | 19 | @doc """ 20 | Compare two samples and return a list based on some key. The type 21 | mentioned for the structure is `diff()` (`{key, val, other}`), which 22 | is compatible with the `Recon.proc_attrs()` type. 23 | """ 24 | @spec sliding_window(first :: diff, last :: diff) :: diff 25 | def sliding_window(first, last) do 26 | :recon_lib.sliding_window(first, last) 27 | end 28 | 29 | @doc """ 30 | Runs a fun once, waits `ms`, runs the fun again, and returns both 31 | results. 32 | """ 33 | @spec sample(milliseconds, (() -> term)) :: 34 | {first :: term, second :: term} 35 | def sample(delay, fun), do: :recon_lib.sample(delay, fun) 36 | 37 | @doc """ 38 | Takes a list of terms, and counts how often each of them appears in 39 | the list. The list returned is in no particular order. 40 | """ 41 | @spec count([term]) :: [{term, count :: integer}] 42 | def count(terms), do: :recon_lib.count(terms) 43 | 44 | @doc """ 45 | Returns a list of all the open ports in the VM, coupled with 46 | one of the properties desired from `:erlang.port_info/1` and 47 | `:erlang.port_info/2`. 48 | """ 49 | @spec port_list(attr :: atom) :: [{port, term}] 50 | def port_list(attr), do: :recon_lib.port_list(attr) 51 | 52 | @doc """ 53 | Returns a list of all the open ports in the VM, but only if the 54 | `attr`'s resulting value matches `val`. `attr` must be a property 55 | accepted by `:erlang.port_info/2`.
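
For example (a sketch; `'tcp_inet'` is the name that
`:erlang.port_info/2` reports for TCP ports):

    iex> ReconLib.port_list(:name, 'tcp_inet')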
56 | """ 57 | @spec port_list(attr :: atom, term) :: [port] 58 | def port_list(attr, val), do: :recon_lib.port_list(attr, val) 59 | 60 | @doc """ 61 | Returns the attributes (`Recon.proc_attrs/0`) of all processes of 62 | the node, except the caller. 63 | """ 64 | @spec proc_attrs(term) :: [Recon.proc_attrs()] 65 | def proc_attrs(attr_name) do 66 | :recon_lib.proc_attrs(attr_name) 67 | end 68 | 69 | @doc """ 70 | Returns the attributes of a given process. This form of attributes 71 | is standard for most comparison functions for processes in recon. 72 | 73 | A special attribute is `binary_memory`, which will reduce the memory 74 | used by the process for binary data on the global heap. 75 | """ 76 | @spec proc_attrs(term, pid) :: {:ok, Recon.proc_attrs()} | {:error, term} 77 | def proc_attrs(attr_name, pid) do 78 | :recon_lib.proc_attrs(attr_name, pid) 79 | end 80 | 81 | @doc """ 82 | Returns the attributes (`Recon.inet_attrs/0`) of all inet ports (UDP, 83 | SCTP, TCP) of the node. 84 | """ 85 | @spec inet_attrs(term) :: [Recon.inet_attrs()] 86 | def inet_attrs(attr_name), do: :recon_lib.inet_attrs(attr_name) 87 | 88 | @doc """ 89 | Returns the attributes required for a given inet port (UDP, SCTP, 90 | TCP). This form of attributes is standard for most comparison 91 | functions for processes in recon. 92 | """ 93 | @spec inet_attrs(Recon.inet_attr_name(), port) :: 94 | {:ok, Recon.inet_attrs()} | {:error, term} 95 | def inet_attrs(attr, port), do: :recon_lib.inet_attrs(attr, port) 96 | 97 | @doc """ 98 | Equivalent of `pid(x, y, z)` in Elixir's `iex` shell. 99 | """ 100 | @spec triple_to_pid(non_neg_integer, non_neg_integer, non_neg_integer) :: pid 101 | def triple_to_pid(x, y, z), do: :recon_lib.triple_to_pid(x, y, z) 102 | 103 | @doc """ 104 | Transforms a given term to a pid. 105 | """ 106 | @spec term_to_pid(Recon.pid_term()) :: pid 107 | def term_to_pid(term) do 108 | pre_process_pid_term(term) |> :recon_lib.term_to_pid() 109 | end 110 | 111 | defp pre_process_pid_term({_a, _b, _c} = pid_term) do 112 | pid_term 113 | end 114 | 115 | defp pre_process_pid_term(<<"#PID", pid_term::binary>>) do 116 | to_char_list(pid_term) 117 | end 118 | 119 | defp pre_process_pid_term(pid_term) when is_binary(pid_term) do 120 | to_char_list(pid_term) 121 | end 122 | 123 | defp pre_process_pid_term(pid_term) do 124 | pid_term 125 | end 126 | 127 | @doc """ 128 | Transforms a given term to a port.
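
Accepted terms mirror `Recon.port_term/0`: a port, a registered port
name (an atom), a string such as `"#Port<0.2013>"`, or the port index
as an integer. A sketch (the port number is illustrative):

    iex> ReconLib.term_to_port("#Port<0.2013>")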
129 | """ 130 | @spec term_to_port(Recon.port_term()) :: port 131 | def term_to_port(term) when is_binary(term) do 132 | to_char_list(term) |> :recon_lib.term_to_port() 133 | end 134 | 135 | def term_to_port(term) do 136 | :recon_lib.term_to_port(term) 137 | end 138 | 139 | @doc """ 140 | Calls a given function every `interval` milliseconds and supports 141 | a map-like interface (each result is modified and returned) 142 | """ 143 | @spec time_map( 144 | n :: non_neg_integer, 145 | interval_ms, 146 | fun :: (state :: term -> {term, state :: term}), 147 | initial_state :: term, 148 | mapfun :: (term -> term) 149 | ) :: [term] 150 | def time_map(n, interval, fun, state, map_fun) do 151 | :recon_lib.time_map(n, interval, fun, state, map_fun) 152 | end 153 | 154 | @doc """ 155 | Calls a given function every `interval` milliseconds and supports 156 | a fold-like interface (each result is modified and accumulated) 157 | """ 158 | @spec time_fold( 159 | n :: non_neg_integer, 160 | interval_ms, 161 | fun :: (state :: term -> {term, state :: term}), 162 | initial_state :: term, 163 | foldfun :: (term, acc0 :: term -> acc1 :: term), 164 | initial_acc :: term 165 | ) :: [term] 166 | def time_fold(n, interval, fun, state, fold_fun, init) do 167 | :recon_lib.time_fold(n, interval, fun, state, fold_fun, init) 168 | end 169 | 170 | @doc """ 171 | Diffs two runs of :erlang.statistics(scheduler_wall_time) and 172 | returns usage metrics in terms of cores and 0..1 percentages. 173 | """ 174 | @spec scheduler_usage_diff(sched_time, sched_time) :: 175 | [{scheduler_id, usage :: number}] 176 | def scheduler_usage_diff(first, last) do 177 | :recon_lib.scheduler_usage_diff(first, last) 178 | end 179 | end 180 | -------------------------------------------------------------------------------- /lib/recon_trace.ex: -------------------------------------------------------------------------------- 1 | defmodule ReconTrace do 2 | require :recon_trace 3 | 4 | @moduledoc """ 5 | `ReconTrace` provides functions for tracing events in a safe 6 | manner for single Erlang virtual machine, currently for function 7 | calls only. Functionality includes: 8 | 9 | - Nicer to use interface (arguably) than `:dbg` or trace BIFs. 10 | - Protection against dumb decisions (matching all calls on a node 11 | being traced, for example) 12 | - Adding safe guards in terms of absolute trace count or 13 | rate-limitting 14 | - Nicer formatting than default traces 15 | 16 | ## Tracing Elixir and Erlang Code 17 | 18 | The Erlang Trace BIFs allow to trace any Elixir and Erlang code at 19 | all. They work in two parts: pid specifications, and trace patterns. 20 | 21 | Pid specifications let you decide which processes to target. They 22 | can be specific pids, `all` pids, `existing` pids, or `new` pids 23 | (those not spawned at the time of the function call). 24 | 25 | The trace patterns represent functions. Functions can be specified 26 | in two parts: specifying the modules, functions, and arguments, and 27 | then with Erlang match specifications to add constraints to 28 | arguments (see `calls/3` for details). 29 | 30 | What defines whether you get traced or not is the intersection of 31 | both: 32 | 33 | . 
_,--------,_ _,--------,_ 34 | ,-' `-,,-' `-, 35 | ,-' ,-' '-, `-, 36 | | Matching -' '- Matching | 37 | | Pids | Getting | Trace | 38 | | | Traced | Patterns | 39 | | -, ,- | 40 | '-, '-, ,-' ,-' 41 | '-,_ _,-''-,_ _,-' 42 | '--------' '--------' 43 | 44 | If either the pid specification excludes a process or a trace 45 | pattern excludes a given call, no trace will be received. 46 | 47 | ## Example Session 48 | 49 | First let's trace the `:queue.new` functions in any process: 50 | 51 | > ReconTrace.calls({:queue, :new, :_}, 1) 52 | 1 53 | 13:14:34.086078 <0.44.0> :queue.new 54 | Recon tracer rate limit tripped. 55 | 56 | The limit was set to `1` trace message at most, and `ReconTrace` 57 | let us know when that limit was reached. 58 | 59 | Let's instead look for all the `:queue.in/2` calls, to see what it 60 | is we're inserting in queues: 61 | 62 | > ReconTrace.calls({:queue, :in, 2}, 1) 63 | 1 64 | 13:14:55.365157 <0.44.0> :queue.in(a, {[], []}) 65 | Recon tracer rate limit tripped. 66 | 67 | In order to see the content we want, we should change the trace 68 | patterns to use a `fn` that matches on all arguments in a list 69 | (`_`) and returns `:return`. This last part will generate a second 70 | trace for each call that includes the return value: 71 | 72 | > ReconTrace.calls({:queue, :in, fn(_) -> :return end}, 3) 73 | 1 74 | 75 | 13:15:27.655132 <0.44.0> :queue.in(:a, {[], []}) 76 | 77 | 13:15:27.655467 <0.44.0> :queue.in/2 --> {[:a], []} 78 | 79 | 13:15:27.757921 <0.44.0> :queue.in(:a, {[], []}) 80 | Recon tracer rate limit tripped. 81 | 82 | Matching on argument lists can be done in a more complex manner: 83 | 84 | > ReconTrace.calls( 85 | ...> {:queue, :_, 86 | ...> fn([a, _]) when is_list(a); is_integer(a) andalso a > 1 -> :return end} 87 | ...> {10, 100} 88 | ...> ) 89 | 32 90 | 91 | 13:24:21.324309 <0.38.0> :queue.in(3, {[], []}) 92 | 93 | 13:24:21.371473 <0.38.0> :queue.in/2 --> {[3], []} 94 | 95 | 13:25:14.694865 <0.53.0> :queue.split(4, {[10, 9, 8, 7], [1, 2, 3, 4, 5, 6]}) 96 | 97 | 13:25:14.695194 <0.53.0> :queue.split/2 --> {{[4, 3, 2], [1]}, {[10, 9, 8, 7],[5, 6]}} 98 | 99 | > ReconTrace.clear 100 | :ok 101 | 102 | Note that in the pattern above, no specific function (`_`) was 103 | matched against. Instead, the `fn` used restricted functions to 104 | those having two arguments, the first of which is either a list or 105 | an integer greater than `1`. 106 | 107 | The limit was also set using `{10, 100}` instead of an integer, 108 | making the rate-limitting at 10 messages per 100 milliseconds, 109 | instead of an absolute value. 110 | 111 | Any tracing can be manually interrupted by calling 112 | `ReconTrace.clear/0`, or killing the shell process. 113 | 114 | Be aware that extremely broad patterns with lax rate-limitting (or 115 | very high absolute limits) may impact your node's stability in ways 116 | `ReconTrace` cannot easily help you with. 117 | 118 | In doubt, start with the most restrictive tracing possible, with low 119 | limits, and progressively increase your scope. 120 | 121 | See `calls/3` for more details and tracing possibilities. 
122 | 123 | ## Structure 124 | 125 | This library is production-safe due to taking the following 126 | structure for tracing: 127 | 128 | ``` 129 | [IO/Group leader] <---------------------, 130 | | | 131 | [shell] ---> [tracer process] ----> [formatter] 132 | ``` 133 | 134 | The tracer process receives trace messages from the node, and 135 | enforces limits in absolute terms or trace rates, before forwarding 136 | the messages to the formatter. This is done so the tracer can do as 137 | little work as possible and never block while building up a large 138 | mailbox. 139 | 140 | The tracer process is linked to the shell, and the formatter to the 141 | tracer process. The formatter also traps exits to be able to handle 142 | all received trace messages until the tracer termination, but will 143 | then shut down as soon as possible. 144 | 145 | In case the operator is tracing from a remote shell which gets 146 | disconnected, the links between the shell and the tracer should make 147 | it so tracing is automatically turned off once you disconnect. 148 | 149 | If sending output to the Group Leader is not desired, you may specify 150 | a different `pid()` via the option `:io_server` in the `calls/3` 151 | function. For instance to write the traces to a file you can do 152 | something like 153 | 154 | > {:ok, dev} = File.open("/tmp/trace", [:write]) 155 | > ReconTrace.calls({:queue, :in, fn(_) -> :return end}, 3, 156 | > [{:io_server, dev}]) 157 | 1 158 | > 159 | Recon tracer rate limit tripped. 160 | > File.close(dev). 161 | 162 | The only output still sent to the Group Leader is the rate limit 163 | being tripped, and any errors. The rest will be sent to the other IO 164 | server (see http://erlang.org/doc/apps/stdlib/io_protocol.html). 165 | """ 166 | 167 | ############# 168 | ### TYPES ### 169 | ############# 170 | 171 | @type matchspec :: [{[term], [term], [term]}] 172 | @type shellfun :: (term -> term) 173 | @type formatterfun :: (tuple -> iodata) 174 | @type millisecs :: non_neg_integer 175 | @type pidspec :: :all | :existing | :new | Recon.pid_term() 176 | @type max_traces :: non_neg_integer 177 | @type max_rate :: {max_traces, millisecs} 178 | 179 | # trace options 180 | # default: all 181 | @type options :: [ 182 | {:pid, pidspec | [pidspec, ...]} 183 | # default: formatter 184 | | {:timestamp, :formatter | :trace} 185 | # default: args 186 | | {:args, :args | :arity} 187 | # default: group_leader() 188 | | {:io_server, pid} 189 | # default: internal formatter 190 | | {:formatter, formatterfun} 191 | # match pattern options 192 | # default: global 193 | | {:scope, :global | :local} 194 | ] 195 | 196 | @type mod :: :_ | module 197 | @type f :: :_ | atom 198 | @type args :: :_ | 0..255 | matchspec | shellfun 199 | @type tspec :: {mod, f, args} 200 | @type max :: max_traces | max_rate 201 | @type num_matches :: non_neg_integer 202 | 203 | ############## 204 | ### Public ### 205 | ############## 206 | 207 | @doc """ 208 | Stops all tracing at once. 209 | """ 210 | @spec clear() :: :ok 211 | def clear() do 212 | :recon_trace.clear() 213 | end 214 | 215 | @doc """ 216 | Equivalent to `calls/3`. 
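
For example (a sketch), `ReconTrace.calls({:lists, :seq, 2}, 10)`
behaves like `ReconTrace.calls({:lists, :seq, 2}, 10, [])`, tracing
`:lists.seq/2` with at most 10 trace messages and the default
formatter.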
217 | """
218 | @spec calls(tspec | [tspec, ...], max) :: num_matches
219 | def calls({_mod, _fun, _args} = tspec, max) do
220 | :recon_trace.calls(to_erl_tspec(tspec), max, formatter: &format/1)
221 | end
222 | 
223 | def calls(tspecs, max) when is_list(tspecs) do
224 | Enum.map(tspecs, &to_erl_tspec/1)
225 | |> :recon_trace.calls(max, formatter: &format/1)
226 | end
227 | 
228 | @doc """
229 | Allows setting trace patterns and pid specifications to trace
230 | function calls.
231 | 
232 | The basic calls take the trace patterns as tuples of the form
233 | `{module, function, args}` where:
234 | 
235 | - `module` is any Elixir or Erlang module (e.g. `Enum` or `:queue`)
236 | - `function` is any atom representing a function, or the wildcard
237 | pattern (`:_`)
238 | - `args` is either the arity of a function (`0`..`255`), a wildcard
239 | pattern (`:_`),
240 | a [match specification](http://learnyousomeerlang.com/ets#you-have-been-selected)
241 | or a function from a shell session that can be transformed into
242 | a match specification
243 | 
244 | There is also an argument specifying either a maximum count (a
245 | number) of trace messages to be received, or a maximum frequency
246 | (`{num, millisecs}`).
247 | 
248 | Here are examples of things to trace:
249 | 
250 | - All calls from the `:queue` module, with 10 calls printed at most:
251 | `ReconTrace.calls({:queue, :_, :_}, 10)`
252 | - All calls to `:lists.seq(a, b)`, with 100 calls printed at most:
253 | `ReconTrace.calls({:lists, :seq, 2}, 100)`
254 | - All calls to `:lists.seq(a, b)`, with 100 calls per second at most:
255 | `ReconTrace.calls({:lists, :seq, 2}, {100, 1000})`
256 | - All calls to `:lists.seq(a, b, 2)` (all sequences increasing by two)
257 | with 100 calls at most:
258 | `ReconTrace.calls({:lists, :seq, fn([_, _, 2]) -> :ok end}, 100)`
259 | - All calls to `:erlang.iolist_to_binary/1` made with a binary as an
260 | argument already (kind of useless conversion!):
261 | `ReconTrace.calls({:erlang, :iolist_to_binary, fn([x]) when is_binary(x) -> :ok end}, 10)`
262 | - Calls to the `:queue` module only in a given process `pid`, at a rate
263 | of 50 per second at most:
264 | `ReconTrace.calls({:queue, :_, :_}, {50, 1000}, [pid: pid])`
265 | - Print the traces with the function arity instead of literal
266 | arguments:
267 | `ReconTrace.calls(tspec, max, [args: :arity])`
268 | - Matching the `filter/2` functions of both the `:dict` and `:lists`
269 | modules, across new processes only:
270 | `ReconTrace.calls([{:dict, :filter, 2}, {:lists, :filter, 2}], 10, [pid: :new])`
271 | - Tracing the `handle_call/3` functions of a given module for all
272 | new processes, and those of an existing one registered with
273 | `gproc`:
274 | `ReconTrace.calls({mod, :handle_call, 3}, {10, 100}, [{:pid, [{:via, :gproc, name}, :new]}])`
275 | - Show the result of a given function call:
276 | `ReconTrace.calls({mod, fun, fn(_) -> :return end}, max, opts)`
277 | or
278 | `ReconTrace.calls({mod, fun, [{:_, [], [{:return_trace}]}]}, max, opts)`,
279 | the important bit being the `:return` or the `{:return_trace}`
280 | match spec value.
281 | 
282 | There are a few more combinations possible, with multiple trace
283 | patterns per call, and more options:
284 | 
285 | - `{:pid, pid_spec}`: which processes to trace. Valid options are any
286 | of `:all`, `:new`, `:existing`, or a process descriptor (`{a, b, c}`,
287 | `"<0.a.b>"`, an atom representing a name, `{:global, name}`,
288 | `{:via, registrar, name}`, or a pid).
It's also possible to specify
289 | more than one by putting them in a list.
290 | - `{:timestamp, :formatter | :trace}`: by default, the formatter
291 | process adds timestamps to messages received. If accurate
292 | timestamps are required, it's possible to force the usage of
293 | timestamps within trace messages by adding the option
294 | `{:timestamp, :trace}`.
295 | - `{:args, :arity | :args}`: whether to print the arity of function
296 | calls or their literal arguments (the default).
297 | - `{:scope, :global | :local}`: by default, only `:global` (fully
298 | qualified) function calls are traced, not calls made internally.
299 | To force tracing of local calls, pass in `{:scope, :local}`. This
300 | is useful whenever you want to track the changes of code in a
301 | process that isn't called with `Module.fun(args)`, but just
302 | `fun(args)`.
303 | - `{:formatter, fn(term) -> io_data() end}`: override the default
304 | formatting functionality provided by ReconTrace.
305 | - `{:io_server, pid() | atom()}`: by default, recon logs to the
306 | current group leader, usually the shell. This option allows
307 | redirecting trace output to a different IO server (such as a file
308 | handle).
309 | 
310 | Also note that putting extremely large `max` values (e.g. `99999999`
311 | or `{10000, 1}`) will probably negate most of the safeguarding this
312 | library does and be dangerous to your node. Similarly, tracing
313 | extremely large numbers of function calls (all of them, or all of
314 | `:io` for example) can be risky if more trace messages are generated
315 | than any process on the node could ever handle, despite the
316 | precautions taken by this library.
317 | """
318 | @spec calls(tspec | [tspec, ...], max, options) :: num_matches
319 | def calls({_mod, _fun, _args} = tspec, max, opts) do
320 | :recon_trace.calls(to_erl_tspec(tspec), max, add_formatter(opts))
321 | end
322 | 
323 | def calls(tspecs, max, opts) when is_list(tspecs) do
324 | Enum.map(tspecs, &to_erl_tspec/1)
325 | |> :recon_trace.calls(max, add_formatter(opts))
326 | end
327 | 
328 | @doc """
329 | Returns `tspec` with its `shellfun` replaced with a `matchspec`.
330 | This function is used by `calls/2` and `calls/3`.
331 | """
332 | @spec to_erl_tspec(tspec) :: tspec
333 | def to_erl_tspec({mod, fun, shellfun}) when is_function(shellfun) do
334 | {mod, fun, fun_to_match_spec(shellfun)}
335 | end
336 | 
337 | def to_erl_tspec({_mod, _fun, _arity_or_matchspec} = tspec) do
338 | tspec
339 | end
340 | 
341 | @doc """
342 | The default trace formatting functionality provided by ReconTrace.
343 | This can be overridden by passing
344 | `{:formatter, fn(term) -> io_data() end}` as an option to `calls/3`.
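
For instance, a custom formatter can wrap this default one to tag each
line (a sketch; any one-argument function returning iodata will do):

> ReconTrace.calls({:lists, :seq, 2}, 10,
>                  [formatter: fn(msg) -> ["[trace] ", ReconTrace.format(msg)] end])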
345 | """ 346 | @spec format(trace_msg :: tuple) :: iodata 347 | def format(trace_msg) do 348 | {type, pid, {hour, min, sec}, trace_info} = extract_info(trace_msg) 349 | header = :io_lib.format('~n~2.2.0w:~2.2.0w:~9.6.0f ~p', [hour, min, sec, pid]) 350 | body = format_body(type, trace_info) |> String.replace("~", "~~") 351 | '#{header} #{body}\n' 352 | end 353 | 354 | ############### 355 | ### Private ### 356 | ############### 357 | 358 | defp add_formatter(opts) do 359 | case :proplists.get_value(:formatter, opts) do 360 | func when is_function(func, 1) -> 361 | opts 362 | 363 | _ -> 364 | [{:formatter, &format/1} | opts] 365 | end 366 | end 367 | 368 | defp format_body(:receive, [msg]) do 369 | "< #{inspect(msg, pretty: true)}" 370 | end 371 | 372 | defp format_body(:send, [msg, to]) do 373 | " > #{inspect(to, pretty: true)}: #{inspect(msg, pretty: true)}" 374 | end 375 | 376 | defp format_body(:send_to_non_existing_process, [msg, to]) do 377 | " > (non_existent) #{inspect(to, pretty: true)}: #{inspect(msg, pretty: true)}" 378 | end 379 | 380 | defp format_body(:call, [{m, f, args}]) do 381 | "#{format_module(m)}.#{f}#{format_args(args)}" 382 | end 383 | 384 | defp format_body(:return_to, [{m, f, arity}]) do 385 | "#{format_module(m)}.#{f}/#{arity}" 386 | end 387 | 388 | defp format_body(:return_from, [{m, f, arity}, return]) do 389 | "#{format_module(m)}.#{f}/#{arity} --> #{inspect(return, pretty: true)}" 390 | end 391 | 392 | defp format_body(:exception_from, [{m, f, arity}, {class, val}]) do 393 | "#{format_module(m)}.#{f}/#{arity} #{class} #{inspect(val, pretty: true)}" 394 | end 395 | 396 | defp format_body(:spawn, [spawned, {m, f, args}]) do 397 | "spawned #{inspect(spawned, pretty: true)} as #{format_module(m)}.#{f}#{format_args(args)}" 398 | end 399 | 400 | defp format_body(:exit, [reason]) do 401 | "EXIT #{inspect(reason, pretty: true)}" 402 | end 403 | 404 | defp format_body(:link, [linked]) do 405 | "link(#{inspect(linked, pretty: true)})" 406 | end 407 | 408 | defp format_body(:unlink, [linked]) do 409 | "unlink(#{inspect(linked, pretty: true)})" 410 | end 411 | 412 | defp format_body(:getting_linked, [linker]) do 413 | "getting linked by #{inspect(linker, pretty: true)}" 414 | end 415 | 416 | defp format_body(:getting_unlinked, [unlinker]) do 417 | "getting unlinked by #{inspect(unlinker, pretty: true)}" 418 | end 419 | 420 | defp format_body(:register, [name]) do 421 | "registered as #{inspect(name, pretty: true)}" 422 | end 423 | 424 | defp format_body(:unregister, [name]) do 425 | "no longer registered as #{inspect(name, pretty: true)}" 426 | end 427 | 428 | defp format_body(:in, [{m, f, arity}]) do 429 | "scheduled in for #{format_module(m)}.#{f}/#{arity}" 430 | end 431 | 432 | defp format_body(:in, [0]) do 433 | "scheduled in" 434 | end 435 | 436 | defp format_body(:out, [{m, f, arity}]) do 437 | "scheduled out from #{format_module(m)}.#{f}/#{arity}" 438 | end 439 | 440 | defp format_body(:out, [0]) do 441 | "scheduled out" 442 | end 443 | 444 | defp format_body(:gc_start, [info]) do 445 | "gc beginning -- heap #{calc_total_heap_size(info)} bytes" 446 | end 447 | 448 | defp format_body(:gc_end, [info]) do 449 | "gc finished -- heap #{calc_total_heap_size(info)} bytes" 450 | end 451 | 452 | defp format_body(type, trace_info) do 453 | "unknown trace type #{inspect(type, pretty: true)} -- #{inspect(trace_info, pretty: true)}" 454 | end 455 | 456 | defp extract_info(trace_msg) do 457 | case :erlang.tuple_to_list(trace_msg) do 458 | [:trace_ts, pid, type | info] -> 459 | 
{trace_info, [timestamp]} = :lists.split(:erlang.length(info) - 1, info) 460 | {type, pid, to_hms(timestamp), trace_info} 461 | 462 | [:trace, pid, type | trace_info] -> 463 | {type, pid, to_hms(:os.timestamp()), trace_info} 464 | end 465 | end 466 | 467 | defp to_hms({_, _, micro} = stamp) do 468 | {_, {h, m, secs}} = :calendar.now_to_local_time(stamp) 469 | seconds = rem(secs, 60) + micro / 1_000_000 470 | {h, m, seconds} 471 | end 472 | 473 | defp to_hms(_) do 474 | {0, 0, 0} 475 | end 476 | 477 | defp format_module(module_atom) do 478 | to_string(module_atom) |> format_module1 479 | end 480 | 481 | defp format_module1(<<"Elixir.", module_str::binary>>) do 482 | module_str 483 | end 484 | 485 | defp format_module1(module_str) do 486 | ":" <> module_str 487 | end 488 | 489 | defp format_args(arity) when is_integer(arity) do 490 | "/#{arity}" 491 | end 492 | 493 | defp format_args(args) when is_list(args) do 494 | arg_str = Enum.map(args, &inspect(&1, pretty: true)) |> Enum.join(", ") 495 | "(" <> arg_str <> ")" 496 | end 497 | 498 | defp calc_total_heap_size(info) do 499 | info[:heap_size] + info[:old_heap_size] + info[:mbuf_size] 500 | end 501 | 502 | defp fun_to_match_spec(shell_fun) do 503 | case :erl_eval.fun_data(shell_fun) do 504 | {:fun_data, import_list, clauses} -> 505 | case :ms_transform.transform_from_shell(:dbg, clauses, import_list) do 506 | {:error, [{_, [{_, _, code} | _]} | _], _} -> 507 | IO.puts("Error: #{:ms_transform.format_error(code)}") 508 | {:error, :transform_error} 509 | 510 | [{args, gurds, [:return]}] -> 511 | [{args, gurds, [{:return_trace}]}] 512 | 513 | match_spec -> 514 | match_spec 515 | end 516 | 517 | false -> 518 | exit(:shell_funs_only) 519 | end 520 | end 521 | end 522 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule ReconEx.Mixfile do 2 | use Mix.Project 3 | 4 | @version "0.9.1" 5 | 6 | def project do 7 | [ 8 | app: :recon_ex, 9 | version: @version, 10 | elixir: "~> 1.1", 11 | description: "Elixir wrapper for Recon, diagnostic tools for production use", 12 | package: [ 13 | maintainers: ["Tatsuya Kawano"], 14 | licenses: ["BSD 3-clause"], 15 | links: %{"GitHub" => "https://github.com/tatsuya6502/recon_ex"} 16 | ], 17 | build_embedded: Mix.env() == :prod, 18 | start_permanent: Mix.env() == :prod, 19 | deps: deps() 20 | ] 21 | end 22 | 23 | def application do 24 | [applications: [:logger, :recon]] 25 | end 26 | 27 | defp deps do 28 | [ 29 | {:recon, "~> 2.5", manager: :rebar}, 30 | {:ex_doc, "~> 0.10.0", only: :dev}, 31 | {:earmark, "~> 0.1", only: :dev} 32 | # {:markdown, github: "devinus/markdown", only: :test} 33 | ] 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "earmark": {:hex, :earmark, "0.2.1", "ba6d26ceb16106d069b289df66751734802777a3cbb6787026dd800ffeb850f3", [:mix], [], "hexpm", "c86afb8d22a5aa8315afd4257c7512011c0c9a48b0fea43af7612836b958098b"}, 3 | "ex_doc": {:hex, :ex_doc, "0.10.0", "f49c237250b829df986486b38f043e6f8e19d19b41101987f7214543f75947ec", [:mix], [{:earmark, "~> 0.1.17 or ~> 0.2", [hex: :earmark, repo: "hexpm", optional: true]}], "hexpm", "3d9f15777aa3fb62700d5984eb09ceeb6c1574d61be0f70801e3390e36942b35"}, 4 | "recon": {:hex, :recon, "2.5.1", "430ffa60685ac1efdfb1fe4c97b8767c92d0d92e6e7c3e8621559ba77598678a", [:mix, :rebar3], [], 
"hexpm", "5721c6b6d50122d8f68cccac712caa1231f97894bab779eff5ff0f886cb44648"}, 5 | } 6 | -------------------------------------------------------------------------------- /test/recon_trace_test.exs: -------------------------------------------------------------------------------- 1 | defmodule ReconTraceTest do 2 | use ExUnit.Case 3 | # doctest ReconTrace 4 | 5 | import ReconTrace, only: [to_erl_tspec: 1, format: 1] 6 | 7 | test "to_erl_tspec/1" do 8 | shellfun = make_shellfun("fn([n, _]) when n > 10 -> :ok end") 9 | matchspec = [{[:"$1", :_], [{:>, :"$1", 10}], [:ok]}] 10 | 11 | assert to_erl_tspec({:queue, :in, shellfun}) == {:queue, :in, matchspec} 12 | assert to_erl_tspec({:queue, :in, matchspec}) == {:queue, :in, matchspec} 13 | assert to_erl_tspec({:queue, :in, 2}) == {:queue, :in, 2} 14 | 15 | shellfun = make_shellfun("fn([:item, _]) -> :return end") 16 | matchspec = [{[:item, :_], [], [{:return_trace}]}] 17 | 18 | assert to_erl_tspec({:queue, :in, shellfun}) == {:queue, :in, matchspec} 19 | end 20 | 21 | test "format/1 for :call" do 22 | ts = :os.timestamp() 23 | 24 | # Format an Elixir module call with atom and function 25 | assert format( 26 | {:trace_ts, pid(0, 1, 2), :call, {Emum, :each, [[:hello, "world"], &IO.puts/1]}, ts} 27 | ) == 28 | '\n#{format_timestamp(ts)} <0.1.2> Emum.each([:hello, "world"], &IO.puts/1)\n' 29 | 30 | # Format an Erlang module call 31 | assert format({:trace_ts, pid(0, 1, 2), :call, {:lists, :seq, [1, 10]}, ts}) == 32 | '\n#{format_timestamp(ts)} <0.1.2> :lists.seq(1, 10)\n' 33 | end 34 | 35 | test "format/1 for :return_to" do 36 | ts = :os.timestamp() 37 | 38 | assert format({:trace_ts, pid(0, 1, 2), :return_from, {Emum, :each, 2}, :ok, ts}) == 39 | '\n#{format_timestamp(ts)} <0.1.2> Emum.each/2 --> :ok\n' 40 | end 41 | 42 | ################# 43 | ### Utilities ### 44 | ################# 45 | 46 | @spec make_shellfun(binary) :: ([term] -> term) 47 | defp make_shellfun(fun_str) do 48 | to_char_list(fun_str) |> Code.eval_string([]) |> elem(0) 49 | end 50 | 51 | # defp make_shellfun_erl(erl_fun_str) do 52 | # {:ok, tokens, _} = to_char_list(erl_fun_str) |> :erl_scan.string 53 | # {:ok, [expr]} = :erl_parse.parse_exprs(tokens) 54 | # {:value, shellfun, _} = :erl_eval.expr(expr, []) 55 | # shellfun 56 | # end 57 | 58 | defp pid(a, b, c) do 59 | :erlang.list_to_pid('<#{a}.#{b}.#{c}>') 60 | end 61 | 62 | defp to_hms({_, _, micro} = ts) do 63 | {_, {h, m, secs}} = :calendar.now_to_local_time(ts) 64 | seconds = rem(secs, 60) + micro / 1_000_000 65 | {h, m, seconds} 66 | end 67 | 68 | defp format_hms({h, m, s}) do 69 | :io_lib.format('~2.2.0w:~2.2.0w:~9.6.0f', [h, m, s]) 70 | end 71 | 72 | defp format_timestamp(ts) do 73 | to_hms(ts) |> format_hms 74 | end 75 | end 76 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | ExUnit.start() 2 | --------------------------------------------------------------------------------